"""Python wrappers for Datasets."""
import abc
import functools
import multiprocessing
import queue
import threading
import warnings

import numpy as np

from tensorflow.core.framework import dataset_metadata_pb2
from tensorflow.core.framework import dataset_options_pb2
from tensorflow.core.framework import graph_pb2
from tensorflow.python import tf2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.ops import structured_function
from tensorflow.python.data.util import nest
from tensorflow.python.data.util import random_seed
from tensorflow.python.data.util import structure
from tensorflow.python.data.util import traverse
from tensorflow.python.eager import context
from tensorflow.python.framework import auto_control_deps
from tensorflow.python.framework import auto_control_deps_utils
from tensorflow.python.framework import composite_tensor
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import function
from tensorflow.python.framework import ops
from tensorflow.python.framework import smart_cond
from tensorflow.python.framework import sparse_tensor as sparse_tensor_lib
from tensorflow.python.framework import tensor_shape
from tensorflow.python.framework import tensor_spec
from tensorflow.python.framework import tensor_util
from tensorflow.python.framework import type_spec
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import gen_dataset_ops
from tensorflow.python.ops import gen_experimental_dataset_ops as ged_ops
from tensorflow.python.ops import gen_io_ops
from tensorflow.python.ops import gen_stateless_random_ops
from tensorflow.python.ops import logging_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import script_ops
from tensorflow.python.ops import string_ops
from tensorflow.python.ops.ragged import ragged_tensor
from tensorflow.python.trackable import asset
from tensorflow.python.trackable import base as tracking_base
from tensorflow.python.trackable import resource
from tensorflow.python.profiler import trace
from tensorflow.python.util import deprecation
from tensorflow.python.util import lazy_loader
from tensorflow.python.util.compat import collections_abc
from tensorflow.python.util.tf_export import tf_export

# Loaded lazily to avoid circular dependencies.
wrap_function = lazy_loader.LazyLoader(
    "wrap_function", globals(), "tensorflow.python.eager.wrap_function")
def_function = lazy_loader.LazyLoader(
    "def_function", globals(), "tensorflow.python.eager.def_function")
parsing_ops = lazy_loader.LazyLoader(
    "parsing_ops", globals(), "tensorflow.python.ops.parsing_ops")

ops.NotDifferentiable("ReduceDataset")

# A constant that can be used to enable auto-tuning.
AUTOTUNE = -1
tf_export("data.AUTOTUNE").export_constant(__name__, "AUTOTUNE")
tf_export("data.experimental.AUTOTUNE").export_constant(__name__, "AUTOTUNE")

# Compression schemes understood by dataset save/load, and the file name used
# to store the serialized element spec next to a saved dataset.
COMPRESSION_GZIP = "GZIP"
COMPRESSION_SNAPPY = "NONE"
DATASET_SPEC_FILENAME = "dataset_spec.pb"

# Cardinality sentinels for datasets whose size is infinite or unknown.
INFINITE = -1
UNKNOWN = -2
tf_export("data.INFINITE_CARDINALITY").export_constant(__name__, "INFINITE")
tf_export("data.UNKNOWN_CARDINALITY").export_constant(__name__, "UNKNOWN")

# When enabled, tf.data pipelines run without autotuning and without the
# static optimizations that introduce parallelism (see `_apply_debug_options`).
DEBUG_MODE = False


def _validate_and_encode(name):
  if not name.isidentifier():
    raise ValueError(
        "Invalid `name`. The argument `name` needs to be a valid identifier. "
        "Value is considered a valid identifier if it only contains "
        "alphanumeric characters (a-z), (A-Z), and (0-9), or underscores (_). "
        "A valid identifier cannot start with a number, or contain any "
        "spaces.")
  return name.encode("utf-8")


def _get_type(value):
  """Returns the type of `value` if it is a TypeSpec."""
  if isinstance(value, type_spec.TypeSpec):
    return value.value_type
  else:
    return type(value)


@tf_export("data.Dataset", v1=[])
class DatasetV2(
    collections_abc.Iterable,
    tracking_base.Trackable,
    composite_tensor.CompositeTensor,
    metaclass=abc.ABCMeta):
  """Represents a potentially large set of elements.

  The `tf.data.Dataset` API supports writing descriptive and efficient input
  pipelines. `Dataset` usage follows a common pattern:

  1. Create a source dataset from your input data.
  2. Apply dataset transformations to preprocess the data.
  3. Iterate over the dataset and process the elements.

  Iteration happens in a streaming fashion, so the full dataset does not need to
  fit into memory.

  Source Datasets:

  The simplest way to create a dataset is to create it from a python `list`:

  >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  >>> for element in dataset:
  ...   print(element)
  tf.Tensor(1, shape=(), dtype=int32)
  tf.Tensor(2, shape=(), dtype=int32)
  tf.Tensor(3, shape=(), dtype=int32)

  To process lines from files, use `tf.data.TextLineDataset`:

  >>> dataset = tf.data.TextLineDataset(["file1.txt", "file2.txt"])

  To process records written in the `TFRecord` format, use `TFRecordDataset`:

  >>> dataset = tf.data.TFRecordDataset(["file1.tfrecords", "file2.tfrecords"])

  To create a dataset of all files matching a pattern, use
  `tf.data.Dataset.list_files`:

  ```python
  dataset = tf.data.Dataset.list_files("/path/*.txt")
  ```

  See `tf.data.FixedLengthRecordDataset` and `tf.data.Dataset.from_generator`
  for more ways to create datasets.

  Transformations:

  Once you have a dataset, you can apply transformations to prepare the data for
  your model:

  >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  >>> dataset = dataset.map(lambda x: x*2)
  >>> list(dataset.as_numpy_iterator())
  [2, 4, 6]

  Common Terms:

  **Element**: A single output from calling `next()` on a dataset iterator.
    Elements may be nested structures containing multiple components. For
    example, the element `(1, (3, "apple"))` has one tuple nested in another
    tuple. The components are `1`, `3`, and `"apple"`.

  **Component**: The leaf in the nested structure of an element.

  Supported types:

  Elements can be nested structures of tuples, named tuples, and dictionaries.
  Note that Python lists are *not* treated as nested structures of components.
  Instead, lists are converted to tensors and treated as components. For
  example, the element `(1, [1, 2, 3])` has only two components; the tensor `1`
  and the tensor `[1, 2, 3]`. Element components can be of any type
  representable by `tf.TypeSpec`, including `tf.Tensor`, `tf.data.Dataset`,
  `tf.sparse.SparseTensor`, `tf.RaggedTensor`, and `tf.TensorArray`.

  ```python
  a = 1 # Integer element
  b = 2.0 # Float element
  c = (1, 2) # Tuple element with 2 components
  d = {"a": (2, 2), "b": 3} # Dict element with 3 components
  Point = collections.namedtuple("Point", ["x", "y"])
  e = Point(1, 2) # Named tuple
  f = tf.data.Dataset.range(10) # Dataset element
  ```
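
  For example, `element_spec` mirrors this nested structure (a small
  illustrative sketch):

  ```python
  dataset = tf.data.Dataset.from_tensors((1, {"a": (2, 2), "b": 3}))
  dataset.element_spec
  # ==> (TensorSpec(shape=(), dtype=tf.int32, name=None),
  #      {'a': (TensorSpec(...), TensorSpec(...)), 'b': TensorSpec(...)})
  ```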

  For more information,
  read [this guide](https://www.tensorflow.org/guide/data).
  """

  def __init__(self, variant_tensor):
    """Creates a DatasetV2 object.

    This is a difference between DatasetV1 and DatasetV2. DatasetV1 does not
    take anything in its constructor whereas in the DatasetV2, we expect
    subclasses to create a variant_tensor and pass it in to the super() call.

    Args:
      variant_tensor: A DT_VARIANT tensor that represents the dataset.
    """
    self._variant_tensor_attr = variant_tensor
    self._graph_attr = ops.get_default_graph()

    # Initialize the options for this dataset and merge in the options of its
    # inputs.
    self._options_attr = options_lib.Options()
    for input_dataset in self._inputs():
      input_options = None
      if isinstance(input_dataset, DatasetV1):
        # V1 datasets wrap a V2 dataset in their `_dataset` attribute.
        if hasattr(input_dataset, "_dataset"):
          if not isinstance(input_dataset._dataset, DatasetV2):
            raise TypeError(
                f"Each input of dataset {type(self)} should be a subclass of "
                f"`tf.data.Dataset` but encountered "
                f"{type(input_dataset._dataset)}.")
          input_options = input_dataset._dataset._options_attr
      elif isinstance(input_dataset, DatasetV2):
        input_options = input_dataset._options_attr
      else:
        raise TypeError(
            f"Each input of dataset {type(self)} should be a subclass of "
            f"`tf.data.Dataset` but encountered {type(input_dataset)}.")
      if input_options is not None:
        self._options_attr = self._options_attr.merge(input_options)
    self._options_attr._set_mutable(False)  # pylint: disable=protected-access

  @property
  def _variant_tensor(self):
    return self._variant_tensor_attr

  @_variant_tensor.setter
  def _variant_tensor(self, _):
    raise ValueError("The `_variant_tensor` property cannot be modified.")

  @deprecation.deprecated_args(None, "Use external_state_policy instead",
                               "allow_stateful")
  def _as_serialized_graph(
      self,
      allow_stateful=None,
      strip_device_assignment=None,
      external_state_policy=options_lib.ExternalStatePolicy.WARN):
    """Produces serialized graph representation of the dataset.

    Args:
      allow_stateful: If true, we allow stateful ops to be present in the graph
        def. In that case, the state in these ops would be thrown away.
      strip_device_assignment: If true, non-local (i.e. job and task) device
        assignment is stripped from ops in the serialized graph.
      external_state_policy: The ExternalStatePolicy enum that determines how we
        handle input pipelines that depend on external state. By default, its
        set to WARN.

    Returns:
      A scalar `tf.Tensor` of `tf.string` type, representing this dataset as a
      serialized graph.
    )external_state_policystrip_device_assignment)r\   r^   )r\   )rE   r   Zdataset_to_graph_v2rZ   Zdataset_to_graph)rV   r\   r^   r]   policyr>   r>   r?   _as_serialized_graph  s    zDatasetV2._as_serialized_graphc                s   i }x(|j D ]  jdrd| jd < qW |s6i S x|j D ]x  j|kr> jd j}t 2 t	d t
| tj }W dQ R X W dQ R X  fddt|D | j< q>W |S )a  Finds and tracks nodes in `graph_def` that refer to asset files.

    Args:
      graph_def: Serialized graph representation of this dataset.

    Returns:
      A dictionary mapping the node name of an asset constant to a tracked
      `asset.Asset` object.
    ZFileIdentityNr   rE   CPUc                s4   g | ],\}}j t| jd  t| ddqS )r[   T)r=   	overwrite)Z_track_trackabler(   ZAssetr=   str).0in)noderV   r>   r?   
<listcomp>L  s   z1DatasetV2._maybe_track_assets.<locals>.<listcomp>)rg   r=   
startswithinputattrZtensorr   
eager_moder   devicer2   Zparse_tensorSerializeToStringr   stringnumpy	enumerate)rV   	graph_defasset_trackerZtensor_protoZ
node_valuer>   )rg   rV   r?   _maybe_track_assets3  s    

&zDatasetV2._maybe_track_assetsc                sZ   |t jjkri S tjg dd fdd}|  tt j|f|}t	 j
||d< |S )NF)Zinput_signatureZ	autographc                 s       } | S )N)_trace_variant_creation)r*   )rV   r>   r?   _creatorZ  s    
z/DatasetV2._trackable_children.<locals>._creatorZ_variant_tracker)tracking_baseSaveType
SAVEDMODELr1   r   Zget_concrete_functionsuperrG   _trackable_children_VariantTrackerrZ   )rV   	save_typekwargsrv   children)	__class__)rV   r?   r{   R  s    
zDatasetV2._trackable_childrenc          
   C   s:  | j }t|tjstdt : td$ t	 
| jtjjd }W dQ R X W dQ R X g }x|jD ]}|jdkrn|j}qnW t|dkrtdt| d| d	|d
 }i }t jr| |}x0|D ](}dd || D }	tj|	d
d||< qW tj|g |d |d}
x |  D ]}|j|
j qW |
S )a%  Traces a function which outputs a variant `tf.Tensor` for this dataset.

    Note that creating this function involves evaluating an op, and is currently
    only supported when executing eagerly.

    Returns:
      A zero-argument `ConcreteFunction` which outputs a variant `tf.Tensor`.
    zConstructing a tf.function that reproduces a given dataset is only supported for datasets created eagerly. Please file a feature request if this is important to you.ra   )r]   NZ_Retval   zBDataset graph is expected to only have one return value but found z return values: rI   r   c             S   s   g | ]}t j|jd dqS )r   )axis)r   expand_dimsZ
asset_path)rd   r(   r>   r>   r?   rh     s   z5DatasetV2._trace_variant_creation.<locals>.<listcomp>)r   z:0)inputsoutputsZcaptures) rZ   rA   r   ZEagerTensorNotImplementedErrorr   rl   rm   r   ZGraphDef
FromStringr`   rM   ExternalStatePolicyFAILrp   rg   oprj   lenAssertionErrorrK   Zbuilding_functionrt   r   concatr0   Zfunction_from_graph_def
_functionsr   add_to_graphgraph)rV   variantrr   Zoutput_node_namesrg   Zoutput_node_nameZfile_path_nodesrs   keyZassets_listZvariant_functionZused_functionr>   r>   r?   ru   e  s<    	*




z!DatasetV2._trace_variant_creationc             C   s   t t|  ddS )z4Returns a list of the input datasets of the dataset.z
._inputs()N)r   rD   )rV   r>   r>   r?   rP     s    zDatasetV2._inputsc             C   s   | j S )N)rL   )rV   r>   r>   r?   _graph  s    zDatasetV2._graphc             C   s   t dd S )Nz)The `_graph` property cannot be modified.)r;   )rV   r[   r>   r>   r?   r     s    c             C   s   g S )zReturns a list of functions associated with this dataset.

    Returns:
      A list of `StructuredFunctionWrapper` objects.
    r>   )rV   r>   r>   r?   r     s    zDatasetV2._functionsc             C   s   t | jS )z,Returns the options tensor for this dataset.)r   Zget_optionsrZ   )rV   r>   r>   r?   _options  s    zDatasetV2._optionsc             C   s6   t  }t|dk	r2tjt|}|| |S )z2Converts options tensor to tf.data.Options object.N)rM   rN   r   constant_valuer   r   Z_from_proto)clsZserialized_optionsr   Zpbr>   r>   r?   _options_tensor_to_options  s    

z$DatasetV2._options_tensor_to_optionsc             C   s4   t  r$| |  }|d |S td | jS )zReturns the options for this dataset and its inputs.

    Returns:
      A `tf.data.Options` object representing the dataset options.
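
    For example, a small sketch of attaching options with `with_options` and
    reading them back:

    >>> dataset = tf.data.Dataset.range(3)
    >>> options = tf.data.Options()
    >>> options.deterministic = False
    >>> dataset = dataset.with_options(options)
    >>> dataset.options().deterministic
    False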
    """
    if context.executing_eagerly():
      options = self._options_tensor_to_options(self._options())
      options._set_mutable(False)  # pylint: disable=protected-access
      return options
    warnings.warn(
        "To make it possible to preserve tf.data options across serialization "
        "boundaries, their implementation has moved to be part of the "
        "TensorFlow graph. As a consequence, the options value is in general "
        "no longer known at graph construction time. Invoking this method in "
        "graph mode retains the legacy behavior of the original "
        "implementation, but note that the returned value might not reflect "
        "the actual value of the options.")
    return self._options_attr

  def _apply_debug_options(self):
    if DEBUG_MODE:
      # In debug mode, disable autotuning and the static optimizations that
      # would introduce parallelism or asynchrony into the pipeline.
      options = options_lib.Options()
      options.autotune.enabled = False
      options.experimental_optimization.filter_parallelization = False
      options.experimental_optimization.map_and_batch_fusion = False
      options.experimental_optimization.map_parallelization = False
      dataset = _OptionsDataset(self, options)
    else:
      dataset = self
    return dataset

  def __iter__(self):
    """Creates an iterator for elements of this dataset.

    The returned iterator implements the Python Iterator protocol.
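
    For example, in eager mode the iterator can be advanced manually (a
    minimal sketch):

    >>> dataset = tf.data.Dataset.range(2)
    >>> iterator = iter(dataset)
    >>> print(next(iterator))
    tf.Tensor(0, shape=(), dtype=int64)
    >>> print(next(iterator))
    tf.Tensor(1, shape=(), dtype=int64)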

    Returns:
      An `tf.data.Iterator` for the elements of this dataset.

    Raises:
      RuntimeError: If not inside of tf.function and not executing eagerly.
    """
    if context.executing_eagerly() or ops.inside_function():
      with ops.colocate_with(self._variant_tensor):
        return iterator_ops.OwnedIterator(self)
    raise RuntimeError("`tf.data.Dataset` only supports Python-style "
                       "iteration in eager mode or within tf.function.")

  def __bool__(self):
    return True  # Required, since `__len__` is defined below.

  def __len__(self):
    """Returns the length of the dataset if it is known and finite.

    This method requires that you are running in eager mode, and that the
    length of the dataset is known and non-infinite. When the length may be
    unknown or infinite, or if you are running in graph mode, use
    `tf.data.Dataset.cardinality` instead.
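
    For example (assuming eager execution, where the length is known):

    >>> len(tf.data.Dataset.range(5))
    5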

    Returns:
      An integer representing the length of the dataset.

    Raises:
      RuntimeError: If the dataset length is unknown or infinite, or if eager
        execution is not enabled.
    """
    if not context.executing_eagerly():
      raise TypeError("`tf.data.Dataset` only supports `len` in eager mode. "
                      "Use `tf.data.Dataset.cardinality()` instead.")
    length = self.cardinality()
    if length.numpy() == INFINITE:
      raise TypeError("The dataset is infinite.")
    if length.numpy() == UNKNOWN:
      raise TypeError("The dataset length is unknown.")
    return length

  @property
  def element_spec(self):
    """The type specification of an element of this dataset.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> dataset.element_spec
    TensorSpec(shape=(), dtype=tf.int32, name=None)

    For more information,
    read [this guide](https://www.tensorflow.org/guide/data#dataset_structure).

    Returns:
      A (nested) structure of `tf.TypeSpec` objects matching the structure of an
      element of this dataset and specifying the type of individual components.
    z.element_spec()N)r   rD   )rV   r>   r>   r?   element_spec  s    zDatasetV2.element_specc             C   s.   t t| tr| jn| }d|j d| j dS )N<z element_spec=>)rD   rA   DatasetV1AdapterrH   __name__r   )rV   type_r>   r>   r?   __repr__'  s    zDatasetV2.__repr__c                s`   g }| dfg}xF|rT|  \} |d  t|  | fdd| D  qW d|S )zReturns a string showing the type of the dataset and its inputs.

    This string is intended only for debugging purposes, and may change without
    warning.
    r   z--c                s   g | ]}| d  fqS )r   r>   )rd   ds)depthr>   r?   rh   6  s    z.DatasetV2.__debug_string__.<locals>.<listcomp>
)popappendreprextendrP   join)rV   linesZ
to_processr   r>   )r   r?   __debug_string__+  s    
 zDatasetV2.__debug_string__c             C   sX   t  stdx>t| jD ].}t|tjt	j
tjtjfstd|j qW t| S )a  Returns an iterator which converts all elements of the dataset to numpy.

    Use `as_numpy_iterator` to inspect the content of your dataset. To see
    element shapes and types, print dataset elements directly instead of using
    `as_numpy_iterator`.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> for element in dataset:
    ...   print(element)
    tf.Tensor(1, shape=(), dtype=int32)
    tf.Tensor(2, shape=(), dtype=int32)
    tf.Tensor(3, shape=(), dtype=int32)

    This method requires that you are running in eager mode and the dataset's
    element_spec contains only `TensorSpec` components.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> for element in dataset.as_numpy_iterator():
    ...   print(element)
    1
    2
    3

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> print(list(dataset.as_numpy_iterator()))
    [1, 2, 3]

    `as_numpy_iterator()` will preserve the nested structure of dataset
    elements.

    >>> dataset = tf.data.Dataset.from_tensor_slices({'a': ([1, 2], [3, 4]),
    ...                                               'b': [5, 6]})
    >>> list(dataset.as_numpy_iterator()) == [{'a': (1, 3), 'b': 5},
    ...                                       {'a': (2, 4), 'b': 6}]
    True

    Returns:
      An iterable over the elements of the dataset, with their tensors converted
      to numpy arrays.

    Raises:
      TypeError: if an element contains a non-`Tensor` value.
      RuntimeError: if eager execution is not enabled.
    zF`tf.data.Dataset.as_numpy_iterator()` is only supported in eager mode.z``tf.data.Dataset.as_numpy_iterator()` is not supported for datasets that produce values of type )r   r   r   r	   flattenr   rA   r   
TensorSpecr'   ZRaggedTensorSpecsparse_tensor_libZSparseTensorSpecr   ZNoneTensorSpecrS   rC   _NumpyIterator)rV   component_specr>   r>   r?   as_numpy_iterator9  s    -zDatasetV2.as_numpy_iteratorc             C   s   t | jS )zReturns a list `tf.TensorShapes`s for the element tensor representation.

    Returns:
      A list `tf.TensorShapes`s for the element tensor representation.
    )r   get_flat_tensor_shapesr   )rV   r>   r>   r?   _flat_shapest  s    zDatasetV2._flat_shapesc             C   s   t | jS )zReturns a list `tf.DType`s for the element tensor representation.

    Returns:
      A list `tf.DType`s for the element tensor representation.
    )r   get_flat_tensor_typesr   )rV   r>   r>   r?   _flat_types}  s    zDatasetV2._flat_typesc             C   s   | j | jdS )a
  Helper for setting `output_shapes` and `output_types` attrs of an op.

    Most dataset op constructors expect `output_shapes` and `output_types`
    arguments that represent the flattened structure of an element. This helper
    function generates these attrs as a keyword argument dictionary, allowing
    `Dataset._variant_tensor` implementations to pass `**self._flat_structure`
    to the op constructor.

    Returns:
      A dictionary of keyword arguments that can be passed to a dataset op
      constructor.
    )output_shapesoutput_types)r   r   )rV   r>   r>   r?   _flat_structure  s    zDatasetV2._flat_structurec             C   s   t  }| jrt| j|_|S )z'Helper for generating dataset metadata.)r   Metadata_namer@   r=   )rV   metadatar>   r>   r?   	_metadata  s    zDatasetV2._metadatac             C   s   | j  | j| jdS )ap  Helper for generating arguments that are common across most dataset ops.

    Most dataset op constructors expect `output_shapes` and `output_types`
    arguments that represent the flattened structure of an element, as well as a
    `metadata` argument for additional metadata such as user-defined dataset
    name. This helper function generates common attributes as a keyword argument
    dictionary, allowing `Dataset._variant_tensor` implementations to pass
    `**self._common_args` to the op constructor.

    Returns:
      A dictionary of keyword arguments that can be passed to a dataset op
      constructor.
    )r   r   r   )r   rn   r   r   )rV   r>   r>   r?   _common_args  s    zDatasetV2._common_argsc             C   s
   t | jS )N)DatasetSpecr   )rV   r>   r>   r?   
_type_spec  s    zDatasetV2._type_specc             C   s   t | |dS )a  Creates a `Dataset` with a single element, comprising the given tensors.

    `from_tensors` produces a dataset containing only a single element. To slice
    the input tensor into multiple elements, use `from_tensor_slices` instead.

    >>> dataset = tf.data.Dataset.from_tensors([1, 2, 3])
    >>> list(dataset.as_numpy_iterator())
    [array([1, 2, 3], dtype=int32)]
    >>> dataset = tf.data.Dataset.from_tensors(([1, 2, 3], 'A'))
    >>> list(dataset.as_numpy_iterator())
    [(array([1, 2, 3], dtype=int32), b'A')]

    >>> # You can use `from_tensors` to produce a dataset which repeats
    >>> # the same example many times.
    >>> example = tf.constant([1,2,3])
    >>> dataset = tf.data.Dataset.from_tensors(example).repeat(2)
    >>> list(dataset.as_numpy_iterator())
    [array([1, 2, 3], dtype=int32), array([1, 2, 3], dtype=int32)]

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative described
    in [this
    guide](https://tensorflow.org/guide/data#consuming_numpy_arrays).

    Args:
      tensors: A dataset "element". Supported values are documented
        [here](https://www.tensorflow.org/guide/data#dataset_structure).
      name: (Optional.) A name for the tf.data operation.

    Returns:
      Dataset: A `Dataset`.
    )r=   )TensorDataset)tensorsr=   r>   r>   r?   from_tensors  s    %zDatasetV2.from_tensorsc             C   s   ddl m} || |S )a  Creates a `Dataset` whose elements are slices of the given tensors.

    The given tensors are sliced along their first dimension. This operation
    preserves the structure of the input tensors, removing the first dimension
    of each tensor and using it as the dataset dimension. All input tensors
    must have the same size in their first dimensions.

    >>> # Slicing a 1D tensor produces scalar tensor elements.
    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> list(dataset.as_numpy_iterator())
    [1, 2, 3]

    >>> # Slicing a 2D tensor produces 1D tensor elements.
    >>> dataset = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])
    >>> list(dataset.as_numpy_iterator())
    [array([1, 2], dtype=int32), array([3, 4], dtype=int32)]

    >>> # Slicing a tuple of 1D tensors produces tuple elements containing
    >>> # scalar tensors.
    >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2], [3, 4], [5, 6]))
    >>> list(dataset.as_numpy_iterator())
    [(1, 3, 5), (2, 4, 6)]

    >>> # Dictionary structure is also preserved.
    >>> dataset = tf.data.Dataset.from_tensor_slices({"a": [1, 2], "b": [3, 4]})
    >>> list(dataset.as_numpy_iterator()) == [{'a': 1, 'b': 3},
    ...                                       {'a': 2, 'b': 4}]
    True

    >>> # Two tensors can be combined into one Dataset object.
    >>> features = tf.constant([[1, 3], [2, 1], [3, 3]]) # ==> 3x2 tensor
    >>> labels = tf.constant(['A', 'B', 'A']) # ==> 3x1 tensor
    >>> dataset = Dataset.from_tensor_slices((features, labels))
    >>> # Both the features and the labels tensors can be converted
    >>> # to a Dataset object separately and combined after.
    >>> features_dataset = Dataset.from_tensor_slices(features)
    >>> labels_dataset = Dataset.from_tensor_slices(labels)
    >>> dataset = Dataset.zip((features_dataset, labels_dataset))
    >>> # A batched feature and label set can be converted to a Dataset
    >>> # in similar fashion.
    >>> batched_features = tf.constant([[[1, 3], [2, 3]],
    ...                                 [[2, 1], [1, 2]],
    ...                                 [[3, 3], [3, 2]]], shape=(3, 2, 2))
    >>> batched_labels = tf.constant([['A', 'A'],
    ...                               ['B', 'B'],
    ...                               ['A', 'B']], shape=(3, 2, 1))
    >>> dataset = Dataset.from_tensor_slices((batched_features, batched_labels))
    >>> for element in dataset.as_numpy_iterator():
    ...   print(element)
    (array([[1, 3],
           [2, 3]], dtype=int32), array([[b'A'],
           [b'A']], dtype=object))
    (array([[2, 1],
           [1, 2]], dtype=int32), array([[b'B'],
           [b'B']], dtype=object))
    (array([[3, 3],
           [3, 2]], dtype=int32), array([[b'A'],
           [b'B']], dtype=object))

    Note that if `tensors` contains a NumPy array, and eager execution is not
    enabled, the values will be embedded in the graph as one or more
    `tf.constant` operations. For large datasets (> 1 GB), this can waste
    memory and run into byte limits of graph serialization. If `tensors`
    contains one or more large NumPy arrays, consider the alternative described
    in [this guide](
    https://tensorflow.org/guide/data#consuming_numpy_arrays).

    Args:
      tensors: A dataset element, whose components have the same first
        dimension. Supported values are documented
        [here](https://www.tensorflow.org/guide/data#dataset_structure).
      name: (Optional.) A name for the tf.data operation.

    Returns:
      Dataset: A `Dataset`.
    r   )from_tensor_slices_op)tensorflow.python.data.opsr   from_tensor_slices)r   r=   r   r>   r>   r?   r     s    PzDatasetV2.from_tensor_slicesc               @   s8   e Zd ZdZdd Zdd Zdd Zdd	 Zd
d ZdS )zDatasetV2._GeneratorStatea  Stores outstanding iterators created from a Python generator.

    This class keeps track of potentially multiple iterators that may have
    been created from a generator, e.g. in the case that the dataset is
    repeated, or nested within a parallel computation.
    c             C   s&   || _ t | _d| _i | _i | _d S )Nr   )
_generator	threadingLock_lock_next_id_args
_iterators)rV   	generatorr>   r>   r?   rY   <  s
    
z"DatasetV2._GeneratorState.__init__c             C   s   t |tjr| S |S )N)rA   npndarrayitem)rV   iterator_idr>   r>   r?   _normalize_idC  s    z'DatasetV2._GeneratorState._normalize_idc          	   G   s@   | j  | j}|  jd7  _W d Q R X || j|< tj|tjdS )Nr   )dtype)r   r   r   r   arrayint64)rV   argsretr>   r>   r?   get_next_idJ  s
    
z%DatasetV2._GeneratorState.get_next_idc             C   sN   |  |}y
| j| S  tk
rH   t| j| j| }|| j|< |S X d S )N)r   r   KeyErroriterr   r   r   )rV   r   iteratorr>   r>   r?   get_iteratorT  s    


z&DatasetV2._GeneratorState.get_iteratorc             C   s   | j | |= d S )N)r   r   )rV   r   r>   r>   r?   iterator_completed]  s    z,DatasetV2._GeneratorState.iterator_completedN)	r   
__module____qualname____doc__rY   r   r   r   r   r>   r>   r>   r?   _GeneratorState4  s   
	r   zUse output_signature insteadr   r   c       
         s  t | stddk	rndk	r(tddk	r8tdxDtD ]$}t|tjsDtdt| dqDW ndkr~tddkrdkrtdd	 nt	t
jt	tjtd
d tD rtdd tD tdd tD  dkr$d nttj dd t|  fddfddfddfdd}d}tj|d}	|	j|dS )a  Creates a `Dataset` whose elements are generated by `generator`.

    Note: The current implementation of `Dataset.from_generator()` uses
    `tf.numpy_function` and inherits the same constraints. In particular, it
    requires the dataset and iterator related operations to be placed
    on a device in the same process as the Python program that called
    `Dataset.from_generator()`. In particular, using `from_generator` will
    preclude the use of tf.data service for scaling out dataset processing.
    The body of `generator` will not be serialized in a `GraphDef`, and you
    should not use this method if you need to serialize your model and restore
    it in a different environment.

    The `generator` argument must be a callable object that returns
    an object that supports the `iter()` protocol (e.g. a generator function).

    The elements generated by `generator` must be compatible with either the
    given `output_signature` argument or with the given `output_types` and
    (optionally) `output_shapes` arguments, whichever was specified.

    The recommended way to call `from_generator` is to use the
    `output_signature` argument. In this case the output will be assumed to
    consist of objects with the classes, shapes and types defined by
    `tf.TypeSpec` objects from `output_signature` argument:

    >>> def gen():
    ...   ragged_tensor = tf.ragged.constant([[1, 2], [3]])
    ...   yield 42, ragged_tensor
    >>>
    >>> dataset = tf.data.Dataset.from_generator(
    ...      gen,
    ...      output_signature=(
    ...          tf.TensorSpec(shape=(), dtype=tf.int32),
    ...          tf.RaggedTensorSpec(shape=(2, None), dtype=tf.int32)))
    >>>
    >>> list(dataset.take(1))
    [(<tf.Tensor: shape=(), dtype=int32, numpy=42>,
    <tf.RaggedTensor [[1, 2], [3]]>)]

    There is also a deprecated way to call `from_generator` by either with
    `output_types` argument alone or together with `output_shapes` argument.
    In this case the output of the function will be assumed to consist of
    `tf.Tensor` objects with the types defined by `output_types` and with the
    shapes which are either unknown or defined by `output_shapes`.

    Note: If `generator` depends on mutable global variables or other external
    state, be aware that the runtime may invoke `generator` multiple times
    (in order to support repeating the `Dataset`) and at any time
    between the call to `Dataset.from_generator()` and the production of the
    first element from the generator. Mutating global variables or external
    state can cause undefined behavior, and we recommend that you explicitly
    cache any external state in `generator` before calling
    `Dataset.from_generator()`.

    Note: While the `output_signature` parameter makes it possible to yield
    `Dataset` elements, the scope of `Dataset.from_generator()` should be
    limited to logic that cannot be expressed through tf.data operations. Using
    tf.data operations within the generator function is an anti-pattern and may
    result in incremental memory growth.

    Args:
      generator: A callable object that returns an object that supports the
        `iter()` protocol. If `args` is not specified, `generator` must take no
        arguments; otherwise it must take as many arguments as there are values
        in `args`.
      output_types: (Optional.) A (nested) structure of `tf.DType` objects
        corresponding to each component of an element yielded by `generator`.
      output_shapes: (Optional.) A (nested) structure of `tf.TensorShape`
        objects corresponding to each component of an element yielded by
        `generator`.
      args: (Optional.) A tuple of `tf.Tensor` objects that will be evaluated
        and passed to `generator` as NumPy-array arguments.
      output_signature: (Optional.) A (nested) structure of `tf.TypeSpec`
        objects corresponding to each component of an element yielded by
        `generator`.
      name: (Optional.) A name for the tf.data operations used by
        `from_generator`.

    Returns:
      Dataset: A `Dataset`.
    z&`generator` must be a Python callable.NzZThe `output_types` argument can not be used together with the `output_signature` argument.z[The `output_shapes` argument can not be used together with the `output_signature` argument.zU`output_signature` must contain objects that are subclass of `tf.TypeSpec` but found z which is not.zzTo specify the output signature you need to provide either the `output_signature` argument or the `output_types` argument.c             S   s
   t d S )N)r   TensorShape)r[   r>   r>   r?   <lambda>      z*DatasetV2.from_generator.<locals>.<lambda>c             s   s   | ]}t |tjV  qd S )N)rA   r   r   )rd   xr>   r>   r?   	<genexpr>  s   z+DatasetV2.from_generator.<locals>.<genexpr>c             S   s   g | ]
}|j qS r>   )r   )rd   r   r>   r>   r?   rh     s    z,DatasetV2.from_generator.<locals>.<listcomp>c             S   s   g | ]
}|j qS r>   )shape)rd   r   r>   r>   r?   rh     s    r>   r   )r=   c                s   t j tjS )aU  Creates a unique `iterator_id` for each pass over the dataset.

      The returned `iterator_id` disambiguates between multiple concurrently
      existing iterators.

      Args:
        unused_dummy: Ignored value.

      Returns:
        A `tf.int64` tensor whose value uniquely identifies an iterator in
        `generator_state`.
      )r%   numpy_functionr   r   r   )Zunused_dummy)r   generator_stater>   r?   get_iterator_id_fn  s    
z4DatasetV2.from_generator.<locals>.get_iterator_id_fnc                s   rrdd t D t   fdd}t|| g}t|ttfs\|g}dk	rx t| D ]\}}|| qpW t 	|S t
}fdd}tj|| g|dS dS )a  Generates the next element from iterator with ID `iterator_id_t`.

      We map this function across an infinite repetition of the
      `iterator_id_t`, and raise `StopIteration` to terminate the iteration.

      Args:
        iterator_id_t: A `tf.int64` tensor whose value uniquely identifies the
          iterator in `generator_state` from which to generate an element.

      Returns:
        The next element to generate from the iterator.
      c             S   s   g | ]}t |qS r>   )r   Zas_dtype)rd   dtr>   r>   r?   rh     s    zGDatasetV2.from_generator.<locals>.generator_next_fn.<locals>.<listcomp>c       
         sD  t | }yt|}W n> ttfk
r\ } ztd d| d|W dd}~X Y nX g }xtt|D ]f\}}y|tj	j
||jd W qn ttfk
r } ztd|j d| d|W dd}~X Y qnX qnW xft| D ]V\}}}	|j|jkrtd|j d|j d	|	|jstd
|j d|	 d	qW |S )z7A `py_func` that will be called to invoke the iterator.ze`generator` yielded an element that did not match the expected structure. The expected structure was z, but the yielded element was rI   N)r   zg`generator` yielded an element that could not be converted to the expected type. The expected type was z'`generator` yielded an element of type z where an element of type z was expected.z(`generator` yielded an element of shape z where an element of shape )nextr   r	   flatten_up_torS   r;   zipr   r%   ZFuncRegistry_convertas_numpy_dtyper=   r   is_compatible_withr   )
r   valuesZflattened_valueseZ
ret_arraysr   r   Z	ret_arrayZexpected_dtypeZexpected_shape)flattened_shapesflattened_typesr   r   r>   r?   generator_py_func  s8    zNDatasetV2.from_generator.<locals>.generator_next_fn.<locals>.generator_py_funcNc          
      s   t  |  }yt|}W n> ttfk
r` } ztd d| d|W dd}~X Y nX t|}t|std| d dt	|S )z7A `py_func` that will be called to invoke the iterator.ze`generator` yielded an element that did not match the expected structure. The expected structure was z, but the yielded element was rI   Nz"`generator` yielded an element of z where an element of z was expected.)
r   r   rp   r   normalize_elementrS   r;   type_spec_from_valueare_compatibleto_tensor_list)r   r   r  Zvalues_spec)r   output_signaturer>   r?   r  M  s    
)inpZTout)r	   r   r%   r   rA   listtupler   	set_shapepack_sequence_asr   r   Zeager_py_func)iterator_id_tr  Zflat_valuesZret_tr   Zflat_output_types)r   r   r	  r   )r  r  r?   generator_next_fn  s"    
/
z3DatasetV2.from_generator.<locals>.generator_next_fnc                s    fdd}t || gtjS )zBReleases host-side state for the iterator with ID `iterator_id_t`.c                s     |  tjdtjdS )Nr   )r   )r   r   r   r   )r   )r   r>   r?   finalize_py_funck  s    
zGDatasetV2.from_generator.<locals>.finalize_fn.<locals>.finalize_py_func)r%   r   r   r   )r  r  )r   r>   r?   finalize_fnh  s    	
z-DatasetV2.from_generator.<locals>.finalize_fnc                s   t |  dS )N)r=   )_GeneratorDataset)Z	dummy_arg)r  r  r   r=   r	  r>   r?   flat_map_fny  s    z-DatasetV2.from_generator.<locals>.flat_map_fnr   )callablerS   r	   r   rA   r   rB   rD   map_structuremap_structure_up_tor   as_shaper   r   allr  r  r   Zconvert_n_to_tensorrG   r   Datasetr   flat_map)
r   r   r   r   r	  r=   specr  dummyZ
id_datasetr>   )	r   r  r  r   r   r=   r   r	  r   r?   from_generator`  sP    Y

pzDatasetV2.from_generatorc              O   s
   t | |S )a  Creates a `Dataset` of a step-separated range of values.

    >>> list(Dataset.range(5).as_numpy_iterator())
    [0, 1, 2, 3, 4]
    >>> list(Dataset.range(2, 5).as_numpy_iterator())
    [2, 3, 4]
    >>> list(Dataset.range(1, 5, 2).as_numpy_iterator())
    [1, 3]
    >>> list(Dataset.range(1, 5, -2).as_numpy_iterator())
    []
    >>> list(Dataset.range(5, 1).as_numpy_iterator())
    []
    >>> list(Dataset.range(5, 1, -2).as_numpy_iterator())
    [5, 3]
    >>> list(Dataset.range(2, 5, output_type=tf.int32).as_numpy_iterator())
    [2, 3, 4]
    >>> list(Dataset.range(1, 5, 2, output_type=tf.float32).as_numpy_iterator())
    [1.0, 3.0]

    Args:
      *args: follows the same semantics as python's range.
        len(args) == 1 -> start = 0, stop = args[0], step = 1.
        len(args) == 2 -> start = args[0], stop = args[1], step = 1.
        len(args) == 3 -> start = args[0], stop = args[1], step = args[2].
      **kwargs:
        - output_type: Its expected dtype. (Optional, default: `tf.int64`).
        - name: (Optional.) A name for the tf.data operation.

    Returns:
      Dataset: A `RangeDataset`.

    Raises:
      ValueError: if len(args) == 0.
    )RangeDataset)r   r~   r>   r>   r?   range  s    $zDatasetV2.rangec             C   s   t | |dS )a8  Creates a `Dataset` by zipping together the given datasets.

    This method has similar semantics to the built-in `zip()` function
    in Python, with the main difference being that the `datasets`
    argument can be a (nested) structure of `Dataset` objects. The supported
    nesting mechanisms are documented
    [here] (https://www.tensorflow.org/guide/data#dataset_structure).

    >>> # The nested structure of the `datasets` argument determines the
    >>> # structure of elements in the resulting dataset.
    >>> a = tf.data.Dataset.range(1, 4)  # ==> [ 1, 2, 3 ]
    >>> b = tf.data.Dataset.range(4, 7)  # ==> [ 4, 5, 6 ]
    >>> ds = tf.data.Dataset.zip((a, b))
    >>> list(ds.as_numpy_iterator())
    [(1, 4), (2, 5), (3, 6)]
    >>> ds = tf.data.Dataset.zip((b, a))
    >>> list(ds.as_numpy_iterator())
    [(4, 1), (5, 2), (6, 3)]
    >>>
    >>> # The `datasets` argument may contain an arbitrary number of datasets.
    >>> c = tf.data.Dataset.range(7, 13).batch(2)  # ==> [ [7, 8],
    ...                                            #       [9, 10],
    ...                                            #       [11, 12] ]
    >>> ds = tf.data.Dataset.zip((a, b, c))
    >>> for element in ds.as_numpy_iterator():
    ...   print(element)
    (1, 4, array([7, 8]))
    (2, 5, array([ 9, 10]))
    (3, 6, array([11, 12]))
    >>>
    >>> # The number of elements in the resulting dataset is the same as
    >>> # the size of the smallest dataset in `datasets`.
    >>> d = tf.data.Dataset.range(13, 15)  # ==> [ 13, 14 ]
    >>> ds = tf.data.Dataset.zip((a, d))
    >>> list(ds.as_numpy_iterator())
    [(1, 13), (2, 14)]

    Args:
      datasets: A (nested) structure of datasets.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )
ZipDataset)datasetsr=   r>   r>   r?   r     s    .zDatasetV2.zipc             C   s   t | ||dS )a  Creates a `Dataset` by concatenating the given dataset with this dataset.

    >>> a = tf.data.Dataset.range(1, 4)  # ==> [ 1, 2, 3 ]
    >>> b = tf.data.Dataset.range(4, 8)  # ==> [ 4, 5, 6, 7 ]
    >>> ds = a.concatenate(b)
    >>> list(ds.as_numpy_iterator())
    [1, 2, 3, 4, 5, 6, 7]
    >>> # The input dataset and dataset to be concatenated should have
    >>> # compatible element specs.
    >>> c = tf.data.Dataset.zip((a, b))
    >>> a.concatenate(c)
    Traceback (most recent call last):
    TypeError: Two datasets to concatenate have different types
    <dtype: 'int64'> and (tf.int64, tf.int64)
    >>> d = tf.data.Dataset.from_tensor_slices(["a", "b", "c"])
    >>> a.concatenate(d)
    Traceback (most recent call last):
    TypeError: Two datasets to concatenate have different types
    <dtype: 'int64'> and <dtype: 'string'>

    Args:
      dataset: `Dataset` to be concatenated.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )ConcatenateDataset)rV   r   r=   r>   r>   r?   concatenate  s    zDatasetV2.concatenater   r   c             C   s   ddl m} |j| |||dS )a)  Creates a `Dataset` that counts from `start` in steps of size `step`.

    Unlike `tf.data.Dataset.range`, which stops at some ending number,
    `tf.data.Dataset.counter` produces elements indefinitely.

    >>> dataset = tf.data.experimental.Counter().take(5)
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 2, 3, 4]
    >>> dataset.element_spec
    TensorSpec(shape=(), dtype=tf.int64, name=None)
    >>> dataset = tf.data.experimental.Counter(dtype=tf.int32)
    >>> dataset.element_spec
    TensorSpec(shape=(), dtype=tf.int32, name=None)
    >>> dataset = tf.data.experimental.Counter(start=2).take(5)
    >>> list(dataset.as_numpy_iterator())
    [2, 3, 4, 5, 6]
    >>> dataset = tf.data.experimental.Counter(start=2, step=5).take(5)
    >>> list(dataset.as_numpy_iterator())
    [2, 7, 12, 17, 22]
    >>> dataset = tf.data.experimental.Counter(start=10, step=-1).take(5)
    >>> list(dataset.as_numpy_iterator())
    [10, 9, 8, 7, 6]

    Args:
      start: (Optional.) The starting value for the counter. Defaults to 0.
      step: (Optional.) The step size for the counter. Defaults to 1.
      dtype: (Optional.) The data type for counter elements. Defaults to
        `tf.int64`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A `Dataset` of scalar `dtype` elements.
    r   )
counter_op)r=   )r   r%  counter)startstepr   r=   r%  r>   r>   r?   r&    s    %zDatasetV2.counterFc             C   s   ddl m} |j| |||dS )a  Creates a `Dataset` that rebatches the elements from this dataset.

    `rebatch(N)` is functionally equivalent to `unbatch().batch(N)`, but is
    more efficient, performing one copy instead of two.

    >>> ds = tf.data.Dataset.range(6)
    >>> ds = ds.batch(2)
    >>> ds = ds.rebatch(3)
    >>> list(ds.as_numpy_iterator())
    [array([0, 1, 2]), array([3, 4, 5])]

    >>> ds = tf.data.Dataset.range(7)
    >>> ds = ds.batch(4)
    >>> ds = ds.rebatch(3)
    >>> list(ds.as_numpy_iterator())
    [array([0, 1, 2]), array([3, 4, 5]), array([6])]

    >>> ds = tf.data.Dataset.range(7)
    >>> ds = ds.batch(2)
    >>> ds = ds.rebatch(3, drop_remainder=True)
    >>> list(ds.as_numpy_iterator())
    [array([0, 1, 2]), array([3, 4, 5])]

    If the `batch_size` argument is a list, `rebatch` cycles through the list
    to determine the size of each batch.

    >>> ds = tf.data.Dataset.range(8)
    >>> ds = ds.batch(4)
    >>> ds = ds.rebatch([2, 1, 1])
    >>> list(ds.as_numpy_iterator())
    [array([0, 1]), array([2]), array([3]), array([4, 5]), array([6]),
    array([7])]

    Args:
      batch_size: A `tf.int64` scalar or vector, representing the size of
        batches to produce. If this argument is a vector, these values are
        cycled through in round robin fashion.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size[cycle_index]` elements; the default behavior is not to drop
        the smaller batch.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A `Dataset` of scalar `dtype` elements.
    r   )
rebatch_op)r=   )r   r)  rebatch)rV   
batch_sizedrop_remainderr=   r)  r>   r>   r?   r*  3  s    1zDatasetV2.rebatchc             C   s   t r| S t| ||dS )a  Creates a `Dataset` that prefetches elements from this dataset.

    Most dataset input pipelines should end with a call to `prefetch`. This
    allows later elements to be prepared while the current element is being
    processed. This often improves latency and throughput, at the cost of
    using additional memory to store prefetched elements.

    Note: Like other `Dataset` methods, prefetch operates on the
    elements of the input dataset. It has no concept of examples vs. batches.
    `examples.prefetch(2)` will prefetch two elements (2 examples),
    while `examples.batch(20).prefetch(2)` will prefetch 2 elements
    (2 batches, of 20 examples each).

    >>> dataset = tf.data.Dataset.range(3)
    >>> dataset = dataset.prefetch(2)
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 2]

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the maximum
        number of elements that will be buffered when prefetching. If the value
        `tf.data.AUTOTUNE` is used, then the buffer size is dynamically tuned.
      name: Optional. A name for the tf.data transformation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )r   PrefetchDataset)rV   buffer_sizer=   r>   r>   r?   prefetchg  s    zDatasetV2.prefetchc          
   C   s
  t d |dkrd}t j| tjdd} t| }tjt	
|d ddd}tjd	tj| d
ddd}tj||gddd}t |g t	|}W dQ R X ddlm} |j|d|d}	tttrt|	}	|rtt	j
|tjdd d}
|	j|
||d}	|	S Q R X dS )a  A dataset of all files matching one or more glob patterns.

    The `file_pattern` argument should be a small number of glob patterns.
    If your filenames have already been globbed, use
    `Dataset.from_tensor_slices(filenames)` instead, as re-globbing every
    filename with `list_files` may result in poor performance with remote
    storage systems.

    Note: The default behavior of this method is to return filenames in
    a non-deterministic random shuffled order. Pass a `seed` or `shuffle=False`
    to get results in a deterministic order.

    Example:
      If we had the following files on our filesystem:

        - /path/to/dir/a.txt
        - /path/to/dir/b.py
        - /path/to/dir/c.py

      If we pass "/path/to/dir/*.py" as the directory, the dataset
      would produce:

        - /path/to/dir/b.py
        - /path/to/dir/c.py

    Args:
      file_pattern: A string, a list of strings, or a `tf.Tensor` of string type
        (scalar or vector), representing the filename glob (i.e. shell wildcard)
        pattern(s) that will be matched.
      shuffle: (Optional.) If `True`, the file names will be shuffled randomly.
        Defaults to `True`.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.random.set_seed` for behavior.
      name: Optional. A name for the tf.data operations used by `list_files`.

    Returns:
     Dataset: A `Dataset` of strings corresponding to file names.
    
list_filesNTfile_pattern)r   r=   r   Zmatch_not_empty)r=   zNo files matched pattern: z, )	separatormessager   assert_not_empty)	summarizer=   )r   )Zis_filesr=   )Zout_type)seedr=   )r   
name_scopeconvert_to_tensorr   ro   r    matching_filesr#   Zgreaterr   r   addr&   Zreduce_joinr   Assertcontrol_dependenciesidentityr   r   ZTensorSliceDataset
issubclassr  rQ   r   maximumr   shuffle)r1  r@  r6  r=   r9  	conditionr3  r4  r   r   r.  r>   r>   r?   r0    s2    )

zDatasetV2.list_filesc             C   s   t | ||dS )ad  Repeats this dataset so each original value is seen `count` times.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> dataset = dataset.repeat(3)
    >>> list(dataset.as_numpy_iterator())
    [1, 2, 3, 1, 2, 3, 1, 2, 3]

    Note: If the input dataset depends on global state (e.g. a random number
    generator) or its output is non-deterministic (e.g. because of upstream
    `shuffle`), then different repetitions may produce different elements.

    Args:
      count: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        number of times the dataset should be repeated. The default behavior (if
        `count` is `None` or `-1`) is for the dataset be repeated indefinitely.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )RepeatDataset)rV   countr=   r>   r>   r?   repeat  s    zDatasetV2.repeatc             C   s<   t tjjj}tj|||d}t|d}tj	|| f|dS )a  Enumerates the elements of this dataset.

    It is similar to python's `enumerate`.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> dataset = dataset.enumerate(start=5)
    >>> for element in dataset.as_numpy_iterator():
    ...   print(element)
    (5, 1)
    (6, 2)
    (7, 3)

    >>> # The (nested) structure of the input dataset determines the
    >>> # structure of elements in the resulting dataset.
    >>> dataset = tf.data.Dataset.from_tensor_slices([(7, 8), (9, 10)])
    >>> dataset = dataset.enumerate()
    >>> for element in dataset.as_numpy_iterator():
    ...   print(element)
    (0, array([7, 8], dtype=int32))
    (1, array([ 9, 10], dtype=int32))

    Args:
      start: A `tf.int64` scalar `tf.Tensor`, representing the start value for
        enumeration.
      name: Optional. A name for the tf.data operations used by `enumerate`.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   replicate_on_split)
r   iinfor   r   r   maxr  r   _apply_rewriter   )rV   r'  r=   Z	max_valuerange_datasetr>   r>   r?   rq     s    
zDatasetV2.enumeratec             C   s   t | ||||dS )a	  Randomly shuffles the elements of this dataset.

    This dataset fills a buffer with `buffer_size` elements, then randomly
    samples elements from this buffer, replacing the selected elements with new
    elements. For perfect shuffling, a buffer size greater than or equal to the
    full size of the dataset is required.

    For instance, if your dataset contains 10,000 elements but `buffer_size` is
    set to 1,000, then `shuffle` will initially select a random element from
    only the first 1,000 elements in the buffer. Once an element is selected,
    its space in the buffer is replaced by the next (i.e. 1,001-st) element,
    maintaining the 1,000 element buffer.

    `reshuffle_each_iteration` controls whether the shuffle order should be
    different for each epoch. In TF 1.X, the idiomatic way to create epochs
    was through the `repeat` transformation:

    ```python
    dataset = tf.data.Dataset.range(3)
    dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
    dataset = dataset.repeat(2)
    # [1, 0, 2, 1, 2, 0]

    dataset = tf.data.Dataset.range(3)
    dataset = dataset.shuffle(3, reshuffle_each_iteration=False)
    dataset = dataset.repeat(2)
    # [1, 0, 2, 1, 0, 2]
    ```

    In TF 2.0, `tf.data.Dataset` objects are Python iterables which makes it
    possible to also create epochs through Python iteration:

    ```python
    dataset = tf.data.Dataset.range(3)
    dataset = dataset.shuffle(3, reshuffle_each_iteration=True)
    list(dataset.as_numpy_iterator())
    # [1, 0, 2]
    list(dataset.as_numpy_iterator())
    # [1, 2, 0]
    ```

    ```python
    dataset = tf.data.Dataset.range(3)
    dataset = dataset.shuffle(3, reshuffle_each_iteration=False)
    list(dataset.as_numpy_iterator())
    # [1, 0, 2]
    list(dataset.as_numpy_iterator())
    # [1, 0, 2]
    ```

    Args:
      buffer_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements from this dataset from which the new dataset will sample.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.random.set_seed` for behavior.
      reshuffle_each_iteration: (Optional.) A boolean, which if true indicates
        that the dataset should be pseudorandomly reshuffled each time it is
        iterated over. (Defaults to `True`.)
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )ShuffleDataset)rV   r.  r6  reshuffle_each_iterationr=   r>   r>   r?   r@    s    EzDatasetV2.shuffle c             C   s   t | ||dS )a  Caches the elements in this dataset.

    The first time the dataset is iterated over, its elements will be cached
    either in the specified file or in memory. Subsequent iterations will
    use the cached data.

    Note: To guarantee that the cache gets finalized, the input dataset must be
    iterated through in its entirety, until it raises StopIteration. Otherwise,
    subsequent iterations may not use cached data.

    >>> dataset = tf.data.Dataset.range(5)
    >>> dataset = dataset.map(lambda x: x**2)
    >>> dataset = dataset.cache()
    >>> # The first time reading through the data will generate the data using
    >>> # `range` and `map`.
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 4, 9, 16]
    >>> # Subsequent iterations read from the cache.
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 4, 9, 16]

    When caching to a file, the cached data will persist across runs. Even the
    first iteration through the data will read from the cache file. Changing
    the input pipeline before the call to `.cache()` will have no effect until
    the cache file is removed or the filename is changed.

    ```python
    dataset = tf.data.Dataset.range(5)
    dataset = dataset.cache("/path/to/file")
    list(dataset.as_numpy_iterator())
    # [0, 1, 2, 3, 4]
    dataset = tf.data.Dataset.range(10)
    dataset = dataset.cache("/path/to/file")  # Same file!
    list(dataset.as_numpy_iterator())
    # [0, 1, 2, 3, 4]
    ```

    Note: `cache` will produce exactly the same elements during each iteration
    through the dataset. If you wish to randomize the iteration order, make sure
    to call `shuffle` *after* calling `cache`.
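
    A minimal sketch of that recommended ordering, caching the deterministic
    part of the pipeline and shuffling afterwards:

    ```python
    dataset = tf.data.Dataset.range(10)
    dataset = dataset.map(lambda x: x * 2).cache()  # deterministic, cached once
    dataset = dataset.shuffle(10)  # reshuffled on each iteration by default
    ```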

    Args:
      filename: A `tf.string` scalar `tf.Tensor`, representing the name of a
        directory on the filesystem to use for caching elements in this Dataset.
        If a filename is not provided, the dataset will be cached in memory.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )CacheDataset)rV   filenamer=   r>   r>   r?   cacheZ  s    3zDatasetV2.cachec             C   s   t | ||dS )a  Creates a `Dataset` with at most `count` elements from this dataset.

    >>> dataset = tf.data.Dataset.range(10)
    >>> dataset = dataset.take(3)
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 2]

    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of this dataset that should be taken to form the new dataset.
        If `count` is -1, or if `count` is greater than the size of this
        dataset, the new dataset will contain all elements of this dataset.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )TakeDataset)rV   rC  r=   r>   r>   r?   take  s    zDatasetV2.takec             C   s   t | ||dS )a  Creates a `Dataset` that skips `count` elements from this dataset.

    >>> dataset = tf.data.Dataset.range(10)
    >>> dataset = dataset.skip(7)
    >>> list(dataset.as_numpy_iterator())
    [7, 8, 9]
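
    Together with `take`, `skip` can split a dataset into two disjoint parts,
    for example a simple train/validation split (the sizes below are
    illustrative):

    >>> dataset = tf.data.Dataset.range(10)
    >>> train = dataset.take(8)
    >>> validation = dataset.skip(8)
    >>> list(train.as_numpy_iterator())
    [0, 1, 2, 3, 4, 5, 6, 7]
    >>> list(validation.as_numpy_iterator())
    [8, 9]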

    Args:
      count: A `tf.int64` scalar `tf.Tensor`, representing the number of
        elements of this dataset that should be skipped to form the new dataset.
        If `count` is greater than the size of this dataset, the new dataset
        will contain no elements.  If `count` is -1, skips the entire dataset.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )SkipDataset)rV   rC  r=   r>   r>   r?   skip  s    zDatasetV2.skipc             C   s   t | |||dS )a	  Creates a `Dataset` that includes only 1/`num_shards` of this dataset.

    `shard` is deterministic. The Dataset produced by `A.shard(n, i)` will
    contain all elements of A whose index mod n = i.

    >>> A = tf.data.Dataset.range(10)
    >>> B = A.shard(num_shards=3, index=0)
    >>> list(B.as_numpy_iterator())
    [0, 3, 6, 9]
    >>> C = A.shard(num_shards=3, index=1)
    >>> list(C.as_numpy_iterator())
    [1, 4, 7]
    >>> D = A.shard(num_shards=3, index=2)
    >>> list(D.as_numpy_iterator())
    [2, 5, 8]

    This dataset operator is very useful when running distributed training, as
    it allows each worker to read a unique subset.

    When reading a single input file, you can shard elements as follows:

    ```python
    d = tf.data.TFRecordDataset(input_file)
    d = d.shard(num_workers, worker_index)
    d = d.repeat(num_epochs)
    d = d.shuffle(shuffle_buffer_size)
    d = d.map(parser_fn, num_parallel_calls=num_map_threads)
    ```

    Important caveats:

    - Be sure to shard before you use any randomizing operator (such as
      shuffle).
    - Generally it is best if the shard operator is used early in the dataset
      pipeline. For example, when reading from a set of TFRecord files, shard
      before converting the dataset to input samples. This avoids reading every
      file on every worker. The following is an example of an efficient
      sharding strategy within a complete pipeline:

    ```python
    d = Dataset.list_files(pattern)
    d = d.shard(num_workers, worker_index)
    d = d.repeat(num_epochs)
    d = d.shuffle(shuffle_buffer_size)
    d = d.interleave(tf.data.TFRecordDataset,
                     cycle_length=num_readers, block_length=1)
    d = d.map(parser_fn, num_parallel_calls=num_map_threads)
    ```
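
    As a minimal sketch, a per-worker pipeline typically looks as follows,
    where `num_workers`, `worker_index`, `shuffle_buffer_size` and `batch_size`
    are assumed to be supplied by the surrounding training setup:

    ```python
    # num_workers, worker_index, shuffle_buffer_size and batch_size are
    # assumed to be provided by the caller.
    d = tf.data.Dataset.range(1000)
    d = d.shard(num_shards=num_workers, index=worker_index)
    d = d.shuffle(shuffle_buffer_size).batch(batch_size)
    ```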

    Args:
      num_shards: A `tf.int64` scalar `tf.Tensor`, representing the number of
        shards operating in parallel.
      index: A `tf.int64` scalar `tf.Tensor`, representing the worker index.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      InvalidArgumentError: if `num_shards` or `index` are illegal values.

        Note: error checking is done on a best-effort basis, and errors aren't
        guaranteed to be caught upon dataset creation. (e.g. providing a
        placeholder tensor bypasses the early checking, and will instead result
        in an error during a session.run call.)
    )r=   )ShardDataset)rV   
num_shardsindexr=   r>   r>   r?   shard  s    CzDatasetV2.shardc             C   s"   ddl m} || |||| dS )a
  Saves the content of the given dataset.

      Example usage:

      >>> import tempfile
      >>> path = os.path.join(tempfile.gettempdir(), "saved_data")
      >>> # Save a dataset
      >>> dataset = tf.data.Dataset.range(2)
      >>> dataset.save(path)
      >>> new_dataset = tf.data.Dataset.load(path)
      >>> for elem in new_dataset:
      ...   print(elem)
      tf.Tensor(0, shape=(), dtype=int64)
      tf.Tensor(1, shape=(), dtype=int64)

      The saved dataset is saved in multiple file "shards". By default, the
      dataset output is divided into shards in a round-robin fashion, but custom
      sharding can be specified via the `shard_func` function. For example, you
      can save the dataset using a single shard as follows:

      ```python
      dataset = make_dataset()
      def custom_shard_func(element):
        return np.int64(0)
      dataset.save(
          path="/path/to/data", ..., shard_func=custom_shard_func)
      ```

      To enable checkpointing, pass in `checkpoint_args` to the `save` method
      as follows:

      ```python
      dataset = tf.data.Dataset.range(100)
      save_dir = "..."
      checkpoint_prefix = "..."
      step_counter = tf.Variable(0, trainable=False)
      checkpoint_args = {
        "checkpoint_interval": 50,
        "step_counter": step_counter,
        "directory": checkpoint_prefix,
        "max_to_keep": 20,
      }
      dataset.save(save_dir, checkpoint_args=checkpoint_args)
      ```
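
      Compression can be enabled when writing the data; the same `compression`
      value should then be passed to `tf.data.Dataset.load` when reading it
      back. A minimal sketch (the path is illustrative):

      ```python
      dataset = tf.data.Dataset.range(10)
      dataset.save("/path/to/compressed_data", compression="GZIP")
      restored = tf.data.Dataset.load("/path/to/compressed_data",
                                      compression="GZIP")
      ```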

      NOTE: The directory layout and file format used for saving the dataset is
      considered an implementation detail and may change. For this reason,
      datasets saved through `tf.data.Dataset.save` should only be consumed
      through `tf.data.Dataset.load`, which is guaranteed to be
      backwards compatible.

    Args:
     path: Required. A directory to use for saving the dataset.
     compression: Optional. The algorithm to use to compress data when writing
          it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
     shard_func: Optional. A function to control the mapping of dataset
          elements to file shards. The function is expected to map elements of
          the input dataset to int64 shard IDs. If present, the function will be
          traced and executed as graph computation.
     checkpoint_args: Optional args for checkpointing which will be passed into
          the `tf.train.CheckpointManager`. If `checkpoint_args` are not
          specified, then checkpointing will not be performed. The `save()`
          implementation creates a `tf.train.Checkpoint` object internally, so
          users should not set the `checkpoint` argument in `checkpoint_args`.

    Raises:
      ValueError: If `checkpoint` is passed into `checkpoint_args`.
    r   )save_opN)r   rX  save)rV   pathcompression
shard_funcZcheckpoint_argsrX  r>   r>   r?   rY    s    KzDatasetV2.savec             C   s   ddl m} |j| |||dS )aC	  Loads a previously saved dataset.

    Example usage:

    >>> import tempfile
    >>> path = os.path.join(tempfile.gettempdir(), "saved_data")
    >>> # Save a dataset
    >>> dataset = tf.data.Dataset.range(2)
    >>> tf.data.Dataset.save(dataset, path)
    >>> new_dataset = tf.data.Dataset.load(path)
    >>> for elem in new_dataset:
    ...   print(elem)
    tf.Tensor(0, shape=(), dtype=int64)
    tf.Tensor(1, shape=(), dtype=int64)


    If the default option of sharding the saved dataset was used, the element
    order of the saved dataset will be preserved when loading it.

    The `reader_func` argument can be used to specify a custom order in which
    elements should be loaded from the individual shards. The `reader_func` is
    expected to take a single argument -- a dataset of datasets, each containing
    elements of one of the shards -- and return a dataset of elements. For
    example, the order of shards can be shuffled when loading them as follows:

    ```python
    def custom_reader_func(datasets):
      datasets = datasets.shuffle(NUM_SHARDS)
      return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)

    dataset = tf.data.Dataset.load(
        path="/path/to/data", ..., reader_func=custom_reader_func)
    ```
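
    As noted in the arguments below, `element_spec` must be provided when
    loading in graph mode; a minimal sketch (path and spec are illustrative):

    ```python
    dataset = tf.data.Dataset.load(
        "/path/to/data",
        element_spec=tf.TensorSpec(shape=(), dtype=tf.int64))
    ```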

    Args:
      path: Required. A path pointing to a previously saved dataset.
      element_spec: Optional. A nested structure of `tf.TypeSpec` objects
        matching the structure of an element of the saved dataset and specifying
        the type of individual element components. If not provided, the nested
        structure of `tf.TypeSpec` saved with the saved dataset is used. Note
        that this argument is required in graph mode.
      compression: Optional. The algorithm to use to decompress the data when
        reading it. Supported options are `GZIP` and `NONE`. Defaults to `NONE`.
      reader_func: Optional. A function to control how to read data from shards.
        If present, the function will be traced and executed as graph
        computation.

    Returns:
      A `tf.data.Dataset` instance.

    Raises:
      FileNotFoundError: If `element_spec` is not specified and the saved nested
        structure of `tf.TypeSpec` can not be located with the saved dataset.
      ValueError: If `element_spec` is not specified and the method is executed
        in graph mode.
    r   )load_op)rZ  r   r[  reader_func)r   r]  load)rZ  r   r[  r^  r]  r>   r>   r?   r_  J  s    <zDatasetV2.loadc             C   sJ   |dkst r2|dk	r"t s"td t| |||dS t| |||||dS dS )a

  Combines consecutive elements of this dataset into batches.

    >>> dataset = tf.data.Dataset.range(8)
    >>> dataset = dataset.batch(3)
    >>> list(dataset.as_numpy_iterator())
    [array([0, 1, 2]), array([3, 4, 5]), array([6, 7])]

    >>> dataset = tf.data.Dataset.range(8)
    >>> dataset = dataset.batch(3, drop_remainder=True)
    >>> list(dataset.as_numpy_iterator())
    [array([0, 1, 2]), array([3, 4, 5])]

    The components of the resulting element will have an additional outer
    dimension, which will be `batch_size` (or `N % batch_size` for the last
    element if `batch_size` does not divide the number of input elements `N`
    evenly and `drop_remainder` is `False`). If your program depends on the
    batches having the same outer dimension, you should set the `drop_remainder`
    argument to `True` to prevent the smaller batch from being produced.

    Note: If your program requires data to have a statically known shape (e.g.,
    when using XLA), you should use `drop_remainder=True`. Without
    `drop_remainder=True` the shape of the output dataset will have an unknown
    leading dimension due to the possibility of a smaller final batch.
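
    Batches can also be computed in parallel; a minimal sketch combining
    `drop_remainder` with `tf.data.AUTOTUNE`:

    ```python
    dataset = tf.data.Dataset.range(1000)
    dataset = dataset.batch(32, drop_remainder=True,
                            num_parallel_calls=tf.data.AUTOTUNE)
    ```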

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.
      num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,
        representing the number of batches to compute asynchronously in
        parallel.
        If not specified, batches will be computed sequentially. If the value
        `tf.data.AUTOTUNE` is used, then the number of parallel
        calls is set dynamically based on available resources.
      deterministic: (Optional.) When `num_parallel_calls` is specified, if this
        boolean is specified (`True` or `False`), it controls the order in which
        the transformation produces elements. If set to `False`, the
        transformation is allowed to yield elements out of order to trade
        determinism for performance. If not specified, the
        `tf.data.Options.deterministic` option (`True` by default) controls the
        behavior.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    NzaThe `deterministic` argument has no effect unless the `num_parallel_calls` argument is specified.)r=   )r   r   r   BatchDatasetParallelBatchDataset)rV   r+  r,  num_parallel_callsdeterministicr=   r>   r>   r?   batch  s    7
zDatasetV2.batchc             C   sT   |dkr@t | }x.tt|D ]\}}|s td| dq W t| |||||dS )a^  Combines consecutive elements of this dataset into padded batches.

    This transformation combines multiple consecutive elements of the input
    dataset into a single element.

    Like `tf.data.Dataset.batch`, the components of the resulting element will
    have an additional outer dimension, which will be `batch_size` (or
    `N % batch_size` for the last element if `batch_size` does not divide the
    number of input elements `N` evenly and `drop_remainder` is `False`). If
    your program depends on the batches having the same outer dimension, you
    should set the `drop_remainder` argument to `True` to prevent the smaller
    batch from being produced.

    Unlike `tf.data.Dataset.batch`, the input elements to be batched may have
    different shapes, and this transformation will pad each component to the
    respective shape in `padded_shapes`. The `padded_shapes` argument
    determines the resulting shape for each dimension of each component in an
    output element:

    * If the dimension is a constant, the component will be padded out to that
      length in that dimension.
    * If the dimension is unknown, the component will be padded out to the
      maximum length of all elements in that dimension.

    >>> A = (tf.data.Dataset
    ...      .range(1, 5, output_type=tf.int32)
    ...      .map(lambda x: tf.fill([x], x)))
    >>> # Pad to the smallest per-batch size that fits all elements.
    >>> B = A.padded_batch(2)
    >>> for element in B.as_numpy_iterator():
    ...   print(element)
    [[1 0]
     [2 2]]
    [[3 3 3 0]
     [4 4 4 4]]
    >>> # Pad to a fixed size.
    >>> C = A.padded_batch(2, padded_shapes=5)
    >>> for element in C.as_numpy_iterator():
    ...   print(element)
    [[1 0 0 0 0]
     [2 2 0 0 0]]
    [[3 3 3 0 0]
     [4 4 4 4 0]]
    >>> # Pad with a custom value.
    >>> D = A.padded_batch(2, padded_shapes=5, padding_values=-1)
    >>> for element in D.as_numpy_iterator():
    ...   print(element)
    [[ 1 -1 -1 -1 -1]
     [ 2  2 -1 -1 -1]]
    [[ 3  3  3 -1 -1]
     [ 4  4  4  4 -1]]
    >>> # Components of nested elements can be padded independently.
    >>> elements = [([1, 2, 3], [10]),
    ...             ([4, 5], [11, 12])]
    >>> dataset = tf.data.Dataset.from_generator(
    ...     lambda: iter(elements), (tf.int32, tf.int32))
    >>> # Pad the first component of the tuple to length 4, and the second
    >>> # component to the smallest size that fits.
    >>> dataset = dataset.padded_batch(2,
    ...     padded_shapes=([4], [None]),
    ...     padding_values=(-1, 100))
    >>> list(dataset.as_numpy_iterator())
    [(array([[ 1,  2,  3, -1], [ 4,  5, -1, -1]], dtype=int32),
      array([[ 10, 100], [ 11,  12]], dtype=int32))]
    >>> # Pad with a single value and multiple components.
    >>> E = tf.data.Dataset.zip((A, A)).padded_batch(2, padding_values=-1)
    >>> for element in E.as_numpy_iterator():
    ...   print(element)
    (array([[ 1, -1],
           [ 2,  2]], dtype=int32), array([[ 1, -1],
           [ 2,  2]], dtype=int32))
    (array([[ 3,  3,  3, -1],
           [ 4,  4,  4,  4]], dtype=int32), array([[ 3,  3,  3, -1],
           [ 4,  4,  4,  4]], dtype=int32))

    See also `tf.data.experimental.dense_to_sparse_batch`, which combines
    elements that may have different shapes into a `tf.sparse.SparseTensor`.

    Args:
      batch_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements of this dataset to combine in a single batch.
      padded_shapes: (Optional.) A (nested) structure of `tf.TensorShape` or
        `tf.int64` vector tensor-like objects representing the shape to which
        the respective component of each input element should be padded prior
        to batching. Any unknown dimensions will be padded to the maximum size
        of that dimension in each batch. If unset, all dimensions of all
        components are padded to the maximum size in the batch. `padded_shapes`
        must be set if any component has an unknown rank.
      padding_values: (Optional.) A (nested) structure of scalar-shaped
        `tf.Tensor`, representing the padding values to use for the respective
        components. None represents that the (nested) structure should be padded
        with default values.  Defaults are `0` for numeric types and the empty
        string for string types. The `padding_values` should have the same
        (nested) structure as the input dataset. If `padding_values` is a single
        element and the input dataset has multiple components, then the same
        `padding_values` will be used to pad every component of the dataset.
        If `padding_values` is a scalar, then its value will be broadcasted
        to match the shape of each component.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: If a component has an unknown rank, and the `padded_shapes`
        argument is not set.
      TypeError: If a component is of an unsupported type. The list of supported
        types is documented in
        https://www.tensorflow.org/guide/data#dataset_structure.
    Nz<You must provide `padded_shapes` argument because component z has unknown rank.)r=   )get_legacy_output_shapesrq   r	   r   r;   PaddedBatchDataset)rV   r+  padded_shapespadding_valuesr,  r=   re   r   r>   r>   r?   padded_batch  s    xzDatasetV2.padded_batchc             C   sJ   |dkst r2|dk	r"t s"td t| |d|dS t| |||d|dS dS )a  Maps `map_func` across the elements of this dataset.

    This transformation applies `map_func` to each element of this dataset, and
    returns a new dataset containing the transformed elements, in the same
    order as they appeared in the input. `map_func` can be used to change both
    the values and the structure of a dataset's elements. Supported structure
    constructs are documented
    [here](https://www.tensorflow.org/guide/data#dataset_structure).

    For example, `map` can be used for adding 1 to each element, or projecting a
    subset of element components.

    >>> dataset = Dataset.range(1, 6)  # ==> [ 1, 2, 3, 4, 5 ]
    >>> dataset = dataset.map(lambda x: x + 1)
    >>> list(dataset.as_numpy_iterator())
    [2, 3, 4, 5, 6]

    The input signature of `map_func` is determined by the structure of each
    element in this dataset.

    >>> dataset = Dataset.range(5)
    >>> # `map_func` takes a single argument of type `tf.Tensor` with the same
    >>> # shape and dtype.
    >>> result = dataset.map(lambda x: x + 1)

    >>> # Each element is a tuple containing two `tf.Tensor` objects.
    >>> elements = [(1, "foo"), (2, "bar"), (3, "baz")]
    >>> dataset = tf.data.Dataset.from_generator(
    ...     lambda: elements, (tf.int32, tf.string))
    >>> # `map_func` takes two arguments of type `tf.Tensor`. This function
    >>> # projects out just the first component.
    >>> result = dataset.map(lambda x_int, y_str: x_int)
    >>> list(result.as_numpy_iterator())
    [1, 2, 3]

    >>> # Each element is a dictionary mapping strings to `tf.Tensor` objects.
    >>> elements =  ([{"a": 1, "b": "foo"},
    ...               {"a": 2, "b": "bar"},
    ...               {"a": 3, "b": "baz"}])
    >>> dataset = tf.data.Dataset.from_generator(
    ...     lambda: elements, {"a": tf.int32, "b": tf.string})
    >>> # `map_func` takes a single argument of type `dict` with the same keys
    >>> # as the elements.
    >>> result = dataset.map(lambda d: str(d["a"]) + d["b"])

    The value or values returned by `map_func` determine the structure of each
    element in the returned dataset.

    >>> dataset = tf.data.Dataset.range(3)
    >>> # `map_func` returns two `tf.Tensor` objects.
    >>> def g(x):
    ...   return tf.constant(37.0), tf.constant(["Foo", "Bar", "Baz"])
    >>> result = dataset.map(g)
    >>> result.element_spec
    (TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(3,), dtype=tf.string, name=None))
    >>> # Python primitives, lists, and NumPy arrays are implicitly converted to
    >>> # `tf.Tensor`.
    >>> def h(x):
    ...   return 37.0, ["Foo", "Bar"], np.array([1.0, 2.0], dtype=np.float64)
    >>> result = dataset.map(h)
    >>> result.element_spec
    (TensorSpec(shape=(), dtype=tf.float32, name=None), TensorSpec(shape=(2,), dtype=tf.string, name=None), TensorSpec(shape=(2,), dtype=tf.float64, name=None))
    >>> # `map_func` can return nested structures.
    >>> def i(x):
    ...   return (37.0, [42, 16]), "foo"
    >>> result = dataset.map(i)
    >>> result.element_spec
    ((TensorSpec(shape=(), dtype=tf.float32, name=None),
      TensorSpec(shape=(2,), dtype=tf.int32, name=None)),
     TensorSpec(shape=(), dtype=tf.string, name=None))

    `map_func` can accept as arguments and return any type of dataset element.

    Note that irrespective of the context in which `map_func` is defined (eager
    vs. graph), tf.data traces the function and executes it as a graph. To use
    Python code inside of the function you have a few options:

    1) Rely on AutoGraph to convert Python code into an equivalent graph
    computation. The downside of this approach is that AutoGraph can convert
    some but not all Python code.

    2) Use `tf.py_function`, which allows you to write arbitrary Python code but
    will generally result in worse performance than 1). For example:

    >>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
    >>> # transform a string tensor to upper case string using a Python function
    >>> def upper_case_fn(t: tf.Tensor):
    ...   return t.numpy().decode('utf-8').upper()
    >>> d = d.map(lambda x: tf.py_function(func=upper_case_fn,
    ...           inp=[x], Tout=tf.string))
    >>> list(d.as_numpy_iterator())
    [b'HELLO', b'WORLD']

    3) Use `tf.numpy_function`, which also allows you to write arbitrary
    Python code. Note that `tf.py_function` accepts `tf.Tensor` whereas
    `tf.numpy_function` accepts numpy arrays and returns only numpy arrays.
    For example:

    >>> d = tf.data.Dataset.from_tensor_slices(['hello', 'world'])
    >>> def upper_case_fn(t: np.ndarray):
    ...   return t.decode('utf-8').upper()
    >>> d = d.map(lambda x: tf.numpy_function(func=upper_case_fn,
    ...           inp=[x], Tout=tf.string))
    >>> list(d.as_numpy_iterator())
    [b'HELLO', b'WORLD']

    Note that the use of `tf.numpy_function` and `tf.py_function`
    in general precludes the possibility of executing user-defined
    transformations in parallel (because of Python GIL).

    Performance can often be improved by setting `num_parallel_calls` so that
    `map` will use multiple threads to process elements. If deterministic order
    isn't required, it can also improve performance to set
    `deterministic=False`.

    >>> dataset = Dataset.range(1, 6)  # ==> [ 1, 2, 3, 4, 5 ]
    >>> dataset = dataset.map(lambda x: x + 1,
    ...     num_parallel_calls=tf.data.AUTOTUNE,
    ...     deterministic=False)

    The order of elements yielded by this transformation is deterministic if
    `deterministic=True`. If `map_func` contains stateful operations and
    `num_parallel_calls > 1`, the order in which that state is accessed is
    undefined, so the values of output elements may not be deterministic
    regardless of the `deterministic` flag value.
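
    When a reproducible output order is required even with parallel calls,
    `deterministic=True` can be passed explicitly (this matches the default
    behavior controlled by `tf.data.Options.deterministic`):

    ```python
    dataset = Dataset.range(1, 6)
    dataset = dataset.map(lambda x: x + 1,
                          num_parallel_calls=tf.data.AUTOTUNE,
                          deterministic=True)
    ```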

    Args:
      map_func: A function mapping a dataset element to another dataset element.
      num_parallel_calls: (Optional.) A `tf.int64` scalar `tf.Tensor`,
        representing the number of elements to process asynchronously in parallel.
        If not specified, elements will be processed sequentially. If the value
        `tf.data.AUTOTUNE` is used, then the number of parallel
        calls is set dynamically based on available CPU.
      deterministic: (Optional.) When `num_parallel_calls` is specified, if this
        boolean is specified (`True` or `False`), it controls the order in which
        the transformation produces elements. If set to `False`, the
        transformation is allowed to yield elements out of order to trade
        determinism for performance. If not specified, the
        `tf.data.Options.deterministic` option (`True` by default) controls the
        behavior.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    NzaThe `deterministic` argument has no effect unless the `num_parallel_calls` argument is specified.T)preserve_cardinalityr=   )r   r   r   
MapDatasetParallelMapDataset)rV   map_funcrb  rc  r=   r>   r>   r?   mapY  s     
zDatasetV2.mapc             C   s   t | ||dS )a	  Maps `map_func` across this dataset and flattens the result.

    The type signature is:

    ```
    def flat_map(
      self: Dataset[T],
      map_func: Callable[[T], Dataset[S]]
    ) -> Dataset[S]
    ```

    Use `flat_map` if you want to make sure that the order of your dataset
    stays the same. For example, to flatten a dataset of batches into a
    dataset of their elements:

    >>> dataset = tf.data.Dataset.from_tensor_slices(
    ...     [[1, 2, 3], [4, 5, 6], [7, 8, 9]])
    >>> dataset = dataset.flat_map(tf.data.Dataset.from_tensor_slices)
    >>> list(dataset.as_numpy_iterator())
    [1, 2, 3, 4, 5, 6, 7, 8, 9]

    `tf.data.Dataset.interleave()` is a generalization of `flat_map`, since
    `flat_map` produces the same output as
    `tf.data.Dataset.interleave(cycle_length=1)`
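
    A minimal sketch of that equivalence:

    >>> ds = tf.data.Dataset.from_tensor_slices([[1, 2], [3, 4]])
    >>> a = ds.flat_map(tf.data.Dataset.from_tensor_slices)
    >>> b = ds.interleave(tf.data.Dataset.from_tensor_slices, cycle_length=1)
    >>> list(a.as_numpy_iterator()) == list(b.as_numpy_iterator())
    True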

    Args:
      map_func: A function mapping a dataset element to a dataset.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )FlatMapDataset)rV   rm  r=   r>   r>   r?   r   	  s    !zDatasetV2.flat_mapc             C   s   ddl m} || ||S )a  Drops elements that cause errors.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1., 2., 0., 4.])
    >>> dataset = dataset.map(lambda x: tf.debugging.check_numerics(1. / x, ""))
    >>> list(dataset.as_numpy_iterator())
    Traceback (most recent call last):
    ...
    InvalidArgumentError: ... Tensor had Inf values
    >>> dataset = dataset.ignore_errors()
    >>> list(dataset.as_numpy_iterator())
    [1.0, 0.5, 0.25]

    Args:
      log_warning: (Optional.) A bool indicating whether or not ignored errors
        should be logged to stderr. Defaults to `False`.
      name: (Optional.) A string indicating a name for the `tf.data` operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    r   )ignore_errors_op)r   rp  ignore_errors)rV   Zlog_warningr=   rp  r>   r>   r?   rq  #	  s    zDatasetV2.ignore_errorsc          	   C   sf   |dkrd}|dkrt }|dks$trL|dk	r:ts:td t| ||||dS t| ||||||dS dS )a  Maps `map_func` across this dataset, and interleaves the results.

    The type signature is:

    ```
    def interleave(
      self: Dataset[T],
      map_func: Callable[[T], Dataset[S]]
    ) -> Dataset[S]
    ```

    For example, you can use `Dataset.interleave()` to process many input files
    concurrently:

    >>> # Preprocess 4 files concurrently, and interleave blocks of 16 records
    >>> # from each file.
    >>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
    ...              "/var/data/file3.txt", "/var/data/file4.txt"]
    >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
    >>> def parse_fn(filename):
    ...   return tf.data.Dataset.range(10)
    >>> dataset = dataset.interleave(lambda x:
    ...     tf.data.TextLineDataset(x).map(parse_fn, num_parallel_calls=1),
    ...     cycle_length=4, block_length=16)

    The `cycle_length` and `block_length` arguments control the order in which
    elements are produced. `cycle_length` controls the number of input elements
    that are processed concurrently. If you set `cycle_length` to 1, this
    transformation will handle one input element at a time, and will produce
    identical results to `tf.data.Dataset.flat_map`. In general,
    this transformation will apply `map_func` to `cycle_length` input elements,
    open iterators on the returned `Dataset` objects, and cycle through them,
    producing `block_length` consecutive elements from each iterator and
    consuming the next input element each time it reaches the end of an
    iterator.

    For example:

    >>> dataset = Dataset.range(1, 6)  # ==> [ 1, 2, 3, 4, 5 ]
    >>> # NOTE: New lines indicate "block" boundaries.
    >>> dataset = dataset.interleave(
    ...     lambda x: Dataset.from_tensors(x).repeat(6),
    ...     cycle_length=2, block_length=4)
    >>> list(dataset.as_numpy_iterator())
    [1, 1, 1, 1,
     2, 2, 2, 2,
     1, 1,
     2, 2,
     3, 3, 3, 3,
     4, 4, 4, 4,
     3, 3,
     4, 4,
     5, 5, 5, 5,
     5, 5]

    Note: The order of elements yielded by this transformation is
    deterministic, as long as `map_func` is a pure function and
    `deterministic=True`. If `map_func` contains any stateful operations, the
    order in which that state is accessed is undefined.

    Performance can often be improved by setting `num_parallel_calls` so that
    `interleave` will use multiple threads to fetch elements. If determinism
    isn't required, it can also improve performance to set
    `deterministic=False`.

    >>> filenames = ["/var/data/file1.txt", "/var/data/file2.txt",
    ...              "/var/data/file3.txt", "/var/data/file4.txt"]
    >>> dataset = tf.data.Dataset.from_tensor_slices(filenames)
    >>> dataset = dataset.interleave(lambda x: tf.data.TFRecordDataset(x),
    ...     cycle_length=4, num_parallel_calls=tf.data.AUTOTUNE,
    ...     deterministic=False)

    Args:
      map_func: A function that takes a dataset element and returns a
        `tf.data.Dataset`.
      cycle_length: (Optional.) The number of input elements that will be
        processed concurrently. If not set, the tf.data runtime decides what it
        should be based on available CPU. If `num_parallel_calls` is set to
        `tf.data.AUTOTUNE`, the `cycle_length` argument identifies
        the maximum degree of parallelism.
      block_length: (Optional.) The number of consecutive elements to produce
        from each input element before cycling to another input element. If not
        set, defaults to 1.
      num_parallel_calls: (Optional.) If specified, the implementation creates a
        threadpool, which is used to fetch inputs from cycle elements
        asynchronously and in parallel. The default behavior is to fetch inputs
        from cycle elements synchronously with no parallelism. If the value
        `tf.data.AUTOTUNE` is used, then the number of parallel
        calls is set dynamically based on available CPU.
      deterministic: (Optional.) When `num_parallel_calls` is specified, if this
        boolean is specified (`True` or `False`), it controls the order in which
        the transformation produces elements. If set to `False`, the
        transformation is allowed to yield elements out of order to trade
        determinism for performance. If not specified, the
        `tf.data.Options.deterministic` option (`True` by default) controls the
        behavior.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    Nr   zaThe `deterministic` argument has no effect unless the `num_parallel_calls` argument is specified.)r=   )rc  r=   )r5   r   r   r   InterleaveDatasetParallelInterleaveDataset)rV   rm  cycle_lengthblock_lengthrb  rc  r=   r>   r>   r?   
interleave=	  s"    l
zDatasetV2.interleavec             C   s   ddl m} || ||S )a  Filters this dataset according to `predicate`.

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
    >>> dataset = dataset.filter(lambda x: x < 3)
    >>> list(dataset.as_numpy_iterator())
    [1, 2]
    >>> # `tf.math.equal(x, y)` is required for equality comparison
    >>> def filter_fn(x):
    ...   return tf.math.equal(x, 1)
    >>> dataset = dataset.filter(filter_fn)
    >>> list(dataset.as_numpy_iterator())
    [1]

    Args:
      predicate: A function mapping a dataset element to a boolean.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    r   )	filter_op)r   rw  filter)rV   	predicater=   rw  r>   r>   r?   rx  	  s    zDatasetV2.filterc             C   s2   || }t |ts&tdt| d| g|_|S )a  Applies a transformation function to this dataset.

    `apply` enables chaining of custom `Dataset` transformations, which are
    represented as functions that take one `Dataset` argument and return a
    transformed `Dataset`.

    >>> dataset = tf.data.Dataset.range(100)
    >>> def dataset_fn(ds):
    ...   return ds.filter(lambda x: x < 5)
    >>> dataset = dataset.apply(dataset_fn)
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 2, 3, 4]
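
    Since `transformation_func` is an ordinary callable, parameterized
    transformations can be written as factories that return one; a minimal
    sketch:

    >>> def take_first_n(n):
    ...   def _apply_fn(ds):
    ...     return ds.take(n)
    ...   return _apply_fn
    >>> dataset = tf.data.Dataset.range(100).apply(take_first_n(3))
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 2]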

    Args:
      transformation_func: A function that takes one `Dataset` argument and
        returns a `Dataset`.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    zB`transformation_func` must return a `tf.data.Dataset` object. Got rI   )rA   rG   rS   rD   _input_datasets)rV   transformation_funcr   r>   r>   r?   apply	  s    
zDatasetV2.applyc             C   s    |dkr|}t | |||||dS )a  Returns a dataset of "windows".

    Each "window" is a dataset that contains a subset of elements of the
    input dataset. These are finite datasets of size `size` (or possibly fewer
    if there are not enough input elements to fill the window and
    `drop_remainder` evaluates to `False`).

    For example:

    >>> dataset = tf.data.Dataset.range(7).window(3)
    >>> for window in dataset:
    ...   print(window)
    <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int64, name=None)>
    <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int64, name=None)>
    <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int64, name=None)>

    Since windows are datasets, they can be iterated over:

    >>> for window in dataset:
    ...   print(list(window.as_numpy_iterator()))
    [0, 1, 2]
    [3, 4, 5]
    [6]

    #### Shift

    The `shift` argument determines the number of input elements by which
    successive windows are shifted. If windows and elements are both numbered
    starting at 0, the first element in window `k` will be element `k * shift`
    of the input dataset. In particular, the first element of the first window
    will always be the first element of the input dataset.

    >>> dataset = tf.data.Dataset.range(7).window(3, shift=1,
    ...                                           drop_remainder=True)
    >>> for window in dataset:
    ...   print(list(window.as_numpy_iterator()))
    [0, 1, 2]
    [1, 2, 3]
    [2, 3, 4]
    [3, 4, 5]
    [4, 5, 6]

    #### Stride

    The `stride` argument determines the stride between input elements within a
    window.

    >>> dataset = tf.data.Dataset.range(7).window(3, shift=1, stride=2,
    ...                                           drop_remainder=True)
    >>> for window in dataset:
    ...   print(list(window.as_numpy_iterator()))
    [0, 2, 4]
    [1, 3, 5]
    [2, 4, 6]

    #### Nested elements

    When the `window` transformation is applied to a dataset whose elements are
    nested structures, it produces a dataset where the elements have the same
    nested structure but each leaf is replaced by a window. In other words,
    the nesting is applied outside of the windows as opposed to inside of them.

    The type signature is:

    ```
    def window(
        self: Dataset[Nest[T]], ...
    ) -> Dataset[Nest[Dataset[T]]]
    ```

    Applying `window` to a `Dataset` of tuples gives a tuple of windows:

    >>> dataset = tf.data.Dataset.from_tensor_slices(([1, 2, 3, 4, 5],
    ...                                               [6, 7, 8, 9, 10]))
    >>> dataset = dataset.window(2)
    >>> windows = next(iter(dataset))
    >>> windows
    (<...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int32, name=None)>,
     <...Dataset element_spec=TensorSpec(shape=(), dtype=tf.int32, name=None)>)

    >>> def to_numpy(ds):
    ...   return list(ds.as_numpy_iterator())
    >>>
    >>> for windows in dataset:
    ...   print(to_numpy(windows[0]), to_numpy(windows[1]))
    [1, 2] [6, 7]
    [3, 4] [8, 9]
    [5] [10]

    Applying `window` to a `Dataset` of dictionaries gives a dictionary of
    `Datasets`:

    >>> dataset = tf.data.Dataset.from_tensor_slices({'a': [1, 2, 3],
    ...                                               'b': [4, 5, 6],
    ...                                               'c': [7, 8, 9]})
    >>> dataset = dataset.window(2)
    >>> def to_numpy(ds):
    ...   return list(ds.as_numpy_iterator())
    >>>
    >>> for windows in dataset:
    ...   print(tf.nest.map_structure(to_numpy, windows))
    {'a': [1, 2], 'b': [4, 5], 'c': [7, 8]}
    {'a': [3], 'b': [6], 'c': [9]}

    #### Flatten a dataset of windows

    The `Dataset.flat_map` and `Dataset.interleave` methods can be used to
    flatten a dataset of windows into a single dataset.

    The argument to `flat_map` is a function that takes an element from the
    dataset and returns a `Dataset`. `flat_map` chains together the resulting
    datasets sequentially.

    For example, to turn each window into a dense tensor:

    >>> dataset = tf.data.Dataset.range(7).window(3, shift=1,
    ...                                           drop_remainder=True)
    >>> batched = dataset.flat_map(lambda x:x.batch(3))
    >>> for batch in batched:
    ...   print(batch.numpy())
    [0 1 2]
    [1 2 3]
    [2 3 4]
    [3 4 5]
    [4 5 6]

    Args:
      size: A `tf.int64` scalar `tf.Tensor`, representing the number of elements
        of the input dataset to combine into a window. Must be positive.
      shift: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        number of input elements by which the window moves in each iteration.
        Defaults to `size`. Must be positive.
      stride: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the
        stride of the input elements in the sliding window. Must be positive.
        The default value of 1 means "retain every input element".
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last windows should be dropped if their size is smaller than
        `size`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    N)r=   )WindowDataset)rV   sizeshiftstrider,  r=   r>   r>   r?   window	  s     zDatasetV2.windowc             C   s   t d t|}W dQ R X t|}d}x~|rtj|d|| jfdd}|j}t	
dd |}xDtt	|t	|D ]*\}	}
t|	|
s~td	| d
|j dq~W |j}t	
dd |}xBtt	|t	|D ](\}}||krtd| d
|j dqW |j}t	
dd |}t	|}t	|}dd t||D }d}xHt||D ]:\}}|jdk	rT|jdks| | krTd}P qTW |r2t|t	|||}q2W |j}|t   |  }t }|rt||_t|tj|j t!|||j"|t#|t$||% dS )a  Reduces the input dataset to a single element.

    The transformation calls `reduce_func` successively on every element of
    the input dataset until the dataset is exhausted, aggregating information in
    its internal state. The `initial_state` argument is used for the initial
    state and the final state is returned as the result.

    >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, _: x + 1).numpy()
    5
    >>> tf.data.Dataset.range(5).reduce(np.int64(0), lambda x, y: x + y).numpy()
    10
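
    The state may be a nested structure, which allows several aggregates to be
    computed in a single pass, for example an element count and a running sum:

    >>> ds = tf.data.Dataset.range(10)
    >>> count, total = ds.reduce((np.int64(0), np.int64(0)),
    ...                          lambda state, x: (state[0] + 1, state[1] + x))
    >>> (count.numpy(), total.numpy())
    (10, 45)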

    Args:
      initial_state: An element representing the initial state of the
        transformation.
      reduce_func: A function that maps `(old_state, input_element)` to
        `new_state`. It must take two arguments and return a new state.
        The structure of `new_state` must match the structure of
        `initial_state`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A dataset element corresponding to the final state of the transformation.

    initial_stateNTzreduce()F)input_structurer   c             S   s   |   S )N)_to_legacy_output_classes)r   r>   r>   r?   r   
  r   z"DatasetV2.reduce.<locals>.<lambda>zMThe element classes for the new state must match the initial state. Expected z	 but got rI   c             S   s   |   S )N)_to_legacy_output_types)r   r>   r>   r?   r   
  r   zKThe element types for the new state must match the initial state. Expected c             S   s   |   S )N)_to_legacy_output_shapes)r   r>   r>   r?   r   
  r   c             S   s   g | ]\}}| |qS r>   )most_specific_compatible_shape)rd   originalnewr>   r>   r?   rh   
  s   z$DatasetV2.reduce.<locals>.<listcomp>)fr   r   r   )&r   r7  r   r  r  r   StructuredFunctionWrapperr   output_classesr	   r  r   r   r>  rS   r   r   ndimsas_listconvert_legacy_structurer  r   r   rK   r   r   r   r@   r=   from_compatible_tensor_listr   Zreduce_datasetrZ   r  captured_inputsr   r   rn   )rV   r  reduce_funcr=   Zstate_structureneed_to_rerunwrapped_funcr  Zstate_classesnew_state_classZstate_classr   Zstate_typesnew_state_typeZ
state_typer   Zstate_shapesflat_state_shapesflat_new_state_shapesweakened_state_shapesoriginal_shapeweakened_shaper   r   r>   r>   r?   reduce
  s    








zDatasetV2.reducec             C   s>   t  }|rt||_t| jtj| j	fd|
 i| jS )a  Returns the single element of the `dataset`.

    The function enables you to use a `tf.data.Dataset` in a stateless
    "tensor-in tensor-out" expression, without creating an iterator.
    This makes it easy to apply data transformations to tensors using the
    optimized `tf.data.Dataset` abstraction on top of them.

    For example, let's consider a `preprocessing_fn` which takes the raw
    features as input and returns the processed feature along with its label.

    ```python
    def preprocessing_fn(raw_feature):
      # ... the raw_feature is preprocessed as per the use-case
      return feature

    raw_features = ...  # input batch of BATCH_SIZE elements.
    dataset = (tf.data.Dataset.from_tensor_slices(raw_features)
              .map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
              .batch(BATCH_SIZE))

    processed_features = dataset.get_single_element()
    ```

    In the above example, the `raw_features` tensor of length `BATCH_SIZE`
    was converted to a `tf.data.Dataset`. Next, each `raw_feature` was
    mapped using the `preprocessing_fn` and the processed features were
    grouped into a single batch. The final `dataset` contains only one element,
    which is a batch of all the processed features.

    NOTE: The `dataset` should contain only one element.

    Now, instead of creating an iterator for the `dataset` and retrieving the
    batch of features, the `dataset.get_single_element()` method is used
    to skip the iterator creation process and directly output the batch of
    features.

    This can be particularly useful when your tensor transformations are
    expressed as `tf.data.Dataset` operations, and you want to use those
    transformations while serving your model.

    #### Keras

    ```python

    model = ... # A pre-built or custom model

    class PreprocessingModel(tf.keras.Model):
      def __init__(self, model):
        super().__init__(self)
        self.model = model

      @tf.function(input_signature=[...])
      def serving_fn(self, data):
        ds = tf.data.Dataset.from_tensor_slices(data)
        ds = ds.map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
        ds = ds.batch(batch_size=BATCH_SIZE)
        return tf.argmax(self.model(ds.get_single_element()), axis=-1)

    preprocessing_model = PreprocessingModel(model)
    your_exported_model_dir = ... # save the model to this path.
    tf.saved_model.save(preprocessing_model, your_exported_model_dir,
                  signatures={'serving_default': preprocessing_model.serving_fn}
                  )
    ```

    #### Estimator

    In the case of estimators, you generally need to define a `serving_input_fn`
    which would require the features to be processed by the model during
    inference.

    ```python
    def serving_input_fn():

      raw_feature_spec = ... # Spec for the raw_features
      input_fn = tf.estimator.export.build_parsing_serving_input_receiver_fn(
          raw_feature_spec, default_batch_size=None)
      serving_input_receiver = input_fn()
      raw_features = serving_input_receiver.features

      def preprocessing_fn(raw_feature):
        # ... the raw_feature is preprocessed as per the use-case
        return feature

      dataset = (tf.data.Dataset.from_tensor_slices(raw_features)
                .map(preprocessing_fn, num_parallel_calls=BATCH_SIZE)
                .batch(BATCH_SIZE))

      processed_features = dataset.get_single_element()

      # Please note that the value of `BATCH_SIZE` should be equal to
      # the size of the leading dimension of `raw_features`. This ensures
      # that `dataset` has only one element, which is a pre-requisite for
      # using `dataset.get_single_element()`.

      return tf.estimator.export.ServingInputReceiver(
          processed_features, serving_input_receiver.receiver_tensors)

    estimator = ... # A pre-built or custom estimator
    estimator.export_saved_model(your_exported_model_dir, serving_input_fn)
    ```

    Args:
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A nested structure of `tf.Tensor` objects, corresponding to the single
      element of `dataset`.

    Raises:
      InvalidArgumentError: (at runtime) if `dataset` does not contain exactly
        one element.
    r   )r   r   r@   r=   r   r  r   r   Zdataset_to_single_elementrZ   rn   r   )rV   r=   r   r>   r>   r?   get_single_element   s    u
zDatasetV2.get_single_elementc             C   s   t | }t||dS )a  Splits elements of a dataset into multiple elements.

    For example, if elements of the dataset are shaped `[B, a0, a1, ...]`,
    where `B` may vary for each input element, then for each element in the
    dataset, the unbatched dataset will contain `B` consecutive elements
    of shape `[a0, a1, ...]`.

    >>> elements = [ [1, 2, 3], [1, 2], [1, 2, 3, 4] ]
    >>> dataset = tf.data.Dataset.from_generator(lambda: elements, tf.int64)
    >>> dataset = dataset.unbatch()
    >>> list(dataset.as_numpy_iterator())
    [1, 2, 3, 1, 2, 1, 2, 3, 4]

    Note: `unbatch` requires a data copy to slice up the batched tensor into
    smaller, unbatched tensors. When optimizing performance, try to avoid
    unnecessary usage of `unbatch`.

    Args:
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )normalize_to_dense_UnbatchDataset)rV   r=   normalized_datasetr>   r>   r?   unbatch  s    zDatasetV2.unbatchc             C   s   t | ||dS )a  Returns a new `tf.data.Dataset` with the given options set.

    The options are "global" in the sense that they apply to the entire dataset.
    If options are set multiple times, they are merged as long as different
    options do not use different non-default values.

    >>> ds = tf.data.Dataset.range(5)
    >>> ds = ds.interleave(lambda x: tf.data.Dataset.range(5),
    ...                    cycle_length=3,
    ...                    num_parallel_calls=3)
    >>> options = tf.data.Options()
    >>> # This will make the interleave order non-deterministic.
    >>> options.deterministic = False
    >>> ds = ds.with_options(options)

    Args:
      options: A `tf.data.Options` that identifies the options to use.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: when an option is set more than once to a non-default value
    )r=   )r   )rV   r   r=   r>   r>   r?   with_options  s    zDatasetV2.with_optionsc             C   s   t | jS )a  Returns the cardinality of the dataset, if known.

    `cardinality` may return `tf.data.INFINITE_CARDINALITY` if the dataset
    contains an infinite number of elements or `tf.data.UNKNOWN_CARDINALITY` if
    the analysis fails to determine the number of elements in the dataset
    (e.g. when the dataset source is a file).

    >>> dataset = tf.data.Dataset.range(42)
    >>> print(dataset.cardinality().numpy())
    42
    >>> dataset = dataset.repeat()
    >>> cardinality = dataset.cardinality()
    >>> print((cardinality == tf.data.INFINITE_CARDINALITY).numpy())
    True
    >>> dataset = dataset.filter(lambda x: True)
    >>> cardinality = dataset.cardinality()
    >>> print((cardinality == tf.data.UNKNOWN_CARDINALITY).numpy())
    True

    Returns:
      A scalar `tf.int64` `Tensor` representing the cardinality of the dataset.
      If the cardinality is infinite or unknown, `cardinality` returns the
      named constants `tf.data.INFINITE_CARDINALITY` and
      `tf.data.UNKNOWN_CARDINALITY` respectively.
    )r   Zdataset_cardinalityrZ   )rV   r>   r>   r?   r     s    zDatasetV2.cardinalityc                sV    dk	r|s dk	s |s t d dk	r8 fdd}|}|dk	sDtt| ||||dS )aW  Groups windows of elements by key and reduces them.

    This transformation maps each consecutive element in a dataset to a key
    using `key_func` and groups the elements by key. It then applies
    `reduce_func` to at most `window_size_func(key)` elements matching the same
    key. All except the final window for each key will contain
    `window_size_func(key)` elements; the final window may be smaller.

    You may provide either a constant `window_size` or a window size determined
    by the key through `window_size_func`.

    >>> dataset = tf.data.Dataset.range(10)
    >>> window_size = 5
    >>> key_func = lambda x: x%2
    >>> reduce_func = lambda key, dataset: dataset.batch(window_size)
    >>> dataset = dataset.group_by_window(
    ...           key_func=key_func,
    ...           reduce_func=reduce_func,
    ...           window_size=window_size)
    >>> for elem in dataset.as_numpy_iterator():
    ...   print(elem)
    [0 2 4 6 8]
    [1 3 5 7 9]
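
    The window size can also be chosen per key via `window_size_func`, which is
    mutually exclusive with `window_size`:

    >>> dataset = tf.data.Dataset.range(10)
    >>> dataset = dataset.group_by_window(
    ...           key_func=lambda x: x % 2,
    ...           reduce_func=lambda key, ds: ds.batch(5),
    ...           window_size_func=lambda key: 5)
    >>> for elem in dataset.as_numpy_iterator():
    ...   print(elem)
    [0 2 4 6 8]
    [1 3 5 7 9]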

    Args:
      key_func: A function mapping a nested structure of tensors (having shapes
        and types defined by `self.output_shapes` and `self.output_types`) to a
        scalar `tf.int64` tensor.
      reduce_func: A function mapping a key and a dataset of up to `window_size`
        consecutive elements matching that key to another dataset.
      window_size: A `tf.int64` scalar `tf.Tensor`, representing the number of
        consecutive elements matching the same key to combine in a single batch,
        which will be passed to `reduce_func`. Mutually exclusive with
        `window_size_func`.
      window_size_func: A function mapping a key to a `tf.int64` scalar
        `tf.Tensor`, representing the number of consecutive elements matching
        the same key to combine in a single batch, which will be passed to
        `reduce_func`. Mutually exclusive with `window_size`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: if neither or both of {`window_size`, `window_size_func`} are
        passed.
    NzWEither the `window_size` argument or the `window_size_func` argument must be specified.c                s   t j tjdS )N)r   )r   r8  r   r   )Z
unused_key)window_sizer>   r?   constant_window_func  s    z7DatasetV2.group_by_window.<locals>.constant_window_func)r=   )r;   r   _GroupByWindowDataset)rV   key_funcr  r  window_size_funcr=   r  r>   )r  r?   group_by_window  s    5zDatasetV2.group_by_windowc
          
      s   t t d kr2tdt  dt  dtjtjd fdd}
 fdd	ddd	
f
dd}| j|
|dS )a  A transformation that buckets elements in a `Dataset` by length.

    Elements of the `Dataset` are grouped together by length and then are padded
    and batched.

    This is useful for sequence tasks in which the elements have variable
    length. Grouping together elements that have similar lengths reduces the
    total fraction of padding in a batch which increases training step
    efficiency.

    Below is an example that bucketizes the input data into 3 buckets
    "[0, 3), [3, 5), [5, inf)" based on sequence length, with batch size 2.

    >>> elements = [
    ...   [0], [1, 2, 3, 4], [5, 6, 7],
    ...   [7, 8, 9, 10, 11], [13, 14, 15, 16, 19, 20], [21, 22]]
    >>> dataset = tf.data.Dataset.from_generator(
    ...     lambda: elements, tf.int64, output_shapes=[None])
    >>> dataset = dataset.bucket_by_sequence_length(
    ...         element_length_func=lambda elem: tf.shape(elem)[0],
    ...         bucket_boundaries=[3, 5],
    ...         bucket_batch_sizes=[2, 2, 2])
    >>> for elem in dataset.as_numpy_iterator():
    ...   print(elem)
    [[1 2 3 4]
     [5 6 7 0]]
    [[ 7  8  9 10 11  0]
     [13 14 15 16 19 20]]
    [[ 0  0]
     [21 22]]
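
    The value used for padding can be overridden via `padding_values`; a minimal
    sketch that pads with `-1` instead of the default `0` (reusing `elements`
    from above):

    >>> dataset = tf.data.Dataset.from_generator(
    ...     lambda: elements, tf.int64, output_shapes=[None])
    >>> dataset = dataset.bucket_by_sequence_length(
    ...         element_length_func=lambda elem: tf.shape(elem)[0],
    ...         bucket_boundaries=[3, 5],
    ...         bucket_batch_sizes=[2, 2, 2],
    ...         padding_values=np.int64(-1))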

    Args:
      element_length_func: function from element in `Dataset` to `tf.int32`,
        determines the length of the element, which will determine the bucket it
        goes into.
      bucket_boundaries: `list<int>`, upper length boundaries of the buckets.
      bucket_batch_sizes: `list<int>`, batch size per bucket. Length should be
        `len(bucket_boundaries) + 1`.
      padded_shapes: Nested structure of `tf.TensorShape` to pass to
        `tf.data.Dataset.padded_batch`. If not provided, will use
        `dataset.output_shapes`, which will result in variable length dimensions
        being padded out to the maximum length in each batch.
      padding_values: Values to pad with, passed to
        `tf.data.Dataset.padded_batch`. Defaults to padding with 0.
      pad_to_bucket_boundary: bool, if `False`, will pad dimensions with unknown
        size to maximum length in batch. If `True`, will pad dimensions with
        unknown size to bucket boundary minus 1 (i.e., the maximum length in
        each bucket), and caller must ensure that the source `Dataset` does not
        contain any elements with length longer than `max(bucket_boundaries)`.
      no_padding: `bool`, if `True`, the batch features are not padded; in that
        case the features need to be either of type `tf.sparse.SparseTensor` or
        of the same shape.
      drop_remainder: (Optional.) A `tf.bool` scalar `tf.Tensor`, representing
        whether the last batch should be dropped in the case it has fewer than
        `batch_size` elements; the default behavior is not to drop the smaller
        batch.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      ValueError: if `len(bucket_batch_sizes) != len(bucket_boundaries) + 1`.
    r   z_`len(bucket_batch_sizes)` must equal `len(bucket_boundaries) + 1` but `len(bucket_batch_sizes)=z` and `len(bucket_boundaries)=z`.)r   c                 sh   |  }t  }ttjjg| }|ttjjg }tt||t	||}t
t|}|S )z6Return int64 id of the length bucket for this element.)r  r   rF  int32minrG  r#   logical_andZ
less_equalless
reduce_minr   where)r   Z
seq_length
boundariesZbuckets_minZbuckets_maxZconditions_c	bucket_id)bucket_boundarieselement_length_funcr>   r?   element_to_bucket_idi  s    
zADatasetV2.bucket_by_sequence_length.<locals>.element_to_bucket_idc                s    |  }|S )Nr>   )r  r  )batch_sizesr>   r?   window_size_fnw  s    z;DatasetV2.bucket_by_sequence_length.<locals>.window_size_fnNc                sJ   g }x8t | D ]*}t|} fdd|D }|| qW t | |S )Nc                s"   g | ]}t |d kr n|qS )N)r   Zdimension_value)rd   d)none_fillerr>   r?   rh     s   zSDatasetV2.bucket_by_sequence_length.<locals>.make_padded_shapes.<locals>.<listcomp>)r	   r   r   r   r   r  )shapesr  Zpaddedr   r>   )r  r?   make_padded_shapes|  s    

z?DatasetV2.bucket_by_sequence_length.<locals>.make_padded_shapesc       
   	      s   	| }r|j |dS d}rd}tj| tjt d tjd|d}t	|g& tjtjd}||  }|d }W dQ R X t
|}p||d}	|j||	dS )zBatch elements in dataset.)r,  r=   NzUWhen pad_to_bucket_boundary=True, elements must have length < max(bucket_boundaries).r   )r   )r3  )r  )rd  r   Zassert_lessr   constantr   r   r   r   r<  re  ri  )
r  Zgrouped_datasetr+  r  err_msgcheckr  Zbucket_boundaryinput_shapesr  )
bucket_batch_sizesr  r,  r  r=   
no_paddingpad_to_bucket_boundaryrg  rh  r  r>   r?   batching_fn  s4    z8DatasetV2.bucket_by_sequence_length.<locals>.batching_fn)r  r  r  r=   )N)r   r;   r   r  r   r   r  )rV   r  r  r  rg  rh  r  r  r,  r=   r  r  r>   )r  r  r  r,  r  r  r=   r  r  rg  rh  r  r?   bucket_by_sequence_length  s    I
z#DatasetV2.bucket_by_sequence_lengthc             C   s   t | |dS )a  Creates a `Dataset` of pseudorandom values.

    The dataset generates a sequence of uniformly distributed integer values.

    >>> ds1 = tf.data.Dataset.random(seed=4).take(10)
    >>> ds2 = tf.data.Dataset.random(seed=4).take(10)
    >>> print(list(ds1.as_numpy_iterator())==list(ds2.as_numpy_iterator()))
    True

    Args:
      seed: (Optional) If specified, the dataset produces a deterministic
        sequence of values.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      Dataset: A `Dataset`.
    )r6  r=   )RandomDataset)r6  r=   r>   r>   r?   random  s    zDatasetV2.randomAUTOc       
      C   s`   d}| }|dkr.|j |d}dd }dd }n|}t||||||d}	|dk	r\|	j||d}	|	S )a  API to persist the output of the input dataset.

    The snapshot API allows users to transparently persist the output of their
    preprocessing pipeline to disk, and materialize the pre-processed data on a
    different training run.

    This API enables repeated preprocessing steps to be consolidated, and allows
    re-use of already processed data, trading off disk storage and network
    bandwidth for freeing up more valuable CPU resources and accelerator compute
    time.

    https://github.com/tensorflow/community/blob/master/rfcs/20200107-tf-data-snapshot.md
    has detailed design documentation of this feature.

    Users can specify various options to control the behavior of snapshot,
    including how snapshots are read from and written to by passing in
    user-defined functions to the `reader_func` and `shard_func` parameters.

    `shard_func` is a user specified function that maps input elements to
    snapshot shards.

    Users may want to specify this function to control how snapshot files should
    be written to disk. Below is an example of how a potential `shard_func`
    could be written.

    ```python
    dataset = ...
    dataset = dataset.enumerate()
    dataset = dataset.snapshot("/path/to/snapshot/dir",
        shard_func=lambda x, y: x % NUM_SHARDS, ...)
    dataset = dataset.map(lambda x, y: y)
    ```

    `reader_func` is a user specified function that accepts a single argument:
    (1) a Dataset of Datasets, each representing a "split" of elements of the
    original dataset. The cardinality of the input dataset matches the
    number of the shards specified in the `shard_func` (see above). The function
    should return a Dataset of elements of the original dataset.

    Users may want to specify this function to control how snapshot files should be
    read from disk, including the amount of shuffling and parallelism.

    Here is an example of a standard reader function a user can define. This
    function enables both dataset shuffling and parallel reading of datasets:

    ```python
    def user_reader_func(datasets):
      # shuffle the datasets splits
      datasets = datasets.shuffle(NUM_CORES)
      # read datasets in parallel and interleave their elements
      return datasets.interleave(lambda x: x, num_parallel_calls=AUTOTUNE)

    dataset = dataset.snapshot("/path/to/snapshot/dir",
        reader_func=user_reader_func)
    ```

    By default, snapshot parallelizes reads by the number of cores available on
    the system, but will not attempt to shuffle the data.

    Args:
      path: Required. A directory to use for storing / loading the snapshot to /
        from.
      compression: Optional. The type of compression to apply to the snapshot
        written to disk. Supported options are `GZIP`, `SNAPPY`, `AUTO` or None.
        Defaults to `AUTO`, which attempts to pick an appropriate compression
        algorithm for the dataset.
      reader_func: Optional. A function to control how to read data from
        snapshot shards.
      shard_func: Optional. A function to control how to shard data when writing
        a snapshot.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    N)r=   c             S   s   | t   S )N)multiprocessing	cpu_count)rV  r[   r>   r>   r?   r     r   z$DatasetV2.snapshot.<locals>.<lambda>c             S   s   |S )Nr>   )r[   elemr>   r>   r?   r     r   )rX   rZ  r[  r^  r\  r=   )rq   _SnapshotDatasetrn  )
rV   rZ  r[  r^  r\  r=   Zproject_funcrX   Zlocal_shard_funcr   r>   r>   r?   snapshot  s"    R
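# Illustrative sketch, not part of the original module: combining a custom
# `shard_func` with a shuffling, parallel `reader_func` in one pipeline. The
# snapshot path is hypothetical. Assumes TF 2.x.
import tensorflow as tf

NUM_SHARDS = 8

def reader_func(datasets):
    # Shuffle the dataset of shards, then read the shards in parallel.
    datasets = datasets.shuffle(NUM_SHARDS)
    return datasets.interleave(
        lambda x: x, num_parallel_calls=tf.data.AUTOTUNE)

dataset = tf.data.Dataset.range(1000)
dataset = dataset.enumerate()
dataset = dataset.snapshot(
    "/tmp/snapshot_dir",                          # hypothetical path
    compression="GZIP",
    shard_func=lambda index, value: index % NUM_SHARDS,
    reader_func=reader_func)
dataset = dataset.map(lambda index, value: value)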
zDatasetV2.snapshotc             C   s   t | |||dS )a  A transformation that scans a function across an input dataset.

    This transformation is a stateful relative of `tf.data.Dataset.map`.
    In addition to mapping `scan_func` across the elements of the input dataset,
    `scan()` accumulates one or more state tensors, whose initial values are
    `initial_state`.

    >>> dataset = tf.data.Dataset.range(10)
    >>> initial_state = tf.constant(0, dtype=tf.int64)
    >>> scan_func = lambda state, i: (state + i, state + i)
    >>> dataset = dataset.scan(initial_state=initial_state, scan_func=scan_func)
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 3, 6, 10, 15, 21, 28, 36, 45]

    Args:
      initial_state: A nested structure of tensors, representing the initial
        state of the accumulator.
      scan_func: A function that maps `(old_state, input_element)` to
        `(new_state, output_element)`. It must take two arguments and return a
        pair of nested structures of tensors. The `new_state` must match the
        structure of `initial_state`.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
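# Illustrative sketch, not part of the original module: `scan` with a tuple
# state, emitting a Fibonacci-like sequence. The state may be any (nested)
# structure of tensors, matching the description of `initial_state` above.
import tensorflow as tf

initial_state = (tf.constant(0, tf.int64), tf.constant(1, tf.int64))

def scan_func(state, _):
    a, b = state
    return (b, a + b), a   # (new_state, output_element)

fib = tf.data.Dataset.range(8).scan(initial_state, scan_func)
print(list(fib.as_numpy_iterator()))  # [0, 1, 1, 2, 3, 5, 8, 13]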
    )r  	scan_funcr=   )_ScanDataset)rV   r  r  r=   r>   r>   r?   scan+  s    zDatasetV2.scanc             C   s   t | ||dS )aL  A transformation that stops dataset iteration based on a `predicate`.

    >>> dataset = tf.data.Dataset.range(10)
    >>> dataset = dataset.take_while(lambda x: x < 5)
    >>> list(dataset.as_numpy_iterator())
    [0, 1, 2, 3, 4]

    Args:
      predicate: A function that maps a nested structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to a scalar `tf.bool` tensor.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )_TakeWhileDataset)rV   ry  r=   r>   r>   r?   
take_whileJ  s    zDatasetV2.take_whilec             C   s   t | |dS )a  A transformation that discards duplicate elements of a `Dataset`.

    Use this transformation to produce a dataset that contains one instance of
    each unique element in the input. For example:

    >>> dataset = tf.data.Dataset.from_tensor_slices([1, 37, 2, 37, 2, 1])
    >>> dataset = dataset.unique()
    >>> sorted(list(dataset.as_numpy_iterator()))
    [1, 2, 37]

    Note: This transformation only supports datasets which fit into memory
    and have elements of either `tf.int32`, `tf.int64` or `tf.string` type.

    Args:
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    )r=   )_UniqueDataset)rV   r=   r>   r>   r?   unique^  s    zDatasetV2.uniquec                sv  t j|ddttj|dk	rt j|dd}t|tj}t|\}}tj||dj	|d}	tj||dj	|d}
tj||dj	|d}nRt
| j |d|d}	|	jfdd|d}|jdd |d}
|jdd |d}t| |
|	 |}|jd	|d}|dk	rt|nd} fd
d}|dkr>| j||dS |dkrL|S tj| ||g|dd |ddS dS )aC  Resamples elements to reach a target distribution.

    Note: This implementation can reject **or repeat** elements in order to
    reach the `target_dist`. So, in some cases, the output `Dataset` may be
    larger than the input `Dataset`.

    >>> initial_dist = [0.6, 0.4]
    >>> n = 1000
    >>> elems = np.random.choice(len(initial_dist), size=n, p=initial_dist)
    >>> dataset = tf.data.Dataset.from_tensor_slices(elems)
    >>> zero, one = np.bincount(list(dataset.as_numpy_iterator())) / n

    Following from `initial_dist`, `zero` is ~0.6 and `one` is ~0.4.

    >>> target_dist = [0.5, 0.5]
    >>> dataset = dataset.rejection_resample(
    ...    class_func=lambda x: x,
    ...    target_dist=target_dist,
    ...    initial_dist=initial_dist)
    >>> dataset = dataset.map(lambda class_func_result, data: data)
    >>> zero, one = np.bincount(list(dataset.as_numpy_iterator())) / n

    Following from `target_dist`, `zero` is ~0.5 and `one` is ~0.5.

    Args:
      class_func: A function mapping an element of the input dataset to a scalar
        `tf.int32` tensor. Values should be in `[0, num_classes)`.
      target_dist: A floating point type tensor, shaped `[num_classes]`.
      initial_dist: (Optional.)  A floating point type tensor, shaped
        `[num_classes]`.  If not provided, the true class distribution is
        estimated live in a streaming fashion.
      seed: (Optional.) Python integer seed for the resampler.
      name: (Optional.) A name for the tf.data operation.

    Returns:
      A new `Dataset` with the transformation applied as described above.
    target_dist)r=   Ninitial_distc                s
   t |  S )N)'_calculate_acceptance_probs_with_mixing)initial)target_dist_tr>   r?   r     s   z.DatasetV2.rejection_resample.<locals>.<lambda>c             S   s   | S )Nr>   )Zaccept_probr[   r>   r>   r?   r     r   c             S   s   |S )Nr>   )r[   Zprob_originalr>   r>   r?   r     r      c                 s,   t | dkr |  | d fS  |  | fS d S )Nr   r   )r   )r   )
class_funcr>   r?   add_class_value  s    z5DatasetV2.rejection_resample.<locals>.add_class_valuer   r   c             S   s   | d|  fgS )Ng      ?r>   )Zprobr>   r>   r?   r     r   T)weightsr6  stop_on_empty_dataset)r   r8  r#   castr   float32r  rG   r   rD  _estimate_initial_dist_dsrn  
_filter_dsr/  _get_prob_original_staticr  sample_from_datasets)rV   r  r  r  r6  r=   initial_dist_tZacceptance_distZprob_of_originalinitial_dist_dsacceptance_dist_dsZprob_of_original_dsZacceptance_and_original_prob_dsfiltered_dsZprob_original_staticr  r>   )r  r  r?   rejection_resampleu  sH    .



zDatasetV2.rejection_resamplec       
         s  dd }| st dt|ts8|dkr:dgt|  g nt|tjrl|jt| gst d|j dn.t| t|krt dt| d	t|  d
t|tjs|| |\} }tj|dd}|j	t
jt
jfkrtd|j	 dttj|ddd t| dkr| d S  fdd}tt|d|dd}n<|dd }dd }t|t|df}	t|	|dd}t|| |S )a  Samples elements at random from the datasets in `datasets`.

    Creates a dataset by interleaving elements of `datasets` with `weight[i]`
    probability of picking an element from dataset `i`. Sampling is done without
    replacement. For example, suppose we have 2 datasets:

    ```python
    dataset1 = tf.data.Dataset.range(0, 3)
    dataset2 = tf.data.Dataset.range(100, 103)
    ```

    Suppose that we sample from these 2 datasets with the following weights:

    ```python
    sample_dataset = tf.data.Dataset.sample_from_datasets(
        [dataset1, dataset2], weights=[0.5, 0.5])
    ```

    One possible outcome of elements in sample_dataset is:

    ```
    print(list(sample_dataset.as_numpy_iterator()))
    # [100, 0, 1, 101, 2, 102]
    ```

    Args:
      datasets: A non-empty list of `tf.data.Dataset` objects with compatible
        structure.
      weights: (Optional.) A list or Tensor of `len(datasets)` floating-point
        values where `weights[i]` represents the probability to sample from
        `datasets[i]`, or a `tf.data.Dataset` object where each element is such
        a list. Defaults to a uniform distribution across `datasets`.
      seed: (Optional.) A `tf.int64` scalar `tf.Tensor`, representing the random
        seed that will be used to create the distribution. See
        `tf.random.set_seed` for behavior.
      stop_on_empty_dataset: If `True`, sampling stops if it encounters an empty
        dataset. If `False`, it continues sampling and skips any empty datasets.
        It is recommended to set it to `True`. Otherwise, the distribution of
        samples starts off as the user intends, but may change as input datasets
        become empty. This can be difficult to detect since the dataset starts
        off looking correct. Default to `False` for backward compatibility.

    Returns:
      A dataset that interleaves elements from `datasets` at random, according
      to `weights` if provided, otherwise with uniform probability.

    Raises:
      TypeError: If the `datasets` or `weights` arguments have the wrong type.
      ValueError:
        - If `datasets` is empty, or
        - If `weights` is specified and does not match the length of `datasets`.
    c             S   s6   dd t | |D }|r t | S | d dgdgfS )Nc             S   s    g | ]\}}|d kr||fqS )r   r>   )rd   r   weightr>   r>   r?   rh     s   z[DatasetV2.sample_from_datasets.<locals>._skip_datasets_with_zero_weight.<locals>.<listcomp>r   g      ?)r   rQ  )r"  r  Zdatasets_and_weightsr>   r>   r?   _skip_datasets_with_zero_weight  s    zGDatasetV2.sample_from_datasets.<locals>._skip_datasets_with_zero_weightz3Invalid `datasets`. `datasets` should not be empty.Ng      ?z]Invalid `weights`. The shape of `weights` should be compatible with `[len(datasets)]` but is rI   z]Invalid `weights`. `weights` should have the same length as `datasets` but got `len(weights)=z` vs. `len(datasets)=z`.r  )r=   zUInvalid `weights`. `weights` type must be either `tf.float32` or `tf.float64` but is logitsr   r   c                s   t jtj d| dddgdS )Nr   )r6  r   )r   )r   squeezer!   stateless_multinomial)r6  )r  r>   r?   select_dataset_constant_logitsA  s    
zFDatasetV2.sample_from_datasets.<locals>.select_dataset_constant_logits   F)use_inter_op_parallelismc              W   s   t j| ddS )Nr  )r=   )r#   log)pr>   r>   r?   r   R  r   z0DatasetV2.sample_from_datasets.<locals>.<lambda>c             S   s   t jtj| d|dddgdS )Nr   )r6  r   )r   )r   r  r!   r  )r  r6  r>   r>   r?   select_dataset_varying_logitsT  s    
zEDatasetV2.sample_from_datasets.<locals>.select_dataset_varying_logits)r;   rA   rG   r   r   ZTensorr   r   r8  r   r   r  Zfloat64rS   r   r   r#   r  rk  r  rd  rn  r  r   _DirectedInterleaveDataset)
r"  r  r6  r  r  r  selector_inputZ	logits_dsr  Zlogits_and_seedsr>   )r  r?   r    sB    :
zDatasetV2.sample_from_datasetsTc             C   sj   | st dt|ts*tdt| dt|jt	g t
jsTtd|j dt|d}t|| |S )a  Creates a dataset that deterministically chooses elements from `datasets`.

    For example, given the following datasets:

    ```python
    datasets = [tf.data.Dataset.from_tensors("foo").repeat(),
                tf.data.Dataset.from_tensors("bar").repeat(),
                tf.data.Dataset.from_tensors("baz").repeat()]

    # Define a dataset containing `[0, 1, 2, 0, 1, 2, 0, 1, 2]`.
    choice_dataset = tf.data.Dataset.range(3).repeat(3)

    result = tf.data.Dataset.choose_from_datasets(datasets, choice_dataset)
    ```

    The elements of `result` will be:

    ```
    "foo", "bar", "baz", "foo", "bar", "baz", "foo", "bar", "baz"
    ```

    Args:
      datasets: A non-empty list of `tf.data.Dataset` objects with compatible
        structure.
      choice_dataset: A `tf.data.Dataset` of scalar `tf.int64` tensors between
        `0` and `len(datasets) - 1`.
      stop_on_empty_dataset: If `True`, selection stops if it encounters an
        empty dataset. If `False`, it skips empty datasets. It is recommended to
        set it to `True`. Otherwise, the selected elements start off as the user
        intends, but may change as input datasets become empty. This can be
        difficult to detect since the dataset starts off looking correct.
        Defaults to `True`.

    Returns:
      A new `Dataset` with the transformation applied as described above.

    Raises:
      TypeError: If `datasets` or `choice_dataset` has the wrong type.
      ValueError: If `datasets` is empty.
    z3Invalid `datasets`. `datasets` should not be empty.zPInvalid `choice_dataset`. `choice_dataset` should be a `tf.data.Dataset` but is rI   zaInvalid `choice_dataset`. Elements of `choice_dataset` must be scalar `tf.int64` tensors but are rE  )r;   rA   rG   rS   rD   r   r  r   r   r   r   r   rH  r  )r"  Zchoice_datasetr  r>   r>   r?   choose_from_datasetsc  s    ,
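# Illustrative sketch, not part of the original module: deterministic
# round-robin over two datasets driven by a repeating choice dataset.
import tensorflow as tf

letters = tf.data.Dataset.from_tensor_slices(["a", "b", "c"])
numbers = tf.data.Dataset.from_tensor_slices(["1", "2", "3"])
choice = tf.data.Dataset.range(2).repeat(3)  # 0, 1, 0, 1, 0, 1
mixed = tf.data.Dataset.choose_from_datasets([letters, numbers], choice)
print([x.decode() for x in mixed.as_numpy_iterator()])
# ['a', '1', 'b', '2', 'c', '3']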

zDatasetV2.choose_from_datasets)N)N)NNNNN)N)N)FN)N)NNN)NN)r   N)NNN)rL  N)N)N)N)NNN)NNN)FNNN)NNFN)NNN)N)FN)NNNNN)N)Nr   FN)N)N)N)N)NNN)NNFFFN)NN)r  NNN)N)N)N)NNN)NNF)T)]r   r   r   r   rY   propertyrZ   setterr,   deprecated_argsrM   r   WARNr`   rt   rw   rx   
CHECKPOINTr{   ru   abcabstractmethodrP   r   r   r   classmethodr   r   r   r   r   __nonzero__r   abstractpropertyr   r   r   r   r   r   r   r   r   r   staticmethodr   r   r   r  r   r   r$  r   r   r&  r*  r/  r0  rD  rq   r@  rO  rQ  rS  rW  rY  r_  rd  ri  rn  r  rq  rv  rx  r|  r  r  r  r  r  r   r  r  r  r  r  r  r  r  r  r  __classcell__r>   r>   )r   r?   rG      s  X& 7
;		&R,      1&/
'
4
 L

)  
D
5


G  
JD   
B   
   
 $
#
    
|

 
v


  
E     
    
f


  
]   	rG   )	metaclassc                   sx  e Zd ZdZ fddZejdd Ze	dddd	 Z
d
d Ze	ddd[ddZd\ddZee	dddd Zee	dddd Zee	dddd Zedd Zeeejd]ddZeeejd^ddZee	dd d!d" Zeeejedd#d$d%d_d&d'Zeeejd(d) Zeeejd`d*d+Zeejda fd,d-	Zeejdb fd.d/	Zeeej dcd0d1Z eej!dd fd2d3	Z!eej"de fd4d5	Z"eej#df fd7d8	Z#eej$dg fd9d:	Z$eej%dh fd;d<	Z%eej&di fd=d>	Z&eej'dj fd@dA	Z'eej(dk fdBdC	Z(eej)dldDdEZ)e	ddFdmdGdHZ*eej+dn fdIdJ	Z+eej,do fdKdL	Z,eej-dp fdMdN	Z-e	ddOdPdQ Z.eej/ fdRdSZ/eej0dq fdUdV	Z0eej1dr fdWdX	Z1eej2ds fdYdZ	Z2  Z3S )trQ   zRepresents a potentially large set of elements.

  A `Dataset` can be used to represent an input pipeline as a
  collection of elements and a "logical plan" of transformations that act on
  those elements.
  c          
      sf   y|   }W nD tk
rP } z&dt|kr2tdtd|W d d }~X Y nX tt| | d S )N_as_variant_tensorzoPlease use `_variant_tensor` instead of `_as_variant_tensor()` to obtain the variant associated with a dataset.aC  {}: A likely cause of this error is that the super call for this dataset is not the last line of the `__init__` method. The base class invokes the `_as_variant_tensor()` method in its constructor and if that method uses attributes defined in the `__init__` method, those attributes need to be defined before the super call.)r  AttributeErrorrc   formatrz   rQ   rY   )rV   rW   r  )r   r>   r?   rY     s    zDatasetV1.__init__c             C   s   t t|  ddS )zCreates a scalar `tf.Tensor` of `tf.variant` representing this dataset.

    Returns:
      A scalar `tf.Tensor` of `tf.variant` type, which represents this dataset.
    z.as_variant_tensor()N)r   rD   )rV   r>   r>   r?   r    s    zDatasetV1._as_variant_tensorNa	  This is a deprecated API that should only be used in TF 1 graph mode and legacy TF 2 graph mode available through `tf.compat.v1`. In all other situations -- namely, eager mode and inside `tf.function` -- you can consume dataset elements using `for elem in dataset: ...` or by explicitly creating iterator via `iterator = iter(dataset)` and fetching its elements via `values = next(iterator)`. Furthermore, this API is not available in TF 2. During the transition from TF 1 to TF 2 you can use `tf.compat.v1.data.make_one_shot_iterator(dataset)` to create a TF 1 graph mode style iterator for a dataset created through TF 2 APIs. Note that this should be a transient state of your code base as there are in general no guarantees about the interoperability of TF 1 and TF 2 code.c             C   s   |   S )ak  Creates an iterator for elements of this dataset.

    Note: The returned iterator will be initialized automatically.
    A "one-shot" iterator does not currently support re-initialization. For
    that see `make_initializable_iterator`.

    Example:

    ```python
    # Building graph ...
    dataset = ...
    next_value = dataset.make_one_shot_iterator().get_next()

    # ... from within a session ...
    try:
      while True:
        value = sess.run(next_value)
        ...
    except tf.errors.OutOfRangeError:
        pass
    ```

    Returns:
      An `tf.data.Iterator` for elements of this dataset.
    )_make_one_shot_iterator)rV   r>   r>   r?   make_one_shot_iterator  s    'z DatasetV1.make_one_shot_iteratorc          
      s  t  r&tj tS Q R X t t	}t
d \ tjd|d fdd}y|t  W nB tk
r } z$dt|krtd|d n W d d }~X Y nX tj2 ttjf d|ijd tttS Q R X d S )NT)Zcapture_by_valueallowlisted_stateful_opsc                 s8    dk	r*dk	st t d  d   } | jS )zFactory function for a dataset.Ni9l    )r   core_random_seedZset_random_seedr   rZ   )r   )graph_level_seedop_level_seedrV   r>   r?   _make_dataset  s    z8DatasetV1._make_one_shot_iterator.<locals>._make_datasetzCannot capture a stateful nodea  {}: A likely cause of this error is that the dataset for which you are calling `make_one_shot_iterator()` captures a stateful object, such as a `tf.Variable` or `tf.lookup.StaticHashTable`, which is not supported. Use `make_initializable_iterator()` instead.Zdataset_factory)r   r   r   r   rZ   r   r   _ensure_same_dataset_graphr   Zobtain_capture_by_value_opsr  get_seedr   ZDefunr   rK   r;   rc   r  Iteratorr   Zone_shot_iteratorr   get_legacy_output_typesre  get_legacy_output_classes)rV   r  r  errr>   )r	  r
  rV   r?   r    s.    
z!DatasetV1._make_one_shot_iteratora  This is a deprecated API that should only be used in TF 1 graph mode and legacy TF 2 graph mode available through `tf.compat.v1`. In all other situations -- namely, eager mode and inside `tf.function` -- you can consume dataset elements using `for elem in dataset: ...` or by explicitly creating iterator via `iterator = iter(dataset)` and fetching its elements via `values = next(iterator)`. Furthermore, this API is not available in TF 2. During the transition from TF 1 to TF 2 you can use `tf.compat.v1.data.make_initializable_iterator(dataset)` to create a TF 1 graph mode style iterator for a dataset created through TF 2 APIs. Note that this should be a transient state of your code base as there are in general no guarantees about the interoperability of TF 1 and TF 2 code.c             C   s
   |  |S )a  Creates an iterator for elements of this dataset.

    Note: The returned iterator will be in an uninitialized state,
    and you must run the `iterator.initializer` operation before using it:

    ```python
    # Building graph ...
    dataset = ...
    iterator = dataset.make_initializable_iterator()
    next_value = iterator.get_next()  # This is a Tensor.

    # ... from within a session ...
    sess.run(iterator.initializer)
    try:
      while True:
        value = sess.run(next_value)
        ...
    except tf.errors.OutOfRangeError:
        pass
    ```

    Args:
      shared_name: (Optional.) If non-empty, the returned iterator will be
        shared under the given name across multiple sessions that share the same
        devices (e.g. when using a remote server).

    Returns:
      A `tf.data.Iterator` for elements of this dataset.

    Raises:
      RuntimeError: If eager execution is enabled.
    )_make_initializable_iterator)rV   shared_namer>   r>   r?   make_initializable_iterator!  s    /z%DatasetV1.make_initializable_iteratorc          	   C   s   t  rtdt|  |  }|d kr,d}t| jF tj	f d|d| j
}t|j|}t||t|t|t|S Q R X d S )Nzc`make_initializable_iterator()` is not supported in eager mode. Use Python-style iteration instead.rL  )	containerr  )r   r   r   r  r   r   r   rZ   r   Ziterator_v2r   Zmake_iteratorr   r  r  re  r  )rV   r  r   iterator_resourceZinitializerr>   r>   r?   r  R  s     z&DatasetV1._make_initializable_iteratorz4Use `tf.compat.v1.data.get_output_classes(dataset)`.c             C   s   t dd | jS )zReturns the class of each component of an element of this dataset.

    Returns:
      A (nested) structure of Python `type` objects corresponding to each
      component of an element of this dataset.
    c             S   s   |   S )N)r  )r   r>   r>   r?   r   t  r   z*DatasetV1.output_classes.<locals>.<lambda>)r	   r  r   )rV   r>   r>   r?   r  i  s    
zDatasetV1.output_classesz3Use `tf.compat.v1.data.get_output_shapes(dataset)`.c             C   s   t dd | jS )zReturns the shape of each component of an element of this dataset.

    Returns:
      A (nested) structure of `tf.TensorShape` objects corresponding to each
      component of an element of this dataset.
    c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   z)DatasetV1.output_shapes.<locals>.<lambda>)r	   r  r   )rV   r>   r>   r?   r   w  s    
zDatasetV1.output_shapesz2Use `tf.compat.v1.data.get_output_types(dataset)`.c             C   s   t dd | jS )zReturns the type of each component of an element of this dataset.

    Returns:
      A (nested) structure of `tf.DType` objects corresponding to each component
      of an element of this dataset.
    c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   z(DatasetV1.output_types.<locals>.<lambda>)r	   r  r   )rV   r>   r>   r?   r     s    
zDatasetV1.output_typesc             C   s   t | j| j| jS )N)r   r  r   r   r  )rV   r>   r>   r?   r     s    zDatasetV1.element_specc             C   s   t tj| |dS )N)r=   )r   rG   r   )r   r=   r>   r>   r?   r     s    zDatasetV1.from_tensorsc             C   s   t tj| |dS )N)r=   )r   rG   r   )r   r=   r>   r>   r?   r     s    zDatasetV1.from_tensor_slicesz+Use `tf.data.Dataset.from_tensor_slices()`.c             C   s   t t| S )zSplits each rank-N `tf.sparse.SparseTensor` in this dataset row-wise.

    Args:
      sparse_tensor: A `tf.sparse.SparseTensor`.

    Returns:
      Dataset: A `Dataset` of rank-(N-1) sparse tensors.
    )r   SparseTensorSliceDataset)r   r>   r>   r?   from_sparse_tensor_slices  s    z#DatasetV1.from_sparse_tensor_sliceszUse output_signature insteadr   r   c          
   C   s.   t   ttj| |||||dS Q R X d S )N)r=   )r,   Zsilencer   rG   r  )r   r   r   r   r	  r=   r>   r>   r?   r    s    
zDatasetV1.from_generatorc              O   s   t tj| |S )N)r   rG   r   )r   r~   r>   r>   r?   r     s    zDatasetV1.rangec             C   s   t tj| |dS )N)r=   )r   rG   r   )r"  r=   r>   r>   r?   r     s    zDatasetV1.zipc                s   t tt| j||dS )N)r=   )r   rz   rQ   r$  )rV   r   r=   )r   r>   r?   r$    s    zDatasetV1.concatenatec                s   t tt| j||dS )N)r=   )r   rz   rQ   r/  )rV   r.  r=   )r   r>   r?   r/    s    zDatasetV1.prefetchc             C   s   t tj| |||dS )N)r=   )r   rG   r0  )r1  r@  r6  r=   r>   r>   r?   r0    s    zDatasetV1.list_filesc                s   t tt| j||dS )N)r=   )r   rz   rQ   rD  )rV   rC  r=   )r   r>   r?   rD    s    zDatasetV1.repeatc                s   t tt| j||||dS )N)r=   )r   rz   rQ   r@  )rV   r.  r6  rK  r=   )r   r>   r?   r@    s    
zDatasetV1.shufflerL  c                s   t tt| j||dS )N)r=   )r   rz   rQ   rO  )rV   rN  r=   )r   r>   r?   rO    s    zDatasetV1.cachec                s   t tt| j||dS )N)r=   )r   rz   rQ   rQ  )rV   rC  r=   )r   r>   r?   rQ    s    zDatasetV1.takec                s   t tt| j||dS )N)r=   )r   rz   rQ   rS  )rV   rC  r=   )r   r>   r?   rS    s    zDatasetV1.skipc                s   t tt| j|||dS )N)r=   )r   rz   rQ   rW  )rV   rU  rV  r=   )r   r>   r?   rW    s    zDatasetV1.shardFc                s   t tt| j|||||dS )N)r=   )r   rz   rQ   rd  )rV   r+  r,  rb  rc  r=   )r   r>   r?   rd     s    
zDatasetV1.batchc                s   t tt| j|||||dS )N)r=   )r   rz   rQ   ri  )rV   r+  rg  rh  r,  r=   )r   r>   r?   ri    s    
zDatasetV1.padded_batchc             C   s8   |d kst rtt| |ddS tt| |||ddS d S )NF)rj  )r   r   rk  rl  )rV   rm  rb  rc  r=   r>   r>   r?   rn    s    zDatasetV1.mapzUse `tf.data.Dataset.map()c          	   C   sJ   |dkr.|dk	rt d tt| |dddS tt| |||dddS dS )a  Maps `map_func` across the elements of this dataset.

    Note: This is an escape hatch for existing uses of `map` that do not work
    with V2 functions. New uses are strongly discouraged and existing uses
    should migrate to `map` as this method will be removed in V2.

    Args:
      map_func: A function mapping a (nested) structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to another (nested) structure of tensors.
      num_parallel_calls: (Optional.) A `tf.int32` scalar `tf.Tensor`,
        representing the number elements to process asynchronously in parallel.
        If not specified, elements will be processed sequentially. If the value
        `tf.data.AUTOTUNE` is used, then the number of parallel calls is set
        dynamically based on available CPU.
      deterministic: (Optional.) When `num_parallel_calls` is specified, this
        boolean controls the order in which the transformation produces
        elements. If set to `False`, the transformation is allowed to yield
        elements out of order to trade determinism for performance. If not
        specified, the `tf.data.Options.deterministic` option (`True` by
        default) controls the behavior.

    Returns:
      Dataset: A `Dataset`.
    NzaThe `deterministic` argument has no effect unless the `num_parallel_calls` argument is specified.FT)rj  use_legacy_function)r   r   r   rk  rl  )rV   rm  rb  rc  r>   r>   r?   map_with_legacy_function0  s"    
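# Illustrative sketch, not part of the original module: the recommended
# replacement for this escape hatch is `Dataset.map`, optionally parallelized
# and with determinism relaxed, as described by the arguments above.
import tensorflow as tf

ds = tf.data.Dataset.range(100)
ds = ds.map(lambda x: x * x,
            num_parallel_calls=tf.data.AUTOTUNE,
            deterministic=False)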

z"DatasetV1.map_with_legacy_functionc                s   t tt| j||dS )N)r=   )r   rz   rQ   r  )rV   rm  r=   )r   r>   r?   r  b  s    zDatasetV1.flat_mapc          	      s    t tt| j||||||dS )N)r=   )r   rz   rQ   rv  )rV   rm  rt  ru  rb  rc  r=   )r   r>   r?   rv  g  s    
zDatasetV1.interleavec                s   t tt| j||dS )N)r=   )r   rz   rQ   rx  )rV   ry  r=   )r   r>   r?   rx  x  s    zDatasetV1.filterzUse `tf.data.Dataset.filter()c             C   s   ddl m} |j| |ddS )ab  Filters this dataset according to `predicate`.

    Note: This is an escape hatch for existing uses of `filter` that do not work
    with V2 functions. New uses are strongly discouraged and existing uses
    should migrate to `filter` as this method will be removed in V2.

    Args:
      predicate: A function mapping a (nested) structure of tensors (having
        shapes and types defined by `self.output_shapes` and
        `self.output_types`) to a scalar `tf.bool` tensor.

    Returns:
      Dataset: The `Dataset` containing the elements of this dataset for which
          `predicate` is `True`.
    r   )rw  T)r  )r   rw  ZFilterDataset)rV   ry  rw  r>   r>   r?   filter_with_legacy_function|  s    z%DatasetV1.filter_with_legacy_functionc                s   t tt| |S )N)r   rz   rQ   r|  )rV   r{  )r   r>   r?   r|    s    zDatasetV1.applyr   c                s   t tt| j|||||dS )N)r=   )r   rz   rQ   r  )rV   r~  r  r  r,  r=   )r   r>   r?   r    s    zDatasetV1.windowc                s   t tt| j|dS )N)r=   )r   rz   rQ   r  )rV   r=   )r   r>   r?   r    s    zDatasetV1.unbatchc                s   t tt| j||dS )N)r=   )r   rz   rQ   r  )rV   r   r=   )r   r>   r?   r    s    zDatasetV1.with_options)N)N)N)N)NNNNN)N)N)N)NNN)NN)NNN)rL  N)N)N)N)FNNN)NNFN)NNN)NN)N)NNNNN)N)Nr   FN)N)N)4r   r   r   r   rY   r  r  r  r,   
deprecatedr  r  r  r  r  r  r   r   r   r  	functoolswrapsrG   r   r   r  r  r  r   r   r$  r/  r0  rD  r@  rO  rQ  rS  rW  rd  ri  rn  r  r  rv  rx  r  r|  r  r  r  r   r>   r>   )r   r?   rQ     s   	(3#



    





  




   	
   	
  
 .

    




rQ   c                   sT   e Zd ZdZ fddZdd Zdd Zdd	 Zd
d Ze	dd Z
dd Z  ZS )r   zCWraps a V2 `Dataset` object in the `tf.compat.v1.data.Dataset` API.c                s   || _ tt|   d S )N)rH   rz   r   rY   )rV   r   )r   r>   r?   rY     s    zDatasetV1Adapter.__init__c             C   s   | j jS )N)rH   rZ   )rV   r>   r>   r?   r    s    z#DatasetV1Adapter._as_variant_tensorc             C   s
   | j  S )N)rH   rP   )rV   r>   r>   r?   rP     s    zDatasetV1Adapter._inputsc             C   s
   | j  S )N)rH   r   )rV   r>   r>   r?   r     s    zDatasetV1Adapter._functionsc             C   s
   | j  S )N)rH   r   )rV   r>   r>   r?   r     s    zDatasetV1Adapter.optionsc             C   s   | j jS )N)rH   r   )rV   r>   r>   r?   r     s    zDatasetV1Adapter.element_specc             C   s
   t | jS )N)r   rH   )rV   r>   r>   r?   r     s    zDatasetV1Adapter.__iter__)r   r   r   r   rY   r  rP   r   r   r  r   r   r   r>   r>   )r   r?   r     s   r   c             C   s   t  }t }||  g }xn| s| }|| |j}||krft	d| d| d|j
 dx"| D ]}||krp|| qpW q W dS )zHWalks the dataset graph to ensure all datasets come from the same graph.z
The graph z- of the iterator is different from the graph z the dataset: z was created in. If you are using the Estimator API, make sure that no part of the dataset returned by the `input_fn` function is defined outside the `input_fn` function. Otherwise, make sure that the dataset is created in the same graph as the iterator.N)r   rK   queueQueueputemptygetr   r   r;   rZ   rP   )r   Zcurrent_graphZbfs_qvisitedr   Zds_graphZinput_dsr>   r>   r?   r    s    


r  zdata.make_one_shot_iteratorc             C   s*   y|   S  tk
r$   t|   S X dS )aT  Creates an iterator for elements of `dataset`.

  Note: The returned iterator will be initialized automatically.
  A "one-shot" iterator does not support re-initialization.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A `tf.data.Iterator` for elements of `dataset`.

  @compatibility(TF2)
  This is a legacy API for consuming dataset elements and should only be used
  during transition from TF 1 to TF 2. Note that using this API should be
  a transient state of your code base as there are in general no guarantees
  about the interoperability of TF 1 and TF 2 code.

  In TF 2 datasets are Python iterables which means you can consume their
  elements using `for elem in dataset: ...` or by explicitly creating iterator
  via `iterator = iter(dataset)` and fetching its elements via
  `values = next(iterator)`.
  @end_compatibility
  N)r  r  r   )r   r>   r>   r?   r    s    r  z data.make_initializable_iteratorc             C   s.   y
|  |S  tk
r(   t|  |S X dS )a  Creates an iterator for elements of `dataset`.

  Note: The returned iterator will be in an uninitialized state,
  and you must run the `iterator.initializer` operation before using it:

  ```python
  dataset = ...
  iterator = tf.compat.v1.data.make_initializable_iterator(dataset)
  # ...
  sess.run(iterator.initializer)
  ```

  Args:
    dataset: A `tf.data.Dataset`.
    shared_name: (Optional.) If non-empty, the returned iterator will be shared
      under the given name across multiple sessions that share the same devices
      (e.g. when using a remote server).

  Returns:
    A `tf.data.Iterator` for elements of `dataset`.

  Raises:
    RuntimeError: If eager execution is enabled.

  @compatibility(TF2)
  This is a legacy API for consuming dataset elements and should only be used
  during transition from TF 1 to TF 2. Note that using this API should be
  a transient state of your code base as there are in general no guarantees
  about the interoperability of TF 1 and TF 2 code.

  In TF 2 datasets are Python iterables which means you can consume their
  elements using `for elem in dataset: ...` or by explicitly creating iterator
  via `iterator = iter(dataset)` and fetching its elements via
  `values = next(iterator)`.
  @end_compatibility
  N)r  r  r   )r   r  r>   r>   r?   r     s    &
r  zdata.experimental.get_structurec             C   s4   y| j S  tk
r.   tdt|  dY nX dS )a\  Returns the type signature for elements of the input dataset / iterator.

  For example, to get the structure of a `tf.data.Dataset`:

  >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  >>> tf.data.experimental.get_structure(dataset)
  TensorSpec(shape=(), dtype=tf.int32, name=None)

  >>> dataset = tf.data.experimental.from_list([(1, 'a'), (2, 'b'), (3, 'c')])
  >>> tf.data.experimental.get_structure(dataset)
  (TensorSpec(shape=(), dtype=tf.int32, name=None),
   TensorSpec(shape=(), dtype=tf.string, name=None))

  To get the structure of an `tf.data.Iterator`:

  >>> dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  >>> tf.data.experimental.get_structure(iter(dataset))
  TensorSpec(shape=(), dtype=tf.int32, name=None)

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or an `tf.data.Iterator`.

  Returns:
    A (nested) structure of `tf.TypeSpec` objects matching the structure of an
    element of `dataset_or_iterator` and specifying the type of individual
    components.

  Raises:
    TypeError: If input is not a `tf.data.Dataset` or an `tf.data.Iterator`
      object.
  zuInvalid `dataset_or_iterator`. `dataset_or_iterator` must be a `tf.data.Dataset` or tf.data.Iterator object, but got rI   N)r   r  rS   rD   )dataset_or_iteratorr>   r>   r?   get_structure.  s    !r&  zdata.get_output_classesc             C   s   t dd t| S )a  Returns the output classes for elements of the input dataset / iterator.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A (nested) structure of Python `type` objects matching the structure of the
    dataset / iterator elements and specifying the class of the individual
    components.

  @compatibility(TF2)
  This is a legacy API for inspecting the type signature of dataset elements. In
  TF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.
  @end_compatibility
  c             S   s   |   S )N)r  )r   r>   r>   r?   r   i  r   z+get_legacy_output_classes.<locals>.<lambda>)r	   r  r&  )r%  r>   r>   r?   r  W  s    r  zdata.get_output_shapesc             C   s   t dd t| S )a  Returns the output shapes for elements of the input dataset / iterator.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A (nested) structure of `tf.TensorShape` objects matching the structure of
    the dataset / iterator elements and specifying the shape of the individual
    components.

  @compatibility(TF2)
  This is a legacy API for inspecting the type signature of dataset elements. In
  TF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.
  @end_compatibility
  c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   z*get_legacy_output_shapes.<locals>.<lambda>)r	   r  r&  )r%  r>   r>   r?   re    s    r  zdata.get_output_typesc             C   s   t dd t| S )a  Returns the output types for elements of the input dataset / iterator.

  Args:
    dataset_or_iterator: A `tf.data.Dataset` or `tf.data.Iterator`.

  Returns:
    A (nested) structure of `tf.DType` objects matching the structure of
    dataset / iterator elements and specifying the type of the individual
    components.

  @compatibility(TF2)
  This is a legacy API for inspecting the type signature of dataset elements. In
  TF 2, you should use the `tf.data.Dataset.element_spec` attribute instead.
  @end_compatibility
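# Illustrative sketch, not part of the original module: the legacy inspection
# helpers shown next to the TF 2 `element_spec` attribute that supersedes them.
import tensorflow as tf

ds = tf.data.Dataset.from_tensor_slices([1, 2, 3])
print(tf.compat.v1.data.get_output_types(ds))   # tf.int32
print(tf.compat.v1.data.get_output_shapes(ds))  # ()
print(ds.element_spec)                          # TensorSpec(shape=(), dtype=tf.int32, name=None)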
  c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   z)get_legacy_output_types.<locals>.<lambda>)r	   r  r&  )r%  r>   r>   r?   r    s    r  c               @   s   e Zd ZdZdd ZdS )DatasetSourcez5Abstract class representing a dataset with no inputs.c             C   s   g S )Nr>   )rV   r>   r>   r?   rP     s    zDatasetSource._inputsN)r   r   r   r   rP   r>   r>   r>   r?   r'    s   r'  c                   s(   e Zd ZdZ fddZdd Z  ZS )UnaryDatasetz5Abstract class representing a dataset with one input.c                s   || _ tt| | d S )N)_input_datasetrz   r(  rY   )rV   rX   rW   )r   r>   r?   rY     s    zUnaryDataset.__init__c             C   s   | j gS )N)r)  )rV   r>   r>   r?   rP     s    zUnaryDataset._inputs)r   r   r   r   rY   rP   r   r>   r>   )r   r?   r(    s   r(  c                   s,   e Zd ZdZ fddZedd Z  ZS )UnaryUnchangedStructureDatasetzDRepresents a unary dataset with the same input and output structure.c                s   || _ tt| || d S )N)r)  rz   r*  rY   )rV   rX   rW   )r   r>   r?   rY     s    
z'UnaryUnchangedStructureDataset.__init__c             C   s   | j jS )N)r)  r   )rV   r>   r>   r?   r     s    z+UnaryUnchangedStructureDataset.element_spec)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r*    s   r*  c                   s4   e Zd ZdZ fddZdd Zedd Z  ZS )_VariantDatasetz@A Dataset wrapper around a `tf.variant`-typed function argument.c                s   || _ tt| | d S )N)_element_specrz   r+  rY   )rV   Zdataset_variantr   )r   r>   r?   rY     s    z_VariantDataset.__init__c             C   s   g S )Nr>   )rV   r>   r>   r?   rP     s    z_VariantDataset._inputsc             C   s   | j S )N)r,  )rV   r>   r>   r?   r     s    z_VariantDataset.element_spec)	r   r   r   r   rY   rP   r  r   r   r>   r>   )r   r?   r+    s   r+  c               @   s    e Zd Zdd Zedd ZdS )_NestedVariantc             C   s   || _ || _|| _d S )N)rZ   r,  _dataset_shape)rV   rW   r   dataset_shaper>   r>   r?   rY     s    z_NestedVariant.__init__c             C   s   t | j| jS )N)r   r,  r.  )rV   r>   r>   r?   r     s    z_NestedVariant._type_specN)r   r   r   rY   r  r   r>   r>   r>   r?   r-    s   r-  zdata.experimental.from_variantc             C   s
   t | |S )a=  Constructs a dataset from the given variant and (nested) structure.

  Args:
    variant: A scalar `tf.variant` tensor representing a dataset.
    structure: A (nested) structure of `tf.TypeSpec` objects representing the
      structure of each element in the dataset.

  Returns:
    A `tf.data.Dataset` instance.
  )r+  )r   r   r>   r>   r?   from_variant  s    r0  zdata.experimental.to_variantc             C   s   | j S )zReturns a variant representing the given dataset.

  Args:
    dataset: A `tf.data.Dataset`.

  Returns:
    A scalar `tf.variant` tensor representing the given dataset.
  )rZ   )r   r>   r>   r?   
to_variant  s    
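# Illustrative sketch, not part of the original module: round-tripping a
# dataset through its variant tensor with `to_variant` / `from_variant`.
import tensorflow as tf

ds = tf.data.Dataset.range(3)
variant = tf.data.experimental.to_variant(ds)
restored = tf.data.experimental.from_variant(variant, ds.element_spec)
print(list(restored.as_numpy_iterator()))  # [0, 1, 2]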
r1  zdata.DatasetSpecz"data.experimental.DatasetStructurec               @   s   e Zd ZdZddgZd,ddZedd Zed	d
 Zdd Z	dd Z
dd Zedd Zdd Zdd Zdd Zedd Zdd Zdd Zdd  Zd!d" Zd#d$ Zd%d& Zd'd( Zd)d* Zd+S )-r   a$  Type specification for `tf.data.Dataset`.

  See `tf.TypeSpec` for more information about TensorFlow type specifications.

  >>> dataset = tf.data.Dataset.range(3)
  >>> tf.data.DatasetSpec.from_value(dataset)
  DatasetSpec(TensorSpec(shape=(), dtype=tf.int64, name=None), TensorShape([]))
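# Illustrative sketch, not part of the original module: using `DatasetSpec` as
# the input signature of a `tf.function` so it accepts any dataset with the
# given element type.
import tensorflow as tf

@tf.function(input_signature=[
    tf.data.DatasetSpec(tf.TensorSpec(shape=(), dtype=tf.int64))])
def total(ds):
    return ds.reduce(tf.constant(0, tf.int64), lambda acc, x: acc + x)

print(total(tf.data.Dataset.range(4)).numpy())  # 6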
  r,  r.  r>   c             C   s   || _ t|| _d S )N)r,  r   r  r.  )rV   r   r/  r>   r>   r?   rY     s    zDatasetSpec.__init__c             C   s   t S )N)r  )rV   r>   r>   r?   rC     s    zDatasetSpec.value_typec             C   s   | j S )zThe inner element spec.)r,  )rV   r>   r>   r?   r     s    zDatasetSpec.element_specc          	   C   s   t | t |k	rdS yt| j|j W n ttfk
r@   dS X t| j}t|j}dd }x$t||D ]\}}|||sndS qnW | j	|jS )zSee base class.Fc             S   s"   t | tjr| |S | |kS d S )N)rA   r+   	TraceTypeis_subtype_of)abr>   r>   r?   is_subtype_or_equal  s    
z6DatasetSpec.is_subtype_of.<locals>.is_subtype_or_equal)
rD   tf_nestassert_same_structurer   rS   r;   r   r   r.  r3  )rV   otherZself_elementsZother_elementsr6  Zself_elementZother_elementr>   r>   r?   r3    s    
zDatasetSpec.is_subtype_ofc       
   	      s  t fdd|D sdS y"x|D ]}tj|j q"W W n ttfk
rT   dS X tj}dd |D }dgt| }dd }xHt|D ]<\ }|| fdd|D | < |dk	r|  dkrdS qW t	j
|}jd	d |D }	|	dkrdS t||	S )
zSee base class.c             3   s   | ]}t  t |kV  qd S )N)rD   )rd   r9  )rV   r>   r?   r   '  s    z=DatasetSpec.most_specific_common_supertype.<locals>.<genexpr>Nc             S   s   g | ]}t |jqS r>   )r7  r   r   )rd   r9  r>   r>   r?   rh   2  s    z>DatasetSpec.most_specific_common_supertype.<locals>.<listcomp>c                s8   t  tjr |S t fdd|D r0 S d S d S )Nc             3   s   | ]} |kV  qd S )Nr>   )rd   r5  )r4  r>   r?   r   :  s    z`DatasetSpec.most_specific_common_supertype.<locals>.common_supertype_or_equal.<locals>.<genexpr>)rA   r+   r2  most_specific_common_supertyper  )r4  bsr>   )r4  r?   common_supertype_or_equal6  s    
zMDatasetSpec.most_specific_common_supertype.<locals>.common_supertype_or_equalc                s   g | ]}|  qS r>   r>   )rd   Zother_components)re   r>   r?   rh   ?  s    c             S   s   g | ]
}|j qS r>   )r.  )rd   r9  r>   r>   r?   rh   F  s    )r  r7  r8  r   rS   r;   r   r   rq   r  r,  r.  r:  r   )
rV   Zothersr9  Zself_componentsZothers_componentsZcommon_componentsr<  Zself_componentZcommon_element_specZcommon_dataset_shaper>   )re   rV   r?   r:  %  s0    

z*DatasetSpec.most_specific_common_supertypec             C   s   | j | jfS )N)r,  r.  )rV   r>   r>   r?   
_serializeO  s    zDatasetSpec._serializec             C   s   t | jtjS )N)r   r   r.  r   r   )rV   r>   r>   r?   _component_specsR  s    zDatasetSpec._component_specsc             C   s   |j S )N)rZ   )rV   rE   r>   r>   r?   _to_componentsV  s    zDatasetSpec._to_componentsc             C   s,   | j jdkrt|| jS t|| j| j S d S )Nr   )r.  r  r+  r,  r-  )rV   
componentsr>   r>   r?   _from_componentsY  s    zDatasetSpec._from_componentsc             C   s   t tdd |gS )Nc             S   s   | j S )N)rZ   )r   r>   r>   r?   r   c  r   z-DatasetSpec._to_tensor_list.<locals>.<lambda>)r   r8  r7  r  )rV   rE   r>   r>   r?   _to_tensor_list`  s    zDatasetSpec._to_tensor_listc             C   s
   t | jS )z>Creates a `DatasetSpec` for the given `tf.data.Dataset` value.)r   r   )rE   r>   r>   r?   
from_valuef  s    zDatasetSpec.from_valuec             C   s   t | jt|g| jS )N)r   r,  r   r   r$  r.  )rV   r+  r>   r>   r?   _batchk  s    zDatasetSpec._batchc             C   s*   | j jdkrtdt| j| j dd  S )Nr   z5Slicing dataset elements is not supported for rank 0.r   )r.  r  r;   r   r,  )rV   r>   r>   r?   _unbatchp  s    zDatasetSpec._unbatchc             C   s   | j jdkrtd| |S )Nr   z5Slicing dataset elements is not supported for rank 0.)r.  r  r;   rB  )rV   rE   r>   r>   r?   _to_batched_tensor_listu  s    z#DatasetSpec._to_batched_tensor_listc             C   s   | S )Nr>   )rV   r>   r>   r?   r  z  s    z#DatasetSpec._to_legacy_output_typesc             C   s   | S )Nr>   )rV   r>   r>   r?   r  }  s    z$DatasetSpec._to_legacy_output_shapesc             C   s   | S )Nr>   )rV   r>   r>   r?   r    s    z%DatasetSpec._to_legacy_output_classesc             C   s   t tS )N)hashr   )rV   r>   r>   r?   __hash__  s    zDatasetSpec.__hash__c             C   s"   t |to | j|jko | j|jkS )N)rA   r   r,  r.  )rV   r9  r>   r>   r?   __eq__  s    
zDatasetSpec.__eq__N)r>   )r   r   r   r   	__slots__rY   r  rC   r   r3  r:  r=  r>  r?  rA  rB  r  rC  rD  rE  rF  r  r  r  rH  rI  r>   r>   r>   r?   r     s*   
*r   c               @   s6   e Zd ZdZdgZdd Zdd Zdd Zd	d
 ZdS )r   z9Iterator over a dataset with elements converted to numpy.	_iteratorc             C   s   t || _d S )N)r   rK  )rV   r   r>   r>   r?   rY     s    z_NumpyIterator.__init__c             C   s   | S )Nr>   )rV   r>   r>   r?   r     s    z_NumpyIterator.__iter__c             C   s   dd }t |t| jS )Nc             S   s$   |   }t|tjr |jdd |S )NF)write)Z_numpyrA   r   r   Zsetflags)r   rp   r>   r>   r?   to_numpy  s    z)_NumpyIterator.__next__.<locals>.to_numpy)r	   r  r   rK  )rV   rM  r>   r>   r?   __next__  s    z_NumpyIterator.__next__c             C   s   |   S )N)rN  )rV   r>   r>   r?   r     s    z_NumpyIterator.nextN)	r   r   r   r   rJ  rY   r   rN  r   r>   r>   r>   r?   r     s   r   c                   s4   e Zd ZdZ fddZejjf fdd	Z  Z	S )r|   a`  Allows export of functions capturing a Dataset in SavedModels.

  When saving a SavedModel, `tf.saved_model.save` traverses the object
  graph. Since Datasets reference _VariantTracker objects, that traversal will
  find a _VariantTracker for each Dataset and so know how to save and restore
  functions which reference the Dataset's variant Tensor.
  c                s6   t t| jdd || _t|tjs,td|| _dS )a  Record that `variant_tensor` is associated with `resource_creator`.

    Args:
      variant_tensor: The variant-dtype Tensor associated with the Dataset. This
        Tensor will be a captured input to functions which use the Dataset, and
        is used by saving code to identify the corresponding _VariantTracker.
      resource_creator: A zero-argument function which creates a new
        variant-dtype Tensor. This function will be included in SavedModels and
        run to re-create the Dataset's variant Tensor on restore.
    ra   )rm   z1Resource creator should already be a tf.function.N)	rz   r|   rY   Z_resource_handlerA   r1   FunctionrS   _create_resource)rV   rW   Zresource_creator)r   r>   r?   rY     s
    z_VariantTracker.__init__c                s2   |t jjkri S tt| j|f|}| j|d< |S )NrP  )rw   rx   ry   rz   r|   r{   rP  )rV   r}   r~   r   )r   r>   r?   r{     s    
z#_VariantTracker._trackable_children)
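
  A hedged illustration of the mechanism described in the class docstring above
  (not code from this module): a `tf.function` that captures a dataset can be
  exported with `tf.saved_model.save`, and the restored object rebuilds the
  dataset's variant tensor through the tracked resource creator. The module
  name and export path below are made up for the example.

  ```python
  import tensorflow as tf

  # A tf.Module whose tf.function captures a tf.data.Dataset. During
  # tf.saved_model.save, the object-graph traversal finds the dataset's
  # _VariantTracker and records how to re-create its variant tensor.
  class Adder(tf.Module):

    def __init__(self):
      super().__init__()
      self.ds = tf.data.Dataset.range(5)

    @tf.function(input_signature=[])
    def total(self):
      return self.ds.reduce(tf.constant(0, tf.int64), lambda s, x: s + x)

  module = Adder()                                   # names/paths illustrative
  tf.saved_model.save(module, "/tmp/adder_example")
  restored = tf.saved_model.load("/tmp/adder_example")
  print(restored.total().numpy())                    # 10
  ```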
r   r   r   r   rY   rw   rx   r  r{   r   r>   r>   )r   r?   r|     s   r|   c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )r   z"A `Dataset` with a single element.Nc                s`   t |}t || _t | j|| _|| _tj| jt 	| j| j
 d}tt| | dS )z)See `Dataset.from_tensors()` for details.)r   r   N)r   r  r  
_structurer  Z_tensorsr   r   Ztensor_datasetr   r   rn   rz   r   rY   )rV   elementr=   rW   )r   r>   r?   rY     s    

zTensorDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    zTensorDataset.element_spec)N)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r     s   r   c                   s,   e Zd ZdZ fddZedd Z  ZS )r  zHA `Dataset` that splits a rank-N `tf.sparse.SparseTensor` into its rows.c                s   t |tjs tdt| d|| _| jj }| jj }|j	d d 
|j	d d }td|gtjtdg| jjt|gtjf| _t| jj| jj| jj}tt| | dS )z6See `Dataset.from_sparse_tensor_slices()` for details.zQInvalid `sparse_tensor`. `sparse_tensor` must be a `tf.sparse.SparseTensor`. Got rI   r   r   N)rA   r   ZSparseTensorrS   rD   Z_sparse_tensorindices	get_shapeZdense_shapedims
merge_withr   r   r   r   r   rQ  r   Zsparse_tensor_slice_datasetr   rz   r  rY   )rV   r   Zindices_shapeZshape_shapeZrankrW   )r   r>   r?   rY     s    
z!SparseTensorSliceDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    z%SparseTensorSliceDataset.element_spec)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r    s   r  c                   s6   e Zd ZdZd	 fdd	Zedd Zdd Z  ZS )
r  z;A `Dataset` that generates elements by invoking a function.Nc                s   || _ t|| _tj||  | jd| _tj||  | jjd| _	tj||  | jjd| _
|| _|| _tjt| j| j | jjj | j	jj| j
jjf| jj| j	j| j
jd| j}tt| | dS )a  Constructs a `_GeneratorDataset`.

    Args:
      init_args: A (nested) structure representing the arguments to `init_func`.
      init_func: A TensorFlow function that will be called on `init_args` each
        time a C++ iterator over this dataset is constructed. Returns a (nested)
        structure representing the "state" of the dataset.
      next_func: A TensorFlow function that will be called on the result of
        `init_func` to produce each element, and that raises `OutOfRangeError`
        to terminate iteration.
      finalize_func: A TensorFlow function that will be called on the result of
        `init_func` immediately before a C++ iterator over this dataset is
        destroyed. The return value is ignored.
      output_signature: A (nested) structure of `tf.TypeSpec` objects describing
        the output of `next_func`.
      name: Optional. A name for the tf.data transformation.
    )r  )	init_func	next_funcfinalize_funcN)Z
_init_argsr   r  Z_init_structurer   r  _transformation_nameZ
_init_funcoutput_structureZ
_next_funcZ_finalize_func_output_signaturer   r   Zgenerator_datasetr  r   r  r   rz   r  rY   )rV   Z	init_argsrW  rX  rY  r	  r=   rW   )r   r>   r?   rY     s4    



z_GeneratorDataset.__init__c             C   s   | j S )N)r\  )rV   r>   r>   r?   r   ?  s    z_GeneratorDataset.element_specc             C   s   dS )NzDataset.from_generator()r>   )rV   r>   r>   r?   rZ  C  s    z&_GeneratorDataset._transformation_name)N)	r   r   r   r   rY   r  r   rZ  r   r>   r>   )r   r?   r    s   4r  c                   s6   e Zd ZdZd	 fdd	Zdd Zedd Z  ZS )
r!  z*A `Dataset` that zips its inputs together.Nc                s   xDt |D ]6}t|tst|tr.tdqtdt| dqW || _t | jdd t | jD | _	|| _
tjdd t | jD f| j}tt| | dS )z See `Dataset.zip()` for details.zInvalid `datasets`. `datasets` is expected to be a (nested) structure of `tf.data.Dataset` objects. Python `list` is not supported and you should use `tuple` instead.zInvalid `datasets`. `datasets` is expected to be a (nested) structure of `tf.data.Dataset` objects but encountered object of type rI   c             S   s   g | ]
}|j qS r>   )r   )rd   r   r>   r>   r?   rh   Z  s    z'ZipDataset.__init__.<locals>.<listcomp>c             S   s   g | ]
}|j qS r>   )rZ   )rd   r   r>   r>   r?   rh   ]  s    N)r	   r   rA   rG   r  rS   rD   	_datasetsr  rQ  r   r   Zzip_datasetr   rz   r!  rY   )rV   r"  r=   r   rW   )r   r>   r?   rY   J  s    


zZipDataset.__init__c             C   s   t | jS )N)r	   r   r]  )rV   r>   r>   r?   rP   a  s    zZipDataset._inputsc             C   s   | j S )N)rQ  )rV   r>   r>   r?   r   d  s    zZipDataset.element_spec)N)	r   r   r   r   rY   rP   r  r   r   r>   r>   )r   r?   r!  G  s   r!  c                   s6   e Zd ZdZd	 fdd	Zdd Zedd Z  ZS )
r#  z;A `Dataset` that concatenates its input with given dataset.Nc          
      s   || _ || _dd }yt||j|j| _W n@ ttfk
rl } ztd|j d|j |W dd}~X Y nX ||g| _|| _	t
j|j|jf| j}tt| | dS )z(See `Dataset.concatenate()` for details.c             S   s.   |  |g}|d kr*td|  d| d|S )NzNo common supertype of z and rI   )r:  rS   )r4  r5  resultr>   r>   r?   common_supertypeq  s    z5ConcatenateDataset.__init__.<locals>.common_supertypez!Incompatible dataset elements:
  z vs.   N)r)  Z_dataset_to_concatenater7  r  r   rQ  rS   r;   rz  r   r   Zconcatenate_datasetrZ   r   rz   r#  rY   )rV   rX   Zdataset_to_concatenater=   r_  r  rW   )r   r>   r?   rY   l  s"    

zConcatenateDataset.__init__c             C   s   | j S )N)rz  )rV   r>   r>   r?   rP     s    zConcatenateDataset._inputsc             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    zConcatenateDataset.element_spec)N)	r   r   r   r   rY   rP   r  r   r   r>   r>   )r   r?   r#  i  s   r#  c                   s"   e Zd ZdZd fdd	Z  ZS )rB  z1A `Dataset` that repeats its input several times.Nc                sp   || _ |dkr$tjdtjdd| _ntj|tjdd| _|| _t	j
|jfd| ji| j}tt| || dS )z#See `Dataset.repeat()` for details.Nr4   rC  )r   r=   )r)  r   r  r   r   _countr   r8  r   r   Zrepeat_datasetrZ   r   rz   rB  rY   )rV   rX   rC  r=   rW   )r   r>   r?   rY     s    
zRepeatDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   rB    s   rB  c                   s<   e Zd ZdZ fddZdd Zdd Zedd	 Z  Z	S )
r  z0A `Dataset` of a step separated range of values.c                sP   | j || tg | j| _tjf | j| j| j	d| j
}tt| | dS )z"See `Dataset.range()` for details.)r'  stopr(  N)_parse_argsr   r   _output_typerQ  r   rI  _start_stop_stepr   rz   r  rY   )rV   r   r~   rW   )r   r>   r?   rY     s    
zRangeDataset.__init__c             O   s  t |dkr<| dd| _| |d d| _| dd| _nt |dkr|| |d d| _| |d d| _| dd| _nXt |dkr| |d d| _| |d d| _| |d d| _ntdt | d	d
|kr|d
 | _ntj| _d|kr|d nd| _	dS )zEParse arguments according to the same rules as the `range()` builtin.r   r   r'  ra  r(  r  r  zGInvalid `args`. The lenght of `args` should be between 1 and 3 but was rI   output_typer=   N)
r   _build_tensorrd  re  rf  r;   rc  r   r   r   )rV   r   r~   r>   r>   r?   rb    s"    zRangeDataset._parse_argsc             C   s   t j|tj|dS )N)r   r=   )r   r8  r   r   )rV   Zint64_valuer=   r>   r>   r?   rh    s    zRangeDataset._build_tensorc             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    zRangeDataset.element_spec)
r   r   r   r   rY   rb  rh  r  r   r   r>   r>   )r   r?   r    s
   r  c                   s"   e Zd ZdZd fdd	Z  ZS )rM  z.A `Dataset` that caches elements of its input.Nc                s   || _ tj|tjdd| _|| _t r\t	
 s8t r\tj|jf| jt d| j}ntj|jfd| ji| j}tt| || dS )z"See `Dataset.cache()` for details.rN  )r   r=   )rN  rO  N)r)  r   r8  r   ro   	_filenamer   r   r   r   r   r   r   Zcache_dataset_v2rZ   Zdummy_memory_cacher   Zcache_datasetrz   rM  rY   )rV   rX   rN  r=   rW   )r   r>   r?   rY     s    

zCacheDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   rM    s   rM  c                   s"   e Zd ZdZd fdd	Z  ZS )rJ  z=A `Dataset` that randomly shuffles the elements of its input.Nc                s   || _ tj|tjdd| _t|\| _| _	|dkr8d}|| _
|| _t rt s\t rtj|jf| j| j| j	t | j
d| j}n(tj|jf| j| j| j	| j
d| j}tt| || dS )z$See `Dataset.shuffle()` for details.r.  )r   r=   NT)r.  r6  seed2Zseed_generatorrK  )r.  r6  rj  rK  )r)  r   r8  r   r   _buffer_sizer
   r  _seed_seed2Z_reshuffle_each_iterationr   r   r   r   r   r   r   Zshuffle_dataset_v3rZ   Zdummy_seed_generatorr   Zshuffle_datasetrz   rJ  rY   )rV   rX   r.  r6  rK  r=   rW   )r   r>   r?   rY     s4    
zShuffleDataset.__init__)NNN)r   r   r   r   rY   r   r>   r>   )r   r?   rJ    s     rJ  c                   s"   e Zd ZdZd fdd	Z  ZS )rP  zAA `Dataset` containing the first `count` elements from its input.Nc                sR   || _ tj|tjdd| _|| _tj|j	fd| ji| j
}tt| || dS )z!See `Dataset.take()` for details.rC  )r   r=   N)r)  r   r8  r   r   r`  r   r   Ztake_datasetrZ   r   rz   rP  rY   )rV   rX   rC  r=   rW   )r   r>   r?   rY     s    
zTakeDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   rP    s   rP  c                   s"   e Zd ZdZd fdd	Z  ZS )rR  z?A `Dataset` skipping the first `count` elements from its input.Nc                sR   || _ tj|tjdd| _|| _tj|j	fd| ji| j
}tt| || dS )z!See `Dataset.skip()` for details.rC  )r   r=   N)r)  r   r8  r   r   r`  r   r   Zskip_datasetrZ   r   rz   rR  rY   )rV   rX   rC  r=   rW   )r   r>   r?   rY   #  s    
zSkipDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   rR     s   rR  c                   s"   e Zd ZdZd fdd	Z  ZS )rT  z#A `Dataset` for sharding its input.Nc                sj   || _ tj|tjdd| _tj|tjdd| _|| _tj	|j
f| j| jd| j}tt| || dS )z"See `Dataset.shard()` for details.rU  )r   r=   rV  )rU  rV  N)r)  r   r8  r   r   Z_num_shards_indexr   r   Zshard_datasetrZ   r   rz   rT  rY   )rV   rX   rU  rV  r=   rW   )r   r>   r?   rY   2  s    
zShardDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   rT  /  s   rT  c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )r`  z<A `Dataset` that batches contiguous elements from its input.Nc                s   || _ tj|tjdd| _tj|tjdd| _t	| j}|rdt	| j t
 fdd|j| _nt
dd |j| _|| _tj|jf| j| jd| j}tt| || dS )	z"See `Dataset.batch()` for details.r+  )r   r=   r,  c                s
   |   S )N)rD  )r   )constant_batch_sizer>   r?   r   T  r   z'BatchDataset.__init__.<locals>.<lambda>c             S   s
   |  d S )N)rD  )r   r>   r>   r?   r   X  r   )r+  r,  N)r)  r   r8  r   r   _batch_sizebool_drop_remainderr   r   r	   r  r   rQ  r   r   Zbatch_dataset_v2rZ   r   rz   r`  rY   )rV   rX   r+  r,  r=   constant_drop_remainderrW   )r   )ro  r?   rY   D  s*    


zBatchDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r   c  s    zBatchDataset.element_spec)N)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r`  A  s   r`  c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )ra  zHA `Dataset` that batches contiguous elements from its input in parallel.Nc       	         s   || _ tj|tjdd| _tj|tjdd| _tj|tjdd| _|dkrRd| _	n|r^d| _	nd| _	t
| j}|rt
| j t fd	d
|j| _ntdd
 |j| _|| _tj|jf| j| j| j| j	d| j}tt| || dS )z"See `Dataset.batch()` for details.r+  )r   r=   r,  rb  Ndefaulttruefalsec                s
   |   S )N)rD  )r   )ro  r>   r?   r     r   z/ParallelBatchDataset.__init__.<locals>.<lambda>c             S   s
   |  d S )N)rD  )r   r>   r>   r?   r     r   )r+  rb  r,  rc  )r)  r   r8  r   r   rp  rq  rr  _num_parallel_calls_deterministicr   r   r	   r  r   rQ  r   r   Zparallel_batch_datasetrZ   r   rz   ra  rY   )	rV   rX   r+  r,  rb  rc  r=   rs  rW   )r   )ro  r?   rY   k  s<    


zParallelBatchDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    z!ParallelBatchDataset.element_spec)N)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   ra  h  s   *ra  c             C   st   | j dks|j dkrdS t| j t|j kr0dS x>t| j |j D ],\}}|jdk	r@|jdk	r@|j|jk r@dS q@W dS )a  Returns `True` if `input_component_shape` can be padded to `padded_shape`.

  Args:
    padded_shape: A `tf.TensorShape`.
    input_component_shape: A `tf.TensorShape`.

  Returns:
    `True` if `input_component_shape` can be padded to `padded_shape`, otherwise
    `False`.
  NTF)rU  r   r   rE   )padded_shapeinput_component_shapeZ
padded_dimZ	input_dimr>   r>   r?    _is_padded_shape_compatible_with  s    r{  c          
   C   s   y,t | }tjdd | D tjd}W n ttfk
r } z|tj| tjd}|j	j
dk	rt|j	j
dkrtd|  d|j	 d	||jtjkrtd|  d
|jj d	|t|}W dd}~X Y nX t||std| d| d|S )aS  Converts `padded_shape` to a `tf.Tensor` representing that shape.

  Args:
    padded_shape: A shape-like object, which may be a `tf.TensorShape`, a Python
      sequence, or a 1-D `tf.Tensor` of `tf.int64` elements.
    input_component_shape: A `tf.TensorShape`, with which `padded_shape` must
      be compatible.

  Returns:
    A 1-D `tf.Tensor` of `tf.int64` elements, representing `padded_shape`.

  Raises:
    ValueError: If `padded_shape` is not a shape or not compatible with
      `input_component_shape`.
    TypeError: If `padded_shape` is not convertible to a `tf.int64` tensor.
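
  Illustrative spellings of a `padded_shape` that this conversion accepts
  (example values, not from the library's tests); unknown dimensions are
  represented as -1 in the resulting `tf.int64` vector.

  ```python
  import tensorflow as tf

  # Equivalent ways to say "pad the first dimension to 10, leave the second
  # unknown"; unknown dims end up as -1 in the int64 vector form.
  as_shape  = tf.TensorShape([10, None])
  as_list   = [10, -1]
  as_tensor = tf.constant([10, -1], dtype=tf.int64)

  vector = tf.constant([-1 if d is None else d for d in as_shape.as_list()],
                       dtype=tf.int64)
  print(vector.numpy())                                  # [10 -1]
  print(tf.constant(as_list, dtype=tf.int64).numpy())    # [10 -1]
  print(as_tensor.numpy())                               # [10 -1]
  ```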
  c             S   s   g | ]}|d k	r|ndqS )Nr4   r>   )rd   dimr>   r>   r?   rh     s   z+_padded_shape_to_tensor.<locals>.<listcomp>)r   )Zpreferred_dtypeNr   zPadded shape z7 must be a `tf.int64` vector tensor, but its shape was rI   z> must be a `tf.int64` vector tensor, but its element type was zThe padded shape z" is not compatible with the shape z& of the corresponding input component.)r   r  r   r8  r  r   r   rS   r;   r   rU  r   r   r=   r   constant_value_as_shaper{  )ry  rz  Zpadded_shape_as_shaper   r  r>   r>   r?   _padded_shape_to_tensor  s(    
r~  c             C   sX   t j| dd} | jtg s2td| j d| j|krTtd| j d| d| S )a#  Converts the padding value to a tensor.

  Args:
    value: The padding value.
    output_type: Its expected dtype.

  Returns:
    A scalar `Tensor`.

  Raises:
    ValueError: if the padding value is not a scalar.
    TypeError: if the padding value's type does not match `output_type`.
  Zpadding_value)r=   zMInvalid `padding_values`. `padding_values` values should be scalars, but got rI   z7Invalid `padding_values`. `padding_values` values type z does not match type z& of the corresponding input component.)	r   r8  r   r   r   r   r;   r   rS   )rE   rg  r>   r>   r?   _padding_value_to_tensor  s    
r  c             C   s0   dd }dd }t |t|}t | || |S )zGReturns padding values with None elements replaced with default values.c             S   sT   | j tjkrdS | j tjkr&tdn*| j tjkrBtjdtjdS t	| 
 S d S )NrL  zIUnable to create default padding value for a component of type 'variant'.r   )r   )Z
base_dtyper   ro   r   rS   Zbfloat16r   r  r   Z
zeros_liker   )tr>   r>   r?   	make_zero  s    
z-_padding_values_or_default.<locals>.make_zeroc             S   s   | d kr|S | S )Nr>   )rE   rt  r>   r>   r?   value_or_default  s    z4_padding_values_or_default.<locals>.value_or_default)r	   r  r  r  )rh  rX   r  r  Zdefault_paddingr>   r>   r?   _padding_values_or_default  s    
r  c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )rf  zEA `Dataset` that batches and pads contiguous elements from its input.Nc          	      s~  |_ dd }t||j |_ tj|tjdd_t	 | t
|}t||}	g }
x,tt||	D ]\}}|
t|| qjW t||
_t|rt st fdd| t|t t|_tj|tjdd_fdd	}t|j}ttj |tj _|_tj|j jd
d tjD tjjt!jj"# d}t$t%&|| dS )z"See `Dataset.batch()` for details.c             S   s    t | tjstd|  dd S )Nzq`padded_batch` is only supported for datasets that produce tensor elements but the input dataset spec contains: `z`.)rA   r   r   rS   )r   r>   r>   r?   check_types(  s    z0PaddedBatchDataset.__init__.<locals>.check_typesr+  )r   r=   c                s    S )Nr>   )r[   )rh  r>   r?   r   D  r   z-PaddedBatchDataset.__init__.<locals>.<lambda>r,  c                s0   t t jrt jnd gt	| S )N)
r   r   r   Zsmart_constant_valuerr  r   r   rp  r$  r}  )s)rV   r>   r?   _padded_shape_to_batch_shapeM  s     zAPaddedBatchDataset.__init__.<locals>._padded_shape_to_batch_shapec             S   s   g | ]}t j|tjd qS ))r   )r   r8  r   r   )rd   r  r>   r>   r?   rh   _  s   z/PaddedBatchDataset.__init__.<locals>.<listcomp>)r+  rg  rh  r,  r   r   N)'r)  r	   r  r   r   r8  r   r   rp  r  re  r   r   r   r   r~  r  Z_padded_shapesZ	is_nestedr  r  r  Z_padding_valuesrq  rr  r   r  r  rQ  r   r   Zpadded_batch_dataset_v2rZ   r   r   rn   rz   rf  rY   )rV   rX   r+  rg  rh  r,  r=   r  r  Zflat_padded_shapesZflat_padded_shapes_as_tensorsrz  ry  r  r   rW   )r   )rh  rV   r?   rY     sR    




zPaddedBatchDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r   h  s    zPaddedBatchDataset.element_spec)N)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   rf    s   Drf  c                   s>   e Zd ZdZd fdd	Zdd Zed	d
 Zdd Z  Z	S )rk  z<A `Dataset` that maps a function over elements in its input.TFNc                st   || _ || _|| _tj||  ||d| _|| _tj	|j
| jjjf| jj| j| jd| j}tt| || dS )z See `Dataset.map()` for details.)r   r  )r  r  rj  N)r)  _use_inter_op_parallelism_preserve_cardinalityr   r  rZ  	_map_funcr   r   Zmap_datasetrZ   r   r  r   rz   rk  rY   )rV   rX   rm  r  rj  r  r=   rW   )r   r>   r?   rY   p  s"    


zMapDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r     s    zMapDataset._functionsc             C   s   | j jS )N)r  r[  )rV   r>   r>   r?   r     s    zMapDataset.element_specc             C   s   dS )NzDataset.map()r>   )rV   r>   r>   r?   rZ    s    zMapDataset._transformation_name)TFFN)
r   r   r   r   rY   r   r  r   rZ  r   r>   r>   )r   r?   rk  m  s      rk  c                   s>   e Zd ZdZd fdd	Zdd Zed	d
 Zdd Z  Z	S )rl  zHA `Dataset` that maps a function over elements in its input in parallel.TFNc	       
         s   || _ || _tj||  ||d| _|dkr4d| _n|r@d| _nd| _|| _tj	|t
jdd| _|| _tj|j| jjjf| jj| j| j| j| jd| j}	tt| ||	 dS )	z See `Dataset.map()` for details.)r   r  Nrt  ru  rv  rb  )r   r=   )r  rb  rc  r  rj  )r)  r  r   r  rZ  r  rx  r  r   r8  r   r   rw  r   r   Zparallel_map_dataset_v2rZ   r   r  r   rz   rl  rY   )
rV   rX   rm  rb  rc  r  rj  r  r=   rW   )r   r>   r?   rY     s4    



zParallelMapDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r     s    zParallelMapDataset._functionsc             C   s   | j jS )N)r  r[  )rV   r>   r>   r?   r     s    zParallelMapDataset.element_specc             C   s   dS )NzDataset.map()r>   )rV   r>   r>   r?   rZ    s    z'ParallelMapDataset._transformation_name)TFFN)
r   r   r   r   rY   r   r  r   rZ  r   r>   r>   )r   r?   rl    s      rl  c                   s>   e Zd ZdZd fdd	Zdd Zedd Zd	d
 Z  Z	S )ro  zHA `Dataset` that maps a function over its input and flattens the result.Nc                s   || _ tj||  |d| _t| jjtsBtdt	| jjd| jjj
| _|| _tj|j| jjjfd| jji| j}tt| || dS )z%See `Dataset.flat_map()` for details.)r   z<The `map_func` argument must return a `Dataset` object. Got rI   r  N)r)  r   r  rZ  r  rA   r[  r   rS   rF   r,  rQ  r   r   Zflat_map_datasetrZ   r   r  r   rz   ro  rY   )rV   rX   rm  r=   rW   )r   r>   r?   rY     s    
zFlatMapDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r     s    zFlatMapDataset._functionsc             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    zFlatMapDataset.element_specc             C   s   dS )NzDataset.flat_map()r>   )rV   r>   r>   r?   rZ    s    z#FlatMapDataset._transformation_name)N)
r   r   r   r   rY   r   r  r   rZ  r   r>   r>   )r   r?   ro    s
   ro  c                   s>   e Zd ZdZd fdd	Zdd Zedd Zd	d
 Z  Z	S )rr  z>A `Dataset` that interleaves the result of transformed inputs.Nc                s   || _ tj||  |d| _t| jjtsBtdt	| jjd| jjj
| _tj|tjdd| _tj|tjdd| _|| _tj|j| jjj| j| jfd| jji| j}tt| || dS )	z'See `Dataset.interleave()` for details.)r   z<The `map_func` argument must return a `Dataset` object. Got rI   rt  )r   r=   ru  r  N)r)  r   r  rZ  r  rA   r[  r   rS   rF   r,  rQ  r   r8  r   r   _cycle_length_block_lengthr   r   Zinterleave_datasetrZ   r   r  r   rz   rr  rY   )rV   rX   rm  rt  ru  r=   rW   )r   r>   r?   rY     s(    
zInterleaveDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r   
  s    zInterleaveDataset._functionsc             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    zInterleaveDataset.element_specc             C   s   dS )NzDataset.interleave()r>   )rV   r>   r>   r?   rZ    s    z&InterleaveDataset._transformation_name)N)
r   r   r   r   rY   r   r  r   rZ  r   r>   r>   )r   r?   rr    s
   rr  c                   sF   e Zd ZdZeeddf fdd	Zdd Zedd Zd	d
 Z	  Z
S )rs  zKA `Dataset` that maps a function over its input and interleaves the result.Nc
                s&  || _ tj||  |d| _t| jjtsBtdt	| jjd| jjj
| _tj|tjdd| _tj|tjdd| _tj|tjdd| _tj|tjdd| _tj|tjd	d| _|d
krd}
n|rd}
nd}
|	| _tj|j| jjj| j| j| j| j| jf| jj|
d| j}tt| || d
S )z'See `Dataset.interleave()` for details.)r   z<The `map_func` argument must return a `Dataset` object. Got rI   rt  )r   r=   ru  buffer_output_elementsprefetch_input_elementsrb  Nrt  ru  rv  )r  rc  )r)  r   r  rZ  r  rA   r[  r   rS   rF   r,  rQ  r   r8  r   r   r  r  Z_buffer_output_elementsZ_prefetch_input_elementsrw  r   r   Zparallel_interleave_dataset_v4rZ   r   r  r   rz   rs  rY   )rV   rX   rm  rt  ru  rb  r  r  rc  r=   Zdeterministic_stringrW   )r   r>   r?   rY     sP    


z"ParallelInterleaveDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r   P  s    z$ParallelInterleaveDataset._functionsc             C   s   | j S )N)rQ  )rV   r>   r>   r?   r   S  s    z&ParallelInterleaveDataset.element_specc             C   s   dS )NzDataset.interleave()r>   )rV   r>   r>   r?   rZ  W  s    z.ParallelInterleaveDataset._transformation_name)r   r   r   r   r5   rY   r   r  r   rZ  r   r>   r>   )r   r?   rs    s   /rs  c                   s"   e Zd ZdZd fdd	Z  ZS )r-  z5A `Dataset` that asynchronously prefetches its input.Nc          	      sx   || _ |dkrt}tj|tjdd| _|| _t|j	$ t
j|j	f| j|d| j}W dQ R X tt| || dS )z%See `Dataset.prefetch()` for details.Nr.  )r   r=   )r.  slack_period)r)  r5   r   r8  r   r   rk  r   r   rZ   r   Zprefetch_datasetr   rz   r-  rY   )rV   rX   r.  r  r=   rW   )r   r>   r?   rY   ^  s    zPrefetchDataset.__init__)NN)r   r   r   r   rY   r   r>   r>   )r   r?   r-  [  s   r-  c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )r}  z?A dataset that creates window datasets from the input elements.Nc                s   || _ tj|tjdd| _tj|tjdd| _tj|tjdd| _tj|tjdd| _	t
t|dd tt
t|t
t|t
t|D | _|| _tj|jf| j| j| j| j	d| j}tt| || d	S )
z See `window()` for more details.r~  )r   r=   r  r  r,  c             S   s$   g | ]\}}}t t|||qS r>   )r   r   r  )rd   Zoutput_classZoutput_shaperg  r>   r>   r?   rh     s   z*WindowDataset.__init__.<locals>.<listcomp>)r~  r  r  r,  N)r)  r   r8  r   r   _sizeZ_shiftZ_striderq  rr  r	   r  r  r   r   re  r  rQ  r   r   Zwindow_datasetrZ   r   rz   r}  rY   )rV   rX   r~  r  r  r,  r=   rW   )r   r>   r?   rY   u  s.    
zWindowDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    zWindowDataset.element_spec)N)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r}  r  s   r}  c                   s"   e Zd ZdZd fdd	Z  ZS )r   z*An identity `Dataset` that stores options.Nc          	      s   || _ t }||  || _t|j t	j
|j| f| j}W d Q R X tt| || | jr| jd | j|| _n|| _| jd d S )NTF)r)  r   rN   ZCopyFromZ	_to_protor   r   r   rZ   r   Zoptions_datasetrn   r   rz   r   rY   rO   rU   rT   )rV   rX   r   r=   Z
options_pbrW   )r   r>   r?   rY     s    z_OptionsDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   r     s   r   c                s<   t  jr fdd}n fdd} |}t| jS )a~  Normalizes non-tensor components in a dataset to dense representations.

  This is necessary for dataset transformations that slice along the batch
  dimension and are oblivious to non-tensors, e.g. `unbatch`, `rebatch`.

  Args:
    dataset: Dataset to normalize.

  Returns:
    A dataset whose sparse and ragged tensors have been normalized to their
    dense representations.
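
  A small, hedged example of where this matters: `unbatch` slices along the
  batch dimension, so a sparse component is first normalized to its batched
  dense-tensor-list form. The toy `SparseTensor` is made up.

  ```python
  import tensorflow as tf

  # A dataset whose single (batched) element is a SparseTensor; unbatch()
  # slices along the batch dimension, which requires the sparse component to
  # be normalized to a dense representation internally.
  st = tf.sparse.SparseTensor(indices=[[0, 0], [1, 2]], values=[10, 20],
                              dense_shape=[2, 4])
  ds = tf.data.Dataset.from_tensors(st).unbatch()
  for row in ds:
    print(tf.sparse.to_dense(row).numpy())   # [10 0 0 0] then [0 0 20 0]
  ```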
  c                 s   t  jt| S )N)r   to_batched_tensor_listr   r  )r   )r   r>   r?   	normalize  s    z%normalize_to_dense.<locals>.normalizec                s   t  j| S )N)r   r  r   )arg)r   r>   r?   r    s    )r   Z_should_unpackr   rn  _RestructuredDataset)r   r  r  r>   )r   r?   r    s
    
r  c                   s,   e Zd ZdZ fddZedd Z  ZS )r  z>An internal helper for changing the element spec of a dataset.c                s*   || _ || _| j j}tt| || d S )N)r)  r,  rZ   rz   r  rY   )rV   r   r   rW   )r   r>   r?   rY     s    z_RestructuredDataset.__init__c             C   s   | j S )N)r,  )rV   r>   r>   r?   r     s    z!_RestructuredDataset.element_spec)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r    s   r  c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )r  zGA dataset that splits the elements of its input into multiple elements.Nc          
      s   |j }tdd |D r tdtd}xN|D ]F}y||d }W q0 tk
rt   td| d|d  dY q0X q0W || _td	d
 t	|| _
|| _tj| jjf| j}tt| || dS )z!See `unbatch()` for more details.c             s   s   | ]}|j d kV  qdS )r   N)r  )rd   r  r>   r>   r?   r     s    z+_UnbatchDataset.__init__.<locals>.<genexpr>z/Cannot unbatch an input with scalar components.Nr   z|`unbatch()` is only supported for datasets of elements whose components have a matching leading dimension. Encountered both z and rI   c             S   s   |   S )N)rE  )r   r>   r>   r?   r     r   z*_UnbatchDataset.__init__.<locals>.<lambda>)r   anyr;   r   Z	DimensionrV  r)  r	   r  r&  rQ  r   ged_opsZunbatch_datasetrZ   r   rz   r  rY   )rV   rX   r=   Zflat_shapesZknown_batch_dimr  rW   )r   r>   r?   rY     s$    

$z_UnbatchDataset.__init__c             C   s   | j S )N)rQ  )rV   r>   r>   r?   r     s    z_UnbatchDataset.element_spec)N)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r    s   r  c                   sV   e Zd ZdZd fdd	Zdd Zdd Zd	d
 Zedd Z	dd Z
dd Z  ZS )r  zDA `Dataset` that groups its input and performs a windowed reduction.Nc                s   || _ | || | || | | || _tj| j j| jj	j
| jj	j
| jj	j
f| jj	| jj	| jj	d| j}tt| || dS )z$See `group_by_window()` for details.)r  r  r  N)r)  _make_key_func_make_reduce_func_make_window_size_funcr   r  Zgroup_by_window_datasetrZ   	_key_funcr   r  _reduce_func_window_size_funcr   rz   r  rY   )rV   rX   r  r  r  r=   rW   )r   r>   r?   rY     s    



z_GroupByWindowDataset.__init__c                s\    fdd}t j||  tg tjd| _| jj	tg tjsXt
d| jj ddS )z)Make wrapping defun for window_size_func.c                s   t j | tjdS )N)r   )r   r8  r   r   )r   )r  r>   r?   window_size_func_wrapper!  s    zN_GroupByWindowDataset._make_window_size_func.<locals>.window_size_func_wrapper)r  ztInvalid `window_size_func`. `window_size_func` must return a single `tf.int64` scalar tensor but its return type is rI   N)r   r  rZ  r   r   r   r   r  r[  r   r;   )rV   r  r  r>   )r  r?   r    s    z,_GroupByWindowDataset._make_window_size_funcc                sR    fdd}t j||  |d| _| jjtg tj	sNt
d| jj ddS )z!Make wrapping defun for key_func.c                 s   t j |  tjdS )N)r   )r   r8  r   r   )r   )r  r>   r?   key_func_wrapper2  s    z>_GroupByWindowDataset._make_key_func.<locals>.key_func_wrapper)r   zdInvalid `key_func`. `key_func` must return a single `tf.int64` scalar tensor but its return type is rI   N)r   r  rZ  r  r[  r   r   r   r   r   r;   )rV   r  rX   r  r>   )r  r?   r  /  s    z$_GroupByWindowDataset._make_key_funcc             C   sd   t |j}tg tj|f}tj||  |d| _	t
| j	jt sTtd| j	j d| j	jj| _dS )z$Make wrapping defun for reduce_func.)r  zjInvalid `reduce_func`. `reduce_func` must return a single `tf.data.Dataset` object but its return type is rI   N)r   r   r   r   r   r   r   r  rZ  r  rA   r[  rS   r,  )rV   r  rX   Znested_datasetr  r>   r>   r?   r  =  s    

z'_GroupByWindowDataset._make_reduce_funcc             C   s   | j S )N)r,  )rV   r>   r>   r?   r   L  s    z"_GroupByWindowDataset.element_specc             C   s   | j | j| jgS )N)r  r  r  )rV   r>   r>   r?   r   P  s    z _GroupByWindowDataset._functionsc             C   s   dS )NzDataset.group_by_window()r>   )rV   r>   r>   r?   rZ  S  s    z*_GroupByWindowDataset._transformation_name)N)r   r   r   r   rY   r  r  r  r  r   r   rZ  r   r>   r>   )r   r?   r    s   r  c                   s.   e Zd ZdZd fdd	Zedd Z  ZS )r  z#A `Dataset` of pseudorandom values.Nc                sH   t |\| _| _|| _tjf | j| jd| j}tt	| 
| dS )z#A `Dataset` of pseudorandom values.)r6  rj  N)r
   r  rl  rm  r   r  Zrandom_datasetr   rz   r  rY   )rV   r6  r=   rW   )r   r>   r?   rY   Z  s
    zRandomDataset.__init__c             C   s   t g tjS )N)r   r   r   r   )rV   r>   r>   r?   r   b  s    zRandomDataset.element_spec)NN)r   r   r   r   rY   r  r   r   r>   r>   )r   r?   r  W  s   r  c             C   s:   t | }t |}|dks$|dkr(dS t|| S dS )a  Returns the static probability of sampling from the original.

  `tensor_util.constant_value(prob_of_original)` returns `None` if it encounters
  an Op that it isn't defined for. We have some custom logic to avoid this.

  Args:
    initial_dist_t: A tensor of the initial distribution.
    target_dist_t: A tensor of the target distribution.

  Returns:
    The probability of sampling from the original distribution as a constant,
    if it is a constant, or `None`.
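
  Illustrative sketch, not this helper itself: during tracing,
  `tensor_util.constant_value` folds constants but returns `None` for values
  only known at runtime, which is exactly the case this function guards
  against.

  ```python
  import tensorflow as tf
  from tensorflow.python.framework import tensor_util

  @tf.function
  def probe():
    static_t  = tf.constant([0.9, 0.1])
    dynamic_t = tf.random.uniform([2])
    # During tracing, a constant folds to its value; a runtime op does not.
    print("static :", tensor_util.constant_value(static_t))   # [0.9 0.1]
    print("dynamic:", tensor_util.constant_value(dynamic_t))  # None
    return dynamic_t

  probe()
  ```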
  N)r   r   r   r  )r  r  Zinit_staticZtarget_staticr>   r>   r?   r  g  s
    

r  c                sx   dd }t j||f|dj||d} fdd}t j|| f|dj||d}fdd}	|j|	|d}
|
jdd	 |dS )
a[  Filters a dataset based on per-class acceptance probabilities.

  Args:
    dataset: The dataset to be filtered.
    acceptance_dist_ds: A dataset of acceptance probabilities.
    initial_dist_ds: A dataset of the initial probability distribution, given or
      estimated.
    class_func: A function mapping an element of the input dataset to a scalar
      `tf.int32` tensor. Values should be in `[0, num_classes)`.
    seed: (Optional.) Python integer seed for the resampler.
    name: (Optional.) A name for the tf.data operation.

  Returns:
    A dataset of (class value, data) after filtering.
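
  A simplified, hedged sketch of per-class rejection filtering using only the
  public API (it does not reproduce this helper's exact plumbing): each
  element's class indexes an acceptance probability and a uniform draw decides
  whether the element is kept. All values are illustrative.

  ```python
  import tensorflow as tf

  accept_prob = tf.constant([0.2, 1.0])   # per-class acceptance probabilities

  # Toy (class, value) stream; class 0 is heavily down-sampled, class 1 kept.
  ds = tf.data.Dataset.from_tensor_slices(
      (tf.constant([0, 0, 1, 0, 1]), tf.range(5)))

  def keep(class_val, _):
    # Accept the element iff a uniform draw falls below its class probability.
    return tf.random.uniform([], seed=42) < tf.gather(accept_prob, class_val)

  for c, v in ds.filter(keep):
    print(int(c), int(v))
  ```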
  c                s<   t d   tt d fdd fddS )Nr   g      ?c                  s    S )Nr>   r>   )accept_distr>   r?   r     r   zC_filter_ds.<locals>.maybe_warn_on_large_rejection.<locals>.<lambda>c                  s   t j  gddddS )Nz4Proportion of examples rejected by sampler is high: d   
   )r3  r5  Zfirst_n)r"   ZPrintr>   )r  r  proportion_rejectedr>   r?   r     s   
)r#   
reduce_sumr   Zcondr  )r  r  r>   )r  r  r  r?   maybe_warn_on_large_rejection  s
    

z1_filter_ds.<locals>.maybe_warn_on_large_rejection)r=   c                s.   t |tr | }n |}|t| ||fS )N)rA   r  r   gather)Zacceptance_probdataZ	class_val)r  r>   r?   _gather_and_copy  s    

z$_filter_ds.<locals>._gather_and_copyc                s   t jg  |jd|k S )N)r6  r   )r$   Zrandom_uniformr   )Zunused_class_valr  unused_data)r6  r>   r?   _reject  s    z_filter_ds.<locals>._rejectc             S   s   | |fS )Nr>   )Zclass_valuer[   r  r>   r>   r?   r     r   z_filter_ds.<locals>.<lambda>)rG   r   rn  rx  )r   r  r  r  r6  r=   r  r  Z+current_probabilities_and_class_and_data_dsr  r  r>   )r  r6  r?   r  ~  s    


r      r  c       	         s^   | j d pt | d }t|gt|} fdd}|j |dj|||dj|d}|S )Nr   c                s.   t || \}}tt|d dg}||fS )Nr   r   )_estimate_data_distributionr   Ztiler   )num_examples_per_class_seencZupdated_examples_per_class_seendistZ
tiled_dist)dist_estimation_batch_sizer>   r?   update_estimate_and_tile  s
    z;_estimate_initial_dist_ds.<locals>.update_estimate_and_tile)r=   )r   r   fillr   r   rd  r  r  )	r  Zclass_values_dsr  Zsmoothing_constantr=   num_classesZinitial_examples_per_class_seenr  r  r>   )r  r?   r    s    r  c             C   s   | t | jjj }|| S )N)r   Zfinfor   r   Ztiny)initial_probstarget_probsdenomr>   r>   r?   _get_target_to_initial_ratio  s    r  c          
   C   sV   |  d }t|ttj| |tjdd}t|t|}t	|tj
}||fS )a  Estimate data distribution as labels are seen.

  Args:
    c: The class labels.  Type `int32`, shape `[batch_size]`.
    num_examples_per_class_seen: Type `int64`, shape `[num_classes]`, containing
      counts.

  Returns:
    num_examples_per_class_seen: Updated counts.  Type `int64`, shape
      `[num_classes]`.
    dist: The updated distribution.  Type `float32`, shape `[num_classes]`.
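
  A minimal sketch of the update described above, with made-up labels: counts
  are incremented by a one-hot sum over the batch and renormalized into a
  running class distribution.

  ```python
  import tensorflow as tf

  num_classes = 3
  counts = tf.zeros([num_classes], tf.int64)   # num_examples_per_class_seen

  c = tf.constant([0, 2, 2, 1], tf.int32)      # one batch of class labels
  counts += tf.reduce_sum(tf.one_hot(c, num_classes, dtype=tf.int64), axis=0)

  dist = tf.cast(counts, tf.float32) / tf.cast(tf.reduce_sum(counts),
                                               tf.float32)
  print(counts.numpy())  # [1 1 2]
  print(dist.numpy())    # [0.25 0.25 0.5 ]
  ```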
  r   )r   )rT  r#   r:  r  r   Zone_hotr   r   truedivr  r  )r  r  r  Zinit_prob_estimater  r>   r>   r?   r    s    r  c             C   s:   t | |}t|}t|}|}|| ||  }||fS )a  Calculates the acceptance probabilities and mixing ratio.

  In this case, we assume that we can *either* sample from the original data
  distribution with probability `m`, or sample from a reshaped distribution
  that comes from rejection sampling on the original distribution. This
  rejection sampling is done on a per-class basis, with `a_i` representing the
  probability of accepting data from class `i`.

  This method is based on solving the following analysis for the reshaped
  distribution:

  Let F be the probability of a rejection (on any example).
  Let p_i be the proportion of examples in the data in class i (init_probs)
  Let a_i is the rate the rejection sampler should *accept* class i
  Let t_i is the target proportion in the minibatches for class i (target_probs)

  ```
  F = sum_i(p_i * (1-a_i))
    = 1 - sum_i(p_i * a_i)     using sum_i(p_i) = 1
  ```

  An example with class `i` will be accepted if `k` rejections occur, then an
  example with class `i` is seen by the rejector, and it is accepted. This can
  be written as follows:

  ```
  t_i = sum_k=0^inf(F^k * p_i * a_i)
      = p_i * a_j / (1 - F)    using geometric series identity, since 0 <= F < 1
      = p_i * a_i / sum_j(p_j * a_j)        using F from above
  ```

  Note that the following constraints hold:
  ```
  0 <= p_i <= 1, sum_i(p_i) = 1
  0 <= a_i <= 1
  0 <= t_i <= 1, sum_i(t_i) = 1
  ```

  A solution for a_i in terms of the other variables is the following:
    ```a_i = (t_i / p_i) / max_i[t_i / p_i]```

  If we try to minimize the amount of data rejected, we get the following:

  M_max = max_i [ t_i / p_i ]
  M_min = min_i [ t_i / p_i ]

  The desired probability of accepting data if it comes from class `i`:

  a_i = (t_i/p_i - m) / (M_max - m)

  The desired probability of pulling a data element from the original dataset,
  rather than the filtered one:

  m = M_min

  Args:
    initial_probs: A Tensor of the initial probability distribution, given or
      estimated.
    target_probs: A Tensor of the corresponding classes.

  Returns:
    (A 1D Tensor with the per-class acceptance probabilities, the desired
    probability of pull from the original distribution.)
  )r  r#   Z
reduce_maxr  )r  r  Zratio_lZ	max_ratioZ	min_ratiomZa_ir>   r>   r?   r    s    A


r  c                   s2   e Zd ZdZd	 fdd	Zdd Zdd Z  ZS )
r  z>A dataset that stops iteration when `predicate` returns false.Nc                s   || _ tj||  | j d}|jtg tj	sDt
d|j d|| _|| _tj| j jf| jjj| jjd| j}tt| || dS )zSee `take_while()` for details.)r   z]Invalid `predicate`. `predicate` must return a `tf.bool` scalar tensor but its return type isrI   )Zother_argumentsry  N)r)  r   r  rZ  r[  r   r   r   r   rq  r;   
_predicater   r  Ztake_while_datasetrZ   r   r  r   rz   r  rY   )rV   rX   ry  r=   r  rW   )r   r>   r?   rY   @  s    

z_TakeWhileDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r   V  s    z_TakeWhileDataset._functionsc             C   s   dS )NzDataset.take_while()r>   )rV   r>   r>   r?   rZ  Y  s    z&_TakeWhileDataset._transformation_name)N)r   r   r   r   rY   r   rZ  r   r>   r>   )r   r?   r  =  s   r  c                   s"   e Zd ZdZd fdd	Z  ZS )r  z8A `Dataset` contains the unique elements from its input.Nc                sr   || _ x:tt|D ](}|tjtjtjfkrtd| dqW || _	t
j| j jf| j}tt| || dS )zSee `unique()` for details.z!`unique()` does not support type z=, only `tf.int32`, `tf.int64`, and `tf.string` are supported.N)r)  r	   r   r  r   r  r   ro   rS   r   r  Zunique_datasetrZ   r   rz   r  rY   )rV   rX   r=   tyrW   )r   r>   r?   rY   `  s    z_UniqueDataset.__init__)N)r   r   r   r   rY   r   r>   r>   )r   r?   r  ]  s   r  c                   s2   e Zd ZdZd
 fdd	Zdd Zdd	 Z  ZS )r  zBA dataset that allows saving and re-use of already processed data.NFc	       
         s   |d krdd }|| _ || _|| _tj||  d tt|j|d| _tj||  d ||d| _	| j	j
tg tjs| j	j
tg tjstd| j	j
 d|| _tj|j|| jjj| j	jjf|| jj| j	jd	| j}	tt| ||	 d S )
Nc             S   s   | j dd t tdS )Nc             S   s   | S )Nr>   )r   r>   r>   r?   r   ~  r   z=_SnapshotDataset.__init__.<locals>.<lambda>.<locals>.<lambda>)rt  rb  )rv  r  r  r5   )r"  r>   r>   r?   r   }  s   z+_SnapshotDataset.__init__.<locals>.<lambda>z.reader_func)r  r  z.shard_func)r   r  z_Invalid `shard_func`. `shard_func` must return `tf.int64` scalar tensor but its return type is rI   )r[  r^  r\  )r)  _path_compressionr   r  rZ  r   r   _reader_func_shard_funcr[  r   r   r   r   r  r   rS   r   r  Zsnapshot_dataset_v2rZ   r   r  r   rz   r  rY   )
rV   rX   rZ  r\  r[  r^  Zpending_snapshot_expiry_secondsr  r=   rW   )r   r>   r?   rY   r  s>    







z_SnapshotDataset.__init__c             C   s   | j | jgS )N)r  r  )rV   r>   r>   r?   r     s    z_SnapshotDataset._functionsc             C   s   dS )NzDataset.snapshot()r>   )rV   r>   r>   r?   rZ    s    z%_SnapshotDataset._transformation_name)NNNFN)r   r   r   r   rY   r   rZ  r   r>   r>   )r   r?   r  o  s       ,r  c                   s>   e Zd ZdZd fdd	Zdd Zedd Zd	d
 Z  Z	S )r  z1A dataset that scans a function across its input.Nc                s  || _ t|| _t| j| _d}x|rtj||  | j|j	fdd}t
|jtjrht|jdksztd|j d|j\}| _|j\}}	tdd | j}
xBtt|t|
D ](\}}t||std	|
 d
| dqW |j\}}tdd | j}xDtt|t|D ]*\}}||krtd| d
| dqW |j\}}tdd | j}t|||	| _t|}t|}dd t||D }d}xHt||D ]:\}}|jdk	r|jdks| | krd}P qW |r(t|t|||
| _q(W || _| jj t!"  || _#|dk	rnt$j%| j j&t'| j| j| jjj(f| jjd|d| j)}n8t$j%| j j&t'| j| j| jjj(f| jjdd| j)}t*t+| ,|| dS )zSee `scan()` for details.TF)r  r   r  zzInvalid `scan_func`. `scan_func` should return a pair consisting of new state and the output value but its return type is rI   c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   z'_ScanDataset.__init__.<locals>.<lambda>zbInvalid `scan_func`. The element classes for the new state must match the initial state. Expected z, got c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   z`Invalid `scan_func`. The element types for the new state must match the initial state. Expected c             S   s   |   S )N)r  )r   r>   r>   r?   r     r   c             S   s   g | ]\}}| |qS r>   )r  )rd   r  r  r>   r>   r?   rh     s   z)_ScanDataset.__init__.<locals>.<listcomp>N)r  rj  use_default_device)r  rj  )-r)  r   r  Z_initial_stater  Z_state_structurer   r  rZ  r   rA   r   r.   Sequencer   rS   r[  r  Z_output_classesr	   r  r   r   r>  r   r  r,  r  r  r  
_scan_funcr   r   r   rK   r   r  Zscan_datasetrZ   r  r  r   rz   r  rY   )rV   rX   r  r  r  r=   r  r  Znew_state_classesr  Zold_state_classesr  Zold_state_classZnew_state_typesr   Zold_state_typesr  Zold_state_typeZnew_state_shapesr   Zold_state_shapesr  r  r  r  r  rW   )r   r>   r?   rY     s    













z_ScanDataset.__init__c             C   s   | j gS )N)r  )rV   r>   r>   r?   r   #  s    z_ScanDataset._functionsc             C   s   | j S )N)r,  )rV   r>   r>   r?   r   &  s    z_ScanDataset.element_specc             C   s   dS )NzDataset.scan()r>   )rV   r>   r>   r?   rZ  *  s    z!_ScanDataset._transformation_name)NN)
r   r   r   r   rY   r   r  r   rZ  r   r>   r>   )r   r?   r    s    nr  c                   s6   e Zd ZdZd	 fdd	Zdd Zedd Z  ZS )
r  zDA substitute for `Dataset.interleave()` on a fixed list of datasets.Fc       
         s   || _ t|| _|| _| jd j}xt| jdd  D ]v\}}dd }yt|||j}W q6 tt	fk
r } z.td|d j d|d  d|j d|W d d }~X Y q6X q6W || _
tj| j jd	d
 | jD fd| ji| j}	tt| |	 d S )Nr   r   c             S   s.   |  |g}|d kr*td|  d| d|S )NzNo common supertype of z and rI   )r:  rS   )r4  r5  r^  r>   r>   r?   r_  8  s    z=_DirectedInterleaveDataset.__init__.<locals>.common_supertypez[Invalid `datasets`. `datasets` must have compatible element specs.
 Dataset 0 element_spec=z
.
Dataset z element_spec=rI   c             S   s   g | ]
}|j qS r>   )rZ   )rd   
data_inputr>   r>   r?   rh   M  s    z7_DirectedInterleaveDataset.__init__.<locals>.<listcomp>r  )_selector_inputr  _data_inputsZ_stop_on_empty_datasetr   rq   r	   r  rS   r;   r,  r  Zdirected_interleave_datasetrZ   r   rz   r  rY   )
rV   r  Zdata_inputsr  r  re   r  r_  r  rW   )r   r>   r?   rY   1  s&    
&
z#_DirectedInterleaveDataset.__init__c             C   s   | j g| j S )N)r  r  )rV   r>   r>   r?   rP   S  s    z"_DirectedInterleaveDataset._inputsc             C   s   | j S )N)r,  )rV   r>   r>   r?   r   V  s    z'_DirectedInterleaveDataset.element_spec)F)	r   r   r   r   rY   rP   r  r   r   r>   r>   )r   r?   r  .  s   "r  c             C   s   t tj| j|f| j| jS )N)r+  r   Zrewrite_datasetrZ   r   r   )r   Zrewriter>   r>   r?   rH  [  s    rH  c             C   sP   dd }| g}t  }g }g }x*|rF|||\}}|| || qW ||fS )zDCollects resource inputs for the given ops (and its variant inputs).c             S   sV   g }g }|   }||kr ||fS || t|\}}| dd |jD  ||fS )aZ  Processes the next element of the op queue.

    Args:
      op_queue: Queue of Dataset operations to process.
      seen_ops: Already processed set of Operations.

    Returns:
      A 2-tuple containing sets of resource handles. The first tuple entry
      contains read-only handles and the second entry contains read-write
      handles.
    c             s   s    | ]}|j tjkr|jV  qd S )N)r   r   r   r   )rd   r  r>   r>   r?   r   ~  s    z=_collect_resource_inputs.<locals>._process.<locals>.<genexpr>)r   r:  	acd_utilsZget_read_write_resource_inputsr   r   )op_queueseen_opsreadswritesr   r>   r>   r?   _processf  s    
z*_collect_resource_inputs.<locals>._process)setr   )r   r  r  r  Z	all_readsZ
all_writesr  r  r>   r>   r?   _collect_resource_inputsc  s    
r  c       	      C   s   d}| j dkrbt| \}}x"|D ]}||kr d}|| q W x"|D ]}||krDd}|| qDW | j dkr| jd }dd | D }t|dkrt|d \}}x"|D ]}||krd}|| qW x"|D ]}||krd}|| qW |S )	zCUpdates resource inputs for tf.data ops with indirect dependencies.F)ZDatasetToSingleElementZDatasetToTFRecordr3   T)ZIteratorGetNextZIteratorGetNextSyncZIteratorGetNextAsOptionalr   c             S   s   g | ]}|j d kr|qS )ZMakeIterator)rD   )rd   r   r>   r>   r?   rh     s    z&_resource_resolver.<locals>.<listcomp>r   )rD   r  r:  r   Z	consumersr   )	r   Zresource_readsZresource_writesupdatedr  r  r
  r  Zmake_iterator_opsr>   r>   r?   _resource_resolver  s2    






r  Fz#data.experimental.enable_debug_modec               C   s   t  rtd ntddS )a  Enables debug mode for tf.data.

  Example usage with pdb module:
  ```
  import tensorflow as tf
  import pdb

  tf.data.experimental.enable_debug_mode()

  def func(x):
    # Python 3.7 and older requires `pdb.Pdb(nosigint=True).set_trace()`
    pdb.set_trace()
    x = x + 1
    return x

  dataset = tf.data.Dataset.from_tensor_slices([1, 2, 3])
  dataset = dataset.map(func)

  for item in dataset:
    print(item)
  ```

  The effect of debug mode is two-fold:

  1) Any transformations that would introduce asynchrony, parallelism, or
  non-determinism to the input pipeline execution will be forced to execute
  synchronously, sequentially, and deterministically.

  2) Any user-defined functions passed into tf.data transformations such as
  `map` will be wrapped in `tf.py_function` so that their body is executed
  "eagerly" as a Python function as opposed to a traced TensorFlow graph, which
  is the default behavior. Note that even when debug mode is enabled, the
  user-defined function is still traced  to infer the shape and type of its
  outputs; as a consequence, any `print` statements or breakpoints will be
  triggered once during the tracing before the actual execution of the input
  pipeline.

  NOTE: As the debug mode setting affects the construction of the tf.data input
  pipeline, it should be enabled before any tf.data definitions.

  Raises:
    ValueError: When invoked from graph mode.
  Tz5`enable_debug_mode() is only supported in eager mode.N)r   r   toggle_debug_moder;   r>   r>   r>   r?   enable_debug_mode  s    -
r  c             C   s   | a d S )N)r   )
debug_moder>   r>   r?   r    s    r  )N)N)r  r  N)r   r  r  r  r  r   r   rp   r   Ztensorflow.core.frameworkr   r   r   Ztensorflow.pythonr   r   r   r   rM   r   Ztensorflow.python.data.utilr	   r
   r   r   Ztensorflow.python.eagerr   Ztensorflow.python.frameworkr   r   r  r   r   r   r   r   r  r   r   r   r   r   r   r   Ztensorflow.python.opsr   r   r   r   r   r  r    r!   r"   r#   r$   r%   r&   Ztensorflow.python.ops.raggedr'   Ztensorflow.python.trackabler(   r)   rw   r*   Zresource_libZtensorflow.python.typesr+   Ztensorflow.python.utilr,   r-   r7  Ztensorflow.python.util.compatr.   Z tensorflow.python.util.tf_exportr/   r  
LazyLoaderglobalsr0   r1   r2   ZNotDifferentiabler5   Zexport_constantr   r8   r9   ZCOMPRESSION_GZIPZCOMPRESSION_SNAPPYZDATASET_SPEC_FILENAMEr@   rF   IterableZ	TrackableZCompositeTensorABCMetarG   rQ   r   r  r   r  r  r  r&  r  re  r  r'  r(  r*  r+  r-  r0  r1  ZBatchableTypeSpecr   r   ZCapturableResourcer|   r   r  r  r!  r#  rB  r  rM  rJ  rP  rR  rT  r`  ra  r{  r~  r  r  rf  rk  rl  ro  rr  rs  r-  r}  r   r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  r  rH  r  Zregister_acd_resource_resolverr  r   r  r  r>   r>   r>   r?   <module>   s  

	                            2    
!
-)
 *E")-('81R(4 ,F+! S
7  
M > -*(3