"""Test utilities for tf.data benchmarking functionality."""
import time

import numpy as np

from tensorflow.python.client import session
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.data.ops import options as options_lib
from tensorflow.python.data.util import nest
from tensorflow.python.eager import context
from tensorflow.python.platform import test


class DatasetBenchmarkBase(test.Benchmark):
  """Base class for dataset benchmarks."""

  def _run_eager_benchmark(self, iterable, iters, warmup):
    """Benchmark the iterable in eager mode.

    Runs the iterable `iters` times. In each iteration, the benchmark measures
    the time it takes to execute the iterable.

    Args:
      iterable: The tf op or tf.data Dataset to benchmark.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.

    Returns:
      A float, representing the median time (across `iters` runs) it takes
      to execute the iterable once.

    Raises:
      RuntimeError: When executed in graph mode.
    """
    deltas = []
    if not context.executing_eagerly():
      raise RuntimeError(
          "Eager mode benchmarking is not supported in graph mode.")

    for _ in range(iters):
      if warmup:
        # Untimed run to warm up caches before the measured run.
        iterator = iter(iterable)
        next(iterator)

      iterator = iter(iterable)
      start = time.time()
      next(iterator)
      end = time.time()
      deltas.append(end - start)
    return np.median(deltas)

  def _run_graph_benchmark(self,
                           iterable,
                           iters,
                           warmup,
                           session_config,
                           initializer=None):
    """Benchmarks the iterable in graph mode.

    Runs the iterable `iters` times. In each iteration, the benchmark measures
    the time it takes to execute the iterable.

    Args:
      iterable: The tf op or tf.data Dataset to benchmark.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.
      initializer: The initializer op required to initialize the iterable.

    Returns:
      A float, representing the median time (across `iters` runs) it takes
      to execute the iterable once.

    Raises:
      RuntimeError: When executed in eager mode.
    """
    deltas = []
    if context.executing_eagerly():
      raise RuntimeError(
          "Graph mode benchmarking is not supported in eager mode.")

    for _ in range(iters):
      with session.Session(config=session_config) as sess:
        if warmup:
          # Untimed warmup run to populate the session caches.
          if initializer:
            sess.run(initializer)
          sess.run(iterable)

        if initializer:
          sess.run(initializer)
        start = time.time()
        sess.run(iterable)
        end = time.time()

      deltas.append(end - start)
    return np.median(deltas)

  def run_op_benchmark(self, op, iters=1, warmup=True, session_config=None):
    """Benchmarks the op.

    Runs the op `iters` times. In each iteration, the benchmark measures
    the time it takes to execute the op.

    Args:
      op: The tf op to benchmark.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-execution wall time of the op in seconds.
      This is the median time (across `iters` runs) it takes to execute the
      op once.
    """
    if context.executing_eagerly():
      return self._run_eager_benchmark(iterable=op, iters=iters, warmup=warmup)

    return self._run_graph_benchmark(
        iterable=op, iters=iters, warmup=warmup, session_config=session_config)

  def run_benchmark(self,
                    dataset,
                    num_elements,
                    iters=1,
                    warmup=True,
                    apply_default_optimizations=False,
                    session_config=None):
    """Benchmarks the dataset.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through in each
        benchmark iteration.
      iters: Number of times to repeat the timing.
      warmup: If true, warms up the session caches by running an untimed run.
      apply_default_optimizations: Determines whether default optimizations
        should be applied.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-element wall time of the dataset in seconds.
      This is the median time (across `iters` runs) it takes for the dataset
      to go through `num_elements` elements, divided by `num_elements`.
    """
    options = options_lib.Options()
    options.experimental_optimization.apply_default_optimizations = (
        apply_default_optimizations)
    dataset = dataset.with_options(options)
    # Skipping the first `num_elements - 1` elements performs the iteration
    # inside the C++ pipeline, so a single `next()` (or `session.run()`) call
    # pulls `num_elements` elements through the dataset.
    dataset = dataset.skip(num_elements - 1)

    if context.executing_eagerly():
      median_duration = self._run_eager_benchmark(
          iterable=dataset, iters=iters, warmup=warmup)
      return median_duration / float(num_elements)

    iterator = dataset_ops.make_initializable_iterator(dataset)
    next_element = iterator.get_next()
    op = nest.flatten(next_element)[0].op
    median_duration = self._run_graph_benchmark(
        iterable=op,
        iters=iters,
        warmup=warmup,
        session_config=session_config,
        initializer=iterator.initializer)
    return median_duration / float(num_elements)

  def run_and_report_benchmark(self,
                               dataset,
                               num_elements,
                               name,
                               iters=5,
                               extras=None,
                               warmup=True,
                               apply_default_optimizations=False,
                               session_config=None):
    """Benchmarks the dataset and reports the stats.

    Runs the dataset `iters` times. In each iteration, the benchmark measures
    the time it takes to go through `num_elements` elements of the dataset.
    This is followed by logging/printing the benchmark stats.

    Args:
      dataset: Dataset to benchmark.
      num_elements: Number of dataset elements to iterate through in each
        benchmark iteration.
      name: Name of the benchmark.
      iters: Number of times to repeat the timing.
      extras: A dict which maps string keys to additional benchmark info.
      warmup: If true, warms up the session caches by running an untimed run.
      apply_default_optimizations: Determines whether default optimizations
        should be applied.
      session_config: A ConfigProto protocol buffer with configuration options
        for the session. Applicable only for benchmarking in graph mode.

    Returns:
      A float, representing the per-element wall time of the dataset in seconds.
      This is the median time (across `iters` runs) it takes for the dataset
      to go through `num_elements` elements, divided by `num_elements`.
    """
    wall_time = self.run_benchmark(
        dataset=dataset,
        num_elements=num_elements,
        iters=iters,
        warmup=warmup,
        apply_default_optimizations=apply_default_optimizations,
        session_config=session_config)
    if extras is None:
      extras = {}
    if context.executing_eagerly():
      name = "{}.eager".format(name)
      extras["implementation"] = "eager"
    else:
      name = "{}.graph".format(name)
      extras["implementation"] = "graph"
    extras["num_elements"] = num_elements
    self.report_benchmark(
        wall_time=wall_time, iters=iters, name=name, extras=extras)
    return wall_time
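# ---------------------------------------------------------------------------
# Example usage: a minimal sketch of a concrete benchmark built on
# `DatasetBenchmarkBase`. The subclass name, the pipeline, and the element
# count below are illustrative assumptions, not part of the original module;
# real benchmarks subclass `DatasetBenchmarkBase` in sibling files (e.g.
# map_benchmark.py) and are invoked through the standard benchmark runner.
# ---------------------------------------------------------------------------
if __name__ == "__main__":

  class _ExampleMapBenchmark(DatasetBenchmarkBase):
    """Hypothetical benchmark of a trivial map pipeline (illustration only)."""

    def benchmark_map_increment(self):
      # Reports the median per-element wall time over the default 5 timing
      # runs via `report_benchmark`.
      dataset = dataset_ops.Dataset.range(10000).map(lambda x: x + 1)
      self.run_and_report_benchmark(
          dataset=dataset, num_elements=10000, name="map_increment")

  test.main()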