"""Callbacks: utilities called at certain points during model training."""

import collections
import copy
import csv
import json
import os
import re
import sys
import time

import numpy as np

from tensorflow.core.framework import summary_pb2
from tensorflow.python.data.ops import iterator_ops
from tensorflow.python.distribute import collective_all_reduce_strategy
from tensorflow.python.distribute import distribution_strategy_context as ds_context
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import parameter_server_strategy_v2
from tensorflow.python.distribute import tpu_strategy
from tensorflow.python.eager import context
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import errors
from tensorflow.python.framework import ops
from tensorflow.python.keras import backend
from tensorflow.python.keras.distribute import distributed_file_utils
from tensorflow.python.keras.distribute import worker_training_state
from tensorflow.python.keras.optimizer_v2 import learning_rate_schedule
from tensorflow.python.keras.utils import generic_utils
from tensorflow.python.keras.utils import tf_utils
from tensorflow.python.keras.utils import version_utils
from tensorflow.python.keras.utils.data_utils import Sequence
from tensorflow.python.keras.utils.generic_utils import Progbar
from tensorflow.python.keras.utils.io_utils import path_to_string
from tensorflow.python.keras.utils.mode_keys import ModeKeys
from tensorflow.python.lib.io import file_io
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import summary_ops_v2
from tensorflow.python.platform import gfile
from tensorflow.python.platform import tf_logging as logging
from tensorflow.python.profiler import profiler_v2 as profiler
from tensorflow.python.saved_model import save_options as save_options_lib
from tensorflow.python.training import checkpoint_management
from tensorflow.python.training.saving import checkpoint_options as checkpoint_options_lib
from tensorflow.python.util import nest
from tensorflow.python.util.tf_export import keras_export
from tensorflow.tools.docs import doc_controls

try:
  import requests
except ImportError:
  requests = None
             C   s   t | tr| S | sg } |	tjkrRt |_t g| p4g  |jg } |rR| t| t| }
|	 }|

| t|
||||||||	d	 d|
j_|
S )a  Configures callbacks for use in various training loops.

  Args:
      callbacks: List of Callbacks.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epochs to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
      count_mode: One of 'steps' or 'samples'. Per-batch or per-sample count.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.

  Returns:
      Instance of CallbackList used to control all Callbacks.
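
  Example:

  A minimal illustrative sketch (assumes an already-compiled `model`; this
  helper is used internally by the legacy Keras training loops):

  ```python
  cb_list = configure_callbacks([], model, epochs=2, verbose=0)
  cb_list.on_train_begin()
  ```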
  """
  # Check if callbacks have already been configured.
  if isinstance(callbacks, CallbackList):
    return callbacks

  if not callbacks:
    callbacks = []

  # Add additional callbacks during training.
  if mode == ModeKeys.TRAIN:
    model.history = History()
    callbacks = [BaseLogger()] + (callbacks or []) + [model.history]
    if verbose:
      callbacks.append(ProgbarLogger(count_mode))
  callback_list = CallbackList(callbacks)

  # Set callback model.
  callback_model = model._get_callback_model()  # pylint: disable=protected-access
  callback_list.set_model(callback_model)

  set_callback_parameters(
      callback_list,
      model,
      do_validation=do_validation,
      batch_size=batch_size,
      epochs=epochs,
      steps_per_epoch=steps_per_epoch,
      samples=samples,
      verbose=verbose,
      mode=mode)

  callback_list.model.stop_training = False
  return callback_list
r@   c	             C   s   |j }	x(| D ] }
t|
ttfr|	dd |
_qW g }|tjkr^t|	}|r^|dd |	D 7 }|||||||d}| | dS )aK  Sets callback parameters.

  Args:
      callback_list: CallbackList instance.
      model: Model being trained.
      do_validation: Whether or not validation loop will be run.
      batch_size: Number of samples per batch.
      epochs: Number of epochs to train.
      steps_per_epoch: Number of batches to run per training epoch.
      samples: Number of training samples.
      verbose: int, 0 or 1. Keras logging verbosity to pass to ProgbarLogger.
      mode: String. One of ModeKeys.TRAIN, ModeKeys.TEST, or ModeKeys.PREDICT.
        Which loop mode to configure callbacks for.
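
  Example:

  A sketch under the same assumptions as `configure_callbacks` above
  (`cb_list` is a `CallbackList` and `model` is a compiled model):

  ```python
  set_callback_parameters(cb_list, model, epochs=1, verbose=0)
  ```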
  r&   Nc             S   s   g | ]}d | qS )val_r>   ).0nr>   r>   r?   
<listcomp>   s    z+set_callback_parameters.<locals>.<listcomp>)r)   r*   r'   r,   r-   r(   metrics)	metrics_namesr/   r4   r6   stateful_metricsr   PREDICTcopy
set_params)r=   r9   r(   r)   r*   r+   r,   r-   r.   metric_namescbkZcallback_metricsZcallback_paramsr>   r>   r?   r8      s"    


r8   c             C   s(   t | dp&t | dp&t| ttjtjfS )z5Checks if data is a generator, Sequence, or Iterator.__next__next)hasattrr/   r   r   IteratorZIteratorBase)datar>   r>   r?   _is_generator_like   s    rR    c             C   sL   | j }|tjtjhkr@|r@x,t||D ]\}}|||| < q&W n||d< |S )z4Computes logs for sending to `on_batch_end` methods.outputs)rF   r   r1   TESTzip)r9   logsrT   r.   prefixrK   labeloutputr>   r>   r?   	make_logs   s    r[   zkeras.callbacks.CallbackListc               @   s&  e Zd ZdZd@ddZdd ZdAdd	Zd
d Zdd Zdd Z	dBddZ
dd Zdd Zdd Zdd Zdd ZdCddZdDddZdEd d!ZdFd"d#ZdGd$d%ZdHd&d'ZdId(d)ZdJd*d+ZdKd,d-ZdLd.d/ZdMd0d1ZdNd2d3ZdOd4d5ZdPd6d7ZdQd8d9ZdRd:d;Zd<d= Z d>d? Z!dS )Sr0   z*Container abstracting a list of callbacks.NFc             K   s   |rt |ng | _| || |r.| | |r<| | tdd | jD | _tdd | jD | _t	dd | jD | _
t	dd | jD | _t	dd | jD | _|   t	dd | jD | _d| _i | _d	| _g | _d	S )
a  Container for `Callback` instances.

    This object wraps a list of `Callback` instances, making it possible
    to call them all at once via a single endpoint
    (e.g. `callback_list.on_epoch_end(...)`).

    Args:
      callbacks: List of `Callback` instances.
      add_history: Whether a `History` callback should be added, if one does not
        already exist in the `callbacks` list.
      add_progbar: Whether a `ProgbarLogger` callback should be added, if one
        does not already exist in the `callbacks` list.
      model: The `Model` these callbacks are used with.
      **params: If provided, parameters will be passed to each `Callback` via
        `Callback.set_params`.
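
    Example:

    An illustrative sketch (assumes a compiled `model`; the logged values
    are hypothetical):

    ```python
    callbacks = tf.keras.callbacks.CallbackList(
        [tf.keras.callbacks.History()], add_progbar=True, model=model)
    callbacks.on_epoch_begin(0)
    callbacks.on_train_batch_end(0, {'loss': 0.5})
    callbacks.on_epoch_end(0, {'loss': 0.5})
    ```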
    c             s   s   | ]}t |d dV  qdS )_supports_tf_logsFN)getattr)rB   cbr>   r>   r?   	<genexpr>   s    z(CallbackList.__init__.<locals>.<genexpr>c             s   s2   | ]*}|  s| s| rt|d dV  qdS )r\   FN)_implements_train_batch_hooks_implements_test_batch_hooks_implements_predict_batch_hooksr]   )rB   r^   r>   r>   r?   r_      s   c             s   s   | ]}|  V  qd S )N)r`   )rB   r^   r>   r>   r?   r_      s    c             s   s   | ]}|  V  qd S )N)ra   )rB   r^   r>   r>   r?   r_      s    c             s   s   | ]}|  V  qd S )N)rb   )rB   r^   r>   r>   r?   r_      s    c             s   s   | ]}|j jt kV  qd S )N)	__class____name__globals)rB   rL   r>   r>   r?   r_     s       N)r#   flattenr;   _add_default_callbacksr7   rJ   allr\   _batch_hooks_support_tf_logsany_should_call_train_batch_hooks_should_call_test_batch_hooks _should_call_predict_batch_hooks$_disallow_batch_hooks_in_ps_strategy_check_timing_num_batches_for_timing_check_hook_times_batch_start_time_batch_times)selfr;   add_historyadd_progbarr9   paramsr>   r>   r?   __init__   s0    

zCallbackList.__init__c             C   s   d| _ d| _x0| jD ]&}t|tr*|| _ qt|tr|| _qW | j dkrh|rhtdd| _ | jd| j  | jdkr|rt | _| j| j dS )z)Adds `Callback`s that are always present.Nr'   )r<   r   )Z_progbar_historyr;   r/   r6   r2   insertr5   )ru   rv   rw   r^   r>   r>   r?   rh     s    


z#CallbackList._add_default_callbacksc             C   s.   |dkri S | j r|S |r$| jr$|S t|S )z?Turns tensors into numpy arrays or Python scalars if necessary.N)r\   rj   r   sync_to_numpy_or_python_type)ru   rW   is_batch_hookr>   r>   r?   _process_logs  s    
zCallbackList._process_logsc             C   s   | j | d S )N)r;   r5   )ru   callbackr>   r>   r?   r5   %  s    zCallbackList.appendc             C   s$   || _ x| jD ]}|| qW d S )N)rx   r;   rJ   )ru   rx   r   r>   r>   r?   rJ   (  s    zCallbackList.set_paramsc             C   s2   || _ | jr| j|_x| jD ]}|| qW d S )N)r9   rz   r3   r;   r7   )ru   r9   r   r>   r>   r?   r7   -  s
    zCallbackList.set_modelc             C   sL   | j s
dS |dkr"| ||| n&|dkr:| ||| ntd|dS )z4Helper function for all batch_{begin | end} methods.NbeginendzUnrecognized hook: {})r;   _call_batch_begin_hook_call_batch_end_hook
ValueErrorformat)ru   r.   hookbatchrW   r>   r>   r?   _call_batch_hook4  s    zCallbackList._call_batch_hookc             C   s.   dj |d}| ||| | jr*t | _dS )z/Helper function for `on_*_batch_begin` methods.zon_{mode}_batch_begin)r.   N)r   _call_batch_hook_helperrp   timers   )ru   r.   r   rW   	hook_namer>   r>   r?   r   @  s    z#CallbackList._call_batch_begin_hookc             C   s  dj |d}| jr4|dkr4t | j }| j| | ||| t| j| jkr|}dj |d}t	| jt| j }t	| j
| t| j
|  }	t	| j
| t| j
|  }
d| }d}|
|krt|j |||
d |	|krt|j |||	d d| _d	| _g | _i | _
d	S )
z-Helper function for `on_*_batch_end` methods.zon_{mode}_batch_end)r.   r&   zon_{mode}_batch_beging      ?zCallback method `{hook}` is slow compared to the batch time (batch time: {batch_time:.4f}s vs `{hook}` time: {hook_time:.4f}s). Check your callbacks.)r   
batch_timeZ	hook_timeFN)r   rp   r   rs   rt   r5   r   lenrq   sumrr   loggingwarning)ru   r.   r   rW   r   r   Zend_hook_nameZbegin_hook_nameZavg_batch_timeZavg_end_hook_timeZavg_begin_hook_timeZthreshold_timeZwarning_msgr>   r>   r?   r   H  s:    

z!CallbackList._call_batch_end_hookc             C   sv   | j rt }| j|dd}x"| jD ]}t||}||| q$W | j rr|| jkrZg | j|< | j| t |  dS )z+Helper function for `on_*_batch_*` methods.T)r}   N)rp   r   r~   r;   r]   rr   r5   )ru   r   r   rW   
start_timer   r   r>   r>   r?   r   n  s    


z$CallbackList._call_batch_hook_helperc             C   s4   |t jkr|   n|t jkr(|   n|   dS )z:Helper function for on_{train|test|predict}_begin methods.N)r   r1   on_train_beginrU   on_test_beginon_predict_begin)ru   r.   r>   r>   r?   _call_begin_hook}  s
    



zCallbackList._call_begin_hookc             C   s4   |t jkr|   n|t jkr(|   n|   dS )z8Helper function for on_{train|test|predict}_end methods.N)r   r1   on_train_endrU   on_test_endon_predict_end)ru   r.   r>   r>   r?   _call_end_hook  s
    



zCallbackList._call_end_hookc             C   s   | j r| jtjd||d d S )Nr   )rW   )rl   r   r   r1   )ru   r   rW   r>   r>   r?   on_batch_begin  s    zCallbackList.on_batch_beginc             C   s   | j r| jtjd||d d S )Nr   )rW   )rl   r   r   r1   )ru   r   rW   r>   r>   r?   on_batch_end  s    zCallbackList.on_batch_endc             C   s*   |  |}x| jD ]}||| qW dS )a&  Calls the `on_epoch_begin` methods of its callbacks.

    This function should only be called during TRAIN mode.

    Args:
        epoch: Integer, index of epoch.
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   on_epoch_begin)ru   epochrW   r   r>   r>   r?   r     s    

zCallbackList.on_epoch_beginc             C   s*   |  |}x| jD ]}||| qW dS )a`  Calls the `on_epoch_end` methods of its callbacks.

    This function should only be called during TRAIN mode.

    Args:
        epoch: Integer, index of epoch.
        logs: Dict, metric results for this training epoch, and for the
          validation epoch if validation is performed. Validation result keys
          are prefixed with `val_`.
    N)r~   r;   on_epoch_end)ru   r   rW   r   r>   r>   r?   r     s    
zCallbackList.on_epoch_endc             C   s   | j r| jtjd||d dS )aM  Calls the `on_train_batch_begin` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict, contains the return value of `model.train_step`. Typically,
          the values of the `Model`'s metrics are returned.  Example:
          `{'loss': 0.2, 'accuracy': 0.7}`.
    r   )rW   N)rl   r   r   r1   )ru   r   rW   r>   r>   r?   on_train_batch_begin  s    	z!CallbackList.on_train_batch_beginc             C   s   | j r| jtjd||d dS )zCalls the `on_train_batch_end` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    r   )rW   N)rl   r   r   r1   )ru   r   rW   r>   r>   r?   on_train_batch_end  s    zCallbackList.on_train_batch_endc             C   s   | j r| jtjd||d dS )aK  Calls the `on_test_batch_begin` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict, contains the return value of `model.test_step`. Typically,
          the values of the `Model`'s metrics are returned.  Example:
          `{'loss': 0.2, 'accuracy': 0.7}`.
    r   )rW   N)rm   r   r   rU   )ru   r   rW   r>   r>   r?   on_test_batch_begin  s    	z CallbackList.on_test_batch_beginc             C   s   | j r| jtjd||d dS )zCalls the `on_test_batch_end` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    r   )rW   N)rm   r   r   rU   )ru   r   rW   r>   r>   r?   on_test_batch_end  s    zCallbackList.on_test_batch_endc             C   s   | j r| jtjd||d dS )a9  Calls the `on_predict_batch_begin` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict, contains the return value of `model.predict_step`,
          it typically returns a dict with a key 'outputs' containing
          the model's outputs.
    r   )rW   N)rn   r   r   rH   )ru   r   rW   r>   r>   r?   on_predict_batch_begin  s    	z#CallbackList.on_predict_batch_beginc             C   s   | j r| jtjd||d dS )zCalls the `on_predict_batch_end` methods of its callbacks.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    r   )rW   N)rn   r   r   rH   )ru   r   rW   r>   r>   r?   on_predict_batch_end  s    z!CallbackList.on_predict_batch_endc             C   s(   |  |}x| jD ]}|| qW dS )zCalls the `on_train_begin` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   r   )ru   rW   r   r>   r>   r?   r     s    
zCallbackList.on_train_beginc             C   s(   |  |}x| jD ]}|| qW dS )zCalls the `on_train_end` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   r   )ru   rW   r   r>   r>   r?   r     s    
zCallbackList.on_train_endc             C   s(   |  |}x| jD ]}|| qW dS )zCalls the `on_test_begin` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   r   )ru   rW   r   r>   r>   r?   r     s    
zCallbackList.on_test_beginc             C   s(   |  |}x| jD ]}|| qW dS )zCalls the `on_test_end` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   r   )ru   rW   r   r>   r>   r?   r     s    
zCallbackList.on_test_endc             C   s(   |  |}x| jD ]}|| qW dS )zCalls the 'on_predict_begin` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   r   )ru   rW   r   r>   r>   r?   r   "  s    
zCallbackList.on_predict_beginc             C   s(   |  |}x| jD ]}|| qW dS )zCalls the `on_predict_end` methods of its callbacks.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    N)r~   r;   r   )ru   rW   r   r>   r>   r?   r   -  s    
zCallbackList.on_predict_endc             C   s
   t | jS )N)iterr;   )ru   r>   r>   r?   __iter__8  s    zCallbackList.__iter__c             C   sh   t  }|jrdg }x>| jD ]4}t|ddr,q| sD| sD| r|| qW |rdt	d
|dS )z>Error out if batch-level callbacks are passed with PSStrategy.r\   FziBatch-level `Callback`s are not supported with `ParameterServerStrategy`. Found unsupported callbacks: {}N)
ds_contextZget_strategyZ_should_use_with_coordinatorr;   r]   r`   ra   rb   r5   r   r   )ru   ZstrategyZunsupported_callbacksr^   r>   r>   r?   ro   ;  s    z1CallbackList._disallow_batch_hooks_in_ps_strategy)NFFN)F)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)N)"rd   
__module____qualname____doc__ry   rh   r~   r5   rJ   r7   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   r   ro   r>   r>   r>   r?   r0      sD      
7


&		


















r0   zkeras.callbacks.Callbackc               @   sp  e Zd ZdZdd Zdd Zdd Zeje	j
d/d	d
Zeje	j
d0ddZejd1ddZejd2ddZeje	j
d3ddZeje	j
d4ddZeje	j
d5ddZeje	j
d6ddZeje	j
d7ddZeje	j
d8ddZejd9ddZejd:dd Zejd;d!d"Zejd<d#d$Zejd=d%d&Zejd>d'd(Zd)d* Zd+d, Zd-d. ZdS )?Callbacka]  Abstract base class used to build new callbacks.

  Callbacks can be passed to keras methods such as `fit`, `evaluate`, and
  `predict` in order to hook into the various stages of the model training and
  inference lifecycle.

  To create a custom callback, subclass `keras.callbacks.Callback` and override
  the method associated with the stage of interest. See
  https://www.tensorflow.org/guide/keras/custom_callback for more information.

  Example:

  >>> training_finished = False
  >>> class MyCallback(tf.keras.callbacks.Callback):
  ...   def on_train_end(self, logs=None):
  ...     global training_finished
  ...     training_finished = True
  >>> model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(1,))])
  >>> model.compile(loss='mean_squared_error')
  >>> model.fit(tf.constant([[1.0]]), tf.constant([[1.0]]),
  ...           callbacks=[MyCallback()])
  >>> assert training_finished == True

  If you want to use `Callback` objects in a custom training loop:

  1. You should pack all your callbacks into a single `callbacks.CallbackList`
     so they can all be called together.
  2. You will need to manually call all the `on_*` methods at the appropriate
     locations in your loop. Like this:

     ```
     callbacks = tf.keras.callbacks.CallbackList([...])
     callbacks.append(...)

     callbacks.on_train_begin(...)
     for epoch in range(EPOCHS):
       callbacks.on_epoch_begin(epoch)
       for i, data in dataset.enumerate():
         callbacks.on_train_batch_begin(i)
         batch_logs = model.train_step(data)
         callbacks.on_train_batch_end(i, batch_logs)
       epoch_logs = ...
       callbacks.on_epoch_end(epoch, epoch_logs)
     final_logs = ...
     callbacks.on_train_end(final_logs)
     ```

  Attributes:
      params: Dict. Training parameters
          (eg. verbosity, batch size, number of epochs...).
      model: Instance of `keras.models.Model`.
          Reference of the model being trained.

  The `logs` dictionary that callback methods
  take as argument will contain keys for quantities relevant to
  the current batch or epoch (see method-specific docstrings).
  c             C   s   d | _ d | _d | _d| _d S )NF)Zvalidation_datar9   _chief_worker_onlyr\   )ru   r>   r>   r?   ry     s    zCallback.__init__c             C   s
   || _ d S )N)rx   )ru   rx   r>   r>   r?   rJ     s    zCallback.set_paramsc             C   s
   || _ d S )N)r9   )ru   r9   r>   r>   r?   r7     s    zCallback.set_modelNc             C   s   dS )z;A backwards compatibility alias for `on_train_batch_begin`.Nr>   )ru   r   rW   r>   r>   r?   r     s    zCallback.on_batch_beginc             C   s   dS )z9A backwards compatibility alias for `on_train_batch_end`.Nr>   )ru   r   rW   r>   r>   r?   r     s    zCallback.on_batch_endc             C   s   dS )aI  Called at the start of an epoch.

    Subclasses should override for any actions to run. This function should only
    be called during TRAIN mode.

    Args:
        epoch: Integer, index of epoch.
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_epoch_end(self, epoch, logs=None):
    """Called at the end of an epoch.

    Subclasses should override for any actions to run. This function should only
    be called during TRAIN mode.

    Args:
        epoch: Integer, index of epoch.
        logs: Dict, metric results for this training epoch, and for the
          validation epoch if validation is performed. Validation result keys
          are prefixed with `val_`. For training epoch, the values of the
          `Model`'s metrics are returned. Example: `{'loss': 0.2, 'accuracy':
          0.7}`.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_train_batch_begin(self, batch, logs=None):
    """Called at the beginning of a training batch in `fit` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict, contains the return value of `model.train_step`. Typically,
          the values of the `Model`'s metrics are returned.  Example:
          `{'loss': 0.2, 'accuracy': 0.7}`.
    """
    # For backwards compatibility.
    self.on_batch_begin(batch, logs=logs)

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_train_batch_end(self, batch, logs=None):
    """Called at the end of a training batch in `fit` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """
    # For backwards compatibility.
    self.on_batch_end(batch, logs=logs)

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_test_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `evaluate` methods.

    Also called at the beginning of a validation batch in the `fit`
    methods, if validation data is provided.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict, contains the return value of `model.test_step`. Typically,
          the values of the `Model`'s metrics are returned.  Example:
          `{'loss': 0.2, 'accuracy': 0.7}`.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_test_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `evaluate` methods.

    Also called at the end of a validation batch in the `fit`
    methods, if validation data is provided.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_predict_batch_begin(self, batch, logs=None):
    """Called at the beginning of a batch in `predict` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict, contains the return value of `model.predict_step`,
          it typically returns a dict with a key 'outputs' containing
          the model's outputs.
    """

  @doc_controls.for_subclass_implementers
  @generic_utils.default
  def on_predict_batch_end(self, batch, logs=None):
    """Called at the end of a batch in `predict` methods.

    Subclasses should override for any actions to run.

    Note that if the `steps_per_execution` argument to `compile` in
    `tf.keras.Model` is set to `N`, this method will only be called every `N`
    batches.

    Args:
        batch: Integer, index of batch within the current epoch.
        logs: Dict. Aggregated metric results up until this batch.
    """

  @doc_controls.for_subclass_implementers
  def on_train_begin(self, logs=None):
    """Called at the beginning of training.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_train_end(self, logs=None):
    """Called at the end of training.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently the output of the last call to `on_epoch_end()`
          is passed to this argument for this method but that may change in
          the future.
    Nr>   )ru   rW   r>   r>   r?   r   =  s    
zCallback.on_train_endc             C   s   dS )zCalled at the beginning of evaluation or validation.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_test_end(self, logs=None):
    """Called at the end of evaluation or validation.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently the output of the last call to
          `on_test_batch_end()` is passed to this argument for this method
          but that may change in the future.
    Nr>   )ru   rW   r>   r>   r?   r   T  s    
zCallback.on_test_endc             C   s   dS )zCalled at the beginning of prediction.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  @doc_controls.for_subclass_implementers
  def on_predict_end(self, logs=None):
    """Called at the end of prediction.

    Subclasses should override for any actions to run.

    Args:
        logs: Dict. Currently no data is passed to this argument for this method
          but that may change in the future.
    """

  def _implements_train_batch_hooks(self):
    """Determines if this Callback should be called for each train batch."""
    return (not generic_utils.is_default(self.on_batch_begin) or
            not generic_utils.is_default(self.on_batch_end) or
            not generic_utils.is_default(self.on_train_batch_begin) or
            not generic_utils.is_default(self.on_train_batch_end))

  def _implements_test_batch_hooks(self):
    """Determines if this Callback should be called for each test batch."""
    return (not generic_utils.is_default(self.on_test_batch_begin) or
            not generic_utils.is_default(self.on_test_batch_end))

  def _implements_predict_batch_hooks(self):
    """Determines if this Callback should be called for each predict batch."""
    return (not generic_utils.is_default(self.on_predict_batch_begin) or
            not generic_utils.is_default(self.on_predict_batch_end))


@keras_export('keras.callbacks.BaseLogger')
class BaseLogger(Callback):
  """Callback that accumulates epoch averages of metrics.

  This callback is automatically applied to every Keras model.

  Args:
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is in `on_epoch_end`.
          All others will be averaged in `on_epoch_end`.
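
  Example:

  An illustrative sketch (this callback is added automatically by the legacy
  graph-mode training loops, which also supply its `params`):

  ```python
  base_logger = tf.keras.callbacks.BaseLogger(stateful_metrics=['lr'])
  # 'lr' will be reported as-is; other metrics are averaged over the epoch.
  ```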
  """

  def __init__(self, stateful_metrics=None):
    super(BaseLogger, self).__init__()
    self.stateful_metrics = set(stateful_metrics or [])

  def on_epoch_begin(self, epoch, logs=None):
    self.seen = 0
    self.totals = {}

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    batch_size = logs.get('size', 0)
    # With a distribution strategy, multiple steps can run at the same time;
    # account for that in the `seen` calculation.
    num_steps = logs.get('num_steps', 1)
    self.seen += batch_size * num_steps

    for k, v in logs.items():
      if k in self.stateful_metrics:
        self.totals[k] = v
      else:
        if k in self.totals:
          self.totals[k] += v * batch_size
        else:
          self.totals[k] = v * batch_size

  def on_epoch_end(self, epoch, logs=None):
    if logs is not None:
      for k in self.params['metrics']:
        if k in self.totals:
          # Make value available to next callbacks.
          if k in self.stateful_metrics:
            logs[k] = self.totals[k]
          else:
            logs[k] = self.totals[k] / self.seen
r4   zkeras.callbacks.TerminateOnNaNc                   s*   e Zd ZdZ fddZdddZ  ZS )TerminateOnNaNzDCallback that terminates training when a NaN loss is encountered.
  """

  def __init__(self):
    super(TerminateOnNaN, self).__init__()
    self._supports_tf_logs = True

  def on_batch_end(self, batch, logs=None):
    logs = logs or {}
    loss = logs.get('loss')
    if loss is not None:
      loss = tf_utils.sync_to_numpy_or_python_type(loss)
      if np.isnan(loss) or np.isinf(loss):
        print('Batch %d: Invalid loss, terminating training' % (batch))
        self.model.stop_training = True


@keras_export('keras.callbacks.ProgbarLogger')
class ProgbarLogger(Callback):
  """Callback that prints metrics to stdout.

  Args:
      count_mode: One of `"steps"` or `"samples"`.
          Whether the progress bar should
          count samples seen or steps (batches) seen.
      stateful_metrics: Iterable of string names of metrics that
          should *not* be averaged over an epoch.
          Metrics in this list will be logged as-is.
          All others will be averaged over time (e.g. loss, etc).
          If not provided, defaults to the `Model`'s metrics.

  Raises:
      ValueError: In case of invalid `count_mode`.
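
  Example:

  An illustrative sketch (`x` and `y` are assumed training arrays); `fit`
  adds this callback automatically when `verbose=1`, so passing it explicitly
  is only needed in custom setups:

  ```python
  model.fit(x, y, verbose=0,
            callbacks=[tf.keras.callbacks.ProgbarLogger(count_mode='steps')])
  ```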
  r,   Nc                s   t t|   d| _|dkr$d| _n |dkr4d| _ntdt| |rPt|nt | _d| _	d | _
d | _d| _d| _d\| _| _| _d| _d| _d S )	NTr,   Fr'   zUnknown `count_mode`: r   r&   )NNN)r   r6   ry   r\   	use_stepsr   strr   rG   r   progbartargetr-   r*   _train_step
_test_step_predict_step_call_batch_hooks_called_in_fit)ru   r<   rG   )rc   r>   r?   ry     s     zProgbarLogger.__init__c             C   s   |d | _ |d | _| jr.d|kr.|d | _n | jsHd|krH|d | _nd | _| j dk| _| jd kry"| jj| _| jj| _	| jj
| _W n tk
r   d| _Y nX d S )Nr-   r*   r'   r,   r&   T)r-   r*   r   r   r   r9   _train_counterr   _test_counterr   Z_predict_counterr   AttributeError)ru   rx   r>   r>   r?   rJ     s    




zProgbarLogger.set_paramsc             C   s
   d| _ d S )NT)r   )ru   rW   r>   r>   r?   r     s    zProgbarLogger.on_train_beginc             C   s   | j s|   |   d S )N)r   _reset_progbar_maybe_init_progbar)ru   rW   r>   r>   r?   r     s    zProgbarLogger.on_test_beginc             C   s   |    |   d S )N)r   r   )ru   rW   r>   r>   r?   r     s    zProgbarLogger.on_predict_beginc             C   s:   |    |   | jr6| jdkr6td|d | jf  d S )Nr&   zEpoch %d/%d)r   r   r-   r*   r   )ru   r   rW   r>   r>   r?   r     s    zProgbarLogger.on_epoch_beginc             C   s   |  || d S )N)_batch_update_progbar)ru   r   rW   r>   r>   r?   r     s    z ProgbarLogger.on_train_batch_endc             C   s   | j s| || d S )N)r   r   )ru   r   rW   r>   r>   r?   r     s    zProgbarLogger.on_test_batch_endc             C   s   |  |d  d S )N)r   )ru   r   rW   r>   r>   r?   r   "  s    z"ProgbarLogger.on_predict_batch_endc             C   s   |  || j d S )N)_finalize_progbarr   )ru   r   rW   r>   r>   r?   r   &  s    zProgbarLogger.on_epoch_endc             C   s   | j s| || j d S )N)r   r   r   )ru   rW   r>   r>   r?   r   )  s    zProgbarLogger.on_test_endc             C   s   |  || j d S )N)r   r   )ru   rW   r>   r>   r?   r   -  s    zProgbarLogger.on_predict_endc             C   s   d| _ d | _d S )Nr   )r   r   )ru   r>   r>   r?   r   0  s    zProgbarLogger._reset_progbarc             C   sp   t | j| _| jr2| jt dd | jjD | _| jdkr^t| j| j| j| j	rTdndd| _| j
| j dS )zDInstantiate a `Progbar` if not yet, and update the stateful metrics.c             s   s   | ]}|j V  qd S )N)name)rB   mr>   r>   r?   r_   ?  s    z4ProgbarLogger._maybe_init_progbar.<locals>.<genexpr>Nstepsample)r   r-   rG   Z	unit_name)r   rG   r9   unionrE   r   r   r   r-   r   Z_update_stateful_metrics)ru   r>   r>   r?   r   4  s    
z!ProgbarLogger._maybe_init_progbarc             C   s   | j S )N)r   )ru   r>   r>   r?   r`   J  s    z+ProgbarLogger._implements_train_batch_hooksc             C   s   | j S )N)r   )ru   r>   r>   r?   ra   M  s    z*ProgbarLogger._implements_test_batch_hooksc             C   s   | j S )N)r   )ru   r>   r>   r?   rb   P  s    z-ProgbarLogger._implements_predict_batch_hooksc             C   s   |pi }|    | jr"|d | _nDt|}|dd}|dd}|dd || }|  j|7  _| jdkrt|}| jj	| jt
| dd dS )	zUpdates the progbar.r&   r   r   r   r   NF)finalize)r   r   r   rI   popr-   r   r|   r   updatelistr   )ru   r   rW   r)   r   Zadd_seenr>   r>   r?   r   S  s    


z#ProgbarLogger._batch_update_progbarc             C   st   t |p
i }| jd krT|d k	r>| }| js>||dd9 }|pF| j| _| j| j_| jj| jt	|
 dd d S )Nr   r&   T)r   )r   r|   r   numpyr   r   r   r   r   r   r   )ru   rW   counterr>   r>   r?   r   g  s    

zProgbarLogger._finalize_progbar)r,   N)N)N)N)N)N)N)N)N)N)N)N)rd   r   r   r   ry   rJ   r   r   r   r   r   r   r   r   r   r   r   r   r`   ra   rb   r   r   r   r>   r>   )rc   r?   r6     s(   










r6   zkeras.callbacks.Historyc                   s4   e Zd ZdZ fddZd	ddZd
ddZ  ZS )r2   am  Callback that records events into a `History` object.

  This callback is automatically applied to
  every Keras model. The `History` object
  gets returned by the `fit` method of models.

  Example:

  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=10, verbose=1)
  >>> print(history.params)
  {'verbose': 1, 'epochs': 10, 'steps': 1}
  >>> # check the keys of history object
  >>> print(history.history.keys())
  dict_keys(['loss'])

  """

  def __init__(self):
    super(History, self).__init__()
    self.history = {}

  def on_train_begin(self, logs=None):
    self.epoch = []

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    self.epoch.append(epoch)
    for k, v in logs.items():
      self.history.setdefault(k, []).append(v)

    # Set the history attribute on the model after the epoch ends, so that
    # the state which is set is the latest one.
    self.model.history = self


@keras_export('keras.callbacks.ModelCheckpoint')
class ModelCheckpoint(Callback):
  """Callback to save the Keras model or model weights at some frequency.

  `ModelCheckpoint` callback is used in conjunction with training using
  `model.fit()` to save a model or weights (in a checkpoint file) at some
  interval, so the model or weights can be loaded later to continue the training
  from the state saved.

  A few options this callback provides include:

  - Whether to only keep the model that has achieved the "best performance" so
    far, or whether to save the model at the end of every epoch regardless of
    performance.
  - Definition of 'best'; which quantity to monitor and whether it should be
    maximized or minimized.
  - The frequency it should save at. Currently, the callback supports saving at
    the end of every epoch, or after a fixed number of training batches.
  - Whether only weights are saved, or the whole model is saved.

  Note: If you get `WARNING:tensorflow:Can save best model only with <name>
  available, skipping` see the description of the `monitor` argument for
  details on how to get this right.

  Example:

  ```python
  model.compile(loss=..., optimizer=...,
                metrics=['accuracy'])

  EPOCHS = 10
  checkpoint_filepath = '/tmp/checkpoint'
  model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
      filepath=checkpoint_filepath,
      save_weights_only=True,
      monitor='val_accuracy',
      mode='max',
      save_best_only=True)

  # Model weights are saved at the end of every epoch, if it's the best seen
  # so far.
  model.fit(epochs=EPOCHS, callbacks=[model_checkpoint_callback])

  # The model weights (that are considered the best) are loaded into the model.
  model.load_weights(checkpoint_filepath)
  ```

  Args:
      filepath: string or `PathLike`, path to save the model file. e.g.
        filepath = os.path.join(working_dir, 'ckpt', file_name). `filepath`
        can contain named formatting options, which will be filled with the
        value of `epoch` and keys in `logs` (passed in `on_epoch_end`). For
        example: if
        `epoch` and keys in `logs` (passed in `on_epoch_end`). For example: if
        `filepath` is `weights.{epoch:02d}-{val_loss:.2f}.hdf5`, then the model
        checkpoints will be saved with the epoch number and the validation loss
        in the filename. The directory of the filepath should not be reused by
        any other callbacks to avoid conflicts.
      monitor: The metric name to monitor. Typically the metrics are set by the
        `Model.compile` method. Note:

        * Prefix the name with `"val_"` to monitor validation metrics.
        * Use `"loss"` or "`val_loss`" to monitor the model's total loss.
        * If you specify metrics as strings, like `"accuracy"`, pass the same
          string (with or without the `"val_"` prefix).
        * If you pass `metrics.Metric` objects, `monitor` should be set to
          `metric.name`
        * If you're not sure about the metric names you can check the contents
          of the `history.history` dictionary returned by
          `history = model.fit()`
        * Multi-output models set additional prefixes on the metric names.

      verbose: verbosity mode, 0 or 1.
      save_best_only: if `save_best_only=True`, it only saves when the model
        is considered the "best" and the latest best model according to the
        quantity monitored will not be overwritten. If `filepath` doesn't
        contain formatting options like `{epoch}` then `filepath` will be
        overwritten by each new better model.
      mode: one of {'auto', 'min', 'max'}. If `save_best_only=True`, the
        decision to overwrite the current save file is made based on either
        the maximization or the minimization of the monitored quantity.
        For `val_acc`, this should be `max`, for `val_loss` this should be
        `min`, etc. In `auto` mode, the mode is set to `max` if the quantities
        monitored are 'acc' or start with 'fmeasure' and are set to `min` for
        the rest of the quantities.
      save_weights_only: if True, then only the model's weights will be saved
        (`model.save_weights(filepath)`), else the full model is saved
        (`model.save(filepath)`).
      save_freq: `'epoch'` or integer. When using `'epoch'`, the callback saves
        the model after each epoch. When using an integer, the callback saves
        the model at the end of this many batches. If the `Model` is compiled with
        `steps_per_execution=N`, then the saving criteria will be
        checked every Nth batch. Note that if the saving isn't aligned to
        epochs, the monitored metric may potentially be less reliable (it
        could reflect as little as 1 batch, since the metrics get reset every
        epoch). Defaults to `'epoch'`.
      options: Optional `tf.train.CheckpointOptions` object if
        `save_weights_only` is true or optional `tf.saved_model.SaveOptions`
        object if `save_weights_only` is false.
      **kwargs: Additional arguments for backwards compatibility. Possible key
        is `period`.
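
  Example of batch-level saving (a sketch; `working_dir` is an assumed
  variable):

  ```python
  model_checkpoint_callback = tf.keras.callbacks.ModelCheckpoint(
      filepath=os.path.join(working_dir, 'ckpt-{epoch:02d}'),
      save_weights_only=True,
      save_freq=100)  # Checks the saving criteria once every 100 batches.
  ```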
  """

  def __init__(self,
               filepath,
               monitor='val_loss',
               verbose=0,
               save_best_only=False,
               save_weights_only=False,
               mode='auto',
               save_freq='epoch',
               options=None,
               **kwargs):
    super(ModelCheckpoint, self).__init__()
    self._supports_tf_logs = True
    self.monitor = monitor
    self.verbose = verbose
    self.filepath = path_to_string(filepath)
    self.save_best_only = save_best_only
    self.save_weights_only = save_weights_only
    self.save_freq = save_freq
    self.epochs_since_last_save = 0
    self._batches_seen_since_last_saving = 0
    self._last_batch_seen = 0

    if save_weights_only:
      if options is None or isinstance(
          options, checkpoint_options_lib.CheckpointOptions):
        self._options = options or checkpoint_options_lib.CheckpointOptions()
      else:
        raise TypeError('If save_weights_only is True, then `options` must be '
                        'either None or a tf.train.CheckpointOptions')
    else:
      if options is None or isinstance(options, save_options_lib.SaveOptions):
        self._options = options or save_options_lib.SaveOptions()
      else:
        raise TypeError('If save_weights_only is False, then `options` must be '
                        'either None or a tf.saved_model.SaveOptions')

    # Deprecated field `load_weights_on_restart` is for loading the checkpoint
    # file from `filepath` at the start of `model.fit()`.
    if 'load_weights_on_restart' in kwargs:
      self.load_weights_on_restart = kwargs['load_weights_on_restart']
      logging.warning('`load_weights_on_restart` argument is deprecated. '
                      'Please use `model.load_weights()` for loading weights '
                      'before the start of `model.fit()`.')
    else:
      self.load_weights_on_restart = False

    # Deprecated field `period` is for the number of epochs between which
    # the model is saved.
    if 'period' in kwargs:
      self.period = kwargs['period']
      logging.warning('`period` argument is deprecated. Please use `save_freq` '
                      'to specify the frequency in number of batches seen.')
    else:
      self.period = 1

    if mode not in ['auto', 'min', 'max']:
      logging.warning('ModelCheckpoint mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    if mode == 'min':
      self.monitor_op = np.less
      self.best = np.Inf
    elif mode == 'max':
      self.monitor_op = np.greater
      self.best = -np.Inf
    else:
      if 'acc' in self.monitor or self.monitor.startswith('fmeasure'):
        self.monitor_op = np.greater
        self.best = -np.Inf
      else:
        self.monitor_op = np.less
        self.best = np.Inf

    if self.save_freq != 'epoch' and not isinstance(self.save_freq, int):
      raise ValueError('Unrecognized save_freq: {}'.format(self.save_freq))

    # Only the chief worker writes model checkpoints, but all workers
    # restore the checkpoint at on_train_begin().
    self._chief_worker_only = False

  def on_train_begin(self, logs=None):
    if self.load_weights_on_restart:
      filepath_to_load = (
          self._get_most_recently_modified_file_matching_pattern(self.filepath))
      if (filepath_to_load is not None and
          self._checkpoint_exists(filepath_to_load)):
        try:
          # `filepath` may contain placeholders such as `{epoch:02d}`, so load
          # the most recently modified file whose name matches the pattern.
          self.model.load_weights(filepath_to_load)
        except (IOError, ValueError) as e:
          raise ValueError('Error loading file from {}. Reason: {}'.format(
              filepath_to_load, e))

  def _implements_train_batch_hooks(self):
    # Only call batch hooks when saving on batch.
    return self.save_freq != 'epoch'

  def on_train_batch_end(self, batch, logs=None):
    if self._should_save_on_batch(batch):
      self._save_model(epoch=self._current_epoch, logs=logs)

  def on_epoch_begin(self, epoch, logs=None):
    self._current_epoch = epoch

  def on_epoch_end(self, epoch, logs=None):
    self.epochs_since_last_save += 1
    if self.save_freq == 'epoch':
      self._save_model(epoch=epoch, logs=logs)

  def _should_save_on_batch(self, batch):
    """Handles batch-level saving logic, supports steps_per_execution."""
    if self.save_freq == 'epoch':
      return False

    if batch <= self._last_batch_seen:  # New epoch.
      add_batches = batch + 1  # Batches are zero-indexed.
    else:
      add_batches = batch - self._last_batch_seen
    self._batches_seen_since_last_saving += add_batches
    self._last_batch_seen = batch

    if self._batches_seen_since_last_saving >= self.save_freq:
      self._batches_seen_since_last_saving = 0
      return True
    return False

  def _save_model(self, epoch, logs):
    """Saves the model.

    Args:
        epoch: the epoch this iteration is in.
        logs: the `logs` dict passed in to `on_batch_end` or `on_epoch_end`.
    r   Nz5Can save best model only with %s available, skipping.z@
Epoch %05d: %s improved from %0.5f to %0.5f, saving model to %sr&   T)	overwriter   z*
Epoch %05d: %s did not improve from %0.5fz
Epoch %05d: saving model to %szgPlease specify a non-directory filepath for ModelCheckpoint. Filepath used is an existing directory: {}zis a directory)r/   r   r   r   r   r   r|   _get_file_pathr   r   r   r   r   r   r   r-   r   r   r9   save_weightsr   save_maybe_remove_fileIsADirectoryErrorr   r   r   argslower)ru   r   rW   r   currentr   r>   r>   r?   r     sP    



zModelCheckpoint._save_modelc          
   C   sl   y| j jf d|d i|}W n4 tk
rR } ztd| j |W dd}~X Y nX t|| jj| _| jS )z%Returns the file path for checkpoint.r   r&   z9Failed to format this callback filepath: "{}". Reason: {}N)r   r   KeyErrorr   Zwrite_filepathr9   distribute_strategy_write_filepath)ru   r   rW   	file_pathr   r>   r>   r?   r    s    zModelCheckpoint._get_file_pathc             C   s   t | j| jj d S )N)r   Zremove_temp_dir_with_filepathr  r9   r
  )ru   r>   r>   r?   r    s    z"ModelCheckpoint._maybe_remove_filec             C   s4   | drt|S t|}t|d }|p2|S )z;Returns whether the checkpoint `filepath` refers to exists.z.h5z.index)endswithr   file_exists_v2)ru   r   Ztf_saved_model_existsZ!tf_weights_only_checkpoint_existsr>   r>   r?   r     s    



z"ModelCheckpoint._checkpoint_existsc             C   s   t j|}t j|}dtdd| d }t|}|dk	rXt|t j|rX|S d}d}d}d}	t	
|rxrt |D ]d}
t||
r~t j||
}t j|}|	dks||	kr|}	||kr|}|}d}q~||kr~|d7 }q~W |dkr|S |	S dS )a  Returns the most recently modified filepath matching pattern.

    Pattern may contain a Python formatting placeholder. If
    `tf.train.latest_checkpoint()` does not return None, use that; otherwise,
    check for the most recently modified file that matches the pattern.

    In the rare case where more than one pattern-matching file has the same
    most recent modified time, return the filepath that is largest (by the `>`
    operator, lexicographically using the numeric equivalents). This provides
    a tie-breaker when multiple files are most recent. Note that a larger
    `filepath` can sometimes indicate a later time of modification (for
    instance, when epoch/batch is used as a formatting option), but not
    necessarily (when accuracy or loss is used). The tie-breaker is put in the
    logic as a best effort to return the most recent file, and to avoid a
    nondeterministic result.

    Modified time of a file is obtained with `os.path.getmtime()`.

    This utility function is best demonstrated via an example:

    ```python
    file_pattern = 'f.batch{batch:02d}epoch{epoch:02d}.h5'
    test_dir = self.get_temp_dir()
    path_pattern = os.path.join(test_dir, file_pattern)
    file_paths = [
        os.path.join(test_dir, file_name) for file_name in
        ['f.batch03epoch02.h5', 'f.batch02epoch02.h5', 'f.batch01epoch01.h5']
    ]
    for file_path in file_paths:
      # Write something to each of the files
    self.assertEqual(
        _get_most_recently_modified_file_matching_pattern(path_pattern),
        file_paths[-1])
    ```

    Args:
        pattern: The file pattern that may optionally contain python placeholder
            such as `{epoch:02d}`.

    Returns:
        The most recently modified file's full filepath matching `pattern`. If
        `pattern` does not contain any placeholder, this returns the filepath
        that
        exactly matches `pattern`. Returns `None` if no match is found.
    ^z{.*}z.*$Nr   r&   )ospathdirnamebasenameresubr   Zlatest_checkpointmatchr   r  listdirjoingetmtime)ru   patterndir_name	base_nameZbase_name_regexZlatest_tf_checkpointZlatest_mod_timeZfile_path_with_latest_mod_timeZn_file_with_latest_mod_timeZ file_path_with_largest_file_name	file_namer  Zmod_timer>   r>   r?   r     s8    .

zAModelCheckpoint._get_most_recently_modified_file_matching_pattern)r   r   FFr   r   N)N)N)N)N)rd   r   r   r   ry   r   r`   r   r   r   r   r   r  r  r   r   r   r>   r>   )rc   r?   r     s&   c      J



=	r   z-keras.callbacks.experimental.BackupAndRestore)Zv1c                   s>   e Zd ZdZ fddZdddZdddZdd	d
Z  ZS )BackupAndRestorea
  Callback to back up and restore the training state.

  `BackupAndRestore` callback is intended to recover from interruptions that
  happened in the middle of a model.fit execution by backing up the
  training states in a temporary checkpoint file (based on TF CheckpointManager)
  at the end of each epoch. If training is restarted before completion, the
  training state and model are restored to the most recently saved state at the
  beginning of a new model.fit() run.
  Note that the user is responsible for bringing jobs back up.
  This callback is important to the backup and restore mechanism for fault
  tolerance. The model restored from a previous checkpoint is expected to be
  the same as the one used to back up. If the user changes arguments passed to
  compile or fit, the checkpoint saved for fault tolerance can become invalid.

  Note:
  1. This callback is not compatible with disabling eager execution.
  2. A checkpoint is saved at the end of each epoch. When restoring, we'll redo
  any partial work from an unfinished epoch in which the training was restarted
  (so the work done before an interruption doesn't affect the final model
  state).
  3. This works for both single-worker and multi-worker modes; only
  MirroredStrategy and MultiWorkerMirroredStrategy are supported for now.

  Example:

  >>> class InterruptingCallback(tf.keras.callbacks.Callback):
  ...   def on_epoch_begin(self, epoch, logs=None):
  ...     if epoch == 4:
  ...       raise RuntimeError('Interrupting!')
  >>> callback = tf.keras.callbacks.experimental.BackupAndRestore(
  ... backup_dir="/tmp/backup")
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> try:
  ...   model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
  ...             batch_size=1, callbacks=[callback, InterruptingCallback()],
  ...             verbose=0)
  ... except:
  ...   pass
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5), epochs=10,
  ...             batch_size=1, callbacks=[callback], verbose=0)
  >>> # Only 6 more epochs are run, since the first training run was
  >>> # interrupted at zero-indexed epoch 4; the second run continues from 4 to 9.
  >>> len(history.history['loss'])
  6

  Args:
      backup_dir: String, path to store the checkpoint.
        e.g. backup_dir = os.path.join(working_dir, 'backup')
        This is the directory in which the system stores temporary files to
        recover the model from jobs terminated unexpectedly. The directory
        cannot be reused elsewhere to store other files, e.g. by
        BackupAndRestore callback of another training, or by another callback
        (ModelCheckpoint) of the same training.
  """

  def __init__(self, backup_dir):
    super(BackupAndRestore, self).__init__()
    self.backup_dir = backup_dir
    self._supports_tf_logs = True
    self._supported_strategies = (
        mirrored_strategy.MirroredStrategy,
        collective_all_reduce_strategy.CollectiveAllReduceStrategy,
        tpu_strategy.TPUStrategy, tpu_strategy.TPUStrategyV2,
        parameter_server_strategy_v2.ParameterServerStrategyV2)

    if not context.executing_eagerly():
      if ops.inside_function():
        raise ValueError('This Callback\'s method contains Python state and '
                         'should be called outside of `tf.function`s.')
      else:  # Legacy graph mode.
        raise ValueError(
            'BackupAndRestore only supports eager mode. In graph '
            'mode, consider using ModelCheckpoint to manually save '
            'and restore weights with `model.load_weights()` and by '
            'providing `initial_epoch` in `model.fit()` for fault tolerance.')

    # Only the chief worker writes model checkpoints, but all workers
    # restore the checkpoint at on_train_begin().
    self._chief_worker_only = False

  def on_train_begin(self, logs=None):
    # TrainingState manages the training state needed for failure-recovery of
    # a worker in training.
    # pylint: disable=protected-access
    if self.model._distribution_strategy and not isinstance(
        self.model.distribute_strategy, self._supported_strategies):
      raise NotImplementedError(
          '%s is not supported yet. '
          'Currently BackupAndRestore callback only supports empty strategy, '
          'MirroredStrategy, MultiWorkerMirroredStrategy and TPUStrategy.' %
          type(self.model.distribute_strategy).__name__)
    self.model._training_state = (
        worker_training_state.WorkerTrainingState(self.model, self.backup_dir))
    self._training_state = self.model._training_state
    self._training_state.restore()

  def on_train_end(self, logs=None):
    # pylint: disable=protected-access
    # On exit of training, delete the training state backup file saved for the
    # purpose of worker recovery, then clean up the state.
    self._training_state.delete_backup()
    del self._training_state
    del self.model._training_state

  def on_epoch_end(self, epoch, logs=None):
    # Back up the model and current epoch for possible future recovery.
    self._training_state.back_up(epoch)


@keras_export('keras.callbacks.EarlyStopping')
class EarlyStopping(Callback):
  """Stop training when a monitored metric has stopped improving.
  Assuming the goal of a training is to minimize the loss. With this, the
  metric to be monitored would be `'loss'`, and mode would be `'min'`. A
  `model.fit()` training loop will check at end of every epoch whether
  the loss is no longer decreasing, considering the `min_delta` and
  `patience` if applicable. Once it's found no longer decreasing,
  `model.stop_training` is marked True and the training terminates.

  The quantity to be monitored needs to be available in `logs` dict.
  To make it so, pass the loss or metrics at `model.compile()`.

  Args:
    monitor: Quantity to be monitored.
    min_delta: Minimum change in the monitored quantity
        to qualify as an improvement, i.e. an absolute
        change of less than min_delta, will count as no
        improvement.
    patience: Number of epochs with no improvement
        after which training will be stopped.
    verbose: verbosity mode.
    mode: One of `{"auto", "min", "max"}`. In `min` mode,
        training will stop when the quantity
        monitored has stopped decreasing; in `"max"`
        mode it will stop when the quantity
        monitored has stopped increasing; in `"auto"`
        mode, the direction is automatically inferred
        from the name of the monitored quantity.
    baseline: Baseline value for the monitored quantity.
        Training will stop if the model doesn't show improvement over the
        baseline.
    restore_best_weights: Whether to restore model weights from
        the epoch with the best value of the monitored quantity.
        If False, the model weights obtained at the last step of
        training are used. An epoch will be restored regardless
        of the performance relative to the `baseline`. If no epoch
        improves on `baseline`, training will run for `patience`
        epochs and restore weights from the best epoch in that set.

  Example:

  >>> callback = tf.keras.callbacks.EarlyStopping(monitor='loss', patience=3)
  >>> # This callback will stop the training when there is no improvement in
  >>> # the loss for three consecutive epochs.
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=10, batch_size=1, callbacks=[callback],
  ...                     verbose=0)
  >>> len(history.history['loss'])  # Only 4 epochs are run.
  4
  """

  def __init__(self,
               monitor='val_loss',
               min_delta=0,
               patience=0,
               verbose=0,
               mode='auto',
               baseline=None,
               restore_best_weights=False):
    super(EarlyStopping, self).__init__()

    self.monitor = monitor
    self.patience = patience
    self.verbose = verbose
    self.baseline = baseline
    self.min_delta = abs(min_delta)
    self.wait = 0
    self.stopped_epoch = 0
    self.restore_best_weights = restore_best_weights
    self.best_weights = None

    if mode not in ['auto', 'min', 'max']:
      logging.warning('EarlyStopping mode %s is unknown, '
                      'fallback to auto mode.', mode)
      mode = 'auto'

    if mode == 'min':
      self.monitor_op = np.less
    elif mode == 'max':
      self.monitor_op = np.greater
    elif 'acc' in self.monitor:
      self.monitor_op = np.greater
    else:
      self.monitor_op = np.less

    # `_is_improvement` compares `value - min_delta` against the reference,
    # so the sign of `min_delta` must follow the direction of `monitor_op`.
    if self.monitor_op == np.greater:
      self.min_delta *= 1
    else:
      self.min_delta *= -1

  def on_train_begin(self, logs=None):
    # Allow instances to be re-used.
    self.wait = 0
    self.stopped_epoch = 0
    self.best = np.Inf if self.monitor_op == np.less else -np.Inf
    self.best_weights = None

  def on_epoch_end(self, epoch, logs=None):
    current = self.get_monitor_value(logs)
    if current is None:
      return
    if self.restore_best_weights and self.best_weights is None:
      # Restore the weights after first epoch if no progress is ever made.
      self.best_weights = self.model.get_weights()

    self.wait += 1
    if self._is_improvement(current, self.best):
      self.best = current
      if self.restore_best_weights:
        self.best_weights = self.model.get_weights()
      # Only restart wait if we beat both the baseline and our previous best.
      if self.baseline is None or self._is_improvement(current, self.baseline):
        self.wait = 0

    if self.wait >= self.patience:
      self.stopped_epoch = epoch
      self.model.stop_training = True
      if self.restore_best_weights and self.best_weights is not None:
        if self.verbose > 0:
          print('Restoring model weights from the end of the best epoch.')
        self.model.set_weights(self.best_weights)

  def on_train_end(self, logs=None):
    if self.stopped_epoch > 0 and self.verbose > 0:
      print('Epoch %05d: early stopping' % (self.stopped_epoch + 1))

  def get_monitor_value(self, logs):
    logs = logs or {}
    monitor_value = logs.get(self.monitor)
    if monitor_value is None:
      logging.warning('Early stopping conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    return monitor_value

  def _is_improvement(self, monitor_value, reference_value):
    return self.monitor_op(monitor_value - self.min_delta, reference_value)
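
# --- Illustrative sketch (not part of the original module) -----------------
# A hedged, worked example of the `min_delta` arithmetic in `_is_improvement`
# above. For mode='min', `__init__` flips the sign of `min_delta`, so the
# test becomes `np.less(current + |min_delta|, best)`. The helper name is an
# assumption for illustration only.
def _example_min_delta_semantics():
  import numpy as np

  monitor_op = np.less   # mode='min'
  min_delta = -0.01      # abs(0.01) with the sign flipped for np.less
  best = 0.500

  # A loss drop of 0.005 (< |min_delta|) does not count as an improvement:
  assert not monitor_op(0.495 - min_delta, best)
  # A loss drop of 0.020 (> |min_delta|) does count as an improvement:
  assert monitor_op(0.480 - min_delta, best)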


@keras_export('keras.callbacks.RemoteMonitor')
class RemoteMonitor(Callback):
  """Callback used to stream events to a server.

  Requires the `requests` library.
  Events are sent to `root + '/publish/epoch/end/'` by default. Calls are
  HTTP POST, with a `data` argument which is a
  JSON-encoded dictionary of event data.
  If `send_as_json=True`, the content type of the request will be
  `"application/json"`.
  Otherwise the serialized JSON will be sent within a form.

  Args:
    root: String; root URL of the target server.
    path: String; path relative to `root` to which the events will be sent.
    field: String; JSON field under which the data will be stored.
        The field is used only if the payload is sent within a form
        (i.e. send_as_json is set to False).
    headers: Dictionary; optional custom HTTP headers.
    send_as_json: Boolean; whether the request should be
        sent as `"application/json"`.
  """

  def __init__(self,
               root='http://localhost:9000',
               path='/publish/epoch/end/',
               field='data',
               headers=None,
               send_as_json=False):
    super(RemoteMonitor, self).__init__()
    self.root = root
    self.path = path
    self.field = field
    self.headers = headers
    self.send_as_json = send_as_json

  def on_epoch_end(self, epoch, logs=None):
    if requests is None:
      raise ImportError('RemoteMonitor requires the `requests` library.')
    logs = logs or {}
    send = {}
    send['epoch'] = epoch
    for k, v in logs.items():
      # NumPy arrays and scalars are not JSON-serializable; unwrap them.
      if isinstance(v, (np.ndarray, np.generic)):
        send[k] = v.item()
      else:
        send[k] = v
    try:
      if self.send_as_json:
        requests.post(self.root + self.path, json=send, headers=self.headers)
      else:
        requests.post(
            self.root + self.path, {self.field: json.dumps(send)},
            headers=self.headers)
    except requests.exceptions.RequestException:
      logging.warning('Warning: could not reach RemoteMonitor '
                      'root server at ' + str(self.root))
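
# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of a receiving endpoint for RemoteMonitor, using only the
# Python standard library. The handler below is an assumption for
# illustration; any HTTP server that accepts form-encoded or JSON POSTs at
# `root + path` will work.
def _example_remote_monitor_server():
  from http.server import BaseHTTPRequestHandler, HTTPServer

  class Handler(BaseHTTPRequestHandler):

    def do_POST(self):
      length = int(self.headers.get('Content-Length', 0))
      body = self.rfile.read(length)
      # With send_as_json=False the body is form-encoded and the JSON
      # payload sits under the `field` key (default 'data').
      print(body.decode('utf-8', errors='replace'))
      self.send_response(200)
      self.end_headers()

  # Matches RemoteMonitor's default root='http://localhost:9000'.
  HTTPServer(('localhost', 9000), Handler).serve_forever()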
 fdd	ZdddZddd	Z  ZS )LearningRateSchedulera  Learning rate scheduler.

  At the beginning of every epoch, this callback gets the updated learning
  rate value from the `schedule` function provided at `__init__`, passing in
  the current epoch and current learning rate, and applies the updated
  learning rate to the optimizer.

  Args:
    schedule: a function that takes an epoch index (integer, indexed from 0)
        and current learning rate (float) as inputs and returns a new
        learning rate as output (float).
    verbose: int. 0: quiet, 1: update messages.

  Example:

  >>> # This function keeps the initial learning rate for the first ten epochs
  >>> # and decreases it exponentially after that.
  >>> def scheduler(epoch, lr):
  ...   if epoch < 10:
  ...     return lr
  ...   else:
  ...     return lr * tf.math.exp(-0.1)
  >>>
  >>> model = tf.keras.models.Sequential([tf.keras.layers.Dense(10)])
  >>> model.compile(tf.keras.optimizers.SGD(), loss='mse')
  >>> round(model.optimizer.lr.numpy(), 5)
  0.01

  >>> callback = tf.keras.callbacks.LearningRateScheduler(scheduler)
  >>> history = model.fit(np.arange(100).reshape(5, 20), np.zeros(5),
  ...                     epochs=15, callbacks=[callback], verbose=0)
  >>> round(model.optimizer.lr.numpy(), 5)
  0.00607

  """

  def __init__(self, schedule, verbose=0):
    super(LearningRateScheduler, self).__init__()
    self.schedule = schedule
    self.verbose = verbose

  def on_epoch_begin(self, epoch, logs=None):
    if not hasattr(self.model.optimizer, 'lr'):
      raise ValueError('Optimizer must have a "lr" attribute.')
    try:  # new API: schedule(epoch, lr)
      lr = float(backend.get_value(self.model.optimizer.lr))
      lr = self.schedule(epoch, lr)
    except TypeError:  # old API for backward compatibility: schedule(epoch)
      lr = self.schedule(epoch)
    if not isinstance(lr, (ops.Tensor, float, np.float32, np.float64)):
      raise ValueError('The output of the "schedule" function '
                       'should be float.')
    if isinstance(lr, ops.Tensor) and not lr.dtype.is_floating:
      raise ValueError('The dtype of Tensor should be float')
    backend.set_value(self.model.optimizer.lr, backend.get_value(lr))
    if self.verbose > 0:
      print('\nEpoch %05d: LearningRateScheduler setting learning '
            'rate to %s.' % (epoch + 1, lr))

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    logs['lr'] = backend.get_value(self.model.optimizer.lr)
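
# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of the `schedule` contract used above: the function
# receives `(epoch, lr)` and returns the new float learning rate. The
# step-decay rule below is an assumption for illustration only.
def _example_step_decay(epoch, lr):
  # Halve the learning rate every 10 epochs, starting at epoch 10.
  if epoch > 0 and epoch % 10 == 0:
    return lr * 0.5
  return lr

# Usage: model.fit(..., callbacks=[LearningRateScheduler(_example_step_decay)])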
def keras_model_summary(name, data, step=None):
  """Writes a Keras model as JSON to a Summary.

  Writing the Keras model configuration allows the TensorBoard graph plugin to
  render a conceptual graph, as opposed to a graph of ops. If the model fails
  to serialize as JSON, the error is ignored and False is returned.

  Args:
    name: A name for this summary. The summary tag used for TensorBoard will be
      this name prefixed by any active name scopes.
    data: A Keras Model to write.
    step: Explicit `int64`-castable monotonic step value for this summary. If
      omitted, this defaults to `tf.summary.experimental.get_step()`, which must
      not be None.

  Returns:
    True on success, or False if no summary was written because no default
    summary writer was available.

  Raises:
    ValueError: if a default writer exists, but no step was provided and
      `tf.summary.experimental.get_step()` is None.
  Zgraph_keras_model   1z1Model failed to serialize as JSON. Ignoring... %sFNzcpu:0)rM  )tagtensorr   metadata)r   ZSummaryMetadataZplugin_dataZplugin_namecontentto_json	Exceptionr   r   r   Zsummary_scoper   Zdevicer   Zconstantr   stringwrite)	r   rQ   r   Zsummary_metadataZjson_stringexcrP  _rQ  r>   r>   r?   keras_model_summary  s    rZ  zkeras.callbacks.TensorBoardc            	       s>  e Zd ZdZdK fd	d
	Zdd Zdd Zedd Zedd Z	dd Z
dd Zdd Zdd Zdd Zdd Zdd  Zd!d" Zd#d$ ZdLd%d&ZdMd'd(ZdNd)d*ZdOd+d,Zd-d. ZdPd/d0ZdQd1d2ZdRd3d4ZdSd5d6Zd7d8 ZdTd9d:Zd;d< Zd=d> Zd?d@ Z dAdB Z!dCdD Z"dEdF Z#dGdH Z$dUdIdJZ%  Z&S )VTensorBoarda  Enable visualizations for TensorBoard.

  TensorBoard is a visualization tool provided with TensorFlow.

  This callback logs events for TensorBoard, including:

  * Metrics summary plots
  * Training graph visualization
  * Activation histograms
  * Sampled profiling

  When used in `Model.evaluate`, in addition to epoch summaries, a summary
  recording evaluation metrics vs `Model.optimizer.iterations` is also
  written. The metric names are prefixed with `evaluation`, and
  `Model.optimizer.iterations` serves as the step in the visualized
  TensorBoard.

  If you have installed TensorFlow with pip, you should be able
  to launch TensorBoard from the command line:

  ```
  tensorboard --logdir=path_to_your_logs
  ```

  You can find more information about TensorBoard
  [here](https://www.tensorflow.org/get_started/summaries_and_tensorboard).

  Args:
      log_dir: the path of the directory where to save the log files to be
        parsed by TensorBoard. e.g. log_dir = os.path.join(working_dir, 'logs')
        This directory should not be reused by any other callbacks.
      histogram_freq: frequency (in epochs) at which to compute activation and
        weight histograms for the layers of the model. If set to 0, histograms
        won't be computed. Validation data (or split) must be specified for
        histogram visualizations.
      write_graph: whether to visualize the graph in TensorBoard. The log file
        can become quite large when write_graph is set to True.
      write_images: whether to write model weights to visualize as image in
        TensorBoard.
      write_steps_per_second: whether to log the training steps per second into
        TensorBoard. This supports both epoch and batch frequency logging.
      update_freq: `'batch'` or `'epoch'` or integer. When using `'batch'`,
        writes the losses and metrics to TensorBoard after each batch. The same
        applies for `'epoch'`. If using an integer, let's say `1000`, the
        callback will write the metrics and losses to TensorBoard every 1000
        batches. Note that writing too frequently to TensorBoard can slow down
        your training.
      profile_batch: Profile the batch(es) to sample compute characteristics.
        profile_batch must be a non-negative integer or a tuple of integers.
        A pair of positive integers signifies a range of batches to profile.
        By default, the second batch is profiled. Set profile_batch=0
        to disable profiling.
      embeddings_freq: frequency (in epochs) at which embedding layers will be
        visualized. If set to 0, embeddings won't be visualized.
      embeddings_metadata: Dictionary which maps embedding layer names to the
        filename of a file in which to save metadata for the embedding layer.
        In case the same metadata file is to be
        used for all embedding layers, a single filename can be passed.

  Examples:

  Basic usage:

  ```python
  tensorboard_callback = tf.keras.callbacks.TensorBoard(log_dir="./logs")
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  # Then run the tensorboard command to view the visualizations.
  ```

  Custom batch-level summaries in a subclassed Model:

  ```python
  class MyModel(tf.keras.Model):

    def build(self, _):
      self.dense = tf.keras.layers.Dense(10)

    def call(self, x):
      outputs = self.dense(x)
      tf.summary.histogram('outputs', outputs)
      return outputs

  model = MyModel()
  model.compile('sgd', 'mse')

  # Make sure to set `update_freq=N` to log a batch-level summary every N batches.
  # In addition to any `tf.summary` contained in `Model.call`, metrics added in
  # `Model.compile` will be logged every N batches.
  tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
  model.fit(x_train, y_train, callbacks=[tb_callback])
  ```

  Custom batch-level summaries in a Functional API Model:

  ```python
  def my_summary(x):
    tf.summary.histogram('x', x)
    return x

  inputs = tf.keras.Input(10)
  x = tf.keras.layers.Dense(10)(inputs)
  outputs = tf.keras.layers.Lambda(my_summary)(x)
  model = tf.keras.Model(inputs, outputs)
  model.compile('sgd', 'mse')

  # Make sure to set `update_freq=N` to log a batch-level summary every N batches.
  # In addition to any `tf.summary` contained in `Model.call`, metrics added in
  # `Model.compile` will be logged every N batches.
  tb_callback = tf.keras.callbacks.TensorBoard('./logs', update_freq=1)
  model.fit(x_train, y_train, callbacks=[tb_callback])
  ```

  Profiling:

  ```python
  # Profile a single batch, e.g. the 5th batch.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir='./logs', profile_batch=5)
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])

  # Profile a range of batches, e.g. from 10 to 20.
  tensorboard_callback = tf.keras.callbacks.TensorBoard(
      log_dir='./logs', profile_batch=(10,20))
  model.fit(x_train, y_train, epochs=2, callbacks=[tensorboard_callback])
  ```
  """

  def __init__(self,
               log_dir='logs',
               histogram_freq=0,
               write_graph=True,
               write_images=False,
               write_steps_per_second=False,
               update_freq='epoch',
               profile_batch=2,
               embeddings_freq=0,
               embeddings_metadata=None,
               **kwargs):
    super(TensorBoard, self).__init__()
    self._supports_tf_logs = True
    self._validate_kwargs(kwargs)

    self.log_dir = path_to_string(log_dir)
    self.histogram_freq = histogram_freq
    self.write_graph = write_graph
    self.write_images = write_images
    self.write_steps_per_second = write_steps_per_second
    self.update_freq = 1 if update_freq == 'batch' else update_freq
    self.embeddings_freq = embeddings_freq
    self.embeddings_metadata = embeddings_metadata
    self._init_profile_batch(profile_batch)
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._train_accumulated_time = 0
    self._batch_start_time = 0

    # Lazily initialized in order to avoid creating event files when
    # not needed.
    self._writers = {}

    # Used to restore any existing `SummaryWriter` after training ends.
    self._prev_summary_state = []

  def _validate_kwargs(self, kwargs):
    """Handles arguments that were supported in V1."""
    if kwargs.get('write_grads', False):
      logging.warning('`write_grads` will be ignored in TensorFlow 2.0 for '
                      'the `TensorBoard` Callback.')
    if kwargs.get('batch_size', False):
      logging.warning('`batch_size` is no longer needed in the `TensorBoard` '
                      'Callback and will be ignored in TensorFlow 2.0.')
    if kwargs.get('embeddings_layer_names', False):
      logging.warning('`embeddings_layer_names` is not supported in '
                      'TensorFlow 2.0. Instead, all `Embedding` layers will '
                      'be visualized.')
    if kwargs.get('embeddings_data', False):
      logging.warning('`embeddings_data` is not supported in TensorFlow 2.0. '
                      'Instead, all `Embedding` variables will be '
                      'visualized.')

    # Only allow kwargs that were supported in V1.
    unrecognized_kwargs = set(kwargs.keys()) - {
        'write_grads', 'embeddings_layer_names', 'embeddings_data',
        'batch_size'
    }
    if unrecognized_kwargs:
      raise ValueError(
          'Unrecognized arguments in `TensorBoard` Callback: ' +
          str(unrecognized_kwargs))

  def set_model(self, model):
    """Sets Keras model and writes graph if specified."""
    self.model = model
    self._log_write_dir = self._get_log_write_dir()

    self._train_dir = os.path.join(self._log_write_dir, 'train')
    self._train_step = self.model._train_counter  # pylint: disable=protected-access

    self._val_dir = os.path.join(self._log_write_dir, 'validation')
    self._val_step = self.model._test_counter  # pylint: disable=protected-access

    self._writers = {}  # Resets writers.

    self._should_write_train_graph = False
    if self.write_graph:
      self._write_keras_model_summary()
      self._should_write_train_graph = True
    if self.embeddings_freq:
      self._configure_embeddings()

  @property
  def _train_writer(self):
    if 'train' not in self._writers:
      self._writers['train'] = summary_ops_v2.create_file_writer_v2(
          self._train_dir)
    return self._writers['train']

  @property
  def _val_writer(self):
    if 'val' not in self._writers:
      self._writers['val'] = summary_ops_v2.create_file_writer_v2(
          self._val_dir)
    return self._writers['val']

  def _get_log_write_dir(self):
    """For multi-worker, only chief should write, others write to '/tmp'."""
    return distributed_file_utils.write_dirpath(self.log_dir,
                                                self.model.distribute_strategy)

  def _delete_tmp_write_dir(self):
    """Deletes tmp write directories for multi-worker."""
    distributed_file_utils.remove_temp_dirpath(self.log_dir,
                                               self.model.distribute_strategy)

  def _write_keras_model_train_graph(self):
    """Writes Keras model train_function graph to TensorBoard."""
    with self._train_writer.as_default():
      with summary_ops_v2.record_if(True):
        train_fn = self.model.train_tf_function
        # If the train_function is a `tf.function`, we can write out a graph.
        if hasattr(train_fn, 'function_spec'):
          summary_ops_v2.graph(train_fn._concrete_stateful_fn.graph)  # pylint: disable=protected-access

  def _write_keras_model_summary(self):
    """Writes Keras graph network summary to TensorBoard."""
    with self._train_writer.as_default():
      with summary_ops_v2.record_if(True):
        summary_writable = (
            self.model._is_graph_network or  # pylint: disable=protected-access
            self.model.__class__.__name__ == 'Sequential')
        if summary_writable:
          keras_model_summary('keras', self.model, step=0)

  def _configure_embeddings(self):
    """Configures the Projector for embeddings."""
    from google.protobuf import text_format
    from tensorflow.python.keras.layers import embeddings
    from tensorflow.python.keras.protobuf import projector_config_pb2

    config = projector_config_pb2.ProjectorConfig()
    for layer in self.model.layers:
      if isinstance(layer, embeddings.Embedding):
        embedding = config.embeddings.add()
        # Embeddings are always the first layer, so this naming should be
        # consistent in any Keras model checkpoints.
        name = 'layer_with_weights-0/embeddings/.ATTRIBUTES/VARIABLE_VALUE'
        embedding.tensor_name = name

        if self.embeddings_metadata is not None:
          if isinstance(self.embeddings_metadata, str):
            embedding.metadata_path = self.embeddings_metadata
          elif layer.name in self.embeddings_metadata.keys():
            embedding.metadata_path = self.embeddings_metadata.pop(layer.name)

    if self.embeddings_metadata and not isinstance(self.embeddings_metadata,
                                                   str):
      raise ValueError('Unrecognized `Embedding` layer names passed to '
                       '`keras.callbacks.TensorBoard` `embeddings_metadata` '
                       'argument: ' + str(self.embeddings_metadata.keys()))

    config_pbtxt = text_format.MessageToString(config)
    path = os.path.join(self._log_write_dir, 'projector_config.pbtxt')
    with gfile.Open(path, 'w') as f:
      f.write(config_pbtxt)

  def _push_writer(self, writer, step):
    """Sets the default writer for custom batch-level summaries."""
    if self.update_freq == 'epoch':
      return

    should_record = lambda: math_ops.equal(step % self.update_freq, 0)
    # Use `step.value()` here to avoid a deadlock observed with the raw
    # variable.
    summary_context = (writer.as_default(step.value()),
                       summary_ops_v2.record_if(should_record))
    self._prev_summary_state.append(summary_context)
    summary_context[0].__enter__()
    summary_context[1].__enter__()

  def _pop_writer(self):
    """Pops the current writer."""
    if self.update_freq == 'epoch':
      return

    # See _push_writer for the content of previous_context, which is a pair
    # of contexts.
    previous_context = self._prev_summary_state.pop()
    previous_context[1].__exit__(*sys.exc_info())
    previous_context[0].__exit__(*sys.exc_info())

  def _close_writers(self):
    for writer in self._writers.values():
      writer.close()

  def _init_profile_batch(self, profile_batch):
    """Validates `profile_batch` and sets the range of batches to profile.

    Sets the `_start_batch` and `_stop_batch` attributes specifying the first
    and last batch to profile. Setting `profile_batch=0` disables profiling.

    Args:
      profile_batch: The range of batches to profile. Should be a non-negative
        integer or a comma-separated string of a pair of positive integers. A
        pair of positive integers signifies a range of batches to profile.

    Raises:
      ValueError: If profile_batch is not an integer or a comma-separated
        pair of positive integers.
    """
    profile_batch_error_message = (
        'profile_batch must be a non-negative integer or 2-tuple of positive '
        'integers. A pair of positive integers signifies a range of batches '
        'to profile. Found: {}'.format(profile_batch))

    # Support the legacy way of specifying "start,stop" or "start" as str.
    if isinstance(profile_batch, str):
      profile_batch = str(profile_batch).split(',')
      profile_batch = nest.map_structure(int, profile_batch)

    if isinstance(profile_batch, int):
      self._start_batch = profile_batch
      self._stop_batch = profile_batch
    elif isinstance(profile_batch, (tuple, list)) and len(profile_batch) == 2:
      self._start_batch, self._stop_batch = profile_batch
    else:
      raise ValueError(profile_batch_error_message)

    if self._start_batch < 0 or self._stop_batch < self._start_batch:
      raise ValueError(profile_batch_error_message)

    # True when the profiler was successfully started by this callback.
    # The callback only stops the profiler it started, so that callbacks do
    # not interfere with each other.
    self._profiler_started = False
    if self._start_batch > 0:
      # Warm up and improve the profiling accuracy.
      self._start_profiler(logdir='')
      self._stop_profiler(save=False)
    # True when a trace is running.
    self._is_tracing = False

    # Setting `profile_batch=0` disables profiling.
    self._should_trace = not (self._start_batch == 0 and self._stop_batch == 0)

  def on_train_begin(self, logs=None):
    self._global_train_batch = 0
    self._previous_epoch_iterations = 0
    self._train_accumulated_time = 0
    self._push_writer(self._train_writer, self._train_step)

  def on_train_end(self, logs=None):
    self._pop_writer()

    if self._is_tracing:
      self._stop_trace()

    self._close_writers()
    self._delete_tmp_write_dir()

  def on_test_begin(self, logs=None):
    self._push_writer(self._val_writer, self._val_step)

  def on_test_end(self, logs=None):
    if self.model.optimizer and hasattr(self.model.optimizer, 'iterations'):
      with summary_ops_v2.record_if(True), self._val_writer.as_default():
        for name, value in logs.items():
          summary_ops_v2.scalar(
              'evaluation_' + name + '_vs_iterations',
              value,
              step=self.model.optimizer.iterations.read_value())
    self._pop_writer()

  def _implements_train_batch_hooks(self):
    # Only call batch hooks when tracing or write_steps_per_second is enabled.
    return self._should_trace or self.write_steps_per_second

  def on_train_batch_begin(self, batch, logs=None):
    self._global_train_batch += 1
    if self.write_steps_per_second:
      self._batch_start_time = time.time()
    if not self._should_trace:
      return

    if self._global_train_batch == self._start_batch:
      self._start_trace()

  def on_train_batch_end(self, batch, logs=None):
    if self._should_write_train_graph:
      self._write_keras_model_train_graph()
      self._should_write_train_graph = False
    if self.write_steps_per_second:
      batch_run_time = time.time() - self._batch_start_time
      self._train_accumulated_time += batch_run_time
      summary_ops_v2.scalar(
          'batch_steps_per_second', 1. / batch_run_time, step=self._train_step)
    if not self._should_trace:
      return

    if self._is_tracing and self._global_train_batch >= self._stop_batch:
      self._stop_trace()

  def on_epoch_begin(self, epoch, logs=None):
    # Keeps track of the start state for steps-per-second measurement.
    if self.write_steps_per_second:
      self._previous_epoch_iterations = self.model.optimizer.iterations.numpy()
      self._train_accumulated_time = 0

  def on_epoch_end(self, epoch, logs=None):
    """Runs metrics and histogram summaries at epoch end."""
    self._log_epoch_metrics(epoch, logs)

    if self.histogram_freq and epoch % self.histogram_freq == 0:
      self._log_weights(epoch)

    if self.embeddings_freq and epoch % self.embeddings_freq == 0:
      self._log_embeddings(epoch)

  def _start_trace(self):
    summary_ops_v2.trace_on(graph=True, profiler=False)
    self._start_profiler(logdir=self._train_dir)
    self._is_tracing = True

  def _stop_trace(self, batch=None):
    """Logs the trace graph to TensorBoard."""
    if batch is None:
      batch = self._stop_batch
    with self._train_writer.as_default():
      with summary_ops_v2.record_if(True):
        summary_ops_v2.trace_export(name='batch_%d' % batch, step=batch)
    self._stop_profiler()
    self._is_tracing = False

  def _collect_learning_rate(self, logs):
    lr_schedule = getattr(self.model.optimizer, 'lr', None)
    if isinstance(lr_schedule, learning_rate_schedule.LearningRateSchedule):
      logs['learning_rate'] = lr_schedule(self.model.optimizer.iterations)
    return logs

  def _compute_steps_per_second(self):
    current_iteration = self.model.optimizer.iterations.numpy()
    steps_per_second = ((current_iteration - self._previous_epoch_iterations) /
                        self._train_accumulated_time)
    return steps_per_second

  def _log_epoch_metrics(self, epoch, logs):
    """Writes epoch metrics out as scalar summaries.

    Args:
        epoch: Int. The global step to use for TensorBoard.
        logs: Dict. Keys are scalar summary names, values are scalars.
    """
    if not logs:
      return

    train_logs = {k: v for k, v in logs.items() if not k.startswith('val_')}
    val_logs = {k: v for k, v in logs.items() if k.startswith('val_')}
    train_logs = self._collect_learning_rate(train_logs)
    if self.write_steps_per_second:
      train_logs['steps_per_second'] = self._compute_steps_per_second()

    with summary_ops_v2.record_if(True):
      if train_logs:
        with self._train_writer.as_default():
          for name, value in train_logs.items():
            summary_ops_v2.scalar('epoch_' + name, value, step=epoch)
      if val_logs:
        with self._val_writer.as_default():
          for name, value in val_logs.items():
            name = name[4:]  # Remove 'val_' prefix.
            summary_ops_v2.scalar('epoch_' + name, value, step=epoch)

  def _log_weights(self, epoch):
    """Logs the weights of the Model to TensorBoard."""
    with self._train_writer.as_default():
      with summary_ops_v2.record_if(True):
        for layer in self.model.layers:
          for weight in layer.weights:
            weight_name = weight.name.replace(':', '_')
            summary_ops_v2.histogram(weight_name, weight, step=epoch)
            if self.write_images:
              self._log_weight_as_image(weight, weight_name, epoch)
        self._train_writer.flush()

  def _log_weight_as_image(self, weight, weight_name, epoch):
    """Logs a weight as a TensorBoard image."""
    w_img = array_ops.squeeze(weight)
    shape = backend.int_shape(w_img)
    if len(shape) == 1:  # Bias case
      w_img = array_ops.reshape(w_img, [1, shape[0], 1, 1])
    elif len(shape) == 2:  # Dense layer kernel case
      if shape[0] > shape[1]:
        w_img = array_ops.transpose(w_img)
        shape = backend.int_shape(w_img)
      w_img = array_ops.reshape(w_img, [1, shape[0], shape[1], 1])
    elif len(shape) == 3:  # ConvNet case
      if backend.image_data_format() == 'channels_last':
        # Switch to channels_first to display every kernel as a separate
        # image.
        w_img = array_ops.transpose(w_img, perm=[2, 0, 1])
        shape = backend.int_shape(w_img)
      w_img = array_ops.reshape(w_img, [shape[0], shape[1], shape[2], 1])

    shape = backend.int_shape(w_img)
    # Not possible to handle 3D convnets etc.
    if len(shape) == 4 and shape[-1] in [1, 3, 4]:
      summary_ops_v2.image(weight_name, w_img, step=epoch)

  def _log_embeddings(self, epoch):
    embeddings_ckpt = os.path.join(self._log_write_dir, 'train',
                                   'keras_embedding.ckpt-{}'.format(epoch))
    self.model.save_weights(embeddings_ckpt)

  def _start_profiler(self, logdir):
    """Starts the profiler if currently inactive.

    Args:
      logdir: Directory where profiler results will be saved.
    """
    if self._profiler_started:
      return
    try:
      profiler.start(logdir=logdir)
      self._profiler_started = True
    except errors.AlreadyExistsError as e:
      # Profiler errors should not be fatal.
      logging.error('Failed to start profiler: %s', e.message)

  def _stop_profiler(self, save=True):
    """Stops the profiler if currently active.

    Args:
      save: Whether to save the profiler results to TensorBoard.
    """
    if not self._profiler_started:
      return
    try:
      profiler.stop(save=save)
    except errors.UnavailableError as e:
      # Profiler errors should not be fatal.
      logging.error('Failed to stop profiler: %s', e.message)
    finally:
      self._profiler_started = False
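
# --- Illustrative sketch (not part of the original module) -----------------
# A hedged illustration of the `profile_batch` forms accepted by
# `_init_profile_batch` above; the helper name and log directory are
# assumptions for illustration only.
def _example_profile_batch_forms():
  import tensorflow as tf

  log_dir = '/tmp/tb_logs'
  # Single batch: profile only batch 5.
  single = tf.keras.callbacks.TensorBoard(log_dir, profile_batch=5)
  # Range: profile batches 10 through 20.
  batch_range = tf.keras.callbacks.TensorBoard(log_dir, profile_batch=(10, 20))
  # Legacy comma-separated string form is parsed into the same range.
  legacy = tf.keras.callbacks.TensorBoard(log_dir, profile_batch='10,20')
  # Zero disables profiling entirely.
  disabled = tf.keras.callbacks.TensorBoard(log_dir, profile_batch=0)
  return single, batch_range, legacy, disabled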
"3

	









r[  z!keras.callbacks.ReduceLROnPlateauc                   sF   e Zd ZdZd fdd		Zd
d ZdddZdddZdd Z  Z	S )ReduceLROnPlateaua  Reduce learning rate when a metric has stopped improving.

  Models often benefit from reducing the learning rate by a factor
  of 2-10 once learning stagnates. This callback monitors a
  quantity and if no improvement is seen for a 'patience' number
  of epochs, the learning rate is reduced.

  Example:

  ```python
  reduce_lr = ReduceLROnPlateau(monitor='val_loss', factor=0.2,
                                patience=5, min_lr=0.001)
  model.fit(X_train, Y_train, callbacks=[reduce_lr])
  ```

  Args:
      monitor: quantity to be monitored.
      factor: factor by which the learning rate will be reduced.
        `new_lr = lr * factor`.
      patience: number of epochs with no improvement after which learning rate
        will be reduced.
      verbose: int. 0: quiet, 1: update messages.
      mode: one of `{'auto', 'min', 'max'}`. In `'min'` mode,
        the learning rate will be reduced when the
        quantity monitored has stopped decreasing; in `'max'` mode it will be
        reduced when the quantity monitored has stopped increasing; in `'auto'`
        mode, the direction is automatically inferred from the name of the
        monitored quantity.
      min_delta: threshold for measuring the new optimum, to only focus on
        significant changes.
      cooldown: number of epochs to wait before resuming normal operation after
        lr has been reduced.
      min_lr: lower bound on the learning rate.
  r   皙?
  """

  def __init__(self,
               monitor='val_loss',
               factor=0.1,
               patience=10,
               verbose=0,
               mode='auto',
               min_delta=1e-4,
               cooldown=0,
               min_lr=0,
               **kwargs):
    super(ReduceLROnPlateau, self).__init__()

    self.monitor = monitor
    if factor >= 1.0:
      raise ValueError('ReduceLROnPlateau does not support a factor >= 1.0.')
    if 'epsilon' in kwargs:
      min_delta = kwargs.pop('epsilon')
      logging.warning('`epsilon` argument is deprecated and '
                      'will be removed, use `min_delta` instead.')
    self.factor = factor
    self.min_lr = min_lr
    self.min_delta = min_delta
    self.patience = patience
    self.verbose = verbose
    self.cooldown = cooldown
    self.cooldown_counter = 0  # Cooldown counter.
    self.wait = 0
    self.best = 0
    self.mode = mode
    self.monitor_op = None
    self._reset()

  def _reset(self):
    """Resets wait counter and cooldown counter."""
    if self.mode not in ['auto', 'min', 'max']:
      logging.warning('Learning rate reduction mode %s is unknown, '
                      'fallback to auto mode.', self.mode)
      self.mode = 'auto'
    if (self.mode == 'min' or
        (self.mode == 'auto' and 'acc' not in self.monitor)):
      self.monitor_op = lambda a, b: np.less(a, b - self.min_delta)
      self.best = np.Inf
    else:
      self.monitor_op = lambda a, b: np.greater(a, b + self.min_delta)
      self.best = -np.Inf
    self.cooldown_counter = 0
    self.wait = 0

  def on_train_begin(self, logs=None):
    self._reset()

  def on_epoch_end(self, epoch, logs=None):
    logs = logs or {}
    logs['lr'] = backend.get_value(self.model.optimizer.lr)
    current = logs.get(self.monitor)
    if current is None:
      logging.warning('Learning rate reduction is conditioned on metric `%s` '
                      'which is not available. Available metrics are: %s',
                      self.monitor, ','.join(list(logs.keys())))
    else:
      if self.in_cooldown():
        self.cooldown_counter -= 1
        self.wait = 0

      if self.monitor_op(current, self.best):
        self.best = current
        self.wait = 0
      elif not self.in_cooldown():
        self.wait += 1
        if self.wait >= self.patience:
          old_lr = backend.get_value(self.model.optimizer.lr)
          if old_lr > np.float32(self.min_lr):
            new_lr = old_lr * self.factor
            new_lr = max(new_lr, self.min_lr)
            backend.set_value(self.model.optimizer.lr, new_lr)
            if self.verbose > 0:
              print('\nEpoch %05d: ReduceLROnPlateau reducing learning '
                    'rate to %s.' % (epoch + 1, new_lr))
            self.cooldown_counter = self.cooldown
            self.wait = 0

  def in_cooldown(self):
    return self.cooldown_counter > 0
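
# --- Illustrative sketch (not part of the original module) -----------------
# A hedged, worked example of the reduction arithmetic used in
# `on_epoch_end` above: `new_lr = max(old_lr * factor, min_lr)`. The helper
# name is an assumption for illustration only.
def _example_reduce_lr_arithmetic():
  old_lr, factor, min_lr = 0.01, 0.2, 0.001

  new_lr = max(old_lr * factor, min_lr)  # 0.01 * 0.2 = 0.002
  assert abs(new_lr - 0.002) < 1e-9
  # A second reduction is clamped by min_lr: 0.002 * 0.2 = 0.0004 -> 0.001.
  assert max(new_lr * factor, min_lr) == min_lr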

@keras_export('keras.callbacks.CSVLogger')
class CSVLogger(Callback):
  """Callback that streams epoch results to a CSV file.

  Supports all values that can be represented as a string,
  including 1D iterables such as `np.ndarray`.

  Example:

  ```python
  csv_logger = CSVLogger('training.log')
  model.fit(X_train, Y_train, callbacks=[csv_logger])
  ```

  Args:
      filename: Filename of the CSV file, e.g. `'run/log.csv'`.
      separator: String used to separate elements in the CSV file.
      append: Boolean. True: append if file exists (useful for continuing
          training). False: overwrite existing file.
  r2  Fc                s:   || _ t|| _|| _d | _d | _d| _tt| 	  d S )NT)
sepr   filenamer5   r  r3  append_headerr   r  ry   )ru   r  	separatorr5   )rc   r>   r?   ry   
  s    
zCSVLogger.__init__Nc          	   C   s^   | j rFt| jr@t| jd}tt|  | _	W d Q R X d}nd}t| j|| _
d S )Nrr  r  )r5   r   r  r  r   ZGFileboolr   readliner  csv_file)ru   rW   r  r.   r>   r>   r?   r   
  s    zCSVLogger.on_train_beginc                s   pi dd  j d kr(t  _ jjrHtfddj D jsG fdddtj}dgj  }tjj	||d_j
rj  td|i}| fd	dj D  j| j	  d S )
Nc             S   sR   t | tjo| jdk}t | tr$| S t | tjjrJ|sJddt	t|  S | S d S )Nr   z"[%s]"z, )
r/   r   r?  ndimr   collectionsabcIterabler  map)r   Zis_zero_dim_ndarrayr>   r>   r?   handle_value
  s    
z,CSVLogger.on_epoch_end.<locals>.handle_valuec             3   s*   | ]"}| kr| | fn|d fV  qdS )ZNANr>   )rB   r   )rW   r>   r?   r_   
  s    z)CSVLogger.on_epoch_end.<locals>.<genexpr>c                   s   e Zd Z jZdS )z-CSVLogger.on_epoch_end.<locals>.CustomDialectN)rd   r   r   r  	delimiterr>   )ru   r>   r?   CustomDialect
  s   r  r   )
fieldnamesdialectc             3   s   | ]}| | fV  qd S )Nr>   )rB   key)r  rW   r>   r?   r_   
  s    )r3  sortedr9   r:   dictr  csvexcel
DictWriterr  r  writeheaderr  OrderedDictr   writerowr  )ru   r   rW   r  r  Zrow_dictr>   )r  rW   ru   r?   r   
  s&    	


zCSVLogger.on_epoch_endc             C   s   | j   d | _d S )N)r  r  r  )ru   rW   r>   r>   r?   r   
  s    
zCSVLogger.on_train_end)r2  F)N)N)N)	rd   r   r   r   ry   r   r   r   r   r>   r>   )rc   r?   r  
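
# --- Illustrative sketch (not part of the original module) -----------------
# A hedged example of reading the emitted log back with the stdlib `csv`
# module; the helper name and file name are assumptions for illustration
# only.
def _example_read_csv_log():
  import csv as _csv

  with open('training.log', newline='') as f:
    for row in _csv.DictReader(f):
      # Each row maps 'epoch' plus the sorted metric names to string values.
      print(row['epoch'], row.get('loss'))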
  s
   	


@keras_export('keras.callbacks.LambdaCallback')
class LambdaCallback(Callback):
  """Callback for creating simple, custom callbacks on-the-fly.

  This callback is constructed with anonymous functions that will be called
  at the appropriate time (during `Model.{fit | evaluate | predict}`).
  Note that the callbacks expect positional arguments, as follows:

  - `on_epoch_begin` and `on_epoch_end` expect two positional arguments:
    `epoch`, `logs`
  - `on_batch_begin` and `on_batch_end` expect two positional arguments:
    `batch`, `logs`
  - `on_train_begin` and `on_train_end` expect one positional argument:
    `logs`

  Args:
      on_epoch_begin: called at the beginning of every epoch.
      on_epoch_end: called at the end of every epoch.
      on_batch_begin: called at the beginning of every batch.
      on_batch_end: called at the end of every batch.
      on_train_begin: called at the beginning of model training.
      on_train_end: called at the end of model training.

  Example:

  ```python
  # Print the batch number at the beginning of every batch.
  batch_print_callback = LambdaCallback(
      on_batch_begin=lambda batch,logs: print(batch))

  # Stream the epoch loss to a file in JSON format. The file content
  # is not well-formed JSON but rather has a JSON object per line.
  import json
  json_log = open('loss_log.json', mode='wt', buffering=1)
  json_logging_callback = LambdaCallback(
      on_epoch_end=lambda epoch, logs: json_log.write(
          json.dumps({'epoch': epoch, 'loss': logs['loss']}) + '\n'),
      on_train_end=lambda logs: json_log.close()
  )

  # Terminate some processes after having finished model training.
  processes = ...
  cleanup_callback = LambdaCallback(
      on_train_end=lambda logs: [
          p.terminate() for p in processes if p.is_alive()])

  model.fit(...,
            callbacks=[batch_print_callback,
                       json_logging_callback,
                       cleanup_callback])
  ```
  """

  def __init__(self,
               on_epoch_begin=None,
               on_epoch_end=None,
               on_batch_begin=None,
               on_batch_end=None,
               on_train_begin=None,
               on_train_end=None,
               **kwargs):
    super(LambdaCallback, self).__init__()
    self.__dict__.update(kwargs)
    if on_epoch_begin is not None:
      self.on_epoch_begin = on_epoch_begin
    else:
      self.on_epoch_begin = lambda epoch, logs: None
    if on_epoch_end is not None:
      self.on_epoch_end = on_epoch_end
    else:
      self.on_epoch_end = lambda epoch, logs: None
    if on_batch_begin is not None:
      self.on_batch_begin = on_batch_begin
    else:
      self.on_batch_begin = lambda batch, logs: None
    if on_batch_end is not None:
      self.on_batch_end = on_batch_end
    else:
      self.on_batch_end = lambda batch, logs: None
    if on_train_begin is not None:
      self.on_train_begin = on_train_begin
    else:
      self.on_train_begin = lambda logs: None
    if on_train_end is not None:
      self.on_train_end = on_train_end
    else:
      self.on_train_end = lambda logs: None