AttributeError: 'ModifiedTensorBoard' object has no attribute '_train_dir'

Vol*_*ke0 5 python python-3.x keras tensorflow

I'm following a Deep Q-learning tutorial on YouTube, but I'm having trouble getting it to run. It says the object has no attribute '_train_dir', even though I never call that code myself. Here is the code:

import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard


class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.writer = tf.summary.create_file_writer(self.log_dir)
        self._log_write_dir = self.log_dir

    def _write_logs(self, logs, index):
        with self.writer.as_default():
            for name, value in logs.items():
                tf.summary.scalar(name, value, step=index)
                self.step += 1
                self.writer.flush()
                
    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        pass

    # Overridden, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overridden
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    # Overridden, so it won't close the writer
    def on_train_end(self, _):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)


It runs up to this point and then throws:

Traceback (most recent call last):
  File "dqn-1.py", line 387, in <module>
    agent.train(done, step)
  File "dqn-1.py", line 334, in train
    verbose=0, shuffle=False, callbacks=[self.tensorboard] if terminal_state else None)
  File "C:\Users\Anthony\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 108, in _method_wrapper
    return method(self, *args, **kwargs)
  File "C:\Users\Anthony\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\engine\training.py", line 1079, in fit
    callbacks.on_train_begin()
  File "C:\Users\Anthony\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\callbacks.py", line 497, in on_train_begin
    callback.on_train_begin(logs)
  File "C:\Users\Anthony\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\callbacks.py", line 2141, in on_train_begin
    self._push_writer(self._train_writer, self._train_step)
  File "C:\Users\Anthony\AppData\Local\Programs\Python\Python37\lib\site-packages\tensorflow\python\keras\callbacks.py", line 1988, in _train_writer
    self._train_dir)

What am I doing wrong?

小智 0

I ran into the same problem, and it came down to the tensorflow version. I'm on 2.3; there the stock TensorBoard callback sets up _train_dir (and its train writer) in set_model, so overriding set_model with a bare pass leaves that attribute undefined by the time on_train_begin runs. The change that worked for me is this:

import os

import tensorflow as tf
from tensorflow.keras.callbacks import TensorBoard
#tf.compat.v1.disable_eager_execution() # uncomment if needed
if tf.executing_eagerly():
    print('Executing eagerly')

print(f'tensorflow version {tf.__version__}')
print(f'tensorflow.keras version {tf.keras.__version__}')

# Own Tensorboard class
class ModifiedTensorBoard(TensorBoard):

    # Overriding init to set initial step and writer (we want one log file for all .fit() calls)
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.step = 1
        self.model = None
        self.TB_graph = tf.compat.v1.Graph()
        with self.TB_graph.as_default():
            self.writer = tf.summary.create_file_writer(self.log_dir, flush_millis=5000)
            self.writer.set_as_default()
            self.all_summary_ops = tf.compat.v1.summary.all_v2_summary_ops()
        self.TB_sess = tf.compat.v1.InteractiveSession(graph=self.TB_graph)
        self.TB_sess.run(self.writer.init())

    # Overriding this method to stop creating default log writer
    def set_model(self, model):
        self.model = model
        self._train_dir = os.path.join(self.log_dir, 'train')

    # Overridden, saves logs with our step number
    # (otherwise every .fit() will start writing from 0th step)
    def on_epoch_end(self, epoch, logs=None):
        self.update_stats(**logs)

    # Overridden
    # We train for one batch only, no need to save anything at epoch end
    def on_batch_end(self, batch, logs=None):
        pass

    def on_train_begin(self, logs=None):
        pass
    
    # Overridden, so it won't close the writer
    def on_train_end(self, _):
        pass

    # added for performance?
    def on_train_batch_end(self, _, __):
        pass

    # Custom method for saving own metrics
    # Creates writer, writes custom metrics and closes writer
    def update_stats(self, **stats):
        self._write_logs(stats, self.step)

    def _write_logs(self, logs, index):
        for name, value in logs.items():
            self.TB_sess.run(self.all_summary_ops)
            if self.model is not None:
                name = f'{name}_{self.model.name}'
            self.TB_sess.run(tf.summary.scalar(name, value, step=index))
        self.model = None
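For anyone who wants to sanity-check the fixed callback on its own, here is a minimal, self-contained sketch, assuming the ModifiedTensorBoard class above is defined in the same script; the tiny model, dummy data and log_dir pattern are placeholders, not anything from the original question. It mirrors the fit() call visible in the traceback (verbose=0, shuffle=False, callback passed only on terminal steps) and advances the custom step counter once per episode so every fit() writes to the same log file. Whether logging works in eager mode may depend on your setup; see the disable_eager_execution() note above.

import time

import numpy as np
from tensorflow.keras.layers import Dense
from tensorflow.keras.models import Sequential

# tf.compat.v1.disable_eager_execution()  # may be needed first, as noted above

# Placeholder model and data, only to exercise the callback.
model = Sequential([Dense(8, activation='relu', input_shape=(4,)), Dense(2)])
model.compile(optimizer='adam', loss='mse')

X = np.random.random((32, 4))
y = np.random.random((32, 2))

# One callback instance for the whole run; the log_dir pattern is an assumption.
tensorboard = ModifiedTensorBoard(log_dir=f'logs/test-{int(time.time())}')

for episode in range(1, 4):
    terminal_state = True  # in the real agent this is only True at episode end
    model.fit(X, y, batch_size=32, verbose=0, shuffle=False,
              callbacks=[tensorboard] if terminal_state else None)
    # Advance the custom step so the next fit() keeps writing to the same log
    tensorboard.step = episode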