How to create a multi-output TensorFlow 2 custom training loop for regression and classification?

Nic*_*ais 2 python neural-network keras tensorflow tensorflow2.0

I made a minimal reproducible example with the Iris dataset. I built a neural network that predicts the last column of the iris features. I also want the network to output the target (the class), so it has to minimize two different loss functions, a continuous one and a categorical one. Everything in the example below is set up for the continuous target only. How do I turn this into a multi-output problem?

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn.datasets import load_iris
tf.keras.backend.set_floatx('float64')
iris, target = load_iris(return_X_y=True)

X = iris[:, :3]
y = iris[:, 3]
z = target

ds = tf.data.Dataset.from_tensor_slices((X, y, z)).batch(8)

class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.d0 = Dense(16, activation='relu')
        self.d1 = Dense(32, activation='relu')
        self.d2 = Dense(1)

    def call(self, x):
        x = self.d0(x)
        x = self.d1(x)
        x = self.d2(x)
        return x

model = MyModel()

loss_object = tf.keras.losses.MeanAbsoluteError()
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)

# running averages of the loss and error, reported at the end of each epoch
loss = tf.keras.metrics.Mean(name='regression loss')
error = tf.keras.metrics.MeanAbsoluteError()

@tf.function
def train_step(inputs, target):
    with tf.GradientTape() as tape:
        output = model(inputs)
        run_loss = loss_object(target, output)

    gradients = tape.gradient(run_loss, model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))

    loss(run_loss)
    error(target, output)


for epoch in range(50):
    for xx, yy, zz in ds: # what to do with zz, the categorical target?
        train_step(xx, yy)

    template = 'Epoch {:>2}, MAE: {:>5.2f}'
    print(template.format(epoch+1,
                        loss.result()))

    loss.reset_states()
    error.reset_states()

Nic*_*ais 6

You can pass a list of losses to tape.gradient, like this:

with tf.GradientTape() as tape:
    pred_reg, pred_cat = model(inputs)
    reg_loss = loss_obj_reg(y_reg, pred_reg)
    cat_loss = loss_obj_cat(y_cat, pred_cat)

gradients = tape.gradient([reg_loss, cat_loss], model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))
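When tape.gradient is given a list of targets it differentiates their sum, so this is equivalent to summing the losses yourself. Building the scalar total explicitly also lets you weight the two objectives; here is a minimal sketch, where the weight cat_weight is an illustrative assumption rather than part of the original answer:

with tf.GradientTape() as tape:
    pred_reg, pred_cat = model(inputs)
    reg_loss = loss_obj_reg(y_reg, pred_reg)
    cat_loss = loss_obj_cat(y_cat, pred_cat)
    # cat_weight is hypothetical; tune it to balance the two objectives
    cat_weight = 1.0
    total_loss = reg_loss + cat_weight * cat_loss

gradients = tape.gradient(total_loss, model.trainable_variables)
optimizer.apply_gradients(zip(gradients, model.trainable_variables))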

Full example:

import tensorflow as tf
from tensorflow.keras.layers import Dense
from tensorflow.keras import Model
from sklearn.datasets import load_iris
iris, target = load_iris(return_X_y=True)

X = tf.cast(iris[:, :3], tf.float32)
y = tf.cast(iris[:, 3], tf.float32)
z = target

ds = tf.data.Dataset.from_tensor_slices((X, y, z)).shuffle(150).batch(8)

class MyModel(Model):
    def __init__(self):
        super(MyModel, self).__init__()
        self.d0 = Dense(16, activation='relu')    # shared layers
        self.d1 = Dense(32, activation='relu')
        self.d2 = Dense(1)                         # regression head
        self.d3 = Dense(3, activation='softmax')   # classification head (3 classes)

    def call(self, x, training=None, **kwargs):
        x = self.d0(x)
        x = self.d1(x)
        a = self.d2(x)
        b = self.d3(x)
        return a, b

model = MyModel()

loss_obj_reg = tf.keras.losses.MeanAbsoluteError()
loss_obj_cat = tf.keras.losses.SparseCategoricalCrossentropy()

optimizer = tf.keras.optimizers.Adam(learning_rate=1e-4)

loss_reg = tf.keras.metrics.Mean(name='regression loss')
loss_cat = tf.keras.metrics.Mean(name='categorical loss')

error_reg = tf.keras.metrics.MeanAbsoluteError()
error_cat = tf.keras.metrics.SparseCategoricalAccuracy()

@tf.function
def train_step(inputs, y_reg, y_cat):
    with tf.GradientTape() as tape:
        pred_reg, pred_cat = model(inputs)
        reg_loss = loss_obj_reg(y_reg, pred_reg)
        cat_loss = loss_obj_cat(y_cat, pred_cat)

    gradients = tape.gradient([reg_loss, cat_loss], model.trainable_variables)
    optimizer.apply_gradients(zip(gradients, model.trainable_variables))
    loss_reg(reg_loss)
    loss_cat(cat_loss)

    error_reg(y_reg, pred_reg)
    error_cat(y_cat, pred_cat)

template = ('Epoch {:>3}, SCCE: {:>5.2f},'
            ' MAE: {:>4.2f}, SAcc: {:>5.1%}')

for epoch in range(150):
    for xx, yy, zz in ds:
        train_step(xx, yy, zz)

    if (epoch + 1) % 10 == 0:

        print(template.format(epoch+1,
                            loss_cat.result(),
                            error_reg.result(),
                            error_cat.result()))

    loss_reg.reset_states()
    loss_cat.reset_states()

    error_reg.reset_states()
    error_cat.reset_states()
Epoch  10, SCCE:  1.41, MAE: 0.36, SAcc: 33.3%
Epoch  20, SCCE:  1.14, MAE: 0.31, SAcc: 44.0%
Epoch  30, SCCE:  1.05, MAE: 0.26, SAcc: 41.3%
Epoch  40, SCCE:  0.99, MAE: 0.21, SAcc: 40.0%
Epoch  50, SCCE:  0.94, MAE: 0.19, SAcc: 40.0%
Epoch  60, SCCE:  0.88, MAE: 0.18, SAcc: 40.0%
Epoch  70, SCCE:  0.83, MAE: 0.17, SAcc: 44.7%
Epoch  80, SCCE:  0.77, MAE: 0.17, SAcc: 75.3%
Epoch  90, SCCE:  0.70, MAE: 0.17, SAcc: 76.7%
Epoch 100, SCCE:  0.64, MAE: 0.17, SAcc: 82.7%
Epoch 110, SCCE:  0.58, MAE: 0.16, SAcc: 82.7%
Epoch 120, SCCE:  0.54, MAE: 0.16, SAcc: 88.0%
Epoch 130, SCCE:  0.50, MAE: 0.16, SAcc: 88.7%
Epoch 140, SCCE:  0.47, MAE: 0.16, SAcc: 90.7%
Epoch 150, SCCE:  0.45, MAE: 0.16, SAcc: 90.0%

From this output you can see that both losses are being minimized.
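Once trained, a single forward pass returns both outputs. A minimal inference sketch, reusing the model and X defined above:

pred_reg, pred_cat = model(X, training=False)
reg_values = tf.squeeze(pred_reg, axis=-1)   # continuous prediction per sample
class_ids = tf.argmax(pred_cat, axis=-1)     # predicted class index per sample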