使用`tensorflow.python.keras.estimator.model_to_estimator`将Keras模型转换为Estimator API时如何指定类别权重(class weights)?

Rod*_*ira 18 python tensorflow tensorflow-estimator

我在将一个纯Keras模型转换为不平衡数据集上的TensorFlow Estimator API时遇到了一些麻烦.

使用纯Keras API时,`class_weight`参数在`model.fit`方法中可用;但在使用`tensorflow.python.keras.estimator.model_to_estimator`将Keras模型转换为TensorFlow Estimator时,没有办法传入class_weights。

如何解决这个问题?

我在Ubuntu 18,Cuda 9,Cudnn 7上使用TF 1.12

纯Keras模型:

def keras_model(n_classes=None, model_dir='./tmp-model/', config=None):
    """Build and compile the character-level BiLSTM/GRU text classifier.

    Args:
        n_classes: Number of output classes for the softmax head.
        model_dir: Unused here; kept for signature parity with the
            estimator-building variant.
        config: Unused here; kept for signature parity.

    Returns:
        A compiled ``keras.Model`` mapping raw character-index sequences to
        class probabilities.

    NOTE(review): relies on module-level globals (``max_len``,
    ``nunique_chars_raw``, ``EMBED_SIZE``, ``MAX_WORD_LENGTH``,
    ``dropout_rate``) defined elsewhere in the script — confirm they are set
    before calling.
    """
    with tf.device('/gpu:0'):
        # Raw character-index input sequence.
        raw_input = Input(shape=(max_len,), name='word_raw')

        # Trainable character embedding lookup, regularized with dropout.
        embedded = Embedding(
            input_dim=nunique_chars_raw,
            output_dim=EMBED_SIZE,
            input_length=MAX_WORD_LENGTH,
            trainable=True,
            name='word_raw_emb')(raw_input)
        embedded = Dropout(rate=dropout_rate)(embedded)

        # Stacked bidirectional recurrent encoders: the LSTM keeps the full
        # sequence, the GRU collapses it to a single vector.
        lstm_seq = Bidirectional(LSTM(48, return_sequences=True))(embedded)
        gru_vec = Bidirectional(GRU(48, return_sequences=False))(lstm_seq)

        # Small fully-connected head ending in class probabilities.
        hidden = Dense(16, activation='relu')(gru_vec)
        probabilities = Dense(n_classes, activation='softmax')(hidden)

        net = Model(inputs=raw_input, outputs=probabilities)
        rmsprop = keras.optimizers.RMSprop(
            lr=0.002, rho=0.9, epsilon=None, decay=0.0)
        net.compile(
            loss='categorical_crossentropy',
            optimizer=rmsprop,
            metrics=['categorical_accuracy'])
        return net



# Build the 5-class model and train it directly through the Keras API.
model = keras_model(5)

# `class_weight` rescales each sample's loss contribution by its class,
# compensating for the imbalanced dataset — this is the feature that is
# lost after converting the model to an Estimator.
# NOTE(review): `train_X`, `train_Y_onehot`, `eval_X`, `eval_Y_onehot` and
# `class_weights` are assumed to be defined earlier in the script.
model.fit(train_X, train_Y_onehot,
        batch_size=128,
        epochs=10,
        validation_data=(eval_X,eval_Y_onehot),
        class_weight=class_weights,
        verbose=1)
Run Code Online (Sandbox Code Playgroud)

Keras模型到TensorFlow Estimator:

def keras_estimator_model(n_classes=None, model_dir='./tmp-model/', config=None):
    """Build the same BiLSTM/GRU classifier and wrap it as an Estimator.

    Args:
        n_classes: Number of output classes for the softmax head.
        model_dir: Checkpoint/summary directory forwarded to
            ``model_to_estimator``.
        config: Optional ``tf.estimator.RunConfig`` forwarded to
            ``model_to_estimator``.

    Returns:
        A ``tf.estimator.Estimator`` wrapping the compiled Keras model.

    NOTE(review): depends on module-level globals (``max_len``,
    ``nunique_chars_raw``, ``EMBED_SIZE``, ``MAX_WORD_LENGTH``,
    ``dropout_rate``) — confirm they are set before calling.
    """
    with tf.device('/gpu:0'):
        # Raw character-index input sequence.
        raw_input = Input(shape=(max_len,), name='word_raw')

        # Trainable character embedding lookup, regularized with dropout.
        embedded = Embedding(
            input_dim=nunique_chars_raw,
            output_dim=EMBED_SIZE,
            input_length=MAX_WORD_LENGTH,
            trainable=True,
            name='word_raw_emb')(raw_input)
        embedded = Dropout(rate=dropout_rate)(embedded)

        # Stacked bidirectional recurrent encoders.
        lstm_seq = Bidirectional(LSTM(48, return_sequences=True))(embedded)
        gru_vec = Bidirectional(GRU(48, return_sequences=False))(lstm_seq)

        # Fully-connected head ending in class probabilities.
        hidden = Dense(16, activation='relu')(gru_vec)
        probabilities = Dense(n_classes, activation='softmax')(hidden)

        net = Model(inputs=raw_input, outputs=probabilities)
        rmsprop = keras.optimizers.RMSprop(
            lr=0.002, rho=0.9, epsilon=None, decay=0.0)
        net.compile(
            loss='categorical_crossentropy',
            optimizer=rmsprop,
            metrics=['categorical_accuracy'])

        # Conversion happens inside the device scope, matching the original
        # graph-placement behavior.
        return model_to_estimator(
            keras_model=net, model_dir=model_dir, config=config)

# Convert the 5-class Keras model into an Estimator. Note there is no
# class-weight hook anywhere in this pipeline — that is the problem being
# asked about.
estimator_model = keras_estimator_model(5)

# NOTE(review): `train_input_fn` / `eval_input_fn` are assumed to be
# defined elsewhere in the script.
train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,max_steps=10)

# Evaluate the full eval dataset (steps=None), starting 10s after training
# begins and at most every 10s thereafter.
eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn,
        steps=None,
        start_delay_secs=10,
        throttle_secs=10)

tf.estimator.train_and_evaluate(estimator_model, train_spec, eval_spec)

Run Code Online (Sandbox Code Playgroud)

Rod*_*ira 3

我已经编写了一个解决方法,它似乎有效:把类别权重直接编进自定义损失函数里,这样转换成Estimator之后权重依然生效。

import tensorflow as tf
from tensorflow.python.keras import backend as K

def weighted_loss_fn(class_weights):
    """Return a Keras-compatible loss that applies per-class weights.

    Args:
        class_weights: Sequence of floats, one weight per class, indexed by
            class id (i.e. by the argmax of the one-hot label).

    Returns:
        A ``_loss_fn(y_true, y_pred)`` closure computing softmax
        cross-entropy with each sample's loss scaled by its class weight.
    """
    # Build the lookup tensor ONCE, in the closure. The original created a
    # fresh K.variable on every loss evaluation, which re-allocates (and in
    # graph mode may leave uninitialized) a new variable per call; a constant
    # is sufficient since the weights are never trained.
    class_weights_tensor = K.constant(class_weights)

    def _loss_fn(y_true, y_pred):
        # Recover integer class ids from the one-hot targets, then gather
        # the per-sample weight for each id.
        y_true_labels = K.argmax(y_true, axis=1)
        weights = K.gather(class_weights_tensor, y_true_labels)

        # NOTE(review): the model's output layer is softmax, but
        # tf.losses.softmax_cross_entropy expects raw logits — this double
        # softmax is preserved from the original; confirm whether the output
        # layer should be linear instead.
        return tf.losses.softmax_cross_entropy(
            onehot_labels=y_true, logits=y_pred, weights=weights)

    return _loss_fn




def keras_estimator_model(n_classes=None, model_dir='./tmp-model/', config=None, class_weights=None):
    """Build the BiLSTM/GRU classifier with class-weighted loss as an Estimator.

    Args:
        n_classes: Number of output classes for the softmax head.
        model_dir: Checkpoint/summary directory forwarded to
            ``model_to_estimator``.
        config: Optional ``tf.estimator.RunConfig`` forwarded to
            ``model_to_estimator``.
        class_weights: Optional sequence of per-class loss weights. When
            ``None`` the plain (unweighted) categorical cross-entropy is
            used — the original version crashed inside ``weighted_loss_fn``
            if this argument was omitted.

    Returns:
        A ``tf.estimator.Estimator`` wrapping the compiled Keras model.

    NOTE(review): depends on module-level globals (``max_len``,
    ``nunique_chars_raw``, ``EMBED_SIZE``, ``MAX_WORD_LENGTH``,
    ``dropout_rate``) — confirm they are set before calling.
    """
    with tf.device('/gpu:0'):

        # Inputs
        inp_raw = Input(shape=(max_len,), name='word_raw')

        # raw text LSTM network
        word_raw_emb = Embedding(
            input_dim=nunique_chars_raw,
            output_dim=EMBED_SIZE,
            input_length=MAX_WORD_LENGTH,
            trainable=True,
            name='word_raw_emb')(inp_raw)

        word_raw_emb = Dropout(rate=dropout_rate)(word_raw_emb)

        word_raw_emb_lstm = Bidirectional(
            LSTM(48, return_sequences=True))(word_raw_emb)
        word_raw_emb_gru = Bidirectional(
            GRU(48, return_sequences=False))(word_raw_emb_lstm)

        word_raw_net = Dense(16, activation='relu')(word_raw_emb_gru)
        output_raw_net = Dense(n_classes, activation='softmax')(word_raw_net)

        model = Model(inputs=inp_raw, outputs=output_raw_net)
        optz = keras.optimizers.RMSprop(
            lr=0.002, rho=0.9, epsilon=None, decay=0.0)

        # Fall back to the plain loss when no weights were supplied instead
        # of failing inside the weighted-loss factory.
        if class_weights is None:
            loss_fn = 'categorical_crossentropy'
        else:
            loss_fn = weighted_loss_fn(class_weights)
        model.compile(loss=loss_fn,
                      optimizer=optz, metrics=['categorical_accuracy'])

        model_estimator = model_to_estimator(keras_model=model, model_dir=model_dir, config=config)

        return model_estimator



# Pass the class weights through — the original example called
# keras_estimator_model(5) without them, which defeats the purpose of the
# workaround (and crashes the weighted loss factory on a None argument).
# NOTE(review): `class_weights`, `train_input_fn` and `eval_input_fn` are
# assumed to be defined earlier in the script.
estimator_model = keras_estimator_model(5, class_weights=class_weights)

train_spec = tf.estimator.TrainSpec(input_fn=train_input_fn,max_steps=10)

# Evaluate the full eval dataset (steps=None), starting 10s after training
# begins and at most every 10s thereafter.
eval_spec = tf.estimator.EvalSpec(
        input_fn=eval_input_fn,
        steps=None,
        start_delay_secs=10,
        throttle_secs=10)

tf.estimator.train_and_evaluate(estimator_model, train_spec, eval_spec)

Run Code Online (Sandbox Code Playgroud)

就我而言class_weights= [ 0.17041813 42.00318471 35.26470588 29.70495495 42.00318471 44.55743243]