I am trying for the first time to get a deep learning model tuned with keras-tuner. My tuning code is as follows:
import tensorflow as tf
from tensorflow.keras import layers, models
from tensorflow.keras.layers import BatchNormalization, Dropout

def build_model_test(hp):
    model = models.Sequential()
    model.add(layers.InputLayer(input_shape=(100, 28)))
    model.add(layers.Dense(28, activation='relu'))
    model.add(BatchNormalization(momentum=0.99))
    # Note: reusing the same hyperparameter names ('dropout', 'num_filters')
    # makes keras-tuner apply one shared value to every layer that uses them.
    model.add(Dropout(hp.Float('dropout', 0, 0.5, step=0.1, default=0.5)))
    model.add(layers.Conv1D(
        filters=hp.Int('num_filters', 16, 128, step=16),
        kernel_size=3, strides=1, padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=0.99))
    model.add(Dropout(hp.Float('dropout', 0, 0.5, step=0.1, default=0.5)))
    model.add(layers.Conv1D(
        filters=hp.Int('num_filters', 16, 128, step=16),
        kernel_size=3, strides=1, padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=0.99))
    model.add(Dropout(hp.Float('dropout', 0, 0.5, step=0.1, default=0.5)))
    model.add(layers.Conv1D(
        filters=hp.Int('num_filters', 16, 128, step=16),
        kernel_size=3, strides=1, padding='same', activation='relu'))
    model.add(BatchNormalization(momentum=0.99))
    model.add(Dropout(hp.Float('dropout', 0, 0.5, step=0.1, default=0.5)))
    model.add(layers.Dense(
        units=hp.Int('units', min_value=16, max_value=512, step=32, default=128),
        activation='relu'))
    model.add(Dropout(hp.Float('dropout', 0, 0.5, step=0.1, default=0.5)))
    model.add(layers.Dense(1, activation='linear'))
    model.compile(
        optimizer='adam',
        loss='mean_squared_error',
        metrics=[tf.keras.metrics.RootMeanSquaredError()]
    )
    return model
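For reference, this is roughly how I intend to run the search with this build function (the tuner type, objective, trial count, directory/project names, and the x_train/y_train/x_val/y_val variables below are placeholders, not from my actual script):

import keras_tuner as kt  # older releases use "import kerastuner as kt"

# Placeholder search setup; assumes x_train/y_train and x_val/y_val are defined.
tuner = kt.RandomSearch(
    build_model_test,
    objective='val_loss',
    max_trials=10,
    directory='tuner_logs',
    project_name='cnn_regression'
)

# search() forwards its arguments to model.fit() for each trial.
tuner.search(x_train, y_train,
             epochs=20,
             validation_data=(x_val, y_val))

best_model = tuner.get_best_models(num_models=1)[0]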