-
Notifications
You must be signed in to change notification settings - Fork 241
Description
# Candidate hyperparameter choices the optimizer selects from.
activation_cnn_layer = ['relu', 'tanh']   # activations for the Conv2D layers
activation_feed_layer = ['relu', 'tanh']  # activations for the dense (feed-forward) layer
optimizers = ['adam', 'sgd']              # training optimizers
# map from a real number in [0, 1] to an integer index in [1, 3] (capped at 2 when val == 1)
def swap_activation_cnn(val):
    """Map a real value in [0, 1] to an integer choice index.

    Yields 1 or 2 for typical inputs; the val == 1 endpoint is capped at 2
    so it stays a valid 1-based index into the two activation choices.
    """
    return 2 if val == 1 else int(2 * val + 1)
def swap_activation_feed(val):
    """Map a real value in [0, 1] to a 1-based activation index (1 or 2).

    The val == 1 endpoint is clamped to 2 to avoid indexing past the
    two available feed-layer activations.
    """
    if val == 1:
        return 2
    scaled = val * 2 + 1
    return int(scaled)
def swap_optimzer(val):
    """Map a real value in [0, 1] to a 1-based optimizer index (1 or 2).

    NOTE: the name keeps the original spelling ("optimzer") because callers
    elsewhere reference it; clamps the val == 1 endpoint to 2.
    """
    return 2 if val == 1 else int(val * 2 + 1)
def no_of_epoch(val):
    """Return the fixed epoch count (50), ignoring the optimizer's suggestion."""
    return 50
# map from a real number in [0, 1] to an integer batch size in [32, 256]
def no_of_batch(val):
    """Scale a [0, 1] value to an integer batch size in [32, 256]."""
    size = val * 224 + 32
    return int(size)
def no_of_filters(val):
    """Scale a [0, 1] value to an integer filter count in [1, 9]."""
    count = val * 8 + 1
    return int(count)
def no_neurons_hidden_layer(val):
    """Scale a [0, 1] value to an integer neuron count in [8, 72]."""
    neurons = val * 64 + 8
    return int(neurons)
def kernel_size(val):
    """Map a [0, 1] value to an integer kernel size (10 or 11).

    Bug fix: the original read ``int(val1+10)`` — ``val1`` is undefined and
    raised NameError. The ``*`` was evidently lost to markdown formatting;
    restored as ``val * 1 + 10``.
    """
    return int(val * 1 + 10)
def dropout(val):
    """Map a [0, 1] value to a dropout rate in [0.1, 1.1].

    Bug fixes: the original read ``int(val1+0.1)`` — ``val1`` is undefined
    (NameError; a ``*`` lost to markdown), and the ``int(...)`` cast would
    truncate every rate to 0 or 1, neither of which is a usable dropout
    probability for keras.layers.Dropout.
    NOTE(review): val == 1 yields 1.1, which Dropout rejects; the search
    bounds should keep val below 0.9 — confirm against the optimizer setup.
    """
    return val * 1 + 0.1
class cnnbenchmark():
    """Benchmark wrapper exposing the CNN hyperparameter search as a fitness function.

    ``Lower``/``Upper`` bound each dimension of the search space to [0, 1];
    the module-level mapping helpers translate those reals into concrete
    hyperparameter choices.
    """

    def __init__(self):
        # Bug fix: the original defined ``init`` (double underscores stripped
        # by formatting), so Lower/Upper were never set at construction time.
        self.Lower = 0
        self.Upper = 1

    def function(self):
        """Return the fitness function ``evaluate(D, solution)``.

        ``evaluate`` decodes a real-valued solution vector into hyperparameters,
        trains the model, and returns ``1 - accuracy`` (so the minimizing
        optimizer maximizes accuracy).
        """
        def evaluate(D, solution):
            acc_cnn_layer = activation_cnn_layer[swap_activation_cnn(solution[0] - 1)]
            print(acc_cnn_layer)
            acc_feed_layer = activation_feed_layer[swap_activation_feed((solution[1]) - 1)]
            optimizer = optimizers[(swap_optimzer(solution[2]) - 1)]
            epochs = no_of_epoch(solution[3])
            batch = no_of_batch(solution[4])
            filters = no_of_filters(solution[5])
            neurons = no_neurons_hidden_layer(solution[6])
            kernel_s = kernel_size(solution[7])
            drop_neurons = dropout(solution[8])
            # NOTE(review): despite the name, this is the minimized loss
            # (1 - test accuracy), not the accuracy itself.
            accuracy = 1 - model.model_build(acc_cnn_layer, acc_feed_layer, optimizer,
                                             epochs, batch, filters, neurons,
                                             kernel_s, drop_neurons)
            # ``scores`` is presumably a module-level log defined elsewhere
            # in the script — confirm against the full source.
            scores.append([accuracy, acc_cnn_layer, acc_feed_layer, optimizer, epochs,
                           batch, filters, neurons, kernel_s, drop_neurons])
            return accuracy
        return evaluate
class model():
    """Builds, trains, and evaluates a CNN for one sampled hyperparameter set."""

    # NOTE(review): defined without ``self`` yet called as ``model.model_build(...)``;
    # in Python 3 this behaves like a static method, but ``@staticmethod`` would be
    # clearer.
    def model_build(acc_cnn_layer, acc_feed_layer,
                    optimizer, epochs, batch, filters, neurons, kernel_s, drop_neurons):
        """Train the network and return its test-set accuracy (``score[1]``).

        Depends on module-level names defined elsewhere in the script
        (Sequential, Conv2D, Dropout, Flatten, Dense, ReduceLROnPlateau,
        ModelCheckpoint, load_model, input_shape, numPCAcomponents,
        X_train/y_train, X_test/y_test) — presumably Keras imports plus a
        prepared hyperspectral dataset; confirm against the full source.
        """
        # Echo the sampled configuration for traceability during the search.
        print("Activation cnn layer :", acc_cnn_layer)
        print("Activation feed layer :", acc_feed_layer)
        print("optimizer :", optimizer)
        print("batch :", batch)
        print("epochs :", epochs)
        print("dropout :", drop_neurons)
        print("no of filters :", filters)
        # NOTE(review): ``filters``, ``neurons``, and ``kernel_s`` are accepted and
        # printed, but the layer sizes below are hard-coded (64, 128, (3,3),
        # 6*numPCAcomponents), so those three hyperparameters never influence the
        # architecture — confirm whether they were meant to be wired in.
        model = Sequential()
        model.add(Conv2D(64, (3, 3), activation=acc_cnn_layer, input_shape=input_shape))
        model.add(Conv2D(128, (3, 3), activation=acc_cnn_layer))
        model.add(Dropout(drop_neurons))
        model.add(Flatten())
        model.add(Dense(6 * numPCAcomponents, activation=acc_feed_layer))
        model.add(Dropout(drop_neurons))
        # 16-way softmax output — presumably 16 target classes; verify.
        model.add(Dense(16, activation='softmax'))
        # Define optimization and train method
        reduce_lr = ReduceLROnPlateau(monitor='val_acc', factor=0.9, patience=25,
                                      min_lr=0.000001, verbose=1)
        checkpointer = ModelCheckpoint(filepath="checkpoint.hdf5", verbose=1,
                                       save_best_only=False)
        # sgd = SGD(lr=0.001, decay=1e-6, momentum=0.9, nesterov=True)
        model.compile(loss='categorical_crossentropy', optimizer=optimizer,
                      metrics=['accuracy'])
        # Start to train model
        history = model.fit(X_train, y_train,
                            batch_size=batch,
                            epochs=epochs,
                            verbose=1,
                            validation_data=(X_test, y_test),
                            callbacks=[reduce_lr, checkpointer],
                            shuffle=True)
        # Round-trip the trained model through disk before evaluating.
        model.save('HSI_model_epochs100.h5')
        model = load_model('HSI_model_epochs100.h5')
        score = model.evaluate(X_test, y_test, batch_size=32)
        Test_Loss = score[0]
        Test_accuracy = score[1]
        return Test_accuracy
I want to find the optimal hyperparameters of the above deep learning model using a grey wolf optimizer. Could you please help me with this process using the EvoloPy library?