当前位置: 代码迷 >> 综合 >> tensorflow2.0实现自己的数据集的分类任务-DenseNet121
  详细解决方案

tensorflow2.0实现自己的数据集的分类任务-DenseNet121

热度:26   发布时间:2023-12-27 04:53:34.0

1、数据集要求:图片名称要用自己的标签作为开头,例如:crack.0.jpg 和 uncrack.1.jpg(标签为文件名中第一个“.”之前的部分)。

2、在代码中将 dicClass 和 classnum 修改为自己的标签映射和类别数。

3、运行下面程序进行训练。

import numpy as np
from tensorflow.keras.optimizers import Adam
import cv2
from tensorflow.keras.preprocessing.image import img_to_array
from sklearn.model_selection import train_test_split
from tensorflow.python.keras.callbacks import ModelCheckpoint, ReduceLROnPlateau
from tensorflow.keras.applications.densenet import DenseNet121
import os
from tensorflow.keras.models import load_model, Sequential
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
# ---- Configuration ----
norm_size = 128                          # images are resized to norm_size x norm_size
datapath = 'E:\\Users\\tf2.0_densenet\\data\\train'
EPOCHS = 100
INIT_LR = 1e-3                           # initial learning rate for Adam
labelList = []                           # filled as a side effect by loadImageData()
dicClass = {'uncrack': 0, 'crack': 1}    # label name -> class index
classnum = 2                             # number of classes (must match dicClass)
batch_size = 16


def loadImageData():
    """Load every image under ``datapath``, resize to ``norm_size`` and
    scale pixel values to [0, 1].

    The class label is taken from the file-name prefix before the first
    dot (e.g. ``crack.0.jpg`` -> ``crack``) and appended to the
    module-level ``labelList`` as a side effect.

    Returns:
        np.ndarray of shape (num_images, norm_size, norm_size, channels)
        with float values in [0, 1].
    """
    imageList = []
    for img in os.listdir(datapath):
        # Label is the file-name prefix before the first '.'.
        labelName = dicClass[img.split('.')[0]]
        print(labelName)
        labelList.append(labelName)
        dataImgPath = os.path.join(datapath, img)
        print(dataImgPath)
        # imdecode + np.fromfile handles non-ASCII (e.g. Chinese) paths
        # that cv2.imread cannot open on Windows.
        image = cv2.imdecode(np.fromfile(dataImgPath, dtype=np.uint8), -1)
        image = cv2.resize(image, (norm_size, norm_size),
                           interpolation=cv2.INTER_LANCZOS4)
        image = img_to_array(image)
        imageList.append(image)
    # Pixel values are whole numbers; cast to int then scale to [0, 1].
    imageList = np.array(imageList, dtype="int") / 255.0
    return imageList


print("开始加载数据")
imageArr = loadImageData()
labelList = np.array(labelList)
print("加载数据完成")
print(labelList)

# Backbone: DenseNet121 without its classification head, trained from
# scratch (weights=None).
model_base = DenseNet121(include_top=False, weights=None, classes=classnum,
                         input_shape=[norm_size, norm_size, 3])
model = Sequential([
    model_base,
    GlobalAveragePooling2D(),
    # Use classnum instead of a hard-coded 2 so the head follows dicClass.
    Dense(classnum, activation='softmax'),
])
model.summary()

# 'learning_rate' is the TF2 keyword; 'lr' is deprecated.
optimizer = Adam(learning_rate=INIT_LR)
# Labels are integer class indices, hence sparse_categorical_crossentropy.
model.compile(optimizer=optimizer, loss='sparse_categorical_crossentropy',
              metrics=['accuracy'])

trainX, valX, trainY, valY = train_test_split(imageArr, labelList,
                                              test_size=0.3, random_state=42)

from tensorflow.keras.preprocessing.image import ImageDataGenerator

train_datagen = ImageDataGenerator(featurewise_center=True,
                                   featurewise_std_normalization=True,
                                   rotation_range=20,
                                   width_shift_range=0.2,
                                   height_shift_range=0.2,
                                   horizontal_flip=True)
# featurewise_* options require the generator statistics to be computed
# from the training data before flow() is used.
train_datagen.fit(trainX)
val_datagen = ImageDataGenerator()  # no augmentation for the validation set

train_generator = train_datagen.flow(trainX, trainY, batch_size=batch_size,
                                     shuffle=True)
val_generator = val_datagen.flow(valX, valY, batch_size=batch_size,
                                 shuffle=True)

checkpointer = ModelCheckpoint(filepath='weights_best_Deset_model.hdf5',
                               monitor='val_accuracy', verbose=1,
                               save_best_only=True, mode='max')
reduce = ReduceLROnPlateau(monitor='val_accuracy', patience=10, verbose=1,
                           factor=0.5, min_lr=1e-6)

# Step counts must be integers; round up so every sample is seen each epoch.
history = model.fit_generator(
    train_generator,
    steps_per_epoch=int(np.ceil(trainX.shape[0] / batch_size)),
    validation_data=val_generator,
    epochs=EPOCHS,
    validation_steps=int(np.ceil(valX.shape[0] / batch_size)),
    callbacks=[checkpointer, reduce],
    verbose=1, shuffle=True)
model.save('my_model_Desnet.h5')
print(history)
loss_trend_graph_path = r"WW_loss.jpg"
acc_trend_graph_path = r"WW_acc.jpg"

import matplotlib.pyplot as plt

print("Now,we start drawing the loss and acc trends graph...")
# summarize history for accuracy
fig = plt.figure(1)
plt.plot(history.history["accuracy"])
plt.plot(history.history["val_accuracy"])
plt.title("Model accuracy")
plt.ylabel("accuracy")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig(acc_trend_graph_path)
plt.close(1)
# summarize history for loss
fig = plt.figure(2)
plt.plot(history.history["loss"])
plt.plot(history.history["val_loss"])
plt.title("Model loss")
plt.ylabel("loss")
plt.xlabel("epoch")
plt.legend(["train", "test"], loc="upper left")
plt.savefig(loss_trend_graph_path)
plt.close(2)
print("We are done, everything seems OK...")
# Windows: uncomment to shut the machine down 10s after training finishes.
# os.system("shutdown -s -t 10")

测试:修改路径即可使用,支持多张图像测试,(使用请点赞,谢谢!)。

import cv2
import numpy as np
from tensorflow.keras.preprocessing.image import img_to_array
from tensorflow.keras.models import load_model, Model
import time
import glob, os
from tensorflow.keras.applications.densenet import DenseNet121
from tensorflow.keras.models import load_model, Sequential
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense
import matplotlib.pyplot as plt
# ---- Inference / CAM visualization configuration ----
norm_size = 128                          # must match the size used at training time
imagelist = []
# class index -> label name (inverse of dicClass)
emotion_labels = {0: 'uncrack', 1: 'crack'}
dicClass = {'uncrack': 0, 'crack': 1}
labelList = []
datapath = 'E:\\Users\\tf2.0_densenet\\data\\test\\test_data'

emotion_classifier = load_model("my_model_Desnet.h5")
emotion_classifier.summary()
# NOTE(review): the CAM below needs a layer named 'final_act'; the
# Sequential model saved by the training script (DenseNet121 + GAP + Dense)
# exposes no layer with that name, so get_layer() would raise —
# verify the layer name against the summary() output above.
output = emotion_classifier.get_layer('final_act').output
sub_model = Model(inputs=emotion_classifier.input, outputs=output)
t1 = time.time()

test_path = 'E:/Users/tf2.0_densenet/test/*.jpg'
save_path = 'E:\\Users\\tf2.0_densenet\\test_save\\'
jpg_paths = glob.glob(test_path)
for jpg_path in jpg_paths:
    imagelist = []
    nake_name = os.path.basename(jpg_path)
    # imdecode + np.fromfile copes with non-ASCII paths on Windows.
    image1 = cv2.imdecode(np.fromfile(jpg_path, dtype=np.uint8), -1)
    # Preprocess exactly as during training: resize then scale to [0, 1].
    image = cv2.resize(image1, (norm_size, norm_size),
                       interpolation=cv2.INTER_LANCZOS4)
    image = img_to_array(image)
    imagelist.append(image)
    imageList = np.array(imagelist, dtype="float") / 255.0
    pre = np.argmax(emotion_classifier.predict(imageList))
    pre_value = pre

    # Class-activation map: take the final Dense layer's kernel column for
    # the predicted class and combine it with the feature maps.
    weights_list = emotion_classifier.layers[-1].get_weights()
    a = []
    for k in weights_list:
        print(k)
        a = k[:, pre_value]   # kernel column for the predicted class
        break                 # only the kernel matrix; skip the bias vector
    weights = a
    convs = sub_model.predict(imageList)
    # Drop the batch dimension: (1, h, w, c) -> (h, w, c).
    convs_reshape = np.reshape(convs,
                               [convs.shape[1], convs.shape[2], convs.shape[3]])
    heat_map = np.matmul(convs_reshape, weights)
    plt.imshow(heat_map)
    plt.axis('off')
    plt.savefig('E:\\Users\\tf2.0_densenet\\test_save\\' + nake_name + '_map.jpg')
    plt.show()

    # Annotate and display the original image with the predicted label.
    emotion = emotion_labels[pre]
    cv2.namedWindow(emotion, 0)
    cv2.resizeWindow(emotion, 400, 400)
    font = cv2.FONT_HERSHEY_SIMPLEX
    cv2.putText(image1, emotion, (10, 30), font, 0.8, [0, 0, 255], 2)
    cv2.imshow(emotion, image1)
    cv2.waitKey(0)
    cv2.imwrite(save_path + nake_name, image1)
    t2 = time.time()
    print(emotion)
    t3 = t2 - t1
    print(t3)