1. Load the DNN face-detector model files
  2. Locate the face in the image
  3. Crop the face out and convert it to a blob
  4. Batch-convert the images to blobs plus labels and save them as an npz file
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tqdm
import os,glob
%matplotlib inline


# OpenCV's ResNet-10 SSD face detector (Caffe prototxt + weights)
face_detector = cv2.dnn.readNetFromCaffe('./weight/deploy.prototxt.txt','./weight/res10_300x300_ssd_iter_140000.caffemodel')


def face_detect(img):
    # Build a 300x300 blob with the model's mean values subtracted
    img_blob = cv2.dnn.blobFromImage(img,1,(300,300),(104, 117, 123),swapRB=True)
    face_detector.setInput(img_blob)
    detections = face_detector.forward()
    img_h,img_w = img.shape[:2]
    # detections has shape (1, 1, N, 7); axis 2 indexes the candidate boxes
    detection_count = detections.shape[2]
    for face_index in range(detection_count):
        confidence = detections[0,0,face_index,2]
        if confidence > 0.8:
            # Box coordinates are normalized to [0, 1]; scale back to pixels
            # and clamp to the image bounds so the crop never goes negative
            locations = detections[0,0,face_index,3:7] * np.array([img_w,img_h,img_w,img_h])
            l,t,r,b = np.clip(locations, 0, [img_w,img_h,img_w,img_h]).astype('int')
            return img[t:b,l:r]
    return None
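
A quick sanity check for face_detect (a sketch; 'data/test.jpg' is a placeholder path, not part of the dataset):
test_img = cv2.imread('data/test.jpg')  # placeholder path
face = face_detect(test_img)
if face is None:
    print('no face above the 0.8 confidence threshold')
else:
    # cv2.imread gives BGR; reverse the channels for matplotlib
    plt.imshow(face[:, :, ::-1])
    plt.axis('off')
    plt.show()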


def img2blob(img):
    # blobFromImage returns NCHW, i.e. shape (1, 3, 300, 300)
    img_blob = cv2.dnn.blobFromImage(img,1,(300,300),(104, 117, 123),swapRB=True)
    # Squeeze to (3, 300, 300); the .T + rotate + flip sequence rearranges
    # the axes to channels-last (300, 300, 3), the layout Keras expects
    img_squeeze = np.squeeze(img_blob).T
    img_rotate = cv2.rotate(img_squeeze,cv2.ROTATE_90_CLOCKWISE)
    img_flip = cv2.flip(img_rotate,1)
    # Mean subtraction leaves negatives; clamp them and scale to [0, 1]
    img_blob = np.maximum(img_flip,0) / img_flip.max()
    return img_blob
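
The .T / rotate / flip sequence is just an axis shuffle from channels-first to channels-last; an equivalent sketch using a single np.transpose (same result, arguably easier to read):
def img2blob_alt(img):
    # (1, 3, 300, 300) -> squeeze -> (3, 300, 300) -> transpose -> (300, 300, 3)
    img_blob = cv2.dnn.blobFromImage(img, 1, (300, 300), (104, 117, 123), swapRB=True)
    img_hwc = np.transpose(np.squeeze(img_blob), (1, 2, 0))
    return np.maximum(img_hwc, 0) / img_hwc.max()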


path = ['data/AFDB_face_dataset','data/AFDB_masked_face_dataset']


# Label 0: unmasked faces, label 1: masked faces (matching the order of `path`)
file_list = {0:[],1:[]}
label_list = []
img_list = []
for index, item in enumerate(path):
    file_list[index] = glob.glob(item+'/*/*.jpg')


for label in file_list:
    for file in tqdm.tqdm(file_list[label]):
        img = cv2.imread(file)
        # Skip unreadable files and images with no confident face detection
        if img is None:
            continue
        img_crop = face_detect(img)
        if img_crop is None:
            continue
        img_blob = img2blob(img_crop)
        img_list.append(img_blob)
        label_list.append(label)


x = np.asarray(img_list)
y = np.asarray(label_list)


np.savez("./data/save.npz",x,y)
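
Reloading the file is a cheap way to confirm the arrays round-trip; a minimal check:
check = np.load('./data/save.npz')
# Positional savez arguments are stored under the default keys arr_0 / arr_1
print(check['arr_0'].shape)  # (N, 300, 300, 3)
print(check['arr_1'].shape)  # (N,)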

1. Load the saved blob and label arrays
  2. One-hot encode the labels and split train/test
  3. Define and train a small CNN
  4. Save the trained model
import cv2
import numpy as np
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow.keras import layers
from tensorflow.keras import Sequential
from sklearn.preprocessing import OneHotEncoder
%matplotlib inline

# Load the saved training data (not the model); pass allow_pickle in case
# the arrays contain Python objects
arr = np.load('./data/save.npz', allow_pickle=True)
img_list = arr['arr_0']
label_list = arr['arr_1']

# Reshape the labels to an (N, 1) column, the shape OneHotEncoder expects
label_list = label_list.reshape(-1,1)
label_list.shape


onehot = OneHotEncoder()
label_list_onehot = onehot.fit_transform(label_list)

# Convert the sparse matrix to a dense array: categorical_crossentropy expects
# dense one-hot targets (sparse_categorical_crossentropy takes integer labels instead)
label_list_onehot_arr = label_list_onehot.toarray()
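
For two classes the encoding is easy to eyeball; a tiny illustration of what OneHotEncoder produces:
demo = OneHotEncoder().fit_transform(np.array([[0], [1], [1]])).toarray()
print(demo)  # [[1. 0.] [0. 1.] [0. 1.]] -- label 0 -> [1, 0], label 1 -> [0, 1]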

# Import the train/test split helper
from sklearn.model_selection import train_test_split

x_train,x_test,y_train,y_test = train_test_split(img_list,label_list_onehot_arr,test_size=0.2,random_state=42)


# Sanity-check the split sizes (80/20)
len(img_list)
len(x_train)

# Define the CNN
model = Sequential([
    # In current Keras versions the input shape is declared with an explicit Input layer
    layers.Input(shape=(300,300,3)),
    # 16 filters of size 3x3
    # padding='same' keeps the output the same size as the input
    # (zero padding: border pixels are filled with 0 0 0)
    # ReLU zeroes out negative activations and leaves the rest unchanged
    layers.Conv2D(16,3,padding='same',activation="relu"),
    # Max pooling halves the spatial size
    layers.MaxPool2D(),
    layers.Conv2D(32,3,padding='same',activation="relu"),
    layers.MaxPool2D(),
    layers.Conv2D(64,3,padding='same',activation="relu"),
    layers.MaxPool2D(),
    # Flatten to a 1-D tensor, e.g. a 12x12x64 feature map flattens to 12*12*64 = 9216 values
    layers.Flatten(),
    # Hidden layer: 136 units, from the rough rule of thumb sqrt(2 * 9216) ≈ 136.
    # There is no exact formula here; tune this gradually.
    layers.Dense(136,activation="relu"),
    # Same heuristic again: sqrt(2 * 136) ≈ 16
    layers.Dense(16,activation="relu"),
    # Two output classes; softmax turns the outputs into a probability
    # distribution, which is what categorical_crossentropy expects
    layers.Dense(2,activation="softmax"),
])
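
model.summary() makes the size arithmetic concrete: with a 300x300 input, the three pooling layers give 150, 75, then 37, so Flatten actually produces 37*37*64 = 87,616 features here (the 12x12x64 = 9216 in the comment above is only a worked example):
model.summary()  # prints each layer's output shape and parameter count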

# Compile: Adam optimizer (lr=0.001), categorical_crossentropy loss, track accuracy
model.compile(optimizer=tf.keras.optimizers.Adam(learning_rate=0.001),loss=tf.keras.losses.categorical_crossentropy,metrics=['accuracy'])

# Train for 10 epochs, batch size 30, validating on the held-out split
history = model.fit(x=x_train,y=y_train,validation_data=(x_test,y_test),batch_size=30,epochs=10)

# Plot the training and validation loss curves
import pandas as pd
history_pd = pd.DataFrame(history.history)
plt.plot(history_pd['loss'])
plt.plot(history_pd['val_loss'])
plt.legend(['loss', 'val_loss'])
plt.show()
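
Since the model was compiled with metrics=['accuracy'], history.history also carries accuracy curves; plotting them is one more cell:
plt.plot(history_pd['accuracy'])
plt.plot(history_pd['val_accuracy'])
plt.legend(['accuracy', 'val_accuracy'])
plt.show()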

# Save the model (TensorFlow SavedModel format, written to the ./model directory)
model.save('model')
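
To close the loop, an end-to-end inference sketch: it reloads the saved model and reuses face_detect and img2blob from the preprocessing script ('data/test.jpg' is a placeholder path):
loaded = tf.keras.models.load_model('model')
img = cv2.imread('data/test.jpg')  # placeholder path
crop = face_detect(img)
if crop is not None:
    blob = img2blob(crop)
    pred = loaded.predict(blob[np.newaxis, ...])  # add the batch axis -> (1, 300, 300, 3)
    print('masked' if pred[0].argmax() == 1 else 'unmasked')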