Box


https://wwc.lanzoul.com/b03j3017e
Password: 3bq8

The environment is missing torch==1.10.2; you can switch to the Tsinghua mirror source to download it:

pip install -i https://pypi.tuna.tsinghua.edu.cn/simple torch==1.10.2

Creating a Python 3.6 environment in Anaconda

  • Create: conda create -n py36 python=3.6

    • If creation fails, run: conda config --show-sources (to see which channel configuration files are in effect)
  • Activate the py36 environment: conda activate py36

  • Deactivate: conda deactivate

  • Remove the environment: conda remove -n py36 --all

  • Tsinghua mirror: switch conda and pip to the Tsinghua mirror source (a sketch of the commands follows this list)

  • Upgrade pip:

    • python -m pip install --upgrade pip
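
The mirror bullet above is not spelled out in the original; a minimal sketch of the usual commands, assuming the standard Tsinghua Anaconda/PyPI mirror URLs (the URLs are not part of the post itself):

# Point conda at the Tsinghua Anaconda mirror
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/main/
conda config --add channels https://mirrors.tuna.tsinghua.edu.cn/anaconda/pkgs/free/
conda config --set show_channel_urls yes
# Point pip at the Tsinghua PyPI mirror permanently
python -m pip config set global.index-url https://pypi.tuna.tsinghua.edu.cn/simple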

Environment (package versions)

  • pip install numpy==1.19.5
  • pip install keras==2.1.5
  • pip install pandas==1.1.5
  • pip install tensorflow==1.14.0
  • pip install dlib==19.7.0
  • pip install opencv_python==4.5.5.64
  • pip install Pillow==8.4.0
  • pip install h5py==2.10.0
  • pip install sklearn==0.0
  • Download these via the Tsinghua mirror source (a sketch follows this list)
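
A convenient way to apply all of the pins above in one step is a requirements file; this is a sketch (the file name and grouping are mine, the versions are the ones listed above):

# requirements.txt
numpy==1.19.5
keras==2.1.5
pandas==1.1.5
tensorflow==1.14.0
dlib==19.7.0
opencv_python==4.5.5.64
Pillow==8.4.0
h5py==2.10.0
sklearn==0.0

# install everything from the Tsinghua mirror
pip install -i https://pypi.tuna.tsinghua.edu.cn/simple -r requirements.txt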

Offline pip download

Method ②

  • Find an online machine and install the same version of Python as the offline target.

  • Create a new site-packages directory.

  • Inside the site-packages directory, run: pip freeze > requirements.txt

    • requirements.txt now lists every dependency package installed for that machine's Python
  • Still inside the site-packages directory, run pip download -r requirements.txt to download every package in the dependency list.

  • Archive the site-packages folder and move it to the offline machine that needs these dependencies.

  • On the offline machine, run: pip install --no-index --find-links=/xxx/site-packages -r /xxx/site-packages/requirements.txt

    (/xxx/ is the absolute path on the offline machine; a condensed sketch of the whole flow follows this list.)
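
A condensed sketch of the steps above (the directory name and the /xxx/ placeholder path are kept as in the post):

# On the online machine (same Python version as the offline target)
mkdir site-packages
cd site-packages
pip freeze > requirements.txt
pip download -r requirements.txt
# Archive site-packages, copy it to the offline machine, then run there:
pip install --no-index --find-links=/xxx/site-packages -r /xxx/site-packages/requirements.txt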

Migration complete.

Utility functions

import os
import shutil


def move_pic(dir_str, new_src):
    '''
    Move every file in a folder tree to a new location
    :param dir_str: old (source) directory
    :param new_src: new destination directory
    :return:
    '''
    for root, dir, files in os.walk(dir_str):
        for file in files:  # files is a list
            src = os.path.join(str(root), str(file))
            # print(dir)
            # print(files)
            # print("-------")
            shutil.move(src, new_src)


def rename(src):
    '''
    Rename files; by default they are renamed 1, 2, 3, ...
    :param src: directory to process
    :return:
    '''
    num = 1
    for root, dir, files in os.walk(src):
        for file in files:
            old_str = os.path.join(str(root), str(file))
            new_str = os.path.join(str(root), f"{num}.png")
            os.rename(old_str, new_str)
            num += 1


if __name__ == '__main__':
    # move_pic(
    #     r"C:\Users\lhy\Desktop\xiaolu\我的文件\智慧零售-培训改\新建文件夹\lfw",
    #     r"C:\Users\lhy\Desktop\xiaolu\我的文件\智慧零售-培训改\新建文件夹\face")
    # rename(r"C:\Users\lhy\Desktop\xiaolu\我的文件\智慧零售-培训改\linxiaolu__\parter_faces")
    print(type(1))
    # os.remove(r".\1.png")

total_competition02.py

from ctypes import *
import random
import os, sys
# sys.path.append("MvImport")
# from MvImport.MvCameraControl_class import *
import cv2
import dlib
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Flatten, MaxPooling2D, Convolution2D, Activation
from keras.optimizers import SGD
from read_data01 import load_data01
from read_data02 import load_data02
from PIL import Image, ImageDraw, ImageFont
import numpy as np


def match(dex):
    '''
    Map a predicted class index to its label
    :return: label name (str)
    '''
    if dex == 0:
        return "草莓"
    elif dex == 1:
        return "方形蛋糕"
    elif dex == 2:
        return "圆形蛋糕"
    elif dex == 3:
        return "莲雾果"
    elif dex == 4:
        return "苹果"
    elif dex == 5:
        return "红椒"
    elif dex == 6:
        return "全开盖"
    elif dex == 7:
        return "易拉盖"
    elif dex == 8:
        return "易撕盖"


# def print_info(info):
#     printer_name = win32print.GetDefaultPrinter()
#
#     if sys.version_info >= (3,):
#         raw_data = bytes(f"\n\n\n\n\n\n\n\n{info}\n\n\n\n\n\n\n\n", "gbk")
#     else:
#         raw_data = "testss"
#
#     hPrinter = win32print.OpenPrinter(printer_name)
#     try:
#         hJob = win32print.StartDocPrinter(hPrinter, 1, ("testss of raw data", None, "RAW"))
#         try:
#             win32print.StartPagePrinter(hPrinter)
#             win32print.WritePrinter(hPrinter, raw_data)
#             win32print.EndPagePrinter(hPrinter)
#         finally:
#             win32print.EndDocPrinter(hPrinter)
#     finally:
#         win32print.ClosePrinter(hPrinter)


def change_cv2_draw(image, strs, local, sizes, colour):
    '''
    Draw Chinese text on an image (cv2.putText cannot render Chinese)
    :param image: image
    :param strs: label text
    :param local: position, (x, y)
    :param sizes: font size
    :param colour: font colour
    :return: annotated image
    '''
    cv2img = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
    pilimg = Image.fromarray(cv2img)
    draw = ImageDraw.Draw(pilimg)  # draw on the image
    font = ImageFont.truetype("SIMYOU.TTF", sizes, encoding="utf-8")
    draw.text(local, strs, colour, font=font)
    image = cv2.cvtColor(np.array(pilimg), cv2.COLOR_RGB2BGR)
    return image


'''
Task 1
1. Capture training images with the industrial camera
2. Train a model ==> model a
3. Use model a to recognise goods, together with the industrial camera and the
   photoelectric sensor (camera.py), and print the result at the end
Task 2: face recognition
1. Capture the contestant's face with a USB camera
2. Train a model
3. Recognise the face
'''


# class HHV:
#     def __init__(self, index=0):
#
#         self.init_cam()
#         self.save_image2local(index)
#         self.exit_cam()
#
#     def init_cam(self, ):
#         deviceList = MV_CC_DEVICE_INFO_LIST()
#         tlayerType = MV_GIGE_DEVICE | MV_USB_DEVICE
#         ret = MvCamera.MV_CC_EnumDevices(tlayerType, deviceList)
#         nConnectionNum = 0
#         # ch:创建相机实例 | en:Creat Camera Object
#         self.cam = MvCamera()
#         # ch:选择设备并创建句柄 | en:Select device and create handle
#         stDeviceList = cast(deviceList.pDeviceInfo[int(nConnectionNum)],
#                             POINTER(MV_CC_DEVICE_INFO)).contents
#         ret = self.cam.MV_CC_CreateHandle(stDeviceList)
#         # ch:打开设备 | en:Open device
#         ret = self.cam.MV_CC_OpenDevice(MV_ACCESS_Exclusive, 0)
#         # ch:设置触发模式为off | en:Set trigger mode as off
#         ret = self.cam.MV_CC_SetEnumValue("TriggerMode", MV_TRIGGER_MODE_OFF)
#
#         # ch:获取数据包大小 | en:Get payload size
#         stParam = MVCC_INTVALUE()
#         memset(byref(stParam), 0, sizeof(MVCC_INTVALUE))
#
#         ret = self.cam.MV_CC_GetIntValue("PayloadSize", stParam)
#
#         self.nPayloadSize = stParam.nCurValue
#
#     def save_image2local(self, index=0):
#         # ch:开始取流 | en:Start grab image
#         ret = self.cam.MV_CC_StartGrabbing()
#
#         stDeviceList = MV_FRAME_OUT_INFO_EX()
#         memset(byref(stDeviceList), 0, sizeof(stDeviceList))
#         self.data_buf = (c_ubyte * self.nPayloadSize)()
#
#         ret = self.cam.MV_CC_GetOneFrameTimeout(byref(self.data_buf), self.nPayloadSize, stDeviceList, 1000)
#         if ret == 0:
#             # print("get one frame: Width[%d], Height[%d], nFrameNum[%d]" % (stDeviceList.nWidth, stDeviceList.nHeight, stDeviceList.nFrameNum))
#             nRGBSize = stDeviceList.nWidth * stDeviceList.nHeight * 3
#             stConvertParam = MV_SAVE_IMAGE_PARAM_EX()
#             stConvertParam.nWidth = stDeviceList.nWidth
#             stConvertParam.nHeight = stDeviceList.nHeight
#             stConvertParam.pData = self.data_buf
#             stConvertParam.nDataLen = stDeviceList.nFrameLen
#             stConvertParam.enPixelType = stDeviceList.enPixelType
#             stConvertParam.nImageLen = stConvertParam.nDataLen
#             stConvertParam.nJpgQuality = 70
#             stConvertParam.enImageType = MV_Image_Jpeg
#             stConvertParam.pImageBuffer = (c_ubyte * nRGBSize)()
#             stConvertParam.nBufferSize = nRGBSize
#
#             ret = self.cam.MV_CC_ConvertPixelType(stConvertParam)
#             # print(stConvertParam.nImageLen)
#             ret = self.cam.MV_CC_SaveImageEx2(stConvertParam)
#             if ret != 0:
#                 print("convert pixel fail ! ret[0x%x]" % ret)
#                 del self.data_buf
#                 sys.exit()
#             file_path = "good_" + str(index) + ".jpg"
#             file_open = open(file_path.encode('ascii'), 'wb+')
#             img_buff = (c_ubyte * stConvertParam.nImageLen)()
#             cdll.msvcrt.memcpy(byref(img_buff), stConvertParam.pImageBuffer, stConvertParam.nImageLen)
#             file_open.write(img_buff)
#             # print("Save Image succeed!")
#
#     def exit_cam(self, ):
#         # ch:停止取流 | en:Stop grab image
#         ret = self.cam.MV_CC_StopGrabbing()
#         if ret != 0:
#             print("stop grabbing fail! ret[0x%x]" % ret)
#             del self.data_buf
#             sys.exit()
#         # ch:关闭设备 | Close device
#         ret = self.cam.MV_CC_CloseDevice()
#         if ret != 0:
#             print("close deivce fail! ret[0x%x]" % ret)
#             del self.data_buf
#             sys.exit()
#
#         # ch:销毁句柄 | Destroy handle
#         ret = self.cam.MV_CC_DestroyHandle()
#         if ret != 0:
#             print("destroy handle fail! ret[0x%x]" % ret)
#             del self.data_buf
#             sys.exit()
#
#         del self.data_buf


class Task01(object):
    def __init__(self):
        pass

    def train(self):
        '''
        Train the goods-classification model
        :return:
        '''
        # 1.
        def build_model(nb_classes=9):
            '''
            Build the CNN model
            :param nb_classes: number of classes
            :return: model
            '''
            # Sequential (linear stack) model
            model = Sequential()
            # Conv layer 1 (32 channels, 3x3 kernel, 'same' padding keeps the image size)
            model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=(64, 64, 3)))
            # Activation 1
            model.add(Activation('relu'))
            # Conv layer 2
            model.add(Convolution2D(32, 3, 3))
            # Activation 2
            model.add(Activation('relu'))
            # Pooling layer 1
            model.add(MaxPooling2D(pool_size=(2, 2)))
            # Dropout 1 (against overfitting): randomly drop 25% of the neurons each training iteration
            model.add(Dropout(0.25))
            # Conv layer 3
            model.add(Convolution2D(64, 3, 3, border_mode='same'))
            # Activation 3
            model.add(Activation('relu'))
            # Conv layer 4
            model.add(Convolution2D(64, 3, 3))
            # Activation 4
            model.add(Activation('relu'))
            # Pooling layer 2
            model.add(MaxPooling2D(pool_size=(2, 2)))
            # Dropout 2 (against overfitting)
            model.add(Dropout(0.25))
            # Flatten to a 1-D vector
            model.add(Flatten())
            # Dense layer 1 (fully connected)
            model.add(Dense(512))
            # Activation 5
            model.add(Activation('relu'))
            # Dropout 3 (against overfitting)
            model.add(Dropout(0.5))
            # Dense layer 2
            model.add(Dense(nb_classes))
            # Classification layer (final output)
            model.add(Activation('softmax'))
            print(model.summary())
            return model

        # 2. Build the model
        model = build_model()
        # 2.2 Fine-tune an existing model instead
        # model = load_model('./good.model.h5')
        # try:
        #     model.load_weights('./good.model.h5')
        #     print("加载成功")
        # except:
        #     print("加载失败")

        # 3. Create an optimizer (SGD + momentum)
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # 4. Configure the training process
        model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
        # 5. Load the dataset
        train_images, train_labels, valid_images, valid_labels, test_images, test_labels = load_data01()
        # 6. Train the model
        batch_size = 20
        nb_epoch = 100
        train_history = model.fit(train_images, train_labels,
                                  batch_size=batch_size,
                                  nb_epoch=nb_epoch,
                                  validation_data=(valid_images, valid_labels),
                                  shuffle=True)
        # 7. Evaluate the model
        scores = model.evaluate(test_images, test_labels)
        print('accuracy=', scores[1])
        # 8. Save the model
        model.save('./good.model1.h5')

    def predict(self):
        '''
        Recognise goods
        :return:
        '''
        # 1. Capture an image (GPIO-triggered)
        dll = windll.LoadLibrary('GpioLib.dll')
        dll.GPIO_Init()
        count = 0
        # 3. Load the model
        model = load_model('./good.model1.h5')
        while True:
            signal = dll.GPI_Read(0)
            print(signal)
            if signal == 1:
                count += 1
            if count == 5:
                # hhv = HHV()
                # 2. Feed the image into the model for recognition
                # (1) Read the image
                size = 64
                img = cv2.imread("./full_open/good_20.png")
                # img = cv2.imread("./good_1.jpg")
                # print(img.shape)
                # (2) Pre-process: resize
                img = cv2.resize(img, (size, size))
                # Reshape to 4-D, cast to float, normalise
                shape_img = (img.reshape(1, 64, 64, 3)).astype('float32') / 255
                # (4) Predict
                prediction = model.predict_classes(shape_img)
                label = prediction[0]
                print(label)  # print the label index
                # # Remove the temporary image
                # os.remove(r".\good_1.jpg")
                count = 0
                # 4. Print the result
                result = match(label)
                print(result)
                # print_info(result)

    def model_evaluate(self):
        train_images, train_labels, valid_images, valid_labels, test_images, test_labels = load_data01()
        model = load_model('./good.model1.h5')
        scores = model.evaluate(test_images, test_labels)
        print('accuracy=', scores[1])


class Task02(object):
    def __init__(self):
        pass

    def collect(self):
        '''
        Collect face images
        :return:
        '''
        def my_faces():
            '''
            Collect my own face data
            :return:
            '''
            def relight(img, light=1, bias=0):
                '''
                Change the contrast and brightness of an image
                :param img:
                :param light: contrast
                :param bias: brightness
                :return: img
                '''
                w = img.shape[1]
                h = img.shape[0]
                for i in range(0, w):
                    for j in range(0, h):
                        for c in range(3):
                            tmp = int(img[j, i, c] * light + bias)
                            if tmp > 255:
                                tmp = 255
                            elif tmp < 0:
                                tmp = 0
                            img[j, i, c] = tmp
                return img

            size = 160
            detector = dlib.get_frontal_face_detector()
            cap = cv2.VideoCapture(0)
            num = 1
            while True:
                # Read a frame from the camera
                sucess, img = cap.read()
                # Convert to grayscale
                gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                dets = detector(gray, 1)
                # Crop the face using the detected coordinates
                for i, d in enumerate(dets):
                    x1 = d.top() if d.top() > 0 else 0
                    y1 = d.bottom() if d.bottom() > 0 else 0
                    x2 = d.left() if d.left() > 0 else 0
                    y2 = d.right() if d.right() > 0 else 0
                    # Slice the image to get the face
                    img = img[x1:y1, x2:y2]
                    # Randomly adjust the contrast and brightness
                    img = relight(img, random.uniform(0.5, 1.5), random.randint(-50, 50))
                    # Resize every face to a fixed 160x160
                    img = cv2.resize(img, (size, size))
                    # Show the camera feed
                    cv2.imshow('----------please enter "s" to take a picture----------', img)
                    # Keep the window alive, wait for input
                    k = cv2.waitKey(1)
                    # if k == ord("s"):  # save the picture with the "s" key and continue
                    cv2.imwrite('./parter_faces/img_{}.png'.format(str(num)), img)
                    num += 1
                k = cv2.waitKey(1)
                # Quit with the ESC key (ASCII 27)
                if k == 27:
                    break
            # Release the camera
            cap.release()
            cv2.destroyAllWindows()

        def other_faces():
            '''
            Collect other people's face data
            :return:
            '''
            # 1. input_img holds other people's raw images,
            #    other_faces holds their cropped faces
            input_dir = './other_face'
            output_dir = './other_new_faces'
            # if not os.path.exists(input_dir):
            #     os.mkdir(input_dir)
            #     print('你创建了input_img!')
            # if not os.path.exists(output_dir):
            #     os.mkdir(output_dir)
            #     print('你创建了other_faces!')
            # 2. Use dlib's frontal_face_detector as the feature extractor
            detector = dlib.get_frontal_face_detector()
            index = 1  # save index for the images
            size = 160  # target size
            # 3. Walk through the input folder, crop the faces and save them
            for (path, dirnames, filenames) in os.walk(input_dir):
                for filename in filenames:
                    # keep only png images
                    if filename.endswith('.png'):
                        print('Being processed picture %s' % index)
                        img_path = path + '/' + filename
                        # Read the image
                        img = cv2.imread(img_path)
                        # Convert to grayscale
                        gray_img = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
                        # Detect faces; 1 means upsample once before detecting
                        dets = detector(gray_img, 1)
                        # Get the 4 coordinates of each face
                        for i, d in enumerate(dets):
                            x1 = d.top() if d.top() > 0 else 0
                            y1 = d.bottom() if d.bottom() > 0 else 0
                            x2 = d.left() if d.left() > 0 else 0
                            y2 = d.right() if d.right() > 0 else 0
                            # Slice out the face
                            face = img[x1:y1, x2:y2]
                            # Resize the face to a fixed size
                            face = cv2.resize(face, (size, size))
                            # Save with the running index
                            cv2.imshow('faces', face)
                            cv2.imwrite(output_dir + '/' + str(index) + '.png', face)
                            index += 1
                        # Quit with the ESC key (ASCII 27)
                        key = cv2.waitKey(30) & 0xff
                        if key == 27:
                            # exit the program
                            sys.exit(0)

        my_faces()
        # other_faces()

    def train(self):
        '''
        Train the face-recognition model
        :return:
        '''
        # 1.
        def build_model(nb_classes=3):
            '''
            Build the CNN model
            :param nb_classes: number of classes
            :return: model
            '''
            # Sequential (linear stack) model
            model = Sequential()
            # Conv layer 1 (32 channels, 3x3 kernel, 'same' padding keeps the image size)
            model.add(Convolution2D(32, 3, 3, border_mode='same', input_shape=(64, 64, 3)))
            # Activation 1
            model.add(Activation('relu'))
            # Conv layer 2
            model.add(Convolution2D(32, 3, 3))
            # Activation 2
            model.add(Activation('relu'))
            # Pooling layer 1
            model.add(MaxPooling2D(pool_size=(2, 2)))
            # Dropout 1 (against overfitting): randomly drop 25% of the neurons each training iteration
            model.add(Dropout(0.25))
            # Conv layer 3
            model.add(Convolution2D(64, 3, 3, border_mode='same'))
            # Activation 3
            model.add(Activation('relu'))
            # Conv layer 4
            model.add(Convolution2D(64, 3, 3))
            # Activation 4
            model.add(Activation('relu'))
            # Pooling layer 2
            model.add(MaxPooling2D(pool_size=(2, 2)))
            # Dropout 2 (against overfitting)
            model.add(Dropout(0.25))
            # Flatten to a 1-D vector
            model.add(Flatten())
            # Dense layer 1 (fully connected)
            model.add(Dense(512))
            # Activation 5
            model.add(Activation('relu'))
            # Dropout 3 (against overfitting)
            model.add(Dropout(0.5))
            # Dense layer 2
            model.add(Dense(nb_classes))
            # Classification layer (final output)
            model.add(Activation('softmax'))
            print(model.summary())
            return model

        # 2. Build the model
        model = build_model()
        # 2.2 Fine-tune an existing model instead
        # model = load_model('./me.face.model.h5')
        # try:
        #     model.load_weights('./me.face.model.h5')
        #     print("加载成功")
        # except:
        #     print("加载失败")
        #     sys.exit(0)

        # 3. Create an optimizer (SGD + momentum)
        sgd = SGD(lr=0.01, decay=1e-6, momentum=0.9, nesterov=True)
        # 4. Configure the training process
        model.compile(loss='categorical_crossentropy', optimizer=sgd, metrics=['accuracy'])
        # 5. Load the dataset
        train_images, train_labels, valid_images, valid_labels, test_images, test_labels = load_data02()
        # 6. Train the model
        batch_size = 50
        nb_epoch = 10
        train_history = model.fit(train_images, train_labels,
                                  batch_size=batch_size,
                                  nb_epoch=nb_epoch,
                                  validation_data=(valid_images, valid_labels),
                                  shuffle=True)
        # 7. Evaluate the model
        scores = model.evaluate(test_images, test_labels)
        print('accuracy=', scores[1])
        # 8. Save the model
        model.save('./testss.face.model.h5')

    def predict(self):
        '''
        Recognise faces
        :return:
        '''
        count = 0
        # target size
        size = 64
        # 2. Use dlib's frontal_face_detector as the feature extractor
        detector = dlib.get_frontal_face_detector()
        # 3. Open the camera, detect and crop faces
        #    (the argument is the input stream: camera index (0) or a video path)
        # cam = cv2.VideoCapture("./my_video.mp4")
        # cam = cv2.VideoCapture("./other_new_faces/7.jpg")
        cam = cv2.VideoCapture(0)
        # 1. Load the model
        model = load_model('./me.face.model3.h5')
        while True:
            # Read a frame from the camera
            _, img = cam.read()
            # Convert to grayscale
            gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
            # Detect faces; returns the 4 coordinates of each face
            dets = detector(gray_image, 1)
            # Crop the face using the coordinates
            for i, d in enumerate(dets):
                x1 = d.top() if d.top() > 0 else 0
                y1 = d.bottom() if d.bottom() > 0 else 0
                x2 = d.left() if d.left() > 0 else 0
                y2 = d.right() if d.right() > 0 else 0
                # 4. Pre-process: crop and resize the face
                face = img[x1:y1, x2:y2]
                # Reshape to 4-D, cast to float, normalise
                face = cv2.resize(face, (size, size))
                shape_img = (face.reshape(1, size, size, 3)).astype('float32') / 255
                # 5. Predict
                prediction = model.predict_classes(shape_img)
                print(prediction[0])
                # Text for the bounding box
                name = "unknown"
                if prediction[0] == 0:
                    print("识别出本人")
                    name = "林俊陆"
                elif prediction[0] == 1:
                    print("识别出本人")
                    name = "欧阳丹娜"
                else:
                    print("不是本人")
                    name = "未知"
                # Draw the bounding box
                cv2.rectangle(img, (x2, x1), (y2, y1), (255, 0, 0), 3)
                # Font-based labelling:
                # cv2.putText(img, name, (x2, x1), font, 3, (255, 255, 255), 1)
                # (image, text, position, font, size, colour, thickness)
                # ==> replaced by change_cv2_draw(image, strs, local, sizes, colour)
                # Label the bounding box (supports Chinese)
                img = change_cv2_draw(img, name, (x2, x1 - 58), 25, (244, 164, 96))
            cv2.namedWindow("image", cv2.WINDOW_NORMAL)
            cv2.imshow('image', img)
            key = cv2.waitKey(1) & 0xff
            if key == 27:
                sys.exit(0)
            elif key == ord("s"):
                # Save the current frame with the "s" key
                cv2.imwrite('./testss/img_{}.png'.format(str(count)), img)
                count += 1

    def model_evaluate(self):
        train_images, train_labels, valid_images, valid_labels, test_images, test_labels = load_data02()
        model = load_model('./zong.face.model3.h5')
        scores = model.evaluate(test_images, test_labels)
        print('accuracy=', scores[1])


if __name__ == '__main__':
    # task_one = Task01()  # create the Task 1 object
    task_two = Task02()  # create the Task 2 object
    # task_one.train()
    # task_one.predict()
    # task_one.model_evaluate()

    # task_two.collect()
    task_two.train()
    # task_two.predict()
    # task_two.model_evaluate()
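
A side note for anyone running this on newer Keras/TensorFlow releases: Sequential.predict_classes, used in both predict() methods above, exists in the pinned keras==2.1.5 but was removed from later versions. A minimal equivalent sketch (same shape_img variable as above):

import numpy as np
# equivalent of model.predict_classes(shape_img) on newer Keras versions
prediction = np.argmax(model.predict(shape_img), axis=-1)
label = prediction[0]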

read_data02.py

import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
import random


# 1.
def get_files(input_dir):
    '''
    Build a list of image file paths
    :param input_dir:
    :return:
    '''
    file_list = []
    for (path, dirnames, filenames) in os.walk(input_dir):
        for filename in filenames:
            if filename.endswith('.png') or filename.endswith('.bmp'):
                print(filename)
                full_path = os.path.join(path, filename)
                print(full_path)
                file_list.append(full_path)
    return file_list


# 2.
def getPaddingSize(img):
    '''
    Compute the padding needed to make an image square
    :param img:
    :return:
    '''
    # values we do not need can be assigned to '_'
    h, w, _ = img.shape
    top, bottom, left, right = (0, 0, 0, 0)
    # longest edge of the image
    longest = max(h, w)
    # pad the shorter edge; e.g. if the width is shorter, pad longest - w, split evenly on both sides
    if w < longest:
        tmp = longest - w
        # "//" is integer division
        left = tmp // 2
        right = tmp - left
    elif h < longest:
        tmp = longest - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right


# 3.
def read_img_label(file_list, label):
    '''
    Pad the image borders, resize the images and attach the label
    :param file_list:
    :param label:
    :return:
    '''
    size = 64
    imgs = []
    labs = []
    num = 0
    for filename in file_list:
        img = cv2.imread(filename)
        top, bottom, left, right = getPaddingSize(img)
        # Enlarge the image by padding its borders
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
        img = cv2.resize(img, (size, size))
        imgs.append(img)
        labs.append(label)
        num = num + 1
    return imgs, labs
# getPaddingSize and read_img_label: check whether an image is square; if not,
# pad the two shorter sides with black borders so the image becomes square


# 4.
def read_dataset():
    '''
    Merge the datasets (assign a label to the images in each folder)
    :return:
    '''
    all_imgs_list = []
    all_label_list = []
    # my own cropped faces
    input_dir = r".\my_crop_faces"
    # list of my own face image paths
    my_file_list = get_files(input_dir)
    label = 0  # [0, 1]
    my_imgs_list, my_labs_list = read_img_label(my_file_list, label)

    # partner's faces
    input_dir = r".\parter_faces"
    parter_list = get_files(input_dir)
    label = 1
    parter_imgs_list, parter_labs_list = read_img_label(parter_list, label)

    # other people's cropped faces
    input_dir = r".\other_new_faces"
    others_file_list = get_files(input_dir)
    label = 2
    others_imgs_list, others_labs_list = read_img_label(others_file_list, label)

    # Merge my faces and the other faces (and their labels) into single lists
    for img in my_imgs_list:
        all_imgs_list.append(img)
    for img in parter_imgs_list:
        all_imgs_list.append(img)
    for img in others_imgs_list:
        all_imgs_list.append(img)
    for label in my_labs_list:
        all_label_list.append(label)
    for label in parter_labs_list:
        all_label_list.append(label)
    for label in others_labs_list:
        all_label_list.append(label)

    # Convert the merged data to numpy arrays
    imgs_array = np.array(all_imgs_list)
    labs_array = np.array(all_label_list)
    return imgs_array, labs_array


# 5.
def load_data02(img_rows=64, img_cols=64, img_channels=3, nb_classes=3):
    '''
    Load, split and pre-process the dataset
    :param img_rows:
    :param img_cols:
    :param img_channels:
    :param nb_classes:
    :return:
    '''
    # Load the dataset into memory
    images, labels = read_dataset()
    print(images.shape)
    print(labels.shape)
    # Split into training and validation sets
    train_images, valid_images, train_labels, valid_labels = train_test_split(
        images, labels, test_size=0.3, random_state=random.randint(0, 100))
    # Split off a test set
    _, test_images, _, test_labels = train_test_split(
        images, labels, test_size=0.5, random_state=random.randint(0, 100))
    # Reshape to 4-D
    train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
    valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
    test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
    input_shape = (img_rows, img_cols, img_channels)
    # Print the size of each split
    print(train_images.shape[0], 'train samples')
    print(valid_images.shape[0], 'valid samples')
    print(test_images.shape[0], 'testss samples')
    # One-hot encode the labels
    train_labels = np_utils.to_categorical(train_labels, nb_classes)
    valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
    test_labels = np_utils.to_categorical(test_labels, nb_classes)
    print(train_labels.shape)
    print(valid_labels.shape)
    print(test_labels.shape)
    # Cast the images to float
    train_images = train_images.astype('float32')
    valid_images = valid_images.astype('float32')
    test_images = test_images.astype('float32')
    # Normalise to [0, 1]
    train_images /= 255
    valid_images /= 255
    test_images /= 255
    return train_images, train_labels, valid_images, valid_labels, test_images, test_labels


# train_images, train_labels, valid_images, valid_labels, test_images, test_labels = load_data02()

read_data01.py

import os
import cv2
import numpy as np
from sklearn.model_selection import train_test_split
from keras.utils import np_utils
import random


# 1.
def get_files(input_dir):
    '''
    Build a list of image file paths
    :param input_dir:
    :return:
    '''
    file_list = []
    for (path, dirnames, filenames) in os.walk(input_dir):
        for filename in filenames:
            if filename.endswith('.png') or filename.endswith('.bmp'):
                print(filename)
                full_path = os.path.join(path, filename)
                print(full_path)
                file_list.append(full_path)
    return file_list


# 2.
def getPaddingSize(img):
    '''
    Compute the padding needed to make an image square
    :param img:
    :return:
    '''
    # values we do not need can be assigned to '_'
    h, w, _ = img.shape
    top, bottom, left, right = (0, 0, 0, 0)
    # longest edge of the image
    longest = max(h, w)
    # pad the shorter edge; e.g. if the width is shorter, pad longest - w, split evenly on both sides
    if w < longest:
        tmp = longest - w
        # "//" is integer division
        left = tmp // 2
        right = tmp - left
    elif h < longest:
        tmp = longest - h
        top = tmp // 2
        bottom = tmp - top
    else:
        pass
    return top, bottom, left, right


# 3.
def read_img_label(file_list, label):
    '''
    Pad the image borders, resize the images and attach the label
    :param file_list:
    :param label:
    :return:
    '''
    size = 64
    imgs = []
    labs = []
    num = 0
    for filename in file_list:
        img = cv2.imread(filename)
        top, bottom, left, right = getPaddingSize(img)
        # Enlarge the image by padding its borders
        img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0, 0, 0])
        img = cv2.resize(img, (size, size))
        imgs.append(img)
        labs.append(label)
        num = num + 1
    return imgs, labs
# getPaddingSize and read_img_label: check whether an image is square; if not,
# pad the two shorter sides with black borders so the image becomes square


# 4.
def read_dataset():
    '''
    Merge the datasets (assign a label to the images in each folder)
    :return:
    '''
    all_imgs_list = []
    all_label_list = []

    input_dir0 = r".\strawberries"
    file_list = get_files(input_dir0)
    label = 0
    strawberries_imgs_list, strawberries_labs_list = read_img_label(file_list, label)

    input_dir1 = r".\square_cake"
    file_list = get_files(input_dir1)
    label = 1
    square_imgs_list, square_labs_list = read_img_label(file_list, label)

    input_dir2 = r".\round_cake"
    file_list = get_files(input_dir2)
    label = 2
    round_imgs_list, round_labs_list = read_img_label(file_list, label)

    input_dir3 = r".\lotus_mist"
    label = 3
    file_list = get_files(input_dir3)
    lotus_imgs_list, lotus_labs_list = read_img_label(file_list, label)

    input_dir4 = r".\apple"
    label = 4
    file_list = get_files(input_dir4)
    apple_imgs_list, apple_labs_list = read_img_label(file_list, label)

    input_dir5 = r".\pepper"
    label = 5
    file_list = get_files(input_dir5)
    pepper_imgs_list, pepper_labs_list = read_img_label(file_list, label)

    input_dir6 = r".\full_open"
    label = 6
    file_list = get_files(input_dir6)
    full_imgs_list, full_labs_list = read_img_label(file_list, label)

    input_dir7 = r".\easy_lids"
    label = 7
    file_list = get_files(input_dir7)
    easy_imgs_list, easy_labs_list = read_img_label(file_list, label)

    input_dir8 = r".\tear_cover"
    label = 8
    file_list = get_files(input_dir8)
    tear_imgs_list, tear_labs_list = read_img_label(file_list, label)

    # Merge all images and labels into single lists
    for img in strawberries_imgs_list:
        all_imgs_list.append(img)
    for img in square_imgs_list:
        all_imgs_list.append(img)
    for img in round_imgs_list:
        all_imgs_list.append(img)
    for img in lotus_imgs_list:
        all_imgs_list.append(img)
    for img in apple_imgs_list:
        all_imgs_list.append(img)
    for img in pepper_imgs_list:
        all_imgs_list.append(img)
    for img in full_imgs_list:
        all_imgs_list.append(img)
    for img in easy_imgs_list:
        all_imgs_list.append(img)
    for img in tear_imgs_list:
        all_imgs_list.append(img)
    for label in strawberries_labs_list:
        all_label_list.append(label)
    for label in square_labs_list:
        all_label_list.append(label)
    for label in round_labs_list:
        all_label_list.append(label)
    for label in lotus_labs_list:
        all_label_list.append(label)
    for label in apple_labs_list:
        all_label_list.append(label)
    for label in pepper_labs_list:
        all_label_list.append(label)
    for label in full_labs_list:
        all_label_list.append(label)
    for label in easy_labs_list:
        all_label_list.append(label)
    for label in tear_labs_list:
        all_label_list.append(label)

    # Convert the merged data to numpy arrays
    imgs_array = np.array(all_imgs_list)
    labs_array = np.array(all_label_list)
    return imgs_array, labs_array


# 5.
def load_data01(img_rows=64, img_cols=64, img_channels=3, nb_classes=9):
    '''
    Load, split and pre-process the dataset
    :param img_rows:
    :param img_cols:
    :param img_channels:
    :param nb_classes:
    :return:
    '''
    # Load the dataset into memory
    images, labels = read_dataset()
    print(images.shape)
    print(labels.shape)
    # Split into training and validation sets
    train_images, valid_images, train_labels, valid_labels = train_test_split(
        images, labels, test_size=0.3, random_state=random.randint(0, 100))
    # Split off a test set
    _, test_images, _, test_labels = train_test_split(
        images, labels, test_size=0.5, random_state=random.randint(0, 100))
    # Reshape to 4-D
    train_images = train_images.reshape(train_images.shape[0], img_rows, img_cols, img_channels)
    valid_images = valid_images.reshape(valid_images.shape[0], img_rows, img_cols, img_channels)
    test_images = test_images.reshape(test_images.shape[0], img_rows, img_cols, img_channels)
    input_shape = (img_rows, img_cols, img_channels)
    # Print the size of each split
    print(train_images.shape[0], 'train samples')
    print(valid_images.shape[0], 'valid samples')
    print(test_images.shape[0], 'testss samples')
    # One-hot encode the labels
    train_labels = np_utils.to_categorical(train_labels, nb_classes)
    valid_labels = np_utils.to_categorical(valid_labels, nb_classes)
    test_labels = np_utils.to_categorical(test_labels, nb_classes)
    print(train_labels.shape)
    print(valid_labels.shape)
    print(test_labels.shape)
    # Cast the images to float
    train_images = train_images.astype('float32')
    valid_images = valid_images.astype('float32')
    test_images = test_images.astype('float32')
    # Normalise to [0, 1]
    train_images /= 255
    valid_images /= 255
    test_images /= 255
    return train_images, train_labels, valid_images, valid_labels, test_images, test_labels


# train_images, train_labels, valid_images, valid_labels, test_images, test_labels = load_data01()

