Saving annotated objects to txt and generating XML
1. The detection algorithm saves each detected object's class name and bounding-box position to a txt file.
Each record in the txt file consists of the image filename, the number of objects in that image, and then one line per object in the form: class_name xmin ymin xmax ymax. For example:
(4).avi237face.jpg
4
smoke 83 234 142 251
hand 119 255 271 306
eye 178 148 216 163
eye 111 156 148 173
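For reference, here is a minimal sketch of how the detection side might append one such record per image. The append_record helper and its arguments are assumptions for illustration, not part of the original detector code; only the record layout (filename, box count, then one class/box line per detection) comes from the example above.

# Sketch of the writer side (assumed helper, not from the original script):
# appends one record per image in the format the converter below expects.
def append_record(txt_path, image_name, detections):
    # detections: list of (class_name, xmin, ymin, xmax, ymax) tuples
    with open(txt_path, "a") as f:
        f.write(image_name + "\n")
        f.write(str(len(detections)) + "\n")
        for name, xmin, ymin, xmax, ymax in detections:
            f.write("%s %d %d %d %d\n" % (name, xmin, ymin, xmax, ymax))

# Example reproducing the record shown above:
append_record("SSDSave.txt", "(4).avi237face.jpg",
              [("smoke", 83, 234, 142, 251),
               ("hand", 119, 255, 271, 306),
               ("eye", 178, 148, 216, 163),
               ("eye", 111, 156, 148, 173)])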
2. The following script reads SSDSave.txt, pads each image to a square, and writes a PASCAL VOC XML annotation per image:

#!/usr/bin/python
# -*- coding: UTF-8 -*-
import os
import sys
import cv2
from xml.dom.minidom import Document

rootdir = "G:/MTCNNTraining/faceData/train"
convet2yoloformat = True
convert2vocformat = True
resized_dim = (48, 48)
# keep only boxes of at least this size; images are padded to a square
minsize2select = 1
usepadding = True

def convertimgset(img_set="train"):
    imgdir = rootdir + "/trainImages"
    gtfilepath = rootdir + "/SSDSave.txt"
    imagesdir = rootdir + "/images"
    vocannotationdir = rootdir + "/Annotations"
    labelsdir = rootdir + "/labels"
    if not os.path.exists(imagesdir):
        os.mkdir(imagesdir)
    if convet2yoloformat:
        if not os.path.exists(labelsdir):
            os.mkdir(labelsdir)
    if convert2vocformat:
        if not os.path.exists(vocannotationdir):
            os.mkdir(vocannotationdir)
    index = 0
    with open(gtfilepath, 'r') as gtfile:
        while True:  # and len(faces)<10
            # each record: filename line, box-count line, then one box line per object
            filename = gtfile.readline()[:-1]
            if filename == "":
                break
            sys.stdout.write("\r" + str(index) + ":" + filename + "\t\t\t")
            sys.stdout.flush()
            imgpath = imgdir + "/" + filename
            img = cv2.imread(imgpath)
            if img is None:
                break
            # pad the image to a square with zero (black) borders
            imgheight = img.shape[0]
            imgwidth = img.shape[1]
            maxl = max(imgheight, imgwidth)
            paddingleft = (maxl - imgwidth) >> 1
            paddingright = (maxl - imgwidth) >> 1
            paddingbottom = (maxl - imgheight) >> 1
            paddingtop = (maxl - imgheight) >> 1
            saveimg = cv2.copyMakeBorder(img, paddingtop, paddingbottom, paddingleft, paddingright,
                                         cv2.BORDER_CONSTANT, value=0)
            showimg = saveimg.copy()
            # read the box count, then one "class xmin ymin xmax ymax" line per object
            numbbox = int(gtfile.readline())
            bboxes = []
            bnames = []
            for i in range(numbbox):
                line_read = gtfile.readline()
                line_cor = line_read.strip().split(" ")
                obj_name = line_cor[0]
                line = list(map(int, line_cor[1:5]))
                if line[3] <= 0 or line[2] <= 0:
                    continue
                x = line[0] + paddingleft       # top-left corner x (shifted by padding)
                y = line[1] + paddingtop        # top-left corner y (shifted by padding)
                width = line[2] - line[0] + 1   # box width
                height = line[3] - line[1] + 1  # box height
                bbox = (x, y, width, height)
                # x2 = x + width
                # y2 = y + height
                # face = img[x:x2, y:y2]
                if width >= minsize2select and height >= minsize2select:
                    bboxes.append(bbox)
                    bnames.append(obj_name)
                    # cv2.rectangle(showimg, (x, y), (x2, y2), (0, 255, 0))
                # else:
                #     cv2.rectangle(showimg, (x, y), (x2, y2), (0, 0, 255))
            # filename = filename.replace("/", "_")
            if len(bboxes) == 0:
                print("warning: no face")
                continue
            cv2.imwrite(imagesdir + "/" + filename, saveimg)
            # if convet2yoloformat:
            #     height = saveimg.shape[0]
            #     width = saveimg.shape[1]
            #     txtpath = labelsdir + "/" + filename
            #     txtpath = txtpath[:-3] + "txt"
            #     ftxt = open(txtpath, 'w')
            #     for i in range(len(bboxes)):
            #         bbox = bboxes[i]
            #         xcenter = (bbox[0] + bbox[2] * 0.5) / width
            #         ycenter = (bbox[1] + bbox[3] * 0.5) / height
            #         wr = bbox[2] * 1.0 / width
            #         hr = bbox[3] * 1.0 / height
            #         txtline = "0 " + str(xcenter) + " " + str(ycenter) + " " + str(wr) + " " + str(hr) + "\n"
            #         ftxt.write(txtline)
            #     ftxt.close()
            if convert2vocformat:
                # build a PASCAL VOC style XML annotation for this image
                xmlpath = vocannotationdir + "/" + filename
                xmlpath = xmlpath[:-3] + "xml"
                doc = Document()
                annotation = doc.createElement('annotation')
                doc.appendChild(annotation)
                folder = doc.createElement('folder')
                folder_name = doc.createTextNode('widerface')
                folder.appendChild(folder_name)
                annotation.appendChild(folder)
                filenamenode = doc.createElement('filename')
                filename_name = doc.createTextNode(filename)
                filenamenode.appendChild(filename_name)
                annotation.appendChild(filenamenode)
                source = doc.createElement('source')
                annotation.appendChild(source)
                database = doc.createElement('database')
                database.appendChild(doc.createTextNode('wider face Database'))
                source.appendChild(database)
                annotation_s = doc.createElement('annotation')
                annotation_s.appendChild(doc.createTextNode('PASCAL VOC2007'))
                source.appendChild(annotation_s)
                image = doc.createElement('image')
                image.appendChild(doc.createTextNode('flickr'))
                source.appendChild(image)
                flickrid = doc.createElement('flickrid')
                flickrid.appendChild(doc.createTextNode('-1'))
                source.appendChild(flickrid)
                owner = doc.createElement('owner')
                annotation.appendChild(owner)
                flickrid_o = doc.createElement('flickrid')
                flickrid_o.appendChild(doc.createTextNode('widerFace'))
                owner.appendChild(flickrid_o)
                name_o = doc.createElement('name')
                name_o.appendChild(doc.createTextNode('widerFace'))
                owner.appendChild(name_o)
                # image size (width, height, depth) taken from the padded image
                size = doc.createElement('size')
                annotation.appendChild(size)
                width = doc.createElement('width')
                width.appendChild(doc.createTextNode(str(saveimg.shape[1])))
                height = doc.createElement('height')
                height.appendChild(doc.createTextNode(str(saveimg.shape[0])))
                depth = doc.createElement('depth')
                depth.appendChild(doc.createTextNode(str(saveimg.shape[2])))
                size.appendChild(width)
                size.appendChild(height)
                size.appendChild(depth)
                segmented = doc.createElement('segmented')
                segmented.appendChild(doc.createTextNode('0'))
                annotation.appendChild(segmented)
                # one <object> element per kept bounding box
                for i in range(len(bboxes)):
                    bbox = bboxes[i]
                    objects = doc.createElement('object')
                    annotation.appendChild(objects)
                    object_name = doc.createElement('name')
                    object_name.appendChild(doc.createTextNode(str(bnames[i])))
                    objects.appendChild(object_name)
                    pose = doc.createElement('pose')
                    pose.appendChild(doc.createTextNode('Unspecified'))
                    objects.appendChild(pose)
                    truncated = doc.createElement('truncated')
                    truncated.appendChild(doc.createTextNode('1'))
                    objects.appendChild(truncated)
                    difficult = doc.createElement('difficult')
                    difficult.appendChild(doc.createTextNode('0'))
                    objects.appendChild(difficult)
                    bndbox = doc.createElement('bndbox')
                    objects.appendChild(bndbox)
                    xmin = doc.createElement('xmin')
                    xmin.appendChild(doc.createTextNode(str(bbox[0])))
                    bndbox.appendChild(xmin)
                    ymin = doc.createElement('ymin')
                    ymin.appendChild(doc.createTextNode(str(bbox[1])))
                    bndbox.appendChild(ymin)
                    xmax = doc.createElement('xmax')
                    xmax.appendChild(doc.createTextNode(str(bbox[0] + bbox[2])))
                    bndbox.appendChild(xmax)
                    ymax = doc.createElement('ymax')
                    ymax.appendChild(doc.createTextNode(str(bbox[1] + bbox[3])))
                    bndbox.appendChild(ymax)
                f = open(xmlpath, "w")
                f.write(doc.toprettyxml(indent=''))
                f.close()
            # cv2.imshow("img", showimg)
            # cv2.waitKey()
            index = index + 1

def convertdataset():
    img_sets = ["train"]
    for img_set in img_sets:
        convertimgset(img_set)

if __name__ == "__main__":
    convertdataset()
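To sanity-check the generated Annotations, one possible follow-up (not part of the original script) is to parse a produced XML file back with xml.etree.ElementTree and print each object's class and box; the example path below is hypothetical and depends on your own data.

# Optional check after running the converter above:
# parse one generated VOC XML and print its objects and boxes.
import xml.etree.ElementTree as ET

def print_boxes(xml_path):
    root = ET.parse(xml_path).getroot()
    print(root.find("filename").text)
    for obj in root.findall("object"):
        name = obj.find("name").text
        b = obj.find("bndbox")
        xmin = int(b.find("xmin").text)
        ymin = int(b.find("ymin").text)
        xmax = int(b.find("xmax").text)
        ymax = int(b.find("ymax").text)
        print(name, xmin, ymin, xmax, ymax)

# example (hypothetical file name):
# print_boxes(rootdir + "/Annotations/(4).avi237face.xml")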