This article walks through how to implement single-target, multi-target, multi-scale, and custom-feature KCF tracking in Python. Many readers may not be familiar with the topic yet, so it is shared here for reference; hopefully you will come away with something useful after reading it.
Single-target tracking:
直接調(diào)用opencv中封裝的tracker即可。
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:50:47 2020
Chapter 4: KCF tracking
@author: youxinlin
"""
import cv2
from items import MessageItem
import time
import numpy as np

'''
Watcher module: responsible for intrusion detection and target tracking.
'''

class WatchDog(object):
    # Intrusion detector based on background subtraction.
    def __init__(self, frame=None):
        # Constructor: optionally take the first frame as the background model.
        self._background = None
        if frame is not None:
            self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        self.es = cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (10, 10))

    def isWorking(self):
        # Is the motion detector running?
        return self._background is not None

    def startWorking(self, frame):
        # Start the motion detector, using the given frame as background.
        if frame is not None:
            self._background = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)

    def stopWorking(self):
        # Stop the motion detector.
        self._background = None

    def analyze(self, frame):
        # Motion detection: diff the current frame against the background.
        if frame is None or self._background is None:
            return
        sample_frame = cv2.GaussianBlur(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY), (21, 21), 0)
        diff = cv2.absdiff(self._background, sample_frame)
        diff = cv2.threshold(diff, 25, 255, cv2.THRESH_BINARY)[1]
        diff = cv2.dilate(diff, self.es, iterations=2)
        image, cnts, hierarchy = cv2.findContours(diff.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        coordinate = []
        bigC = None
        bigMulti = 0
        for c in cnts:
            if cv2.contourArea(c) < 1500:
                continue
            (x, y, w, h) = cv2.boundingRect(c)
            if w * h > bigMulti:
                bigMulti = w * h
                bigC = ((x, y), (x + w, y + h))
        if bigC:
            cv2.rectangle(frame, bigC[0], bigC[1], (255, 0, 0), 2, 1)
            coordinate.append(bigC)
        message = {"coord": coordinate}
        message['msg'] = None
        return MessageItem(frame, message)

class Tracker(object):
    '''
    Tracker module: tracks a specified target.
    '''
    def __init__(self, tracker_type="BOOSTING", draw_coord=True):
        '''
        Initialise the tracker type.
        '''
        # Get the OpenCV version.
        (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.')
        self.tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
        self.tracker_type = tracker_type
        self.isWorking = False
        self.draw_coord = draw_coord
        # Construct the tracker (OpenCV builds older than 3.3 only had the generic factory).
        if int(major_ver) == 3 and int(minor_ver) < 3:
            self.tracker = cv2.Tracker_create(tracker_type)
        else:
            if tracker_type == 'BOOSTING':
                self.tracker = cv2.TrackerBoosting_create()
            if tracker_type == 'MIL':
                self.tracker = cv2.TrackerMIL_create()
            if tracker_type == 'KCF':
                self.tracker = cv2.TrackerKCF_create()
            if tracker_type == 'TLD':
                self.tracker = cv2.TrackerTLD_create()
            if tracker_type == 'MEDIANFLOW':
                self.tracker = cv2.TrackerMedianFlow_create()
            if tracker_type == 'GOTURN':
                self.tracker = cv2.TrackerGOTURN_create()

    def initWorking(self, frame, box):
        '''
        Initialise tracking.
        frame: the first frame to track from
        box: the region to track
        '''
        if not self.tracker:
            raise Exception("Tracker is not initialised")
        status = self.tracker.init(frame, box)
        if not status:
            raise Exception("Failed to initialise tracking")
        self.coord = box
        self.isWorking = True

    def track(self, frame):
        '''
        Run one tracking step.
        '''
        message = None
        if self.isWorking:
            status, self.coord = self.tracker.update(frame)
            if status:
                message = {"coord": [((int(self.coord[0]), int(self.coord[1])),
                                      (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3])))]}
                if self.draw_coord:
                    p1 = (int(self.coord[0]), int(self.coord[1]))
                    p2 = (int(self.coord[0] + self.coord[2]), int(self.coord[1] + self.coord[3]))
                    cv2.rectangle(frame, p1, p2, (255, 0, 0), 2, 1)
                message['msg'] = "is tracking"
        return MessageItem(frame, message)

class ObjectTracker(object):
    def __init__(self, dataSet):
        self.cascade = cv2.CascadeClassifier(dataSet)

    def track(self, frame):
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = self.cascade.detectMultiScale(gray, 1.03, 5)
        for (x, y, w, h) in faces:
            cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
        return frame

if __name__ == '__main__':
    # tracker_types = ['BOOSTING', 'MIL', 'KCF', 'TLD', 'MEDIANFLOW', 'GOTURN']
    tracker = Tracker(tracker_type="KCF")
    # video = cv2.VideoCapture(0)
    # video = cv2.VideoCapture("complex1.mov")
    video = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex1.MOV")
    ok, frame = video.read()
    bbox = cv2.selectROI(frame, False)
    tracker.initWorking(frame, bbox)
    while True:
        ok, frame = video.read()
        if ok:
            item = tracker.track(frame)
            cv2.imshow("track", item.getFrame())
            k = cv2.waitKey(1) & 0xff
            if k == 27:
                break
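A note on OpenCV versions: the generic cv2.Tracker_create factory only exists in builds older than OpenCV 3.3, and in recent opencv-contrib-python builds (4.5 and later) several of the individual factories such as TrackerBoosting_create, TrackerTLD_create and TrackerMedianFlow_create were moved into the cv2.legacy submodule. If construction fails on your install, a small fallback along these lines may help; make_tracker is an illustrative helper for this article, not OpenCV API, and it is only a sketch assuming an opencv-contrib-python install:

def make_tracker(name):
    # Try the top-level factory first (e.g. "KCF" -> cv2.TrackerKCF_create),
    # then fall back to the cv2.legacy submodule used by newer contrib builds.
    factory = getattr(cv2, "Tracker%s_create" % name, None)
    if factory is None and hasattr(cv2, "legacy"):
        factory = getattr(cv2.legacy, "Tracker%s_create" % name, None)
    if factory is None:
        raise ValueError("Tracker %s is not available in this OpenCV build" % name)
    return factory()

Note that this helper expects the factory-style names ("KCF", "MIL", "MedianFlow", ...) rather than the all-uppercase strings used by the Tracker class above.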
The accompanying items.py, placed in the same folder:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:04 2020
@author: youxinlin
"""
import json
from utils import IOUtil

'''
Message wrapper class.
'''

class MessageItem(object):
    # Wraps a frame together with any other information about it.
    def __init__(self, frame, message):
        self._frame = frame
        self._message = message

    def getFrame(self):
        # The image.
        return self._frame

    def getMessage(self):
        # The text information, as a dict that serialises to JSON.
        return self._message

    def getBase64Frame(self):
        # Return the image as base64, converting BGR to RGB first.
        jepg = IOUtil.array_to_bytes(self._frame[..., ::-1])
        return IOUtil.bytes_to_base64(jepg)

    def getBase64FrameByte(self):
        # Return the base64 image as bytes.
        return bytes(self.getBase64Frame())

    def getJson(self):
        # Return everything as a JSON string.
        dicdata = {"frame": self.getBase64Frame().decode(), "message": self.getMessage()}
        return json.dumps(dicdata)

    def getBinaryFrame(self):
        return IOUtil.array_to_bytes(self._frame[..., ::-1])
utils.py, also placed in the same folder:
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 17:51:40 2020
@author: youxinlin
"""
import time
import numpy
import base64
import os
import logging
import sys
import struct
from PIL import Image
from io import BytesIO

# Utility class.
class IOUtil(object):
    # Stream helpers.
    @staticmethod
    def array_to_bytes(pic, formatter="jpeg", quality=70):
        '''
        Static method: convert a numpy array into an encoded byte stream.
        :param pic: numpy array
        :param formatter: image format
        :param quality: JPEG quality; lower values compress more and give a shorter byte stream
        :return: encoded image bytes
        '''
        stream = BytesIO()
        picture = Image.fromarray(pic)
        picture.save(stream, format=formatter, quality=quality)
        jepg = stream.getvalue()
        stream.close()
        return jepg

    @staticmethod
    def bytes_to_base64(byte):
        '''
        Static method: encode bytes as base64.
        :param byte: raw bytes
        :return: base64-encoded bytes
        '''
        return base64.b64encode(byte)

    @staticmethod
    def transport_rgb(frame):
        '''
        Convert a BGR image to RGB, or an RGB image to BGR.
        '''
        return frame[..., ::-1]

    @staticmethod
    def byte_to_package(byte, cmd, var=1):
        '''
        Pack the binary data of one frame with a fixed-size header.
        :param byte: binary payload
        :param cmd: command
        :return: header + payload
        '''
        head = [var, len(byte), cmd]
        headPack = struct.pack("!3I", *head)
        senddata = headPack + byte
        return senddata

    @staticmethod
    def mkdir(filePath):
        '''
        Create a directory if it does not exist.
        '''
        if not os.path.exists(filePath):
            os.mkdir(filePath)

    @staticmethod
    def countCenter(box):
        '''
        Compute the centre of a rectangle given as two corner points.
        '''
        return (int(abs(box[0][0] - box[1][0]) * 0.5) + box[0][0],
                int(abs(box[0][1] - box[1][1]) * 0.5) + box[0][1])

    @staticmethod
    def countBox(center):
        '''
        From two corner points, compute (x, y, w, h).
        '''
        return (center[0][0], center[0][1],
                center[1][0] - center[0][0], center[1][1] - center[0][1])

    @staticmethod
    def getImageFileName():
        return time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime()) + '.png'
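For reference, this is roughly how the two helper files fit together; the snippet below is only an illustration (the image path is hypothetical), not part of the original scripts:

import cv2
from items import MessageItem

frame = cv2.imread("test.jpg")                         # any BGR image
item = MessageItem(frame, {"coord": [], "msg": "demo"})
payload = item.getJson()                               # JSON string: base64 JPEG plus the message dict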
Multi-target tracking:
Much the same as the single-target case, except that MultiTracker_create() is used.
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Jan 5 18:02:33 2020
Multi-target tracking
@author: youxinlin
"""
import numpy as np
import cv2
import sys

'''
if len(sys.argv) != 2:
    print('Input video name is missing')
    exit()
'''

print('Select multiple tracking targets')

cv2.namedWindow("tracking")
camera = cv2.VideoCapture(r"/Users/youxinlin/Desktop/video_data/complex6.MOV")
#camera = cv2.VideoCapture(0)
tracker = cv2.MultiTracker_create()   # multi-target tracker
init_once = False

ok, image = camera.read()
if not ok:
    print('Failed to read video')
    exit()

bbox1 = cv2.selectROI('tracking', image)
bbox2 = cv2.selectROI('tracking', image)
bbox3 = cv2.selectROI('tracking', image)

while camera.isOpened():
    ok, image = camera.read()
    if not ok:
        print('no image to read')
        break
    if not init_once:
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox1)
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox2)
        ok = tracker.add(cv2.TrackerKCF_create(), image, bbox3)
        init_once = True
    ok, boxes = tracker.update(image)
    for newbox in boxes:
        p1 = (int(newbox[0]), int(newbox[1]))
        p2 = (int(newbox[0] + newbox[2]), int(newbox[1] + newbox[3]))
        cv2.rectangle(image, p1, p2, (0, 0, 255))
    cv2.imshow('tracking', image)
    k = cv2.waitKey(1)
    if k == 27:
        break  # esc pressed
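As with the single-target case, newer opencv-contrib-python builds (4.5 and later) moved the multi-tracker into the legacy submodule, so if cv2.MultiTracker_create is missing on your install the equivalent calls would look roughly like this (version-dependent, so check which names your build exposes):

# Fallback for newer opencv-contrib-python builds where MultiTracker lives in cv2.legacy
tracker = cv2.legacy.MultiTracker_create()
tracker.add(cv2.legacy.TrackerKCF_create(), image, bbox1)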
Multi-scale KCF and KCF with custom features
In some scenarios you may not want to track with the default HOG features, or you may need to compare how different features perform. The pre-packaged tracker is no longer enough then, and you have to write the KCF code yourself so you can plug in whatever features you choose; a minimal sketch of the core update follows below.
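To make this concrete, here is a minimal sketch of the core KCF update (ridge regression in the Fourier domain with a Gaussian kernel), written so that the feature extractor is a parameter you can swap. Everything here is illustrative rather than the OpenCV implementation: SimpleKCF, get_features, sigma, lambdar and interp_factor are names chosen for the sketch, the padding, cosine window and sub-pixel refinement of a full tracker are omitted, and multi-scale search can be layered on top by extracting the patch at a few scale factors and keeping the scale whose response peak is highest.

import numpy as np

def gaussian_kernel_correlation(x1, x2, sigma):
    # Fourier transform of the Gaussian kernel correlation between two
    # (h, w, c) feature maps, computed with FFTs as in the KCF formulation.
    xf1 = np.fft.fft2(x1, axes=(0, 1))
    xf2 = np.fft.fft2(x2, axes=(0, 1))
    cross = np.real(np.fft.ifft2(np.sum(xf1 * np.conj(xf2), axis=2)))
    dist = (np.sum(x1 ** 2) + np.sum(x2 ** 2) - 2.0 * cross) / x1.size
    return np.fft.fft2(np.exp(-np.maximum(dist, 0) / (sigma ** 2)))

class SimpleKCF(object):
    def __init__(self, get_features, sigma=0.5, lambdar=1e-4, interp_factor=0.02):
        # get_features: any callable mapping an image patch to an (h, w, c) float array
        # (for a single-channel feature, return shape (h, w, 1)).
        self.get_features = get_features
        self.sigma = sigma
        self.lambdar = lambdar
        self.interp_factor = interp_factor

    def init(self, patch):
        # Learn the initial model from the first patch.
        self.x = self.get_features(patch).astype(np.float64)
        h, w = self.x.shape[:2]
        ys, xs = np.mgrid[0:h, 0:w]
        bandwidth = np.sqrt(h * w) / 10.0
        y = np.exp(-((ys - h // 2) ** 2 + (xs - w // 2) ** 2) / (2 * bandwidth ** 2))
        self.yf = np.fft.fft2(np.fft.ifftshift(y))            # Gaussian regression target
        kf = gaussian_kernel_correlation(self.x, self.x, self.sigma)
        self.alphaf = self.yf / (kf + self.lambdar)            # ridge regression in the Fourier domain

    def detect(self, patch):
        # Correlate the new patch with the model and return the displacement of the peak.
        z = self.get_features(patch).astype(np.float64)
        kzf = gaussian_kernel_correlation(z, self.x, self.sigma)
        response = np.real(np.fft.ifft2(self.alphaf * kzf))
        dy, dx = np.unravel_index(np.argmax(response), response.shape)
        h, w = response.shape
        if dy > h / 2:                                         # undo circular-shift wrap-around
            dy -= h
        if dx > w / 2:
            dx -= w
        return dx, dy, response.max()

    def update(self, patch):
        # Re-train on the patch at the new position and blend with the old model.
        x_new = self.get_features(patch).astype(np.float64)
        kf = gaussian_kernel_correlation(x_new, x_new, self.sigma)
        alphaf_new = self.yf / (kf + self.lambdar)
        f = self.interp_factor
        self.x = (1 - f) * self.x + f * x_new
        self.alphaf = (1 - f) * self.alphaf + f * alphaf_new

With this structure a custom feature is just a function: pass in an extractor that returns an (h, w, c) array, for example HOG cells, colour names, or raw grayscale pixels, and the training and detection steps stay exactly the same.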
That covers "how to implement single-target, multi-target, multi-scale, and custom-feature KCF tracking in Python". Thanks for reading; hopefully the material shared here gives you a solid starting point for your own experiments.