Dynamically Adding and Dropping Keypoints --- 32
Original blog post. Please credit the source when reposting: http://www.cnblogs.com/zxouxuewei/
First, look at the parameters in the face_tracker2.launch file (the value in parentheses is each parameter's default; a sketch of how this block might appear inside the launch file follows the list):
use_depth_for_tracking: (True) If you are using a Kinect, setting this to True drops keypoints whose depth places them too far from the face.
min_keypoints: (20) The minimum number of keypoints we keep before trying to add new ones.
abs_min_keypoints: (6) The absolute minimum number of keypoints before we consider the current face track lost and re-run the detector.
add_keypoint_distance: (10) The minimum distance (in pixels) between a new keypoint and any existing keypoint.
std_err_xy: (2.5) The standard error (in pixels) used to decide whether a keypoint is an outlier.
pct_err_z: (1.5) The depth threshold (as a percentage) that determines when a keypoint straying off the face is dropped.
max_mse: (10000) The maximum total mean squared error of the current feature cluster before we re-detect the face.
expand_roi: (1.02) The factor by which the search ROI is expanded on each cycle when looking for new keypoints.
add_keypoints_interval: (1) How often we attempt to add new keypoints; 1 means every frame.
drop_keypoints_interval: (1) How often we attempt to drop keypoints; 1 means every frame.
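For orientation, this is roughly what the node section of face_tracker2.launch could look like. It is a sketch assembled from the parameter list above, not the shipped file verbatim; in particular the pkg/name/type attributes are assumed from the usual rbx1 layout:

<launch>
  <node pkg="rbx1_vision" name="face_tracker" type="face_tracker2.py" output="screen">
    <rosparam>
      use_depth_for_tracking: True
      min_keypoints: 20
      abs_min_keypoints: 6
      add_keypoint_distance: 10
      std_err_xy: 2.5
      pct_err_z: 1.5
      max_mse: 10000
      expand_roi: 1.02
      add_keypoints_interval: 1
      drop_keypoints_interval: 1
    </rosparam>
  </node>
</launch>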
First make sure your Kinect driver or UVC camera driver starts properly (if you are using a Kinect, run the OpenNI driver):
roslaunch openni_launch openni.launch

If you have not installed the Kinect depth camera driver, see my earlier posts.
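To confirm the driver is actually publishing images before starting the tracker, you can list the camera topics (assuming the default /camera namespace used by openni_launch):

rostopic list | grep /camera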
Then run the following launch file:
roslaunch rbx1_vision face_tracker2.launch

With the video window in the foreground, press the 'c' key to clear the current keypoints and force a fresh detection of the face.
Here is my running result:
Now let's look at the code in rbx1/rbx1_vision/nodes/face_tracker2.py:

#!/usr/bin/env python

""" face_tracker2.py - Version 1.1 2013-12-20

    Combines the OpenCV Haar face detector with Good Features to Track and
    Lucas-Kanade optical flow tracking. Keypoints are added and dropped
    according to simple statistical clustering rules.

    Created for the Pi Robot Project: http://www.pirobot.org
    Copyright (c) 2012 Patrick Goebel. All rights reserved.

    This program is free software; you can redistribute it and/or modify
    it under the terms of the GNU General Public License as published by
    the Free Software Foundation; either version 2 of the License, or
    (at your option) any later version.

    This program is distributed in the hope that it will be useful,
    but WITHOUT ANY WARRANTY; without even the implied warranty of
    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
    GNU General Public License for more details at:

    http://www.gnu.org/licenses/gpl.html
"""

import rospy
import cv2
import cv2.cv as cv
import numpy as np
from math import isnan, isinf
from rbx1_vision.face_detector import FaceDetector
from rbx1_vision.lk_tracker import LKTracker

class FaceTracker(FaceDetector, LKTracker):
    def __init__(self, node_name):
        super(FaceTracker, self).__init__(node_name)

        self.n_faces = rospy.get_param("~n_faces", 1)
        self.show_text = rospy.get_param("~show_text", True)
        self.show_add_drop = rospy.get_param("~show_add_drop", False)
        self.feature_size = rospy.get_param("~feature_size", 1)
        self.use_depth_for_tracking = rospy.get_param("~use_depth_for_tracking", False)
        self.min_keypoints = rospy.get_param("~min_keypoints", 20)
        self.abs_min_keypoints = rospy.get_param("~abs_min_keypoints", 6)
        self.std_err_xy = rospy.get_param("~std_err_xy", 2.5)
        self.pct_err_z = rospy.get_param("~pct_err_z", 0.42)
        self.max_mse = rospy.get_param("~max_mse", 10000)
        self.add_keypoint_distance = rospy.get_param("~add_keypoint_distance", 10)
        self.add_keypoints_interval = rospy.get_param("~add_keypoints_interval", 1)
        self.drop_keypoints_interval = rospy.get_param("~drop_keypoints_interval", 1)
        self.expand_roi_init = rospy.get_param("~expand_roi", 1.02)
        self.expand_roi = self.expand_roi_init

        self.face_tracking = True

        self.frame_index = 0
        self.add_index = 0
        self.drop_index = 0
        self.keypoints = list()

        self.detect_box = None
        self.track_box = None

        self.grey = None
        self.prev_grey = None

    def process_image(self, cv_image):
        try:
            # Create a greyscale version of the image
            self.grey = cv2.cvtColor(cv_image, cv2.COLOR_BGR2GRAY)

            # Equalize the grey histogram to minimize lighting effects
            self.grey = cv2.equalizeHist(self.grey)

            # Step 1: Detect the face if we haven't already
            if self.detect_box is None:
                self.keypoints = list()
                self.track_box = None
                self.detect_box = self.detect_face(self.grey)
            else:
                # Step 2: If we aren't yet tracking keypoints, get them now
                if not self.track_box or not self.is_rect_nonzero(self.track_box):
                    self.track_box = self.detect_box
                    self.keypoints = self.get_keypoints(self.grey, self.track_box)

                # Store a copy of the current grey image used for LK tracking
                if self.prev_grey is None:
                    self.prev_grey = self.grey

                # Step 3: If we have keypoints, track them using optical flow
                self.track_box = self.track_keypoints(self.grey, self.prev_grey)

                # Step 4: Drop keypoints that are too far from the main cluster
                if self.frame_index % self.drop_keypoints_interval == 0 and len(self.keypoints) > 0:
                    ((cog_x, cog_y, cog_z), mse_xy, mse_z, score) = self.drop_keypoints(self.abs_min_keypoints, self.std_err_xy, self.max_mse)

                    if score == -1:
                        self.detect_box = None
                        self.track_box = None
                        return cv_image

                # Step 5: Add keypoints if the number is getting too low
                if self.frame_index % self.add_keypoints_interval == 0 and len(self.keypoints) < self.min_keypoints:
                    self.expand_roi = self.expand_roi_init * self.expand_roi
                    self.add_keypoints(self.track_box)
                else:
                    self.frame_index += 1
                    self.expand_roi = self.expand_roi_init

            # Store a copy of the current grey image used for LK tracking
            self.prev_grey = self.grey

            # Process any special keyboard commands for this module
            if self.keystroke != -1:
                try:
                    cc = chr(self.keystroke & 255).lower()
                    print cc
                    if cc == 'c':
                        self.keypoints = []
                        self.track_box = None
                        self.detect_box = None
                    elif cc == 'd':
                        self.show_add_drop = not self.show_add_drop
                except:
                    pass
        except AttributeError:
            pass

        return cv_image

    def add_keypoints(self, track_box):
        # Look for any new keypoints around the current keypoints

        # Begin with a mask of all black pixels
        mask = np.zeros_like(self.grey)

        # Get the coordinates and dimensions of the current track box
        try:
            ((x, y), (w, h), a) = track_box
        except:
            try:
                x, y, w, h = track_box
                x = x + w / 2
                y = y + h / 2
                a = 0
            except:
                rospy.loginfo("Track box has shrunk to zero...")
                return

        x = int(x)
        y = int(y)

        # Expand the track box to look for new keypoints
        w_new = int(self.expand_roi * w)
        h_new = int(self.expand_roi * h)

        pt1 = (x - int(w_new / 2), y - int(h_new / 2))
        pt2 = (x + int(w_new / 2), y + int(h_new / 2))

        mask_box = ((x, y), (w_new, h_new), a)

        # Display the expanded ROI with a yellow rectangle
        if self.show_add_drop:
            cv2.rectangle(self.marker_image, pt1, pt2, cv.RGB(255, 255, 0))

        # Create a filled white ellipse within the track_box to define the ROI
        cv2.ellipse(mask, mask_box, cv.CV_RGB(255, 255, 255), cv.CV_FILLED)

        if self.keypoints is not None:
            # Mask the current keypoints
            for x, y in [np.int32(p) for p in self.keypoints]:
                cv2.circle(mask, (x, y), 5, 0, -1)

        new_keypoints = cv2.goodFeaturesToTrack(self.grey, mask = mask, **self.gf_params)

        # Append new keypoints to the current list if they are not
        # too far from the current cluster
        if new_keypoints is not None:
            for x, y in np.float32(new_keypoints).reshape(-1, 2):
                distance = self.distance_to_cluster((x, y), self.keypoints)
                if distance > self.add_keypoint_distance:
                    self.keypoints.append((x, y))
                    # Briefly display a blue disc where the new point is added
                    if self.show_add_drop:
                        cv2.circle(self.marker_image, (x, y), 3, (255, 255, 0, 0), cv.CV_FILLED, 2, 0)

        # Remove duplicate keypoints
        self.keypoints = list(set(self.keypoints))

    def distance_to_cluster(self, test_point, cluster):
        min_distance = 10000
        for point in cluster:
            if point == test_point:
                continue
            # Use L1 distance since it is faster than L2
            distance = abs(test_point[0] - point[0]) + abs(test_point[1] - point[1])
            if distance < min_distance:
                min_distance = distance
        return min_distance

    def drop_keypoints(self, min_keypoints, outlier_threshold, mse_threshold):
        sum_x = 0
        sum_y = 0
        sum_z = 0
        sse = 0
        keypoints_xy = self.keypoints
        keypoints_z = self.keypoints
        n_xy = len(self.keypoints)
        n_z = n_xy

        # if self.use_depth_for_tracking:
        #     if self.depth_image is None:
        #         return ((0, 0, 0), 0, 0, -1)

        # If there are no keypoints left to track, start over
        if n_xy == 0:
            return ((0, 0, 0), 0, 0, -1)

        # Compute the COG (center of gravity) of the cluster
        for point in self.keypoints:
            sum_x = sum_x + point[0]
            sum_y = sum_y + point[1]

        mean_x = sum_x / n_xy
        mean_y = sum_y / n_xy

        mean_z = 0
        if self.use_depth_for_tracking and not self.depth_image is None:
            for point in self.keypoints:
                try:
                    z = self.depth_image[point[1], point[0]]
                except:
                    n_z = n_z - 1
                    continue
                if not isnan(z):
                    sum_z = sum_z + z
                else:
                    n_z = n_z - 1
                    continue
            try:
                mean_z = sum_z / n_z
            except:
                mean_z = -1
        else:
            mean_z = -1

        # Compute the x-y MSE (mean squared error) of the cluster in the camera plane
        for point in self.keypoints:
            sse = sse + (point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)
            #sse = sse + abs((point[0] - mean_x)) + abs((point[1] - mean_y))

        # Get the average over the number of feature points
        mse_xy = sse / n_xy

        # The MSE must be > 0 for any sensible feature cluster
        if mse_xy == 0 or mse_xy > mse_threshold:
            return ((0, 0, 0), 0, 0, -1)

        # Throw away the outliers based on the x-y variance
        max_err = 0
        for point in self.keypoints:
            std_err = ((point[0] - mean_x) * (point[0] - mean_x) + (point[1] - mean_y) * (point[1] - mean_y)) / mse_xy
            if std_err > max_err:
                max_err = std_err
            if std_err > outlier_threshold:
                keypoints_xy.remove(point)
                if self.show_add_drop:
                    # Briefly mark the removed points in red
                    cv2.circle(self.marker_image, (point[0], point[1]), 3, (0, 0, 255), cv.CV_FILLED, 2, 0)
                try:
                    keypoints_z.remove(point)
                    n_z = n_z - 1
                except:
                    pass
                n_xy = n_xy - 1

        # Now do the same for depth
        if self.use_depth_for_tracking and not self.depth_image is None:
            sse = 0
            for point in keypoints_z:
                try:
                    z = self.depth_image[point[1], point[0]]
                    sse = sse + (z - mean_z) * (z - mean_z)
                except:
                    n_z = n_z - 1
            try:
                mse_z = sse / n_z
            except:
                mse_z = 0

            # Throw away the outliers based on depth using percent error
            # rather than standard error since depth values can jump
            # dramatically at object boundaries
            for point in keypoints_z:
                try:
                    z = self.depth_image[point[1], point[0]]
                except:
                    continue
                try:
                    pct_err = abs(z - mean_z) / mean_z
                    if pct_err > self.pct_err_z:
                        keypoints_xy.remove(point)
                        if self.show_add_drop:
                            # Briefly mark the removed points in red
                            cv2.circle(self.marker_image, (point[0], point[1]), 2, (0, 0, 255), cv.CV_FILLED)
                except:
                    pass
        else:
            mse_z = -1

        self.keypoints = keypoints_xy

        # Consider a cluster bad if we have fewer than min_keypoints left
        if len(self.keypoints) < min_keypoints:
            score = -1
        else:
            score = 1

        return ((mean_x, mean_y, mean_z), mse_xy, mse_z, score)

if __name__ == '__main__':
    try:
        node_name = "face_tracker"
        FaceTracker(node_name)
        rospy.spin()
    except KeyboardInterrupt:
        print "Shutting down face tracker node."
        cv.DestroyAllWindows()
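To make the statistical rules concrete, here is a minimal standalone sketch (not part of rbx1; the helper names drop_outliers and far_enough_to_add and the toy points are invented for illustration) of the two core tests: the x-y outlier check from drop_keypoints() and the L1 add-distance check from distance_to_cluster(), using the default thresholds:

# standalone_keypoint_stats.py -- a minimal sketch, NOT part of rbx1.
import numpy as np

STD_ERR_XY = 2.5            # outlier threshold on normalized squared error
ADD_KEYPOINT_DISTANCE = 10  # minimum L1 distance for a new keypoint (pixels)

def drop_outliers(keypoints):
    # Same test as drop_keypoints(): a point is an outlier when its squared
    # distance from the cluster's center of gravity, divided by the cluster's
    # mean squared error, exceeds STD_ERR_XY.
    pts = np.float32(keypoints)
    cog = pts.mean(axis=0)                    # center of gravity (mean_x, mean_y)
    sq_err = ((pts - cog) ** 2).sum(axis=1)   # per-point squared error
    mse_xy = sq_err.mean()                    # cluster MSE in the camera plane
    keep = (sq_err / mse_xy) <= STD_ERR_XY
    return [tuple(p) for p in pts[keep]], cog, mse_xy

def far_enough_to_add(candidate, keypoints):
    # Mirrors distance_to_cluster(): L1 distance is used because it is cheaper
    # than L2, and a candidate is accepted only if it lies at least
    # ADD_KEYPOINT_DISTANCE pixels from every existing keypoint.
    cx, cy = candidate
    min_l1 = min(abs(cx - x) + abs(cy - y) for (x, y) in keypoints)
    return min_l1 > ADD_KEYPOINT_DISTANCE

if __name__ == '__main__':
    # A tight cluster around (100, 100) plus one far-away outlier.
    cluster = [(98, 101), (100, 99), (102, 100), (99, 102), (300, 40)]
    kept, cog, mse = drop_outliers(cluster)
    print("COG: (%.1f, %.1f)  MSE: %.1f  kept %d of %d points"
          % (cog[0], cog[1], mse, len(kept), len(cluster)))
    print("add (104, 100)? %s" % far_enough_to_add((104, 100), kept))  # too close -> False
    print("add (150, 130)? %s" % far_enough_to_add((150, 130), kept))  # far enough -> True

One more detail worth noticing in process_image(): every frame in which the keypoint count stays below min_keypoints multiplies self.expand_roi by expand_roi_init, so the search window for new features grows geometrically (at 1.02 per cycle its dimensions roughly double after about 35 consecutive lean frames, since 1.02^35 ≈ 2.0), and it snaps back to the initial factor as soon as the cluster is healthy again.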
Reposted from: https://www.cnblogs.com/zxouxuewei/p/5410124.html