To track moving points we can use an algorithm called optical flow. Its key assumption is that all pixels in a small neighborhood around a feature point move consistently (they share the same flow) from one frame to the next, which is what allows the feature points to be followed through an image sequence.
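As a quick illustration before the full code, a minimal sketch of the core call, cv::calcOpticalFlowPyrLK, applied to two consecutive frames could look like the following (the function name, the file names and the parameter values here are placeholders, not part of the code below):

#include <opencv2/opencv.hpp>
#include <vector>

void opticalFlowSketch()
{
    cv::Mat prev = cv::imread("frame1.jpg", 0); // previous frame, gray-level
    cv::Mat next = cv::imread("frame2.jpg", 0); // current frame, gray-level

    // detect up to 500 good corners in the previous frame
    std::vector<cv::Point2f> prevPts, nextPts;
    cv::goodFeaturesToTrack(prev, prevPts, 500, 0.01, 10.);

    // track them into the next frame with pyramidal Lucas-Kanade optical flow
    std::vector<uchar> status; // 1 if the flow was found for the point
    std::vector<float> err;    // tracking error for each point
    cv::calcOpticalFlowPyrLK(prev, next, prevPts, nextPts, status, err);

    // draw the successfully tracked points
    for (size_t i = 0; i < nextPts.size(); i++)
        if (status[i])
            cv::circle(next, nextPts[i], 3, cv::Scalar(255), -1);
}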
Here is the code; it covers camera calibration (CameraCaliberator) first, then reading a video file, and finally the optical-flow feature tracker (FeatureTracker):
#include <opencv2/opencv.hpp>
#include <iomanip>
#include <iostream>
#include <sstream>
#include <string>
#include <vector>
using namespace cv;
using namespace std;

class CameraCaliberator
{
vector<vector<Point3f>> objectPoints;
// the 3D scene points, in the chessboard reference frame
vector<vector<Point2f>> imagePoints;
// the corresponding 2D points found in the images
Mat cameraMatrix;
Mat distCoeffs;
int flag;
Mat map1, map2;
bool mustInitUndistort;
public:
CameraCaliberator() : flag(0), mustInitUndistort(true) {}
// Open the chessboard images and extract the corner points
int addChessboardPoints(
const vector<string>& filelist, Size &boardSize){
vector<Point2f> imageCorners;
vector<Point3f> objectCorners;
for (int i = 0; i < boardSize.height; i++)
{
for (int j = 0; j < boardSize.width; j++)
{
// the board corners lie at z = 0 in the chessboard reference frame
objectCorners.push_back(Point3f(i, j, 0.0f));
}
}
Mat image;
int successes = 0;
for (int i = 0; i < filelist.size(); i++)
{
// read the image in gray-level
image = imread(filelist[i], 0);
// locate the inner chessboard corners
bool found = findChessboardCorners(image, boardSize, imageCorners);
if (!found) continue; // skip images where the full board was not detected
// refine the corner locations to sub-pixel accuracy
cornerSubPix(image, imageCorners, Size(5, 5), Size(-1, -1),
TermCriteria(TermCriteria::MAX_ITER + TermCriteria::EPS, 30, 0.1));
// termination criteria: stop after 30 iterations or when accuracy reaches 0.1
// keep the view only if all corners were found
if (imageCorners.size() == boardSize.area())
{
addPoints(imageCorners, objectCorners);
successes++;
}
}
return successes;
}
// Calibrate the camera from the collected points; returns the re-projection error
double calibrate(Size &imageSize){
mustInitUndistort = true;
vector<Mat> rvecs, tvecs;
return calibrateCamera(objectPoints,
imagePoints,
imageSize,
cameraMatrix,
distCoeffs,
rvecs,
tvecs,
flag);
}
// Remove lens distortion from an image (must be called after calibrate())
Mat remap(const Mat &image){
Mat undistorted;
if (mustInitUndistort){
initUndistortRectifyMap(cameraMatrix,
distCoeffs,
Mat(), Mat(),
image.size(),
CV_32FC1,
map1, map2);
}
mustInitUndistort = false;
cv::remap(image, undistorted, map1, map2, INTER_LINEAR);
return undistorted;
};
// // Set the calibration options
// // radial8CoeffEnabled should be true if 8 radial coefficients are required (5 is the default)
// // tangentialParamEnabled should be true if tangential distortion is present
// void setCalibrationFlag(bool radial8CoeffEnabled, bool tangentialParamEnabled) {
//
// // Set the flag used in cv::calibrateCamera()
// flag = 0;
// if (!tangentialParamEnabled) flag += CV_CALIB_ZERO_TANGENT_DIST;
// if (radial8CoeffEnabled) flag += CV_CALIB_RATIONAL_MODEL;
// }
private:
// Store a detected set of image corners together with the corresponding scene points
void addPoints(vector<Point2f>& imageCorners, vector<Point3f>& objectCorners){
imagePoints.push_back(imageCorners);
objectPoints.push_back(objectCorners);
}
};
// Usage example: calibrate the camera from the chessboard images, then undistort the first one
void m(){
vector<string> filelist;
Size boardSize(6, 4);
for (int i = 1; i <= 20; i++) {
stringstream str;
str << "chessboards\\chessboard" <<std::setw(2) << std::setfill('0') << i << ".jpg";
cout << str.str() << endl;
filelist.push_back(str.str());
// image= cv::imread(str.str(), 0);
// cv::imshow("Image",image);
//
// cv::waitKey(100);
}
Mat image = imread(filelist[0]);
CameraCaliberator cameraCaliberator;
cameraCaliberator.addChessboardPoints(filelist, boardSize);
cameraCaliberator.calibrate(image.size());
Mat uimage = cameraCaliberator.remap(image);
imshow("Undistorted Image", uimage);
waitKey();
}
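The frames for display and tracking come from a video file. The following snippet (wrapped here in a small helper function so that it stands on its own) opens bike.avi with cv::VideoCapture and shows it frame by frame at its native frame rate: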
// Read a video file frame by frame
int readVideo() {
// Open the video file
cv::VideoCapture capture("bike.avi");
// check if video successfully opened
if (!capture.isOpened())
return 1;
// Get the frame rate
double rate = capture.get(CV_CAP_PROP_FPS);
bool stop(false);
cv::Mat frame; // current video frame
cv::namedWindow("Extracted Frame");
// Delay between each frame in ms
// corresponds to video frame rate
int delay = 1000 / rate;
// for all frames in video
while (!stop) {
// read next frame if any
if (!capture.read(frame))
break;
cv::imshow("Extracted Frame", frame);
// introduce a delay
// or press key to stop
if (cv::waitKey(delay) >= 0)
stop = true;
}
// Close the video file.
// Not required since called by destructor
capture.release();
return 0;
}
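The tracker itself combines cv::goodFeaturesToTrack (to detect corners) with cv::calcOpticalFlowPyrLK (to follow them from frame to frame). It is written as a frame processor; the FrameProcessor base class is not listed in this post, but judging from the process() override below it is essentially the following interface (a minimal sketch, assumed here only so that the class stands on its own):

class FrameProcessor {
public:
    // process one input frame and produce an output frame
    virtual void process(cv::Mat &input, cv::Mat &output) = 0;
    virtual ~FrameProcessor() {}
};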
class FeatureTracker : public FrameProcessor {
cv::Mat gray;       // current gray-level image
cv::Mat gray_prev;  // previous gray-level image
std::vector<cv::Point2f> points[2]; // tracked features from 0->1
std::vector<cv::Point2f> initial;   // initial position of tracked points
std::vector<cv::Point2f> features;  // detected features
int max_count;   // maximum number of features to detect
double qlevel;   // quality level for feature detection
double minDist;  // minimum distance between two feature points
std::vector<uchar> status; // status of tracked features
std::vector<float> err;    // error in tracking
public:
FeatureTracker() : max_count(500), qlevel(0.01), minDist(10.) {}
// processing method
void process(cv::Mat &frame, cv::Mat &output) {
// convert to gray-level image
cv::cvtColor(frame, gray, CV_BGR2GRAY);
frame.copyTo(output);
// 1. if new feature points must be added
if(addNewPoints())
{
// detect feature points
detectFeaturePoints();
// add the detected features to the currently tracked features
points[0].insert(points[0].end(),features.begin(),features.end());
initial.insert(initial.end(),features.begin(),features.end());
}
// for first image of the sequence
if(gray_prev.empty())
gray.copyTo(gray_prev);
// 2. track features
cv::calcOpticalFlowPyrLK(gray_prev, gray, // 2 consecutive images
points[0], // input point position in first image
points[1], // output point position in the second image
status, // tracking success
err); // tracking error
// 3. loop over the tracked points to reject the undesirables
int k=0;
for( int i= 0; i < points[1].size(); i++ ) {
// do we keep this point?
if (acceptTrackedPoint(i)) {
// keep this point in vector
initial[k]= initial[i];
points[1][k++] = points[1][i];
}
}
// eliminate unsuccessful points
points[1].resize(k);
initial.resize(k);
// 4. handle the accepted tracked points
handleTrackedPoints(frame, output);
// 5. current points and image become previous ones
std::swap(points[1], points[0]);
cv::swap(gray_prev, gray);
}
// feature point detection
void detectFeaturePoints() {
// detect the features
cv::goodFeaturesToTrack(gray, // the image
features,   // the output detected features
max_count,  // the maximum number of features
qlevel,     // quality level
minDist);   // min distance between two features
}
// determine if new points should be added
bool addNewPoints() {
// if too few points
return points[0].size()<=10;
}
// determine which tracked point should be accepted
bool acceptTrackedPoint(int i) {
return status[i] &&
// if point has moved
(abs(points[0][i].x-points[1][i].x)+
(abs(points[0][i].y-points[1][i].y))>2);
}
// handle the currently tracked points
void handleTrackedPoints(cv::Mat &frame, cv::Mat &output){
// for all tracked points
for(int i= 0; i < points[1].size(); i++ ) {
// draw line and circle
cv::line(output,initial[i], points[1][i], cv::Scalar(255,255,255));
cv::circle(output, points[1][i], 3, cv::Scalar(255,255,255),-1);
}
}
};
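To try the tracker without the rest of the video-processing framework, it can also be driven directly from a cv::VideoCapture loop. A minimal sketch (the function name, the window name and the use of bike.avi are placeholders):

void runTracker()
{
    cv::VideoCapture capture("bike.avi");
    if (!capture.isOpened()) return;

    FeatureTracker tracker;
    cv::Mat frame, output;
    while (capture.read(frame)) {
        // detect/track feature points and draw them onto the output frame
        tracker.process(frame, output);
        cv::imshow("Tracked Features", output);
        if (cv::waitKey(30) >= 0) break; // press any key to stop
    }
}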