光流法运动目标检测
视频讲解如下:
当前系列所有demo下载地址:
https://github.com/GaoRenBao/OpenCv4-Demo
不同编程语言对应的OpenCv版本以及开发环境信息如下:
语言 | OpenCv版本 | IDE |
C# | OpenCvSharp4.4.8.0.20230708 | Visual Studio 2022 |
C++ | OpenCv-4.5.5-vc14_vc15 | Visual Studio 2022 |
Python | OpenCv-Python (4.6.0.66) | PyCharm Community Edition 2022.1.3 |
C#版本
C#版本需要安装“OpenCvSharp4”、“OpenCvSharp4.runtime.win”两个库才行。不然会报错。
如果需要使用“ BitmapConverter.ToBitmap”操作,则需要追加安装“OpenCvSharp4.Extensions”库。
注意:OpenCvSharp4 各版本的稳定性不一,部分版本会经常出现视频打开失败的问题,建议固定使用经过验证的版本。
C#版本的修改其实也参考了网上的一些资料,而且关于角点检测函数GoodFeaturesToTrack,在官方demo里面竟然找不到例子,我有点怀疑是不是我找的地方不对。。。
光流法参考例子:https://blog.csdn.net/salt_bean_curd/article/details/107215187
还有个坑爹的OpenCVSharp教程,竟然需要收费,我不推荐大家花这个冤枉钱啊,有些东西网上找找还是能找到资料的。OpenCVSharp和OpenCv的区别也就是接口的调用方式有些不一样,大多数的函数名也基本保持一致的。
https://blog.csdn.net/stq054188/category_10800236.html
不说了,我们先来看下演示效果,如下:
C++的设置参数放到C#版本里面,效果好像没有C++效果来的明显,可能是一些参数的设置导致的,我暂时也没花太多时间去研究这个参数,毕竟调试OpenCVSharp也花了我不少时间。

代码如下:
using OpenCvSharp;
using System;
namespace demo
{
    internal class Program
    {
        // Current frame (grayscale)
        public static Mat gray = new Mat();
        // Previous frame (grayscale)
        public static Mat gray_prev = new Mat();
        // points1: original feature positions; points2: new positions
        public static Point2f[] points1;
        public static Point2f[] points2;
        // Initial positions of the tracked points
        public static Point2f[] initial;
        // Maximum number of features to detect
        public static int maxCount = 500;
        // Quality level for feature detection
        public static double qLevel = 0.01;
        // Minimum distance between two features
        public static double minDist = 10.0;
        // Tracking status: 1 if the flow was found for the feature, 0 otherwise
        public static byte[] status;
        public static float[] err;

        static void Main(string[] args)
        {
            var capture = new VideoCapture("../../../images/lol.avi");
            // Derive the per-frame delay from the video frame rate.
            // Bug fix: guard against containers that report 0 fps
            // (the original divided by capture.Fps unconditionally).
            double fps = capture.Fps;
            int sleepTime = fps > 0 ? (int)Math.Round(1000 / fps) : 33;
            Mat image = new Mat();
            // Read frames until the video ends
            while (true)
            {
                capture.Read(image);
                if (image.Empty())
                    break;
                Mat result = tracking(image);
                Cv2.ImShow("效果图", result);
                // To display in a WinForms PictureBox instead:
                // pictureBox1.Image = BitmapConverter.ToBitmap(result);
                Cv2.WaitKey(sleepTime);
            }
        }

        //--------------------------------------
        // function: tracking
        // brief: track feature points with pyramidal Lucas-Kanade optical flow
        // parameter: frame  input video frame
        // return: video frame with the tracking result drawn on it
        //--------------------------------------
        public static Mat tracking(Mat frame)
        {
            Mat output = new Mat();
            Cv2.CvtColor(frame, gray, ColorConversionCodes.BGR2GRAY);
            frame.CopyTo(output);
            // Detect feature points
            if (addNewPoints())
            {
                points1 = Cv2.GoodFeaturesToTrack(gray, maxCount, qLevel, minDist, new Mat(), 10, true, 0.04);
                // Bug fix: clone instead of aliasing — the compaction loop below
                // must not mutate points1 through initial (matches the C++ version).
                initial = (Point2f[])points1.Clone();
                //// Pixel-level feature detection:
                //Point2f[] po = Cv2.GoodFeaturesToTrack(gray, maxCount, qLevel, minDist, new Mat(), 3, true, 0.04);
                //// Sub-pixel refinement:
                //points1 = Cv2.CornerSubPix(gray, po, new Size(5, 5), new Size(-1, -1), new TermCriteria());
            }
            // First frame: use the current frame as the previous one as well
            if (gray_prev.Empty())
            {
                gray.CopyTo(gray_prev);
            }
            // Optical-flow pyramid: compute the feature positions in the new frame
            points2 = new Point2f[points1.Length];
            Cv2.CalcOpticalFlowPyrLK(gray_prev, gray, points1, ref points2, out status, out err);
            // Compact the arrays, keeping only the well-tracked points
            int k = 0;
            for (int i = 0; i < points2.Length; i++)
            {
                if (acceptTrackedPoint(i))
                {
                    initial[k] = initial[i];
                    points2[k++] = points2[i];
                }
            }
            // Bug fix: truncate to the kept points (mirrors the C++ version's
            // resize) so stale entries are not carried into the next frame.
            Array.Resize(ref points2, k);
            Array.Resize(ref initial, k);
            // Draw the feature points and their motion tracks
            for (int i = 0; i < k; i++)
            {
                Cv2.Line(output, (Point)initial[i], (Point)points2[i], new Scalar(0, 0, 255));
                Cv2.Circle(output, (Point)points2[i], 3, new Scalar(0, 255, 0), -1);
            }
            // Use the current tracking result as the reference for the next frame
            Swap(ref points2, ref points1);
            Swap(ref gray_prev, ref gray);
            return output;
        }

        // Generic in-place swap of two references.
        public static void Swap<T>(ref T a, ref T b)
        {
            T t = a;
            a = b;
            b = t;
        }

        //-------------------------------------
        // function: addNewPoints
        // brief: decide whether new feature points should be detected
        // return: true when new points should be added
        //-------------------------------------
        public static bool addNewPoints()
        {
            if (points1 == null) return true;
            // Limiting by remaining point count (as the C++ version does) is
            // possible, but re-detecting every frame gives a denser result here:
            //return points1.Length <= 10;
            //System.Diagnostics.Debug.WriteLine(points1.Length);
            return true;
        }

        //--------------------------------------
        // function: acceptTrackedPoint
        // brief: decide whether tracked point i should be kept
        // parameter: i  index into points1/points2/status
        // return: true when the flow was found and the point moved more
        //         than 5 pixels in Manhattan (L1) distance
        //-------------------------------------
        public static bool acceptTrackedPoint(int i)
        {
            return status[i] == 1 && ((Math.Abs(points1[i].X - points2[i].X) + Math.Abs(points1[i].Y - points2[i].Y)) > 5);
        }
    }
}
C++版本
C++版本是由毛星云的版本简化而来的。
#include <opencv2/video/video.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/core/core.hpp>
#include <iostream>
#include <cstdio>
using namespace std;
using namespace cv;
void tracking(Mat& frame, Mat& output);
bool addNewPoints();
bool acceptTrackedPoint(int i);
string window_name = "optical flow tracking";
Mat gray;                   // current frame (grayscale)
Mat gray_prev;              // previous frame (grayscale)
vector<Point2f> points[2];  // points[0]: original feature positions, points[1]: new positions
vector<Point2f> initial;    // initial positions of the tracked points
vector<Point2f> features;   // newly detected features
int maxCount = 500;         // maximum number of features to detect
double qLevel = 0.01;       // quality level for feature detection
double minDist = 10.0;      // minimum distance between two features
vector<uchar> status;       // tracking status: 1 if the flow was found for the feature, 0 otherwise
vector<float> err;          // per-point tracking error reported by calcOpticalFlowPyrLK
int main()
{
Mat frame;
Mat result;
VideoCapture capture("../images/lol.avi");
if (capture.isOpened()) // 摄像头读取文件开关
{
capture >> frame;
imshow(window_name, frame);
waitKey(0);
while (true)
{
capture >> frame;
if (frame.empty()) break;
tracking(frame, result);
imshow(window_name, result);
if ((char)waitKey(50) == 27)
{
break;
}
}
}
return 0;
}
//--------------------------------------
// function: tracking
// brief: track feature points between consecutive frames with
//        pyramidal Lucas-Kanade optical flow
// parameter: frame  input video frame
//            output video frame with the tracking result drawn on it
// return: void
//--------------------------------------
void tracking(Mat& frame, Mat& output)
{
    // OpenCV3 form of this call:
    cvtColor(frame, gray, COLOR_BGR2GRAY);
    // OpenCV2 form of this call:
    //cvtColor(frame, gray, CV_BGR2GRAY);
    frame.copyTo(output);
    // Detect and append new feature points when too few remain tracked
    if (addNewPoints())
    {
        goodFeaturesToTrack(gray, features, maxCount, qLevel, minDist);
        points[0].insert(points[0].end(), features.begin(), features.end());
        initial.insert(initial.end(), features.begin(), features.end());
    }
    // First frame: use the current frame as the previous one as well
    if (gray_prev.empty())
    {
        gray.copyTo(gray_prev);
    }
    // Lucas-Kanade optical-flow motion estimation
    calcOpticalFlowPyrLK(gray_prev, gray, points[0], points[1], status, err);
    // Compact the arrays, keeping only the well-tracked points
    int k = 0;
    for (size_t i = 0; i < points[1].size(); i++)
    {
        if (acceptTrackedPoint(i))
        {
            initial[k] = initial[i];
            points[1][k++] = points[1][i];
        }
    }
    points[1].resize(k);
    initial.resize(k);
    // Draw the feature points and their motion tracks
    for (size_t i = 0; i < points[1].size(); i++)
    {
        line(output, initial[i], points[1][i], Scalar(0, 0, 255));
        circle(output, points[1][i], 3, Scalar(0, 255, 0), -1);
    }
    // Use the current tracking result as the reference for the next frame
    // (swap avoids copying: points[1] becomes next frame's points[0],
    // and the current gray image becomes the next gray_prev)
    swap(points[1], points[0]);
    swap(gray_prev, gray);
}
//-------------------------------------
// function: addNewPoints
// brief: 检测新点是否应该被添加
// parameter:
// return: 是否被添加标志
//-------------------------------------
bool addNewPoints()
{
return points[0].size() <= 10;
}
//--------------------------------------
// function: acceptTrackedPoint
// brief: decide whether tracked point i should be kept
// parameter: i  index into points[0]/points[1]/status
// return: true when the flow was found and the point moved more
//         than 2 pixels in Manhattan (L1) distance
//-------------------------------------
bool acceptTrackedPoint(int i)
{
    // Discard points whose optical flow was not found.
    if (!status[i])
        return false;
    // Keep only points that actually moved (filters static background).
    const float motion = abs(points[0][i].x - points[1][i].x)
                       + abs(points[0][i].y - points[1][i].y);
    return motion > 2;
}
Python版本
这个是通过毛星云的C++版本转换过来的,代码如下:
import cv2
gray_prev = None
points1 = None
points2 = None
st = None
def acceptTrackedPoint(a, b, c):
    """Decide whether a tracked point should be kept.

    a: original point position, indexable as a[0] == (x, y)
    b: new point position, same shape as a
    c: optical-flow status flag (1 = flow found for this point)
    Returns True when the flow was found and the point moved more than
    2 pixels in Manhattan (L1) distance.
    """
    # Bug fix: the two distance terms must be ADDED (as in the C++ and C#
    # versions), not subtracted — the original rejected points that moved
    # by similar amounts in x and y (e.g. diagonal motion).
    return (c == 1) and ((abs(a[0][0] - b[0][0]) + abs(a[0][1] - b[0][1])) > 2)
def swap(a, b):
    """Return the two arguments in reversed order (tuple-swap helper)."""
    return (b, a)
def tracking(frame):
    """Track feature points in ``frame`` with pyramidal Lucas-Kanade optical
    flow and return a copy of the frame with the tracks drawn on it.

    State is carried between consecutive calls through the module-level
    globals gray_prev / points1 / points2 / st.
    """
    global gray_prev, points1, points2, st
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    output = frame.copy()
    # Detect feature points (re-detected on every frame)
    points1 = cv2.goodFeaturesToTrack(gray, 500, 0.01, 2)
    # NOTE(review): this aliases points1 rather than copying it, so the
    # compaction loop below mutates points1 too — confirm this is intended
    # (the C++ version keeps a separate `initial` vector).
    initial = points1;
    if gray_prev is None:
        gray_prev = gray.copy()
    # Optical-flow pyramid: compute the feature positions in the new frame
    # lk_params = dict(winSize=(15, 15),maxLevel=2,criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))
    # points2, st, err = cv2.calcOpticalFlowPyrLK(gray_prev, gray, points1, None, **lk_params)
    # The default parameters alone work as well; the difference is small
    points2, st, err = cv2.calcOpticalFlowPyrLK(gray_prev, gray, points1, None)
    # Compact the arrays, keeping only the well-tracked points
    # NOTE(review): points2.size counts scalar elements (N*1*2), not points;
    # the st.size guard below is what actually bounds the loop — verify.
    k = 0
    for i in range(0, points2.size):
        if i >= st.size:
            break;
        if acceptTrackedPoint(initial[i], points2[i], st[i]) == True:
            initial[k] = initial[i]
            points2[k] = points2[i]
            k = k + 1
    # Display the feature points and their motion tracks
    # Select the good points (flow found)
    # NOTE(review): the names look swapped — good_new is taken from the
    # original positions and good_old from the new ones; confirm.
    good_new = initial[st == 1]
    good_old = points2[st == 1]
    # Draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        if i >= k:
            break
        a, b = new.ravel()
        c, d = old.ravel()
        output = cv2.line(output, (int(a), int(b)), (int(c), int(d)), (0, 0, 255), 1)
        output = cv2.circle(output, (int(c), int(d)), 3, (0, 255, 0), -1)
    # Use the current tracking result as the reference for the next frame
    points2, points1 = swap(points2, points1)
    gray_prev, gray = swap(gray_prev, gray)
    return output;
if __name__ == "__main__":
video = cv2.VideoCapture('../images/lol.avi')
fps = video.get(cv2.CAP_PROP_FPS)
success = True
while success:
# 读帧
success, frame = video.read()
if success == False:
break
result = tracking(frame)
cv2.imshow('result', result) # 显示
cv2.waitKey(int(1000 / int(fps))) # 设置延迟时间
video.release()
PDF中还提供了一种算法,源码参考原文:
OpenCV-Python-Tutorial-中文版.pdf (P232)
演示效果如下:

代码如下:
import numpy as np
import cv2
# Classic Lucas-Kanade sparse optical-flow demo: track Shi-Tomasi corners
# through the video and draw their trajectories on an accumulating mask.
cap = cv2.VideoCapture('../images/slow.flv')
# cap = cv2.VideoCapture(0)

# Parameters for Shi-Tomasi corner detection
feature_params = dict(maxCorners=100,
                      qualityLevel=0.3,
                      minDistance=7,
                      blockSize=7)

# Lucas-Kanade optical-flow parameters
# maxLevel is the number of pyramid levels used
lk_params = dict(winSize=(15, 15),
                 maxLevel=2,
                 criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.03))

# Random colors, one per tracked corner (maxCorners == 100)
color = np.random.randint(0, 255, (100, 3))

# Grab the first frame of the video
ret, old_frame = cap.read()
# Bug fix: fail loudly if the video cannot be read instead of crashing
# inside cvtColor with an opaque error.
if not ret:
    raise IOError('Cannot read the first frame of ../images/slow.flv')
# Convert to grayscale
old_gray = cv2.cvtColor(old_frame, cv2.COLOR_BGR2GRAY)
p0 = cv2.goodFeaturesToTrack(old_gray, mask=None, **feature_params)

# Mask image used to accumulate the drawn tracks
mask = np.zeros_like(old_frame)

while True:
    ret, frame = cap.read()
    # Bug fix: the original never checked ret, so cvtColor crashed on a
    # None frame when the video ended; stop cleanly instead.
    if not ret:
        break
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    # Compute the optical flow to get the new point positions
    p1, st, err = cv2.calcOpticalFlowPyrLK(old_gray, frame_gray, p0, None, **lk_params)
    # Select the good points (flow found)
    good_new = p1[st == 1]
    good_old = p0[st == 1]
    # Draw the tracks
    for i, (new, old) in enumerate(zip(good_new, good_old)):
        a, b = new.ravel()
        c, d = old.ravel()
        mask = cv2.line(mask, (int(a), int(b)), (int(c), int(d)), color[i].tolist(), 2)
        frame = cv2.circle(frame, (int(a), int(b)), 5, color[i].tolist(), -1)
    img = cv2.add(frame, mask)
    cv2.imshow('frame', img)
    k = cv2.waitKey(30)  # & 0xff
    if k == 27:
        break
    # Now update the previous frame and previous points
    old_gray = frame_gray.copy()
    p0 = good_new.reshape(-1, 1, 2)

cv2.destroyAllWindows()
cap.release()