experimental trial and error changes throughout development.

all current experimentation with the code leads up to this point
for optical flow motion detection. the code as it stands reads
frames in pairs and then compares each pair for any significant
change in the optical flow distance between tracked points.

experiments have shown that this actually works fairly well;
however, CPU usage is significant and the video encoding options
are not flexible at all. the code still picks up false positives,
but I'm fairly confident this can be adjusted through external
parameters, which I will implement in the future.
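
for reference, the pairwise check boils down to something like the
sketch below. this is only an illustration of the idea, not the exact
code in this commit: framesDiffer and minDist are made-up names, and
the feature/flow parameters are just ballpark values.

    // minimal sketch: detect motion between two grayscale frames by
    // tracking corners with Lucas-Kanade optical flow and checking how
    // far any successfully tracked point moved.
    #include <opencv2/core.hpp>
    #include <opencv2/imgproc.hpp>
    #include <opencv2/video/tracking.hpp>

    #include <vector>

    using namespace cv;
    using namespace std;

    bool framesDiffer(const Mat &prevGray, const Mat &nextGray, double minDist)
    {
        vector<Point2f> p0, p1;
        vector<uchar> status;
        vector<float> err;

        // pick strong corners in the previous frame to track.
        goodFeaturesToTrack(prevGray, p0, 50, 0.5, 3);

        if (p0.empty()) return false;

        // track those corners into the next frame.
        calcOpticalFlowPyrLK(prevGray, nextGray, p0, p1, status, err, Size(10, 10), 2);

        for (size_t i = 0; i < p0.size(); ++i)
        {
            // a tracked point that moved farther than the threshold
            // counts as motion.
            if (status[i] && norm(p1[i] - p0[i]) > minDist)
            {
                return true;
            }
        }

        return false;
    }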
Maurice ONeal 2022-06-11 08:43:19 -04:00
parent f721f45fc9
commit 8a17b58d25

@@ -19,6 +19,7 @@ struct shared_t
 {
     vector<Mat> buff;
     vector<thread> writers;
+    TermCriteria criteria;
     string detectUrl;
     string recordUrl;
     string outDir;
@ -27,6 +28,8 @@ struct shared_t
string secsStr; string secsStr;
bool wrRunning; bool wrRunning;
bool ffRunning; bool ffRunning;
int motion;
int gap;
int secs; int secs;
} sharedRes; } sharedRes;
@@ -73,7 +76,7 @@ bool createDirTree(const string &full_path)
 void vidCap(shared_t *share)
 {
-    if (share->buff.size() >= 30)
+    if (share->buff.size() >= 100)
     {
         share->wrRunning = true;
@@ -115,7 +118,7 @@ void vidCap(shared_t *share)
     }
 }
 
-bool detectDiff(const Mat &prev, const Mat &next, shared_t *share)
+void detectDiff(Mat prev, Mat next, shared_t *share)
 {
     // optical flow calculations are used to detect motion.
     // reference: https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
@@ -123,38 +126,32 @@ bool detectDiff(const Mat &prev, const Mat &next, shared_t *share)
     vector<uchar> status;
     vector<float> err;
 
-    auto criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
-
-    // distance is basically 0.0578% of the total pixel area of the
-    // frames. this value is used later below.
-    auto distance = ((double) 0.0578 / (double) 100) * (prev.size().height * prev.size().width);
-    auto count = 0;
-
-    goodFeaturesToTrack(prev, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
-    calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, criteria);
+    goodFeaturesToTrack(prev, p0, 50, 0.5, 3, Mat(), 3, false, 0.04);
+    calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, share->criteria);
 
     for(uint i = 0; i < p0.size(); i++)
     {
-        // select good points
-        if(status[i] == 1)
+        if (norm(p0[i] - p1[i]) > share->gap)
         {
-            if (norm(p0[i] - p1[i]) > distance)
-            {
-                // any points that moved 0.0578% or more of the total pixel
-                // area can be considered motion.
-                return true;
-            }
+            share->motion += 150;
+
+            break;
+        }
+        else if (share->motion != 0)
+        {
+            share->motion -= 1;
         }
     }
-
-    return false;
 }
 
 void timer(shared_t *share)
 {
     sleep(share->secs);
-    share->ffRunning = false;
+
+    if (share->motion == 0)
+    {
+        share->ffRunning = false;
+    }
 
     if (!share->wrRunning)
     {
@@ -162,13 +159,30 @@ void timer(shared_t *share)
     }
 }
 
+void addFramesToBuffer(const vector<Mat> &newFrames, shared_t *share)
+{
+    for (auto &&frame : newFrames)
+    {
+        share->buff.push_back(frame);
+    }
+}
+
+Mat toGray(const Mat &src)
+{
+    Mat ret;
+
+    cvtColor(src, ret, COLOR_BGR2GRAY);
+
+    return ret;
+}
+
 void moDetect(shared_t *share)
 {
     auto dCap = VideoCapture(share->detectUrl, CAP_FFMPEG);
     auto rCap = VideoCapture(share->recordUrl, CAP_FFMPEG);
 
-    auto mod = false;
-
-    Mat dPrevFrame, dNextFrame, dFrame, rFrame;
+    vector<Mat> dFrames, rFrames;
+
+    Mat dFrame, rFrame;
 
     while (share->ffRunning)
     {
@@ -187,24 +201,38 @@ void moDetect(shared_t *share)
         {
             rCap.open(share->recordUrl, CAP_FFMPEG);
         }
-        else if (dPrevFrame.empty())
+        else if ((dFrames.size() < 2) || (rFrames.size() < 2))
         {
-            cvtColor(dFrame, dPrevFrame, COLOR_BGR2GRAY);
+            rFrames.push_back(rFrame.clone());
+            dFrames.push_back(toGray(dFrame));
         }
         else
         {
-            cvtColor(dFrame, dNextFrame, COLOR_BGR2GRAY);
-
-            if (detectDiff(dPrevFrame, dNextFrame, share))
+            if (share->gap == 0)
             {
-                mod = true;
-
-                share->buff.push_back(rFrame.clone());
+                // share->gap is used in detectDiff() to compare how far a
+                // point in the optical flow has moved. it is calculated by a
+                // certain percentage of the total pixel area of the frames.
+                // as of right now it is hard coded to 0.00579% of the total
+                // pixel area of the frames and only needs to be calculated
+                // once hence why share->gap == 0 is checked.
+                share->gap = ((double) 0.00579 / (double) 100) * (dFrame.size().height * dFrame.size().width);
             }
+
+            if (share->motion != 0)
+            {
+                addFramesToBuffer(rFrames, share);
+            }
+
+            detectDiff(dFrames[0], dFrames[1], share);
+
+            rFrames.clear();
+            dFrames.clear();
         }
     }
 
-    if (mod)
+    if (share->motion != 0)
     {
         system(share->postMoCmd.c_str());
     }
@@ -263,7 +291,10 @@ int main(int argc, char** argv)
     }
     else
     {
+        sharedRes.criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
         sharedRes.wrRunning = false;
+        sharedRes.motion = 0;
+        sharedRes.gap = 0;
 
         while (true)
         {