From 8a17b58d252f7ba66b9b89dbd50b679b93831d9e Mon Sep 17 00:00:00 2001
From: Maurice ONeal
Date: Sat, 11 Jun 2022 08:43:19 -0400
Subject: [PATCH] experimental trial and error changes throughout development.

All experimentation with the code so far leads up to this point: optical
flow motion detection. The code as it stands reads frames in pairs and
compares each pair for any significant change in the optical flow distance
between tracked points. Experiments have shown that this actually works
fairly well; however, CPU usage is significant and the video encoding
options are not flexible at all. The code still picks up false positives,
but I am confident this is something that can be adjusted through external
parameters, which I will implement in the future.
---
 src/main.cpp | 95 ++++++++++++++++++++++++++++++++++------------------
 1 file changed, 63 insertions(+), 32 deletions(-)

diff --git a/src/main.cpp b/src/main.cpp
index 8505d51..3e387b7 100755
--- a/src/main.cpp
+++ b/src/main.cpp
@@ -19,6 +19,7 @@ struct shared_t
 {
     vector<Mat> buff;
     vector<thread> writers;
+    TermCriteria criteria;
     string detectUrl;
     string recordUrl;
     string outDir;
@@ -27,6 +28,8 @@
     string secsStr;
     bool wrRunning;
     bool ffRunning;
+    int motion;
+    int gap;
     int secs;
 } sharedRes;
 
@@ -73,7 +76,7 @@ bool createDirTree(const string &full_path)
 
 void vidCap(shared_t *share)
 {
-    if (share->buff.size() >= 30)
+    if (share->buff.size() >= 100)
     {
         share->wrRunning = true;
 
@@ -115,7 +118,7 @@ void vidCap(shared_t *share)
     }
 }
 
-bool detectDiff(const Mat &prev, const Mat &next, shared_t *share)
+void detectDiff(Mat prev, Mat next, shared_t *share)
 {
     // optical flow calculations are used to detect motion.
     // reference: https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
@@ -123,38 +126,32 @@ bool detectDiff(const Mat &prev, const Mat &next, shared_t *share)
     vector<uchar> status;
     vector<float> err;
 
-    auto criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
-
-    // distance is basically 0.0578% of the total pixel area of the
-    // frames. this value is used later below.
-    auto distance = ((double) 0.0578 / (double) 100) * (prev.size().height * prev.size().width);
-    auto count = 0;
-
-    goodFeaturesToTrack(prev, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
-    calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, criteria);
+    goodFeaturesToTrack(prev, p0, 50, 0.5, 3, Mat(), 3, false, 0.04);
+    calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, share->criteria);
 
     for(uint i = 0; i < p0.size(); i++)
     {
-        // select good points
-        if(status[i] == 1)
+        if (norm(p0[i] - p1[i]) > share->gap)
         {
-            if (norm(p0[i] - p1[i]) > distance)
-            {
-                // any points that moved 0.0578% or more of the total pixel
-                // area can be considered motion.
-                return true;
-            }
+            share->motion += 150;
+
+            break;
+        }
+        else if (share->motion != 0)
+        {
+            share->motion -= 1;
         }
     }
-
-    return false;
 }
 
 void timer(shared_t *share)
 {
     sleep(share->secs);
 
-    share->ffRunning = false;
+    if (share->motion == 0)
+    {
+        share->ffRunning = false;
+    }
 
     if (!share->wrRunning)
     {
@@ -162,13 +159,30 @@ void timer(shared_t *share)
     }
 }
 
+void addFramesToBuffer(const vector<Mat> &newFrames, shared_t *share)
+{
+    for (auto &&frame : newFrames)
+    {
+        share->buff.push_back(frame);
+    }
+}
+
+Mat toGray(const Mat &src)
+{
+    Mat ret;
+
+    cvtColor(src, ret, COLOR_BGR2GRAY);
+
+    return ret;
+}
+
 void moDetect(shared_t *share)
 {
     auto dCap = VideoCapture(share->detectUrl, CAP_FFMPEG);
     auto rCap = VideoCapture(share->recordUrl, CAP_FFMPEG);
-    auto mod = false;
 
-    Mat dPrevFrame, dNextFrame, dFrame, rFrame;
+    vector<Mat> dFrames, rFrames;
+    Mat dFrame, rFrame;
 
     while (share->ffRunning)
     {
@@ -187,24 +201,38 @@
         {
             rCap.open(share->recordUrl, CAP_FFMPEG);
         }
-        else if (dPrevFrame.empty())
+        else if ((dFrames.size() < 2) || (rFrames.size() < 2))
         {
-            cvtColor(dFrame, dPrevFrame, COLOR_BGR2GRAY);
+            rFrames.push_back(rFrame.clone());
+            dFrames.push_back(toGray(dFrame));
         }
         else
         {
-            cvtColor(dFrame, dNextFrame, COLOR_BGR2GRAY);
-
-            if (detectDiff(dPrevFrame, dNextFrame, share))
+            if (share->gap == 0)
             {
-                mod = true;
+                // share->gap is used in detectDiff() to compare how far a
+                // point in the optical flow has moved. it is calculated as a
+                // certain percentage of the total pixel area of the frames.
 
-                share->buff.push_back(rFrame.clone());
+                // as of right now it is hard coded to 0.00579% of the total
+                // pixel area of the frames and only needs to be calculated
+                // once, hence why share->gap == 0 is checked.
+                share->gap = ((double) 0.00579 / (double) 100) * (dFrame.size().height * dFrame.size().width);
             }
+
+            if (share->motion != 0)
+            {
+                addFramesToBuffer(rFrames, share);
+            }
+
+            detectDiff(dFrames[0], dFrames[1], share);
+
+            rFrames.clear();
+            dFrames.clear();
         }
     }
 
-    if (mod)
+    if (share->motion != 0)
     {
         system(share->postMoCmd.c_str());
     }
@@ -263,7 +291,10 @@ int main(int argc, char** argv)
     }
     else
     {
+        sharedRes.criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
         sharedRes.wrRunning = false;
+        sharedRes.motion = 0;
+        sharedRes.gap = 0;
 
         while (true)
         {
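
For reference, below is a minimal, self-contained sketch of the pairwise
optical flow check described above, useful for experimenting with the
threshold outside the full program. It is only a sketch: the motionBetween
helper, the command line handling, and the test.mp4 fallback are
illustrative assumptions rather than part of the patch; it compares
consecutive frames in a sliding window rather than the disjoint pairs the
patch buffers; and the status check from the OpenCV tutorial is kept even
though the patch drops it.

    // sketch.cpp -- illustrative only, not part of this patch.
    // build (assuming OpenCV 4 is installed):
    //   g++ sketch.cpp -o sketch $(pkg-config --cflags --libs opencv4)
    #include <opencv2/opencv.hpp>
    #include <iostream>
    #include <vector>

    using namespace cv;
    using namespace std;

    // true if any tracked point moved farther than 'gap' pixels between the
    // two grayscale frames; same goodFeaturesToTrack() and
    // calcOpticalFlowPyrLK() parameters as detectDiff() above.
    static bool motionBetween(const Mat &prev, const Mat &next, double gap)
    {
        vector<Point2f> p0, p1;
        vector<uchar> status;
        vector<float> err;

        TermCriteria criteria(TermCriteria::COUNT + TermCriteria::EPS, 10, 0.03);

        goodFeaturesToTrack(prev, p0, 50, 0.5, 3, Mat(), 3, false, 0.04);

        if (p0.empty())
        {
            return false;
        }

        calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, criteria);

        for (size_t i = 0; i < p0.size(); i++)
        {
            // status check kept from the OpenCV tutorial; the patch drops it.
            if (status[i] == 1 && norm(p0[i] - p1[i]) > gap)
            {
                return true;
            }
        }

        return false;
    }

    int main(int argc, char **argv)
    {
        // test.mp4 is a placeholder; pass any stream or file ffmpeg can read.
        VideoCapture cap(argc > 1 ? argv[1] : "test.mp4", CAP_FFMPEG);

        Mat frame, prevGray, nextGray;

        while (cap.read(frame))
        {
            cvtColor(frame, nextGray, COLOR_BGR2GRAY);

            if (!prevGray.empty())
            {
                // same hard coded threshold as the patch: 0.00579% of the
                // total pixel area of the frame.
                double gap = ((double) 0.00579 / (double) 100) * (frame.rows * frame.cols);

                if (motionBetween(prevGray, nextGray, gap))
                {
                    cout << "motion detected" << endl;
                }
            }

            prevGray = nextGray.clone();
        }

        return 0;
    }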
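
To put the hard coded percentage in concrete terms: on a 1280x720 stream the
total pixel area is 921600, so the gap threshold works out to
(0.00579 / 100) * 921600, roughly 53 pixels of movement between a pair of
frames before a point counts as motion. Exposing that percentage (along with
the feature count and quality level passed to goodFeaturesToTrack()) is the
kind of external parameter mentioned above for tuning out the remaining
false positives.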