Major changes to application internals and efficiency improvements

removed all threads from the application as there is no use for them at
this time. instead, the application will now operate on a single event
loop and directly use ffmpeg to record video footage instead of opencv's
implementation.

the old code pulled frames from the detection stream at full speed,
wasting a lot of cpu cycles. instead, it will now pull frames at a steady
rate set by the new detect_fps value. doing this significantly reduced
cpu usage, and end users can potentially reduce cpu usage further by
setting the fps value lower than the default.
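
the idea behind the steady pull is simple: read one frame, then sleep out
the rest of that frame's time budget. below is a minimal sketch of that
pacing loop; the function and variable names are illustrative, not the
application's actual code.

```cpp
// sketch of paced frame pulling: read a frame, then sleep for the
// remainder of the frame interval instead of draining the stream.
#include <opencv4/opencv2/opencv.hpp>
#include <unistd.h>

void pacedReads(cv::VideoCapture &cam, int detectFps, int secs)
{
    for (int i = 0; i < secs * detectFps; ++i)
    {
        cv::Mat frame;

        cam >> frame;

        // 1000000 usec / detectFps is the per-frame time budget;
        // sleeping out the rest of it is what frees the cpu between reads.
        usleep(1000000 / detectFps);
    }
}
```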
Maurice ONeal 2022-07-28 10:30:07 -04:00
parent c054356541
commit a36d4e93c0
2 changed files with 187 additions and 247 deletions


@@ -29,7 +29,7 @@ parameters supported and descriptions of each parameter.
 # line, start it with a '#'
 #
 recording_stream = rtsp://1.2.3.4:554/h264
-# this is the url to the main stream of the IP camera that will be used to
+# this is the url to the main stream of the IP camera that will be used
 # to record footage if it contains motion.
 #
 detection_stream = rtsp://1.2.3.4:554/h264cif
@@ -43,52 +43,61 @@ output_dir = /path/to/footage/directory
 # a sub-folder for the date if it needs to and then stores the video file
 # using the time.
 #
-diff_threshold = 210
+diff_verbose = N
+# this is a boolean Y or N option that turns on/off the option to output
+# the pixel diff values that the application is reading from the camera in
+# real time to stdout. this is useful for determining the best value to
+# use in pix_threshold, color_threshold or consec_threshold.
+#
+pix_threshold = 8
 # this application detects motion by loading back to back frames from the
-# detection stream, converts them to gray scale and then compares the gray
-# levels of each pixel of those frames. if the gray levels are significantly
-# different, that will be considered motion. in an ideal world, the gray
-# differences of each pixel should be 0 if there is no motion but cameras
-# are not perfect so non-zero gray diffs on no motion footage can be common
-# so that's where this threshold value comes in. 210 has been a decent
-# value to use but this will entirely depend on the camera you are using.
+# detection stream and then compares the color spectrum levels of each
+# pixel of those frames. if the levels are significantly different, that
+# may be considered motion. this threshold indicates how many pixels in
+# the image need to be different before triggering a potential motion
+# event.
+#
+color_threshold = 190
+# the color levels in each pixel of the detection stream can range from
+# 0-255. in an ideal world the color differences between frames should be
+# 0 if there is no motion but most cameras can't do this. the threshold
+# value here is used to filter whether the pixels are truly different or
+# if it is seeing color differences from small objects of no interest.
+#
+consec_threshold = 10
+# this setting tells the application how many consecutive frames need to
+# have pixel differences over the pix_threshold before triggering a
+# motion event and then recording to storage.
 #
 duration = 60
-# this sets the internal timer to check for any cached frames to record to
-# permanent storage. warning: setting this value too high could cause the
-# application to use too much memory.
+# this sets the internal timer used to reset the detection loop and then
+# call post_cmd if it is defined. note: this time can be extended if
+# motion was detected. this will also reload the config file so changes
+# to the settings will be applied without restarting the application.
 #
 post_cmd = move_the_ptz_camera.py
 # this is an optional command to run after the internal timer duration has
 # elapsed. one great use for this is to move a ptz camera to the next
-# position of it's patrol pattern. note: the command is not called if
-# motion was detected.
+# position of its patrol pattern. note: the call to this command can be
+# delayed if motion was detected.
 #
-pixel_size = 3
-# this is the pixel size of the detected object or movement. this can
-# prevent false positives due small moves in grass/plants or insects.
+detect_fps = 20
+# this is how many frames to read from the detection stream per second.
+# setting this any higher than the camera's actual fps will just waste
+# cpu cycles but setting it too low makes detecting motion inaccurate.
 #
-secs_post_motion = 3
+secs_post_motion = 10
 # this is the minimum amount of seconds to capture after motion was
 # detected.
 #
-recording_fps = 25
-# recording fps to use when recording footage to storage.
-#
-section_size = 100
-# detection frames are read in y axis sections and then runs gray level
-# comparisons in a separate thread for each section. this value is used
-# to determine how wide each section will be and how many threads will be
-# needed to process each section. example: a 380x240 frame with a section
-# size of 100 will process 100x240, 100x240, 100x240 and 80x240 images
-# in 4 threads.
 ```
 ### Build Setup ###
 This application is currently only compatible with Linux based operating
 systems that are capable of building and installing the opencv API from source.
-instructions on how to install opencv can be found [here](https://docs.opencv.org/4.x/df/d65/tutorial_table_of_content_introduction.html).
+instructions on how to install opencv can be found at
+[opencv](https://docs.opencv.org/4.x/df/d65/tutorial_table_of_content_introduction.html).
 ```
 cd /path/to/Motion/Watch/source
 mkdir ./build
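
for reference, a complete config file assembled from the example values
documented above might look like the following; the urls, paths and
threshold values are placeholders that will differ per camera and setup.

```
recording_stream = rtsp://1.2.3.4:554/h264
detection_stream = rtsp://1.2.3.4:554/h264cif
output_dir = /path/to/footage/directory
diff_verbose = N
pix_threshold = 8
color_threshold = 190
consec_threshold = 10
duration = 60
post_cmd = move_the_ptz_camera.py
detect_fps = 20
secs_post_motion = 10
```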


@@ -1,8 +1,6 @@
 #include <iostream>
 #include <fstream>
-#include <thread>
 #include <string>
-#include <vector>
 #include <unistd.h>
 #include <time.h>
 #include <stdlib.h>
@@ -10,8 +8,6 @@
 #include <errno.h>
 #include <opencv4/opencv2/opencv.hpp>
-#include <opencv4/opencv2/video/tracking.hpp>
-#include <opencv4/opencv2/core/ocl.hpp>
 #include <opencv4/opencv2/videoio.hpp>
 
 using namespace cv;
@@ -19,22 +15,21 @@ using namespace std;
 struct shared_t
 {
-    vector<Mat> buff;
+    VideoCapture camera;
+    Mat baseImg;
     string detectUrl;
     string recordUrl;
+    string diffVerb;
     string outDir;
     string postCmd;
     string conf;
-    bool wrRunning;
-    bool ffRunning;
-    bool motion;
-    int fps;
+    int detectFps;
+    int colorThresh;
     int secs;
-    int thrWithMotion;
-    int thresh;
-    int pixSize;
+    int consec;
+    int consecThresh;
+    int pixThresh;
     int postMoIncr;
-    int sectionSize;
     int retCode;
 } sharedRes;
@@ -79,12 +74,8 @@ bool createDirTree(const string &full_path)
     return ret;
 }
 
-void vidCap(shared_t *share)
+string genDstFile(const string &dirOut, const string &ext)
 {
-    if (!share->buff.empty())
-    {
-        share->wrRunning = true;
-
     time_t rawtime;
 
     time(&rawtime);
@@ -95,136 +86,23 @@ void vidCap(shared_t *share)
     char fileName[20];
 
     strftime(dirName, 20, "%Y%m%d", timeinfo);
-    strftime(fileName, 20, "%H%M%S.avi", timeinfo);
+    strftime(fileName, 20, "%H%M%S", timeinfo);
 
-    createDirTree(cleanDir(share->outDir) + string("/") + string(dirName));
+    createDirTree(cleanDir(dirOut) + string("/") + string(dirName));
 
-    auto dstPath = cleanDir(share->outDir) + string("/") + string(dirName) + string("/") + string(fileName);
-    auto codec = VideoWriter::fourcc('M', 'J', 'P', 'G');
-
-    VideoWriter writer;
-
-    writer.open(dstPath, codec, (double) share->fps, share->buff[0].size(), true);
-
-    if (!writer.isOpened())
-    {
-        cerr << "could not open the output video file for writing: " << dstPath;
-    }
-    else
-    {
-        for (; !share->buff.empty(); share->buff.erase(share->buff.begin()))
-        {
-            writer.write(share->buff[0]);
-        }
-    }
-
-    share->wrRunning = false;
-    }
-}
-
-uchar valDiff(uchar valA, uchar valB)
-{
-    auto diff = 0;
-
-    if (valA > valB) diff = valA - valB;
-    if (valA < valB) diff = valB - valA;
-
-    return diff;
-}
-
-void secDiff(Mat imgA, Mat imgB, int rows, int cols, int rowOffs, int colOffs, shared_t *share)
-{
-    auto xCnt = 0;
-    auto yCnt = 0;
-
-    for (auto y = rowOffs; (y < rows) && share->thrWithMotion == 0; y++)
-    {
-        for (auto x = colOffs; (x < cols) && share->thrWithMotion == 0; x++)
-        {
-            auto pixA = imgA.at<uchar>(Point(x, y));
-            auto pixB = imgB.at<uchar>(Point(x, y));
-
-            if (valDiff(pixA, pixB) > share->thresh)
-            {
-                xCnt += 1;
-
-                if (xCnt >= share->pixSize) break;
-            }
-            else
-            {
-                xCnt = 0;
-            }
-        }
-
-        if (xCnt >= share->pixSize)
-        {
-            yCnt += 1;
-
-            if (yCnt >= share->pixSize)
-            {
-                share->thrWithMotion += 1;
-            }
-        }
-        else
-        {
-            xCnt = 0;
-            yCnt = 0;
-        }
-    }
-}
-
-bool grayDiff(Mat imgA, Mat imgB, shared_t *share)
-{
-    share->thrWithMotion = 0;
-
-    auto colBuff = share->sectionSize;
-    auto allRows = imgA.rows;
-    auto allCols = imgA.cols;
-    auto colsOffs = 0;
-
-    vector<thread> thrs;
-
-    while (allCols != 0)
-    {
-        if (colBuff > allCols)
-        {
-            colBuff -= (colBuff - allCols);
-        }
-
-        thrs.push_back(thread(secDiff, imgA, imgB, allRows, colBuff, 0, colsOffs, share));
-
-        colsOffs += colBuff;
-        allCols -= colBuff;
-    }
-
-    for (auto &&thr : thrs)
-    {
-        thr.join();
-    }
-
-    return share->thrWithMotion != 0;
-}
-
-void loopTimer(shared_t *share)
-{
-    sleep(share->secs);
-
-    if (!share->motion)
-    {
-        share->ffRunning = false;
-    }
-
-    if (!share->wrRunning)
-    {
-        new thread(vidCap, share);
-    }
-}
-
-void motionTimer(shared_t *share)
-{
-    sleep(share->postMoIncr);
-
-    share->motion = false;
-}
+    return cleanDir(dirOut) + string("/") + string(dirName) + string("/") + string(fileName) + ext;
+}
+
+void wrOut(shared_t *share)
+{
+    share->baseImg.release();
+
+    share->consec = 0;
+
+    auto dstPath = genDstFile(share->outDir, ".mp4");
+    auto cmd = "ffmpeg -i " + share->recordUrl + " -y -vcodec copy -t " + to_string(share->postMoIncr) + " " + dstPath;
+
+    system(cmd.c_str());
+}
 
 Mat toGray(const Mat &src)
@@ -236,60 +114,76 @@ Mat toGray(const Mat &src)
     return ret;
 }
 
-void moDetect(shared_t *share)
-{
-    auto dCap = VideoCapture(share->detectUrl, CAP_FFMPEG);
-    auto rCap = VideoCapture(share->recordUrl, CAP_FFMPEG);
-
-    Mat dFrame, rFrame, dPrev, rPrev;
-
-    while (share->ffRunning)
-    {
-        if (!share->motion) dCap >> dFrame;
-
-        rCap >> rFrame;
-
-        if (dFrame.empty() && (!share->motion))
-        {
-            // broken frames returned from the cameras i've tested this with would cause
-            // the entire capture connection to drop, hence why this bit of code is here
-            // to detect empty frames (signs of a dropped connection) and attempt
-            // re-connect to the cammera.
-            dCap.open(share->detectUrl, CAP_FFMPEG);
-        }
-        else if (rFrame.empty())
-        {
-            rCap.open(share->recordUrl, CAP_FFMPEG);
-        }
-        else if (share->motion)
-        {
-            share->buff.push_back(rFrame.clone());
-        }
-        else if (dPrev.empty() || rPrev.empty())
-        {
-            dPrev = toGray(dFrame);
-            rPrev = rFrame.clone();
-        }
-        else if (grayDiff(dPrev, toGray(dFrame), share))
-        {
-            share->buff.push_back(rPrev);
-            share->buff.push_back(rFrame.clone());
-
-            share->motion = true;
-
-            thread(motionTimer, share);
-
-            rPrev.release();
-            dPrev.release();
-        }
-        else
-        {
-            rPrev.release();
-            dPrev.release();
-        }
-    }
-
-    system(share->postCmd.c_str());
-}
+bool pixDiff(const uchar &pixA, const uchar &pixB, shared_t *share)
+{
+    auto diff = 0;
+
+    if (pixA > pixB) diff = pixA - pixB;
+    if (pixB > pixA) diff = pixB - pixA;
+
+    if (diff < share->colorThresh)
+    {
+        diff = 0;
+    }
+
+    return diff != 0;
+}
+
+int secDiff(Mat imgA, Mat imgB, int rows, int cols, int rowOffs, int colOffs, shared_t *share)
+{
+    auto pnts = 0;
+
+    for (auto y = rowOffs; y < rows; y++)
+    {
+        for (auto x = colOffs; x < cols; x++)
+        {
+            auto pixA = imgA.at<uchar>(Point(x, y));
+            auto pixB = imgB.at<uchar>(Point(x, y));
+
+            if (pixDiff(pixA, pixB, share))
+            {
+                pnts += 1;
+            }
+        }
+    }
+
+    return pnts;
+}
+
+bool imgDiff(Mat curImg, shared_t *share)
+{
+    if (share->baseImg.empty())
+    {
+        share->baseImg = toGray(curImg);
+
+        return false;
+    }
+    else
+    {
+        curImg = toGray(curImg);
+
+        auto pnts = secDiff(share->baseImg, curImg, curImg.rows, curImg.cols, 0, 0, share);
+
+        if (share->diffVerb == "Y")
+        {
+            cout << "diff: " << pnts << endl;
+        }
+
+        share->baseImg = curImg.clone();
+
+        if (pnts >= share->pixThresh)
+        {
+            share->consec += 1;
+
+            return share->consec >= share->consecThresh;
+        }
+        else
+        {
+            share->consec = 0;
+
+            return false;
+        }
+    }
+}
 
 string parseForParam(const string &arg, int argc, char** argv, bool argOnly)
@@ -354,6 +248,21 @@ bool rdConf(shared_t *share)
 {
     string line;
 
+    share->recordUrl.clear();
+    share->detectUrl.clear();
+    share->outDir.clear();
+    share->postCmd.clear();
+    share->diffVerb.clear();
+    share->baseImg.release();
+
+    share->pixThresh = 8;
+    share->consecThresh = 10;
+    share->colorThresh = 60;
+    share->secs = 60;
+    share->detectFps = 20;
+    share->postMoIncr = 5;
+    share->consec = 0;
+
     do
     {
         getline(varFile, line);
@@ -363,13 +272,14 @@ bool rdConf(shared_t *share)
             rdLine("recording_stream = ", line, &share->recordUrl);
             rdLine("detection_stream = ", line, &share->detectUrl);
             rdLine("output_dir = ", line, &share->outDir);
+            rdLine("diff_verbose = ", line, &share->diffVerb);
             rdLine("post_cmd = ", line, &share->postCmd);
-            rdLine("diff_threshold = ", line, &share->thresh);
+            rdLine("pix_threshold = ", line, &share->pixThresh);
+            rdLine("color_threshold = ", line, &share->colorThresh);
+            rdLine("consec_threshold = ", line, &share->consecThresh);
             rdLine("duration = ", line, &share->secs);
-            rdLine("pixel_size = ", line, &share->pixSize);
             rdLine("secs_post_motion = ", line, &share->postMoIncr);
-            rdLine("section_size = ", line, &share->sectionSize);
-            rdLine("recording_fps = ", line, &share->fps);
+            rdLine("detect_fps = ", line, &share->detectFps);
         }
     } while(!line.empty());
@@ -384,6 +294,43 @@ bool rdConf(shared_t *share)
     return ret;
 }
 
+void moDetect(shared_t *share)
+{
+    while (rdConf(share))
+    {
+        for (auto i = 0; i < (share->secs * share->detectFps); ++i)
+        {
+            Mat frame;
+
+            if (!share->camera.isOpened())
+            {
+                share->camera.open(share->detectUrl, CAP_FFMPEG);
+            }
+
+            share->camera >> frame;
+
+            if (frame.empty())
+            {
+                // broken frames returned from the cameras i've tested this with would
+                // cause the entire capture connection to drop, hence this bit of code
+                // is here to detect empty frames (signs of a dropped connection) and
+                // attempt to re-connect to the camera.
+                share->camera.open(share->detectUrl, CAP_FFMPEG);
+            }
+            else if (imgDiff(frame, share))
+            {
+                wrOut(share);
+                i = 0;
+            }
+            else
+            {
+                usleep(1000000 / share->detectFps);
+            }
+        }
+
+        system(share->postCmd.c_str());
+    }
+}
 
 void showHelp()
 {
     cout << "Motion Watch v1.0" << endl << endl;
@@ -406,23 +353,7 @@ int main(int argc, char** argv)
     }
     else
     {
-        sharedRes.retCode = 0;
-        sharedRes.motion = false;
-        sharedRes.wrRunning = false;
-
-        while (rdConf(&sharedRes))
-        {
-            sharedRes.ffRunning = true;
-
-            thread th1(loopTimer, &sharedRes);
-            thread th2(moDetect, &sharedRes);
-
-            // Wait for the threads to finish
-            // Wait for thread t1 to finish
-            th1.join();
-            // Wait for thread t2 to finish
-            th2.join();
-        }
+        moDetect(&sharedRes);
 
         return sharedRes.retCode;
     }
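
for context on the recording change: instead of decoding frames and
re-encoding them through opencv's VideoWriter, the new wrOut shells out to
ffmpeg and stream-copies the camera feed to disk. here is a minimal
standalone sketch of that approach; the function name and parameters are
illustrative placeholders (the real code derives them from the config).

```cpp
// sketch of recording via ffmpeg stream copy instead of opencv's
// VideoWriter; recordUrl, secs and dstPath are placeholders.
#include <cstdlib>
#include <string>

void recordClip(const std::string &recordUrl, int secs, const std::string &dstPath)
{
    // -vcodec copy writes the stream without re-encoding (minimal cpu),
    // -t caps the clip length, -y overwrites any existing file.
    auto cmd = "ffmpeg -i " + recordUrl + " -y -vcodec copy -t "
             + std::to_string(secs) + " " + dstPath;

    std::system(cmd.c_str());
}
```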