Major changes to application internals and efficiency improvements

Removed all threads from the application as there is no use for them at
this time. Instead, the application now operates on a single event loop
and uses ffmpeg directly to record video footage instead of opencv's
implementation.

The old code pulled tons of frames from the detection stream at full
speed, wasting a lot of cpu cycles. It now pulls frames at a steady rate
set by the new detect_fps value. Doing this significantly reduced cpu
usage, and end users can potentially reduce it further by setting
detect_fps lower than the default.
Maurice ONeal 2022-07-28 10:30:07 -04:00
parent c054356541
commit a36d4e93c0
2 changed files with 187 additions and 247 deletions
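
For context, the single event loop described above paces its reads with a sleep derived from detect_fps instead of pulling the stream at full speed. Here is a minimal sketch of that pacing idea, independent of the actual source; readFrame and checkMotion are hypothetical stand-ins, not functions from this commit:

```cpp
#include <unistd.h>

// hypothetical stand-ins for the real capture and detection calls
static void readFrame()   { /* pull one frame from the detection stream */ }
static void checkMotion() { /* compare it against the previous frame */ }

// one pass covers 'secs' seconds of footage at a steady 'detectFps'
// frames per second instead of reading as fast as the camera can deliver
void detectionLoop(int detectFps, int secs)
{
    for (int i = 0; i < secs * detectFps; ++i)
    {
        readFrame();
        checkMotion();
        usleep(1000000 / detectFps); // at 20 fps, sleep 50000 usec per pull
    }
}

int main()
{
    detectionLoop(20, 60); // the default detect_fps and duration values
    return 0;
}
```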


@@ -29,7 +29,7 @@ parameters supported and descriptions of each parameter.
 # line, start it with a '#'
 #
 recording_stream = rtsp://1.2.3.4:554/h264
-# this is the url to the main stream of the IP camera that will be used to
+# this is the url to the main stream of the IP camera that will be used
 # to record footage if it contains motion.
 #
 detection_stream = rtsp://1.2.3.4:554/h264cif
@@ -43,52 +43,61 @@ output_dir = /path/to/footage/directory
 # a sub-folder for the date if it needs to and then stores the video file
 # using the time.
 #
-diff_threshold = 210
+diff_verbose = N
+# this is a boolean Y or N option that turns on/off outputting the pixel
+# diff values that the application is reading from the camera in real
+# time to stdout. this is useful for determining the best value to use
+# in pix_threshold, color_threshold or consec_threshold.
+#
+pix_threshold = 8
 # this application detects motion by loading back to back frames from the
-# detection stream, converts them to gray scale and then compares the gray
-# levels of each pixel of those frames. if the gray levels are significantly
-# different, that will be considered motion. in an ideal world, the gray
-# differences of each pixel should be 0 if there is no motion but cameras
-# are not perfect so non-zero gray diffs on no motion footage can be common
-# so that's where this threshold value comes in. 210 has been a decent
-# value to use but this will entirely depend on the camera you are using.
+# detection stream and then compares the color spectrum levels of each
+# pixel of those frames. if the levels are significantly different, that
+# may be considered motion. this threshold indicates how many pixels in
+# the image need to be different before triggering a potential motion
+# event.
 #
+color_threshold = 190
+# the color levels in each pixel of the detection stream can range from
+# 0-255. in an ideal world the color differences between frames should be
+# 0 if there is no motion but most cameras can't do this. the threshold
+# value here is used to filter whether the pixels are truly different or
+# if it is just seeing color differences from small objects that are of
+# no interest.
+#
+consec_threshold = 10
+# this setting is used to tell the application how many consecutive frames
+# need to have pixel differences over the pix_threshold before triggering
+# a motion event and then recording to storage.
+#
 duration = 60
-# this sets the internal timer to check for any cached frames to record to
-# permanent storage. warning: setting this value too high could cause the
-# application to use too much memory.
+# this sets the internal timer used to reset the detection loop and then
+# call post_cmd if it is defined. note: this time can be extended if
+# motion was detected. this will also reload the config file so changes
+# to the settings will be applied without restarting the application.
 #
 post_cmd = move_the_ptz_camera.py
-# this an optional command to run after the internal timer duration has
+# this is an optional command to run after the internal timer duration has
 # elapsed. one great use for this is to move a ptz camera to the next
-# position of it's patrol pattern. note: the command is not called if
-# motion was detected.
+# position of its patrol pattern. note: the call to this command can be
+# delayed if motion was detected.
 #
-pixel_size = 3
-# this is the pixel size of the detected object or movement. this can
-# prevent false positives due small moves in grass/plants or insects.
+detect_fps = 20
+# this is how many frames to read from the detection stream per second.
+# setting this any higher than the camera's actual fps will just waste
+# cpu cycles but setting it too low makes detecting motion inaccurate.
 #
-secs_post_motion = 3
+secs_post_motion = 10
 # this is the minimum amount of seconds to capture after motion was
 # detected.
 #
-recording_fps = 25
-# recording fps to use when recording footage to storage.
-#
-section_size = 100
-# detection frames are read in y axis sections and then runs gray level
-# comparisons in a separate thread for each section. this value is used
-# to determine how wide each section will be and how many threads will be
-# needed to process each section. example: a 380x240 frame with a section
-# size of 100 will process 100x240, 100x240, 100x240 and 80x240 images
-# in 4 threads.
 ```
 ### Build Setup ###
-This application is currently only compatible with a Linux based operating
-systems that are capable of building and installing the opencv API from source.
-instructions on how to install opencv can be found [here](https://docs.opencv.org/4.x/df/d65/tutorial_table_of_content_introduction.html).
+This application is currently only compatible with Linux based operating
+systems that are capable of building and installing the opencv API from
+source. instructions on how to install opencv can be found at
+[opencv](https://docs.opencv.org/4.x/df/d65/tutorial_table_of_content_introduction.html).
 ```
 cd /path/to/Motion/Watch/source
 mkdir ./build

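To make the three detection thresholds above concrete: color_threshold decides whether a single pixel counts as changed, pix_threshold decides whether a frame counts as motion, and consec_threshold decides when a run of such frames becomes a recording event. A simplified sketch of that chain follows; the helper names are illustrative, while the real implementation is pixDiff, secDiff and imgDiff in the source diff below:

```cpp
#include <cstdlib>

// a pixel counts as changed when its gray level (0-255) moved by at
// least color_threshold between two consecutive frames
bool pixelChanged(int grayA, int grayB, int colorThreshold)
{
    return std::abs(grayA - grayB) >= colorThreshold;
}

// a frame counts as motion when at least pix_threshold pixels changed
bool frameHasMotion(int changedPixels, int pixThreshold)
{
    return changedPixels >= pixThreshold;
}

// recording only triggers after consec_threshold motion frames in a row
bool motionEvent(int consecutiveFrames, int consecThreshold)
{
    return consecutiveFrames >= consecThreshold;
}
```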

@@ -1,8 +1,6 @@
 #include <iostream>
 #include <fstream>
-#include <thread>
 #include <string>
-#include <vector>
 #include <unistd.h>
 #include <time.h>
 #include <stdlib.h>
@@ -10,8 +8,6 @@
 #include <errno.h>
 #include <opencv4/opencv2/opencv.hpp>
-#include <opencv4/opencv2/video/tracking.hpp>
-#include <opencv4/opencv2/core/ocl.hpp>
 #include <opencv4/opencv2/videoio.hpp>
 using namespace cv;
@@ -19,23 +15,22 @@ using namespace std;
 struct shared_t
 {
-    vector<Mat> buff;
-    string detectUrl;
-    string recordUrl;
-    string outDir;
-    string postCmd;
-    string conf;
-    bool wrRunning;
-    bool ffRunning;
-    bool motion;
-    int fps;
-    int secs;
-    int thrWithMotion;
-    int thresh;
-    int pixSize;
-    int postMoIncr;
-    int sectionSize;
-    int retCode;
+    VideoCapture camera;
+    Mat baseImg;
+    string detectUrl;
+    string recordUrl;
+    string diffVerb;
+    string outDir;
+    string postCmd;
+    string conf;
+    int detectFps;
+    int colorThresh;
+    int secs;
+    int consec;
+    int consecThresh;
+    int pixThresh;
+    int postMoIncr;
+    int retCode;
 } sharedRes;
@@ -79,152 +74,35 @@ bool createDirTree(const string &full_path)
     return ret;
 }
-void vidCap(shared_t *share)
-{
-    if (!share->buff.empty())
-    {
-        share->wrRunning = true;
-        time_t rawtime;
-        time(&rawtime);
-        auto timeinfo = localtime(&rawtime);
-        char dirName[20];
-        char fileName[20];
-        strftime(dirName, 20, "%Y%m%d", timeinfo);
-        strftime(fileName, 20, "%H%M%S.avi", timeinfo);
-        createDirTree(cleanDir(share->outDir) + string("/") + string(dirName));
-        auto dstPath = cleanDir(share->outDir) + string("/") + string(dirName) + string("/") + string(fileName);
-        auto codec = VideoWriter::fourcc('M', 'J', 'P', 'G');
-        VideoWriter writer;
-        writer.open(dstPath, codec, (double) share->fps, share->buff[0].size(), true);
-        if (!writer.isOpened())
-        {
-            cerr << "could not open the output video file for writing: " << dstPath;
-        }
-        else
-        {
-            for (; !share->buff.empty(); share->buff.erase(share->buff.begin()))
-            {
-                writer.write(share->buff[0]);
-            }
-        }
-        share->wrRunning = false;
-    }
-}
-uchar valDiff(uchar valA, uchar valB)
-{
-    auto diff = 0;
-    if (valA > valB) diff = valA - valB;
-    if (valA < valB) diff = valB - valA;
-    return diff;
-}
-void secDiff(Mat imgA, Mat imgB, int rows, int cols, int rowOffs, int colOffs, shared_t *share)
-{
-    auto xCnt = 0;
-    auto yCnt = 0;
-    for (auto y = rowOffs; (y < rows) && share->thrWithMotion == 0; y++)
-    {
-        for (auto x = colOffs; (x < cols) && share->thrWithMotion == 0; x++)
-        {
-            auto pixA = imgA.at<uchar>(Point(x, y));
-            auto pixB = imgB.at<uchar>(Point(x, y));
-            if (valDiff(pixA, pixB) > share->thresh)
-            {
-                xCnt += 1;
-                if (xCnt >= share->pixSize) break;
-            }
-            else
-            {
-                xCnt = 0;
-            }
-        }
-        if (xCnt >= share->pixSize)
-        {
-            yCnt += 1;
-            if (yCnt >= share->pixSize)
-            {
-                share->thrWithMotion += 1;
-            }
-        }
-        else
-        {
-            xCnt = 0;
-            yCnt = 0;
-        }
-    }
-}
-bool grayDiff(Mat imgA, Mat imgB, shared_t *share)
-{
-    share->thrWithMotion = 0;
-    auto colBuff = share->sectionSize;
-    auto allRows = imgA.rows;
-    auto allCols = imgA.cols;
-    auto colsOffs = 0;
-    vector<thread> thrs;
-    while (allCols != 0)
-    {
-        if (colBuff > allCols)
-        {
-            colBuff -= (colBuff - allCols);
-        }
-        thrs.push_back(thread(secDiff, imgA, imgB, allRows, colBuff, 0, colsOffs, share));
-        colsOffs += colBuff;
-        allCols -= colBuff;
-    }
-    for (auto &&thr : thrs)
-    {
-        thr.join();
-    }
-    return share->thrWithMotion != 0;
-}
-void loopTimer(shared_t *share)
-{
-    sleep(share->secs);
-    if (!share->motion)
-    {
-        share->ffRunning = false;
-    }
-    if (!share->wrRunning)
-    {
-        new thread(vidCap, share);
-    }
-}
-void motionTimer(shared_t *share)
-{
-    sleep(share->postMoIncr);
-    share->motion = false;
-}
+string genDstFile(const string &dirOut, const string &ext)
+{
+    time_t rawtime;
+    time(&rawtime);
+    auto timeinfo = localtime(&rawtime);
+    char dirName[20];
+    char fileName[20];
+    strftime(dirName, 20, "%Y%m%d", timeinfo);
+    strftime(fileName, 20, "%H%M%S", timeinfo);
+    createDirTree(cleanDir(dirOut) + string("/") + string(dirName));
+    return cleanDir(dirOut) + string("/") + string(dirName) + string("/") + string(fileName) + ext;
+}
+void wrOut(shared_t *share)
+{
+    share->baseImg.release();
+    share->consec = 0;
+    auto dstPath = genDstFile(share->outDir, ".mp4");
+    auto cmd = "ffmpeg -i " + share->recordUrl + " -y -vcodec copy -t " + to_string(share->postMoIncr) + " " + dstPath;
+    system(cmd.c_str());
+}
 Mat toGray(const Mat &src)
@@ -236,60 +114,76 @@ Mat toGray(const Mat &src)
     return ret;
 }
-void moDetect(shared_t *share)
-{
-    auto dCap = VideoCapture(share->detectUrl, CAP_FFMPEG);
-    auto rCap = VideoCapture(share->recordUrl, CAP_FFMPEG);
-    Mat dFrame, rFrame, dPrev, rPrev;
-    while (share->ffRunning)
-    {
-        if (!share->motion) dCap >> dFrame;
-        rCap >> rFrame;
-        if (dFrame.empty() && (!share->motion))
-        {
-            // broken frames returned from the cameras i've tested this with would cause
-            // the entire capture connection to drop, hence why this bit of code is here
-            // to detect empty frames (signs of a dropped connection) and attempt
-            // re-connect to the cammera.
-            dCap.open(share->detectUrl, CAP_FFMPEG);
-        }
-        else if (rFrame.empty())
-        {
-            rCap.open(share->recordUrl, CAP_FFMPEG);
-        }
-        else if (share->motion)
-        {
-            share->buff.push_back(rFrame.clone());
-        }
-        else if (dPrev.empty() || rPrev.empty())
-        {
-            dPrev = toGray(dFrame);
-            rPrev = rFrame.clone();
-        }
-        else if (grayDiff(dPrev, toGray(dFrame), share))
-        {
-            share->buff.push_back(rPrev);
-            share->buff.push_back(rFrame.clone());
-            share->motion = true;
-            thread(motionTimer, share);
-            rPrev.release();
-            dPrev.release();
-        }
-        else
-        {
-            rPrev.release();
-            dPrev.release();
-        }
-    }
-    system(share->postCmd.c_str());
-}
+bool pixDiff(const uchar &pixA, const uchar &pixB, shared_t *share)
+{
+    auto diff = 0;
+    if (pixA > pixB) diff = pixA - pixB;
+    if (pixB > pixA) diff = pixB - pixA;
+    if (diff < share->colorThresh)
+    {
+        diff = 0;
+    }
+    return diff != 0;
+}
+int secDiff(Mat imgA, Mat imgB, int rows, int cols, int rowOffs, int colOffs, shared_t *share)
+{
+    auto pnts = 0;
+    for (auto y = rowOffs; y < rows; y++)
+    {
+        for (auto x = colOffs; x < cols; x++)
+        {
+            auto pixA = imgA.at<uchar>(Point(x, y));
+            auto pixB = imgB.at<uchar>(Point(x, y));
+            if (pixDiff(pixA, pixB, share))
+            {
+                pnts += 1;
+            }
+        }
+    }
+    return pnts;
+}
+bool imgDiff(Mat curImg, shared_t *share)
+{
+    if (share->baseImg.empty())
+    {
+        share->baseImg = toGray(curImg);
+        return false;
+    }
+    else
+    {
+        curImg = toGray(curImg);
+        auto pnts = secDiff(share->baseImg, curImg, curImg.rows, curImg.cols, 0, 0, share);
+        if (share->diffVerb == "Y")
+        {
+            cout << "diff: " << pnts << endl;
+        }
+        share->baseImg = curImg.clone();
+        if (pnts >= share->pixThresh)
+        {
+            share->consec += 1;
+            return share->consec >= share->consecThresh;
+        }
+        else
+        {
+            share->consec = 0;
+            return false;
+        }
+    }
+}
 string parseForParam(const string &arg, int argc, char** argv, bool argOnly)
@@ -354,6 +248,21 @@ bool rdConf(shared_t *share)
 {
     string line;
+    share->recordUrl.clear();
+    share->detectUrl.clear();
+    share->outDir.clear();
+    share->postCmd.clear();
+    share->diffVerb.clear();
+    share->baseImg.release();
+    share->pixThresh = 8;
+    share->consecThresh = 10;
+    share->colorThresh = 60;
+    share->secs = 60;
+    share->detectFps = 20;
+    share->postMoIncr = 5;
+    share->consec = 0;
     do
     {
         getline(varFile, line);
@@ -363,13 +272,14 @@ bool rdConf(shared_t *share)
             rdLine("recording_stream = ", line, &share->recordUrl);
             rdLine("detection_stream = ", line, &share->detectUrl);
             rdLine("output_dir = ", line, &share->outDir);
+            rdLine("diff_verbose = ", line, &share->diffVerb);
             rdLine("post_cmd = ", line, &share->postCmd);
-            rdLine("diff_threshold = ", line, &share->thresh);
+            rdLine("pix_threshold = ", line, &share->pixThresh);
+            rdLine("color_threshold = ", line, &share->colorThresh);
+            rdLine("consec_threshold = ", line, &share->consecThresh);
             rdLine("duration = ", line, &share->secs);
-            rdLine("pixel_size = ", line, &share->pixSize);
             rdLine("secs_post_motion = ", line, &share->postMoIncr);
-            rdLine("section_size = ", line, &share->sectionSize);
-            rdLine("recording_fps = ", line, &share->fps);
+            rdLine("detect_fps = ", line, &share->detectFps);
         }
     } while(!line.empty());
@@ -384,6 +294,43 @@ bool rdConf(shared_t *share)
     return ret;
 }
+void moDetect(shared_t *share)
+{
+    while (rdConf(share))
+    {
+        for (auto i = 0; i < (share->secs * share->detectFps); ++i)
+        {
+            Mat frame;
+            if (!share->camera.isOpened())
+            {
+                share->camera.open(share->detectUrl, CAP_FFMPEG);
+            }
+            share->camera >> frame;
+            if (frame.empty())
+            {
+                // broken frames returned from the cameras i've tested this with would
+                // cause the entire capture connection to drop, hence why this bit of
+                // code is here to detect empty frames (signs of a dropped connection)
+                // and attempt to reconnect to the camera.
+                share->camera.open(share->detectUrl, CAP_FFMPEG);
+            }
+            else if (imgDiff(frame, share))
+            {
+                wrOut(share); i = 0;
+            }
+            else
+            {
+                usleep(1000000 / share->detectFps);
+            }
+        }
+        system(share->postCmd.c_str());
+    }
+}
 void showHelp()
 {
     cout << "Motion Watch v1.0" << endl << endl;
@@ -406,23 +353,7 @@ int main(int argc, char** argv)
     }
     else
     {
         sharedRes.retCode = 0;
-        sharedRes.motion = false;
-        sharedRes.wrRunning = false;
-        while (rdConf(&sharedRes))
-        {
-            sharedRes.ffRunning = true;
-            thread th1(loopTimer, &sharedRes);
-            thread th2(moDetect, &sharedRes);
-            // Wait for the threads to finish
-            // Wait for thread t1 to finish
-            th1.join();
-            // Wait for thread t2 to finish
-            th2.join();
-        }
+        moDetect(&sharedRes);
         return sharedRes.retCode;
     }
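
For reference, the command that wrOut() hands to system() is a plain ffmpeg stream copy of the recording stream. With the sample config values above it would expand to something like the following; the url, duration and output path are illustrative:

```
ffmpeg -i rtsp://1.2.3.4:554/h264 -y -vcodec copy -t 10 /path/to/footage/directory/20220728/103007.mp4
```

Because -vcodec copy remuxes the camera's stream without re-encoding, the recording side costs almost no cpu, which is what lets the whole application run on a single event loop.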