Changed comparison functions again

Decided to change the frame comparison function again, from optical flow to
    a home-brewed function that compares gray levels in the pixels of each
    frame. Significant differences in gray levels between frames can
    trigger a motion event.

    Also moved app parameters away from command line arguments to an
    external config file.

    Created a README file to get this project ready for a general open
    source release.
Maurice ONeal 2022-07-08 15:24:45 -04:00
parent 8a17b58d25
commit 072cbe269c
2 changed files with 360 additions and 118 deletions

README.md Normal file

@ -0,0 +1,101 @@
# Motion Watch #
Motion Watch is a video surveillance application that monitors the video feed
of an IP camera and records only footage that contains motion. The main
advantage over continuous recording is reduced storage requirements, since
only footage of interest is written to storage.

Each instance of the app operates on a single camera, but multiple instances
can be run to cover multiple cameras.
### Usage ###
```
Usage: mow <argument>
-h : display usage information about this application.
-c : path to the config file.
```
### Config File ###
The config file is a simple text file that contains the parameters dictating
the behavior of the application. Below is an example config file with all
supported parameters and a description of each.
```
# Motion Watch config file v1.0
#
# note: all lines in this config file that start with a '#' are ignored.
# also avoid empty lines; if you need a visual break, start the line
# with a '#'.
#
recording_stream = rtsp://1.2.3.4:554/h264
# this is the url of the IP camera's main stream, used to record footage
# when it contains motion.
#
detection_stream = rtsp://1.2.3.4:554/h264cif
# this is the url of the IP camera's low resolution secondary stream, used
# to detect motion. it is never recorded. note: consider matching the fps
# of both streams for best results.
#
output_dir = /path/to/footage/directory
# this is the output directory used to store recorded footage from the
# camera. the file naming convention uses date codes: a subfolder is
# created for the date if needed and the video file inside it is named
# after the time.
#
diff_threshold = 210
# this application detects motion by loading back-to-back frames from the
# detection stream, converting them to gray scale and then comparing the
# gray levels of each pixel between those frames. if the gray levels differ
# significantly, that is considered motion. in an ideal world the gray
# difference of each pixel would be 0 when there is no motion, but cameras
# are not perfect, so non-zero gray diffs on motionless footage are common;
# that's where this threshold value comes in. 210 has been a decent value
# to use but the best value will depend entirely on your camera. (a code
# sketch of this comparison follows this example.)
#
duration = 60
# this sets, in seconds, the internal timer that checks for any cached
# frames to record to permanent storage. warning: setting this value too
# high could cause the application to use too much memory.
#
post_cmd = move_the_ptz_camera.py
# this is an optional command to run after the internal timer duration has
# elapsed. one great use for this is to move a ptz camera to the next
# position of its patrol pattern. note: the command is not called if
# motion was detected.
#
pixel_size = 3
# this is the pixel size of the detected object or movement. it can
# prevent false positives due to small movements in grass/plants or
# insects.
#
frames_post_motion = 60
# this is the number of frames to capture after motion is detected.
#
minimum_recording_frames = 90
# this is the minimum number of frames needed before video footage is
# recorded to storage. it prevents video files that are too small to
# be of any use and reduces clutter. warning: setting this value too
# high could cause the application to use too much memory.
#
section_size = 100
# detection frames are split into vertical sections and a gray level
# comparison runs in a separate thread for each section. this value
# determines how wide each section will be and therefore how many threads
# are needed to process a frame. example: a 380x240 frame with a section
# size of 100 will process 100x240, 100x240, 100x240 and 80x240 images
# in 4 threads. (see the sectioning sketch after this example.)
```
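To make diff_threshold and pixel_size more concrete, below is a minimal
sketch of the comparison idea, assuming two already-grayscaled OpenCV frames.
The function and variable names are illustrative, not the app's actual code.
```
// sketch.cpp: illustrative only, not the app's actual code
#include <opencv2/opencv.hpp>

// returns true when a roughly pixelSize x pixelSize block of pixels changed
// by more than diffThreshold gray levels between two grayscale frames
bool motionBetween(const cv::Mat &prevGray, const cv::Mat &nextGray,
                   int diffThreshold, int pixelSize)
{
    cv::Mat diff;
    cv::absdiff(prevGray, nextGray, diff); // per-pixel |prev - next|

    int rowRun = 0;

    for (int y = 0; y < diff.rows; ++y)
    {
        int colRun = 0;

        for (int x = 0; x < diff.cols; ++x)
        {
            if (diff.at<uchar>(y, x) > diffThreshold)
            {
                // count consecutive over-threshold pixels in this row
                if (++colRun >= pixelSize) break;
            }
            else
            {
                colRun = 0;
            }
        }

        // count consecutive rows that each held a long enough run
        if (colRun >= pixelSize)
        {
            if (++rowRun >= pixelSize) return true;
        }
        else
        {
            rowRun = 0;
        }
    }

    return false;
}
```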
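Likewise, a minimal sketch of how section_size could split a frame into
column strips with one worker thread per strip. sectionWorker is a
hypothetical stand-in for the app's per-section comparison, and the sketch
assumes sectionSize is greater than 0.
```
// sketch.cpp: illustrative only; sectionWorker is a hypothetical stand-in
#include <opencv2/opencv.hpp>
#include <algorithm>
#include <thread>
#include <vector>

void sectionWorker(cv::Mat strip)
{
    // the per-section gray level comparison would run here
}

void processInSections(const cv::Mat &frame, int sectionSize)
{
    std::vector<std::thread> workers;

    for (int x = 0; x < frame.cols; x += sectionSize)
    {
        // the last strip shrinks to the columns that remain, e.g. a 380
        // column frame with a section size of 100 yields strips of 100,
        // 100, 100 and 80 columns
        int width = std::min(sectionSize, frame.cols - x);

        // a cv::Mat ROI shares pixel data with the parent, so no copy here
        workers.emplace_back(sectionWorker,
                             frame(cv::Rect(x, 0, width, frame.rows)));
    }

    for (auto &w : workers)
    {
        w.join();
    }
}
```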
### Build Setup ###
This application is currently only compatible with Linux based operating
systems capable of building and installing the OpenCV API from source.
Instructions on how to install OpenCV can be found [here](https://docs.opencv.org/4.x/df/d65/tutorial_table_of_content_introduction.html).
```
cd /path/to/Motion/Watch/source
mkdir ./build
cd ./build
cmake ..
make
sudo cp ./mow /usr/bin/mow
```


@ -1,4 +1,5 @@
#include <iostream>
#include <fstream>
#include <thread>
#include <string>
#include <vector>
@ -6,6 +7,7 @@
#include <time.h>
#include <stdlib.h>
#include <sys/stat.h>
#include <errno.h>
#include <opencv4/opencv2/opencv.hpp>
#include <opencv4/opencv2/video/tracking.hpp>
@ -18,19 +20,22 @@ using namespace std;
struct shared_t
{
vector<Mat> buff;
vector<thread> writers;
TermCriteria criteria;
string detectUrl;
string recordUrl;
string outDir;
string postMoCmd;
string postNoMoCmd;
string secsStr;
string postCmd;
string conf;
bool wrRunning;
bool ffRunning;
int motion;
int gap;
int secs;
int thrWithMotion;
int thresh;
int pixSize;
int postMoIncr;
int minRecFrames;
int sectionSize;
int retCode;
} sharedRes;
@ -76,7 +81,7 @@ bool createDirTree(const string &full_path)
void vidCap(shared_t *share)
{
if (share->buff.size() >= 100)
if (share->buff.size() >= share->minRecFrames)
{
share->wrRunning = true;
@ -96,11 +101,10 @@ void vidCap(shared_t *share)
auto dstPath = cleanDir(share->outDir) + string("/") + string(dirName) + string("/") + string(fileName);
auto codec = VideoWriter::fourcc('M', 'J', 'P', 'G');
auto fps = 25.0;
VideoWriter writer;
writer.open(dstPath, codec, fps, share->buff[0].size(), true);
writer.open(dstPath, codec, 30.0, share->buff[0].size(), true);
if (!writer.isOpened())
{
@ -118,30 +122,87 @@ void vidCap(shared_t *share)
}
}
void detectDiff(Mat prev, Mat next, shared_t *share)
{
// optical flow calculations are used to detect motion.
// reference: https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
vector<Point2f> p0, p1;
vector<uchar> status;
vector<float> err;
goodFeaturesToTrack(prev, p0, 50, 0.5, 3, Mat(), 3, false, 0.04);
calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, share->criteria);
for(uint i = 0; i < p0.size(); i++)
{
if (norm(p0[i] - p1[i]) > share->gap)
{
share->motion += 150;
break;
}
else if (share->motion != 0)
{
share->motion -= 1;
}
}
}
uchar valDiff(uchar valA, uchar valB)
{
auto diff = 0;
if (valA > valB) diff = valA - valB;
if (valA < valB) diff = valB - valA;
return diff;
}
void secDiff(Mat imgA, Mat imgB, int rows, int cols, int rowOffs, int colOffs, shared_t *share)
{
auto xCnt = 0;
auto yCnt = 0;
for (auto y = rowOffs; (y < rows) && share->thrWithMotion == 0; y++)
{
for (auto x = colOffs; (x < cols) && share->thrWithMotion == 0; x++)
{
auto pixA = imgA.at<uchar>(Point(x, y));
auto pixB = imgB.at<uchar>(Point(x, y));
if (valDiff(pixA, pixB) > share->thresh)
{
xCnt += 1;
if (xCnt >= share->pixSize) break;
}
else
{
xCnt = 0;
}
}
if (xCnt >= share->pixSize)
{
yCnt += 1;
if (yCnt >= share->pixSize)
{
share->thrWithMotion += 1;
}
}
else
{
xCnt = 0;
yCnt = 0;
}
}
}
bool grayDiff(Mat imgA, Mat imgB, shared_t *share)
{
share->thrWithMotion = 0;
auto colBuff = share->sectionSize;
auto allRows = imgA.rows;
auto allCols = imgA.cols;
auto colsOffs = 0;
vector<thread> thrs;
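// carve the frame into column strips of sectionSize width; the final strip
// shrinks to the columns that remain and each strip gets its own thread.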
while (allCols != 0)
{
if (colBuff > allCols)
{
colBuff -= (colBuff - allCols);
}
thrs.push_back(thread(secDiff, imgA, imgB, allRows, colBuff, 0, colsOffs, share));
colsOffs += colBuff;
allCols -= colBuff;
}
for (auto &&thr : thrs)
{
thr.join();
}
return share->thrWithMotion != 0;
}
void timer(shared_t *share)
@ -159,14 +220,6 @@ void timer(shared_t *share)
}
}
void addFramesToBuffer(const vector<Mat> &newFrames, shared_t *share)
{
for (auto &&frame : newFrames)
{
share->buff.push_back(frame);
}
}
Mat toGray(const Mat &src)
{
Mat ret;
@ -181,15 +234,15 @@ void moDetect(shared_t *share)
auto dCap = VideoCapture(share->detectUrl, CAP_FFMPEG);
auto rCap = VideoCapture(share->recordUrl, CAP_FFMPEG);
vector<Mat> dFrames, rFrames;
Mat dFrame, rFrame;
Mat dFrame, rFrame, dPrev, rPrev;
while (share->ffRunning)
{
dCap >> dFrame;
if (share->motion == 0) dCap >> dFrame;
rCap >> rFrame;
if (dFrame.empty())
if (dFrame.empty() && (share->motion == 0))
{
// broken frames returned from the cameras i've tested this with would cause
// the entire capture connection to drop, hence why this bit of code is here
@ -201,54 +254,46 @@ void moDetect(shared_t *share)
{
rCap.open(share->recordUrl, CAP_FFMPEG);
}
else if ((dFrames.size() < 2) || (rFrames.size() < 2))
{
rFrames.push_back(rFrame.clone());
dFrames.push_back(toGray(dFrame));
}
else if (share->motion > 0)
{
share->buff.push_back(rFrame.clone());
share->motion -= 1;
}
else if (dPrev.empty() || rPrev.empty())
{
dPrev = toGray(dFrame);
rPrev = rFrame.clone();
}
else if (grayDiff(dPrev, toGray(dFrame), share))
{
share->buff.push_back(rPrev);
share->buff.push_back(rFrame.clone());
share->motion += share->postMoIncr;
rPrev.release();
dPrev.release();
}
else
{
if (share->gap == 0)
{
// share->gap is used in detectDiff() to compare how far a
// point in the optical flow has moved. it is calculated by a
// certain percentage of the total pixel area of the frames.
// as of right now it is hard coded to 0.00579% of the total
// pixel area of the frames and only needs to be calculated
// once hence why share->gap == 0 is checked.
share->gap = ((double) 0.00579 / (double) 100) * (dFrame.size().height * dFrame.size().width);
}
if (share->motion != 0)
{
addFramesToBuffer(rFrames, share);
}
detectDiff(dFrames[0], dFrames[1], share);
rFrames.clear();
dFrames.clear();
rPrev.release();
dPrev.release();
}
}
if (share->motion != 0)
{
system(share->postMoCmd.c_str());
}
else
{
system(share->postNoMoCmd.c_str());
}
system(share->postCmd.c_str());
}
string parseForParam(const string &arg, int argc, char** argv)
string parseForParam(const string &arg, int argc, char** argv, bool argOnly)
{
for (int i = 0; i < argc; ++i)
{
auto argInParams = string(argv[i]);
if (arg.compare(argInParams) == 0)
{
if (!argOnly)
{
// check ahead, make sure i + 1 won't cause out-of-range exception
if ((i + 1) <= (argc - 1))
@ -256,55 +301,151 @@ string parseForParam(const string &arg, int argc, char** argv)
return string(argv[i + 1]);
}
}
else
{
return string("true");
}
}
}
return string();
}
int main(int argc, char** argv)
{
auto vidRet = 0;
auto moRet = 0;
auto secsStr = parseForParam("-sec", argc, argv);
auto highUrl = parseForParam("-rs", argc, argv);
auto lowUrl = parseForParam("-ds", argc, argv);
auto outDir = parseForParam("-dir", argc, argv);
auto moCmd = parseForParam("-mc", argc, argv);
auto noMocmd = parseForParam("-nmc", argc, argv);
auto secs = strtol(secsStr.c_str(), NULL, 10);
if (lowUrl.empty())
{
cerr << "the detection-stream camera url is empty." << endl;
}
else if (highUrl.empty())
{
cerr << "the recording-stream camera url is empty." << endl;
}
else if (outDir.empty())
{
cerr << "the output directory is empty." << endl;
}
else if (secs == 0)
{
cerr << "the amount of seconds in -sec cannot be 0 or an invalid number was given." << endl;
}
else
{
sharedRes.criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
sharedRes.wrRunning = false;
sharedRes.motion = 0;
sharedRes.gap = 0;
while (true)
bool rdConf(shared_t *share)
{
// recording_stream
// detection_stream
// output_dir
// diff_threshold
// post_cmd
// duration
// pixel_size
// frames_post_motion
// minimum_recording_frames
// section_size
auto ret = false;
share->retCode = ENOENT;
ifstream varFile(share->conf.c_str());
if (!varFile.is_open())
{
cerr << "err: failed to open the config file: " << share->conf << " for reading. please check file permissions or if it exists." << endl;
}
else
{
string line;
do
{
getline(varFile, line);
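// '#' lines are skipped below and the surrounding do/while loop stops at
// the first empty line, which is why the config file must avoid them.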
if (line.rfind("#", 0) != 0)
{
if (line.rfind("recording_stream = ", 0) == 0)
{
share->recordUrl = line.substr(19);
cout << "recording_stream = " << share->recordUrl << endl;
}
else if (line.rfind("detection_stream = ", 0) == 0)
{
share->detectUrl = line.substr(19);
cout << "detection_stream = " << share->detectUrl << endl;
}
else if (line.rfind("output_dir = ", 0) == 0)
{
share->outDir = line.substr(13);
cout << "output_dir = " << share->outDir << endl;
}
else if (line.rfind("post_cmd = ", 0) == 0)
{
share->postCmd = line.substr(11);
cout << "post_cmd = " << share->postCmd << endl;
}
else if (line.rfind("diff_threshold = ", 0) == 0)
{
share->thresh = strtol(line.substr(17).c_str(), NULL, 10);
cout << "diff_threshold = " << share->thresh << endl;
}
else if (line.rfind("duration = ", 0) == 0)
{
share->secs = strtol(line.substr(11).c_str(), NULL, 10);
cout << "duration = " << share->secs << endl;
}
else if (line.rfind("pixel_size = ", 0) == 0)
{
share->pixSize = strtol(line.substr(13).c_str(), NULL, 10);
cout << "pixel_size = " << share->pixSize << endl;
}
else if (line.rfind("frames_post_motion = ", 0) == 0)
{
share->postMoIncr = strtol(line.substr(21).c_str(), NULL, 10);
cout << "frames_post_motion = " << share->postMoIncr << endl;
}
else if (line.rfind("minimum_recording_frames = ", 0) == 0)
{
share->minRecFrames = strtol(line.substr(27).c_str(), NULL, 10);
cout << "minimum_recording_frames = " << share->minRecFrames << endl;
}
else if (line.rfind("section_size = ", 0) == 0)
{
share->sectionSize = strtol(line.substr(15).c_str(), NULL, 10);
cout << "section_size = " << share->sectionSize << endl;
}
}
} while(!line.empty());
ret = true;
share->retCode = 0;
}
varFile.close();
return ret;
}
void showHelp()
{
cout << "Motion Watch v1.0" << endl << endl;
cout << "Usage: mow <argument>" << endl << endl;
cout << "-h : display usage information about this application." << endl;
cout << "-c : path to the config file." << endl;
}
int main(int argc, char** argv)
{
sharedRes.conf = parseForParam("-c", argc, argv, false);
if (parseForParam("-h", argc, argv, true) == "true")
{
showHelp();
}
else if (sharedRes.conf.empty())
{
cerr << "err: a config file was not given in -c" << endl;
}
else
{
sharedRes.retCode = 0;
sharedRes.motion = 0;
sharedRes.wrRunning = false;
sharedRes.recordUrl = highUrl;
sharedRes.detectUrl = lowUrl;
sharedRes.postMoCmd = moCmd;
sharedRes.postNoMoCmd = noMocmd;
sharedRes.secsStr = secsStr;
sharedRes.secs = secs;
sharedRes.outDir = outDir;
while (rdConf(&sharedRes))
{
sharedRes.ffRunning = true;
thread th1(timer, &sharedRes);
@ -317,8 +458,8 @@ int main(int argc, char** argv)
th2.join();
}
return 0;
return sharedRes.retCode;
}
return 1;
return EINVAL;
}