Completely removed the object detection code because I don't foresee going
back to that model anytime soon. Diffs no longer reset to 0 but decrement
instead, and the number of consecutive pixel diffs is now adjustable via
consec_threshold.

Updated README.md to reflect the changes to pixel diff detection.
Maurice ONeal 2022-10-14 14:31:12 -04:00
parent 9ecace7e4b
commit a0ee8e35f7
7 changed files with 28 additions and 107 deletions

View File

@@ -3,5 +3,5 @@ project( MotionWatch )
find_package( OpenCV REQUIRED )
set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++17 -pthread")
include_directories( ${OpenCV_INCLUDE_DIRS} )
add_executable( mow src/main.cpp src/common.cpp src/mo_detect.cpp src/obj_detect.cpp )
add_executable( mow src/main.cpp src/common.cpp src/mo_detect.cpp )
target_link_libraries( mow ${OpenCV_LIBS} )

View File

@@ -45,20 +45,15 @@ buff_dir = /tmp/ramdisk/cam_name
# it is recommended to use a ramdisk tmpfs for this since this directory
# is used for lots of writes.
#
color_threshold = 8
# the color level of each pixel in the camera stream can range from 0-255.
# in an ideal world the color difference between frames should be 0 when
# there is no motion, but most cameras can't do this. the threshold value
# here is used to decide whether the pixels are truly different.
consec_threshold = 512
# motion is detected by comparing back to back frames of the camera feed
# for differences in the pixels. this value determines how many consecutive
# pixels need to be different, in other words how large the suspect object
# in motion needs to be.
#
block_threshold = 3456
# this application detects motion by loading frames from the camera and
# then comparing the pixels of each back to back frame for any significant
# differences based on color_threshold. it loads the pixels of each frame
# in blocks; the size of the blocks is adjustable below. it counts how many
# pixels in a block are different and the footage is considered to have
# motion when that count exceeds this value.
block_threshold = 1024
# this value tells the application how many "lines" of consecutive pixel
# differences need to exceed consec_threshold before a block is considered
# motion (see the sketch below this file).
#
block_x = 64
# this is the x coordinate size or horizontal size of a block of pixels
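
To make the interaction between color_threshold, consec_threshold, block_threshold and the block size concrete, here is a minimal, self-contained sketch of the per-block check described above. The run counting mirrors the new secDiff() code shown further down in this commit; the pixel comparison is an assumption (the body of pixDiff() is not part of this diff), and pixelDiffers()/blockHasMotion() are illustrative names, not functions in the repository.

// sketch only -- illustrates the settings above, not the exact code in src/mo_detect.cpp
#include <cstdlib>
#include <opencv2/opencv.hpp>

// assumed behaviour of pixDiff(): two pixels count as different when their
// color levels differ by at least color_threshold.
static bool pixelDiffers(uchar pixA, uchar pixB, int colorThresh)
{
    return std::abs(int(pixA) - int(pixB)) >= colorThresh;
}

// returns true when one block_x by block_y block of two back to back grayscale
// frames contains motion according to consec_threshold and block_threshold.
static bool blockHasMotion(const cv::Mat &prev, const cv::Mat &next,
                           int rowOffs, int colOffs, int blockY, int blockX,
                           int colorThresh, int consecThresh, int blockThresh)
{
    auto diff = 0; // pixels that belong to a long enough run of differences
    auto pnts = 0; // length of the current run; carried across rows, as in secDiff()

    for (auto y = rowOffs; y < (rowOffs + blockY); y++)
    {
        for (auto x = colOffs; x < (colOffs + blockX); x++)
        {
            if (pixelDiffers(prev.at<uchar>(y, x), next.at<uchar>(y, x), colorThresh)) pnts += 1;
            else pnts = 0;

            if (pnts >= consecThresh) diff += 1;
        }
    }

    return diff >= blockThresh;
}

With the defaults above, a block only starts counting toward motion once a run of at least 512 consecutive differing pixels appears; since block_x is only 64, such a run can only build up because the counter carries over from one row of the block to the next, which matches where the new pnts variable is declared in secDiff().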

View File

@@ -188,11 +188,11 @@ bool rdConf(shared_t *share)
share->postCmd.clear();
share->buffDir.clear();
share->colorThresh = 5;
share->consecThresh = 512;
share->secs = 60;
share->blockX = 32;
share->blockY = 32;
share->blockThresh = 900;
share->blockThresh = 1024;
share->maxDays = 5;
share->vidExt = "mp4";
share->recLoopWait = false;
@@ -207,7 +207,7 @@ bool rdConf(shared_t *share)
rdLine("recording_stream = ", line, &share->recordUrl);
rdLine("output_dir = ", line, &share->outDir);
rdLine("post_cmd = ", line, &share->postCmd);
rdLine("color_threshold = ", line, &share->colorThresh);
rdLine("consec_threshold = ", line, &share->consecThresh);
rdLine("duration = ", line, &share->secs);
rdLine("buff_dir = ", line, &share->buffDir);
rdLine("block_x = ", line, &share->blockX);
@@ -310,7 +310,7 @@ void statOut(shared_t *share)
system(string("touch " + cleanDir(share->buffDir) + "/stat").c_str());
auto path = string(cleanDir(share->buffDir) + "/stat");
auto fd = open(path.c_str(), O_WRONLY);
auto fd = open(path.c_str(), fstream::out | fstream::trunc);
write(fd, share->stat.c_str(), share->stat.size() + 1);
close(fd);
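
One note on the flags in the new open() line above: fstream::out and fstream::trunc are std::ios_base openmode bits meant for C++ streams, while POSIX open() expects O_* flags, so the O_WRONLY form being replaced is the one open() actually understands. A minimal sketch of what the change appears to be aiming for (open for writing, create the file if missing, truncate any previous stat contents) is below; the flag combination and the helper name are assumptions, not something this commit contains.

// illustrative only -- assumes the goal is to rewrite the stat file on every pass
#include <fcntl.h>   // open, O_WRONLY, O_CREAT, O_TRUNC
#include <unistd.h>  // write, close
#include <string>

static void writeStat(const std::string &path, const std::string &stat)
{
    // O_CREAT also removes the need for the separate "touch" system() call
    auto fd = open(path.c_str(), O_WRONLY | O_CREAT | O_TRUNC, 0644);

    if (fd != -1)
    {
        // write only the text itself; the size() + 1 in the current code also
        // writes the trailing '\0' into the file
        write(fd, stat.c_str(), stat.size());
        close(fd);
    }
}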

View File

@@ -37,7 +37,7 @@ using namespace std;
using namespace std::filesystem;
#define BUF_SZ 10
#define APP_VER "1.4.t9"
#define APP_VER "1.4.t11"
struct shared_t
{
@@ -51,7 +51,7 @@ struct shared_t
bool init;
bool recLoopWait;
bool skipCmd;
int colorThresh;
int consecThresh;
int secs;
int blockThresh;
int blockX;

View File

@@ -23,6 +23,7 @@ bool pixDiff(const uchar &pixA, const uchar &pixB, shared_t *share)
void secDiff(const Mat &imgA, const Mat &imgB, int id, int rows, int cols, int rowOffs, int colOffs, vector<sec_t> *results, mutex *secMutex, shared_t *share)
{
auto diff = 0;
auto pnts = 0;
for (auto y = rowOffs; y < (rowOffs + rows); y++)
{
@@ -31,13 +31,16 @@ void secDiff(const Mat &imgA, const Mat &imgB, int id, int rows, int cols, int r
auto pixA = imgA.at<uchar>(Point(x, y));
auto pixB = imgB.at<uchar>(Point(x, y));
if (pixDiff(pixA, pixB, share)) diff += 1;
else diff -= 1;
if (pixDiff(pixA, pixB, share)) pnts += 1;
else pnts = 0;
if (pnts >= share->consecThresh)
{
diff += 1;
}
}
}
if (diff < 0) diff = 0;
struct sec_t res;
res.x = colOffs;
@@ -87,7 +91,10 @@ bool imgDiff(const Mat &prev, const Mat &next, shared_t *share)
auto diff = results[i].pixDiff;
auto id = results[i].id;
share->stat += string("block_thread:") + " id=" + to_string(id) + " diff=" + to_string(diff) + "\n";
if (diff > 0)
{
share->stat += string("block_thread:") + " id=" + to_string(id) + " diff=" + to_string(diff) + "\n";
}
if ((results[i].pixDiff >= share->blockThresh) && (results[i].pixDiff > maxPixDiff))
{

View File

@@ -1,61 +0,0 @@
// This file is part of Motion Watch.
// Motion Watch is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Motion Watch is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
#include "obj_detect.h"
bool objectFlowInImage(const Mat &inPrev, const Mat &inNext, const Rect &area, shared_t *share)
{
Mat prev(inPrev, area);
Mat next(inNext, area);
// optical flow calculations are used to detect motion.
// reference: https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
vector<Point2f> p0, p1;
vector<uchar> status;
vector<float> err;
auto criteria = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
// distance is basically 0.0578% of the total pixel area of the
// frames. this value is used later below.
auto thresh = ((double) 0.0578 / (double) 100) * (area.height * area.width);
auto count = 0;
goodFeaturesToTrack(prev, p0, 100, 0.3, 7, Mat(), 7, false, 0.04);
calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, criteria);
share->stat += " object_flow_detection-- block-" + to_string(area.x) + "x" + to_string(area.y) + "\n";
for(uint i = 0; i < p0.size(); i++)
{
// select good points
if(status[i] == 1)
{
auto dis = norm(p0[i] - p1[i]);
if (dis > thresh)
{
// any points that moved 0.0578% or more of the total pixel
// area can be considered an object in motion.
share->stat += " obj_move_greater_than=" + to_string(thresh) + "\n";
share->stat += " obj_move_distance=" + to_string(dis) + "\n";
return true;
}
}
}
share->stat += " no_object_flow_detected\n";
return false;
}

View File

@@ -1,20 +0,0 @@
#ifndef OBJ_DETECT_H
#define OBJ_DETECT_H
// This file is part of Motion Watch.
// Motion Watch is free software: you can redistribute it and/or modify
// it under the terms of the GNU General Public License as published by
// the Free Software Foundation, either version 3 of the License, or
// (at your option) any later version.
// Motion Watch is distributed in the hope that it will be useful,
// but WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
// GNU General Public License for more details.
#include "common.h"
bool objectFlowInImage(const Mat &inPrev, const Mat &inNext, const Rect &area, shared_t *share);
#endif // OBJ_DETECT_H