added object detection code based on the yolov5 machine vision model. also
added a stat file so motion and object detection values can be monitored
in real time when used with the 'watch' command.
This commit is contained in:
Maurice ONeal 2022-09-27 18:10:04 -04:00
parent d5a9d1f046
commit 5d12855fad
8 changed files with 129 additions and 28 deletions
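
As a usage sketch (the buffer directory is set in the mow config, so the path below is a placeholder), the new stat file can be polled in real time with 'watch':

watch -n 1 cat /path/to/buffDir/stat

A pass that trips both the motion and object thresholds leaves output roughly like this (values illustrative):

pix_diff: 412
block_pos_x: 320
block_pos_y: 240
block_siz_x: 80
block_siz_y: 80
--object_detection:
 confidence: 0.61
 class_score: 0.43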

View File

@@ -1,3 +1,6 @@
#!/bin/sh
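# install the yolov5 model and class list to /etc/mow, where the detector loads them at runtime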
mkdir -p /etc/mow
cp ./etc/yolov5s.onnx /etc/mow/yolov5s.onnx
cp ./etc/classes.txt /etc/mow/classes.txt
cp ./.build-mow/mow /usr/bin/mow

View File

@@ -3,11 +3,17 @@
apt update
apt install -y cmake g++ wget unzip git ffmpeg libavcodec-dev libavformat-dev libavutil-dev libswscale-dev
cd ./src
git clone https://github.com/opencv/opencv.git
if [ -d "./opencv" ]
then
cd ./opencv
git pull origin
cd ..
else
git clone https://github.com/opencv/opencv.git
fi
cd ..
mkdir -p ./.build-opencv
cd ./.build-opencv
cmake ../src/opencv
make -j4
make install

View File

@@ -181,6 +181,21 @@ void rdLine(const string &param, const string &line, int *value)
}
}
vector<string> loadClassList()
{
vector<string> ret;
ifstream ifs("/etc/mow/classes.txt");
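// one class label per line; for the stock yolov5s model this is the 80-entry COCO list (person, bicycle, car, ...)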
string line;
while (getline(ifs, line))
{
ret.push_back(line);
}
return ret;
}
bool rdConf(shared_t *share)
{
ifstream varFile(share->conf.c_str());
@@ -209,6 +224,11 @@ bool rdConf(shared_t *share)
share->vidExt = "mp4";
share->recLoopWait = false;
share->skipCmd = false;
share->network = dnn::readNet("/etc/mow/yolov5s.onnx");
share->classNames = loadClassList();
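// run inference with OpenCV's built-in DNN backend on the CPU; no GPU-enabled build is assumed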
share->network.setPreferableBackend(dnn::DNN_BACKEND_OPENCV);
share->network.setPreferableTarget(dnn::DNN_TARGET_CPU);
do
{

View File

@@ -34,30 +34,42 @@ using namespace std;
using namespace std::filesystem;
#define BUF_SZ 10
#define APP_VER "1.4"
#define APP_VER "1.4.t1"
#define ST_LEN 7
#define ST_PIXDIFF 0
#define ST_BLOCK_POS_X 1
#define ST_BLOCK_POS_Y 2
#define ST_BLOCK_LEN_X 3
#define ST_BLOCK_LEN_Y 4
#define ST_CONFIDENCE 5
#define ST_CLASS_SCORE 6
struct shared_t
{
string recordUrl;
string outDir;
string postCmd;
string conf;
string buffDir;
string concatTxtTmp;
string concatShTmp;
string createShTmp;
string vidExt;
bool init;
bool recLoopWait;
bool skipCmd;
int tmpId;
int colorThresh;
int secs;
int blockThresh;
int blockX;
int blockY;
int maxDays;
int retCode;
vector<string> stat;
vector<string> classNames;
dnn::Net network;
string recordUrl;
string outDir;
string postCmd;
string conf;
string buffDir;
string concatTxtTmp;
string concatShTmp;
string createShTmp;
string vidExt;
bool init;
bool recLoopWait;
bool skipCmd;
int tmpId;
int colorThresh;
int secs;
int blockThresh;
int blockX;
int blockY;
int maxDays;
int retCode;
};
string genTmpFile(const string &dirOut, const string &ext, shared_t *share);
@@ -77,5 +89,6 @@ bool rdConf(shared_t *share);
bool capPair(Mat &prev, Mat &next, VideoCapture &capture, shared_t *share);
Mat toGray(const Mat &src);
vector<string> lsFilesInDir(const string &path, const string &ext);
vector<string> loadClassList();
#endif // COMMON_H

View File

@@ -30,7 +30,7 @@ void detectLoop(shared_t *share)
if (moDetect(fullPath, &blockArea, &blockImg, share))
{
if (objectInImage(blockImg, blockArea))
if (objectInImage(blockImg, blockArea, share))
{
share->skipCmd = true;

View File

@@ -44,6 +44,12 @@ void secDiff(Mat imgA, Mat imgB, int rows, int cols, int rowOffs, int colOffs, R
if (pnts >= share->blockThresh)
{
share->stat[ST_PIXDIFF] = to_string(pnts);
share->stat[ST_BLOCK_POS_X] = to_string(colOffs);
share->stat[ST_BLOCK_POS_Y] = to_string(rowOffs);
share->stat[ST_BLOCK_LEN_X] = to_string(cols);
share->stat[ST_BLOCK_LEN_Y] = to_string(rows);
block->x = colOffs;
block->y = rowOffs;
@@ -92,6 +98,8 @@ bool moDetect(const string &buffFile, Rect *block, Mat *img, shared_t *share)
Mat prev;
Mat next;
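// reset every ST_* stat field to "0" before this pass; they are filled in as motion and objects are found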
share->stat = vector<string>(ST_LEN, string("0"));
while (capPair(prev, next, capture, share))
{
if (imgDiff(toGray(prev), toGray(next), block, share))
@@ -100,6 +108,19 @@ bool moDetect(const string &buffFile, Rect *block, Mat *img, shared_t *share)
mod = true; break;
}
}
ofstream file(string(cleanDir(share->buffDir) + "/stat").c_str());
file << "pix_diff: " << share->stat[ST_PIXDIFF] << endl;
file << "block_pos_x: " << share->stat[ST_BLOCK_POS_X] << endl;
file << "block_pos_y: " << share->stat[ST_BLOCK_POS_Y] << endl;
file << "block_siz_x: " << share->stat[ST_BLOCK_LEN_X] << endl;
file << "block_siz_y: " << share->stat[ST_BLOCK_LEN_Y] << endl;
file << "--object_detection:" << endl;
file << " confidence: " << share->stat[ST_CONFIDENCE] << endl;
file << " class_score: " << share->stat[ST_CLASS_SCORE] << endl;
file.close();
}
else
{

View File

@@ -12,8 +12,46 @@
#include "obj_detect.h"
bool objectInImage(const Mat &image, const Rect &area)
bool objectInImage(const Mat &src, const Rect &area, shared_t *share)
{
// future object detection code. for now this will just return true.
return true;
// reference: https://github.com/doleron/yolov5-opencv-cpp-python/blob/main/cpp/yolo.cpp
Mat blockImage(src, area);
Mat blob;
// yolov5s.onnx is exported with a fixed 640x640 input; resize the block to match, since the parse loop below assumes that output size
dnn::blobFromImage(blockImage, blob, 1./255., Size(640, 640), cv::Scalar(), true, false);
share->network.setInput(blob);
vector<Mat> outputs;
share->network.forward(outputs, share->network.getUnconnectedOutLayersNames());
float *data = (float *)outputs[0].data;
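// yolov5s output layout: 25200 candidate boxes for a 640x640 input, each a row of 85 floats:
// x, y, w, h, box confidence, then 80 COCO class scores (hence the stride of 85 below)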
for (int i = 0; i < 25200; ++i)
{
// data[4] holds the box confidence for this candidate detection
if (data[4] >= 0.4)
{
float *classesScores = data + 5;
Mat scores(1, share->classNames.size(), CV_32FC1, classesScores);
Point classId;
double maxClassScore;
minMaxLoc(scores, 0, &maxClassScore, 0, &classId);
share->stat[ST_CONFIDENCE] = to_string(data[4]);
share->stat[ST_CLASS_SCORE] = to_string(maxClassScore);
if (maxClassScore > 0.2)
{
return true;
}
}
data += 85;
}
return false;
}

View File

@@ -15,6 +15,6 @@
#include "common.h"
bool objectInImage(const Mat &image, const Rect &area);
bool objectInImage(const Mat &image, const Rect &area, shared_t *share);
#endif // OBJ_DETECT_H