#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <opencv2/videoio.hpp>
#include <opencv2/video/tracking.hpp>
#include <iostream>
#include <string>
#include <vector>
#include <thread>
#include <ctime>
#include <cstdlib>
#include <cerrno>
#include <sys/stat.h>
#include <unistd.h>

using namespace cv;
using namespace std;

struct shared_t
{
    vector<Mat>         buff;
    vector<VideoWriter> writers; // note: currently unused below.
    TermCriteria        criteria;
    string              detectUrl;
    string              recordUrl;
    string              outDir;
    string              postMoCmd;
    string              postNoMoCmd;
    string              secsStr;
    bool                wrRunning;
    bool                ffRunning;
    int                 motion;
    int                 gap;
    int                 secs;

} sharedRes;

string cleanDir(const string &path)
{
    // strip a single trailing slash so path segments can be joined consistently.
    if (!path.empty() && path[path.size() - 1] == '/')
    {
        return path.substr(0, path.size() - 1);
    }
    else
    {
        return path;
    }
}

bool createDir(const string &dir)
{
    auto ret = mkdir(dir.c_str(), 0777);

    if (ret == -1)
    {
        // an already existing directory is not treated as a failure.
        return errno == EEXIST;
    }
    else
    {
        return true;
    }
}

bool createDirTree(const string &full_path)
{
    // walk the path one '/' at a time, creating each directory level as needed.
    size_t pos = 0;
    auto   ret = true;

    while (ret == true && pos != string::npos)
    {
        pos = full_path.find('/', pos + 1);
        ret = createDir(full_path.substr(0, pos));
    }

    return ret;
}

void vidCap(shared_t *share)
{
    if (share->buff.size() >= 100)
    {
        share->wrRunning = true;

        time_t rawtime;

        time(&rawtime);

        auto timeinfo = localtime(&rawtime);

        char dirName[20];
        char fileName[20];

        strftime(dirName, 20, "%Y%m%d", timeinfo);
        strftime(fileName, 20, "%H%M%S.avi", timeinfo);

        createDirTree(cleanDir(share->outDir) + string("/") + string(dirName));

        auto dstPath = cleanDir(share->outDir) + string("/") + string(dirName) + string("/") + string(fileName);
        auto codec   = VideoWriter::fourcc('M', 'J', 'P', 'G');
        auto fps     = 25.0;

        VideoWriter writer;

        writer.open(dstPath, codec, fps, share->buff[0].size(), true);

        if (!writer.isOpened())
        {
            cerr << "could not open the output video file for writing: " << dstPath << endl;
        }
        else
        {
            // write out every buffered frame, then release the buffer in one
            // pass instead of erasing from the front one element at a time.
            for (auto &&frame : share->buff)
            {
                writer.write(frame);
            }

            share->buff.clear();
        }

        share->wrRunning = false;
    }
}

void detectDiff(Mat prev, Mat next, shared_t *share)
{
    // optical flow calculations are used to detect motion.
    // reference: https://docs.opencv.org/3.4/d4/dee/tutorial_optical_flow.html
    vector<Point2f> p0, p1;
    vector<uchar>   status;
    vector<float>   err;

    goodFeaturesToTrack(prev, p0, 50, 0.5, 3, Mat(), 3, false, 0.04);

    if (p0.empty())
    {
        // no trackable features in this frame; calcOpticalFlowPyrLK() would
        // otherwise error out on an empty point list.
        return;
    }

    calcOpticalFlowPyrLK(prev, next, p0, p1, status, err, Size(10, 10), 2, share->criteria);

    for (uint i = 0; i < p0.size(); i++)
    {
        if (norm(p0[i] - p1[i]) > share->gap)
        {
            // a tracked point moved further than the threshold, count it as motion.
            share->motion += 150;

            break;
        }
        else if (share->motion != 0)
        {
            share->motion -= 1;
        }
    }
}

void timer(shared_t *share)
{
    sleep(share->secs);

    if (share->motion == 0)
    {
        share->ffRunning = false;
    }

    if (!share->wrRunning)
    {
        // detach instead of leaking a heap allocated thread object.
        thread(vidCap, share).detach();
    }
}

void addFramesToBuffer(const vector<Mat> &newFrames, shared_t *share)
{
    for (auto &&frame : newFrames)
    {
        share->buff.push_back(frame);
    }
}

Mat toGray(const Mat &src)
{
    Mat ret;

    cvtColor(src, ret, COLOR_BGR2GRAY);

    return ret;
}
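// NOTE: sharedRes is written by the timer() / moDetect() threads and drained
// by the vidCap() thread with no locking, so buff in particular is subject to
// a data race. a minimal sketch of a guarded hand-off (the names below are
// illustrative, not part of the original design, and it is not wired into the
// code in this file):
//
//     #include <mutex>
//
//     static mutex buffMtx;
//
//     void drainBuffer(shared_t *share, vector<Mat> &local)
//     {
//         lock_guard<mutex> lock(buffMtx);
//
//         local.swap(share->buff); // take the frames while holding the lock
//     }
//
// vidCap() could then encode from the local copy without holding the lock,
// and addFramesToBuffer() would take the same lock around its push_back().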
void moDetect(shared_t *share)
{
    auto dCap = VideoCapture(share->detectUrl, CAP_FFMPEG);
    auto rCap = VideoCapture(share->recordUrl, CAP_FFMPEG);

    vector<Mat> dFrames, rFrames;
    Mat         dFrame, rFrame;

    while (share->ffRunning)
    {
        dCap >> dFrame;
        rCap >> rFrame;

        if (dFrame.empty())
        {
            // broken frames returned from the cameras i've tested this with
            // would cause the entire capture connection to drop, hence this
            // bit of code that detects empty frames (a sign of a dropped
            // connection) and attempts to re-connect to the camera.
            dCap.open(share->detectUrl, CAP_FFMPEG);
        }
        else if (rFrame.empty())
        {
            rCap.open(share->recordUrl, CAP_FFMPEG);
        }
        else if ((dFrames.size() < 2) || (rFrames.size() < 2))
        {
            rFrames.push_back(rFrame.clone());
            dFrames.push_back(toGray(dFrame));
        }
        else
        {
            if (share->gap == 0)
            {
                // share->gap is used in detectDiff() to decide how far a point
                // in the optical flow must move to count as motion. it is a
                // fixed percentage of the total pixel area of the frames,
                // currently hard coded to 0.00579% of that area. it only needs
                // to be calculated once, hence the share->gap == 0 check.
                share->gap = ((double) 0.00579 / (double) 100) * (dFrame.size().height * dFrame.size().width);
            }

            if (share->motion != 0)
            {
                addFramesToBuffer(rFrames, share);
            }

            detectDiff(dFrames[0], dFrames[1], share);

            rFrames.clear();
            dFrames.clear();
        }
    }

    if (share->motion != 0)
    {
        system(share->postMoCmd.c_str());
    }
    else
    {
        system(share->postNoMoCmd.c_str());
    }
}

string parseForParam(const string &arg, int argc, char** argv)
{
    for (int i = 0; i < argc; ++i)
    {
        auto argInParams = string(argv[i]);

        if (arg.compare(argInParams) == 0)
        {
            // check ahead, make sure i + 1 won't cause an out-of-range read.
            if ((i + 1) <= (argc - 1))
            {
                return string(argv[i + 1]);
            }
        }
    }

    return string();
}

int main(int argc, char** argv)
{
    auto secsStr = parseForParam("-sec", argc, argv);
    auto highUrl = parseForParam("-rs", argc, argv);
    auto lowUrl  = parseForParam("-ds", argc, argv);
    auto outDir  = parseForParam("-dir", argc, argv);
    auto moCmd   = parseForParam("-mc", argc, argv);
    auto noMoCmd = parseForParam("-nmc", argc, argv);
    auto secs    = strtol(secsStr.c_str(), NULL, 10);

    if (lowUrl.empty())
    {
        cerr << "the detection-stream camera url is empty." << endl;
    }
    else if (highUrl.empty())
    {
        cerr << "the recording-stream camera url is empty." << endl;
    }
    else if (outDir.empty())
    {
        cerr << "the output directory is empty." << endl;
    }
    else if (secs <= 0)
    {
        cerr << "the amount of seconds in -sec must be a positive number." << endl;
    }
    else
    {
        sharedRes.criteria  = TermCriteria((TermCriteria::COUNT) + (TermCriteria::EPS), 10, 0.03);
        sharedRes.wrRunning = false;
        sharedRes.motion    = 0;
        sharedRes.gap       = 0;

        while (true)
        {
            sharedRes.recordUrl   = highUrl;
            sharedRes.detectUrl   = lowUrl;
            sharedRes.postMoCmd   = moCmd;
            sharedRes.postNoMoCmd = noMoCmd;
            sharedRes.secsStr     = secsStr;
            sharedRes.secs        = secs;
            sharedRes.outDir      = outDir;
            sharedRes.ffRunning   = true;

            thread th1(timer, &sharedRes);
            thread th2(moDetect, &sharedRes);

            // wait for both threads to finish before restarting the capture loop.
            th1.join();
            th2.join();
        }

        return 0;
    }

    return 1;
}
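// example invocation (the urls, directory and commands below are placeholders,
// not values from the original):
//
//     ./motion -ds rtsp://camera/low_res_stream \
//              -rs rtsp://camera/high_res_stream \
//              -dir /var/footage \
//              -sec 60 \
//              -mc "/path/to/motion_script.sh" \
//              -nmc "/path/to/no_motion_script.sh"
//
// -ds is the low resolution stream used for motion detection, -rs the high
// resolution stream that gets recorded, -dir the output directory, -sec the
// timer interval in seconds, and -mc / -nmc the commands run after a cycle
// that did / did not see motion.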