Commit ee1954f7 authored by Sebastian Hahta

Add debug info to DetectAndTrack and fix single camera bugs

parent 6c3dd5ce
......@@ -34,12 +34,17 @@ class DetectAndTrack : public ftl::operators::Operator {
bool track(const cv::Mat &im);
private:
ftl::codecs::Channel channel_in_;
ftl::codecs::Channel channel_out_;
bool debug_;
int id_max_;
struct Object {
cv::Ptr<cv::Tracker> tracker;
int id;
cv::Rect2d object;
cv::Ptr<cv::Tracker> tracker;
int fail_count;
};
std::vector<Object> tracked_;
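The reordered Object entry now carries a stable id (taken from id_max_) ahead of the bounding box and tracker, so the brace-initialization in detect() has to list the fields in exactly this order. A minimal standalone sketch of creating such an entry, assuming only OpenCV's tracking module (makeTrack is an illustrative helper, not part of this commit):

#include <opencv2/core.hpp>
#include <opencv2/tracking.hpp>

// Field order matters: brace-init in detect() lists id, box, tracker, fail count.
struct Object {
    int id;
    cv::Rect2d object;
    cv::Ptr<cv::Tracker> tracker;
    int fail_count;
};

// Illustrative helper: wrap a fresh detection in a tracked entry.
Object makeTrack(int &id_max, const cv::Mat &im, const cv::Rect2d &box) {
    cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
    tracker->init(im, box);                 // same init call as detect()
    return { id_max++, box, tracker, 0 };   // fail_count starts at zero
}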
......
......@@ -23,7 +23,8 @@ DetectAndTrack::DetectAndTrack(ftl::Configurable *cfg) : ftl::operators::Operato
bool DetectAndTrack::init() {
fname_ = config()->value<string>("filename", "");
debug_ = config()->value<bool>("debug", false);
detect_n_frames_ = config()->value<int>("n_frames", 10);
detect_n_frames_ = detect_n_frames_ < 0.0 ? 0.0 : detect_n_frames_;
......@@ -55,6 +56,8 @@ bool DetectAndTrack::init() {
if (min_size_[1] > max_size_[1]) { min_size_[1] = max_size_[1]; }
channel_in_ = ftl::codecs::Channel::Colour;
channel_out_ = ftl::codecs::Channel::Data;
id_max_ = 0;
bool retval = false;
......@@ -75,12 +78,6 @@ bool DetectAndTrack::init() {
return true;
}
static double distance(Point2i p, Point2i q) {
double a = (p.x-q.x);
double b = (p.y-q.y);
return sqrt(a*a+b*b);
}
static Point2d center(Rect2d obj) {
return Point2d(obj.x+obj.width/2.0, obj.y+obj.height/2.0);
}
......@@ -101,7 +98,7 @@ bool DetectAndTrack::detect(const Mat &im) {
bool found = false;
for (auto &tracker : tracked_) {
if (distance(center(tracker.object), c) < max_distance_) {
if (cv::norm(center(tracker.object)-c) < max_distance_) {
// update? (bounding box can be quite different)
// tracker.object = obj;
found = true;
......@@ -112,7 +109,7 @@ bool DetectAndTrack::detect(const Mat &im) {
if (!found && (tracked_.size() < max_tracked_)) {
cv::Ptr<cv::Tracker> tracker = cv::TrackerKCF::create();
tracker->init(im, obj);
tracked_.push_back({ tracker, obj, 0 });
tracked_.push_back({ id_max_++, obj, tracker, 0 });
}
}
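The hand-rolled distance() helper is dropped in favour of cv::norm on the point difference, which computes the same Euclidean (L2) distance. A small self-contained check of the equivalence (values are arbitrary):

#include <opencv2/core.hpp>
#include <cassert>
#include <cmath>

int main() {
    cv::Point2d p(1.0, 2.0), q(4.0, 6.0);
    double byHand = std::sqrt((p.x - q.x) * (p.x - q.x) + (p.y - q.y) * (p.y - q.y));
    double byNorm = cv::norm(p - q);             // L2 norm of the difference vector
    assert(std::abs(byHand - byNorm) < 1e-12);   // both evaluate to 5.0 here
    return 0;
}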
......@@ -171,11 +168,20 @@ bool DetectAndTrack::apply(Frame &in, Frame &out, cudaStream_t stream) {
detect(gray_);
}
// TODO: save results somewhere
std::vector<Rect2d> result;
result.reserve(tracked_.size());
for (auto const &tracked : tracked_) { result.push_back(tracked.object); }
in.create(ftl::codecs::Channel::Data, result);
for (auto const &tracked : tracked_) {
result.push_back(tracked.object);
if (debug_) {
cv::putText(im, "#" + std::to_string(tracked.id),
Point2i(tracked.object.x+5, tracked.object.y+tracked.object.height-5),
cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cv::Scalar(0,0,255));
cv::rectangle(im, tracked.object, cv::Scalar(0, 0, 255), 1);
}
}
in.create(channel_out_, result);
// TODO: should be uploaded by operator which requires data on GPU
in.upload(channel_in_);
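With the new debug_ flag enabled, each track is drawn onto the colour image with its id and bounding box before the result vector is written to channel_out_. A standalone sketch of the same overlay calls, assuming only OpenCV's imgproc module (drawTrack is an illustrative wrapper):

#include <opencv2/core.hpp>
#include <opencv2/imgproc.hpp>
#include <string>

// Draw a labelled red box, matching the style of the debug branch in apply().
void drawTrack(cv::Mat &im, int id, const cv::Rect2d &obj) {
    cv::putText(im, "#" + std::to_string(id),
                cv::Point2i(int(obj.x) + 5, int(obj.y + obj.height) - 5),
                cv::FONT_HERSHEY_COMPLEX_SMALL, 1.0, cv::Scalar(0, 0, 255));
    cv::rectangle(im, obj, cv::Scalar(0, 0, 255), 1);
}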
......
......@@ -81,7 +81,8 @@ void StereoVideoSource::init(const string &file) {
#ifdef HAVE_OPTFLOW
pipeline_input_->append<ftl::operators::NVOpticalFlow>("optflow", Channel::Colour, Channel::Flow);
#endif
pipeline_input_->append<ftl::operators::DetectAndTrack>("facedetection")->set("enabled", false);
pipeline_input_->append<ftl::operators::DetectAndTrack>("facedetection")->value("enabled", false);
pipeline_input_->append<ftl::operators::ColourChannels>("colour");
calib_ = ftl::create<Calibrate>(host_, "calibration", cv::Size(lsrc_->fullWidth(), lsrc_->fullHeight()), stream_);
......@@ -192,7 +193,7 @@ void StereoVideoSource::updateParameters() {
// same for left and right
double baseline = 1.0 / calib_->getQ().at<double>(3,2);
double doff = -calib_->getQ().at<double>(3,3) * baseline;
double min_depth = this->host_->getConfig().value<double>("min_depth", 0.0);
double max_depth = this->host_->getConfig().value<double>("max_depth", 15.0);
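baseline and doff come from row 3 of OpenCV's reprojection matrix Q (as produced by cv::stereoRectify), where Q(3,2) = -1/Tx and Q(3,3) = (cx1 - cx2)/Tx with Tx the signed horizontal translation. A small self-contained check with a hand-built Q, assuming a 12 cm baseline and a 4 px principal-point offset:

#include <opencv2/core.hpp>
#include <cstdio>

int main() {
    // Only row 3 of Q is needed for these formulas.
    double Tx  = -0.12;                 // assumed baseline of 0.12 m (Tx is negative)
    double cx1 = 642.0, cx2 = 638.0;    // assumed principal points of the two cameras
    cv::Mat Q = cv::Mat::zeros(4, 4, CV_64F);
    Q.at<double>(3, 2) = -1.0 / Tx;
    Q.at<double>(3, 3) = (cx1 - cx2) / Tx;

    // Same expressions as updateParameters():
    double baseline = 1.0 / Q.at<double>(3, 2);        // -> 0.12
    double doff = -Q.at<double>(3, 3) * baseline;      // -> cx1 - cx2 = 4.0
    std::printf("baseline=%.2f m, doff=%.1f px\n", baseline, doff);
    return 0;
}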
......@@ -245,12 +246,20 @@ bool StereoVideoSource::retrieve() {
auto &frame = frames_[0];
frame.reset();
frame.setOrigin(&state_);
auto &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
auto &right = frame.create<cv::cuda::GpuMat>(Channel::Right);
cv::cuda::GpuMat dummy;
auto &hres = (lsrc_->hasHigherRes()) ? frame.create<cv::cuda::GpuMat>(Channel::ColourHighRes) : dummy;
lsrc_->get(left, right, hres, calib_, stream2_);
if (lsrc_->isStereo()) {
cv::cuda::GpuMat &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat &right = frame.create<cv::cuda::GpuMat>(Channel::Right);
lsrc_->get(left, right, hres, calib_, stream2_);
}
else {
cv::cuda::GpuMat &left = frame.create<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat right;
lsrc_->get(left, right, hres, calib_, stream2_);
}
//LOG(INFO) << "Channel size: " << hres.size();
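In the single-camera path, retrieve() still calls the same lsrc_->get() but hands it a throwaway GpuMat for the right view, so no separate mono capture entry point is needed. A minimal illustration of that pattern, with a hypothetical fill() standing in for lsrc_->get():

#include <opencv2/core/cuda.hpp>

// Hypothetical stand-in for lsrc_->get(): fills whichever outputs the caller wants.
static void fill(cv::cuda::GpuMat &left, cv::cuda::GpuMat &right, bool stereo) {
    left.create(480, 640, CV_8UC4);
    if (stereo) right.create(480, 640, CV_8UC4);
}

void grab(bool stereo) {
    cv::cuda::GpuMat left;
    cv::cuda::GpuMat right;       // stays empty in the mono case, as in the diff
    fill(left, right, stereo);
    // downstream code checks right.empty() / hasChannel(Right) before using it
}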
......@@ -269,20 +278,25 @@ void StereoVideoSource::swap() {
bool StereoVideoSource::compute(int n, int b) {
auto &frame = frames_[1];
if (!frame.hasChannel(Channel::Left) || !frame.hasChannel(Channel::Right)) {
return false;
}
if (lsrc_->isStereo()) {
if (!frame.hasChannel(Channel::Left) ||
!frame.hasChannel(Channel::Right)) {
return false;
}
cv::cuda::GpuMat& left = frame.get<cv::cuda::GpuMat>(Channel::Left);
cv::cuda::GpuMat& right = frame.get<cv::cuda::GpuMat>(Channel::Right);
if (left.empty() || right.empty()) {
return false;
}
if (left.empty() || right.empty()) { return false; }
//stream_.waitForCompletion();
}
else {
if (!frame.hasChannel(Channel::Left)) { return false; }
}
host_->notify(timestamp_, frame);
return true;
}
......