MITK-IGT
IGT Extension of MITK
mitkGrabCutOpenCVImageFilter.cpp
/*============================================================================

The Medical Imaging Interaction Toolkit (MITK)

Copyright (c) German Cancer Research Center (DKFZ)
All rights reserved.

Use of this source code is governed by a 3-clause BSD license that can be
found in the LICENSE file.

============================================================================*/

// mitk headers
#include "mitkGrabCutOpenCVImageFilter.h"
#include "mitkPointSet.h"

#include <opencv2/imgproc.hpp>
#include <opencv2/imgproc/imgproc_c.h>

// This is a magic number defined in "grabcut.cpp" of OpenCV.
// The GrabCut function crashes if fewer than this number of model
// points are given. There must be at least as many model points
// as there are components of the Gaussian Mixture Model.
#define GMM_COMPONENTS_COUNT 5

mitk::GrabCutOpenCVImageFilter::GrabCutOpenCVImageFilter()
  : m_ModelPointsDilationSize(0),
    m_UseOnlyRegionAroundModelPoints(false),
    m_CurrentProcessImageNum(0),
    m_InputImageId(AbstractOpenCVImageFilter::INVALID_IMAGE_ID),
    m_ResultImageId(AbstractOpenCVImageFilter::INVALID_IMAGE_ID),
    m_StopThread(false)
{
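  // start the worker thread that runs the GrabCut segmentations asynchronously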
  m_Thread = std::thread(&GrabCutOpenCVImageFilter::SegmentationWorker, this);
}

mitk::GrabCutOpenCVImageFilter::~GrabCutOpenCVImageFilter()
{
  // ask the worker thread to stop, wake it up and detach it on destruction
  m_StopThread = true;
  m_WorkerBarrier.notify_all();
  if (m_Thread.joinable())
    m_Thread.detach();
}

bool mitk::GrabCutOpenCVImageFilter::OnFilterImage(cv::Mat& image)
{
  if ( image.empty() )
  {
    MITK_WARN << "Filtering empty image?";
    return false;
  }

  // make sure that the image is an rgb image as needed
  // by the GrabCut algorithm
  if (image.type() != CV_8UC3)
  {
    cv::Mat tmp = image.clone();
    cv::cvtColor(tmp, image, CV_GRAY2RGB);
  }

  // set image as the current input image, guarded by
  // a mutex as the worker thread reads this image
  m_ImageMutex.lock();
  m_InputImage = image.clone();
  m_InputImageId = this->GetCurrentImageId();
  m_ImageMutex.unlock();

  // wake up the worker thread if there was an image set
  // and foreground model points are available
  if ( ! m_ForegroundPoints.empty()) { m_WorkerBarrier.notify_all(); }

  return true;
}

void mitk::GrabCutOpenCVImageFilter::SetModelPoints(ModelPointsList foregroundPoints)
{
  m_PointSetsMutex.lock();
  m_ForegroundPoints = foregroundPoints;
  m_PointSetsMutex.unlock();
}

void mitk::GrabCutOpenCVImageFilter::SetModelPoints(ModelPointsList foregroundPoints, ModelPointsList backgroundPoints)
{
  m_PointSetsMutex.lock();
  m_BackgroundPoints = backgroundPoints;
  m_ForegroundPoints = foregroundPoints;
  m_PointSetsMutex.unlock();
}

void mitk::GrabCutOpenCVImageFilter::SetModelPoints(cv::Mat foregroundMask)
{
  m_PointSetsMutex.lock();
  m_ForegroundPoints = this->ConvertMaskToModelPointsList(foregroundMask);
  m_PointSetsMutex.unlock();
}

void mitk::GrabCutOpenCVImageFilter::SetModelPoints(cv::Mat foregroundMask, cv::Mat backgroundMask)
{
  m_PointSetsMutex.lock();
  m_ForegroundPoints = this->ConvertMaskToModelPointsList(foregroundMask);
  m_BackgroundPoints = this->ConvertMaskToModelPointsList(backgroundMask);
  m_PointSetsMutex.unlock();
}

void mitk::GrabCutOpenCVImageFilter::SetModelPointsDilationSize(int modelPointsDilationSize)
{
  if ( modelPointsDilationSize < 0 )
  {
    MITK_ERROR("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter")
      << "Model points dilation size must not be smaller than zero.";
    mitkThrow() << "Model points dilation size must not be smaller than zero.";
  }

  m_ModelPointsDilationSize = modelPointsDilationSize;
}

void mitk::GrabCutOpenCVImageFilter::SetUseOnlyRegionAroundModelPoints(unsigned int additionalWidth)
{
  m_UseOnlyRegionAroundModelPoints = true;
  m_AdditionalWidth = additionalWidth;
}

void mitk::GrabCutOpenCVImageFilter::SetUseFullImage()
{
  m_UseOnlyRegionAroundModelPoints = false;
}

cv::Rect mitk::GrabCutOpenCVImageFilter::GetRegionAroundModelPoints()
{
  return m_BoundingBox;
}

int mitk::GrabCutOpenCVImageFilter::GetResultImageId()
{
  return m_ResultImageId;
}

cv::Mat mitk::GrabCutOpenCVImageFilter::GetResultMask()
{
  cv::Mat result;

  m_ResultMutex.lock();
  result = m_ResultMask.clone();
  m_ResultMutex.unlock();

  return result;
}

std::vector<mitk::GrabCutOpenCVImageFilter::ModelPointsList> mitk::GrabCutOpenCVImageFilter::GetResultContours()
{
  std::vector<std::vector<cv::Point> > cvContours;
  std::vector<cv::Vec4i> hierarchy;
  std::vector<mitk::GrabCutOpenCVImageFilter::ModelPointsList> contourPoints;

  cv::Mat resultMask = this->GetResultMask();
  if (resultMask.empty()) { return contourPoints; }

  cv::findContours(resultMask, cvContours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);

  // convert cvContours to vector of ModelPointsLists
  for ( unsigned int i = 0; i < cvContours.size(); ++i )
  {
    mitk::GrabCutOpenCVImageFilter::ModelPointsList curContourPoints;

    for ( auto it = cvContours[i].begin();
          it != cvContours[i].end(); ++it)
    {
      itk::Index<2> index;
      index.SetElement(0, it->x);
      index.SetElement(1, it->y);
      curContourPoints.push_back(index);
    }

    contourPoints.push_back(curContourPoints);
  }

  return contourPoints;
}

mitk::GrabCutOpenCVImageFilter::ModelPointsList mitk::GrabCutOpenCVImageFilter::GetResultContourWithPixel(itk::Index<2> pixelIndex)
{
  cv::Mat mask = this->GetResultMask();
  if (mask.empty()) { return mitk::GrabCutOpenCVImageFilter::ModelPointsList(); }

  // return an empty model point list if the given pixel is outside the image borders
  // (index element 0 is the x coordinate, element 1 the y coordinate)
  if (pixelIndex.GetElement(0) < 0 || pixelIndex.GetElement(0) >= mask.size().width
    || pixelIndex.GetElement(1) < 0 || pixelIndex.GetElement(1) >= mask.size().height)
  {
    MITK_WARN("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter")
      << "Given pixel index (" << pixelIndex.GetElement(0) << ", " << pixelIndex.GetElement(1)
      << ") is outside the image (" << mask.size().width << ", " << mask.size().height << ").";

    return mitk::GrabCutOpenCVImageFilter::ModelPointsList();
  }

  // create a mask where the segmentation around the given pixel index is
  // set (done by flood filling the result mask using the pixel as seed;
  // 5 is an arbitrary fill value that differs from the 0/255 values of the result mask)
  cv::floodFill(mask, cv::Point(pixelIndex.GetElement(0), pixelIndex.GetElement(1)), 5);

  cv::Mat foregroundMask;
  cv::compare(mask, 5, foregroundMask, cv::CMP_EQ);

  // find the contour on the flood filled image (there can be only one now)
  std::vector<std::vector<cv::Point> > cvContours;
  std::vector<cv::Vec4i> hierarchy;
  cv::findContours(foregroundMask, cvContours, hierarchy, cv::RETR_LIST, cv::CHAIN_APPROX_NONE);

  ModelPointsList contourPoints;

  // convert cvContours to ModelPointsList
  for ( auto it = cvContours[0].begin();
        it != cvContours[0].end(); ++it)
  {
    itk::Index<2> index;
    index.SetElement(0, it->x);
    index.SetElement(1, it->y);
    contourPoints.push_back(index);
  }

  return contourPoints;
}

cv::Mat mitk::GrabCutOpenCVImageFilter::GetMaskFromPointSets()
{
  // initialize mask with values of probably background
  cv::Mat mask(m_InputImage.size().height, m_InputImage.size().width, CV_8UC1, cv::GC_PR_BGD);

  // get foreground and background points (guarded by mutex)
  m_PointSetsMutex.lock();
  ModelPointsList pointsLists[2] = {ModelPointsList(m_ForegroundPoints), ModelPointsList(m_BackgroundPoints)};
  m_PointSetsMutex.unlock();

  // define values for foreground and background pixels
  unsigned int pixelValues[2] = {cv::GC_FGD, cv::GC_BGD};

  for (unsigned int n = 0; n < 2; ++n)
  {
    for (auto it = pointsLists[n].begin();
         it != pointsLists[n].end(); ++it)
    {
      // set pixels around the current pixel to the same value (the size of this
      // area is specified by ModelPointsDilationSize)
      for ( int i = -m_ModelPointsDilationSize; i <= m_ModelPointsDilationSize; ++i )
      {
        for ( int j = -m_ModelPointsDilationSize; j <= m_ModelPointsDilationSize; ++j)
        {
          // index element 1 is the y coordinate (row), element 0 the x coordinate (column)
          int row = it->GetElement(1) + i; int col = it->GetElement(0) + j;
          if ( row >= 0 && col >= 0 && row < mask.rows && col < mask.cols )
          {
            mask.at<unsigned char>(row, col) = pixelValues[n];
          }
        }
      }
    }
  }

  return mask;
}

cv::Rect mitk::GrabCutOpenCVImageFilter::GetBoundingRectFromMask(cv::Mat mask)
{
  cv::Mat nonPropablyBackgroundMask, modelPoints;
  cv::compare(mask, cv::GC_PR_BGD, nonPropablyBackgroundMask, cv::CMP_NE);
  cv::findNonZero(nonPropablyBackgroundMask, modelPoints);

  if (modelPoints.empty())
  {
    MITK_WARN("AbstractOpenCVImageFilter")("GrabCutOpenCVImageFilter")
      << "Cannot find any foreground points. Returning full image size as bounding rectangle.";
    return cv::Rect(0, 0, mask.cols, mask.rows);
  }

  // calculate bounding rect around the model points
  cv::Rect boundingRect = cv::boundingRect(modelPoints);

  // subtract the additional width from the x and y values (and make sure that they aren't outside the image then)
  boundingRect.x = static_cast<unsigned int>(boundingRect.x) > m_AdditionalWidth ? boundingRect.x - m_AdditionalWidth : 0;
  boundingRect.y = static_cast<unsigned int>(boundingRect.y) > m_AdditionalWidth ? boundingRect.y - m_AdditionalWidth : 0;

  // add the additional width to the width of the bounding rect (twice, as the x value was moved before)
  // and make sure that the bounding rect stays inside the image borders
  if ( static_cast<unsigned int>(boundingRect.x + boundingRect.width)
       + 2 * m_AdditionalWidth < static_cast<unsigned int>(mask.size().width) )
  {
    boundingRect.width += 2 * m_AdditionalWidth;
  }
  else
  {
    boundingRect.width = mask.size().width - boundingRect.x - 1;
  }

  // add the additional width to the height of the bounding rect (twice, as the y value was moved before)
  // and make sure that the bounding rect stays inside the image borders
  if ( static_cast<unsigned int>(boundingRect.y + boundingRect.height)
       + 2 * m_AdditionalWidth < static_cast<unsigned int>(mask.size().height) )
  {
    boundingRect.height += 2 * m_AdditionalWidth;
  }
  else
  {
    boundingRect.height = mask.size().height - boundingRect.y - 1;
  }

  assert(boundingRect.x + boundingRect.width < mask.size().width);
  assert(boundingRect.y + boundingRect.height < mask.size().height);

  return boundingRect;
}

cv::Mat mitk::GrabCutOpenCVImageFilter::RunSegmentation(cv::Mat input, cv::Mat mask)
{
  // test if foreground and background models are large enough for GrabCut
  cv::Mat compareFgResult, compareBgResult;
  cv::compare(mask, cv::GC_FGD, compareFgResult, cv::CMP_EQ);
  cv::compare(mask, cv::GC_PR_BGD, compareBgResult, cv::CMP_EQ);
  if ( cv::countNonZero(compareFgResult) < GMM_COMPONENTS_COUNT
       || cv::countNonZero(compareBgResult) < GMM_COMPONENTS_COUNT )
  {
    // return a result mask with no pixels set to foreground
    return cv::Mat::zeros(mask.size(), mask.type());
  }

  // do the actual GrabCut segmentation (initialized with the mask)
  cv::Mat bgdModel, fgdModel;
  cv::grabCut(input, mask, cv::Rect(), bgdModel, fgdModel, 1, cv::GC_INIT_WITH_MASK);

  // set probably foreground pixels to white on the result mask
  cv::Mat result;
  cv::compare(mask, cv::GC_PR_FGD, result, cv::CMP_EQ);

  // set foreground pixels to white on the result mask
  cv::Mat foregroundMat;
  cv::compare(mask, cv::GC_FGD, foregroundMat, cv::CMP_EQ);
  foregroundMat.copyTo(result, foregroundMat);

  return result;
}

mitk::GrabCutOpenCVImageFilter::ModelPointsList mitk::GrabCutOpenCVImageFilter::ConvertMaskToModelPointsList(cv::Mat mask)
{
  cv::Mat points;
  cv::findNonZero(mask, points);

  // push extracted points into a vector of itk indices
  ModelPointsList pointsVector;
  for ( size_t n = 0; n < points.total(); ++n )
  {
    itk::Index<2> index;
    index.SetElement(0, points.at<cv::Point>(n).x);
    index.SetElement(1, points.at<cv::Point>(n).y);
    pointsVector.push_back(index);
  }

  return pointsVector;
}

void mitk::GrabCutOpenCVImageFilter::SegmentationWorker()
{
  std::mutex mutex;
  std::unique_lock<std::mutex> lock(mutex);

  while (true)
  {
    m_WorkerBarrier.wait(lock, [this] { return !m_StopThread; });

    m_ImageMutex.lock();
    cv::Mat image = m_InputImage.clone();
    int inputImageId = m_InputImageId;
    m_ImageMutex.unlock();

    cv::Mat mask = this->GetMaskFromPointSets();

    cv::Mat result;
    if (m_UseOnlyRegionAroundModelPoints)
    {
      result = cv::Mat(mask.rows, mask.cols, mask.type(), 0.0);
      m_BoundingBox = this->GetBoundingRectFromMask(mask);
      RunSegmentation(image(m_BoundingBox), mask(m_BoundingBox)).copyTo(result(m_BoundingBox));
    }
    else
    {
      result = this->RunSegmentation(image, mask);
    }

    // save result to member attribute
    m_ResultMutex.lock();
    m_ResultMask = result;
    m_ResultImageId = inputImageId;
    m_ResultMutex.unlock();
  }
}
Interface for image filters on OpenCV images.
bool OnFilterImage(cv::Mat &image) override
Implementation of the virtual image filtering method. The input image is copied to a member attribute...
void SetUseFullImage()
The full image is used as input for the segmentation. This method sets the behaviour back to the defa...
std::vector< ModelPointsList > GetResultContours()
Getter for the contours of the current segmentation.
void SetModelPointsDilationSize(int modelPointsDilationSize)
Set a size of which each model point is dilated before image filtering. The more color information of...
std::vector< itk::Index< 2 > > ModelPointsList
List holding image indices of the model points.
cv::Rect GetBoundingRectFromMask(cv::Mat mask)
Creates a bounding box around all pixels which aren't probably background. The bounding box is widene...
cv::Mat RunSegmentation(cv::Mat input, cv::Mat mask)
Performs a GrabCut segmentation of the given input image.
cv::Rect GetRegionAroundModelPoints()
Getter for the rectangle used for the area of segmentation. See mitk::GrabCutOpenCVImageFilter::SetUs...
ModelPointsList GetResultContourWithPixel(itk::Index< 2 > pixelIndex)
Getter for one specific contour of the current segmentation.
int GetResultImageId()
Getter for an ascending id of the current result image. The id will be increased for every segmentati...
void SetModelPoints(ModelPointsList foregroundPoints)
Sets a list of image indices as foreground model points.
cv::Mat GetResultMask()
Getter for the result mask of the current segmentation. The result of this method is not necessarily ...
cv::Mat GetMaskFromPointSets()
Creates an image mask for GrabCut algorithm by using the foreground and background point sets....
void SetUseOnlyRegionAroundModelPoints(unsigned int additionalBorder)
Use only the region around the foreground model points for the segmentation.
ModelPointsList ConvertMaskToModelPointsList(cv::Mat mask)
Creates a list of points from every non-zero pixel of the given mask.
#define GMM_COMPONENTS_COUNT
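The listing above only shows the filter internals; the following is a minimal, hypothetical usage sketch. It assumes the filter is created via the usual MITK smart-pointer New() mechanism and fed through the FilterImage() entry point of the mitk::AbstractOpenCVImageFilter base class; the function name, the cameraFrame argument and the seed coordinates are placeholders and not part of MITK.

// Illustrative usage sketch (not part of the original file); see the assumptions above.
#include "mitkGrabCutOpenCVImageFilter.h"
#include <opencv2/core.hpp>

void SegmentFrameSketch(cv::Mat& cameraFrame)
{
  auto grabCutFilter = mitk::GrabCutOpenCVImageFilter::New();

  // mark a few pixels that certainly belong to the object of interest
  mitk::GrabCutOpenCVImageFilter::ModelPointsList foregroundPoints;
  itk::Index<2> seed;
  seed[0] = 120; seed[1] = 80; // x, y (placeholder values)
  foregroundPoints.push_back(seed);

  grabCutFilter->SetModelPoints(foregroundPoints);
  grabCutFilter->SetModelPointsDilationSize(2);         // grow each model point a little
  grabCutFilter->SetUseOnlyRegionAroundModelPoints(50); // segment only near the model points

  // hand the frame to the filter; the GrabCut segmentation itself runs asynchronously
  // in the worker thread started by the constructor
  grabCutFilter->FilterImage(cameraFrame);

  // later (e.g. in the next processing cycle) a result can be polled
  cv::Mat resultMask = grabCutFilter->GetResultMask();
  if ( ! resultMask.empty() )
  {
    std::vector<mitk::GrabCutOpenCVImageFilter::ModelPointsList> contours =
        grabCutFilter->GetResultContours();
    // ... use resultMask / contours, and GetResultImageId() to match them to a frame ...
  }
}

Because the segmentation runs asynchronously in the worker thread, GetResultMask() may still be empty right after the image was handed to the filter; GetResultImageId() can be compared with the id of the filtered image to determine which input frame the current result belongs to.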