2014-02-10 148 views
9

我想求出轮廓的走向(流线),例如一个程序:先在 OpenCV 中寻找轮廓,再对其使用“fitLine”

contours

我想找到“设置点”,可以形容这个轮廓说喜欢红线:

enter image description here

黄色的部分是轮廓的时刻,

我尝试使用 OpenCV 的 fitLine 函数,但结果毫无意义。有什么办法可以获得轮廓的中间线吗?它应该能代表轮廓的关键走向。顺便说一句**我不是要求代码!**只要一个提示就行,我该怎么做?

在此先感谢您的帮助

+0

如果两条直线的宽度相似,则可以计算距离变换(反转或轮廓),用轮廓图像掩盖它(因此您将留下内部距离变换),然后查找最大值距离并选择具有相似距离的所有像素。 (或者在距离变换上计算一些好的脊线检测) – Micka

+0

你的意思是你想要线条的骨架。“线拟合”(line fit)通常指的是别的东西 –

回答

6

也许可以尝试这种使用距离变换和脊线检测的方法:

// Load the input and build a binary mask of the contour (foreground > 100).
cv::Mat input = cv::imread("fitLine.jpg");
cv::Mat gray;
cv::cvtColor(input,gray,CV_BGR2GRAY);

cv::Mat mask = gray>100;
cv::imshow("mask",mask);

// Distance transform: every foreground pixel gets its L1 distance to the
// nearest background pixel, so the "middle" of a line is the brightest ridge.
cv::Mat dt;
cv::distanceTransform(mask,dt,CV_DIST_L1,CV_DIST_MASK_PRECISE);

// The division by 15.0 is only for display: it maps the float distances into
// a roughly [0,1] range so imshow renders them visibly.
cv::imshow("dt", dt/15.0f);
cv::imwrite("fitLineOut.png",255*dt/15.0f);


//care: this part doesn't work for diagonal lines, a ridge detection would be better!!
cv::Mat lines = cv::Mat::zeros(input.rows, input.cols, CV_8UC1);

//only take the maxDist of each row
// (int loop indices: Mat::rows/cols are int, avoids signed/unsigned mismatch)
for(int y=0; y<dt.rows; ++y)
{
    float biggestDist = 0;
    cv::Point2i biggestDistLoc(0,0);
    for(int x=0; x<dt.cols; ++x)
    {
        cv::Point2i current(x,y);
        if(dt.at<float>(current) > biggestDist)
        {
            biggestDist = dt.at<float>(current);
            biggestDistLoc = current;
        }
    }
    // Guard: a row with no foreground would otherwise mark pixel (0,0).
    if(biggestDist > 0)
        lines.at<unsigned char>(biggestDistLoc) = 255;
}

//and the maxDist of each column (the original comment wrongly said "row")
for(int x=0; x<dt.cols; ++x)
{
    float biggestDist = 0;
    cv::Point2i biggestDistLoc(0,0);
    for(int y=0; y<dt.rows; ++y)
    {
        cv::Point2i current(x,y);
        if(dt.at<float>(current) > biggestDist)
        {
            biggestDist = dt.at<float>(current);
            biggestDistLoc = current;
        }
    }
    // Guard: a column with no foreground would otherwise mark pixel (0,0).
    if(biggestDist > 0)
        lines.at<unsigned char>(biggestDistLoc) = 255;
}

cv::imshow("max", lines);


cv::waitKey(-1);

的想法是计算轮廓内的距离变换,找到脊。

这是距离变换图像的样子: enter image description here 您可以看到线条中间有一个局部脊线最大值。

然后我使用了一个非常简单的方法:只找出每行/每列的最大距离。这非常草率,应该换成真正的脊线检测或细化(thinning)方法!

enter image description here

编辑:附加说明:

的想法是找到那些在轮廓的“中间”的所有要点。在数学/图形中,一个物体的某种“中间”的中轴线和它的定义是所有与至少两个轮廓点同时具有相同最小距离的点。

一种近似中轴的方法是计算距离变换。距离变换是一个矩阵,它为每个像素保存到下一个对象点的距离(例如对象的轮廓)(参见http://en.wikipedia.org/wiki/Distance_transform)。这是第一个图像。在这里你可以看到线条中间的点比靠近边界的点稍亮一些,这意味着沿线的最亮点可以被解释为中轴(近似),因为如果你移动远离它(与线方向正交)的距离变得更小,所以峰值是距两边距离接近相等的点。

这样,如果你能在距离变换中找到那些“脊”,就完成了。 脊线检测通常由哈里斯操作员完成(请参阅http://en.wikipedia.org/wiki/Ridge_detection)。

在我发布的这个又快又糙的版本中,我只是把每行和每列中的最大值当作脊线。对于基本水平和垂直的脊线来说没问题,但对角线的则会失败。所以也许你真的应该把那些 for 循环换成真正的脊线检测。

+0

感谢您的答案,它的工作,但我仍然不明白为什么?你能解释你在做什么吗?提前致谢 ! – Engine

+0

我已添加一些说明! – Micka

+0

你为什么要除以 15.0?– Engine

3

有趣的任务:)这里是我的解决方案:

enter image description here

这里是代码:

#include <iostream> 
#include <vector> 
#include <stdio.h> 
#include <stdarg.h> 
#include <set> 
#include "opencv2/opencv.hpp" 
#include "fstream" 
#include "iostream" 
using namespace std; 
using namespace cv; 


// One directional sub-pass of the iterative thinning.
// Reads the 8-neighborhood of every foreground pixel from pSrc (pSrc is not
// modified during the pass) and clears deletable pixels in pDst.
// bSecondPart selects which of the two directional deletion conditions is
// applied (the original code inlined these as "first part" / "second part",
// two near-identical 70-line loops differing only in this condition).
// Returns the number of pixels deleted by this pass.
static long ThinningPass(const unsigned char* pSrc, unsigned char* pDst, long lWidth, long lHeight, bool bSecondPart)
{
    long lDeletedPoints = 0;

    for(long i=1; i<lHeight-1; i++)
    {
        // Row pointers for rows i-1, i, i+1; advanced in lockstep so that
        // *pImg is always pixel (j, i).
        const unsigned char* pUp   = pSrc + (i-1)*lWidth;
        const unsigned char* pImg  = pSrc + i*lWidth;
        const unsigned char* pDown = pSrc + (i+1)*lWidth;

        for(long j=1; j<lWidth-1; j++)
        {
            pUp++;
            pImg++;
            pDown++;

            // Background pixels are never candidates for deletion.
            if(!*pImg)
                continue;

            // 8-neighborhood layout around the center pixel c:
            //   x6 x7 x8
            //   x5  c x1
            //   x4 x3 x2
            unsigned char x6 = *(pUp-1);
            unsigned char x5 = *(pImg-1);
            unsigned char x4 = *(pDown-1);
            unsigned char x7 = *pUp;
            unsigned char x3 = *pDown;
            unsigned char x8 = *(pUp+1);
            unsigned char x1 = *(pImg+1);
            unsigned char x2 = *(pDown+1);

            // g1: exactly one background-to-foreground transition pattern
            // around the pixel (connectivity: deleting must not split the
            // object). Expects a strict 0/1 image because of the "== 1" tests.
            unsigned char b1 = !x1 && (x2 == 1 || x3 == 1);
            unsigned char b2 = !x3 && (x4 == 1 || x5 == 1);
            unsigned char b3 = !x5 && (x6 == 1 || x7 == 1);
            unsigned char b4 = !x7 && (x8 == 1 || x1 == 1);
            unsigned char g1 = (b1 + b2 + b3 + b4) == 1;

            // g2: weighted neighbor count must be 2 or 3, i.e. the pixel sits
            // on a stroke, not at an endpoint or deep inside a blob.
            unsigned char np1 = x1 || x2;
            np1 += x3 || x4;
            np1 += x5 || x6;
            np1 += x7 || x8;
            unsigned char np2 = x2 || x3;
            np2 += x4 || x5;
            np2 += x6 || x7;
            np2 += x8 || x1;
            unsigned char npm = np1 > np2 ? np2 : np1;
            unsigned char g2 = npm >= 2 && npm <= 3;

            // Directional condition: the first sub-pass erodes from one side,
            // the second from the opposite side; alternating keeps the
            // skeleton centered in the stroke.
            unsigned char gDir = bSecondPart
                ? (unsigned char)((x5 && (x6 || x7 || !x4)) == 0)
                : (unsigned char)((x1 && (x2 || x3 || !x8)) == 0);

            if(g1 && g2 && gDir)
            {
                // Delete this point in the output image only; the source is
                // synchronized between sub-passes by the caller.
                pDst[lWidth*i + j] = 0;
                ++lDeletedPoints;
            }
        }
    }

    return lDeletedPoints;
}

// Iteratively thins a binary image down to a one-pixel-wide skeleton.
//
// ucBinedImg     in/out: 0/1 binary input image (lWidth x lHeight, row-major).
//                NOTE: it is overwritten during processing (used as scratch).
// ucThinnedImage out: receives the 0/1 skeleton.
// lWidth/lHeight image dimensions in pixels.
// lIterativeLimit maximum number of iterations; pass -1 to iterate until
//                convergence (internally capped at 60000 as a safety bound).
//
// Returns 0 on success, -1 if ucBinedImg is NULL, -2 if ucThinnedImage is NULL.
int Thinning(unsigned char * ucBinedImg, unsigned char * ucThinnedImage, long lWidth, long lHeight, long lIterativeLimit)
{
    if(ucBinedImg == NULL)
        return -1;

    if(ucThinnedImage == NULL)
        return -2;

    // -1 means "run to convergence"; the loop below breaks as soon as a full
    // iteration deletes nothing.
    if(lIterativeLimit == -1)
        lIterativeLimit = 60000;

    // Start the output as a copy of the input; pixels are only ever cleared.
    memcpy(ucThinnedImage, ucBinedImg, lWidth*lHeight);

    for(long it=0; it<lIterativeLimit; it++)
    {
        // First directional sub-pass reads ucBinedImg, deletes in
        // ucThinnedImage ...
        long lDeletedPoints = ThinningPass(ucBinedImg, ucThinnedImage, lWidth, lHeight, false);

        // ... then the source is synchronized before the opposite sub-pass.
        memcpy(ucBinedImg, ucThinnedImage, lWidth*lHeight);

        lDeletedPoints += ThinningPass(ucBinedImg, ucThinnedImage, lWidth, lHeight, true);
        memcpy(ucBinedImg, ucThinnedImage, lWidth*lHeight);

        // Converged: nothing was deleted in either sub-pass this iteration.
        if(lDeletedPoints == 0)
            break;
    }

    // Clear a 16-pixel frame around the image edge (suppresses border
    // artifacts; note this blanks images smaller than 32 pixels per side).
    for(long i=0; i<lHeight; i++)
    {
        for(long j=0; j<lWidth; j++)
        {
            if(i < 16 || i >= lHeight-16 || j < 16 || j >= lWidth-16)
                ucThinnedImage[i*lWidth+j] = 0;
        }
    }

    return 0;
}

// Convenience wrapper around the raw-pointer Thinning().
// Masks every pixel down to its lowest bit so a standard 0/255 binary image
// becomes the 0/1 image the core routine expects, then stretches the 0/1
// skeleton back to 0/255 for display. IterativeLimit = -1 (the default)
// means "iterate until convergence".
void Thinning(Mat& src,Mat& dst,long IterativeLimit=-1)
{
    // 255 & 1 == 1, so a regular binary mask maps straight to 0/1.
    Mat binary = src & 1;

    if(!dst.empty())
    {
        dst.release();
    }
    dst = Mat::zeros(src.size(), CV_8UC1);

    Thinning(binary.data, dst.data, binary.cols, binary.rows, IterativeLimit);

    // Core routine produces a 0/1 skeleton; scale it up for viewing.
    dst *= 255;
}

int main(int argc, char* argv[]) 
{ 
    namedWindow("source"); 
    namedWindow("result"); 

    Mat img=imread("raw_image.jpg",0); 
    cv::threshold(img,img,128,255,cv::THRESH_BINARY); 

    int erosion_size=5; 
    Mat element = getStructuringElement(cv::MORPH_ELLIPSE,Size(2*erosion_size + 1, 2*erosion_size+1),Point(erosion_size, erosion_size)); 

    cv::dilate(img,img,element); 

    Mat thinned; 
    Thinning(img,thinned); 

    vector<Vec2f> lines; 
    HoughLines(thinned, lines, 0.5, CV_PI/360, 50, 0, 0); 

    float hist_theta[2]={0,0}; 
    float hist_rho[2]={0,0}; 
    float n[2]={0,0}; 
    for(size_t i = 0; i < lines.size(); i++) 
    { 
     float rho = lines[i][0], theta = lines[i][1]; 
     if(fabs(theta-CV_PI/2)<CV_PI/4) 
     { 
      hist_theta[0]+=theta; 
      hist_rho[0]+=rho; 
      n[0]+=1; 
     }else 
     { 
      hist_theta[1]+=theta; 
      hist_rho[1]+=rho; 
      n[1]+=1; 
     } 
    } 



    for(size_t i = 0; i < 2; i++) 
    { 
     float rho = hist_rho[i]/n[i], theta = hist_theta[i]/n[i]; 
     Point pt1, pt2; 
     double a = cos(theta), b = sin(theta); 
     double x0 = a*rho, y0 = b*rho; 
     pt1.x = cvRound(x0 + 1000*(-b)); 
     pt1.y = cvRound(y0 + 1000*(a)); 
     pt2.x = cvRound(x0 - 1000*(-b)); 
     pt2.y = cvRound(y0 - 1000*(a)); 
     line(thinned, pt1, pt2, Scalar(255,255,255), 1, CV_AA); 
    } 

    imshow("source",img); 
    imshow("result",thinned); 
    cv::waitKey(0); 
} 

下面是在这个源的黑客它使用两个1D直方图进行后处理。在现实生活中,它应该使用二维直方图进行紧密线条平均。