Detecting individual boxes in a W2 with OpenCV - Python

I have done extensive research and cannot find a combination of techniques that will achieve what I need.

I have a situation where I need to perform OCR on hundreds of W2s in order to extract the data for reconciliation. The W2s are of very poor quality, as they are printed and subsequently scanned back into the computer. That process is beyond my control; unfortunately, I have to work with what I have.

I was able to complete this process successfully last year, but I had to brute-force it, since timeliness was a serious concern: I manually specified the coordinates for the data extraction and then ran OCR on only those segments, one at a time. This year I would like to build a more dynamic solution in anticipation of changed coordinates, changed formats, etc.
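For context, a minimal sketch of what that hard-coded approach looked like (the field names, coordinates, and the use of pytesseract here are illustrative placeholders, not my actual values):

import cv2
import pytesseract  # assuming Tesseract as the OCR engine

# Hypothetical, hand-measured regions: field name -> (x, y, width, height)
FIELDS = {
    "employee_ssn": (40, 60, 300, 40),
    "box1_wages": (400, 180, 250, 40),
}

img = cv2.imread("w2_scan.png", cv2.IMREAD_GRAYSCALE)
for name, (x, y, w, h) in FIELDS.items():
    crop = img[y:y + h, x:x + w]              # cut out one field by fixed coordinates
    text = pytesseract.image_to_string(crop)  # OCR just this segment
    print(name, text.strip())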

I have included a sample scrubbed W2 below. The idea is that each box on the W2 is its own rectangle, and I can extract the data by iterating over all of the rectangles. I have tried several edge-detection techniques, but none of them delivered exactly what I needed. I believe I have not found the right combination of preprocessing steps. I also tried to adapt some of the Sudoku-grid detection approaches.

Example W2

Here is what I have tried so far in Python with OpenCV; the processed output is shown below:

Processed w2

import cv2
import numpy as np

img = cv2.imread(image_path_here)

# Downscale by half (cv2.resize needs integer dimensions)
newx, newy = img.shape[1] // 2, img.shape[0] // 2
img = cv2.resize(img, (newx, newy))

# Smooth, then threshold away everything that is not near-white
blur = cv2.GaussianBlur(img, (3, 3), 5)
ret, thresh1 = cv2.threshold(blur, 225, 255, cv2.THRESH_BINARY)

gray = cv2.cvtColor(thresh1, cv2.COLOR_BGR2GRAY)

edges = cv2.Canny(gray, 50, 220, apertureSize=3)

# minLineLength and maxLineGap must be passed as keyword arguments;
# passed positionally they land in the unused `lines` output parameter
minLineLength = 20
maxLineGap = 50
lines = cv2.HoughLinesP(edges, 1, np.pi/180, 100,
                        minLineLength=minLineLength, maxLineGap=maxLineGap)

# reshape handles both the (N,1,4) and (1,N,4) output layouts
for x1, y1, x2, y2 in lines.reshape(-1, 4):
    cv2.line(img, (x1, y1), (x2, y2), (255, 0, 255), 2)

cv2.imshow('hough',img)
cv2.waitKey(0)

Here is the approach I ended up with, in three steps:

1: Threshold the image (keep only pixels darker than a cutoff, so that the box lines and text remain as foreground).

2: Find the largest connected component of the thresholded image, which should be the interconnected grid of box boundaries.

3: Flood fill each enclosed region of that grid (skipping the outer region of the page). Every fill reports its bounding box, i.e. the xy corners of one text box, which gives you a handle for cropping that box and running OCR on it (a rough Python sketch of these steps follows this list).
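Here is a rough Python sketch of those three steps, assuming OpenCV 3+ (it swaps the flood-fill bookkeeping of the C++ code below for connectedComponentsWithStats and findContours, and the file name is a placeholder):

import cv2
import numpy as np

img = cv2.imread("W2.png")            # placeholder path
blue = cv2.split(img)[0]              # work on the blue channel, as in the code below

# 1: inverse threshold so the dark box lines become white foreground
_, thresh = cv2.threshold(blue, 150, 255, cv2.THRESH_BINARY_INV)

# 2: keep only the largest connected component (the grid of box borders)
n, labels, stats, _ = cv2.connectedComponentsWithStats(thresh, connectivity=8)
biggest = 1 + np.argmax(stats[1:, cv2.CC_STAT_AREA])   # label 0 is the background
grid = np.uint8(labels == biggest) * 255

# 3: the holes inside the grid are the text boxes; with RETR_CCOMP the hole
# contours are the ones that have a parent, and their bounding rects are the boxes
contours, hierarchy = cv2.findContours(grid, cv2.RETR_CCOMP, cv2.CHAIN_APPROX_SIMPLE)[-2:]
boxes = [cv2.boundingRect(c) for c, h in zip(contours, hierarchy[0]) if h[3] != -1]

vis = img.copy()
for x, y, w, h in boxes:
    cv2.rectangle(vis, (x, y), (x + w, y + h), (0, 0, 255), 2)
cv2.imshow("boxes", vis)
cv2.waitKey(0)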

Here is what the intermediate processing stages look like:

Processing steps

Final result

Note that this relies on the box lines being darker than the threshold cutoff (150 on the blue channel here); a lighter or noisier scan may need a different cutoff.

After thresholding, the code does a blob search for the largest connected component (the box grid): it flood fills every blob it finds and keeps the one with the most pixels, which separates the grid from the text and noise blobs.

This is a brute-force scan and could certainly be optimized (it clones the image on every fill), but it gets the job done.
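For reference, that brute-force scan looks roughly like this in Python (a sketch only; thresh is assumed to be the inverse-thresholded image from step 1):

import cv2

thresh = cv2.imread("thresholded.png", cv2.IMREAD_GRAYSCALE)  # placeholder input

best_size, best_blob = 0, None
work = thresh.copy()
for x in range(work.shape[1]):
    for y in range(work.shape[0]):
        if work[y, x] != 0:                    # unvisited blob pixel
            before = work.copy()
            size, work, _, _ = cv2.floodFill(work, None, (x, y), 0)
            if size > best_size:               # keep the blob with the most pixels
                best_size = size
                best_blob = before - work      # only the just-filled blob remains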

The code follows (sorry, it is in C++ rather than Python, but the same OpenCV calls are available from Python):

#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/highgui/highgui.hpp"
#include <iostream>
#include <stdio.h>
#include <opencv2/opencv.hpp>

using namespace cv;


//Attempts to find the largest connected group of points (assumed to be the interconnected boundaries of the textbox grid)
Mat biggestComponent(Mat targetImage, int connectivity=8)
{
    Mat inputImage;
    inputImage = targetImage.clone();
    Mat finalImage;// = inputImage;
    int greatestBlobSize=0;
    std::cout<<"Top"<<std::endl;
    std::cout<<inputImage.rows<<std::endl;
    std::cout<<inputImage.cols<<std::endl;

    for(int i=0;i<inputImage.cols;i++)
    {
        for(int ii=0;ii<inputImage.rows;ii++)
        {
            if(inputImage.at<uchar>(ii,i)!=0)
            {
                Mat lastImage;
                lastImage = inputImage.clone();
                Rect boundbox;
                int blobSize = floodFill(inputImage, cv::Point(i,ii), Scalar(0),&boundbox,Scalar(200),Scalar(255),connectivity);

                if(greatestBlobSize<blobSize)
                {
                    greatestBlobSize=blobSize;
                    std::cout<<blobSize<<std::endl;
                    Mat tempDif = lastImage-inputImage;
                    finalImage = tempDif.clone();
                }
                //std::cout<<"Loop"<<std::endl;
            }
        }
    }
    return finalImage;
}

//Takes an image that only has outlines of boxes and gets handles for each textbox.
//Returns a vector of points which represent the top left corners of the text boxes.
std::vector<Rect> boxCorners(Mat processedImage, int connectivity=4)
{
    std::vector<Rect> boxHandles;

    Mat inputImage;
    bool outerRegionFlag=true;

    inputImage = processedImage.clone();

    std::cout<<inputImage.rows<<std::endl;
    std::cout<<inputImage.cols<<std::endl;

    for(int i=0;i<inputImage.cols;i++)
    {
        for(int ii=0;ii<inputImage.rows;ii++)
        {
            if(inputImage.at<uchar>(ii,i)==0)
            {
                Mat lastImage;
                lastImage = inputImage.clone();
                Rect boundBox;

                if(outerRegionFlag) //This is to floodfill the outer zone of the page
                {
                    outerRegionFlag=false;
                    floodFill(inputImage, cv::Point(i,ii), Scalar(255),&boundBox,Scalar(0),Scalar(50),connectivity);
                }
                else
                {
                    floodFill(inputImage, cv::Point(i,ii), Scalar(255),&boundBox,Scalar(0),Scalar(50),connectivity);
                    boxHandles.push_back(boundBox);
                }
            }
        }
    }
    return boxHandles;
}

Mat drawTestBoxes(Mat originalImage, std::vector<Rect> boxes)
{
    Mat outImage;
    outImage = originalImage.clone();
    outImage = outImage*0; //really I am just being lazy, this should just be initialized with dimensions

    for(int i=0;i<boxes.size();i++)
    {
        rectangle(outImage,boxes[i],Scalar(255));
    }
    return outImage;
}

int main() {

    Mat image;
    Mat thresholded;
    Mat processed;

    image = imread( "Images/W2.png", 1 );
    Mat channel[3];

    split(image, channel);


    threshold(channel[0],thresholded,150,255,1);

    std::cout<<"Coputing biggest object"<<std::endl;
    processed = biggestComponent(thresholded);

    std::vector<Rect> textBoxes = boxCorners(processed);

    Mat finalBoxes = drawTestBoxes(image,textBoxes);


    namedWindow("Original", WINDOW_AUTOSIZE );
    imshow("Original", channel[0]);

    namedWindow("Thresholded", WINDOW_AUTOSIZE );
    imshow("Thresholded", thresholded);

    namedWindow("Processed", WINDOW_AUTOSIZE );
    imshow("Processed", processed);

    namedWindow("Boxes", WINDOW_AUTOSIZE );
    imshow("Boxes", finalBoxes);



    std::cout<<"waiting for user input"<<std::endl;

    waitKey(0);

    return 0;
}



Source: https://habr.com/ru/post/1663790/

