Barry Thomas

Java Source Code

These snippets of code are from my demo Android app for machine vision, using OpenCV v2.4.4. The snippets on this site are intended to help you see how easy it is to use many of the features of OpenCV. I assume you have installed the OpenCV v2.4.4 library and the samples which accompany it. These snippets should drop reasonably neatly into the sample apps' code from the OpenCV 2.4.4 sdk.

Feel free to use this code for any purpose. I don't guarantee it works. The code isn't written for speed or efficiency, just so it works and you can see what is going on. Feel free to improve it.

Other advice: Get a copy of "Learning OpenCV". Read it. Read it again because you probably skipped a lot of it first time through. It does get easier the more you do. Find lots of snippets of test code here. Use StackOverflow. Read Marcos Nieto's blog.

Canny Edge Detection

case VIEW_MODE_CANNY:

    // Canny edge detection: convert the RGBA camera frame to grey, blur to
    // suppress noise, run Canny, then expand the binary edge image back to
    // four channels for display.
    Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_RGBA2GRAY);

    // doing a gaussian blur prevents getting a lot of false hits
    Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 2, 2);

    // Canny's recommended upper:lower ratio is between 2:1 and 3:1.
    iCannyLowerThreshold = 35;
    iCannyUpperThreshold = 75;

    Imgproc.Canny(mGray, mIntermediateMat, iCannyLowerThreshold, iCannyUpperThreshold);

    // FIX: mRgba is an RGBA mat, so convert GRAY2RGBA rather than GRAY2BGRA.
    // For a grey source the visible result is identical (R=G=B), but the
    // channel order now matches the destination mat's naming and the
    // RGBA2GRAY conversion above.
    Imgproc.cvtColor(mIntermediateMat, mRgba, Imgproc.COLOR_GRAY2RGBA, 4);

    if (bDisplayTitle)
        ShowTitle ("Canny Edges", 1);

    break;

Hough Circles

case VIEW_MODE_HOUGH_CIRCLES:

    // Hough circle detection on the greyscale frame.
    // NOTE(review): mGray is used here without a fresh RGBA->grey conversion
    // in this case, so it relies on mGray already holding the current frame
    // in greyscale — confirm against the surrounding switch/frame code.

    // the lower this figure the more spurious circles you get
    // 50 looks good in CANNY, but 100 is better when converting that into Hough circles
    iCannyUpperThreshold = 100;
            	
    // CV_HOUGH_GRADIENT with dp = 2.0 (accumulator at half resolution);
    // circle centres must be at least rows/8 pixels apart.
    Imgproc.HoughCircles(mGray, mIntermediateMat, Imgproc.CV_HOUGH_GRADIENT, 2.0, mGray.rows() / 8, 
                iCannyUpperThreshold, iAccumulator, iMinRadius, iMaxRadius);
                
    // Results come back as one row; each column holds (x, y, radius).
    // Draw at most 10 circles.
    if (mIntermediateMat.cols() > 0)
        for (int x = 0; x < Math.min(mIntermediateMat.cols(), 10); x++) 
            {
            double vCircle[] = mIntermediateMat.get(0,x);

            // a null column means we have run out of circles
            if (vCircle == null)
                break;

            pt = new Point(Math.round(vCircle[0]), Math.round(vCircle[1]));
            radius = (int)Math.round(vCircle[2]);
            // draw the found circle
            Core.circle(mRgba, pt, radius, colorRed, iLineThickness);
                        
            // draw a cross on the centre of the circle
            DrawCross (mRgba, pt);
            }
                
        if (bDisplayTitle)
            ShowTitle ("Hough Circles", 1);
        	
    break;

Hough Lines

I limit the results to 50 lines, otherwise on very busy textured surfaces you can get a LOT of spurious results.

case VIEW_MODE_HOUGH_LINES:

    // Probabilistic Hough line detection on a Canny edge image.
    // NOTE(review): assumes mGray already holds the current frame in
    // greyscale — confirm against the surrounding code.

    // the lower this figure the more spurious lines you get
    // 50 upper looks good in CANNY, but 75 is better when converting that into Hough lines
    iCannyLowerThreshold = 45;
    iCannyUpperThreshold = 75;
                
    // edge-detect in place: mGray becomes the binary edge image
    Imgproc.Canny(mGray, mGray, iCannyLowerThreshold, iCannyUpperThreshold);

    // rho = 1 pixel, theta = 1 degree accumulator resolution
    Imgproc.HoughLinesP(mGray, lines, 1, Math.PI/180, iHoughLinesThreshold, iHoughLinesMinLineSize, iHoughLinesGap);
                
    // Each result column holds (x1, y1, x2, y2). Draw at most 50 lines so
    // busy textured surfaces don't flood the display with spurious results.
    for (int x = 0; x < Math.min(lines.cols(), 50); x++) 
        {
        double[] vec = lines.get(0, x);
                    
        // a null column means we have run out of lines
        if (vec == null)
            break;
                    
        double x1 = vec[0], y1 = vec[1], x2 = vec[2], y2 = vec[3];
        Point start = new Point(x1, y1);
        Point end = new Point(x2, y2);

        Core.line(mRgba, start, end, colorRed, 3);
        }
     
    if (bDisplayTitle)
        ShowTitle ("Hough Lines", 1);

    break;        

Colour Searching, with Bounding Contour

Convert to HSV, get the colours in range (here I'm looking for a nice bright yellow), erode the result a little to simplify it and reduce the resulting data set, get the contour, draw the contour (polygon).

    case VIEW_MODE_COLCONTOUR:

    // Colour search: threshold the frame in HSV around the tracked hue,
    // erode to smooth the mask, then find and draw the external contours.

    // Convert the image into an HSV image
    // NOTE(review): mRgba is a 4-channel RGBA mat but COLOR_RGB2HSV expects
    // a 3-channel source — confirm this works on this OpenCV build, or strip
    // the alpha channel first.
    Imgproc.cvtColor(mRgba, mHSVMat, Imgproc.COLOR_RGB2HSV, 3);

    // Keep only pixels within +/-10 of the tracked hue, requiring decent
    // saturation and value so washed-out pixels are rejected.
    Core.inRange(mHSVMat, new Scalar(byteColourTrackCentreHue[0] - 10, 100, 100),
                 new Scalar(byteColourTrackCentreHue[0] + 10, 255, 255), mHSVMat);

    // Here i'm only using the external contours and by
    // eroding we make the draw a teeny bit faster and the result a lot smoother
    // on the rough edges where the colour fades out of range by losing a lot
    // of the little spiky corners.
    Imgproc.erode(mHSVMat, mHSVMat, mErodeKernel);
    contours.clear();

    Imgproc.findContours(mHSVMat, contours, mContours, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);

    for (x = 0; x < contours.size(); x++) {
        d = Imgproc.contourArea (contours.get(x));

        // only simplify and draw contours big enough to matter
        if (d > iContourAreaMin) {
            // get an approximation of the contour (last but one param is the min required
            // distance between the real points and the new approximation (in pixels)

            // contours is a List, so contours.get(x) is a single MatOfPoint,
            // but approxPolyDP needs a MatOfPoint2f, so convert.
            // FIX: the conversion now happens only for contours that pass the
            // area test — the original converted every contour, including the
            // ones it then ignored, which matches the quad-detect variant.
            contours.get(x).convertTo(mMOP2f1, CvType.CV_32FC2);

            // 2px tolerance between the real edge and the approximation
            Imgproc.approxPolyDP(mMOP2f1, mMOP2f2, 2, true);

            // convert back to MatOfPoint and put it back in the list
            mMOP2f2.convertTo(contours.get(x), CvType.CV_32S);

            // draw the contour itself
            Imgproc.drawContours(mRgba, contours, x, colorRed, iLineThickness);
        }
    }

    if (bDisplayTitle)
        ShowTitle ("In-range + Contours", 1);

    break;

Colour Searching, with Quadrilateral

As above, except we simplify the boundary of the contour by allowing a much bigger gap between the actual edge of the block of colour and the bounding contour (15 pixels). If after that the resulting polygon has four corners we know we have a quadrilateral. In addition to drawing the contour, we draw in the diagonals of the quad.

case VIEW_MODE_YELLOW_QUAD_DETECT:

    // Like the colour-contour mode, but with a much coarser polygon
    // approximation (15px) so a solid block of colour collapses to a simple
    // polygon; if that polygon has exactly 4 vertices we treat it as a
    // quadrilateral and draw it plus its diagonals.

    // Convert the image into an HSV image
    // NOTE(review): mRgba is a 4-channel RGBA mat but COLOR_RGB2HSV expects
    // a 3-channel source — confirm this works on this OpenCV build.
    Imgproc.cvtColor(mRgba, mHSVMat, Imgproc.COLOR_RGB2HSV, 3);
            
    // keep only pixels within +/-12 of the tracked hue
    Core.inRange(mHSVMat, new Scalar(byteColourTrackCentreHue[0] - 12, 100, 100), 
        		     new Scalar(byteColourTrackCentreHue[0] + 12, 255, 255), mHSVMat);

    contours.clear();
            
    Imgproc.findContours(mHSVMat, contours, mContours, Imgproc.RETR_EXTERNAL, Imgproc.CHAIN_APPROX_SIMPLE);
            
    for (x = 0; x < contours.size(); x++) {
        d = Imgproc.contourArea (contours.get(x));

        // ignore contours too small to be the target
        if (d > iContourAreaMin) {
            // get an approximation of the contour (last but one param is the min required
            // distance between the real points and the new approximation (in pixels)
            		
            // approxPolyDP needs a MatOfPoint2f; contours holds MatOfPoint
            contours.get(x).convertTo(mMOP2f1, CvType.CV_32FC2);
            		
            Imgproc.approxPolyDP(mMOP2f1, mMOP2f2, 15, true);
                    
            // convert back to MatOfPoint and put it back in the list
            mMOP2f2.convertTo(contours.get(x), CvType.CV_32S);
            		
            // 4 rows = 4 vertices = a quadrilateral
            if (contours.get(x).rows() == 4) {
                        
            Converters.Mat_to_vector_Point2f(contours.get(x), pts);
            			
            Imgproc.drawContours(mRgba, contours, x, colorRed, iLineThickness);

            // draw the diagonals: corner 0 to 2, and 1 to 3
            Core.line(mRgba, pts.get(0), pts.get(2), colorRed, iLineThickness);
            Core.line(mRgba, pts.get(1), pts.get(3), colorRed, iLineThickness);
            }
        }
    }

    if (bDisplayTitle)
        ShowTitle ("Colour quadrilateral", 1);

    break;

Face Detection

This section uses some code from the OpenCV sample "face-detection" and uses the LBP cascade file

try {
    // DO FACE CASCADE SETUP
    // Copy the raw LBP cascade resource out to a private file (the OpenCV
    // CascadeClassifier loader needs a real file path), load it, then
    // delete the temporary copy.

    Context context = getApplicationContext();
    InputStream is3 = context.getResources().openRawResource(R.raw.lbpcascade_frontalface);
    File cascadeDir = context.getDir("cascade", Context.MODE_PRIVATE);
    File cascadeFile = new File(cascadeDir, "lbpcascade_frontalface.xml");

    FileOutputStream os = new FileOutputStream(cascadeFile);

    // FIX: the original leaked both streams if read() or write() threw;
    // close them in a finally block so they are released on every path.
    try {
        byte[] buffer = new byte[4096];
        int bytesRead;

        while ((bytesRead = is3.read(buffer)) != -1) {
            os.write(buffer, 0, bytesRead);
            }
        }
    finally {
        is3.close();
        os.close();
        }

    mCascade = new CascadeClassifier(cascadeFile.getAbsolutePath());

    if (mCascade.empty()) {
        Log.d("OpenCV", "Failed to load cascade classifier");
        mCascade = null;
        }  

    // the classifier has read the file, so the temp copy can go
    // (cascadeDir.delete() only succeeds once the directory is empty)
    cascadeFile.delete();
    cascadeDir.delete();

    } 
catch (IOException e) {
    e.printStackTrace();
    Log.d("OpenCV", "Failed to load cascade. Exception thrown: " + e);
    }

I first draw a rectangle around each face, and check to see if the current face is larger than the rest so far. I then copy the largest face to the top left of the screen. This just shows how, having found a face, you can take just that part of the image and do something else with it.

case VIEW_MODE_FACEDETECT:

    // Face detection via the LBP cascade loaded at setup: draw a rectangle
    // round every detected face, find the largest, and copy it (resized to
    // sSize) into a fixed region of the display frame.

    // Convert the image into a gray image
    Imgproc.cvtColor(mRgba, mGray, Imgproc.COLOR_RGBA2GRAY);

    if (mCascade != null) {
        int height = mGray.rows();
        // ignore any face smaller than 20% of the frame height
        int faceSize = Math.round(height * 0.2f);

        mCascade.detectMultiScale(mGray, faces, 1.1, 2, 2, new Size(faceSize, faceSize), new Size());

        // FIX: reset the "largest face" tracking for this frame. The
        // original never reset these fields and advanced
        // iMaxFaceHeightIndex with ++ each time a new maximum was seen —
        // a running count of maximum changes, not the index of the largest
        // face — which could select the wrong face or run past the end of
        // the array.
        iMaxFaceHeight = 0;
        iMaxFaceHeightIndex = 0;

        Rect[] facesArray = faces.toArray();

        for (int i = 0; i < facesArray.length; i++) {
            Rect r = facesArray[i];

            // draw the rectangle itself
            Core.rectangle(mRgba, r.tl(), r.br(), colorRed, 3);

            // remember the index of the tallest face seen so far
            if (iMaxFaceHeight < r.height) {
                iMaxFaceHeight = r.height;
                iMaxFaceHeightIndex = i;
                }
            }

        if (iMaxFaceHeight > 0) {
            // we have at least one face
            rect = facesArray[iMaxFaceHeightIndex];

            // get the submat of the rect containing the face
            mROIMat = mRgba.submat(rect);
            // resize it to the dest rect size (100x100)
            Imgproc.resize(mROIMat, mFaceResized, sSize);
            // copy it to dest rect in main image
            mFaceResized.copyTo(mFaceDest);
            mROIMat.release();
            }
        }
    }
// NOTE(review): the closing brace above has no matching opening brace
// within this snippet — preserved as-is from the original; confirm against
// the full switch statement it was lifted from.

if (bDisplayTitle)
    ShowTitle ("Face Detection", 1);
            
break;

Good Features to Track

This is the first stage of the process of tracking objects - first find good (ie strong) features to track and then track existing objects through subsequent frames.

case VIEW_MODE_GFTT:
    // Shi-Tomasi "good features to track": find up to 50 strong corners
    // (quality level 0.01, at least 30px apart) and mark each with a cross.
    // NOTE(review): assumes mGray already holds the current frame in
    // greyscale — confirm against the surrounding code.

    // DON'T do a gaussian blur here, it makes the results poorer and
    // takes 0.5 off the fps rate
    // Imgproc.GaussianBlur(mGray, mGray, new Size(5, 5), 2, 2);

    Imgproc.goodFeaturesToTrack(mGray, MOPcorners, 50, 0.01, 30);
            
    y = MOPcorners.rows();
        	
    corners = MOPcorners.toList();
        	
    for (int x = 0; x < y; x++) 
        // Core.circle(mRgba, corners.get(x), 8, colorRed, iLineThickness - 1);
        DrawCross (mRgba, corners.get(x));

    if (bDisplayTitle)
        ShowTitle ("Track Features", 1);
        	
    break;

Optical Flow

Compares "good features to track" from one frame to the next. Currently limited to 40 features or fewer (poor quality features are ignored). For background, see the OpenCV documentation on pyramidal Lucas-Kanade optical flow (calcOpticalFlowPyrLK).


case VIEW_MODE_OPFLOW:

    // Sparse optical flow (pyramidal Lucas-Kanade): track "good features"
    // from the previous frame into the current one and draw a line from
    // each feature's previous position to its current one.

    if (mMOP2fptsPrev.rows() == 0) {
        // first time through the loop so we need prev and this mats
        // plus prev points
        // get this mat
        Imgproc.cvtColor(mRgba, matOpFlowThis, Imgproc.COLOR_RGBA2GRAY);

        // copy that to prev mat
        matOpFlowThis.copyTo(matOpFlowPrev);

        // get prev corners
        Imgproc.goodFeaturesToTrack(matOpFlowPrev, MOPcorners, iGFFTMax, 0.01, 20);
        mMOP2fptsPrev.fromArray(MOPcorners.toArray());

        // get safe copy of this corners
        mMOP2fptsPrev.copyTo(mMOP2fptsSafe);
        }
    else
        {
        // we've been through before so
        // this mat is valid. Copy it to prev mat
        matOpFlowThis.copyTo(matOpFlowPrev);

        // get this mat
        Imgproc.cvtColor(mRgba, matOpFlowThis, Imgproc.COLOR_RGBA2GRAY);

        // get the corners for this mat
        Imgproc.goodFeaturesToTrack(matOpFlowThis, MOPcorners, iGFFTMax, 0.01, 20);
        mMOP2fptsThis.fromArray(MOPcorners.toArray());

        // retrieve the corners from the prev mat
        // (saves calculating them again)
        mMOP2fptsSafe.copyTo(mMOP2fptsPrev);

        // and save this corners for next time through
        mMOP2fptsThis.copyTo(mMOP2fptsSafe);
        }

    Video.calcOpticalFlowPyrLK(matOpFlowPrev, matOpFlowThis, mMOP2fptsPrev, mMOP2fptsThis, mMOBStatus, mMOFerr);

    cornersPrev = mMOP2fptsPrev.toList();
    cornersThis = mMOP2fptsThis.toList();
    byteStatus = mMOBStatus.toList();

    // FIX: the original looped to byteStatus.size() - 1, silently skipping
    // the last tracked feature; the status, prev and this lists are the
    // same length, so iterate the whole list.
    for (x = 0; x < byteStatus.size(); x++) {
        // status 1 means the flow for this feature was found
        if (byteStatus.get(x) == 1) {
            pt = cornersThis.get(x);
            pt2 = cornersPrev.get(x);

            Core.circle(mRgba, pt, 5, colorRed, iLineThickness - 1);
            Core.line(mRgba, pt, pt2, colorRed, iLineThickness);
            }
        }

    if (bDisplayTitle)
        ShowTitle ("Optical Flow", 1, colorGreen);

    break;