Posts Tagged ‘Image stitching’

A classmate of mine, Thomas LaBruyere (LinkedIn profile here), and I recently worked on a panorama app for Android using the OpenCV stitching module. Here is the GUI of the app –

Panorama app GUI

We built the app on a Google Nexus 7 running Android 4.1 (Jelly Bean). The basic functionality of the app is as follows –

1)  It has four buttons “Start Video Capture” , “Capture Still Image”, “Stitch” and “View Stitched Images”.

2) The app starts in normal video mode, where the video from the front camera is shown. There are two main ways of capturing the panorama and stitching it. The first is the "Start Video Capture" button: a video of a couple of seconds is recorded while you move your camera around to capture your surroundings. Then press the "Stitch" button, and the stitched panorama image is saved to the SD card after a couple of seconds. The second is the "Capture Still Image" button: click the button to store images to be stitched into the panorama while you move the camera around. Once done capturing the images to be stitched, click the "Stitch" button to complete the panorama stitching. The stitched output will be saved to the SD card. You may change the path in the code accordingly to save it to a different location on the disk. The "View Stitched Images" button should open up the gallery to look through the stitched images, but it has a slight problem that hasn't been fixed yet, so it doesn't work as expected. You should navigate to the SD card folder manually to see the image.

The algorithm of panorama stitching is implemented through   OpenCV stitching module.  The Android jni interface communicates with the OpenCV C/C++ native  code.

We did not have any previous experience with Android programming or  linking OpenCV . We used the following link and got started –

Getting started with Android and OpenCV

Here is the output of the  panorama that we took in the class .

Panorama output

This project is old and gives some compilation errors. I don't have the setup now to fix it. I would highly recommend that you compile the example projects from opencv4android and integrate the code parts from below into them. Nevertheless, the project folder can be downloaded just for reference from here.

The two main codes from the project are shown below –

1) OpenCV code to stitch the images. (jni_part.cpp)

#include < jni.h >
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/stitching/stitcher.hpp>

#include < vector >
#include < iostream >
#include < stdio.h >
#include < list >
#include< sstream >
#include< string >

using namespace std;
using namespace cv;

extern "C" {
//JNIEXPORT Mat JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba)

JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindFeatures(
		JNIEnv*, jobject, jlong im1, jlong im2, jlong im3, jint no_images) {

	vector < Mat > imgs;
	bool try_use_gpu = false;
	// New testing
	Mat& temp1 = *((Mat*) im1);
	Mat& temp2 = *((Mat*) im2);
	Mat& pano = *((Mat*) im3);

	for (int k = 0; k < no_images; ++k) {
		string id;
		ostringstream convert;
		convert << k;
		id = convert.str();
		Mat img = imread("/storage/emulated/0/panoTmpImage/im" + id + ".jpeg");

		imgs.push_back(img);
	}

	Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
	Stitcher::Status status = stitcher.stitch(imgs, pano);

}

}

2) Android code that has main GUI and calls the OpenCV function (Sample3Native.java)

package org.opencv.samples.tutorial3;

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.Highgui;

import android.app.Activity;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.os.Handler;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.Window;
import android.view.WindowManager;
import android.widget.Button;

/**
 * Panorama capture activity. Shows the live camera preview, collects frames
 * either continuously ("video" mode) or one at a time ("still" mode), writes
 * them to a temp folder on external storage, and calls the native OpenCV
 * stitcher ({@link #FindFeatures}) to produce the panorama.
 */
public class Sample3Native extends Activity implements CvCameraViewListener {
	private static final String TAG = "OCVSample::Activity";

	// Modes consumed by onCameraFrame().
	public static final int VIEW_MODE_RGBA = 0; // plain live preview
	public static final int SAVE_IMAGE_MAT = 1; // video capture: sample frames for a fixed interval
	public static final int CAPT_STILL_IM = 2;  // grab a single frame, then return to preview
	private static int viewMode = VIEW_MODE_RGBA;
	private Mat mRgba;     // working copy of the current preview frame
	private Mat mGrayMat;
	private Mat panorama;  // stitched result, filled in by the native code
	private Mat mtemp;
	// Frames queued for stitching; each is a clone so the camera may reuse its buffer.
	private List<Mat> images_to_be_stitched = new ArrayList<Mat>();
	private CameraBridgeViewBase mOpenCvCameraView;
	private long mPrevTime = new Date().getTime();
	// In video mode, keep every FRAME2GRAB-th frame.
	private static final int FRAME2GRAB = 10;
	private int mframeNum = 0;
	// Temp folder the native stitcher reads from; must match the path in jni_part.cpp.
	private static final File tempImageDir = new File(Environment.getExternalStorageDirectory() + File.separator + "panoTmpImage");
	private static final File StitchImageDir = new File(Environment.getExternalStorageDirectory() + File.separator + "panoStitchIm");
	private static final String mImageName = "im";
	private static final String mImageExt = ".jpeg";
	private long recordStart = new Date().getTime();
	// Length of a video-capture session in MILLISECONDS (3 seconds); the old
	// name/comment incorrectly suggested the value was in seconds.
	private static final long MAX_VIDEO_INTERVAL_MS = 3 * 1000;
	public final Handler mHandler = new Handler();

	// Runnable for posting UI updates back to the main thread.
	final Runnable mUpdateResults = new Runnable() {
		public void run() {
			updateResultsInUi();
		}
	};

	// Placeholder: UI refresh hook invoked via mUpdateResults.
	private void updateResultsInUi() {

	}

	private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
		@Override
		public void onManagerConnected(int status) {
			switch (status) {
			case LoaderCallbackInterface.SUCCESS: {
				Log.i(TAG, "OpenCV loaded successfully");

				// Load native library after(!) OpenCV initialization
				System.loadLibrary("native_sample");

				mOpenCvCameraView.enableView();
			}
				break;
			default: {
				super.onManagerConnected(status);
			}
				break;
			}
		}
	};

	public Sample3Native() {
		Log.i(TAG, "Instantiated new " + this.getClass());
	}

	/** Called when the activity is first created; wires up the four buttons. */
	@Override
	public void onCreate(Bundle savedInstanceState) {
		Log.i(TAG, "called onCreate");
		super.onCreate(savedInstanceState);
		requestWindowFeature(Window.FEATURE_NO_TITLE);
		getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

		setContentView(R.layout.tutorial3_surface_view);

		final Button btnVidCapt = (Button) findViewById(R.id.btnVidCapt);
		btnVidCapt.setOnClickListener(new View.OnClickListener() {
			public void onClick(View v) {
				startVidCap();
			}
		});

		final Button btnStitch = (Button) findViewById(R.id.btnStitch);
		btnStitch.setOnClickListener(new View.OnClickListener() {
			public void onClick(View v) {
				stitchImages();
			}
		});

		final Button btnViewStitchedIm = (Button) findViewById(R.id.btnViewStitchedIm);
		btnViewStitchedIm.setOnClickListener(new View.OnClickListener() {
			public void onClick(View v) {
				viewStitchImages();
			}
		});

		final Button btnCapStil = (Button) findViewById(R.id.btnCapStil);
		btnCapStil.setOnClickListener(new View.OnClickListener() {
			public void onClick(View v) {
				captStillImage();
			}
		});
		mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial4_activity_surface_view);
		mOpenCvCameraView.setCvCameraViewListener(this);
	}

	@Override
	public void onPause() {
		if (mOpenCvCameraView != null)
			mOpenCvCameraView.disableView();
		super.onPause();
	}

	@Override
	public void onResume() {
		super.onResume();
		OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this,
				mLoaderCallback);
	}

	public void onDestroy() {
		super.onDestroy();
		if (mOpenCvCameraView != null)
			mOpenCvCameraView.disableView();
	}

	public void onCameraViewStarted(int width, int height) {
		mRgba = new Mat(height, width, CvType.CV_8UC3);
		mGrayMat = new Mat(height, width, CvType.CV_8UC1);
		mtemp = new Mat(height, width, CvType.CV_8UC3);
		panorama = new Mat(height, width, CvType.CV_8UC3);
	}

	public void onCameraViewStopped() {
		mRgba.release();
		mGrayMat.release();
		mtemp.release();
		panorama.release();
	}

	/**
	 * Per-frame callback. Overlays the current mode label and, depending on
	 * viewMode, samples frames into images_to_be_stitched.
	 */
	public Mat onCameraFrame(Mat inputFrame) {
		inputFrame.copyTo(mRgba);
		switch (Sample3Native.viewMode) {
		case Sample3Native.VIEW_MODE_RGBA: {
			Core.putText(mRgba, "Video Mode", new Point(10, 50), 3, 1, new Scalar(255, 0, 0, 255), 2);
		}break;
		case Sample3Native.SAVE_IMAGE_MAT: {
			long curTime = new Date().getTime();
			Core.putText(mRgba, "Record Mode", new Point(10, 50), 3, 1, new Scalar(255, 0, 0, 255), 2);
			long timeDiff = curTime - recordStart;
			Log.i("timeDiff", Long.toString(timeDiff));

			if (timeDiff < MAX_VIDEO_INTERVAL_MS) {
				// Keep only every FRAME2GRAB-th frame to limit the stitcher's input.
				if ((mframeNum % FRAME2GRAB) == 0)
					saveImageToArray(inputFrame);
				mframeNum++;
			}
			else
			{
				// Session time elapsed: stop recording.
				mframeNum = 0;
				turnOffCapture();
			}
		}break;
		case Sample3Native.CAPT_STILL_IM:
		{
			// Grab a single frame, then fall back to plain preview.
			saveImageToArray(inputFrame);
			Sample3Native.viewMode = Sample3Native.VIEW_MODE_RGBA;
		}break;
		}
		return mRgba;
	}

	/** Toggles video capture on/off ("Start Video Capture" button). */
	public void startVidCap() {
		if (Sample3Native.viewMode == Sample3Native.VIEW_MODE_RGBA)
		{
			turnOnCapture();
		}
		else if (Sample3Native.viewMode == Sample3Native.SAVE_IMAGE_MAT)
		{
			turnOffCapture();
		}
	}

	private void turnOffCapture()
	{
		Sample3Native.viewMode = Sample3Native.VIEW_MODE_RGBA;
	}

	private void turnOnCapture()
	{
		Sample3Native.viewMode = Sample3Native.SAVE_IMAGE_MAT;
		// Start a fresh session: drop previously collected frames and reset the clock.
		images_to_be_stitched.clear();
		recordStart = new Date().getTime();
	}

	/**
	 * "Stitch" button: writes the collected frames to the temp folder (the
	 * native code re-reads them from there), calls the native stitcher, saves
	 * the panorama, and deletes the temp files.
	 */
	public void stitchImages() {
		if (!images_to_be_stitched.isEmpty())
		{
			for (int j = 0; j < images_to_be_stitched.size(); j++) {
				writeImage(images_to_be_stitched.get(j), j);
			}
		Log.i("stitchImages", "Done writing 2 disk. Starting stitching " + images_to_be_stitched.size() + " images");
			// The first two Mat addresses are unused by the native side (it
			// reads the images back from disk); only the pano address and the
			// image count matter.
			FindFeatures(images_to_be_stitched.get(0).getNativeObjAddr(),
					images_to_be_stitched.get(0).getNativeObjAddr(),
					panorama.getNativeObjAddr(), images_to_be_stitched.size());
		Log.i("stitchImages", "Done stitching. Writing panarama");
			writePano(panorama);

		Log.i("stitchImages", "deleting temp files");

			deleteTmpIm();
		}
	}

	/** "Capture Still Image" button: grab one frame on the next camera callback. */
	public void captStillImage()
	{
		Sample3Native.viewMode = Sample3Native.CAPT_STILL_IM;
	}

	// File name for the j-th temp frame, e.g. "im3.jpeg".
	private String getFullFileName(int num)
	{
		return mImageName + num + mImageExt;
	}

	private void writeImage(Mat image, int imNum)
	{
		writeImage(image, getFullFileName(imNum));
	}

	// Writes one frame into the temp folder, creating it if necessary.
	private void writeImage(Mat image, String fileName) {
		if (!tempImageDir.exists())
			tempImageDir.mkdirs(); // mkdirs: also create missing parents
		Highgui.imwrite(new File(tempImageDir, fileName).getPath(), image);
	}

	// Saves the stitched panorama with a timestamped file name.
	private void writePano(Mat image)
	{
		Date dateNow = new Date();
		SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMdd_HHmmss");
		if (!StitchImageDir.exists())
			StitchImageDir.mkdirs();
		Highgui.imwrite(StitchImageDir.getPath() + File.separator + "panoStich" + dateFormat.format(dateNow) + mImageExt, image);
	}

	// Removes the temp frames and clears the in-memory list.
	private void deleteTmpIm()
	{
		// BUGFIX: the original built File(getFullFileName(j)) without the
		// directory, so it targeted the working directory and never deleted
		// the temp images on external storage.
		for (int j = 0; j < images_to_be_stitched.size(); j++) {
			new File(tempImageDir, getFullFileName(j)).delete();
		}
		images_to_be_stitched.clear();
	}

	/**
	 * "View Stitched Images" button. NOTE(review): known to misbehave — the
	 * gallery activity does not reliably show the saved panoramas; browse the
	 * SD card folder manually instead.
	 */
	public void viewStitchImages()
	{
		Intent intent = new Intent(this, GalleryActivity.class);
		startActivity(intent);
	}

	// Clone the frame so the camera can safely reuse its buffer.
	private void saveImageToArray(Mat inputFrame) {
		images_to_be_stitched.add(inputFrame.clone());
	}

	// Debug helper: frames-per-second since the previous call.
	private int FPS() {
		long curTime = new Date().getTime();
		int FPS = (int) (1000 / (curTime - mPrevTime));
		mPrevTime = curTime;
		return FPS;
	}

	@Override
	public boolean onCreateOptionsMenu(Menu menu) {
		return true;
	}

	@Override
	public boolean onOptionsItemSelected(MenuItem item) {
		return true;
	}

	/**
	 * Native OpenCV stitcher (jni_part.cpp). image1/image2 are currently
	 * ignored by the native side; image3 receives the panorama; count is the
	 * number of temp images to read from panoTmpImage.
	 */
	public native void FindFeatures(long image1, long image2, long image3,
			int count);
}

The code  snippet shown below  is  for simple image stitching of two images in OpenCV . It can easily be modified to stitch multiple images together and create a Panorama.

OpenCV also has a stitching module which helps in achieving this task and which is more robust than this. The code presented here will help in understanding the major steps involved in an image stitching algorithm. I am using OpenCV 2.4.3 and Visual Studio 2010. This code is based on the OpenCV tutorial available here.

The main parts of stitching algorithm are –  1) Finding Surf descriptors in both images 2) Matching the surf descriptors between two images . 3) Using  RANSAC to estimate the homography matrix using the matched surf descriptors. 4) Warping the images based on the homography matrix.

Input images :    

Stitched Output:

Code:

#include <stdio.h>
#include <iostream>

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;

void readme();

/** @function main */
int main( int argc, char** argv )
{
 if( argc != 3 )
 { readme(); return -1; }

// Load the images
 Mat image1= imread( argv[2] );
 Mat image2= imread( argv[1] );
 Mat gray_image1;
 Mat gray_image2;
 // Convert to Grayscale
 cvtColor( image1, gray_image1, CV_RGB2GRAY );
 cvtColor( image2, gray_image2, CV_RGB2GRAY );

imshow("first image",image2);
 imshow("second image",image1);

if( !gray_image1.data || !gray_image2.data )
 { std::cout<< " --(!) Error reading images " << std::endl; return -1; }

//-- Step 1: Detect the keypoints using SURF Detector
 int minHessian = 400;

SurfFeatureDetector detector( minHessian );

std::vector< KeyPoint > keypoints_object, keypoints_scene;

detector.detect( gray_image1, keypoints_object );
 detector.detect( gray_image2, keypoints_scene );

//-- Step 2: Calculate descriptors (feature vectors)
 SurfDescriptorExtractor extractor;

Mat descriptors_object, descriptors_scene;

extractor.compute( gray_image1, keypoints_object, descriptors_object );
 extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

//-- Step 3: Matching descriptor vectors using FLANN matcher
 FlannBasedMatcher matcher;
 std::vector< DMatch > matches;
 matcher.match( descriptors_object, descriptors_scene, matches );

double max_dist = 0; double min_dist = 100;

//-- Quick calculation of max and min distances between keypoints
 for( int i = 0; i < descriptors_object.rows; i++ )
 { double dist = matches[i].distance;
 if( dist < min_dist ) min_dist = dist;
 if( dist > max_dist ) max_dist = dist;
 }

printf("-- Max dist : %f \n", max_dist );
 printf("-- Min dist : %f \n", min_dist );

//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
 std::vector< DMatch > good_matches;

for( int i = 0; i < descriptors_object.rows; i++ )
 { if( matches[i].distance < 3*min_dist )
 { good_matches.push_back( matches[i]); }
 }
 std::vector< Point2f > obj;
 std::vector< Point2f > scene;

for( int i = 0; i < good_matches.size(); i++ )
 {
 //-- Get the keypoints from the good matches
 obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
 scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
 }

// Find the Homography Matrix
 Mat H = findHomography( obj, scene, CV_RANSAC );
 // Use the Homography Matrix to warp the images
 cv::Mat result;
 warpPerspective(image1,result,H,cv::Size(image1.cols+image2.cols,image1.rows));
 cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
 image2.copyTo(half);
 imshow( "Result", result );

 waitKey(0);
 return 0;
 }

/** Prints command-line usage for the Panorama tool. */
void readme()
{
    std::cout << " Usage: Panorama < img1 > < img2 >" << std::endl;
}

Running the code :

Build the code and pass in the two images to be stitched as arguments to the generated exe. Sometimes if the stitching  output is not proper reversing the order of the two images when you pass to the exe would help.