Tools:
It is assumed that OpenCV is already installed.

The goal:
To apply a perspective transformation to an image using a homography and overlay the result onto another image.

Input :
Two image files – “main image” and “logo image”.
main

logo

Output:
Overlaid images

out1

out2


Algorithm
The “logo image” is overlaid onto the main image. We need a homography matrix to transform the points of the “logo image” before it is overlaid. To calculate a homography matrix we need 4 corresponding pairs of points from the “logo image” and the “main image”. The 4 points for the “logo image” are taken as the four corners of the image, whereas the 4 points for the “main image” are chosen by the user. Remember that each of the 4 points in each image is of the form (x,y).

Once the homography matrix is calculated, the “logo image” is perspectively projected onto the “main image”. In this implementation the pixels of the “logo image” simply replace the pixels of the “main image”; you can change this to any other kind of blending, for example the alpha blend sketched below.
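For instance, a simple alpha blend could be used instead of outright replacement. The helper below is only a sketch (showBlended is not part of the original code); it reuses the same OpenCV headers and using namespace cv as the full listing further down, and assumes both Mats are 8-bit BGR images of the same size, which warpPerspective guarantees in this program.

// Hypothetical alternative to showFinal(): alpha-blend the warped logo over the
// main image instead of replacing its pixels.
void showBlended(Mat src1, Mat src2, double alpha = 0.5)
{
    // Mask of the (non-black) warped logo pixels, same idea as in showFinal().
    Mat gray, mask;
    cvtColor(src2, gray, CV_BGR2GRAY);
    threshold(gray, mask, 0, 255, CV_THRESH_BINARY);

    // Blend the two images everywhere, then keep the blend only inside the mask.
    Mat mixed;
    addWeighted(src1, 1.0 - alpha, src2, alpha, 0.0, mixed);
    Mat finalImage = src1.clone();
    mixed.copyTo(finalImage, mask);

    imshow("output", finalImage);
    waitKey(0);
}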

Running the code:

Github code
Assume that the executable generated is “homography”. The image files (main.jpg, logo.jpg) are passed as two arguments. Note that the image files are present in the GitHub repository.

$ ./homography main.jpg logo.jpg
The video link with the demo:

Code:

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include "opencv2/imgproc/imgproc.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include <iostream>
#include <limits>
#include <numeric>
using namespace cv;
using namespace std;

// We need 4 corresponding 2D points(x,y) to calculate homography.
vector<Point2f> left_image;      // Stores 4 points(x,y) of the logo image. Here the four points are 4 corners of image.
vector<Point2f> right_image;    // stores 4 points that the user clicks(mouse left click) in the main image.

// Image containers for main and logo image
Mat imageMain;
Mat imageLogo;

// Function to add main image and transformed logo image and show final output.
// The logo image replaces the pixels of the main image in this implementation.
void showFinal(Mat src1,Mat src2)
{

    Mat gray,gray_inv,src1final,src2final;
    cvtColor(src2,gray,CV_BGR2GRAY);
    threshold(gray,gray,0,255,CV_THRESH_BINARY);
    //adaptiveThreshold(gray,gray,255,ADAPTIVE_THRESH_MEAN_C,THRESH_BINARY,5,4);
    bitwise_not ( gray, gray_inv );
    src1.copyTo(src1final,gray_inv);
    src2.copyTo(src2final,gray);
    Mat finalImage = src1final+src2final;
    namedWindow( "output", WINDOW_AUTOSIZE );
    imshow("output",finalImage);
    cvWaitKey(0);

}

// Here we get four points from the user with left mouse clicks.
// On the 5th click we output the overlaid image.
void on_mouse( int e, int x, int y, int d, void *ptr )
{
    if (e == EVENT_LBUTTONDOWN )
    {
        if(right_image.size() < 4 )
        {

            right_image.push_back(Point2f(float(x),float(y)));
            cout << x << " "<< y <<endl;
        }
        else
        {
            cout << " Calculating Homography " <<endl;
            // Deactivate callback
            cv::setMouseCallback("Display window", NULL, NULL);
            // once we get 4 corresponding points in both images calculate homography matrix
            Mat H = findHomography(  left_image,right_image,0 );
            Mat logoWarped;
            // Warp the logo image to change its perspective
            warpPerspective(imageLogo,logoWarped,H,imageMain.size() );
            showFinal(imageMain,logoWarped);

        }

    }
}


int main( int argc, char** argv )
{
//  We need two arguments: "main image" and "logo image".
    if( argc != 3)
    {
        cout <<" Usage: error" << endl;
        return -1;
    }


// Load images from arguments passed.
    imageMain = imread(argv[1], CV_LOAD_IMAGE_COLOR);
    imageLogo = imread(argv[2], CV_LOAD_IMAGE_COLOR);
    if( imageMain.empty() || imageLogo.empty() )
    {
        cout << "Could not read one of the input images." << endl;
        return -1;
    }
// Push the 4 corners of the logo image as the 4 points for correspondence to calculate homography.
    left_image.push_back(Point2f(float(0),float(0)));
    left_image.push_back(Point2f(float(0),float(imageLogo.rows)));
    left_image.push_back(Point2f(float(imageLogo.cols),float(imageLogo.rows)));
    left_image.push_back(Point2f(float(imageLogo.cols),float(0)));



    namedWindow( "Display window", WINDOW_AUTOSIZE );// Create a window for display.
    imshow( "Display window", imageMain );


    setMouseCallback("Display window",on_mouse, NULL );


//  Press "Escape button" to exit
    while(1)
    {
        int key=cvWaitKey(10);
        if(key==27) break;
    }


    return 0;
}

A buddy of mine from class, Thomas LaBruyere (LinkedIn profile here), and I recently worked on a panorama app for Android using the OpenCV stitching module. Here is the GUI of the app –

Panorama app GUI

We built the app on a Google Nexus 7 running Android 4.1 (Jelly Bean). The basic functionality of the app is as follows –

1) It has four buttons: “Start Video Capture”, “Capture Still Image”, “Stitch” and “View Stitched Images”.

2) The app starts in normal video mode, where the video from the front camera is shown. There are two main ways of capturing and stitching a panorama. The first is the “Start Video Capture” button: a video of a couple of seconds is recorded while you move your camera around to capture your surroundings; then press the “Stitch” button and, after a few seconds, the stitched panorama image is saved to the SD card. The second is the “Capture Still Image” button: click it to store an image to be stitched each time you move the camera to a new position. Once you are done capturing the images, click the “Stitch” button to complete the panorama stitching; the stitched output is saved to the SD card. You may change the path in the code to save it elsewhere on the disk. The “View Stitched Images” button should open the gallery to look through the stitched images, but it has a small problem that hasn’t been fixed yet, so it won’t work as expected; navigate to the SD card folder manually to see the image.

The panorama stitching itself is implemented through the OpenCV stitching module. The Android JNI interface communicates with the native OpenCV C/C++ code.
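For reference, once the Android/JNI plumbing is stripped away, the native stitching boils down to a single call to cv::Stitcher. The following is a minimal desktop sketch of that idea (assuming OpenCV 2.4.x; it is not the app code itself):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/stitching/stitcher.hpp>
#include <iostream>
#include <vector>

using namespace cv;

int main(int argc, char** argv)
{
    if (argc < 3)
    {
        std::cout << "Usage: stitch img1 img2 [img3 ...]" << std::endl;
        return -1;
    }

    // Read all images passed on the command line.
    std::vector<Mat> imgs;
    for (int i = 1; i < argc; ++i)
    {
        Mat img = imread(argv[i]);
        if (!img.empty())
            imgs.push_back(img);
    }

    // Stitch them into a single panorama.
    Mat pano;
    Stitcher stitcher = Stitcher::createDefault(false /* try_use_gpu */);
    Stitcher::Status status = stitcher.stitch(imgs, pano);

    if (status != Stitcher::OK)
    {
        std::cout << "Stitching failed, status = " << int(status) << std::endl;
        return -1;
    }
    imwrite("panorama.jpg", pano);
    return 0;
}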

We did not have any previous experience with Android programming or with linking OpenCV to it. We used the following link to get started –

Getting started with Android and OpenCV

Here is the output of the panorama that we took in class.

Panorama output

This project is old and gives some compilation errors, and I don’t have the setup any more to fix it. I would highly recommend compiling the example projects from OpenCV4Android and integrating the code parts below into them. Nevertheless, the project folder can be downloaded for reference from here.

The two main source files from the project are shown below –

1) OpenCV code to stitch the images. (jni_part.cpp)

#include <jni.h>
#include <opencv2/core/core.hpp>
#include <opencv2/features2d/features2d.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/calib3d/calib3d.hpp>
#include <opencv2/stitching/stitcher.hpp>

#include <vector>
#include <iostream>
#include <stdio.h>
#include <list>
#include <sstream>
#include <string>

using namespace std;
using namespace cv;

extern "C" {
//JNIEXPORT Mat JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindFeatures(JNIEnv*, jobject, jlong addrGray, jlong addrRgba)

JNIEXPORT void JNICALL Java_org_opencv_samples_tutorial3_Sample3Native_FindFeatures(
		JNIEnv*, jobject, jlong im1, jlong im2, jlong im3, jint no_images) {
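	// Note: the Mat arguments im1 and im2 end up unused below; the images to be
	// stitched are re-read from the temporary files that the Java side wrote to the SD card.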

	vector < Mat > imgs;
	bool try_use_gpu = false;
	// New testing
	Mat& temp1 = *((Mat*) im1);
	Mat& temp2 = *((Mat*) im2);
	Mat& pano = *((Mat*) im3);

	for (int k = 0; k < no_images; ++k) {
		string id;
		ostringstream convert;
		convert << k;
		id = convert.str();
		Mat img = imread("/storage/emulated/0/panoTmpImage/im" + id + ".jpeg");

		imgs.push_back(img);
	}

	Stitcher stitcher = Stitcher::createDefault(try_use_gpu);
	Stitcher::Status status = stitcher.stitch(imgs, pano);

}

}

2) Android code that has main GUI and calls the OpenCV function (Sample3Native.java)

package org.opencv.samples.tutorial3;

import java.io.File;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Date;
import java.util.List;

import org.opencv.android.BaseLoaderCallback;
import org.opencv.android.CameraBridgeViewBase;
import org.opencv.android.CameraBridgeViewBase.CvCameraViewListener;
import org.opencv.android.LoaderCallbackInterface;
import org.opencv.android.OpenCVLoader;
import org.opencv.core.Core;
import org.opencv.core.CvType;
import org.opencv.core.Mat;
import org.opencv.core.Point;
import org.opencv.core.Scalar;
import org.opencv.highgui.Highgui;

import android.app.Activity;
import android.content.Intent;
import android.net.Uri;
import android.os.Bundle;
import android.os.Environment;
import android.os.Handler;
import android.util.Log;
import android.view.Menu;
import android.view.MenuItem;
import android.view.View;
import android.view.Window;
import android.view.WindowManager;
import android.widget.Button;

public class Sample3Native extends Activity implements CvCameraViewListener {
	private static final String TAG = "OCVSample::Activity";

	public static final int VIEW_MODE_RGBA = 0;
	public static final int SAVE_IMAGE_MAT = 1;
	public static final int CAPT_STILL_IM = 2;
	private static int viewMode = VIEW_MODE_RGBA;
//	public static int image_count = 0;
	private MenuItem mStitch;
	private MenuItem mItemCaptureImage;
	private Mat mRgba;
	private Mat mGrayMat;
	private Mat panorama;
	private Mat mtemp;
	private List < Mat > images_to_be_stitched = new ArrayList < Mat >();
	private CameraBridgeViewBase mOpenCvCameraView;
	private long mPrevTime = new Date().getTime();
	private static final int FRAME2GRAB = 10;
	private int mframeNum = 0;
	private static final File tempImageDir = new File(Environment.getExternalStorageDirectory() + File.separator + "panoTmpImage");
	private static final File StitchImageDir = new File(Environment.getExternalStorageDirectory()+ File.separator  + "panoStitchIm");
	private static final String mImageName = "im";
	private static final String mImageExt = ".jpeg";
	private long recordStart = new Date().getTime();
	private static final long MAX_VIDEO_INTERVAL_IN_SECONDS = 3 * 1000; // 3 seconds, expressed in milliseconds
	public final Handler mHandler = new Handler();

	// Create runnable for posting
    final Runnable mUpdateResults = new Runnable() {
        public void run() {
            updateResultsInUi();
        }
    };

    private void updateResultsInUi()
    {

    }

	private BaseLoaderCallback mLoaderCallback = new BaseLoaderCallback(this) {
		@Override
		public void onManagerConnected(int status) {
			switch (status) {
			case LoaderCallbackInterface.SUCCESS: {
				Log.i(TAG, "OpenCV loaded successfully");

				// Load native library after(!) OpenCV initialization
				System.loadLibrary("native_sample");

				mOpenCvCameraView.enableView();
			}
				break;
			default: {
				super.onManagerConnected(status);
			}
				break;
			}
		}
	};

	public Sample3Native() {
		Log.i(TAG, "Instantiated new " + this.getClass());
	}

	/** Called when the activity is first created. */
	@Override
	public void onCreate(Bundle savedInstanceState) {
		Log.i(TAG, "called onCreate");
		super.onCreate(savedInstanceState);
		requestWindowFeature(Window.FEATURE_NO_TITLE);
		getWindow().addFlags(WindowManager.LayoutParams.FLAG_KEEP_SCREEN_ON);

		setContentView(R.layout.tutorial3_surface_view);

		final Button btnVidCapt = (Button) findViewById(R.id.btnVidCapt);
		btnVidCapt.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
            	startVidCap();
            }
        });

		final Button btnStitch = (Button) findViewById(R.id.btnStitch);
		btnStitch.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
            	stitchImages();
            }
        });

		final Button btnViewStitchedIm = (Button) findViewById(R.id.btnViewStitchedIm);
		btnViewStitchedIm.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
            	viewStitchImages();
            }
        });

		final Button btnCapStil = (Button) findViewById(R.id.btnCapStil);
		btnCapStil.setOnClickListener(new View.OnClickListener() {
            public void onClick(View v) {
            	captStillImage();
            }
        });
		mOpenCvCameraView = (CameraBridgeViewBase) findViewById(R.id.tutorial4_activity_surface_view);
		mOpenCvCameraView.setCvCameraViewListener(this);
	}

	@Override
	public void onPause() {
		if (mOpenCvCameraView != null)
			mOpenCvCameraView.disableView();
		super.onPause();
	}

	@Override
	public void onResume() {
		super.onResume();
		OpenCVLoader.initAsync(OpenCVLoader.OPENCV_VERSION_2_4_3, this,
				mLoaderCallback);
	}

	public void onDestroy() {
		super.onDestroy();
		if (mOpenCvCameraView != null)
			mOpenCvCameraView.disableView();
	}

	public void onCameraViewStarted(int width, int height) {
		mRgba = new Mat(height, width, CvType.CV_8UC3);
		mGrayMat = new Mat(height, width, CvType.CV_8UC1);
		mtemp = new Mat(height, width, CvType.CV_8UC3);
		panorama = new Mat(height, width, CvType.CV_8UC3);
	}

	public void onCameraViewStopped() {
		mRgba.release();
		mGrayMat.release();
		mtemp.release();
		panorama.release();
	}

	public Mat onCameraFrame(Mat inputFrame) {
		inputFrame.copyTo(mRgba);
		switch (Sample3Native.viewMode) {
		case Sample3Native.VIEW_MODE_RGBA: {
			Core.putText(mRgba, "Video Mode", new Point(10, 50), 3, 1, new Scalar(255, 0, 0, 255), 2);
			// Update start recordtime until starting recording
		}break;
		case Sample3Native.SAVE_IMAGE_MAT: {
			long curTime = new Date().getTime();
			Core.putText(mRgba, "Record Mode", new Point(10, 50), 3, 1, new Scalar(255, 0, 0, 255), 2);
			long timeDiff = curTime - recordStart;
			Log.i("timeDiff", Long.toString(timeDiff));

			if ( timeDiff < MAX_VIDEO_INTERVAL_IN_SECONDS) {
				if ((mframeNum % FRAME2GRAB) == 0) {
					saveImageToArray(inputFrame);
					mframeNum++;
				}
				else
					mframeNum++;
			}
			else
			{
				mframeNum = 0;
				turnOffCapture();
			}
		}break;
		case Sample3Native.CAPT_STILL_IM :
		{
			saveImageToArray(inputFrame);
			Sample3Native.viewMode = Sample3Native.VIEW_MODE_RGBA;
		}
		}
		return mRgba;
	}

	public void startVidCap() {
		if (Sample3Native.viewMode == Sample3Native.VIEW_MODE_RGBA)
		{
			turnOnCapture();
		}
		else if (Sample3Native.viewMode == Sample3Native.SAVE_IMAGE_MAT)
		{
			turnOffCapture();
		}
	}

	private void turnOffCapture()
	{

		Sample3Native.viewMode = Sample3Native.VIEW_MODE_RGBA;
	}

	private void turnOnCapture()
	{

		Sample3Native.viewMode = Sample3Native.SAVE_IMAGE_MAT;
//		startVidCapture.setText("Stop Video Capture");
		images_to_be_stitched.clear();
		recordStart = new Date().getTime();

	}

	public void stitchImages() {
		if(!images_to_be_stitched.isEmpty())
		{
			for (int j = 0; j < images_to_be_stitched.size(); j++) {
				writeImage(images_to_be_stitched.get(j), j);
			}
		Log.i("stitchImages", "Done writing 2 disk. Starting stitching " + images_to_be_stitched.size() + " images");
			FindFeatures(images_to_be_stitched.get(0).getNativeObjAddr(),
					images_to_be_stitched.get(0).getNativeObjAddr(),
					panorama.getNativeObjAddr(), images_to_be_stitched.size());
		Log.i("stitchImages", "Done stitching. Writing panarama");
			writePano(panorama);

		Log.i("stitchImages", "deleting temp files");

			deleteTmpIm();
		}
	}

    public void captStillImage()
    {
    	Sample3Native.viewMode = Sample3Native.CAPT_STILL_IM;

    }

	private String getFullFileName( int num)
	{
		return mImageName + num + mImageExt;
	}

	private void writeImage(Mat image, int imNum)
	{
		writeImage(image, getFullFileName(imNum));
	}

	private void writeImage(Mat image, String fileName) {
		File createDir = tempImageDir;
		if(!createDir.exists())
			createDir.mkdir();
		Highgui.imwrite(tempImageDir+File.separator + fileName, image);
	}

	private void writePano(Mat image)
	{
		Date dateNow = new  Date();
		SimpleDateFormat dateFormat = new SimpleDateFormat("yyyyMMdd_HHmmss");
		if(!StitchImageDir.exists())
			StitchImageDir.mkdir();
		Highgui.imwrite(StitchImageDir.getPath()+ File.separator + "panoStich"+dateFormat.format(dateNow) +mImageExt, image);

	}

	private void deleteTmpIm()
    {
		File curFile;
		for (int j = 0; j < images_to_be_stitched.size(); j++) {
			curFile = new File(tempImageDir, getFullFileName(j));
			curFile.delete();
		}
		images_to_be_stitched.clear();
    }

	public void viewStitchImages()
	{

		Intent intent = new Intent(this, GalleryActivity.class);

		startActivity(intent);
	}

	private void saveImageToArray(Mat inputFrame) {
		images_to_be_stitched.add(inputFrame.clone());
	}

	private int FPS() {
		long curTime = new Date().getTime();
		int FPS = (int) (1000 / (curTime - mPrevTime));
		mPrevTime = curTime;
		return FPS;
	}

	@Override
	public boolean onCreateOptionsMenu(Menu menu) {
		return true;

	}

	@Override
	public boolean onOptionsItemSelected(MenuItem item) {
	return true;
	}

	// public native void FindFeatures(List pano_images, Long stitch );
	public native void FindFeatures(long image1, long image2, long image3,
			int count);
}

The code snippet shown below is for simple stitching of two images in OpenCV. It can easily be modified to stitch multiple images together and create a panorama.

OpenCV also has a stitching module which helps in achieving this task and which is more robust than this code. The code presented here will help in understanding the major steps involved in an image stitching algorithm. I am using OpenCV 2.4.3 and Visual Studio 2010. This code is based on the OpenCV tutorial available here.

The main parts of the stitching algorithm are –
1) Finding SURF descriptors in both images.
2) Matching the SURF descriptors between the two images.
3) Using RANSAC to estimate the homography matrix from the matched descriptors.
4) Warping one image onto the other based on the homography matrix.

Input images:

Stitched Output:

Code:

#include <stdio.h>
#include <iostream>

#include "opencv2/core/core.hpp"
#include "opencv2/features2d/features2d.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/nonfree/nonfree.hpp"
#include "opencv2/calib3d/calib3d.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;

void readme();

/** @function main */
int main( int argc, char** argv )
{
 if( argc != 3 )
 { readme(); return -1; }

// Load the images
 Mat image1 = imread( argv[2] );
 Mat image2 = imread( argv[1] );

 if( !image1.data || !image2.data )
 { std::cout << " --(!) Error reading images " << std::endl; return -1; }

 // Convert to grayscale (imread loads images in BGR order)
 Mat gray_image1;
 Mat gray_image2;
 cvtColor( image1, gray_image1, CV_BGR2GRAY );
 cvtColor( image2, gray_image2, CV_BGR2GRAY );

 imshow( "first image", image2 );
 imshow( "second image", image1 );

//-- Step 1: Detect the keypoints using SURF Detector
 int minHessian = 400;

SurfFeatureDetector detector( minHessian );

std::vector< KeyPoint > keypoints_object, keypoints_scene;

detector.detect( gray_image1, keypoints_object );
 detector.detect( gray_image2, keypoints_scene );

//-- Step 2: Calculate descriptors (feature vectors)
 SurfDescriptorExtractor extractor;

Mat descriptors_object, descriptors_scene;

extractor.compute( gray_image1, keypoints_object, descriptors_object );
 extractor.compute( gray_image2, keypoints_scene, descriptors_scene );

//-- Step 3: Matching descriptor vectors using FLANN matcher
 FlannBasedMatcher matcher;
 std::vector< DMatch > matches;
 matcher.match( descriptors_object, descriptors_scene, matches );

double max_dist = 0; double min_dist = 100;

//-- Quick calculation of max and min distances between keypoints
 for( int i = 0; i < descriptors_object.rows; i++ )
 { double dist = matches[i].distance;
 if( dist < min_dist ) min_dist = dist;
 if( dist > max_dist ) max_dist = dist;
 }

printf("-- Max dist : %f \n", max_dist );
 printf("-- Min dist : %f \n", min_dist );

//-- Use only "good" matches (i.e. whose distance is less than 3*min_dist )
 std::vector< DMatch > good_matches;

for( int i = 0; i < descriptors_object.rows; i++ )
 { if( matches[i].distance < 3*min_dist )
 { good_matches.push_back( matches[i]); }
 }
 std::vector< Point2f > obj;
 std::vector< Point2f > scene;

for( int i = 0; i < good_matches.size(); i++ )
 {
 //-- Get the keypoints from the good matches
 obj.push_back( keypoints_object[ good_matches[i].queryIdx ].pt );
 scene.push_back( keypoints_scene[ good_matches[i].trainIdx ].pt );
 }

// Find the Homography Matrix
 Mat H = findHomography( obj, scene, CV_RANSAC );
 // Use the Homography Matrix to warp the images
 cv::Mat result;
 warpPerspective(image1,result,H,cv::Size(image1.cols+image2.cols,image1.rows));
 cv::Mat half(result,cv::Rect(0,0,image2.cols,image2.rows));
 image2.copyTo(half);
 imshow( "Result", result );

 waitKey(0);
 return 0;
 }

/** @function readme */
 void readme()
 { std::cout << " Usage: Panorama < img1 > < img2 >" << std::endl; }

Running the code :

Build the code and pass the two images to be stitched as arguments to the generated exe. If the stitched output does not look right, reversing the order of the two images passed to the exe sometimes helps.
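Assuming the generated executable is named “Panorama” (as the usage message in the code suggests), the call looks like:

$ ./Panorama img1.jpg img2.jpg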

The code shown below calculates the Gaussian curvature on a parametric Bezier patch. The program flow goes like this –

a) Forming the Coons control point grid.
b) Constructing the Bezier patch corresponding to the Coons control grid points using De Casteljau's algorithm.
c) Finding the partial derivatives at a given parametric point (u,v) along the u direction, the v direction and the uv direction using De Casteljau's algorithm.
d) Finding the normal and the second partial derivatives along the u and v directions using De Casteljau's algorithm.
e) Finding the Gaussian curvature using the above calculated derivatives and normal.

The following code is written in Mathematica. The code can be optimized a lot; it only serves to give a basic understanding of computing Gaussian curvature using De Casteljau's algorithm on Bezier patches.
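For readers more comfortable with C/C++ than Mathematica, the recursive De Casteljau evaluation that the code relies on (the dca function defined below) corresponds to something like this sketch; the 1-D scalar setting and the names are purely illustrative:

#include <cstdio>
#include <vector>

// De Casteljau's algorithm: evaluate a degree-r Bezier curve defined by the
// control points c[0..r] at parameter t (0 <= t <= 1). This mirrors the
// recursive dca[c, r, i, t] definition in the Mathematica code below.
double deCasteljau(const std::vector<double>& c, int r, int i, double t)
{
    if (r == 0)
        return c[i];
    return (1.0 - t) * deCasteljau(c, r - 1, i, t)
         + t * deCasteljau(c, r - 1, i + 1, t);
}

int main()
{
    // Quadratic Bezier with control values 0, 1, 0 evaluated at t = 0.5
    std::vector<double> ctrl;
    ctrl.push_back(0.0);
    ctrl.push_back(1.0);
    ctrl.push_back(0.0);
    std::printf("%f\n", deCasteljau(ctrl, 2, 0, 0.5)); // prints 0.500000
    return 0;
}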

Output:


(*  Change the values of U and V as  you wish between 0 and 1*)

u = 0.25;
v = 0.25;

(* Boundary Polygons *)
coonPoints = {{{0, 0, 0}, {1, 0, 1}, {2, 0, 1}, {3, 0, 1}, {4, 0, 1}},
   		    {{0, 1, 0}, {}, {}, {}, {4, 1, 3}},
   		    {{0, 2, 0}, {}, {}, {}, {4, 2, 3}},
   		    {{0, 3, 0}, {1, 3, 1}, {2, 3, 1}, {3, 3, 1}, {4, 3, 1}}};

m = 3;
n = 4;
For[i = 1, i < m, i++,
  For[j = 1, j < n, j++,
   temp = ((({{(1 - (i/m))*(coonPoints[[1, j + 1]])}}) + ({{(i/
              m)*(coonPoints[[m + 1, 
              j + 1]])}}) + ({{(1 - (j/n))*(coonPoints[[i + 1, 
              1]])}}) + ({{(j/n)*(coonPoints[[i + 1, n + 1]])}})) - 
      Transpose[{{1 - (j/n)}, {j/n}}].Transpose[{{1 - (i/m), 
           i/m}}.{{coonPoints[[1, 1]], 
           coonPoints[[1, n + 1]]}, {coonPoints[[m + 1, 1]], 
           coonPoints[[m + 1, n + 1]]}}]);
   
   coonPoints[[i + 1, j + 1]] = temp[[1, 1]];]];
       coonPointsbackup = coonPoints;

(* De Casteljau's algorithm function definition *)
dca[c_, r_, i_, t_] := 
  If [r == 0, 
   c[[i + 1]], (1 - t)*dca[c, r - 1, i, t] + 
    t*dca[c, r - 1, i + 1, t]];

dcaPart1[u_, points_] := dca[points, m, 0, u];
dcaPart2[u_, v_, points_] := dca[dcaPart1[u, points], n, 0, v];

dcaPart1upartial[u_, points_] := dca[points, m - 1, 0, u];
dcaPart2upartial[u_, v_, points_] := 
  dca[dcaPart1upartial[u, points], n, 0, v]; 

dcaPart1uupartial[u_, points_] := dca[points, m - 2, 0, u];
dcaPart2uupartial[u_, v_, points_] := 
  dca[dcaPart1uupartial[u, points], n, 0, v]; 

dcaPart1vpartial[u_, points_] := dca[points, m, 0, u];
dcaPart2vpartial[u_, v_, points_] := 
  dca[dcaPart1vpartial[u, points], n - 1, 0, v]; 

dcaPart1vvpartial[u_, points_] := dca[points, m, 0, u];
dcaPart2vvpartial[u_, v_, points_] := 
  dca[dcaPart1vvpartial[u, points], n - 2, 0, v]; 

dcaPart1uvpartial[u_, points_] := dca[points, m - 1, 0, u];
dcaPart2uvpartial[u_, v_, points_] := 
  dca[dcaPart1uvpartial[u, points], n - 1, 0, v]; 

MatrixForm[coonPoints];

(* U partial*)
For[i = 1, i <= m , i++, 
  coonPoints[[i]] = coonPoints[[i + 1]] - coonPoints[[i]]];
a = coonPoints[[1]];
b = coonPoints[[2]];
c = coonPoints[[3]];
upartialmatrix = {a, b, c};
upartialbackup = upartialmatrix;

(* uu partial *)
For[i = 1, i <= m - 1, i++, 
  upartialbackup [[i]] = 
   upartialbackup [[i + 1]] - upartialbackup [[i]]];
aa = upartialbackup [[1]];
bb = upartialbackup [[2]];
uupartialmatrix = {aa, bb};


(* coonPoints again *)

     coonPoints = coonPointsbackup ;

(* V partial*)
For[j = 1, j <= n , j++, 
  coonPoints[[All, j]] = 
   coonPoints[[All, j + 1]] - coonPoints[[All, j]]];
a = coonPoints[[All, 1]];
b = coonPoints[[All, 2]];
c = coonPoints[[All, 3]];
d = coonPoints[[All, 4]];
vpartialmatrix = Transpose[{a, b, c, d}];
vpartialbackup = vpartialmatrix;

(* vv partial *)
For[j = 1, j <= n - 1, j++, 
  vpartialbackup [[All, j]] = 
   vpartialbackup [[All, j + 1]] - vpartialbackup [[All, j]]];
a1 = vpartialbackup [[All, 1]];
a2 = vpartialbackup [[All, 2]];
a3 = vpartialbackup [[All, 3]];
vvpartialmatrix = Transpose[{a1, a2, a3}];


(* uv partial calculation *)

  coonPoints = coonPointsbackup ;

For[i = 1, i <= m , i++, 
  coonPoints[[i]] = coonPoints[[i + 1]] - coonPoints[[i]]];
a = coonPoints[[1]];
b = coonPoints[[2]];
c = coonPoints[[3]];
upartialfinal = {a, b, c};
MatrixForm[upartialfinal];
For[j = 1, j <= n , j++, 
  upartialfinal[[All, j]] = 
   upartialfinal[[All, j + 1]] - upartialfinal[[All, j]]];
MatrixForm[upartialfinal];
a = upartialfinal [[All, 1]];
b = upartialfinal[[All, 2]];
c = upartialfinal [[All, 3]];
d = upartialfinal [[All, 4]];
uvpartialmatrix = Transpose[{a, b, c, d}];


(* Important functions to calculate partials *)
upartialfunction[u_, v_] := m*dcaPart2upartial[u, v, upartialmatrix ];
vpartialfunction[u_, v_] := n*dcaPart2vpartial[u, v, vpartialmatrix ];
uupartialfunction[u_, v_] := 
  m*(m - 1)*dcaPart2uupartial[u, v, uupartialmatrix ];
vvpartialfunction[u_, v_] := 
  n*(n - 1)*dcaPart2vvpartial[u, v, vvpartialmatrix ];
uvpartialfunction[u_, v_] := 
  m*n*dcaPart2uvpartial[u, v, uvpartialmatrix ];
normalfunction[u_, v_] := 
  Cross[upartialfunction[u, v], vpartialfunction[u, v]]/
   Norm[Cross[upartialfunction[u, v], vpartialfunction[u, v]]];


(* Calculating Gaussian Curvature Functions *)
f[upar_, uvpar_, vpar_] := 
  Det[{ {upar.upar, upar.vpar} , {upar.vpar, vpar.vpar }}];
s[norm_, uupar_, uvpar_, vvpar_] := 
  Det[{ {norm.uupar, norm.uvpar} , {norm.uvpar, norm.vvpar}}];
gauss[upar_, vpar_, norm_, uupar_, vvpar_, uvpar_] := 
  s[norm, uupar, uvpar, vvpar]/f[upar, uvpar, vpar];
gaussfinal[u_, v_] := 
  gauss[upartialfunction[u, v], vpartialfunction[u, v], 
   normalfunction[u, v], uupartialfunction[u, v], 
   vvpartialfunction[u, v], uvpartialfunction[u, v]];

(* calculating  gaussian curvature *)
gaussiancurvature = gaussfinal[u, v];
Print["The value of gaussian curvature at " , "(", u, " ," , v, ")", 
  "is", "   ", gaussiancurvature];

coonPoints = coonPointsbackup;
controlGrid = 
  Graphics3D[{PointSize[Medium], Red, Map[Point, coonPoints], Gray, 
    Line[coonPoints], Line[Transpose[coonPoints]]}];

(* Create BezierSurface using DCA *)
bezierSurface = 
  ParametricPlot3D[dcaPart2[u, v, coonPoints], {u, 0, 1}, {v, 0, 1}];




(* calculating actual partial values here *)

upartial = upartialfunction[u, v];
vpartial = vpartialfunction[u, v];
normal = normalfunction[u, v];
uvpartial = uvpartialfunction[u, v];
uupartial = uupartialfunction[u, v];
vvpartial = vvpartialfunction[u, v];

p = dcaPart2[u, v, coonPoints];
Print[" upartial at " , "(", u, " ," , v, ")", "is", "   ", upartial ];
Print[" vpartial at " , "(", u, " ," , v, ")", "is", "   ", vpartial ];
Print[" normal at " , "(", u, " ," , v, ")", "is", "   ", normal];
Print[" uvpartial at " , "(", u, " ," , v, ")", "is", "   ", 
  uvpartial ];
Print[" uupartial at " , "(", u, " ," , v, ")", "is", "   ", 
  uupartial ];
Print[" vvpartial at " , "(", u, " ," , v, ")", "is", "   ", 
  vvpartial ];
(* Calculating to plot *)
n = p - 0.5*normal;
up = p - 0.25*upartial;
vp = p - 0.25*vpartial;
uvp = p - 0.25*uvpartial;
plotpoints1 = 
  Graphics3D[{RGBColor[1, 0, 0], Cylinder[{p, n}, .03], 
    RGBColor[0, 1, 0], Cylinder[{p, up}, .03], RGBColor[0, 0, 1], 
    Cylinder[{p, vp}, .03], RGBColor[1, 1, 0], 
    Cylinder[{p, uvp}, .03]}];

(* Just Graphics to plot partials and their colors *)
kk = Graphics[{
    
    Text[Style["Normal Vector", Medium, "Label", Darker[Brown]], {1, 
      38}, {-1, 0}],
    Red, Thick, Line[{{1, 36}, {10, 36}}],
    Text[Style["Vpartial Vector", Medium, "Label", Darker[Brown]], {1,
       33}, {-1, 0}],
    Blue, Thick, Line[{{1, 31}, {10, 31}}],
    Text[Style["Upartial Vector", Medium, "Label", Darker[Brown]], {1,
       28}, {-1, 0}],
    Green, Thick, Line[{{1, 26}, {10, 26}}],
    Text[Style["UVpartial Vector", Medium, "Label", 
      Darker[Brown]], {1, 23}, {-1, 0}],
    Yellow, Thick, Line[{{1, 21}, {10, 21}}],
    }, ImageSize -> {150, 300}, PlotRange -> All];


Show[kk, PlotRange -> All]
Show[plotpoints1, controlGrid, bezierSurface, 
 PlotLabel -> 
  Style["upartial, vpartial, normal, uvpartial vectors plotted", 16],
 PlotRange -> All, Axes -> True]


Hi all,

I have made a presentation to explain the Viola-Jones face detection and tracking algorithm. It explains the concepts of Haar features, the integral image, AdaBoost, cascading classifiers, mean shift tracking and CamShift tracking. Please post your doubts in the comments if I have not made myself clear in explaining any of the concepts. I shall try my best to answer them. Thanks for watching!
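The presentation itself is conceptual, but if you want to try the detection side in OpenCV, the objdetect module ships cascade classifiers trained with Haar features and AdaBoost. Here is a minimal, illustrative sketch (the cascade XML file name is just an example; use whichever cascade your OpenCV installation provides):

#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/objdetect/objdetect.hpp>
#include <vector>

using namespace cv;

int main(int argc, char** argv)
{
    if (argc != 2) return -1;

    // Pre-trained Haar cascade; adjust the path to your OpenCV data folder.
    CascadeClassifier faceCascade("haarcascade_frontalface_alt.xml");
    Mat img = imread(argv[1]);
    if (faceCascade.empty() || img.empty()) return -1;

    Mat gray;
    cvtColor(img, gray, CV_BGR2GRAY);
    equalizeHist(gray, gray);

    // Detect faces at multiple scales using the cascade of boosted Haar classifiers.
    std::vector<Rect> faces;
    faceCascade.detectMultiScale(gray, faces, 1.1, 3, 0, Size(30, 30));

    // Draw a rectangle around each detection.
    for (size_t i = 0; i < faces.size(); ++i)
        rectangle(img, faces[i], Scalar(0, 255, 0), 2);

    imshow("faces", img);
    waitKey(0);
    return 0;
}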

The objective of this tutorial is –
1) To install OpenNI and NITE (drivers that help in getting data from the Kinect) on Ubuntu 12.04.
2) To set up Processing and SimpleOpenNI (an OpenNI and NITE wrapper for Processing), with which you can get started with Kinect coding.

What you need:

A Kinect with its USB cable and a computer with Ubuntu installed. It is not recommended to run Ubuntu as a Wubi install from Windows when working with the Kinect; it is better to install Ubuntu in its own partition and run it natively.

1) Installing OpenNI and NITE

1) I highly recommend installing the 32-bit versions of everything even if yours is a 64-bit system. Download the OpenNI/NITE installer package from here. For example, I downloaded OpenNI_NITE_Installer-Linux32-0.27.zip. Now extract the zip file to a folder.

Tip:

Instead of navigating to different folders using the cd command, you can enable the Open in Terminal option that appears when you right-click in any folder. To get it, open a terminal and type:

sudo apt-get install nautilus-open-terminal

After installing, type killall nautilus && nautilus in the terminal to activate the change immediately.


2) Navigate to the OpenNI-Bin-Dev-Linux-x86-v1.5.4.0 folder (the version may differ by the time you read this) in the unzipped folder using the terminal and type the following command to install –

sudo ./install.sh

3) Navigate to the NITE-Bin-Dev-Linux-x86-v1.5.2.21 folder (the version may differ by the time you read this) in the unzipped folder and type the following command to install –

sudo ./install.sh

4) Navigate to the Kinect folder and then to the Sensor-Bin-Linux-x86-v5.1.2.1 folder (the version may differ by the time you read this) in the unzipped folder and type the following command to install –

sudo ./install.sh
Testing the installation:

Connect the Kinect and ensure that the green LED on it is blinking. Now navigate to the unzipped folder OpenNI_NITE_Installer-Linux32-0.27, then to OpenNI-Bin-Dev-Linux-x86-v1.5.4.0 -> Samples -> Bin -> x86-Release, and try out the following sample in the terminal.

./NiViewer

The output with my Kinect:

2) Setting up Processing and SimpleOpenNI for Kinect coding

1) Download Processing for Linux from here.

2) Install the OpenJDK Java 7 Runtime by searching for it in the Ubuntu Software Center.

3) Download SimpleOpenNI from here and extract it. I downloaded SimpleOpenNI-0.27.zip, which works for all platforms.

4) Unzip the downloaded Processing folder, navigate to it and run ./processing in the terminal.

When started for the first time, Processing asks where to place the sketchbook folder. I let it use the default location. Its path on my computer is /home/sanmarino/sketchbook.

Create a folder named libraries inside the sketchbook folder and place the unzipped SimpleOpenNI folder from step 3 inside it.

5) Restart Processing. Now you should be able to see SimpleOpenNI listed, as shown below.

Now we are done with installation.

Running the Processing demo

In the Processing menu, under File -> Examples -> SimpleOpenNI (scroll down to see it) -> OpenNI, you will find DepthImage. Double-click on it to bring up the code. Make sure that the Kinect is properly connected, then run the code by pressing the Run button (right below the File menu). You should be able to see the output.

If you want to systematically learn Kinect coding using Processing and SimpleOpenNI, you can follow the book Making Things See. If you just want to try out a few other examples, the code from the book is available here.

Feel free to comment if any of the steps are unclear or if you face any problems. I shall be glad to offer what help I can.

Errors in my case:

error while loading shared libraries: libglut.so.3: cannot open shared object file: No such file or directory

Installing freeglut3 from the Ubuntu Software Center fixed it.
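Alternatively, the same package can be installed from a terminal (freeglut3 is the Ubuntu package that provides libglut.so.3):

sudo apt-get install freeglut3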

This is an example showing integration of OpenCV and PCL (Point Cloud Library): OpenCV (highgui) trackbars are used to adjust the x, y, z limits of PCL passthrough filters.

It is assumed that you have already installed OpenCV and PCL. Make sure that you have OpenCV version > 2.3 and PCL version > 1.1. In my case I am using OpenCV 2.3.1 and PCL 1.6 (compiled from the current trunk).

Before downloading and running the code, you might want to see what the output looks like.

Running the code:

This code takes a PLY file as input, applies passthrough filters to it and visualizes the result. You can adjust the X, Y, Z limits of the passthrough filters using trackbars and see the output on the fly.

Step 1: Download the code folder and extract it.

Step 2: Create a folder named “build” inside it. Open CMake; in the “Where is the source code” field provide the path to the folder, e.g. C:/Users/Sanmarino/Downloads/Integrating_Opencv_PCL_PassthroughFilters. In the “Where to build the binaries” field provide the path to the empty build folder you created, e.g. C:/Users/Sanmarino/Downloads/Integrating_Opencv_PCL_PassthroughFilters/build. Then click Configure at the bottom and choose the compiler you want to use when prompted. In my case I chose Visual Studio 10.

Step 3: If everything is fine after configuring, click Generate. Otherwise you might get an error like this –

“CMake Error at CMakeLists.txt:6 (find_package): By not providing “FindOpenCV.cmake” in CMAKE_MODULE_PATH this project has asked CMake to find a package configuration file provided by “OpenCV”, but CMake did not find one.

Could not find a package configuration file provided by “OpenCV” with any of the following names: OpenCVConfig.cmake opencv-config.cmake

Add the installation prefix of “OpenCV” to CMAKE_PREFIX_PATH or set “OpenCV_DIR” to a directory containing one of the above files. If “OpenCV” provides a separate development package or SDK, be sure it has been installed.

If this is the case, then we have to point OpenCV_DIR to the place where CMake can find OpenCVConfig.cmake. Click on OpenCV_DIR-NOTFOUND and change it to the path where OpenCVConfig.cmake exists; in my case it was C:\OpenCV\build, where I had built the OpenCV binaries. Now hit Configure again and then Generate.

Changed path of OpenCV_DIR:
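If you prefer the command line to the CMake GUI, the same fix can be applied by passing OpenCV_DIR when configuring from inside the build folder (the path below is just the one from my setup):

cmake -D OpenCV_DIR=C:/OpenCV/build ..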

Step 4: Now open the generated solution and build it in Release/Debug mode. Place scene_mesh.ply from the unzipped code folder in the folder where simple_visualizer.exe is generated. Run simple_visualizer.exe from the command prompt, giving scene_mesh.ply as the argument.

e.g. C:\Users\Sanmarino\Downloads\Integrating_Opencv_PCL_PassthroughFilters\build\Release> simple_visualizer.exe scene_mesh.ply. Move the trackbars to apply the passthrough filters to the point cloud and see the output.

simple_visualizer.cpp


#include <iostream>

// Point cloud library
#include <pcl/point_cloud.h>
#include <pcl/io/pcd_io.h>
#include <pcl/io/ply_io.h>
#include <pcl/point_types.h>
#include <pcl/filters/passthrough.h>
#include <pcl/visualization/pcl_visualizer.h>

// Opencv
#include <opencv2/core/core.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

// These are track bar initial settings adjusted to the given pointcloud to make it completely visible.
//  Need to be adjusted depending on the pointcloud and its xyz limits if used with new pointclouds.

int a = 22;
int b = 12;
int c=  10;

// PCL Visualizer to view the pointcloud
pcl::visualization::PCLVisualizer viewer ("Simple visualizing window");

int
	main (int argc, char** argv)
{
	// Point clouds of type pcl::PointXYZ (the point type is assumed here)
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud (new pcl::PointCloud<pcl::PointXYZ>);
	pcl::PointCloud<pcl::PointXYZ>::Ptr cloud_filtered (new pcl::PointCloud<pcl::PointXYZ>);

	if (pcl::io::loadPLYFile (argv[1], *cloud) == -1) //* load the ply file from command line
	{
		PCL_ERROR ("Couldn't load the file\n");
		return (-1);
	}

	pcl::copyPointCloud( *cloud,*cloud_filtered);

	float i ;
	float j;
	float k;

	cv::namedWindow( "picture");

	// Creating trackbars using OpenCV to control the PCL filter limits
	cvCreateTrackbar("X_limit", "picture", &a, 30, NULL);
	cvCreateTrackbar("Y_limit", "picture", &b, 30, NULL);
	cvCreateTrackbar("Z_limit", "picture", &c, 30, NULL);

	// Starting the while loop where we continually filter with limits using trackbars and display pointcloud
	char last_c = 0;
	while(true && (last_c != 27))
	{

		pcl::copyPointCloud(*cloud_filtered, *cloud);

		// i,j,k Need to be adjusted depending on the pointcloud and its xyz limits if used with new pointclouds.

		i = 0.1*((float)a);
		j = 0.1*((float)b);
		k = 0.1*((float)c);

		// Printing to ensure that the passthrough filter values are changing if we move trackbars.

		cout << "i = " << i << " j = " << j << " k = " << k << endl;

		// Applying passthrough filters with XYZ limits

		pcl::PassThrough<pcl::PointXYZ> pass;
		pass.setInputCloud (cloud);
		pass.setFilterFieldName ("y");
		//  pass.setFilterLimits (-0.1, 0.1);
		pass.setFilterLimits (-k, k);
		pass.filter (*cloud);

		pass.setInputCloud (cloud);
		pass.setFilterFieldName ("x");
		// pass.setFilterLimits (-0.1, 0.1);
		pass.setFilterLimits (-j, j);
		pass.filter (*cloud);

		pass.setInputCloud (cloud);
		pass.setFilterFieldName ("z");
		//  pass.setFilterLimits (-10, 10);
		pass.setFilterLimits (-i, i);
		pass.filter (*cloud);

		// Visualizing pointcloud
		viewer.addPointCloud (cloud, "scene_cloud");
		viewer.spinOnce();
		viewer.removePointCloud("scene_cloud");
	}

	return (0);
}

CMakeLists.txt

cmake_minimum_required(VERSION 2.8 FATAL_ERROR)

project(visualizer_grouping)

find_package(PCL 1.4 REQUIRED)
find_package(OpenCV REQUIRED)

include_directories(${PCL_INCLUDE_DIRS} )
link_directories(${PCL_LIBRARY_DIRS} )
add_definitions(${PCL_DEFINITIONS} )

add_executable (simple_visualizer simple_visualizer.cpp)
target_link_libraries (simple_visualizer ${PCL_LIBRARIES} ${OpenCV_LIBS})