Example Visual Odometry Depth

This example demonstrates how to estimate the camera's ego motion using an RGB-D sensor, such as the Kinect, which provides both visual and depth information.

Example Code:

Concepts:

  • RGB-D

Relevant Applets:

Related Examples:

Example Code

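The example assumes the imports below; the package paths are a best-effort reconstruction for the BoofCV release of this era and may differ in other versions.

import boofcv.abst.feature.detect.interest.ConfigGeneralDetector;
import boofcv.abst.feature.tracker.PointTrackerTwoPass;
import boofcv.abst.sfm.AccessPointTracks3D;
import boofcv.abst.sfm.d3.DepthVisualOdometry;
import boofcv.abst.sfm.d3.VisualOdometry;
import boofcv.alg.distort.DoNothingPixelTransform_F32;
import boofcv.alg.sfm.DepthSparse3D;
import boofcv.alg.tracker.pklt.PkltConfig;
import boofcv.factory.feature.tracker.FactoryPointTrackerTwoPass;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.io.MediaManager;
import boofcv.io.image.SimpleImageSequence;
import boofcv.io.wrapper.DefaultMediaManager;
import boofcv.misc.BoofMiscOps;
import boofcv.struct.calib.VisualDepthParameters;
import boofcv.struct.image.*;
import georegression.struct.point.Vector3D_F64;
import georegression.struct.se.Se3_F64;

import java.io.IOException;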
/**
 * Bare bones example showing how to estimate the camera's ego-motion using a depth camera system, e.g. Kinect.
 * Additional information on the scene can be optionally extracted from the algorithm if it implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryDepth {

	public static void main( String[] args ) throws IOException {

		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = "../data/applet/kinect/straight/";

		// load camera description and the video sequence
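		// VisualDepthParameters stores the RGB camera's intrinsic calibration
		// (used below as param.visualParam) along with the depth sensor's settings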
		VisualDepthParameters param = BoofMiscOps.loadXML(media.openFile(directory + "visualdepth.xml"));

		// specify how the image features are going to be tracked
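		// A pyramidal KLT tracker is used.  pyramidScaling selects the pyramid levels
		// (full resolution down to 1/8 scale) so that larger motions can be tracked,
		// while templateRadius = 3 gives each feature a 7x7 pixel template.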
		PkltConfig<ImageUInt8, ImageSInt16> configKlt = PkltConfig.createDefault(ImageUInt8.class, ImageSInt16.class);
		configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
		configKlt.templateRadius = 3;

		PointTrackerTwoPass<ImageUInt8> tracker =
				FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1));

		DepthSparse3D<ImageUInt16> sparseDepth = new DepthSparse3D.I<ImageUInt16>(1e-3);

		// Declare the visual odometry algorithm.  The numeric arguments configure, in
		// order: the RANSAC inlier tolerance in pixels, when new tracks are spawned,
		// when outlier tracks are dropped, the number of RANSAC iterations, the number
		// of refinement iterations, and whether the tracker runs a second pass using
		// the estimated motion.
		DepthVisualOdometry<ImageUInt8,ImageUInt16> visualOdometry =
				FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true,
				sparseDepth, tracker, ImageUInt8.class, ImageUInt16.class);

		// Pass in the intrinsic calibration.  DoNothingPixelTransform_F32 indicates that
		// the visual and depth images are already registered to each other, so no pixel
		// transform needs to be applied.  The calibration can be changed in the future.
		visualOdometry.setCalibration(param.visualParam,new DoNothingPixelTransform_F32());

		// Process the video sequence and print the camera's location along with the
		// percent of tracks which are inliers
		SimpleImageSequence<ImageUInt8> videoVisual = media.openVideo(directory+"rgb.mjpeg",ImageDataType.single(ImageUInt8.class));
		SimpleImageSequence<ImageUInt16> videoDepth = media.openVideo(directory + "depth.mpng", ImageDataType.single(ImageUInt16.class));

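		// The two streams are assumed to be synchronized, providing one depth image
		// for each visual frame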
		while( videoVisual.hasNext() ) {
			ImageUInt8 visual = videoVisual.next();
			ImageUInt16 depth = videoDepth.next();

			if( !visualOdometry.process(visual,depth) ) {
				throw new RuntimeException("VO Failed!");
			}

			// estimated camera pose in world coordinates; its translation is the camera's location
			Se3_F64 cameraToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = cameraToWorld.getT();

			System.out.printf("Location %8.2f %8.2f %8.2f      inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
		}
	}

	/**
	 * If the algorithm implements AccessPointTracks3D, computes the percent of
	 * point tracks which are inliers and returns it as a formatted string.
	 * Otherwise an empty string is returned.
	 */
	public static String inlierPercent(VisualOdometry alg) {
		if( !(alg instanceof AccessPointTracks3D))
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getAllTracks().size();
		if( N == 0 )
			return "";  // avoid division by zero when there are no tracks
		for( int i = 0; i < N; i++ ) {
			if( access.isInlier(i) )
				count++;
		}

		return String.format("%%%5.3f", 100.0 * count / N);
	}
}