Example Visual Odometry Depth

This example demonstrates how to estimate the camera's ego-motion using an RGB-D sensor, such as the Kinect, which provides both visual and depth information.

Example Code:

  • [https://github.com/lessthanoptimal/BoofCV/blob/v0.18/examples/src/boofcv/examples/sfm/ExampleVisualOdometryDepth.java ExampleVisualOdometryDepth.java]

Concepts:

  • RGB-D

Relevant Videos:

Relevant Applets:

Related Examples:

Example Code

/**
 * Bare bones example showing how to estimate the camera's ego-motion using a depth camera system, e.g. Kinect.
 * Additional information on the scene can be optionally extracted from the algorithm if it implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryDepth {

	public static void main( String[] args ) throws IOException {

		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = "../data/applet/kinect/straight/";

		// load camera description and the video sequence
		VisualDepthParameters param = UtilIO.loadXML(media.openFile(directory + "visualdepth.xml"));

		// specify how the image features are going to be tracked
		PkltConfig configKlt = new PkltConfig();
		configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
		configKlt.templateRadius = 3;
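		// (the pyramid levels 1,2,4,8 are relative scales, with 1 being full resolution;
		// templateRadius = 3 means each feature is tracked using a 7x7 pixel template)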

		PointTrackerTwoPass<ImageUInt8> tracker =
				FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1),
						ImageUInt8.class, ImageSInt16.class);

		// computes 3D coordinates of sparse features from the depth image; the 1e-3 scale
		// converts the Kinect's millimeter depth values into meters
		DepthSparse3D<ImageUInt16> sparseDepth = new DepthSparse3D.I<ImageUInt16>(1e-3);

		// declares the algorithm: 3D point tracks (KLT features + depth) are used to
		// estimate the camera's motion with PnP robustified by RANSAC.  The numeric
		// arguments are tuning parameters, e.g. inlier tolerance and iteration counts.
		DepthVisualOdometry<ImageUInt8,ImageUInt16> visualOdometry =
				FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true,
				sparseDepth, tracker, ImageUInt8.class, ImageUInt16.class);

		// Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
		visualOdometry.setCalibration(param.visualParam,new DoNothingPixelTransform_F32());
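		// (DoNothingPixelTransform_F32 indicates that no transform is applied between
		// visual and depth pixels, i.e. the two images are assumed to be registered)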

		// Process the video sequence and output the location plus number of inliers
		SimpleImageSequence<ImageUInt8> videoVisual = media.openVideo(directory+"rgb.mjpeg", ImageType.single(ImageUInt8.class));
		SimpleImageSequence<ImageUInt16> videoDepth = media.openVideo(directory + "depth.mpng", ImageType.single(ImageUInt16.class));

		while( videoVisual.hasNext() ) {
			ImageUInt8 visual = videoVisual.next();
			ImageUInt16 depth = videoDepth.next();

			if( !visualOdometry.process(visual,depth) ) {
				throw new RuntimeException("VO Failed!");
			}

			Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = leftToWorld.getT();
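			// T is the camera's current location in the world frame; given the 1e-3
			// depth scale above, the units are meters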

			System.out.printf("Location %8.2f %8.2f %8.2f      inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
		}
	}

	/**
	 * If the algorithm implements AccessPointTracks3D then the percent of tracked features
	 * which are inliers is computed and returned as a string.
	 */
	public static String inlierPercent(VisualOdometry alg) {
		if( !(alg instanceof AccessPointTracks3D))
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getAllTracks().size();
		for( int i = 0; i < N; i++ ) {
			if( access.isInlier(i) )
				count++;
		}

		return String.format("%%%5.3f", 100.0 * count / N);
	}
}
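
The class Javadoc notes that additional information about the scene can be extracted when the algorithm implements AccessPointTracks3D. The sketch below shows how the 3D location of each inlier feature might be printed. It is illustrative only, and assumes that the v0.18 interface provides getTrackLocation() alongside the getAllTracks() and isInlier() calls already used above, and that georegression's Point3D_F64 is imported.

	/**
	 * Illustrative sketch (not part of the original example): prints the estimated 3D
	 * location of every inlier feature.  Assumes AccessPointTracks3D.getTrackLocation()
	 * exists and returns the point in the key-frame's coordinate system.
	 */
	public static void printInlierLocations( VisualOdometry alg ) {
		if( !(alg instanceof AccessPointTracks3D) )
			return;

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int N = access.getAllTracks().size();
		for( int i = 0; i < N; i++ ) {
			if( !access.isInlier(i) )
				continue;

			// 3D location of the tracked feature, in meters given the depth scale above
			Point3D_F64 p = access.getTrackLocation(i);
			System.out.printf("track %4d at %8.3f %8.3f %8.3f\n", i, p.x, p.y, p.z);
		}
	}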