Difference between revisions of "Example Visual Odometry Monocular Plane"

From BoofCV
Jump to navigation | Jump to search
(adding)
 
m
(9 intermediate revisions by the same user not shown)
Line 3: Line 3:


Example Code:
Example Code:
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.15/examples/src/boofcv/examples/ExampleVisualOdometryMonocularPlane.java ExampleVisualOdometryMonocularPlane.java]
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.36/examples/src/main/java/boofcv/examples/sfm/ExampleVisualOdometryMonocularPlane.java ExampleVisualOdometryMonocularPlane.java]


Concepts:
Concepts:
* Plane/Homography
* Plane/Homography
Relevant Applets:
* [[Applet Visual Odometry Monocular Plane]]


Related Examples:
Related Examples:
Line 31: Line 28:
MediaManager media = DefaultMediaManager.INSTANCE;
MediaManager media = DefaultMediaManager.INSTANCE;


String directory = "../data/applet/vo/drc/";
String directory = UtilIO.pathExample("vo/drc/");


// load camera description and the video sequence
// load camera description and the video sequence
MonoPlaneParameters calibration = BoofMiscOps.loadXML(media.openFile(directory + "mono_plane.xml"));
MonoPlaneParameters calibration = CalibrationIO.load(
SimpleImageSequence<ImageUInt8> video = media.openVideo(directory + "left.mjpeg", ImageDataType.single(ImageUInt8.class));
media.openFile(new File(directory , "mono_plane.yaml").getPath()));
SimpleImageSequence<GrayU8> video = media.openVideo(
new File(directory , "left.mjpeg").getPath(), ImageType.single(GrayU8.class));


// specify how the image features are going to be tracked
// specify how the image features are going to be tracked
PkltConfig<ImageUInt8, ImageSInt16> configKlt = PkltConfig.createDefault(ImageUInt8.class, ImageSInt16.class);
ConfigPKlt configKlt = new ConfigPKlt();
configKlt.pyramidScaling = new int[]{1, 2, 4, 8};
configKlt.pyramidLevels = ConfigDiscreteLevels.levels(4);
configKlt.templateRadius = 3;
configKlt.templateRadius = 3;
ConfigPointDetector configDetector = new ConfigPointDetector();
configDetector.type = PointDetectorTypes.SHI_TOMASI;
configDetector.general.maxFeatures = 600;
configDetector.general.radius = 3;
configDetector.general.threshold = 1;


PointTrackerTwoPass<ImageUInt8> tracker =
PointTracker<GrayU8> tracker = FactoryPointTracker.klt(configKlt, configDetector, GrayU8.class, null);
FactoryPointTrackerTwoPass.klt(configKlt, new ConfigGeneralDetector(600, 3, 1));


// declares the algorithm
// declares the algorithm
MonocularPlaneVisualOdometry<ImageUInt8> visualOdometry =
MonocularPlaneVisualOdometry<GrayU8> visualOdometry =
FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, ImageDataType.single(ImageUInt8.class));
FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, ImageType.single(GrayU8.class));


// Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
// Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
Line 54: Line 57:
// Process the video sequence and output the location plus number of inliers
// Process the video sequence and output the location plus number of inliers
while( video.hasNext() ) {
while( video.hasNext() ) {
ImageUInt8 left = video.next();
GrayU8 image = video.next();


if( !visualOdometry.process(left) ) {
if( !visualOdometry.process(image) ) {
throw new RuntimeException("VO Failed!");
System.out.println("Fault!");
visualOdometry.reset();
}
}


Line 78: Line 82:


int count = 0;
int count = 0;
int N = access.getAllTracks().size();
int N = access.getTotalTracks();
for( int i = 0; i < N; i++ ) {
for( int i = 0; i < N; i++ ) {
if( access.isInlier(i) )
if( access.isTrackInlier(i) )
count++;
count++;
}
}

Revision as of 22:19, 17 May 2020

This example demonstrates how to estimate the camera's ego motion using a single camera and known plane. Since the plane's relative location to the camera is known there is no scale ambiguity, like there is with a more general single camera solution.

Example Code:

Concepts:

  • Plane/Homography

Related Examples:

Example Code

/**
 * Bare bones example showing how to estimate the camera's ego-motion using a single camera and a known
 * plane. Since the plane's location relative to the camera is known there is no scale ambiguity,
 * unlike the general monocular case. Additional information on the scene can be optionally extracted
 * from the algorithm, if it implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryMonocularPlane {

	public static void main( String[] args ) {

		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = UtilIO.pathExample("vo/drc/");

		// Load the camera/plane description and open the video sequence
		MonoPlaneParameters calibration = CalibrationIO.load(
				media.openFile(new File(directory, "mono_plane.yaml").getPath()));
		SimpleImageSequence<GrayU8> video = media.openVideo(
				new File(directory, "left.mjpeg").getPath(), ImageType.single(GrayU8.class));

		// Specify how the image features are going to be tracked.
		// KLT pyramid tracker seeded with Shi-Tomasi corners.
		ConfigPKlt configKlt = new ConfigPKlt();
		configKlt.pyramidLevels = ConfigDiscreteLevels.levels(4);
		configKlt.templateRadius = 3;
		ConfigPointDetector configDetector = new ConfigPointDetector();
		configDetector.type = PointDetectorTypes.SHI_TOMASI;
		configDetector.general.maxFeatures = 600;
		configDetector.general.radius = 3;
		configDetector.general.threshold = 1;

		PointTracker<GrayU8> tracker = FactoryPointTracker.klt(configKlt, configDetector, GrayU8.class, null);

		// Declares the visual odometry algorithm
		MonocularPlaneVisualOdometry<GrayU8> visualOdometry =
				FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, ImageType.single(GrayU8.class));

		// Pass in intrinsic/extrinsic calibration.  This can be changed in the future.
		visualOdometry.setCalibration(calibration);

		// Process the video sequence and output the location plus number of inliers
		while( video.hasNext() ) {
			GrayU8 image = video.next();

			if( !visualOdometry.process(image) ) {
				// A fault is not fatal for this example; reset the estimator and keep going
				System.out.println("Fault!");
				visualOdometry.reset();
			}

			Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = leftToWorld.getT();

			System.out.printf("Location %8.2f %8.2f %8.2f      inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
		}
	}

	/**
	 * If the algorithm implements AccessPointTracks3D, computes the percent of tracked features
	 * which are inliers and returns it as a formatted string, e.g. {@code %85.000}.
	 *
	 * @param alg the visual odometry algorithm being queried
	 * @return formatted inlier percentage, or an empty string if track access is unsupported
	 */
	public static String inlierPercent(VisualOdometry<?> alg) {
		if( !(alg instanceof AccessPointTracks3D))
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getTotalTracks();
		for( int i = 0; i < N; i++ ) {
			if( access.isTrackInlier(i) )
				count++;
		}

		// Guard against divide-by-zero: with no tracks the original expression yields NaN
		if( N == 0 )
			return String.format("%%%5.3f", 0.0);

		return String.format("%%%5.3f", 100.0 * count / N);
	}
}