Example Visual Odometry Monocular Plane

This example demonstrates how to estimate the camera's ego-motion using a single camera and a known plane. Because the plane's location relative to the camera is known, there is no scale ambiguity, unlike the more general single-camera solution.
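Why the known plane removes the ambiguity can be seen from the standard plane-induced homography: if the plane has unit normal <math>n</math> at known distance <math>d</math> from the camera, two views of it are related by

<math>H = R + \frac{t\,n^\mathsf{T}}{d}</math>

so a known <math>d</math> pins down the magnitude of the translation <math>t</math>, while in the general monocular case only the direction of <math>t</math> is recoverable.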

Example Code:

* [https://github.com/lessthanoptimal/BoofCV/blob/v0.39/examples/src/main/java/boofcv/examples/sfm/ExampleVisualOdometryMonocularPlane.java ExampleVisualOdometryMonocularPlane.java]

Concepts:

* Plane/Homography

Related Examples:

Example Code

<syntaxhighlight lang="java">
/**
 * Bare bones example showing how to estimate the camera's ego-motion using a single camera and a known
 * plane. Additional information on the scene can be optionally extracted from the algorithm,
 * if it implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryMonocularPlane {
	public static void main( String[] args ) {
		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = UtilIO.pathExample("vo/drc/");

		// load camera description and the video sequence
		MonoPlaneParameters calibration = CalibrationIO.load(
				media.openFile(new File(directory, "mono_plane.yaml").getPath()));
		SimpleImageSequence<GrayU8> video = media.openVideo(
				new File(directory, "left.mjpeg").getPath(), ImageType.single(GrayU8.class));

		var config = new ConfigPlanarTrackPnP();

		// specify how the image features are going to be tracked
		config.tracker.typeTracker = ConfigPointTracker.TrackerType.KLT;
		config.tracker.klt.pyramidLevels = ConfigDiscreteLevels.levels(4);
		config.tracker.klt.templateRadius = 3;

		config.tracker.detDesc.detectPoint.type = PointDetectorTypes.SHI_TOMASI;
		config.tracker.detDesc.detectPoint.general.maxFeatures = 600;
		config.tracker.detDesc.detectPoint.general.radius = 3;
		config.tracker.detDesc.detectPoint.general.threshold = 1;

		// Configure how visual odometry works
		config.thresholdAdd = 75; // spawn new tracks when the inlier set drops below this size
		config.thresholdRetire = 2; // drop tracks not in the inlier set for this many frames in a row
		config.ransac.iterations = 200;
		config.ransac.inlierThreshold = 1.5; // max reprojection error (pixels) for a track to be an inlier

		// declares the algorithm
		MonocularPlaneVisualOdometry<GrayU8> visualOdometry = FactoryVisualOdometry.monoPlaneInfinity(config, GrayU8.class);

		// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
		visualOdometry.setCalibration(calibration);

		// Process the video sequence and output the location plus number of inliers
		long startTime = System.nanoTime();
		while (video.hasNext()) {
			GrayU8 image = video.next();

			if (!visualOdometry.process(image)) {
				System.out.println("Fault!");
				visualOdometry.reset();
			}

			Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = leftToWorld.getT();

			System.out.printf("Location %8.2f %8.2f %8.2f, %s\n", T.x, T.y, T.z, trackStats(visualOdometry));
		}
		System.out.printf("FPS %4.2f\n", video.getFrameNumber()/((System.nanoTime() - startTime)*1e-9));
	}

	/**
	 * If the algorithm implements AccessPointTracks3D, then count the number of inlier features
	 * and return a string summarizing them.
	 */
	public static String trackStats( VisualOdometry<?> alg ) {
		if (!(alg instanceof AccessPointTracks3D))
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getTotalTracks();
		for (int i = 0; i < N; i++) {
			if (access.isTrackInlier(i))
				count++;
		}

		return String.format("inliers %5.1f%%", 100.0*count/N);
	}
}
</syntaxhighlight>
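As the class comment notes, more scene information is available when the returned implementation also provides AccessPointTracks3D. Below is a minimal sketch of reading per-track 3D positions through that interface; it assumes the v0.39 API, where getTrackWorld3D() copies a track's estimated world location into a caller-supplied Point3D_F64 (getTotalTracks() and isTrackInlier() are the same calls trackStats() uses above).

<syntaxhighlight lang="java">
// Sketch only: getTrackWorld3D() is the assumed v0.39 accessor for a track's
// estimated 3D position; Point3D_F64 comes from georegression.
if (visualOdometry instanceof AccessPointTracks3D) {
	AccessPointTracks3D access = (AccessPointTracks3D)visualOdometry;
	var world = new Point3D_F64();
	for (int i = 0; i < access.getTotalTracks(); i++) {
		if (!access.isTrackInlier(i))
			continue; // only report tracks which agree with the motion estimate
		access.getTrackWorld3D(i, world);
		System.out.printf("track %4d at %6.2f %6.2f %6.2f\n", i, world.x, world.y, world.z);
	}
}
</syntaxhighlight>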