Example Visual Odometry Monocular Plane
This example demonstrates how to estimate the camera's ego-motion using a single camera and a known plane. Because the plane's location relative to the camera is known, there is no scale ambiguity, unlike in the more general single-camera solution.
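For intuition, consider the homography induced by a plane (standard multi-view geometry notation, not symbols taken from the BoofCV code). Two views related by rotation R and translation t, observing a plane with unit normal n at distance d from the first camera, have their pixel coordinates related by

    H = K\left(R + \frac{t\,n^\top}{d}\right)K^{-1}

Decomposing H only recovers R and the ratio t/d, which is why general monocular odometry is scale-ambiguous; here the calibration supplies the plane's pose relative to the camera, including d, so t comes out in metric units.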
Example Code:
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.39/examples/src/main/java/boofcv/examples/sfm/ExampleVisualOdometryMonocularPlane.java ExampleVisualOdometryMonocularPlane.java]
Concepts:
* Plane/Homography
Related Examples:
Example Code
/**
 * Bare bones example showing how to estimate the camera's ego-motion using a single camera and a known
 * plane. Additional information on the scene can be optionally extracted from the algorithm,
 * if it implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryMonocularPlane {
	public static void main( String[] args ) {
		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = UtilIO.pathExample("vo/drc/");

		// load camera description and the video sequence
		MonoPlaneParameters calibration = CalibrationIO.load(
				media.openFile(new File(directory, "mono_plane.yaml").getPath()));
		SimpleImageSequence<GrayU8> video = media.openVideo(
				new File(directory, "left.mjpeg").getPath(), ImageType.single(GrayU8.class));

		var config = new ConfigPlanarTrackPnP();

		// specify how the image features are going to be tracked
		config.tracker.typeTracker = ConfigPointTracker.TrackerType.KLT;
		config.tracker.klt.pyramidLevels = ConfigDiscreteLevels.levels(4); // 4-level image pyramid
		config.tracker.klt.templateRadius = 3; // 7x7 pixel templates

		config.tracker.detDesc.detectPoint.type = PointDetectorTypes.SHI_TOMASI;
		config.tracker.detDesc.detectPoint.general.maxFeatures = 600;
		config.tracker.detDesc.detectPoint.general.radius = 3;
		config.tracker.detDesc.detectPoint.general.threshold = 1;

		// Configure how visual odometry works
		config.thresholdAdd = 75; // spawn new tracks when the inlier set drops below this size
		config.thresholdRetire = 2; // drop tracks which haven't been inliers for this many frames
		config.ransac.iterations = 200;
		config.ransac.inlierThreshold = 1.5; // reprojection error, in pixels

		// declares the algorithm
		MonocularPlaneVisualOdometry<GrayU8> visualOdometry =
				FactoryVisualOdometry.monoPlaneInfinity(config, GrayU8.class);

		// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
		visualOdometry.setCalibration(calibration);

		// Process the video sequence and output the location plus number of inliers
		long startTime = System.nanoTime();
		while (video.hasNext()) {
			GrayU8 image = video.next();

			if (!visualOdometry.process(image)) {
				System.out.println("Fault!");
				visualOdometry.reset();
			}

			Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = leftToWorld.getT();

			System.out.printf("Location %8.2f %8.2f %8.2f, %s\n", T.x, T.y, T.z, trackStats(visualOdometry));
		}
		System.out.printf("FPS %4.2f\n", video.getFrameNumber()/((System.nanoTime() - startTime)*1e-9));
	}
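
	/**
	 * Hypothetical stand-in for the trackStats() helper called above; in the BoofCV sources it is
	 * shared across the visual odometry examples rather than defined in this file. This sketch
	 * assumes the algorithm implements AccessPointTracks3D and that its getTotalTracks() and
	 * isTrackInlier() accessors behave as described, reporting the inlier fraction of active tracks.
	 */
	public static String trackStats( MonocularPlaneVisualOdometry<?> alg ) {
		if (!(alg instanceof AccessPointTracks3D))
			return "";
		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int total = access.getTotalTracks();
		int inliers = 0;
		for (int i = 0; i < total; i++) {
			if (access.isTrackInlier(i))
				inliers++;
		}
		return "inliers " + inliers + " / tracks " + total;
	}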
}
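
The loop above prints the camera's absolute position in the world frame. To report per-frame displacement or total distance travelled instead, consecutive camera-to-world estimates can be differenced with georegression's SE(3) operations, which the example already depends on. A minimal sketch, not part of the original example, reusing the video and visualOdometry objects from above:

	// Accumulate the distance travelled by differencing consecutive pose estimates
	Se3_F64 prevToWorld = null;
	double distance = 0;
	while (video.hasNext()) {
		if (!visualOdometry.process(video.next()))
			continue; // this sketch simply skips faulted frames
		Se3_F64 currToWorld = visualOdometry.getCameraToWorld().copy();
		if (prevToWorld != null) {
			// current-to-previous = current-to-world followed by world-to-previous
			Se3_F64 currToPrev = currToWorld.concat(prevToWorld.invert(null), null);
			distance += currToPrev.getT().norm();
		}
		prevToWorld = currToWorld;
	}
	System.out.printf("Total distance travelled: %.2f (calibration units)\n", distance);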