Difference between revisions of "Example Visual Odometry Monocular Plane"
From BoofCV
Jump to navigation Jump to search
m |
||
Line 3: | Line 3: | ||
Example Code: | Example Code: | ||
* [https://github.com/lessthanoptimal/BoofCV/blob/v0. | * [https://github.com/lessthanoptimal/BoofCV/blob/v0.37/examples/src/main/java/boofcv/examples/sfm/ExampleVisualOdometryMonocularPlane.java ExampleVisualOdometryMonocularPlane.java] | ||
Concepts: | Concepts: | ||
Line 23: | Line 23: | ||
*/ | */ | ||
public class ExampleVisualOdometryMonocularPlane { | public class ExampleVisualOdometryMonocularPlane { | ||
public static void main( String[] args ) { | |||
public static void main( String | |||
MediaManager media = DefaultMediaManager.INSTANCE; | MediaManager media = DefaultMediaManager.INSTANCE; | ||
Line 32: | Line 30: | ||
// load camera description and the video sequence | // load camera description and the video sequence | ||
MonoPlaneParameters calibration = CalibrationIO.load( | MonoPlaneParameters calibration = CalibrationIO.load( | ||
media.openFile(new File(directory , "mono_plane.yaml").getPath())); | media.openFile(new File(directory, "mono_plane.yaml").getPath())); | ||
SimpleImageSequence<GrayU8> video = media.openVideo( | SimpleImageSequence<GrayU8> video = media.openVideo( | ||
new File(directory , "left.mjpeg").getPath(), ImageType.single(GrayU8.class)); | new File(directory, "left.mjpeg").getPath(), ImageType.single(GrayU8.class)); | ||
// specify how the image features are going to be tracked | // specify how the image features are going to be tracked | ||
Line 56: | Line 54: | ||
// Process the video sequence and output the location plus number of inliers | // Process the video sequence and output the location plus number of inliers | ||
while( video.hasNext() ) { | while (video.hasNext()) { | ||
GrayU8 image = video.next(); | GrayU8 image = video.next(); | ||
if( !visualOdometry.process(image) ) { | if (!visualOdometry.process(image)) { | ||
System.out.println("Fault!"); | System.out.println("Fault!"); | ||
visualOdometry.reset(); | visualOdometry.reset(); | ||
Line 75: | Line 73: | ||
* and return a string. | * and return a string. | ||
*/ | */ | ||
public static String inlierPercent(VisualOdometry<?> alg) { | public static String inlierPercent( VisualOdometry<?> alg ) { | ||
if( !(alg instanceof AccessPointTracks3D)) | if (!(alg instanceof AccessPointTracks3D)) | ||
return ""; | return ""; | ||
Line 83: | Line 81: | ||
int count = 0; | int count = 0; | ||
int N = access.getTotalTracks(); | int N = access.getTotalTracks(); | ||
for( int i = 0; i < N; i++ ) { | for (int i = 0; i < N; i++) { | ||
if( access.isTrackInlier(i) ) | if (access.isTrackInlier(i)) | ||
count++; | count++; | ||
} | } | ||
return String.format("%%%5.3f", 100.0 * count / N); | return String.format("%%%5.3f", 100.0*count/N); | ||
} | } | ||
} | } | ||
</syntaxhighlight> | </syntaxhighlight> |
Revision as of 18:44, 21 December 2020
This example demonstrates how to estimate the camera's ego motion using a single camera and known plane. Since the plane's relative location to the camera is known there is no scale ambiguity, like there is with a more general single camera solution.
Example Code:
Concepts:
- Plane/Homography
Related Examples:
Example Code
/**
 * Bare bones example showing how to estimate the camera's ego-motion using a single camera and a known
 * plane. Because the plane's pose relative to the camera is known there is no scale ambiguity.
 * Additional information on the scene can be optionally extracted from the algorithm,
 * if it implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryMonocularPlane {
	public static void main( String[] args ) {
		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = UtilIO.pathExample("vo/drc/");

		// Load the camera/plane calibration and open the video sequence
		MonoPlaneParameters calibration = CalibrationIO.load(
				media.openFile(new File(directory, "mono_plane.yaml").getPath()));
		SimpleImageSequence<GrayU8> video = media.openVideo(
				new File(directory, "left.mjpeg").getPath(), ImageType.single(GrayU8.class));

		// Specify how the image features are going to be tracked: pyramidal KLT
		// seeded with Shi-Tomasi corners
		ConfigPKlt configKlt = new ConfigPKlt();
		configKlt.pyramidLevels = ConfigDiscreteLevels.levels(4);
		configKlt.templateRadius = 3;

		ConfigPointDetector configDetector = new ConfigPointDetector();
		configDetector.type = PointDetectorTypes.SHI_TOMASI;
		configDetector.general.maxFeatures = 600;
		configDetector.general.radius = 3;
		configDetector.general.threshold = 1;

		PointTracker<GrayU8> tracker = FactoryPointTracker.klt(configKlt, configDetector, GrayU8.class, null);

		// Declare the motion-estimation algorithm
		MonocularPlaneVisualOdometry<GrayU8> visualOdometry =
				FactoryVisualOdometry.monoPlaneInfinity(75, 2, 1.5, 200, tracker, ImageType.single(GrayU8.class));

		// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
		visualOdometry.setCalibration(calibration);

		// Process the video sequence and output the location plus number of inliers
		while (video.hasNext()) {
			GrayU8 image = video.next();

			if (!visualOdometry.process(image)) {
				System.out.println("Fault!");
				// NOTE: after reset() the pose printed below restarts from the origin
				visualOdometry.reset();
			}

			Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = leftToWorld.getT();

			System.out.printf("Location %8.2f %8.2f %8.2f inliers %s\n", T.x, T.y, T.z, inlierPercent(visualOdometry));
		}
	}

	/**
	 * If the algorithm implements AccessPointTracks3D, then compute the percent of tracks
	 * which are inliers and return it formatted as a string, e.g. {@code "%45.000"}.
	 *
	 * @param alg Visual odometry algorithm. May or may not implement AccessPointTracks3D.
	 * @return Formatted inlier percentage, or an empty string if the information is unavailable
	 * or there are no tracks.
	 */
	public static String inlierPercent( VisualOdometry<?> alg ) {
		if (!(alg instanceof AccessPointTracks3D))
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getTotalTracks();
		// Guard against divide-by-zero when no tracks exist; the original 100.0*count/N
		// would have printed "%  NaN"
		if (N == 0)
			return "";

		for (int i = 0; i < N; i++) {
			if (access.isTrackInlier(i))
				count++;
		}

		return String.format("%%%5.3f", 100.0*count/N);
	}
}