Example Visual Odometry Depth
This example demonstrates how to estimate the camera's ego-motion using an RGB-D sensor, such as the Kinect, which provides both visual and depth information.
Example Code:
- ExampleVisualOdometryDepth.java (https://github.com/lessthanoptimal/BoofCV/blob/v0.38/examples/src/main/java/boofcv/examples/sfm/ExampleVisualOdometryDepth.java)
Concepts:
- RGB-D
Relevant Videos:
Related Examples:
Example Code
// Imports assume BoofCV v0.38; package locations may differ in other versions
import boofcv.abst.feature.detect.interest.ConfigPointDetector;
import boofcv.abst.feature.detect.interest.PointDetectorTypes;
import boofcv.abst.sfm.AccessPointTracks3D;
import boofcv.abst.sfm.d3.DepthVisualOdometry;
import boofcv.abst.tracker.PointTracker;
import boofcv.alg.sfm.DepthSparse3D;
import boofcv.alg.tracker.klt.ConfigPKlt;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.factory.tracker.FactoryPointTracker;
import boofcv.io.MediaManager;
import boofcv.io.UtilIO;
import boofcv.io.calibration.CalibrationIO;
import boofcv.io.image.SimpleImageSequence;
import boofcv.io.wrapper.DefaultMediaManager;
import boofcv.struct.calib.VisualDepthParameters;
import boofcv.struct.distort.DoNothing2Transform2_F32;
import boofcv.struct.image.GrayS16;
import boofcv.struct.image.GrayU16;
import boofcv.struct.image.GrayU8;
import boofcv.struct.image.ImageType;
import boofcv.struct.pyramid.ConfigDiscreteLevels;
import georegression.struct.point.Vector3D_F64;
import georegression.struct.se.Se3_F64;

import java.io.File;

/**
* Bare bones example showing how to estimate the camera's ego-motion using a depth camera system, e.g. Kinect.
* Additional information on the scene can be optionally extracted from the algorithm if it implements AccessPointTracks3D.
*
* @author Peter Abeles
*/
public class ExampleVisualOdometryDepth {
public static void main( String[] args ) {
MediaManager media = DefaultMediaManager.INSTANCE;
String directory = UtilIO.pathExample("kinect/straight");
// load camera description and the video sequence
VisualDepthParameters param = CalibrationIO.load(
media.openFile(new File(directory, "visualdepth.yaml").getPath()));
// specify how the image features are going to be tracked
ConfigPKlt configKlt = new ConfigPKlt();
configKlt.pyramidLevels = ConfigDiscreteLevels.levels(4);
configKlt.templateRadius = 3;
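// Configure the Shi-Tomasi corner detector used to spawn new tracks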
ConfigPointDetector configDet = new ConfigPointDetector();
configDet.type = PointDetectorTypes.SHI_TOMASI;
configDet.shiTomasi.radius = 3;
configDet.general.maxFeatures = 600;
configDet.general.radius = 3;
configDet.general.threshold = 1;
PointTracker<GrayU8> tracker = FactoryPointTracker.klt(configKlt, configDet, GrayU8.class, GrayS16.class);
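// Converts raw depth image values into 3D coordinates. Kinect depth is in millimeters, so 1e-3 scales it to meters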
DepthSparse3D<GrayU16> sparseDepth = new DepthSparse3D.I<>(1e-3);
// Declare the visual odometry algorithm: PnP-based motion estimation inside RANSAC using tracked features and their depth
DepthVisualOdometry<GrayU8, GrayU16> visualOdometry =
FactoryVisualOdometry.depthDepthPnP(1.5, 120, 2, 200, 50, true,
sparseDepth, tracker, GrayU8.class, GrayU16.class);
// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
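// The DoNothing2Transform2_F32 indicates the depth image is assumed to be registered to the visual image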
visualOdometry.setCalibration(param.visualParam, new DoNothing2Transform2_F32());
// Process the video sequence and output the location plus number of inliers
SimpleImageSequence<GrayU8> videoVisual = media.openVideo(
new File(directory, "rgb.mjpeg").getPath(), ImageType.single(GrayU8.class));
SimpleImageSequence<GrayU16> videoDepth = media.openVideo(
new File(directory, "depth.mpng").getPath(), ImageType.single(GrayU16.class));
while (videoVisual.hasNext()) {
GrayU8 visual = videoVisual.next();
GrayU16 depth = videoDepth.next();
if (!visualOdometry.process(visual, depth)) {
throw new RuntimeException("VO Failed!");
}
Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
Vector3D_F64 T = leftToWorld.getT();
System.out.printf("Location %8.2f %8.2f %8.2f, %s\n", T.x, T.y, T.z, trackStats(visualOdometry));
}
}
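/**
 * trackStats() is called in main() above but isn't defined in this snippet. This is a
 * minimal stand-in sketch, assuming the AccessPointTracks3D accessors (getTotalTracks,
 * isTrackInlier) found in recent BoofCV versions; see the full source on GitHub for the original.
 */
public static String trackStats( DepthVisualOdometry<?, ?> alg ) {
	// Track data is only exposed if the implementation provides AccessPointTracks3D
	if (!(alg instanceof AccessPointTracks3D))
		return "";
	AccessPointTracks3D access = (AccessPointTracks3D)alg;
	int total = access.getTotalTracks();
	int inliers = 0;
	for (int i = 0; i < total; i++) {
		if (access.isTrackInlier(i))
			inliers++;
	}
	return String.format("inliers %4d / tracks %4d", inliers, total);
}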
}