Example Visual Odometry Depth

This example demonstrates how to estimate the camera's ego motion using an RGB-D sensor, such as the Kinect, which provides both visual and depth information.

Example Code:
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.39/examples/src/main/java/boofcv/examples/sfm/ExampleVisualOdometryDepth.java ExampleVisualOdometryDepth.java]

Concepts:

* RGB-D

Relevant Videos:
* [http://www.youtube.com/watch?v=kOGXsf1tP3A YouTube]

Related Examples:

Example Code

<syntaxhighlight lang="java">
// Imports match BoofCV v0.39; package paths may differ slightly in other versions
import boofcv.abst.sfm.AccessPointTracks3D;
import boofcv.abst.sfm.d3.DepthVisualOdometry;
import boofcv.abst.sfm.d3.VisualOdometry;
import boofcv.abst.tracker.ConfigPointTracker;
import boofcv.factory.feature.detect.interest.PointDetectorTypes;
import boofcv.factory.sfm.ConfigRgbDepthTrackPnP;
import boofcv.factory.sfm.FactoryVisualOdometry;
import boofcv.io.MediaManager;
import boofcv.io.UtilIO;
import boofcv.io.calibration.CalibrationIO;
import boofcv.io.image.SimpleImageSequence;
import boofcv.io.wrapper.DefaultMediaManager;
import boofcv.struct.calib.VisualDepthParameters;
import boofcv.struct.distort.DoNothing2Transform2_F32;
import boofcv.struct.image.GrayU16;
import boofcv.struct.image.GrayU8;
import boofcv.struct.image.ImageType;
import georegression.struct.point.Vector3D_F64;
import georegression.struct.se.Se3_F64;

import java.io.File;

/**
 * Bare bones example showing how to estimate the camera's ego-motion using a depth camera system, e.g. Kinect.
 * Additional information about the scene can optionally be extracted if the algorithm implements AccessPointTracks3D.
 *
 * @author Peter Abeles
 */
public class ExampleVisualOdometryDepth {
	public static void main( String[] args ) {
		MediaManager media = DefaultMediaManager.INSTANCE;

		String directory = UtilIO.pathExample("kinect/straight");

		// load camera description and the video sequence
		VisualDepthParameters param = CalibrationIO.load(
				media.openFile(new File(directory, "visualdepth.yaml").getPath()));
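		// visualdepth.yaml stores the RGB camera's intrinsic parameters (visualParam)
		// along with parameters describing the depth sensor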

		// specify how the image features are going to be tracked
		ConfigRgbDepthTrackPnP config = new ConfigRgbDepthTrackPnP();
		config.depthScale = 1e-3; // convert depth image distance units to meters

		config.tracker.typeTracker = ConfigPointTracker.TrackerType.KLT;
		config.tracker.detDesc.detectPoint.type = PointDetectorTypes.SHI_TOMASI;
		config.tracker.detDesc.detectPoint.shiTomasi.radius = 3;
		config.tracker.detDesc.detectPoint.general.maxFeatures = 600;
		config.tracker.detDesc.detectPoint.general.radius = 3;
		config.tracker.detDesc.detectPoint.general.threshold = 1;
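		// The settings above select a KLT tracker seeded with up to 600 Shi-Tomasi corners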

		// declares the algorithm
		DepthVisualOdometry<GrayU8, GrayU16> visualOdometry =
				FactoryVisualOdometry.rgbDepthPnP(config, GrayU8.class, GrayU16.class);

		// Pass in intrinsic/extrinsic calibration. This can be changed in the future.
		visualOdometry.setCalibration(param.visualParam, new DoNothing2Transform2_F32());
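		// DoNothing2Transform2_F32 applies no pixel transform, i.e. the depth image is
		// assumed to already be registered to the visual image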

		// Process the video sequence and output the location plus number of inliers
		SimpleImageSequence<GrayU8> videoVisual = media.openVideo(
				new File(directory, "rgb.mjpeg").getPath(), ImageType.single(GrayU8.class));
		SimpleImageSequence<GrayU16> videoDepth = media.openVideo(
				new File(directory, "depth.mpng").getPath(), ImageType.single(GrayU16.class));

		long startTime = System.nanoTime();
		while (videoVisual.hasNext()) {
			GrayU8 visual = videoVisual.next();
			GrayU16 depth = videoDepth.next();

			if (!visualOdometry.process(visual, depth)) {
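				// A real application could call visualOdometry.reset() and continue
				// instead of aborting when tracking fails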
				throw new RuntimeException("VO Failed!");
			}

			Se3_F64 leftToWorld = visualOdometry.getCameraToWorld();
			Vector3D_F64 T = leftToWorld.getT();

			System.out.printf("Location %8.2f %8.2f %8.2f, %s\n", T.x, T.y, T.z, trackStats(visualOdometry));
		}
		System.out.printf("FPS %4.2f\n", videoVisual.getFrameNumber()/((System.nanoTime() - startTime)*1e-9));
	}

	/**
	 * If the algorithm implements AccessPointTracks3D, then count the number of inlier features
	 * and return a string
	 */
	public static String trackStats( VisualOdometry alg ) {
		if (!(alg instanceof AccessPointTracks3D))
			return "";

		AccessPointTracks3D access = (AccessPointTracks3D)alg;

		int count = 0;
		int N = access.getTotalTracks();
		for (int i = 0; i < N; i++) {
			if (access.isTrackInlier(i))
				count++;
		}

		return String.format("inliers %5.1f%% of %d tracks", 100.0*count/N, N);
	}
}
</syntaxhighlight>
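
The class Javadoc notes that additional scene information can be extracted when the algorithm implements AccessPointTracks3D. The sketch below shows one way that might look; the helper name printTracks is hypothetical, and it assumes the v0.39 AccessPointTracks method names (getTotalTracks(), getTrackPixel(), getTrackWorld3D(), isTrackInlier()).

<syntaxhighlight lang="java">
import boofcv.abst.sfm.AccessPointTracks3D;
import boofcv.abst.sfm.d3.VisualOdometry;
import georegression.struct.point.Point2D_F64;
import georegression.struct.point.Point3D_F64;

public class TrackInspection {
	/** Prints each track's pixel location, 3D location (when known), and inlier status */
	public static void printTracks( VisualOdometry alg ) {
		if (!(alg instanceof AccessPointTracks3D))
			return;

		AccessPointTracks3D access = (AccessPointTracks3D)alg;
		Point2D_F64 pixel = new Point2D_F64();
		Point3D_F64 world = new Point3D_F64();

		for (int i = 0; i < access.getTotalTracks(); i++) {
			access.getTrackPixel(i, pixel);                   // track location in the current image
			boolean has3D = access.getTrackWorld3D(i, world); // 3D location in the world frame, if known
			System.out.printf("track %4d pixel (%6.1f, %6.1f) inlier=%b 3D=%s\n",
					i, pixel.x, pixel.y, access.isTrackInlier(i), has3D ? world : "n/a");
		}
	}
}
</syntaxhighlight>

Calling printTracks(visualOdometry) inside the processing loop, right after the location is printed, would dump the state of every track for that frame.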