Example Stereo Single Camera
[Image: Associated inlier features between two views]
[Image: Stereo disparity image] [Image: 3D point cloud]
In this scenario we wish to compute a dense point cloud from two views taken with the same calibrated camera. Because the intrinsic parameters are known, it is easier to converge towards a valid solution than in the entirely uncalibrated scenario.

Because the extrinsic parameters (translation and rotation) between the two views are unknown, the scene's structure can only be recovered up to a scale factor (made precise in the note after the list below). In this example, natural features are used to determine the geometric relationship between the two views. The algorithm can be summarized as follows:
- Load camera calibration and two images
- Detect, describe, and associate image features
- Compute camera motion (Essential matrix)
- Rectify image pair
- Compute dense stereo disparity
- Convert into 3D point cloud
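The scale ambiguity can be made precise with the standard epipolar relation (background notation, not code from this example). For a matched pair of features <math>x_1,\,x_2</math> in normalized image coordinates, the essential matrix constraint is

<math>x_2^\top E\,x_1 = 0, \qquad E = [t]_\times R,</math>

where <math>R</math> and <math>t</math> are the rotation and translation from the first view to the second and <math>[t]_\times</math> is the skew-symmetric cross-product matrix. Because the constraint is homogeneous, <math>E</math>, and hence the length of <math>t</math>, is only determined up to an arbitrary scale factor, which is why the reconstruction has no absolute scale.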
Example File:

- [https://github.com/lessthanoptimal/BoofCV/blob/v0.41/examples/src/main/java/boofcv/examples/stereo/ExampleStereoTwoViewsOneCamera.java ExampleStereoTwoViewsOneCamera.java]
Concepts:
- Point feature association
- Epipolar geometry
- Rectification
- Dense stereo processing
Related Videos:

- [https://youtu.be/wOXuphyzypc Android Two View Stereo]
- [https://www.youtube.com/watch?v=8pn9Ebw90uk&t=672s Updates 0.24-0.36]
Related Tutorials/Example Code:

- [[Example_Stereo_Uncalibrated|Stereo Uncalibrated]]
- [[Example_Three_View_Stereo_Uncalibrated|Three View Stereo Uncalibrated]]
- [[Example_Stereo_Disparity|Stereo Disparity Example]]
- [[Example_Rectification_Calibrated|Calibrated Stereo Rectification Example]]
- Camera Calibration Tutorial
Example Code

<syntaxhighlight lang="java">
/**
* Example demonstrating how to use two images taken from a single calibrated camera to create a stereo disparity image,
* from which a dense 3D point cloud of the scene can be computed. For this technique to work the camera's motion
* needs to be approximately tangential to the direction the camera is pointing. The code below assumes that the first
* image is to the left of the second image.
*
* @author Peter Abeles
*/
public class ExampleStereoTwoViewsOneCamera {
// Specifies the range of disparity values which will be considered: [disparityMin, disparityMin + disparityRange)
private static final int disparityMin = 15;
private static final int disparityRange = 85;
public static void main( String[] args ) {
// specify location of images and calibration
String calibDir = UtilIO.pathExample("calibration/mono/Sony_DSC-HX5V_Chess/");
String imageDir = UtilIO.pathExample("stereo/");
// Camera parameters
CameraPinholeBrown intrinsic = CalibrationIO.load(new File(calibDir, "intrinsic.yaml"));
// Input images from the camera moving left to right
BufferedImage origLeft = UtilImageIO.loadImage(imageDir, "mono_wall_01.jpg");
BufferedImage origRight = UtilImageIO.loadImage(imageDir, "mono_wall_02.jpg");
// Input images with lens distortion
GrayU8 distortedLeft = ConvertBufferedImage.convertFrom(origLeft, (GrayU8)null);
GrayU8 distortedRight = ConvertBufferedImage.convertFrom(origRight, (GrayU8)null);
// matched features between the two images
List<AssociatedPair> matchedFeatures = ExampleComputeFundamentalMatrix.computeMatches(origLeft, origRight);
// convert from pixel coordinates into normalized image coordinates
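// normalized image coordinates: lens distortion removed and the calibration matrix K factored
// out, i.e. roughly (x,y,1) -> inv(K)*(x,y,1)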
List<AssociatedPair> matchedCalibrated = convertToNormalizedCoordinates(matchedFeatures, intrinsic);
// Robustly estimate camera motion
List<AssociatedPair> inliers = new ArrayList<>();
Se3_F64 leftToRight = estimateCameraMotion(intrinsic, matchedCalibrated, inliers);
drawInliers(origLeft, origRight, intrinsic, inliers);
// Rectify and remove lens distortion for stereo processing
DMatrixRMaj rectifiedK = new DMatrixRMaj(3, 3);
DMatrixRMaj rectifiedR = new DMatrixRMaj(3, 3);
GrayU8 rectifiedLeft = distortedLeft.createSameShape();
GrayU8 rectifiedRight = distortedRight.createSameShape();
GrayU8 rectifiedMask = distortedLeft.createSameShape();
rectifyImages(distortedLeft, distortedRight, leftToRight, intrinsic, intrinsic,
rectifiedLeft, rectifiedRight, rectifiedMask, rectifiedK, rectifiedR);
// compute disparity
ConfigDisparityBMBest5 config = new ConfigDisparityBMBest5();
// Census error is robust to lighting differences between the two views
config.errorType = DisparityError.CENSUS;
config.disparityMin = disparityMin;
config.disparityRange = disparityRange;
// subpixel interpolation yields a smoother disparity image and point cloud
config.subpixel = true;
// radius of the square region compared between the two images
config.regionRadiusX = config.regionRadiusY = 5;
config.maxPerPixelError = 20;
// tolerance (in pixels) for right-to-left consistency validation
config.validateRtoL = 1;
// reject matches in regions with too little texture
config.texture = 0.1;
StereoDisparity<GrayU8, GrayF32> disparityAlg =
FactoryStereoDisparity.blockMatchBest5(config, GrayU8.class, GrayF32.class);
// process and return the results
disparityAlg.process(rectifiedLeft, rectifiedRight);
GrayF32 disparity = disparityAlg.getDisparity();
RectifyImageOps.applyMask(disparity, rectifiedMask, 0);
// show results
BufferedImage visualized = VisualizeImageData.disparity(disparity, null, disparityRange, 0);
BufferedImage outLeft = ConvertBufferedImage.convertTo(rectifiedLeft, null);
BufferedImage outRight = ConvertBufferedImage.convertTo(rectifiedRight, null);
ShowImages.showWindow(new RectifiedPairPanel(true, outLeft, outRight), "Rectification", true);
ShowImages.showWindow(visualized, "Disparity", true);
showPointCloud(disparity, outLeft, leftToRight, rectifiedK, rectifiedR, disparityMin, disparityRange);
System.out.println("Total found " + matchedCalibrated.size());
System.out.println("Total Inliers " + inliers.size());
}
/**
* Estimates the camera motion robustly using RANSAC and a set of associated points.
*
* @param intrinsic Intrinsic camera parameters
* @param matchedNorm set of matched point features in normalized image coordinates
* @param inliers OUTPUT: Set of inlier features from RANSAC
* @return Found camera motion. Note translation has an arbitrary scale
*/
public static Se3_F64 estimateCameraMotion( CameraPinholeBrown intrinsic,
List<AssociatedPair> matchedNorm, List<AssociatedPair> inliers ) {
ModelMatcherMultiview<Se3_F64, AssociatedPair> epipolarMotion =
FactoryMultiViewRobust.baselineRansac(new ConfigEssential(), new ConfigRansac(200, 0.5));
epipolarMotion.setIntrinsic(0, intrinsic);
epipolarMotion.setIntrinsic(1, intrinsic);
if (!epipolarMotion.process(matchedNorm))
throw new RuntimeException("Motion estimation failed");
// save inlier set for debugging purposes
inliers.addAll(epipolarMotion.getMatchSet());
return epipolarMotion.getModelParameters();
}
/**
* Convert a set of associated point features from pixel coordinates into normalized image coordinates.
*/
public static List<AssociatedPair> convertToNormalizedCoordinates( List<AssociatedPair> matchedFeatures, CameraPinholeBrown intrinsic ) {
Point2Transform2_F64 p_to_n = LensDistortionFactory.narrow(intrinsic).undistort_F64(true, false);
List<AssociatedPair> calibratedFeatures = new ArrayList<>();
for (AssociatedPair p : matchedFeatures) {
AssociatedPair c = new AssociatedPair();
p_to_n.compute(p.p1.x, p.p1.y, c.p1);
p_to_n.compute(p.p2.x, p.p2.y, c.p2);
calibratedFeatures.add(c);
}
return calibratedFeatures;
}
/**
* Remove lens distortion and rectify stereo images
*
* @param distortedLeft Input distorted image from left camera.
* @param distortedRight Input distorted image from right camera.
* @param leftToRight Camera motion from left to right
* @param intrinsicLeft Intrinsic parameters for the left camera
* @param intrinsicRight Intrinsic parameters for the right camera
* @param rectifiedLeft Output rectified image for left camera.
* @param rectifiedRight Output rectified image for right camera.
* @param rectifiedMask Mask that indicates invalid pixels in rectified image. 1 = valid, 0 = invalid
* @param rectifiedK Output camera calibration matrix for the rectified camera
* @param rectifiedR Output rotation matrix for the rectified camera's coordinate system
*/
public static <T extends ImageBase<T>>
void rectifyImages( T distortedLeft,
T distortedRight,
Se3_F64 leftToRight,
CameraPinholeBrown intrinsicLeft,
CameraPinholeBrown intrinsicRight,
T rectifiedLeft,
T rectifiedRight,
GrayU8 rectifiedMask,
DMatrixRMaj rectifiedK,
DMatrixRMaj rectifiedR ) {
RectifyCalibrated rectifyAlg = RectifyImageOps.createCalibrated();
// original camera calibration matrices
DMatrixRMaj K1 = PerspectiveOps.pinholeToMatrix(intrinsicLeft, (DMatrixRMaj)null);
DMatrixRMaj K2 = PerspectiveOps.pinholeToMatrix(intrinsicRight, (DMatrixRMaj)null);
rectifyAlg.process(K1, new Se3_F64(), K2, leftToRight);
// rectification matrix for each image
DMatrixRMaj rect1 = rectifyAlg.getUndistToRectPixels1();
DMatrixRMaj rect2 = rectifyAlg.getUndistToRectPixels2();
rectifiedR.setTo(rectifyAlg.getRectifiedRotation());
// New calibration matrix for the rectified views
rectifiedK.setTo(rectifyAlg.getCalibrationMatrix());
// Adjust the rectification to make the view area more useful
ImageDimension rectShape = new ImageDimension();
RectifyImageOps.fullViewLeft(intrinsicLeft, rect1, rect2, rectifiedK, rectShape);
// RectifyImageOps.allInsideLeft(intrinsicLeft, rect1, rect2, rectifiedK, rectShape);
// Taking into account the relative rotation between the image axis and the baseline is important in
// this scenario since a person can easily hold the camera at an odd angle. If you don't adjust
// the rectified image size you might end up with a lot of wasted pixels and a low resolution model!
rectifiedLeft.reshape(rectShape.width, rectShape.height);
rectifiedRight.reshape(rectShape.width, rectShape.height);
// undistort and rectify the images
FMatrixRMaj rect1_F32 = new FMatrixRMaj(3, 3);
FMatrixRMaj rect2_F32 = new FMatrixRMaj(3, 3);
ConvertMatrixData.convert(rect1, rect1_F32);
ConvertMatrixData.convert(rect2, rect2_F32);
// Extending the image prevents a harsh edge reducing false matches at the image border
// SKIP is another option, possibly a tiny bit faster, but it has a harsh edge which will need to be filtered
ImageDistort<T, T> distortLeft =
RectifyDistortImageOps.rectifyImage(intrinsicLeft, rect1_F32, BorderType.EXTENDED, distortedLeft.getImageType());
ImageDistort<T, T> distortRight =
RectifyDistortImageOps.rectifyImage(intrinsicRight, rect2_F32, BorderType.EXTENDED, distortedRight.getImageType());
distortLeft.apply(distortedLeft, rectifiedLeft, rectifiedMask);
distortRight.apply(distortedRight, rectifiedRight);
}
/**
* Draw inliers for debugging purposes. Need to convert from normalized to pixel coordinates.
*/
public static void drawInliers( BufferedImage left, BufferedImage right, CameraPinholeBrown intrinsic,
List<AssociatedPair> normalized ) {
Point2Transform2_F64 n_to_p = LensDistortionFactory.narrow(intrinsic).distort_F64(false, true);
List<AssociatedPair> pixels = new ArrayList<>();
for (AssociatedPair n : normalized) {
AssociatedPair p = new AssociatedPair();
n_to_p.compute(n.p1.x, n.p1.y, p.p1);
n_to_p.compute(n.p2.x, n.p2.y, p.p2);
pixels.add(p);
}
// display the results
AssociationPanel panel = new AssociationPanel(20);
panel.setAssociation(pixels);
panel.setImages(left, right);
ShowImages.showWindow(panel, "Inlier Features", true);
}
/**
* Show results as a point cloud
*/
public static void showPointCloud( ImageGray<?> disparity, BufferedImage left,
Se3_F64 motion, DMatrixRMaj rectifiedK, DMatrixRMaj rectifiedR,
int disparityMin, int disparityRange ) {
DisparityToColorPointCloud d2c = new DisparityToColorPointCloud();
PointCloudWriter.CloudArraysF32 cloud = new PointCloudWriter.CloudArraysF32();
double baseline = motion.getT().norm();
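// For a rectified pair depth follows z = f*baseline/d, so the cloud's scale is set by the
// baseline; since the true baseline is unknown here, the units are arbitrary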
d2c.configure(baseline, rectifiedK, rectifiedR, null, disparityMin, disparityRange);
d2c.process(disparity, UtilDisparitySwing.wrap(left), cloud);
CameraPinhole rectifiedPinhole = PerspectiveOps.matrixToPinhole(rectifiedK, disparity.width, disparity.height, null);
// skew the view to make the structure easier to see
Se3_F64 cameraToWorld = SpecialEuclideanOps_F64.eulerXyz(-baseline*5, 0, 0, 0, 0.2, 0, null);
PointCloudViewer pcv = VisualizeData.createPointCloudViewer();
pcv.setCameraHFov(PerspectiveOps.computeHFov(rectifiedPinhole));
pcv.setCameraToWorld(cameraToWorld);
pcv.addCloud(cloud.cloudXyz, cloud.cloudRgb);
pcv.setDotSize(1);
pcv.setTranslationStep(baseline/10);
pcv.getComponent().setPreferredSize(new Dimension(left.getWidth(), left.getHeight()));
ShowImages.showWindow(pcv.getComponent(), "Point Cloud", true);
}
}
</syntaxhighlight>
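For reference, BoofCV also provides Semi-Global Matching (SGM), which usually produces a denser and smoother disparity image than block matching at a higher computational cost. Below is a minimal sketch of how the block-matching setup above could be swapped for SGM; it assumes the ConfigDisparitySGM and FactoryStereoDisparity.sgm() API from BoofCV 0.41 and is not part of the original example, so check the factory's javadoc before relying on it.

<syntaxhighlight lang="java">
import boofcv.abst.disparity.StereoDisparity;
import boofcv.factory.disparity.ConfigDisparitySGM;
import boofcv.factory.disparity.DisparitySgmError;
import boofcv.factory.disparity.FactoryStereoDisparity;
import boofcv.struct.image.GrayF32;
import boofcv.struct.image.GrayU8;

public class SgmDisparitySketch {
	// Creates an SGM disparity algorithm covering the same disparity interval as the example above.
	// Assumption: field and factory names match BoofCV 0.41's disparity package layout.
	public static StereoDisparity<GrayU8, GrayF32> createSgm( int disparityMin, int disparityRange ) {
		ConfigDisparitySGM config = new ConfigDisparitySGM();
		config.disparityMin = disparityMin;
		config.disparityRange = disparityRange;
		config.errorType = DisparitySgmError.CENSUS; // same census error as the block-matching setup
		config.subpixel = true;
		return FactoryStereoDisparity.sgm(config, GrayU8.class, GrayF32.class);
	}
}
</syntaxhighlight>

The returned object is a drop-in replacement for disparityAlg in main(): process the rectified pair the same way and apply the same rectification mask to the result.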