Example Multiview Reconstruction Dense

Video showing resulting point cloud. Red squares represent camera view locations.

After the sparse reconstruction has been computed and the extrinsic and intrinsic parameters of the scene are known, the next step is to compute a dense reconstruction. Internally, key frames are selected to perform multi-baseline stereo on, and the resulting point clouds are then combined into a single cloud.
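
The heavy lifting is handled by SparseSceneToDenseCloud. Stripped of configuration and visualization, the core of the full example below reduces to the following sketch (all class and method names are as they appear in the v0.38 listing):

// Build the high-level sparse-to-dense helper from a config (configuration details in the listing below)
SparseSceneToDenseCloud<GrayU8> sparseToDense = FactorySceneReconstruction.sparseSceneToDenseCloud(
		new ConfigSparseToDenseCloud(), ImageType.SB_U8);

// Tell it how to map bundle adjustment view indices to image names
var viewToId = new TIntObjectHashMap<String>();
BoofMiscOps.forIdx(example.working.listViews, ( i, wv ) -> viewToId.put(wv.index, wv.pview.id));

// Select "center" views, run multi-baseline stereo on each, then fuse everything into a single cloud
if (!sparseToDense.process(example.scene, viewToId, imageLookup))
	throw new RuntimeException("Dense reconstruction failed!");

List<Point3D_F64> cloud = sparseToDense.getCloud();   // dense 3D points
DogArray_I32 colorsRgb = sparseToDense.getColorRgb(); // packed RGB color of each point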

Example Code:
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.38/examples/src/main/java/boofcv/examples/reconstruction/ExampleMultiViewDenseReconstruction.java ExampleMultiViewDenseReconstruction.java]

Concepts:
* Structure from Motion
* Multiple View Stereo
* [[Example_Sparse_Bundle_Adjustment|Sparse Bundle Adjustment]]
* [[Example_Multi_Baseline_Stereo|Multi Baseline Stereo]]
* [[Example_Multiview_Uncalibrated_Reconstruction_Sparse|Uncalibrated Sparse Reconstruction]]

Videos:
* [https://youtu.be/BbTPQ9mIoQU?t=6 Improvements in v0.38]

Tutorials:
* [[3D_Reconstruction_on_Desktop_Tutorial|Photogrammetry / 3D Reconstruction on Desktop]]

Example Code

/**
 * A dense point cloud is created using a previously computed sparse reconstruction and a basic implementation of
 * multiview stereo (MVS). This approach to MVS works by identifying "center" views which have the best set of
 * neighbors for stereo computations using a heuristic. Then a global point cloud is created from the "center" view
 * disparity images while taking care to avoid adding duplicate points.
 *
 * As you can see there is still a fair amount of noise in the cloud. Additional filtering and processing
 * is typically required at this point.
 *
 * @author Peter Abeles
 */
public class ExampleMultiViewDenseReconstruction {
	public static void main( String[] args ) {
		var example = new ExampleMultiViewSparseReconstruction();
		example.compute("tree_snow_01.mp4", true);
//		example.compute("ditch_02.mp4", true);
//		example.compute("holiday_display_01.mp4", true);
//		example.compute("log_building_02.mp4", true);
//		example.compute("drone_park_01.mp4", false);
//		example.compute("stone_sign.mp4", true);

		// Looks up images based on their index in the file list
		var imageLookup = new LookUpImageFilesByIndex(example.imageFiles);

		// We will use a high-level algorithm that does almost all the work for us. It is highly configurable
		// and just about every parameter can be tweaked through its Config. Internal algorithms can be
		// accessed and customized directly if needed. The specifics of how it works are beyond the scope of
		// this example, but the code is easily accessible.

		// Let's do some custom configuration for this scenario
		var config = new ConfigSparseToDenseCloud();
		config.disparity.approach = ConfigDisparity.Approach.SGM;
		ConfigDisparitySGM configSgm = config.disparity.approachSGM;
		configSgm.validateRtoL = 0; // Strictest right-to-left validation of the disparity solution
		configSgm.texture = 0.75; // Reject matches in regions with insufficient texture
		configSgm.disparityRange = 250; // Number of disparity values to search over
		configSgm.paths = ConfigDisparitySGM.Paths.P4; // Aggregate the cost along 4 paths. Faster, slightly less accurate
		configSgm.configBlockMatch.radiusX = 3; // 7x7 block when computing the matching cost
		configSgm.configBlockMatch.radiusY = 3;
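
		// The same Config can select an entirely different disparity approach, e.g. block matching instead
		// of SGM. (Assumption: the exact enum value and field names below; check your BoofCV version's API.)
		// config.disparity.approach = ConfigDisparity.Approach.BLOCK_MATCH;
		// ConfigDisparityBMBest5 configBM = config.disparity.approachBM;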

		// Create the sparse to dense reconstruction using a factory
		SparseSceneToDenseCloud<GrayU8> sparseToDense =
				FactorySceneReconstruction.sparseSceneToDenseCloud(config, ImageType.SB_U8);

		// This takes a minute or two to finish, so turn on verbose output to show signs of life while we wait
		sparseToDense.getMultiViewStereo().setVerbose(
				System.out, BoofMiscOps.hashSet(BoofVerbose.RECURSIVE, BoofVerbose.RUNTIME));

		// To visualize intermediate results we will add a listener. This will show fused disparity images
		sparseToDense.getMultiViewStereo().setListener(new MultiViewStereoFromKnownSceneStructure.Listener<>() {
			@Override
			public void handlePairDisparity( String left, String right, GrayU8 rect0, GrayU8 rect1,
											 GrayF32 disparity, GrayU8 mask, DisparityParameters parameters ) {
				// Uncomment to display individual stereo pairs. Commented out by default because it generates
				// a LOT of windows
//				BufferedImage outLeft = ConvertBufferedImage.convertTo(rect0, null);
//				BufferedImage outRight = ConvertBufferedImage.convertTo(rect1, null);
//
//				ShowImages.showWindow(new RectifiedPairPanel(true, outLeft, outRight), "Rectification: "+left+" "+right);
//				BufferedImage colorized = VisualizeImageData.disparity(disparity, null, parameters.disparityRange, 0);
//				ShowImages.showWindow(colorized, "Disparity " + left + " " + right);
			}

			@Override
			public void handleFusedDisparity( String name,
											  GrayF32 disparity, GrayU8 mask, DisparityParameters parameters ) {
				// You can also do custom filtering of the disparity image in this function. If the line below is
				// uncommented then points which are far away will be marked as invalid
//				PixelMath.operator1(disparity, ( v ) -> v >= 20 ? v : parameters.disparityRange, disparity);

				// Display the disparity for each center view
				BufferedImage colorized = VisualizeImageData.disparity(disparity, null, parameters.disparityRange, 0);
				ShowImages.showWindow(colorized, "Center " + name);
			}
		});

		// The solver needs a lookup table to go from SBA view index to image name. Images are then loaded
		// on demand as stereo disparity is computed
		var viewToId = new TIntObjectHashMap<String>();
		BoofMiscOps.forIdx(example.working.listViews, ( workIdxI, wv ) -> viewToId.put(wv.index, wv.pview.id));
		if (!sparseToDense.process(example.scene, viewToId, imageLookup))
			throw new RuntimeException("Dense reconstruction failed!");

		saveCloudToDisk(sparseToDense);

		// Display the dense cloud
		visualizeInPointCloud(sparseToDense.getCloud(), sparseToDense.getColorRgb(), example.scene);
	}

	private static void saveCloudToDisk( SparseSceneToDenseCloud<GrayU8> sparseToDense ) {
		// Save the dense point cloud to disk in PLY format
		try (FileOutputStream out = new FileOutputStream("saved_cloud.ply")) {
			// Filter out far-away points to make the cloud easier to view in third-party viewers that auto scale
			// You might need to adjust the threshold for your application if too many points are cut
			double distanceThreshold = 50.0;
			List<Point3D_F64> cloud = sparseToDense.getCloud();
			DogArray_I32 colorsRgb = sparseToDense.getColorRgb();

			DogArray<Point3dRgbI_F64> filtered = PointCloudUtils_F64.filter(
					( idx, p ) -> p.setTo(cloud.get(idx)), colorsRgb::get, cloud.size(),
					( idx ) -> cloud.get(idx).norm() <= distanceThreshold, null);

			PointCloudIO.save3D(PointCloudIO.Format.PLY, PointCloudReader.wrapF64RGB(filtered.toList()), true, out);
		} catch (IOException e) {
			e.printStackTrace();
		}
	}

	public static void visualizeInPointCloud( List<Point3D_F64> cloud, DogArray_I32 colorsRgb,
											  SceneStructureMetric structure ) {
		PointCloudViewer viewer = VisualizeData.createPointCloudViewer();
		viewer.setFog(true);
		viewer.setDotSize(1);
		viewer.setTranslationStep(0.15);
		viewer.addCloud(( idx, p ) -> p.setTo(cloud.get(idx)), colorsRgb::get, cloud.size());
		viewer.setCameraHFov(UtilAngle.radian(60));

		SwingUtilities.invokeLater(() -> {
			// Show where the cameras are
			BoofSwingUtil.visualizeCameras(structure, viewer);

			// Display the point cloud
			viewer.getComponent().setPreferredSize(new Dimension(600, 600));
			ShowImages.showWindow(viewer.getComponent(), "Dense Reconstruction Cloud", true);
		});
	}
}
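
The filtered cloud is written to saved_cloud.ply, which can be opened in most third-party point cloud viewers (e.g. MeshLab or CloudCompare). It can also be read back into BoofCV for further processing. Below is a minimal sketch; it assumes PointCloudIO.load3D() and the PointCloudWriter.CloudArraysF32 helper exist in this form in your BoofCV version, so treat it as illustrative rather than definitive:

	// Illustrative sketch: reload the cloud written by saveCloudToDisk() above.
	// Assumes PointCloudIO.load3D(Format, InputStream, PointCloudWriter) is available.
	try (FileInputStream in = new FileInputStream("saved_cloud.ply")) {
		// CloudArraysF32 accumulates points into packed arrays (3 floats per point)
		var cloud = new PointCloudWriter.CloudArraysF32();
		PointCloudIO.load3D(PointCloudIO.Format.PLY, in, cloud);
		System.out.println("Reloaded points: " + cloud.cloudXyz.size/3);
	} catch (IOException e) {
		e.printStackTrace();
	}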