Example Image Stitching
Image Stitching Tutorial
Image stitching refers to combining two or more overlapping images into a single larger image. The goal is to find transforms which minimize the error in overlapping regions and provide a smooth transition between images. Image stitching can be done in many different ways; the methods discussed here are point based.
BoofCV provides several different ways to identify and describe interest points inside of images. This tutorial shows how abstracted code makes it easy to switch between different algorithms.
Example File: ImageStitchingExample.java
Concepts:
- Interest point detection
- Region descriptions
- Feature association
- Robust model fitting
- Homography
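The homography found by this example is a 3x3 matrix which maps pixel coordinates in one image onto pixel coordinates in the other using homogeneous coordinates. As a quick illustration of the underlying math, the self-contained sketch below applies a homography stored as a plain 3x3 array to a pixel; the helper name and array layout are chosen for illustration and are not part of BoofCV's API.

// Illustrative sketch: apply a 3x3 homography H to pixel (x,y).
// The point is lifted to homogeneous coordinates, multiplied by H,
// then divided by the third component to return to pixel coordinates.
public static double[] applyHomography( double[][] H , double x , double y ) {
	double xh = H[0][0]*x + H[0][1]*y + H[0][2];
	double yh = H[1][0]*x + H[1][1]*y + H[1][2];
	double w  = H[2][0]*x + H[2][1]*y + H[2][2];
	return new double[]{ xh/w , yh/w };
}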
Core Algorithm
Described at a high level, this image stitching algorithm can be summarized as follows:
- Detect feature locations
- Compute feature descriptors
- Associate features together
- Use robust fitting to find transform
- Render combined image
/**
 * Using abstracted code, find a transform which minimizes the difference between corresponding features
 * in both images.  This code is completely model independent and is the core algorithm.
 */
public static <T extends ImageBase> Homography2D_F64
computeTransform( T imageA , T imageB ,
				  InterestPointDetector<T> detector ,
				  DescribeRegionPoint<T> describe ,
				  GeneralAssociation<TupleDesc_F64> associate ,
				  ModelMatcher<Homography2D_F64,AssociatedPair> modelMatcher )
{
	// see if the detector has everything that the describer needs
	if( describe.requiresOrientation() && !detector.hasOrientation() )
		throw new IllegalArgumentException("Requires orientation be provided.");
	if( describe.requiresScale() && !detector.hasScale() )
		throw new IllegalArgumentException("Requires scale be provided.");

	// get the length of the description
	int descriptionDOF = describe.getDescriptionLength();

	List<Point2D_F64> pointsA = new ArrayList<Point2D_F64>();
	FastQueue<TupleDesc_F64> descA = new TupleDescQueue(descriptionDOF,true);
	List<Point2D_F64> pointsB = new ArrayList<Point2D_F64>();
	FastQueue<TupleDesc_F64> descB = new TupleDescQueue(descriptionDOF,true);

	// extract feature locations and descriptions from each image
	describeImage(imageA, detector, describe, pointsA, descA);
	describeImage(imageB, detector, describe, pointsB, descB);

	// Associate features between the two images
	associate.associate(descA,descB);

	// create a list of AssociatedPairs that tell the model matcher how a feature moved
	FastQueue<AssociatedIndex> matches = associate.getMatches();
	List<AssociatedPair> pairs = new ArrayList<AssociatedPair>();

	for( int i = 0; i < matches.size(); i++ ) {
		AssociatedIndex match = matches.get(i);

		Point2D_F64 a = pointsA.get(match.src);
		Point2D_F64 b = pointsB.get(match.dst);

		pairs.add( new AssociatedPair(a,b,false));
	}

	// find the best fit model to describe the change between these images
	if( !modelMatcher.process(pairs,null) )
		throw new RuntimeException("Model Matcher failed!");

	// return the found image transform
	return modelMatcher.getModel();
}
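To build intuition for what the ModelMatcher is doing internally, the sketch below shows RANSAC-style robust fitting in miniature. To keep the minimal fit trivial it estimates a pure translation instead of a homography; BoofCV's SimpleInlierRansac applies the same idea with ModelFitterLinearHomography and DistanceHomographySq, plus proper sampling and refinement. Everything in this sketch is illustrative and is not part of the BoofCV API.

import java.util.List;
import java.util.Random;

// Illustrative RANSAC sketch using a pure-translation model so that the
// minimal fit is trivial.  Points are stored as {x,y} arrays and
// srcPts.get(i) corresponds to dstPts.get(i).  thresholdSq is the maximum
// allowed squared pixel error for a correspondence to count as an inlier.
public static double[] ransacTranslation( List<double[]> srcPts , List<double[]> dstPts ,
										  int iterations , double thresholdSq ) {
	Random rand = new Random(123);
	double[] best = null;
	int bestInliers = -1;

	for( int trial = 0; trial < iterations; trial++ ) {
		// a translation model is fully defined by a single correspondence
		int idx = rand.nextInt(srcPts.size());
		double tx = dstPts.get(idx)[0] - srcPts.get(idx)[0];
		double ty = dstPts.get(idx)[1] - srcPts.get(idx)[1];

		// count how many correspondences agree with this candidate model
		int inliers = 0;
		for( int i = 0; i < srcPts.size(); i++ ) {
			double dx = srcPts.get(i)[0] + tx - dstPts.get(i)[0];
			double dy = srcPts.get(i)[1] + ty - dstPts.get(i)[1];
			if( dx*dx + dy*dy <= thresholdSq )
				inliers++;
		}

		// keep the model with the most inliers
		if( inliers > bestInliers ) {
			bestInliers = inliers;
			best = new double[]{tx,ty};
		}
	}
	return best;
}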
/**
 * Detects features inside the image and computes descriptions at those points.
 */
private static <T extends ImageBase>
void describeImage(T image,
				   InterestPointDetector<T> detector,
				   DescribeRegionPoint<T> describe,
				   List<Point2D_F64> points,
				   FastQueue<TupleDesc_F64> descs)
{
	detector.detect(image);
	describe.setImage(image);

	descs.reset();
	TupleDesc_F64 desc = descs.pop();
	for( int i = 0; i < detector.getNumberOfFeatures(); i++ ) {
		// get the feature location info
		Point2D_F64 p = detector.getLocation(i);
		double yaw = detector.getOrientation(i);
		double scale = detector.getScale(i);

		// extract the description and save the results into the provided description
		if( describe.process(p.x,p.y,yaw,scale,desc) != null ) {
			points.add(p.copy());
			desc = descs.pop();
		}
	}
	// remove the last element from the queue, which has not been used
	descs.removeTail();
}
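The associate.associate() call above hides the matching strategy behind an interface. Conceptually, greedy association with a squared Euclidean score behaves like the self-contained sketch below: each descriptor in the first image is matched against every descriptor in the second, and the closest one wins unless its error is too large. This is only an illustration of the idea, not BoofCV's implementation, which also supports backwards validation and other scoring functions.

// Conceptual sketch of greedy association with a squared Euclidean score.
// All descriptor arrays must have the same length.  Returns matches[i] =
// index of the best match in descB for descA[i], or -1 if none scored
// below maxError.
public static int[] greedyAssociate( double[][] descA , double[][] descB , double maxError ) {
	int[] matches = new int[ descA.length ];
	for( int i = 0; i < descA.length; i++ ) {
		int best = -1;
		double bestScore = maxError;
		for( int j = 0; j < descB.length; j++ ) {
			// squared Euclidean distance between the two descriptors
			double score = 0;
			for( int k = 0; k < descA[i].length; k++ ) {
				double d = descA[i][k] - descB[j][k];
				score += d*d;
			}
			if( score <= bestScore ) {
				bestScore = score;
				best = j;
			}
		}
		matches[i] = best;
	}
	return matches;
}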
Create Component Algorithms
/**
 * Given two input images, create and display an image where the two have been overlaid on top of each other.
 */
public static <T extends ImageBase> void stitch( BufferedImage imageA , BufferedImage imageB ,
												 Class<T> imageType )
{
	T inputA = ConvertBufferedImage.convertFrom(imageA, null, imageType);
	T inputB = ConvertBufferedImage.convertFrom(imageB, null, imageType);

	// Detect using the standard SURF feature detector and describer
	InterestPointDetector<T> detector = FactoryInterestPoint.fromFastHessian(400,9,4,4);
	DescribeRegionPoint<T> describe = FactoryDescribeRegionPoint.surf(true,imageType);
	GeneralAssociation<TupleDesc_F64> associate =
			FactoryAssociation.greedy(new ScoreAssociateEuclideanSq(),2,-1,true);

	// fit the images using a homography.  This works well for rotations and distant objects.
	ModelFitterLinearHomography modelFitter = new ModelFitterLinearHomography();
	DistanceHomographySq distance = new DistanceHomographySq();
	int minSamples = modelFitter.getMinimumPoints();
	ModelMatcher<Homography2D_F64,AssociatedPair> modelMatcher =
			new SimpleInlierRansac<Homography2D_F64,AssociatedPair>(123,modelFitter,distance,
					60,minSamples,30,1000,9);

	Homography2D_F64 H = computeTransform(inputA, inputB, detector, describe, associate, modelMatcher);

	// draw the results
	HomographyStitchPanel panel = new HomographyStitchPanel(0.5,inputA.width,inputA.height);
	panel.configure(imageA,imageB,H);
	ShowImages.showWindow(panel,"Stitched Images");
}
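To run the example end to end, load two overlapping photographs and call stitch(). The file names below are placeholders, and ImageFloat32 selects a single-band floating point image type for processing.

public static void main( String args[] ) {
	// placeholder file names; substitute two overlapping photographs
	BufferedImage imageA = UtilImageIO.loadImage("mountain_left.jpg");
	BufferedImage imageB = UtilImageIO.loadImage("mountain_right.jpg");

	stitch(imageA, imageB, ImageFloat32.class);
}

Because computeTransform() depends only on the abstracted interfaces, trying a different detector or descriptor just means changing the factory calls inside stitch(); the rest of the pipeline is unchanged.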