= Example Associate Interest Points =

<center>
{|
| ''Figure: Associated features between images using the example code.''
|}
</center>

A common problem for many computer vision applications is matching features observed in two or more images. Below is an example of how this can be accomplished using interest points and their descriptions. When run, you can click on the image to select individual points or drag a region to select several.

Example Code:
* [https://github.com/lessthanoptimal/BoofCV/tree/v0.40/examples/src/main/java/boofcv/examples/features/ExampleAssociatePoints.java ExampleAssociatePoints.java]

Concepts:
* Describe point features
* Associate descriptions
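
Because the example below is written against abstracted interfaces, the association strategy can be tuned independently of the descriptor. The following is a minimal sketch of that idea, not part of the example itself; the Euclidean scorer and the 0.1 error threshold are illustrative assumptions, not values taken from the example:

<syntaxhighlight lang="java">
// Sketch only: a greedy matcher that validates matches in both directions
// (forwards-backwards) and rejects pairs whose descriptor error exceeds 0.1.
// Both the scorer and the threshold here are illustrative assumptions.
ScoreAssociation<TupleDesc_F64> scorer =
		FactoryAssociation.scoreEuclidean(TupleDesc_F64.class, true);
AssociateDescription<TupleDesc_F64> associate =
		FactoryAssociation.greedy(new ConfigAssociateGreedy(true, 0.1), scorer);
</syntaxhighlight>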


= Example Code =
<syntaxhighlight lang="java">
/**
 * After interest points have been detected in two images the next step is to associate the two
 * sets of features so that the relationship can be found. This is done by computing descriptors for
 * each detected feature and associating them together. In the code below abstracted interfaces are
 * used so that different algorithms can easily be swapped in. The cost of this abstraction is that
 * detector/descriptor specific information is thrown away, potentially slowing down or degrading performance.
 *
 * @author Peter Abeles
 */
public class ExampleAssociatePoints<T extends ImageGray<T>, TD extends TupleDesc<TD>> {

	// algorithm used to detect and describe interest points
	DetectDescribePoint<T, TD> detDesc;
	// Associates descriptions together by minimizing an error metric
	AssociateDescription<TD> associate;

	// location of interest points
	public List<Point2D_F64> pointsA;
	public List<Point2D_F64> pointsB;

	Class<T> imageType;

	public ExampleAssociatePoints( DetectDescribePoint<T, TD> detDesc,
								   AssociateDescription<TD> associate,
								   Class<T> imageType ) {
		this.detDesc = detDesc;
		this.associate = associate;
		this.imageType = imageType;
	}

	/**
	 * Detect and associate point features in the two images. Display the results.
	 */
	public void associate( BufferedImage imageA, BufferedImage imageB ) {
		T inputA = ConvertBufferedImage.convertFromSingle(imageA, null, imageType);
		T inputB = ConvertBufferedImage.convertFromSingle(imageB, null, imageType);

		// stores the location of detected interest points
		pointsA = new ArrayList<>();
		pointsB = new ArrayList<>();

		// stores the description of detected interest points
		DogArray<TD> descA = UtilFeature.createArray(detDesc, 100);
		DogArray<TD> descB = UtilFeature.createArray(detDesc, 100);

		// describe each image using interest points
		describeImage(inputA, pointsA, descA);
		describeImage(inputB, pointsB, descB);

		// Associate features between the two images
		associate.setSource(descA);
		associate.setDestination(descB);
		associate.associate();

		// display the results
		AssociationPanel panel = new AssociationPanel(20);
		panel.setAssociation(pointsA, pointsB, associate.getMatches());
		panel.setImages(imageA, imageB);

		ShowImages.showWindow(panel, "Associated Features", true);
	}

	/**
	 * Detects features inside the two images and computes descriptions at those points.
	 */
	private void describeImage( T input, List<Point2D_F64> points, DogArray<TD> descs ) {
		detDesc.detect(input);

		for (int i = 0; i < detDesc.getNumberOfFeatures(); i++) {
			points.add(detDesc.getLocation(i).copy());
			descs.grow().setTo(detDesc.getDescription(i));
		}
	}

	public static void main( String[] args ) {
		Class imageType = GrayF32.class;
//		Class imageType = GrayU8.class;

		// select which algorithms to use
		DetectDescribePoint detDesc = FactoryDetectDescribe.
				surfStable(new ConfigFastHessian(1, 2, 300, 1, 9, 4, 4), null, null, imageType);
//				sift(new ConfigCompleteSift(0,5,600));

		ScoreAssociation scorer = FactoryAssociation.defaultScore(detDesc.getDescriptionType());
		AssociateDescription associate = FactoryAssociation.greedy(new ConfigAssociateGreedy(true), scorer);

		// load and match images
		ExampleAssociatePoints app = new ExampleAssociatePoints(detDesc, associate, imageType);

		BufferedImage imageA = UtilImageIO.loadImageNotNull(UtilIO.pathExample("stitch/kayak_01.jpg"));
		BufferedImage imageB = UtilImageIO.loadImageNotNull(UtilIO.pathExample("stitch/kayak_03.jpg"));

		app.associate(imageA, imageB);
	}
}
</syntaxhighlight>
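
The example visualizes the result with AssociationPanel, but the raw matches can also be read directly. Below is a minimal sketch, not part of the example, assuming the associate, pointsA, and pointsB objects from the code above after associate() has run; AssociatedIndex is a BoofCV class and FastAccess comes from DDogleg:

<syntaxhighlight lang="java">
// Sketch only: iterate over the matched pairs. Each AssociatedIndex holds the
// index of a feature in the source set (src), the index of the matched feature
// in the destination set (dst), and the association error (fitScore).
FastAccess<AssociatedIndex> matches = associate.getMatches();
for (int i = 0; i < matches.size; i++) {
	AssociatedIndex m = matches.get(i);
	Point2D_F64 a = pointsA.get(m.src); // location in image A
	Point2D_F64 b = pointsB.get(m.dst); // location in image B
	System.out.printf("(%6.1f, %6.1f) -> (%6.1f, %6.1f)  error %.4f%n", a.x, a.y, b.x, b.y, m.fitScore);
}
</syntaxhighlight>

These index pairs are what downstream code, such as geometric model fitting, would consume in place of the GUI shown here.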
