Difference between revisions of "Example Detect Describe Interface"

From BoofCV
Jump to navigation · Jump to search
(Created page with "BoofCV provides multiple ways to detect and describe interest points inside of images. The easiest high level interface to work with is DetectDescribePoint. It will detect a...")
 
m
 
(11 intermediate revisions by the same user not shown)
Line 2: Line 2:


Example Code:
Example Code:
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.12/examples/src/boofcv/examples/ExampleDetectDescribe.java ExampleDetectDescribe.java]
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.40/examples/src/main/java/boofcv/examples/features/ExampleDetectDescribe.java ExampleDetectDescribe.java]


Concepts:
Concepts:
* Interest point detection
* Interest point detection
* Local region descriptors
* Local region descriptors
Relevant Applets:
* [[Applet_Point_Detected| Interest Point Detector]]
* [[Applet_Description_Region| Describe Points]]


Related Examples
Related Examples
Line 21: Line 17:
  * {@link DetectDescribePoint} provides a single unified interface for detecting interest points inside of images
  * {@link DetectDescribePoint} provides a single unified interface for detecting interest points inside of images
  * and describing the features. For some features (e.g. SIFT) it can be much faster than the alternative approach
  * and describing the features. For some features (e.g. SIFT) it can be much faster than the alternative approach
  * where individual algorithms are used for feature detection, orientation estimation, and describe. It also
  * where individual algorithms are used for feature detection, orientation estimation, and describe. It also
  * simplifies the code.
  * simplifies the code.
*
* This example demonstrates how to create instances, but the {@link ExampleAssociatePoints} demonstrates how
* to use the interface.
  *
  *
  * @author Peter Abeles
  * @author Peter Abeles
Line 29: Line 28:


/**
/**
* For some features, there are pre-made implementations of DetectDescribePoint. This has only been done
* For some features, there are pre-made implementations of DetectDescribePoint. This has only been done
* in situations where there was a performance advantage or that it was a very common combination.
* in situations where there was a performance advantage or that it was a very common combination.
*/
*/
public static <T extends ImageSingleBand, D extends TupleDesc>
public static <T extends ImageGray<T>, TD extends TupleDesc<TD>>
DetectDescribePoint<T,D> createFromPremade( Class<T> imageType ) {
DetectDescribePoint<T, TD> createFromPremade( Class<T> imageType ) {
return (DetectDescribePoint)FactoryDetectDescribe.surf(1, 2, 200, 1, 9, 4, 4,true,imageType);
var config = new ConfigFastHessian();
// note that SIFT only supports ImageFloat32
config.maxFeaturesPerScale = 200;
// if( imageType == ImageFloat32.class )
return (DetectDescribePoint)FactoryDetectDescribe.surfStable(config, null, null, imageType);
// return (DetectDescribePoint)FactoryDetectDescribe.sift(4,2,false,-1);
// var config = new ConfigCompleteSift();
// else
// config.detector.maxFeaturesPerScale = 400;
// throw new RuntimeException("Unsupported image type");
// return (DetectDescribePoint)FactoryDetectDescribe.sift(config, imageType);
}
}


/**
/**
* Any arbitrary implementation of InterestPointDetector, OrientationImage, DescribeRegionPoint
* Any arbitrary implementation of InterestPointDetector, OrientationImage, DescribeRegionPoint
* can be combined into DetectDescribePoint. The syntax is more complex, but the end result is more flexible.
* can be combined into DetectDescribePoint. The syntax is more complex, but the end result is more flexible.
* This should only be done if there isn't a pre-made DetectDescribePoint.
* This should only be done if there isn't a pre-made DetectDescribePoint.
*/
*/
public static <T extends ImageSingleBand, D extends ImageSingleBand, TD extends TupleDesc>
public static <T extends ImageGray<T>, TD extends TupleDesc<TD>>
DetectDescribePoint<T, TD> createFromComponents( Class<T> imageType ) {
DetectDescribePoint<T, TD> createFromComponents( Class<T> imageType ) {
// create a corner detector
// create a corner detector
Class<D> derivType = GImageDerivativeOps.getDerivativeType(imageType);
Class derivType = GImageDerivativeOps.getDerivativeType(imageType);
GeneralFeatureDetector<T,D> corner = FactoryDetectPoint.createShiTomasi(2, false, 1, 300, derivType);
GeneralFeatureDetector corner = FactoryDetectPoint.createShiTomasi(new ConfigGeneralDetector(1000, 5, 1), null, derivType);
InterestPointDetector detector = FactoryInterestPoint.wrapPoint(corner, 1, imageType, derivType);


// describe points using BRIEF
// describe points using BRIEF
DescribeRegionPoint describe = FactoryDescribeRegionPoint.brief(16, 512, -1, 4, true, imageType);
DescribePointRadiusAngle describe = FactoryDescribePointRadiusAngle.brief(new ConfigBrief(true), imageType);


// Combine together.
// Combine together.
// NOTE: orientation will not be estimated
// NOTE: orientation will not be estimated
return FactoryDetectDescribe.fuseTogether(corner, null, describe,1,imageType,derivType);
return FactoryDetectDescribe.fuseTogether(detector, null, describe);
}
}


public static void main( String args[] ) {
public static void main( String[] args ) {
 
Class imageType = GrayF32.class;


// select which feature to use
DetectDescribePoint detDesc = createFromPremade(imageType);
DetectDescribePoint<ImageFloat32,?> detDesc = createFromPremade(ImageFloat32.class);
// DetectDescribePoint detDesc = createFromComponents(imageType);
// DetectDescribePoint<ImageFloat32,?> detDesc = createFromComponents(ImageFloat32.class);


// Load an image
// Might as well have this example do something useful, like associate two images
BufferedImage buffered = UtilImageIO.loadImage("../data/evaluation/outdoors01.jpg");
ScoreAssociation scorer = FactoryAssociation.defaultScore(detDesc.getDescriptionType());
ImageFloat32 input = ConvertBufferedImage.convertFrom(buffered,(ImageFloat32)null);
AssociateDescription associate = FactoryAssociation.greedy(new ConfigAssociateGreedy(true), scorer);


// detect features inside the image
// load and match images
detDesc.detect(input);
var app = new ExampleAssociatePoints(detDesc, associate, imageType);


// print how out many were found
BufferedImage imageA = UtilImageIO.loadImageNotNull(UtilIO.pathExample("stitch/kayak_01.jpg"));
System.out.println("Found "+detDesc.getNumberOfFeatures());
BufferedImage imageB = UtilImageIO.loadImageNotNull(UtilIO.pathExample("stitch/kayak_03.jpg"));


// print out info for the first feature
app.associate(imageA, imageB);
System.out.println("Properties of first feature:");
System.out.println("Location: "+detDesc.getLocation(0));
System.out.println("Scale: "+detDesc.getScale(0));
System.out.println("Orientation: "+detDesc.getOrientation(0));
System.out.println("Descriptor: "+detDesc.getDescriptor(0));
}
}
}
}
</syntaxhighlight>
</syntaxhighlight>

Latest revision as of 12:58, 17 January 2022

BoofCV provides multiple ways to detect and describe interest points inside of images. The easiest high level interface to work with is DetectDescribePoint. It will detect and describe all interest points in the image at the same time. The alternative involves using separate interfaces for detection, orientation, and describing.

Example Code:

Concepts:

  • Interest point detection
  • Local region descriptors

Related Examples

Example Code

/**
 * {@link DetectDescribePoint} provides a single unified interface for detecting interest points inside of images
 * and describing the features. For some features (e.g. SIFT) it can be much faster than the alternative approach
 * where individual algorithms are used for feature detection, orientation estimation, and describe. It also
 * simplifies the code.
 *
 * This example demonstrates how to create instances, but the {@link ExampleAssociatePoints} demonstrates how
 * to use the interface.
 *
 * @author Peter Abeles
 */
public class ExampleDetectDescribe {

	/**
	 * For some features, there are pre-made implementations of {@link DetectDescribePoint}. This has only been done
	 * in situations where there was a performance advantage or that it was a very common combination.
	 *
	 * @param imageType Type of gray-scale image the detector will operate on
	 * @return SURF based detector + descriptor, capped at roughly 200 features per scale
	 */
	public static <T extends ImageGray<T>, TD extends TupleDesc<TD>>
	DetectDescribePoint<T, TD> createFromPremade( Class<T> imageType ) {
		var config = new ConfigFastHessian();
		// Limit detected features per scale so runtime stays bounded on large images
		config.maxFeaturesPerScale = 200;
		// Unchecked cast: the factory returns a concrete descriptor type which is erased to TD here
		return (DetectDescribePoint)FactoryDetectDescribe.surfStable(config, null, null, imageType);
		// Alternative: SIFT detector and descriptor
//		var config = new ConfigCompleteSift();
//		config.detector.maxFeaturesPerScale = 400;
//		return (DetectDescribePoint)FactoryDetectDescribe.sift(config, imageType);
	}

	/**
	 * Any arbitrary implementation of InterestPointDetector, OrientationImage, DescribePointRadiusAngle
	 * can be combined into DetectDescribePoint. The syntax is more complex, but the end result is more flexible.
	 * This should only be done if there isn't a pre-made DetectDescribePoint.
	 *
	 * @param imageType Type of gray-scale image the detector will operate on
	 * @return Shi-Tomasi corner detector fused with a BRIEF descriptor. NOTE: orientation is not estimated.
	 */
	public static <T extends ImageGray<T>, TD extends TupleDesc<TD>>
	DetectDescribePoint<T, TD> createFromComponents( Class<T> imageType ) {
		// create a corner detector
		// Raw types below: the derivative image class is only known at runtime, so the
		// detector/derivative generics cannot be expressed statically here.
		Class derivType = GImageDerivativeOps.getDerivativeType(imageType);
		GeneralFeatureDetector corner = FactoryDetectPoint.createShiTomasi(new ConfigGeneralDetector(1000, 5, 1), null, derivType);
		InterestPointDetector detector = FactoryInterestPoint.wrapPoint(corner, 1, imageType, derivType);

		// describe points using BRIEF
		DescribePointRadiusAngle describe = FactoryDescribePointRadiusAngle.brief(new ConfigBrief(true), imageType);

		// Combine together.
		// NOTE: orientation will not be estimated
		return FactoryDetectDescribe.fuseTogether(detector, null, describe);
	}

	public static void main( String[] args ) {
		// Delegate to a generic helper so the detector, scorer, and associator can share a
		// single descriptor type parameter instead of falling back to raw types.
		run(GrayF32.class);
	}

	/**
	 * Detects and describes features in two example images, then associates them.
	 *
	 * @param imageType Type of gray-scale image to process
	 */
	private static <T extends ImageGray<T>, TD extends TupleDesc<TD>> void run( Class<T> imageType ) {
		// select which feature to use
		DetectDescribePoint<T, TD> detDesc = createFromPremade(imageType);
//		DetectDescribePoint<T, TD> detDesc = createFromComponents(imageType);

		// Might as well have this example do something useful, like associate two images
		ScoreAssociation<TD> scorer = FactoryAssociation.defaultScore(detDesc.getDescriptionType());
		AssociateDescription<TD> associate = FactoryAssociation.greedy(new ConfigAssociateGreedy(true), scorer);

		// load and match images
		var app = new ExampleAssociatePoints<>(detDesc, associate, imageType);

		BufferedImage imageA = UtilImageIO.loadImageNotNull(UtilIO.pathExample("stitch/kayak_01.jpg"));
		BufferedImage imageB = UtilImageIO.loadImageNotNull(UtilIO.pathExample("stitch/kayak_03.jpg"));

		app.associate(imageA, imageB);
	}
}