Example Background Moving Camera
In this example the objects which are moving relative to the background are highlighted in a binary image. While much slower than background modeling for static cameras, this approach can handle gradual camera motion when the environment is approximately planar or is viewed from a distance. If the same videos were fed into a static-camera background model, the entire image would be lit up as moving.
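The essence of the approach is in the per-frame logic sketched below: estimate the homography from the first frame to the current frame, then pass that transform to the moving background model when segmenting and updating. This is a condensed sketch of the loop body from the full example at the bottom of this page, reusing the same BoofCV classes and the variables declared there.

<syntaxhighlight lang="java">
// Condensed per-frame logic from the full example below
GConvertImage.convert(input, grey);    // motion is estimated on a gray scale image
if (!motion2D.process(grey))           // estimate motion relative to the first frame
	throw new RuntimeException("Motion estimation failed");

// Convert the 64-bit homography into the 32-bit transform the background model expects
ConvertMatrixData.convert(motion2D.getFirstToCurrent(), firstToCurrent32);

background.segment(firstToCurrent32, input, segmented);   // mark moving pixels as 1
background.updateBackground(firstToCurrent32, input);     // fold the new frame into the model
</syntaxhighlight>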
Example File:
- ExampleBackgroundRemovalMoving.java: https://github.com/lessthanoptimal/BoofCV/blob/v0.38/examples/src/main/java/boofcv/examples/tracking/ExampleBackgroundRemovalMoving.java
Concepts:
- Motion Detection
- 2D Image Stitching
Related Examples:
Example Code

<syntaxhighlight lang="java">
/**
 * Example showing how to perform background modeling with a moving camera. Here the camera's motion is explicitly
 * estimated using a motion model. That motion model is then used to distort the image and generate the background.
 * The net effect is a significant reduction in false positives around objects in images from oscillating cameras,
 * and the ability to detect motion in moving scenes.
*
* @author Peter Abeles
*/
public class ExampleBackgroundRemovalMoving {
	public static void main( String[] args ) {
		// Example with a moving camera. Highlights why motion estimation is sometimes required
		String fileName = UtilIO.pathExample("tracking/chipmunk.mjpeg");
		// Camera has a bit of jitter in it. A static model kind of works, but motion estimation reduces false positives
		// String fileName = UtilIO.pathExample("background/horse_jitter.mp4");

		// Comment/Uncomment to switch input image type
		ImageType imageType = ImageType.single(GrayF32.class);
		// ImageType imageType = ImageType.il(3, InterleavedF32.class);
		// ImageType imageType = ImageType.il(3, InterleavedU8.class);

		// Configure the feature detector
		ConfigPointDetector configDetector = new ConfigPointDetector();
		configDetector.type = PointDetectorTypes.SHI_TOMASI;
		configDetector.general.maxFeatures = 300;
		configDetector.general.radius = 6;
		configDetector.general.threshold = 10;

		// Use a KLT tracker
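		// 4 = number of pyramid levels, 3 = feature radius of the KLT template (per this factory method's signature)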
		PointTracker tracker = FactoryPointTracker.klt(4, configDetector, 3, GrayF32.class, null);

		// This estimates the 2D image motion
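		// Parameters, in order: RANSAC iterations, inlier threshold (pixels), outlier prune threshold,
		// minimum tracks, respawn track fraction, respawn coverage fraction, refine estimate,
		// the tracker, and the motion model (a homography here)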
		ImageMotion2D<GrayF32, Homography2D_F64> motion2D =
				FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());

		ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

		// Configuration for the Gaussian model. Note that the threshold changes depending on the number of image bands
		// 12 = gray scale and 40 = color
		ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12, 0.001f);
		configGaussian.initialVariance = 64;
		configGaussian.minimumDifference = 5;

		// Note that GMM doesn't interpolate the input image, making it harder to model object edges.
		// However, it runs faster because of this.
		ConfigBackgroundGmm configGmm = new ConfigBackgroundGmm();
		configGmm.initialVariance = 1600;
		configGmm.significantWeight = 1e-1f;

		// Comment/Uncomment to switch background model
		BackgroundModelMoving background =
				FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
		// FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);
		// FactoryBackgroundModel.movingGmm(configGmm, new PointTransformHomography_F32(), imageType);
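		// Pixels with no background estimate yet (e.g. regions just revealed by camera motion)
		// are assigned this value in the segmented image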
		background.setUnknownValue(1);

		MediaManager media = DefaultMediaManager.INSTANCE;
		SimpleImageSequence video =
				media.openVideo(fileName, background.getImageType());
		// media.openCamera(null, 640, 480, background.getImageType());

		//====== Initialize Images

		// storage for segmented image. Background = 0, Foreground = 1
		GrayU8 segmented = new GrayU8(video.getWidth(), video.getHeight());
		// Gray scale image that's the input for motion estimation
		GrayF32 grey = new GrayF32(segmented.width, segmented.height);

		// coordinate frames
		Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
		Homography2D_F32 homeToWorld = new Homography2D_F32();
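		// a13/a23 are the homography's x/y translation terms; this shifts the first frame ("home")
		// so it lands in the center of the larger background image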
		homeToWorld.a13 = grey.width/2;
		homeToWorld.a23 = grey.height/2;

		// Create a background image twice the size of the input image. Tell it that the home is in the center
		background.initialize(grey.width*2, grey.height*2, homeToWorld);

		BufferedImage visualized = new BufferedImage(segmented.width, segmented.height, BufferedImage.TYPE_INT_RGB);
		ImageGridPanel gui = new ImageGridPanel(1, 2);
		gui.setImages(visualized, visualized);

		ShowImages.showWindow(gui, "Detections", true);

		double fps = 0;
		double alpha = 0.01; // smoothing factor for FPS

		while (video.hasNext()) {
			ImageBase input = video.next();

			long before = System.nanoTime();
			GConvertImage.convert(input, grey);

			if (!motion2D.process(grey)) {
				throw new RuntimeException("Should handle this scenario");
			}

			Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
			ConvertMatrixData.convert(firstToCurrent64, firstToCurrent32);

			background.segment(firstToCurrent32, input, segmented);
			background.updateBackground(firstToCurrent32, input);
			long after = System.nanoTime();
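			// Exponentially weighted moving average of the instantaneous frames-per-second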
			fps = (1.0 - alpha)*fps + alpha*(1.0/((after - before)/1e9));

			VisualizeBinaryData.renderBinary(segmented, false, visualized);
			gui.setImage(0, 0, (BufferedImage)video.getGuiImage());
			gui.setImage(0, 1, visualized);
			gui.repaint();

			System.out.println("FPS = " + fps);
			BoofMiscOps.sleep(5);
		}
	}
}
</syntaxhighlight>