Example Background Moving Camera

Image: Background_stationary_camera.jpg (still from a traffic camera video)

Example of background modeling / motion detection with a stationary camera. Moving objects are detected in the video based on their difference from a learned background model. These techniques can run very fast (the basic model runs at over 2,000 fps) and can be a very effective component of tracking algorithms.
Example File:
- ExampleBackgroundRemovalStationary.java: https://github.com/lessthanoptimal/BoofCV/blob/v0.19/examples/src/boofcv/examples/tracking/ExampleBackgroundRemovalStationary.java
Concepts:
- Motion Detection
- 2D Image Stitching
Related Examples:
- Background Moving Camera
Example Code
// Imports for BoofCV v0.19 (package paths assumed here; see the linked example file for the exact list)
import boofcv.alg.background.BackgroundModelStationary;
import boofcv.factory.background.ConfigBackgroundBasic;
import boofcv.factory.background.ConfigBackgroundGaussian;
import boofcv.factory.background.FactoryBackgroundModel;
import boofcv.gui.binary.VisualizeBinaryData;
import boofcv.gui.image.ImageGridPanel;
import boofcv.gui.image.ShowImages;
import boofcv.io.MediaManager;
import boofcv.io.image.SimpleImageSequence;
import boofcv.io.wrapper.DefaultMediaManager;
import boofcv.struct.image.*;

import java.awt.image.BufferedImage;

/**
 * Example showing how to perform background modeling when the camera is assumed to be stationary. This scenario
 * can be computed much faster than the moving camera case and, depending on the background model, can sometimes
 * produce reasonable results when the camera has a little bit of jitter.
 *
 * @author Peter Abeles
 */
public class ExampleBackgroundRemovalStationary {
    public static void main(String[] args) {

        String fileName = "../data/applet/background/street_intersection.mp4";
        // String fileName = "../data/applet/background/horse_jitter.mp4"; // degraded performance because of jitter
        // String fileName = "../data/applet/tracking/chipmunk.mjpeg"; // Camera moves. Stationary will fail here

        // Comment/Uncomment to switch input image type
        ImageType imageType = ImageType.single(ImageFloat32.class);
        // ImageType imageType = ImageType.il(3, InterleavedF32.class);
        // ImageType imageType = ImageType.il(3, InterleavedU8.class);

        // Configuration for Gaussian model. Note that the threshold changes depending on the number of image bands
        // 12 = gray scale and 40 = color
        ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12,0.005f);
        configGaussian.initialVariance = 100;
        configGaussian.minimumDifference = 10;
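        // Per the comment above, a 3-band color input would use the larger threshold instead, e.g.
        // ConfigBackgroundGaussian configGaussianColor = new ConfigBackgroundGaussian(40, 0.005f); // sketch, not in the original example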

        // Comment/Uncomment to switch algorithms
        BackgroundModelStationary background =
                FactoryBackgroundModel.stationaryBasic(new ConfigBackgroundBasic(35, 0.005f), imageType);
                // FactoryBackgroundModel.stationaryGaussian(configGaussian, imageType);
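        // stationaryBasic() = per-pixel running average compared against a fixed difference threshold (fastest).
        // stationaryGaussian() = per-pixel Gaussian model (mean and variance); typically more robust to noise but slower.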

        MediaManager media = DefaultMediaManager.INSTANCE;
        SimpleImageSequence video = media.openVideo(fileName, background.getImageType());

        // Declare storage for segmented image. 1 = moving foreground and 0 = background
        ImageUInt8 segmented = new ImageUInt8(video.getNextWidth(),video.getNextHeight());
        BufferedImage visualized = new BufferedImage(segmented.width,segmented.height,BufferedImage.TYPE_INT_RGB);

        ImageGridPanel gui = new ImageGridPanel(1,2);
        gui.setImages(visualized, visualized);

        ShowImages.showWindow(gui, "Static Scene: Background Segmentation", true);

        double fps = 0;
        double alpha = 0.01; // smoothing factor for FPS
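        // fps is an exponentially weighted moving average: each frame's instantaneous rate contributes a
        // fraction alpha, so the printed value settles over roughly 1/alpha (about 100) frames.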

        while( video.hasNext() ) {
            ImageBase input = video.next();

            long before = System.nanoTime();
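            // Segment first so the current frame is compared against a model built from previous frames only,
            // then fold the current frame into the background model.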
            background.segment(input,segmented);
            background.updateBackground(input);
            long after = System.nanoTime();

            fps = (1.0-alpha)*fps + alpha*(1.0/((after-before)/1e9));

            VisualizeBinaryData.renderBinary(segmented, false, visualized);
            gui.setImage(0, 0, (BufferedImage)video.getGuiImage());
            gui.setImage(0, 1, visualized);
            gui.repaint();
            System.out.println("FPS = "+fps);

            try {Thread.sleep(5);} catch (InterruptedException e) {}
        }
        System.out.println("done!");
    }
}