Example Background Moving Camera

From BoofCV
<center>
<gallery heights=200 widths=500 >
Image:Background_moving_camera.jpg|Still from a video where the camera follows a chipmunk walking across a stone path.
</gallery>
</center>


In this example the objects which are moving relative to the background are highlighted in a binary image.  While much slower than background modeling for static cameras, this approach can handle gradual camera motion when viewing environments that are approximately planar or viewed from a distance.  If the same videos were fed into a static-camera background model, the entire image would light up as moving.
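The only API difference from the stationary case is that the moving-camera model takes the estimated image motion with every call.  Below is a minimal sketch of the contrast; the variable names <code>stationaryBackground</code> and <code>movingBackground</code> are illustrative, and the full example further down shows how <code>firstToCurrent32</code> is actually estimated.

<syntaxhighlight lang="java">
// Stationary model: the input is compared against the background directly
stationaryBackground.segment(input, segmented);
stationaryBackground.updateBackground(input);

// Moving model: a homography from the first frame to the current frame aligns
// the input with the background before differencing and updating
movingBackground.segment(firstToCurrent32, input, segmented);
movingBackground.updateBackground(firstToCurrent32, input);
</syntaxhighlight>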


Example File: 
* [https://github.com/lessthanoptimal/BoofCV/blob/v0.19/examples/src/boofcv/examples/tracking/ExampleBackgroundRemovalMoving.java ExampleBackgroundRemovalMoving.java]


Concepts:
* Motion Detection
* 2D Image Stitching


Related Examples:
* [[Example_Background_Stationary_Camera| Background Stationary Camera]]


= Example Code =
<syntaxhighlight lang="java">
/**
 * Example showing how to perform background modeling with a moving camera.  Here the camera's motion is explicitly
 * estimated using a motion model.  That motion model is then used to distort the image and generate the background
 * model.  The net effect is a significant reduction in false positives around the edges of objects when the camera
 * is oscillating, and the ability to detect motion when the camera itself is moving.
 *
 * @author Peter Abeles
 */
public class ExampleBackgroundRemovalMoving {
	public static void main(String[] args) {

		// Example with a moving camera.  Highlights why motion estimation is sometimes required
		String fileName = "../data/applet/tracking/chipmunk.mjpeg";
		// Camera has a bit of jitter in it.  Static kinda works but motion reduces false positives
//		String fileName = "../data/applet/background/horse_jitter.mp4";

		// Comment/Uncomment to switch input image type
		ImageType imageType = ImageType.single(ImageFloat32.class);
//		ImageType imageType = ImageType.il(3, InterleavedF32.class);
//		ImageType imageType = ImageType.il(3, InterleavedU8.class);

		// Configure the feature detector
		ConfigGeneralDetector confDetector = new ConfigGeneralDetector();
		confDetector.threshold = 10;
		confDetector.maxFeatures = 300;
		confDetector.radius = 6;

		// Use a KLT tracker
		PointTracker tracker = FactoryPointTracker.klt(new int[]{1, 2, 4, 8}, confDetector, 3,
				ImageFloat32.class, null);

		// This estimates the 2D image motion
		ImageMotion2D<ImageFloat32,Homography2D_F64> motion2D =
				FactoryMotion2D.createMotion2D(500, 0.5, 3, 100, 0.6, 0.5, false, tracker, new Homography2D_F64());
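		// Note: the motion is modeled as a homography, which is only valid when the scene is roughly
		// planar or far away, or the camera motion is (nearly) pure rotation, as discussed above
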
		ConfigBackgroundBasic configBasic = new ConfigBackgroundBasic(30, 0.005f);

		// Configuration for Gaussian model.  Note that the threshold changes depending on the number of image bands
		// 12 = gray scale and 40 = color
		ConfigBackgroundGaussian configGaussian = new ConfigBackgroundGaussian(12,0.001f);
		configGaussian.initialVariance = 64;
		configGaussian.minimumDifference = 5;
 
		// Comment/Uncomment to switch background mode
		BackgroundModelMoving background =
				FactoryBackgroundModel.movingBasic(configBasic, new PointTransformHomography_F32(), imageType);
//				FactoryBackgroundModel.movingGaussian(configGaussian, new PointTransformHomography_F32(), imageType);

		MediaManager media = DefaultMediaManager.INSTANCE;
		SimpleImageSequence video = media.openVideo(fileName, background.getImageType());

		//====== Initialize Images

		// storage for segmented image.  Background = 0, Foreground = 1
		ImageUInt8 segmented = new ImageUInt8(video.getNextWidth(),video.getNextHeight());
		// Grey scale image that's the input for motion estimation
		ImageFloat32 grey = new ImageFloat32(segmented.width,segmented.height);
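		// (motion is estimated on this single-band image even if a color imageType is selected above)
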
		// coordinate frames
		Homography2D_F32 firstToCurrent32 = new Homography2D_F32();
		Homography2D_F32 homeToWorld = new Homography2D_F32();
		homeToWorld.a13 = grey.width/2;
		homeToWorld.a23 = grey.height/2;
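
		// The translation (a13,a23) places the first input frame at the center of the larger
		// background image allocated below, leaving room for the camera to wander in any direction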
		// Create a background image twice the size of the input image.  Tell it that the home is in the center
		background.initialize(grey.width * 2, grey.height * 2, homeToWorld);

		BufferedImage visualized = new BufferedImage(segmented.width,segmented.height,BufferedImage.TYPE_INT_RGB);
		ImageGridPanel gui = new ImageGridPanel(1,2);
		gui.setImages(visualized, visualized);

		ShowImages.showWindow(gui, "Detections", true);

		double fps = 0;
		double alpha = 0.01; // smoothing factor for FPS

		while( video.hasNext() ) {
			ImageBase input = video.next();

			long before = System.nanoTime();
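
			// Per-frame pipeline: convert to gray scale, estimate the camera's motion relative to the
			// first frame, then use that motion when segmenting and updating the background model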
			GConvertImage.convert(input, grey);

			if( !motion2D.process(grey) ) {
				throw new RuntimeException("Should handle this scenario");
			}

			Homography2D_F64 firstToCurrent64 = motion2D.getFirstToCurrent();
			UtilHomography.convert(firstToCurrent64, firstToCurrent32);

			background.segment(firstToCurrent32, input, segmented);
			background.updateBackground(firstToCurrent32,input);
			long after = System.nanoTime();

			fps = (1.0-alpha)*fps + alpha*(1.0/((after-before)/1e9));

			VisualizeBinaryData.renderBinary(segmented,false,visualized);
			gui.setImage(0, 0, (BufferedImage)video.getGuiImage());
			gui.setImage(0, 1, visualized);
			gui.repaint();

			System.out.println("FPS = "+fps);

			try {Thread.sleep(5);} catch (InterruptedException e) {}
		}
	}
}
</syntaxhighlight>
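
When run, the window shows the input video alongside the binary motion mask, with moving foreground pixels rendered white, and the smoothed frames-per-second estimate is printed to the console.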
