/**
 * Copyright (c) 2011, The University of Southampton and the individual contributors.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 *   *  Redistributions of source code must retain the above copyright notice,
 *      this list of conditions and the following disclaimer.
 *
 *   *  Redistributions in binary form must reproduce the above copyright notice,
 *      this list of conditions and the following disclaimer in the documentation
 *      and/or other materials provided with the distribution.
 *
 *   *  Neither the name of the University of Southampton nor the names of its
 *      contributors may be used to endorse or promote products derived from this
 *      software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
 * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
 * DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR
 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
 * ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
 * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
package org.openimaj.demos.sandbox.video;

import java.io.File;
import java.io.IOException;
import java.lang.reflect.Field;
import java.util.ArrayList;
import java.util.Collections;
import java.util.Comparator;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.List;
import java.util.Map;
import java.util.Set;

import org.kohsuke.args4j.Argument;
import org.kohsuke.args4j.CmdLineException;
import org.kohsuke.args4j.CmdLineParser;
import org.kohsuke.args4j.Option;
import org.openimaj.image.DisplayUtilities;
import org.openimaj.image.FImage;
import org.openimaj.image.MBFImage;
import org.openimaj.image.colour.RGBColour;
import org.openimaj.image.processing.face.detection.CLMDetectedFace;
import org.openimaj.image.processing.face.detection.DetectedFace;
import org.openimaj.image.processing.face.recognition.FaceRecognitionEngine;
import org.openimaj.image.processing.face.tracking.clm.CLMFaceTracker;
import org.openimaj.image.processing.face.tracking.clm.MultiTracker;
import org.openimaj.image.processing.face.tracking.clm.MultiTracker.TrackedFace;
import org.openimaj.image.typography.hershey.HersheyFont;
import org.openimaj.ml.annotation.ScoredAnnotation;
import org.openimaj.time.Timecode;
import org.openimaj.tools.faces.recognition.options.RecognitionEngineProvider;
import org.openimaj.tools.faces.recognition.options.RecognitionStrategy;
import org.openimaj.util.pair.IndependentPair;
import org.openimaj.video.Video;
import org.openimaj.video.capture.VideoCapture;
import org.openimaj.video.capture.VideoCaptureException;
import org.openimaj.video.processing.shotdetector.HistogramVideoShotDetector;
import org.openimaj.video.processing.shotdetector.VideoShotDetector;
import org.openimaj.video.timecode.HrsMinSecFrameTimecode;
import org.openimaj.video.xuggle.XuggleVideo;

/**
 * Person linker takes a video and makes links between the various depictions
 * of the same person by using a face recognition engine for face verification.
 * <p>
 * There are various challenges associated with doing this; for example, how
 * much training is needed? Currently the amount of training is determined by
 * the user's settings.
 * <p>
 * The distance threshold that determines whether a person is the same person
 * or not is also determined by the user's settings, and is fed through to the
 * annotator that is instantiated.
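 * <p>
 * A minimal usage sketch (the video file name and URI here are hypothetical;
 * see {@link #main(String[])} for the full command-line driver):
 *
 * <pre>
 * {@code
 * final PersonLinkerOptions o = PersonLinker.parseArgs(new String[] { "video.mp4" });
 * final PersonLinker pl = new PersonLinker(o);
 * final XuggleVideo video = new XuggleVideo(o.inputFile);
 * pl.getShotDetector().setFPS(video.getFPS());
 * pl.processVideo(video, "http://example.org/myVideo");
 * }
 * </pre>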
 *
 * @author David Dupplaw (dpd@ecs.soton.ac.uk)
 * @created 12 Mar 2013
 */
public class PersonLinker
{
	/**
	 * A tracked person is a tracked face with a person identifier.
	 *
	 * @author David Dupplaw (dpd@ecs.soton.ac.uk)
	 * @created 18 Mar 2013
	 */
	protected static class TrackedPerson
	{
		/** The face */
		public CLMDetectedFace face;

		/** An identifier for the person */
		public String personIdentifier;
	}

	/**
	 * Options for the person linker.
	 *
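	 * <p>
	 * An example command line (the file name is hypothetical):
	 * <pre>java PersonLinker -d -t 30 myvideo.mp4</pre>
	 *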
	 * @author David Dupplaw (dpd@ecs.soton.ac.uk)
	 * @created 12 Mar 2013
	 */
	protected static class PersonLinkerOptions
	{
		/** The input video file to process */
		@Argument(metaVar = "FILE",
				usage = "Video file to process", required = false)
		public File inputFile = null;

		/** The pre-trained recogniser file to load */
		@Option(name = "--recogniser", aliases = "-r",
				usage = "Pre-trained recogniser to use (default: none)")
		public File recogniserFile = null;

		/** Whether to display the video during processing */
		@Option(name = "--display", aliases = "-d",
				usage = "Display video during processing (default: false)")
		public boolean display = false;

		/** The recognition strategy to use */
		@Option(name = "--strategy", aliases = "-s",
				usage = "The recognition strategy to use (default: CLMFeature_KNN)")
		public RecognitionStrategy strategy = RecognitionStrategy.CLMFeature_KNN;

		/** The distance threshold determining when a face is matched or a new person is created */
		@Option(name = "--threshold", aliases = "-t",
				usage = "The distance threshold for face matching (default: 30)")
		public float threshold = 30f;

		/** The maximum number of frames to train a single person on */
		@Option(name = "--maxTrainingFrames", aliases = "-m",
				usage = "The maximum number of frames to train a single person on (default: 20)")
		public int maxTrainingFrames = 20;

		/** Whether to display help information */
		@Option(name = "--help", aliases = "-h",
				usage = "Display this help")
		public boolean displayHelp = false;
	}

	/** The frame predicate that links a video to a frame instance */
	private static final String HAS_FRAME_PRED = "http://onto.dupplaw.me.uk/video/hasFrame";

	/** The options in use for this linker */
	private PersonLinkerOptions options = null;

	/** The face recognition engine used to match people */
	private FaceRecognitionEngine<DetectedFace, String> faceRecogniser;

	/** The shot detector we'll use to know when to reset the tracker */
	private final VideoShotDetector<MBFImage> shotDetector = new HistogramVideoShotDetector();

	/** The face tracker we'll use to track faces across frames */
	private final CLMFaceTracker tracker = new CLMFaceTracker();

	/** Maps tracked faces to people, so we don't have to run recognition on every frame */
	private final Map<CLMDetectedFace, String> trackedFacesMap =
			new HashMap<CLMDetectedFace, String>();

	/** Whether we're currently tracking faces */
	private boolean currentlyTracking = false;

	/** If we're training the annotator on a new face, we store it in here */
	private final Set<IndependentPair<DetectedFace, String>> trainingFaces =
			new HashSet<IndependentPair<DetectedFace, String>>();

	/** The number of training examples encountered for any particular person */
	private final Map<String, Integer> trainingExamplesCount =
			new HashMap<String, Integer>();

	/** Cache for robustly converting tracked faces to detected faces */
	private final Map<TrackedFace, CLMDetectedFace> conversionCache =
			new HashMap<TrackedFace, CLMDetectedFace>();

	/** Inverse index for the conversion cache */
	private final Map<CLMDetectedFace, TrackedFace> inverseConversionCache =
			new HashMap<CLMDetectedFace, MultiTracker.TrackedFace>();

	/** The timecode ranges (from and to) for each appearance of each person */
	private final Map<String, List<IndependentPair<Timecode, Timecode>>> linkedRange =
			new HashMap<String, List<IndependentPair<Timecode, Timecode>>>();

	/** The triples generated by this person linker, indexed by subject then predicate */
	private final Map<String, Map<String, List<String>>> triples = new HashMap<String, Map<String, List<String>>>();

	/** Whether to generate links between frames containing the same person */
	private final boolean generateFrameLinkingTriples = true;

	/** The URI base to prepend to person names */
	private final String personInstanceOntologyBase = "http://david.dupplaw.me.uk/people/";

	/** The URI of the video being processed */
	private String videoURI = null;

	/**
	 * Constructor that takes an options object.
	 *
	 * @param options
	 *            The options for the new PersonLinker
	 */
	public PersonLinker(final PersonLinkerOptions options)
	{
		this.options = options;

		try
		{
			// Set the face tracker to redetect faces regularly.
			this.tracker.fpd = 10;

			// Instantiate the face recognition engine.
			this.faceRecogniser = this.getFaceRecogniserEngine(this.options.recogniserFile);
		} catch (final IOException e)
		{
			e.printStackTrace();
		}
	}

	/**
	 * Process a video.
	 *
	 * @param v The video to process
	 */
	public void processVideo( final Video<MBFImage> v )
	{
		this.processVideo( v, null );
	}

	/**
	 * Process the video and provide a URI which all relations will be linked to.
	 *
	 * @param v The video to process
	 * @param uri The URI of the video
	 */
	public void processVideo( final Video<MBFImage> v, final String uri )
	{
		this.videoURI = uri;
		for (final MBFImage frame : v)
			this.processFrame( frame,
					new HrsMinSecFrameTimecode( v.getCurrentFrameIndex(), v.getFPS() ) );
	}

	/**
	 * Process the given video frame.
	 *
	 * @param frame The frame to process
	 */
	public void processFrame( final MBFImage frame )
	{
		this.processFrame( frame, null );
	}

	/**
	 * Process the given video frame. The timecode may be null if the frame's
	 * timecode is not known, but in that case no RDF will be output for the
	 * frame.
	 *
	 * @param frame The frame
	 * @param timecode The frame's timecode
	 */
	public void processFrame( final MBFImage frame, final Timecode timecode )
	{
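		// The frame passes through several stages: shot-boundary detection
		// (which resets the tracker on a new shot), face tracking, then
		// recognition or training for each tracked face, and finally the
		// optional display overlay and the RDF output.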
		// Use the shot detector to find out if this frame is a shot boundary.
		this.shotDetector.processFrame(frame);

		// If a shot boundary was detected, reset the tracker.
		if (this.shotDetector.wasLastFrameBoundary())
		{
			this.tracker.reset();
			this.currentlyTracking = false;
		}

		// Track the faces in the frame (if there are any)
		this.tracker.track(frame);

		// Get the list of faces being tracked.
		final List<CLMDetectedFace> faces = this.cachedConvert(
				this.tracker.getTrackedFaces(), frame.flatten());

		// If we have at least one face, we can do something.
		if (faces.size() > 0)
		{
			this.trainingFaces.clear();

			// If there are no people annotated in our recogniser yet,
			// then we simply train with the current faces.
			if (this.faceRecogniser.getRecogniser().getAnnotations().size() == 0)
			{
				final int nPersons = this.faceRecogniser.getRecogniser().getAnnotations().size();
				System.out.println("Annotator empty. Adding " + faces.size() + " faces to training list...");
				for (int i = 0; i < faces.size(); i++)
				{
					final String personName = "Person " + (nPersons + i);
					this.trainingFaces.add(new IndependentPair<DetectedFace, String>(
							faces.get(i), personName));
					this.trainingExamplesCount.put(personName, 1);

					// Start a range that says this person was first seen here.
					final ArrayList<IndependentPair<Timecode, Timecode>> xx
							= new ArrayList<IndependentPair<Timecode, Timecode>>();
					xx.add(new IndependentPair<Timecode, Timecode>(timecode.clone(), timecode.clone()));
					this.linkedRange.put(personName, xx);
				}
			}
			// Otherwise, we check to see if any of the faces are recognised
			// with the current annotator. If the confidence of a match
			// is very small, then we'll train instead.
			else
			{
				// This is how many faces we already have trained on.
				// We need this to "name" each of the new people we find.
				int nPersons = this.faceRecogniser.getRecogniser().getAnnotations().size();

				// Check each of the faces for a match.
				for (int i = 0; i < faces.size(); i++)
				{
					// Get the face
					final CLMDetectedFace face = faces.get(i);

					// Check if we're tracking it already
					if (this.trackedFacesMap.get(face) != null)
					{
						// We already know who it is... but we can carry
						// on training for them based on this image
						this.trainingFaces.add(new IndependentPair<DetectedFace, String>(
								face, this.trackedFacesMap.get(face)));

						// Update the timecode for the end of the range of this person.
						this.getLatest(this.trackedFacesMap.get(face)).setSecondObject(timecode.clone());
					}
					// It's not a face we're already tracking...
					else
					{
						// We've started tracking a new face, but we
						// already know of some other faces. So, we need
						// to first check if it's a face we already recognise.
						final ScoredAnnotation<String> x =
								this.faceRecogniser.getRecogniser().annotateBest(face);

						// It's possible there'll be an error getting the face
						// from the face patch, in which case we'd better ignore it.
						if (x != null)
						{
							this.trackedFacesMap.put(face, x.annotation);

							// Either start a new range for this person or extend
							// their latest one, depending on whether we were
							// already tracking when they appeared.
							if (this.currentlyTracking)
								this.linkedRange.get(x.annotation).add(
										new IndependentPair<Timecode, Timecode>(timecode.clone(), timecode.clone()));
							else
								this.getLatest(x.annotation).setSecondObject(timecode.clone());

							System.out.println("Recognised " + x.annotation + " with confidence " + x.confidence);
						}
						else
						{
							System.out.println("Warning: unable to find matching face for " + face);
							final String name = "Person " + nPersons;
							System.out.println("   - Adding " + name);
							this.trainingFaces.add(
									new IndependentPair<DetectedFace, String>(
											face, name));
							nPersons++;

							// Start a range that says this person was first seen here.
							final ArrayList<IndependentPair<Timecode, Timecode>> xx
									= new ArrayList<IndependentPair<Timecode, Timecode>>();
							xx.add(new IndependentPair<Timecode, Timecode>(timecode.clone(), timecode.clone()));
							this.linkedRange.put(name, xx);
						}
					}
				}
			}

			// Loop through the list of faces we should be training for, and
			// train, train, train! Woo Woo!
			final Iterator<IndependentPair<DetectedFace, String>> it = this.trainingFaces.iterator();
			while (it.hasNext())
			{
				final IndependentPair<DetectedFace, String> facePair = it.next();
				final String person = facePair.getSecondObject();
				Integer nExamplesSoFar = this.trainingExamplesCount.get(person);
				if (nExamplesSoFar == null)
					nExamplesSoFar = 0;
				final DetectedFace face = facePair.getFirstObject();

				// Only train if we're still under the maximum number of training
				// frames set within the options. If we are, then we also increase
				// that counter so we keep track of how many examples we've used.
				if (nExamplesSoFar < this.options.maxTrainingFrames)
				{
					if (this.options.display)
						DisplayUtilities.displayName(face.getFacePatch(), "Face Patch");

					// Train the recogniser with this face
					this.faceRecogniser.train(face, person);
					this.trainingExamplesCount.put(person, nExamplesSoFar + 1);
				}
				else
				{
					// If we've done enough training on this face, we remove it
					// from the training faces list
					it.remove();
				}
			}

			// If we're to display the video while we're processing it, then
			// we'll do that here. We'll add on the overlays too.
			if (this.options.display)
			{
				final MBFImage f = frame.clone();

				// Draw all the tracked faces
				for (final CLMDetectedFace face : this.trackedFacesMap.keySet())
				{
					// If the face is one being trained on, then it will be
					// coloured red; otherwise it will be coloured green.
					Float[] colour = RGBColour.GREEN;
					for (final IndependentPair<DetectedFace, String> x : this.trainingFaces)
						if (x.firstObject() == face)
							colour = RGBColour.RED;

					// Draw the face model to the frame
					final TrackedFace trackedFace = this.inverseConversionCache.get(face);
					if (trackedFace != null)
					{
						// Draw the face model
						CLMFaceTracker.drawFaceModel(f, trackedFace,
								true, true, true, true, true,
								this.tracker.triangles, this.tracker.connections,
								1f, colour, RGBColour.WHITE, RGBColour.WHITE, RGBColour.RED);

						// Draw the name of the person
						final String person = this.trackedFacesMap.get(face);
						f.drawText(person, (int) trackedFace.lastMatchBounds.x,
								(int) trackedFace.lastMatchBounds.y,
								HersheyFont.TIMES_BOLD, 10, colour);
					}
				}

				DisplayUtilities.displayName(f, "video processing");
			}

			// If we can output some information about the video (we can't if
			// there's no URI to link the information to, nor a timecode to
			// identify the frame)...
			if (this.videoURI != null && timecode != null)
			{
				this.addTriple(this.videoURI, PersonLinker.HAS_FRAME_PRED,
						this.videoURI + "_" + timecode.toString());
			}

			this.currentlyTracking = true;
		}
		else
			this.currentlyTracking = false;
	}

	/**
	 * Provides a cached conversion of {@link TrackedFace}s to
	 * {@link CLMDetectedFace}s. That is, if the same {@link TrackedFace} is
	 * passed in to the method, the same {@link CLMDetectedFace} will be
	 * returned from the method. This method will attempt to tidy up the cache
	 * as it goes - that is, if a {@link TrackedFace} exists in the cache but
	 * not in the list passed in, it will be removed from the cache.
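	 * <p>
	 * The stable identity matters here: {@link #trackedFacesMap} is keyed on
	 * the returned {@link CLMDetectedFace} instances, so converting afresh on
	 * every frame would break those lookups.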
	 *
	 * @param list
	 *            The list of {@link TrackedFace}s to convert
	 * @param img
	 *            The image from which they were tracked
	 * @return A list of {@link CLMDetectedFace}s.
	 */
	private List<CLMDetectedFace> cachedConvert(final List<TrackedFace> list, final FImage img)
	{
		final List<CLMDetectedFace> cvt = new ArrayList<CLMDetectedFace>();

		// Clear the inverse cache; we'll reinstate it below
		this.inverseConversionCache.clear();

		// Clean up the cache: remove any faces no longer being tracked.
		final Iterator<TrackedFace> it = this.conversionCache.keySet().iterator();
		while (it.hasNext())
			if (!list.contains(it.next()))
				it.remove();

		// Convert the passed-in faces, reusing cached conversions where possible.
		for (final TrackedFace f : list)
		{
			CLMDetectedFace m = null;
			if ((m = this.conversionCache.get(f)) == null)
			{
				m = new CLMDetectedFace(f, img);
				this.conversionCache.put(f, m);
			}

			cvt.add(m);
			this.inverseConversionCache.put(m, f);
		}

		return cvt;
	}

	/**
	 * Returns the shot detector in use.
	 *
	 * @return The shot detector being used.
	 */
	public VideoShotDetector<MBFImage> getShotDetector()
	{
		return this.shotDetector;
	}

	// ======================================================================
	/**
	 * Returns a face recogniser engine, either loaded from a pre-trained file
	 * or created from the configured recognition strategy options.
	 *
	 * @param recogniserFile The pre-trained recogniser file to load (may be null)
	 * @return The face recogniser engine
	 * @throws IOException If the recogniser file could not be loaded
	 */
	@SuppressWarnings("unchecked")
	private FaceRecognitionEngine<DetectedFace, String> getFaceRecogniserEngine(final File recogniserFile)
			throws IOException
	{
		// If we have a pre-trained file to load, load it in.
		if (recogniserFile != null && recogniserFile.exists())
		{
			System.out.println("Loading existing recogniser from " + recogniserFile + " to update...");

			final FaceRecognitionEngine<DetectedFace, String> fre = FaceRecognitionEngine
					.load(recogniserFile);
			return fre;
		}

		// No pre-trained file? Then just create a new, clean, fresh and sparkly
		// new engine.
		try
		{
			// This is a bit of a hack:
			// We look for a field called "threshold" in the strategy and set
			// the threshold to the value in the options. If the field doesn't
			// exist, we'll ignore it.
			final Field f = this.options.strategy.getClass().getDeclaredField("threshold");
			f.setAccessible(true);
			f.setFloat(this.options.strategy, this.options.threshold);
			System.out.println("Field: " + f);
		} catch (final NoSuchFieldException e)
		{
			System.out.println("WARNING: No threshold field to set in " + this.options.strategy + ".");
		} catch (final SecurityException e)
		{
			System.out.println("WARNING: No threshold field to set in " + this.options.strategy + ".");
		} catch (final IllegalArgumentException e)
		{
			e.printStackTrace();
		} catch (final IllegalAccessException e)
		{
			e.printStackTrace();
		}
		final RecognitionEngineProvider<?> o = this.options.strategy.getOptions();
		return (FaceRecognitionEngine<DetectedFace, String>) o.createRecognitionEngine();
	}

	/**
	 *	Resets the state of the person linker. Call this prior to running a new
	 *	video through the linker. This will remove the triples and the tracking
	 *	state from a previous run (the trained recogniser is kept).
	 */
	public void reset()
	{
		this.triples.clear();
		this.linkedRange.clear();
		this.trackedFacesMap.clear();
		this.trainingFaces.clear();
		this.trainingExamplesCount.clear();
		this.conversionCache.clear();
		this.inverseConversionCache.clear();
		this.tracker.reset();
		this.currentlyTracking = false;
	}

	/**
	 *	Add a triple to the list of triples.
	 *	@param subject The subject
	 *	@param predicate The predicate
	 *	@param object The object
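	 *	<p>
	 *	For example, {@code addTriple("v", "hasFrame", "v_0:00:01")} leaves the
	 *	triples map containing {@code {"v" -> {"hasFrame" -> ["v_0:00:01"]}}}.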
	 */
	private void addTriple( final String subject, final String predicate, final String object )
	{
		Map<String, List<String>> s = this.triples.get(subject);
		if( s == null )
		{
			s = new HashMap<String, List<String>>();
			this.triples.put( subject, s );
		}

		List<String> p = s.get( predicate );
		if( p == null )
		{
			p = new ArrayList<String>();
			s.put( predicate, p );
		}

		p.add( object );
	}

	/**
	 *	Returns the triples generated by this linker.
	 *	@return The triples, indexed by subject and then predicate
	 */
	private Map<String, Map<String, List<String>>> getTriples()
	{
		return this.triples;
	}

	/**
	 *	Get the latest range for the given person; that is, the range with
	 *	the greatest end timecode.
	 *	@param person The person to get the latest range for
	 *	@return The latest range for that person
	 */
	private IndependentPair<Timecode, Timecode> getLatest( final String person )
	{
		return Collections.max( this.linkedRange.get(person),
				new Comparator<IndependentPair<Timecode, Timecode>>()
				{
					@Override
					public int compare(
							final IndependentPair<Timecode, Timecode> o1,
							final IndependentPair<Timecode, Timecode> o2 )
					{
						// Order by end timecode, avoiding the overflow that
						// casting a millisecond difference to int could cause.
						final long diff = o1.secondObject().getTimecodeInMilliseconds()
								- o2.secondObject().getTimecodeInMilliseconds();
						return diff < 0 ? -1 : (diff > 0 ? 1 : 0);
					}
				} );
	}

	/**
	 * Parses the command-line arguments to create an options object.
	 *
	 * @param args The arguments from the command-line
	 * @return The options that were parsed from the command-line
	 */
	public static PersonLinkerOptions parseArgs(final String[] args)
	{
		final PersonLinkerOptions o = new PersonLinkerOptions();
		final CmdLineParser p = new CmdLineParser(o);
		try
		{
			p.parseArgument(args);

			if (o.displayHelp)
				throw new CmdLineException(p, "");

			if (o.inputFile != null && !o.inputFile.exists())
				throw new CmdLineException(p, "File " + o.inputFile + " does not exist.");
		} catch (final CmdLineException e)
		{
			System.err.println(e.getMessage());
			System.err.println("java PersonLinker [OPTIONS] [INPUT-FILE]");
			System.err.println("If no input file is provided, the webcam will be used.");
			p.printUsage(System.err);
			System.exit(1);
		}

		return o;
	}

	/**
	 * @param args Command-line args
	 * @throws VideoCaptureException If the webcam cannot be opened
	 */
	public static void main(final String[] args) throws VideoCaptureException
	{
		final PersonLinkerOptions o = PersonLinker.parseArgs(args);
		final PersonLinker pm = new PersonLinker(o);

		// Use the given file, or fall back to the webcam.
		Video<MBFImage> video = null;
		if (o.inputFile != null)
			video = new XuggleVideo(o.inputFile);
		else
			video = new VideoCapture(320, 240);

		System.out.println("Processing video from " + (o.inputFile == null ? "webcam" : o.inputFile));
		pm.getShotDetector().setFPS(video.getFPS());
		pm.processVideo(video);

		System.out.println( pm.linkedRange );
	}
}