I adjusted the code like this:
% Tracking demo setup: detect faces in the first frame of the video.
clear all
close all
clc
% Create a cascade detector object (Viola-Jones, frontal face by default).
faceDetector = vision.CascadeObjectDetector();
% Read the first video frame and run the face detector.
videoFileReader = vision.VideoFileReader('video2.avi');
videoFrame = step(videoFileReader);
bbox = step(faceDetector, videoFrame); % N-by-4 matrix of [x y w h] boxes
[N M] = size(bbox);                    % N = number of detected faces
% Draw the returned bounding boxes around the detected faces.
% insertShape accepts an N-by-4 matrix of boxes, so no loop is needed.
videoFrame = insertShape(videoFrame, 'rectangle', bbox);
figure; imshow(videoFrame); title('Detected face');
% Detect corner features inside each detected face region and show them
% overlaid on the annotated frame.
figure, imshow(videoFrame), hold on, title('Detected features');
punti = cell(1, N);   % per-face feature locations, used to seed the trackers
for k = 1:N
    cornerPts = detectMinEigenFeatures(rgb2gray(videoFrame), 'ROI', bbox(k,:));
    punti{k} = cornerPts.Location;   % x-y coordinates as a plain matrix
    plot(cornerPts);                 % cornerPoints plot() marks the features
end
% Create one point tracker per face and enable the bidirectional error
% constraint to make tracking more robust in the presence of noise and
% clutter.
pointTracker = cell(1, N);
oldPoints = cell(1, N);
for k = 1:N
    pointTracker{k} = vision.PointTracker('MaxBidirectionalError', 2);
    % Seed the tracker with the initial point locations and the first frame.
    initialize(pointTracker{k}, punti{k}, videoFrame);
    % Keep a copy of the points for computing the geometric transformation
    % between the points in the previous and the current frames.
    oldPoints{k} = punti{k};
end
% Create a video player window sized to the frame plus a small border.
% NOTE: the original line had a stray '...' continuation in the middle of
% the line, which turned the position vector into a comment and continued
% the statement onto the next line, swallowing the while-loop header.
videoPlayer = vision.VideoPlayer('Position', ...
    [100 100 [size(videoFrame, 2), size(videoFrame, 1)] + 30]);
while ~isDone(videoFileReader)
    % Get the next frame.
    videoFrame = step(videoFileReader);
    % Track the points of every face. Note that some points may be lost.
    for i = 1:N
        [punti{i}, isFound] = step(pointTracker{i}, videoFrame);
        visiblePoints{i} = punti{i}(isFound, :);
        oldInliers{i} = oldPoints{i}(isFound, :);
        % A similarity transform needs at least 2 point correspondences.
        if size(visiblePoints{i}, 1) >= 2
            % Estimate the geometric transformation between the old points
            % and the new points and eliminate outliers (RANSAC).
            [xform, oldInliers{i}, visiblePoints{i}] = estimateGeometricTransform(...
                oldInliers{i}, visiblePoints{i}, 'similarity', 'MaxDistance', 4);
            % Apply the transformation to the box CORNERS, not to the raw
            % [x y w h] row: transformPointsForward expects x-y coordinates.
            % (The original bbox(i:M:end) used the wrong column-major stride
            % and fed width/height in as if they were coordinates.)
            corners = transformPointsForward(xform, bbox2points(bbox(i, :)));
            % Draw the transformed box as a polygon around the tracked face.
            videoFrame = insertShape(videoFrame, 'Polygon', ...
                reshape(corners', 1, []), 'LineWidth', 2);
            % Rebuild the axis-aligned [x y w h] box for the next iteration.
            topLeft = min(corners, [], 1);
            bbox(i, :) = [topLeft, max(corners, [], 1) - topLeft];
            % Mark the tracked points on the frame itself. plot() on a raw
            % matrix would draw its columns as line series into the last
            % open figure, not marker overlays on the video.
            videoFrame = insertMarker(videoFrame, visiblePoints{i}, '+', ...
                'Color', 'white');
            % Reset the points.
            oldPoints{i} = visiblePoints{i};
            setPoints(pointTracker{i}, oldPoints{i});
        end
    end
    % Display the annotated video frame using the video player object.
    step(videoPlayer, videoFrame);
end
% Clean up: release the reader, the player, and every tracker.
release(videoFileReader);
release(videoPlayer);
% pointTracker is a cell array, so release each tracker individually;
% calling release() on the cell itself raises an error.
for i = 1:N
    release(pointTracker{i});
end
_____________________________________________________________________
but it doesn't work very well. Any help, please?


