Showing posts with label Basics. Show all posts

Calculating absolute values with scaling

void convertScaleAbs(InputArray src, OutputArray dst, double alpha=1, double beta=0)
        - Scales, calculates absolute values, and converts the result to 8-bit.

Parameters:
  • src – input array.
  • dst – output array.
  • alpha – optional scale factor.
  • beta – optional delta added to the scaled values.
 You can find some applications here - Application1, Application2.


Window management in OpenCV


void namedWindow(const string& winname, int flags=WINDOW_AUTOSIZE )

Parameters:
  • winname – Name of the window in the window caption that may be used as a window identifier.
  • flags – Flags of the window. Currently the only supported flag is CV_WINDOW_AUTOSIZE. If this is set, the window size is automatically adjusted to fit the displayed image, and you cannot change the window size manually.
  • CV_WINDOW_NORMAL - On Qt back end, you may use this to allow window resize. The image will resize itself according to the current window size. By using the | operator you also need to specify if you would like the image to keep its aspect ratio (CV_WINDOW_KEEPRATIO) or not (CV_WINDOW_FREERATIO). 

int waitKey(int delay=0)

Parameters: delay – Delay in milliseconds. 0 is the special value that means “forever”.

Click here for Examples.

Rotate image

void warpAffine(InputArray src, OutputArray dst, InputArray M, Size dsize, int flags=INTER_LINEAR, int borderMode=BORDER_CONSTANT, const Scalar& borderValue=Scalar())

Parameters:
  • src – input image.
  • dst – output image that has the size dsize and the same type as src .
  • M – 2\times 3 transformation matrix.
  • dsize – size of the output image.
  • flags – combination of interpolation methods (see resize() ) and the optional flag WARP_INVERSE_MAP that means that M is the inverse transformation ( \texttt{dst}\rightarrow\texttt{src} ).
  • borderMode – pixel extrapolation method (see borderInterpolate()); when borderMode=BORDER_TRANSPARENT , it means that the pixels in the destination image corresponding to the “outliers” in the source image are not modified by the function.
  • borderValue – value used in case of a constant border; by default, it is 0.

The "rotate" function takes an image and returns the rotated image.

Example:

-----------
#include <opencv2/highgui/highgui.hpp>
#include <opencv2/imgproc/imgproc.hpp>
#include <iostream>

using namespace cv;
using namespace std;

// Rotates an image by `angle` degrees (counter-clockwise) about its center.
// The output keeps the input's size, so rotated corners may be clipped.
Mat rotate(Mat src, double angle)
{
    // Rotation center is the geometric middle of the image.
    const Point2f center(src.cols / 2., src.rows / 2.);

    // 2x3 affine matrix for the rotation (scale factor 1.0).
    const Mat rotMat = getRotationMatrix2D(center, angle, 1.0);

    Mat rotated;
    warpAffine(src, rotated, rotMat, Size(src.cols, src.rows));
    return rotated;
}

int main()
{
    Mat src = imread("lena.jpg");

    Mat dst;
    dst = rotate(src, 10);

    imshow("src", src);
    imshow("dst", dst);
    waitKey(0);
    return 0;
}
-----------

Result:


Histogram Equalization

void equalizeHist(InputArray src, OutputArray dst)
Parameters:
  • src – Source 8-bit single channel image.
  • dst – Destination image of the same size and type as src .

Example:

----------
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int main( )
{
 Mat src1;
 src1 = imread("lena.jpg", CV_LOAD_IMAGE_COLOR);
 
 Mat gray, dst;

 // convert to gray
 cvtColor(src1, gray, CV_BGR2GRAY);
 namedWindow( "Original image", CV_WINDOW_AUTOSIZE );
 imshow( "Original image", gray );

 // hisogram equalization
 equalizeHist( gray, dst );

 namedWindow("image", CV_WINDOW_AUTOSIZE);
 imshow("image", dst);

 waitKey(0);                                       
 return 0;
}
----------

Result:


Select a region on mouse click

The following program allows the user to select a point on the image and extracts the region around that point, from which it extracts information such as mean and variance of the locality.

void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata=0 )
- Sets mouse handler for the specified window
Parameters:
  • winname – Window name
  • onMouse – Mouse callback.
  • userdata – The optional parameter passed to the callback.

Example:

-----------
#include "opencv2/opencv.hpp"
#include<cmath>
#include "iostream"

using namespace cv;
using namespace std;

int var1=1;
Point pos;
int radius = 7;

void CallBackFunc(int event, int x, int y, int flags, void* userdata)
{
    if  ( event == EVENT_LBUTTONDOWN )
    {
        pos.x=x;
        pos.y=y;
        var1=0;
    }
}


int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened())  // check if we succeeded
        return -1;

    namedWindow("Video",1);    
    int area = (2*radius+1)*(2*radius+1);
    
    Mat frame;        

    //select the region on the image
    while(var1==1)
    {
        //set the callback function for any mouse event
        setMouseCallback("Video", CallBackFunc, NULL);

        cap >> frame;         // get a new frame from camera
        //show the image
        imshow("Video", frame);

        // Wait until user press some key
        waitKey(10);    
    }
        
    GaussianBlur( frame, frame, Size( 15, 15 ), 0, 0 );imshow("blur",frame);
    Mat imgHSV,out;
    cvtColor(frame, imgHSV, CV_BGR2HSV);    imshow("HSV",imgHSV);
    Mat final;
    final = frame;

    float blue=0, green=0,red=0, b_var=0, g_var=0, r_var=0;
    for(int var2=pos.y-radius;var2<=pos.y+radius;var2++)
        for(int var3=pos.x-radius;var3<=pos.x+radius;var3++)
        {
            Vec3b intensity = final.at<Vec3b>(var2,var3);   
            blue = blue + intensity.val[0];
            green = green + intensity.val[1];
            red = red + intensity.val[2];
        }
    red=red/area; green=green/area; blue=blue/area;

    for(int var2=pos.y-radius;var2<=pos.y+radius;var2++)
        for(int var3=pos.x-radius;var3<=pos.x+radius;var3++)
        {
            Vec3b intensity = final.at<Vec3b>(var2,var3);   
            b_var = b_var + (intensity.val[0]-blue)*(intensity.val[0]-blue);
            g_var = g_var + (intensity.val[1]-green)*(intensity.val[1]-green);
            r_var = r_var + (intensity.val[2]-red)*(intensity.val[2]-red);
        }    
    
    r_var/=area; g_var/=area; b_var/=area;
    r_var=sqrt(r_var); g_var=sqrt(g_var); b_var=sqrt(b_var);
    cout<<red<<" "<<green<<"  "<<blue<<" "<<r_var<<" "<<g_var<<" "<<b_var;



    inRange(final,Scalar(blue-b_var,green-g_var,red-r_var),Scalar(blue+b_var,green+g_var,red+r_var),out);imshow("out",out);

    waitKey(0);
    return 0;
}
-----------

Detect Mouse Clicks and Moves on Image Window

void setMouseCallback(const string& winname, MouseCallback onMouse, void* userdata = 0)

This function sets a callback function to be called every time any mouse event occurs in the specified window. Here is the detailed explanation of each parameter of the above OpenCV function.
    • winname - Name of the OpenCV window. All mouse events related to this window will be registered
    • onMouse - Name of the callback function. Whenever mouse events related to the above window occur, this callback function will be called. This function should have the signature like the following
      • void FunctionName(int event, int x, int y, int flags, void* userdata)
        • event - Type of the mouse event. These are the entire list of mouse events
          • EVENT_MOUSEMOVE
          • EVENT_LBUTTONDOWN
          • EVENT_RBUTTONDOWN
          • EVENT_MBUTTONDOWN
          • EVENT_LBUTTONUP
          • EVENT_RBUTTONUP
          • EVENT_MBUTTONUP
          • EVENT_LBUTTONDBLCLK
          • EVENT_RBUTTONDBLCLK
          • EVENT_MBUTTONDBLCLK
        • x - x coordinate of the mouse event
        • y - y coordinate of the mouse event
        • flags - Specific condition whenever a mouse event occurs. See the next OpenCV example code for the usage of this parameter. Here is the entire list of enum values which will be possessed by "flags"
          • EVENT_FLAG_LBUTTON
          • EVENT_FLAG_RBUTTON
          • EVENT_FLAG_MBUTTON
          • EVENT_FLAG_CTRLKEY
          • EVENT_FLAG_SHIFTKEY
          • EVENT_FLAG_ALTKEY
        • userdata - Any pointer passes to the "setMouseCallback" function as the 3rd parameter (see below)
    • userdata - This pointer will be passed to the callback function
This program takes care of mouse button press and mouse movements.

This may be helpful while you want to select a region on an image and perform certain task in that region only.

Example:

-----------
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

using namespace std;
using namespace cv;

void CallBackFunc(int event, int x, int y, int flags, void* userdata)
{
    if  ( event == EVENT_LBUTTONDOWN )
    {
        cout << "Left button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
    }
    else if  ( event == EVENT_RBUTTONDOWN )
    {
        cout << "Right button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
    }
    else if  ( event == EVENT_MBUTTONDOWN )
    {
        cout << "Middle button of the mouse is clicked - position (" << x << ", " << y << ")" << endl;
    }
    else if ( event == EVENT_MOUSEMOVE )
    {
        cout << "Mouse move over the window - position (" << x << ", " << y << ")" << endl;
    }    
}

int main()
{
    // Read image from file 
    Mat img = imread("lena.JPG");

    //if fail to read the image
    if ( img.empty() ) 
    { 
        cout << "Error loading the image" << endl;
        return -1; 
    }

    //Create a window
    namedWindow("ImageDisplay", 1);

    //set the callback function for any mouse event
    setMouseCallback("ImageDisplay", CallBackFunc, NULL);

    //show the image
    imshow("ImageDisplay", img);

    // Wait until user press some key
    waitKey(0);

    return 0;

}
-----------

Example 2:

This code will detect left mouse clicks while pressing the "CTRL" key , right mouse clicks while pressing the "SHIFT" key and movements of the mouse over the OpenCV window while pressing the "ALT" key.
------------
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

using namespace std;
using namespace cv;

void CallBackFunc(int event, int x, int y, int flags, void* userdata)
{
    if ( flags == (EVENT_FLAG_CTRLKEY + EVENT_FLAG_LBUTTON) )
    {
        cout << "Left mouse button is clicked while pressing CTRL key - position (" << x << ", " << y << ")" << endl;
    }
    else if ( flags == (EVENT_FLAG_RBUTTON + EVENT_FLAG_SHIFTKEY) )
    {
        cout << "Right mouse button is clicked while pressing SHIFT key - position (" << x << ", " << y << ")" << endl;
    }
    else if ( event == EVENT_MOUSEMOVE && flags == EVENT_FLAG_ALTKEY)
    {
        cout << "Mouse is moved over the window while pressing ALT key - position (" << x << ", " << y << ")" << endl;
    }
}

int main(int argc, char** argv)
{
    // Read image from file 
    Mat img = imread("lena.JPG");

    //if fail to read the image
    if ( img.empty() ) 
    { 
        cout << "Error loading the image" << endl;
        return -1; 
    }

    //Create a window
    namedWindow("My Window", 1);

    //set the callback function for any mouse event
    setMouseCallback("My Window", CallBackFunc, NULL);

    //show the image
    imshow("My Window", img);

    // Wait until user press some key
    waitKey(0);

    return 0;
}
------------

Example 3: 

This code will extract color information at a pixel location in an image.
------------
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

using namespace std;
using namespace cv;

void mouseEvent(int evt, int x, int y, int flags, void* param) 
{                    
    Mat* rgb = (Mat*) param;
    if (evt == CV_EVENT_LBUTTONDOWN) 
    { 
        printf("%d %d: %d, %d, %d\n", 
        x, y, 
        (int)(*rgb).at<Vec3b>(y, x)[0], 
        (int)(*rgb).at<Vec3b>(y, x)[1], 
        (int)(*rgb).at<Vec3b>(y, x)[2]); 
    }         
}

int main(int argc, char** argv)
{
    // Read image from file
    Mat img = imread("lena.JPG");

    //if fail to read the image
    if ( img.empty() )
    {
        cout << "Error loading the image" << endl;
        return -1;
    }

    //Create a window
    namedWindow("My Window", 1);

    //set the callback function for any mouse event
    setMouseCallback("My Window", mouseEvent, &img);

    //show the image
    imshow("My Window", img);

    // Wait until user press some key
    waitKey(0);

    return 0;
} 
------------

Example 4:

------------
#include "opencv2/highgui/highgui.hpp"
#include <iostream>

using namespace std;
using namespace cv;

void on_mouse( int e, int x, int y, int d, void *ptr )
{
    Point*p = (Point*)ptr;
    p->x = x;
    p->y = y;
    cout<<*p;
}

int main(int argc, char** argv)
{
    // Read image from file
    Mat img = imread("lena.JPG");

    //if fail to read the image
    if ( img.empty() )
    {
        cout << "Error loading the image" << endl;
        return -1;
    }

    //Create a window
    namedWindow("My Window", 1);

    //set the callback function for any mouse event
    Point p;
    setMouseCallback("My Window", on_mouse, &p );

    //show the image
    imshow("My Window", img);
    
    // Wait until user press some key
    waitKey(0);
    return 0;
}
------------
Sources:
http://opencv-srf.blogspot.in/2011/11/mouse-events.html
http://stackoverflow.com/questions/14874449/opencv-set-mouse-callback
http://stackoverflow.com/questions/15570431/opencv-return-value-from-mouse-callback-function

Resize image

void resize(InputArray src, OutputArray dst, Size dsize, double fx=0, double fy=0, int interpolation=INTER_LINEAR )
       - The function resize resizes the image src down to or up to the specified size.
Parameters:
  • src – input image.
  • dst – output image; it has the size dsize (when it is non-zero) or the size computed from src.size(), fx, and fy; the type of dst is the same as of src.
  • dsize – output image size; if it equals zero, it is computed as:
    \texttt{dsize = Size(round(fx*src.cols), round(fy*src.rows))}
    Either dsize or both fx and fy must be non-zero.
  • fx – scale factor along the horizontal axis; when it equals 0, it is computed as
    \texttt{(double)dsize.width/src.cols}
  • fy – scale factor along the vertical axis; when it equals 0, it is computed as
    \texttt{(double)dsize.height/src.rows}
  • interpolation – interpolation method:
    • INTER_NEAREST - a nearest-neighbor interpolation
    • INTER_LINEAR - a bilinear interpolation (used by default)
    • INTER_AREA - resampling using pixel area relation. It may be a preferred method for image decimation, as it gives moire’-free results. But when the image is zoomed, it is similar to the INTER_NEAREST method.
    • INTER_CUBIC - a bicubic interpolation over 4x4 pixel neighborhood
    • INTER_LANCZOS4 - a Lanczos interpolation over 8x8 pixel neighborhood
Example:
resize(src, dst, dst.size(), 0, 0, interpolation_method);


Bitwise AND, OR, XOR and NOT

void bitwise_and(InputArray src1, InputArray src2, OutputArray dst, InputArray mask=noArray())
Calculates the per-element bit-wise conjunction of two arrays or an array and a scalar.
Parameters:
  • src1 – first input array or a scalar.
  • src2 – second input array or a scalar.
  • src – single input array.
  • value – scalar value.
  • dst – output array that has the same size and type as the input arrays.
  • mask – optional operation mask, 8-bit single channel array, that specifies elements of the output array to be changed.
The function calculates the per-element bit-wise logical conjunction for:
  • Two arrays when src1 and src2 have the same size:
    \texttt{dst} (I) =  \texttt{src1} (I)  \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0
  • An array and a scalar when src2 is constructed from Scalar or has the same number of elements as src1.channels():
    \texttt{dst} (I) =  \texttt{src1} (I)  \wedge \texttt{src2} \quad \texttt{if mask} (I) \ne0
  • A scalar and an array when src1 is constructed from Scalar or has the same number of elements as src2.channels():
    \texttt{dst} (I) =  \texttt{src1}  \wedge \texttt{src2} (I) \quad \texttt{if mask} (I) \ne0
void bitwise_not(InputArray src, OutputArray dst, InputArray mask=noArray())
Inverts every bit of an array.
void bitwise_or(InputArray src1, InputArray src2, OutputArray dst, InputArray mask=noArray())
Calculates the per-element bit-wise disjunction of two arrays or an array and a scalar.
void bitwise_xor(InputArray src1, InputArray src2, OutputArray dst, InputArray mask=noArray())
Calculates the per-element bit-wise “exclusive or” operation on two arrays or an array and a scalar.

Example:

-----------
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

using namespace cv;
using namespace std;

int main(  )
{
    Mat drawing1 = Mat::zeros( Size(400,200), CV_8UC1 );
    Mat drawing2 = Mat::zeros( Size(400,200), CV_8UC1 );

    drawing1(Range(0,drawing1.rows),Range(0,drawing1.cols/2))=255; imshow("drawing1",drawing1);
    drawing2(Range(100,150),Range(150,350))=255; imshow("drawing2",drawing2);

    Mat res;
    bitwise_and(drawing1,drawing2,res);     imshow("AND",res);
    bitwise_or(drawing1,drawing2,res);      imshow("OR",res);
    bitwise_xor(drawing1,drawing2,res);     imshow("XOR",res);
    bitwise_not(drawing1,res);              imshow("NOT",res);


    waitKey(0);
    return(0);
}
-----------

 Result:


Histogram Calculation

void calcHist(const Mat* images, int nimages, const int* channels, InputArray mask, OutputArray hist, int dims, const int* histSize, const float** ranges, bool uniform=true, bool accumulate=false )
Calculates a histogram of a set of arrays.
Parameters:
  • images – Source arrays. They all should have the same depth, CV_8U or CV_32F , and the same size. Each of them can have an arbitrary number of channels.
  • nimages – Number of source images.
  • channels – List of the dims channels used to compute the histogram. The first array channels are numerated from 0 to images[0].channels()-1 , the second array channels are counted from images[0].channels() to images[0].channels() + images[1].channels()-1, and so on.
  • mask – Optional mask. If the matrix is not empty, it must be an 8-bit array of the same size as images[i] . The non-zero mask elements mark the array elements counted in the histogram.
  • hist – Output histogram, which is a dense or sparse dims -dimensional array.
  • dims – Histogram dimensionality that must be positive and not greater than CV_MAX_DIMS (equal to 32 in the current OpenCV version).
  • histSize – Array of histogram sizes in each dimension.
  • ranges – Array of the dims arrays of the histogram bin boundaries in each dimension. When the histogram is uniform ( uniform =true), then for each dimension i it is enough to specify the lower (inclusive) boundary L_0 of the 0-th histogram bin and the upper (exclusive) boundary U_{\texttt{histSize}[i]-1} for the last histogram bin histSize[i]-1 . That is, in case of a uniform histogram each of ranges[i] is an array of 2 elements. When the histogram is not uniform ( uniform=false ), then each of ranges[i] contains histSize[i]+1 elements: L_0, U_0=L_1, U_1=L_2, ..., U_{\texttt{histSize[i]}-2}=L_{\texttt{histSize[i]}-1}, U_{\texttt{histSize[i]}-1} . The array elements, that are not between L_0 and U_{\texttt{histSize[i]}-1} , are not counted in the histogram.
  • uniform – Flag indicating whether the histogram is uniform or not (see above).
  • accumulate – Accumulation flag. If it is set, the histogram is not cleared in the beginning when it is allocated. This feature enables you to compute a single histogram from several sets of arrays, or to update the histogram in time.
void normalize(InputArray src, OutputArray dst, double alpha=1, double beta=0, int norm_type=NORM_L2, int dtype=-1, InputArray mask=noArray() )
                           (or)
void normalize(const SparseMat& src, SparseMat& dst, double alpha, int normType)
Normalizes the norm or value range of an array.
 Parameters:
  • src – input array.
  • dst – output array of the same size as src .
  • alpha – norm value to normalize to or the lower range boundary in case of the range normalization.
  • beta – upper range boundary in case of the range normalization; it is not used for the norm normalization.
  • normType – normalization type (NORM_MINMAX, NORM_INF, NORM_L1, or NORM_L2).
  • dtype – when negative, the output array has the same type as src; otherwise, it has the same number of channels as src and the depth =CV_MAT_DEPTH(dtype).
  • mask – optional operation mask.
The functions normalize scale and shift the input array elements so that
                        \| \texttt{dst} \| _{L_p} = \texttt{alpha}
(where p=Inf, 1 or 2) when normType=NORM_INF, NORM_L1, or NORM_L2, respectively; or so that
                        \min _I  \texttt{dst} (I)= \texttt{alpha} , \, \, \max _I  \texttt{dst} (I)= \texttt{beta}
when normType=NORM_MINMAX (for dense arrays only). The optional mask specifies a sub-array to be normalized. This means that the norm or min-n-max are calculated over the sub-array, and then this sub-array is modified to be normalized.

Example:

Find some more examples in OpenCV documentation. (example 1, example 2)
-----------
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>

using namespace std;
using namespace cv;

int main(int, char**)
{
    Mat gray=imread("image.jpg",0);
    namedWindow( "Gray", 1 );    imshow( "Gray", gray );

    // Initialize parameters
    int histSize = 256;    // bin size
    float range[] = { 0, 255 };
    const float *ranges[] = { range };

    // Calculate histogram
    MatND hist;
    calcHist( &gray, 1, 0, Mat(), hist, 1, &histSize, ranges, true, false );
    
    // Show the calculated histogram in command window
    double total;
    total = gray.rows * gray.cols;
    for( int h = 0; h < histSize; h++ )
         {
            float binVal = hist.at<float>(h);
            cout<<" "<<binVal;
         }

    // Plot the histogram
    int hist_w = 512; int hist_h = 400;
    int bin_w = cvRound( (double) hist_w/histSize );

    Mat histImage( hist_h, hist_w, CV_8UC1, Scalar( 0,0,0) );
    normalize(hist, hist, 0, histImage.rows, NORM_MINMAX, -1, Mat() );
    
    for( int i = 1; i < histSize; i++ )
    {
      line( histImage, Point( bin_w*(i-1), hist_h - cvRound(hist.at<float>(i-1)) ) ,
                       Point( bin_w*(i), hist_h - cvRound(hist.at<float>(i)) ),
                       Scalar( 255, 0, 0), 2, 8, 0  );
    }

    namedWindow( "Result", 1 );    imshow( "Result", histImage );

    waitKey(0);    
    return 0;
}
-----------

Result:


Split and Merge functions

void split(InputArray m, OutputArrayOfArrays mv)
Divides a multi-channel array into several single-channel arrays.
Parameters:
  • src – input multi-channel array.
  • mv – output array or vector of arrays; in the first variant of the function the number of arrays must match src.channels(); the arrays themselves are reallocated, if needed.
void merge(InputArrayOfArrays mv, OutputArray dst)
Creates one multichannel array out of several single-channel ones.
Parameters:
  • mv – input array or vector of matrices to be merged; all the matrices in mv must have the same size and the same depth.
  • dst – output array of the same size and the same depth as mv[0]; The number of channels will be the total number of channels in the matrix array.

Example:

-----------
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;

int main()
{
    Mat src=imread("image.jpg",1);    
    namedWindow("src",1);imshow("src",src);

    // Split the image into different channels
    vector<Mat> rgbChannels(3);
    split(src, rgbChannels);

    // Show individual channels
    Mat g, fin_img;
    g = Mat::zeros(Size(src.cols, src.rows), CV_8UC1);
    
    // Showing Red Channel
    // G and B channels are kept as zero matrix for visual perception
    {
    vector<Mat> channels;
    channels.push_back(g);
    channels.push_back(g);
    channels.push_back(rgbChannels[2]);

    /// Merge the three channels
    merge(channels, fin_img);
    namedWindow("R",1);imshow("R", fin_img);
    }

    // Showing Green Channel
    {
    vector<Mat> channels;
    channels.push_back(g);
    channels.push_back(rgbChannels[1]);
    channels.push_back(g);    
    merge(channels, fin_img);
    namedWindow("G",1);imshow("G", fin_img);
    }

    // Showing Blue Channel
    {
    vector<Mat> channels;
    channels.push_back(rgbChannels[0]);
    channels.push_back(g);
    channels.push_back(g);
    merge(channels, fin_img);
    namedWindow("B",1);imshow("B", fin_img);
    }

    waitKey(0);
    return 0;
}

-----------

Basic drawing examples

Drawing a line

void line(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
 Parameters:
  • img – Image.
  • pt1 – First point of the line segment.
  • pt2 – Second point of the line segment.
  • color – Line color.
  • thickness – Line thickness.
  • lineType – Type of the line:
    • 8 (or omitted) - 8-connected line.
    • 4 - 4-connected line.
    • CV_AA - antialiased line.
  • shift – Number of fractional bits in the point coordinates.

Example 1: Drawing a line

-------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  
  // Draw a line 
  line( image, Point( 15, 20 ), Point( 70, 50), Scalar( 110, 220, 0 ),  2, 8 );
  imshow("Image",image);

  waitKey( 0 );
  return(0);
}
-------------

Drawing a Circle

void circle(Mat& img, Point center, int radius, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
Parameters:
  • img – Image where the circle is drawn.
  • center – Center of the circle.
  • radius – Radius of the circle.
  • color – Circle color.
  • thickness – Thickness of the circle outline, if positive. Negative thickness means that a filled circle is to be drawn.
  • lineType – Type of the circle boundary. See the line() description.
  • shift – Number of fractional bits in the coordinates of the center and in the radius value.
The function circle draws a simple or filled circle with a given center and radius.

Example 2: Drawing a Circle

-------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  
  // Draw a circle 
  circle( image, Point( 200, 200 ), 32.0, Scalar( 0, 0, 255 ), 1, 8 );
  imshow("Image",image);

  waitKey( 0 );
  return(0);
}
-------------

Drawing an Ellipse

void ellipse(Mat& img, Point center, Size axes, double angle, double startAngle, double endAngle, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
Parameters:
  • img – Image.
  • center – Center of the ellipse.
  • axes – Length of the ellipse axes.
  • angle – Ellipse rotation angle in degrees.
  • startAngle – Starting angle of the elliptic arc in degrees.
  • endAngle – Ending angle of the elliptic arc in degrees.
  • box – Alternative ellipse representation via RotatedRect or CvBox2D. This means that the function draws an ellipse inscribed in the rotated rectangle.
  • color – Ellipse color.
  • thickness – Thickness of the ellipse arc outline, if positive. Otherwise, this indicates that a filled ellipse sector is to be drawn.
  • lineType – Type of the ellipse boundary. See the line() description.
  • shift – Number of fractional bits in the coordinates of the center and values of axes.
The functions ellipse with less parameters draw an ellipse outline, a filled ellipse, an elliptic arc, or a filled ellipse sector. A piecewise-linear curve is used to approximate the elliptic arc boundary.

If you use the first variant of the function and want to draw the whole ellipse, not an arc, pass startAngle=0 and endAngle=360.

Example 3: Drawing an Ellipse

-------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  
  // Draw a ellipse 
  ellipse( image, Point( 200, 200 ), Size( 100.0, 160.0 ), 45, 0, 360, Scalar( 255, 0, 0 ), 1, 8 );
  ellipse( image, Point( 200, 200 ), Size( 100.0, 160.0 ), 135, 0, 360, Scalar( 255, 0, 0 ), 10, 8 );
  ellipse( image, Point( 200, 200 ), Size( 150.0, 50.0 ), 135, 0, 360, Scalar( 0, 255, 0 ), 1, 8 );
  imshow("Image",image);

  waitKey( 0 );
  return(0);
}
-------------

Drawing a Rectangle

void rectangle(Mat& img, Point pt1, Point pt2, const Scalar& color, int thickness=1, int lineType=8, int shift=0)
Parameters:
  • img – Image.
  • pt1 – Vertex of the rectangle.
  • pt2 – Vertex of the rectangle opposite to pt1 .
  • rec – Alternative specification of the drawn rectangle.
  • color – Rectangle color or brightness (grayscale image).
  • thickness – Thickness of lines that make up the rectangle. Negative values, like CV_FILLED , mean that the function has to draw a filled rectangle.
  • lineType – Type of the line. See the line() description.
  • shift – Number of fractional bits in the point coordinates.

Example 4: Drawing a Rectangle

-------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  
  // Draw a rectangle ( 5th argument is not -ve)
  rectangle( image, Point( 15, 20 ), Point( 70, 50), Scalar( 0, 55, 255 ), +1, 4 );
  imshow("Image1",image);
  // Draw a filled rectangle ( 5th argument is -ve)
  rectangle( image, Point( 115, 120 ), Point( 170, 150), Scalar( 100, 155, 25 ), -1, 8 );
  imshow("Image2",image);

  waitKey( 0 );
  return(0);
}
-------------

Drawing a Filled Polygon

void fillPoly(Mat& img, const Point** pts, const int* npts, int ncontours, const Scalar& color, int lineType=8, int shift=0, Point offset=Point() )
Parameters:
  • img – Image.
  • pts – Array of polygons where each polygon is represented as an array of points.
  • npts – Array of polygon vertex counters.
  • ncontours – Number of contours that bind the filled region.
  • color – Polygon color.
  • lineType – Type of the polygon boundaries. See the line() description.
  • shift – Number of fractional bits in the vertex coordinates.
  • offset – Optional offset of all points of the contours.
The function fillPoly fills an area bounded by several polygonal contours. The function can fill complex areas, for example, areas with holes, contours with self-intersections (some of their parts), and so forth.

Example 4: Drawing a Filled Polygon

-------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{    
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  
  int w=400;
  // Draw a circle 
  /** Create some points */
  Point rook_points[1][20];
  rook_points[0][0] = Point( w/4.0, 7*w/8.0 );
  rook_points[0][1] = Point( 3*w/4.0, 7*w/8.0 );
  rook_points[0][2] = Point( 3*w/4.0, 13*w/16.0 );
  rook_points[0][3] = Point( 11*w/16.0, 13*w/16.0 );
  rook_points[0][4] = Point( 19*w/32.0, 3*w/8.0 );
  rook_points[0][5] = Point( 3*w/4.0, 3*w/8.0 );
  rook_points[0][6] = Point( 3*w/4.0, w/8.0 );
  rook_points[0][7] = Point( 26*w/40.0, w/8.0 );
  rook_points[0][8] = Point( 26*w/40.0, w/4.0 );
  rook_points[0][9] = Point( 22*w/40.0, w/4.0 );
  rook_points[0][10] = Point( 22*w/40.0, w/8.0 );
  rook_points[0][11] = Point( 18*w/40.0, w/8.0 );
  rook_points[0][12] = Point( 18*w/40.0, w/4.0 );
  rook_points[0][13] = Point( 14*w/40.0, w/4.0 );
  rook_points[0][14] = Point( 14*w/40.0, w/8.0 );
  rook_points[0][15] = Point( w/4.0, w/8.0 );
  rook_points[0][16] = Point( w/4.0, 3*w/8.0 );
  rook_points[0][17] = Point( 13*w/32.0, 3*w/8.0 );
  rook_points[0][18] = Point( 5*w/16.0, 13*w/16.0 );
  rook_points[0][19] = Point( w/4.0, 13*w/16.0) ;

  const Point* ppt[1] = { rook_points[0] };
  int npt[] = { 20 };

  fillPoly( image, ppt, npt, 1, Scalar( 255, 255, 255 ), 8 );
  imshow("Image",image);

  waitKey( 0 );
  return(0);
}
-------------

Putting Text in image

putText renders the specified text string in the image.

void putText(Mat& img, const string& text, Point org, int fontFace, double fontScale, Scalar color, int thickness=1, int lineType=8, bool bottomLeftOrigin=false )
Parameters:
  • img – Image.
  • text – Text string to be drawn.
  • org – Bottom-left corner of the text string in the image.
  • fontFace – Font type. One of FONT_HERSHEY_SIMPLEX, FONT_HERSHEY_PLAIN, FONT_HERSHEY_DUPLEX, FONT_HERSHEY_COMPLEX, FONT_HERSHEY_TRIPLEX, FONT_HERSHEY_COMPLEX_SMALL, FONT_HERSHEY_SCRIPT_SIMPLEX, or FONT_HERSHEY_SCRIPT_COMPLEX, where each of the font ID’s can be combined with FONT_HERSHEY_ITALIC to get the slanted letters.
  • fontScale – Font scale factor that is multiplied by the font-specific base size.
  • color – Text color.
  • thickness – Thickness of the lines used to draw a text.
  • lineType – Line type. See the line for details.
  • bottomLeftOrigin – When true, the image data origin is at the bottom-left corner. Otherwise, it is at the top-left corner.

Example 5: Putting Text in image

-------------
#include <opencv2/core/core.hpp>
#include <opencv2/highgui/highgui.hpp>
using namespace cv;

int main( )
{ 
  // Create black empty images
  Mat image = Mat::zeros( 400, 400, CV_8UC3 );
  
  putText(image, "Hi all...", Point(50,100), FONT_HERSHEY_SIMPLEX, 1, Scalar(0,200,200), 4);
  imshow("Image",image);

  waitKey( 0 );
  return(0);
}
-------------

Source:
http://docs.opencv.org/modules/core/doc/drawing_functions.html?highlight=rectangle#void%20line%28Mat&%20img,%20Point%20pt1,%20Point%20pt2,%20const%20Scalar&%20color,%20int%20thickness,%20int%20lineType,%20int%20shift%29

Face Detection using Haar-Cascade Classifier

class CascadeClassifier - Cascade classifier class for object detection

CascadeClassifier::CascadeClassifier(const string& filename) // Constructor - Loads a classifier from a file
CascadeClassifier face_cascade( "C:/OpenCV243/data/Haarcascades/haarcascade_frontalface_alt2.xml" );

bool CascadeClassifier::empty() const // Checks whether the classifier has been loaded
bool CascadeClassifier::load(const string& filename) // Loads a classifier from a file
CascadeClassifier face_cascade;
face_cascade.load( "C:/OpenCV243/data/Haarcascades/haarcascade_frontalface_alt2.xml" ); 

bool CascadeClassifier::read(const FileNode& node) // Reads a classifier from a FileStorage node


void CascadeClassifier::detectMultiScale(const Mat& image, vector<Rect>& objects, double scaleFactor=1.1, int minNeighbors=3, int flags=0, Size minSize=Size(), Size maxSize=Size())
// Detects objects of different sizes in the input image.
// The detected objects are returned as a list of rectangles.

Parameters:   
  • cascade – Haar classifier cascade (OpenCV 1.x API only). It can be loaded from XML or YAML file using Load(). When the cascade is not needed anymore, release it using cvReleaseHaarClassifierCascade(&cascade).
  • image – Matrix of the type CV_8U containing an image where objects are detected.
  • objects – Vector of rectangles where each rectangle contains the detected object.
  • scaleFactor – Parameter specifying how much the image size is reduced at each image scale.
  • minNeighbors – Parameter specifying how many neighbors each candidate rectangle should have to retain it.
  • flags – Parameter with the same meaning for an old cascade as in the function cvHaarDetectObjects. It is not used for a new cascade.
  • minSize – Minimum possible object size. Objects smaller than that are ignored.
  • maxSize – Maximum possible object size. Objects larger than that are ignored.
bool CascadeClassifier::setImage(Ptr<FeatureEvaluator>& feval, const Mat& image)
// Sets an image for detection
Parameters:
  • cascade – Haar classifier cascade (OpenCV 1.x API only). See CascadeClassifier::detectMultiScale() for more information.
  • feval – Pointer to the feature evaluator used for computing features.
  • image – Matrix of the type CV_8UC1 containing an image where the features are computed
int CascadeClassifier::runAt(Ptr<FeatureEvaluator>& feval, Point pt, double& weight)
// Runs the detector at the specified point. The function returns 1 if the cascade classifier detects an object in the given location. Otherwise, it returns negated index of the stage at which the candidate has been rejected.
Parameters:
  • cascade – Haar classifier cascade (OpenCV 1.x API only). See CascadeClassifier::detectMultiScale() for more information.
  • feval – Feature evaluator used for computing features.
  • pt – Upper left point of the window where the features are computed. Size of the window is equal to the size of training images.

Steps:

  1. Read the image.
  2. Load Face cascade (CascadeClassifier > load)
  3. Detect faces (detectMultiScale)
  4. Draw circles on the detected faces (ellipse)
  5. Show the result.

Functions:


Example:

------------
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"

#include <iostream>
#include <stdio.h>

using namespace std;
using namespace cv;

int main( )
{
    Mat image;
    image = imread("lena.jpg", CV_LOAD_IMAGE_COLOR);  
    namedWindow( "window1", 1 );   imshow( "window1", image );

    // Load Face cascade (.xml file)
    CascadeClassifier face_cascade;
    face_cascade.load( "C:/OpenCV243/data/Haarcascades/haarcascade_frontalface_alt2.xml" );

    // Detect faces
    std::vector<Rect> faces;
    face_cascade.detectMultiScale( image, faces, 1.1, 2, 0|CV_HAAR_SCALE_IMAGE, Size(30, 30) );

    // Draw circles on the detected faces
    for( int i = 0; i < faces.size(); i++ )
    {
        Point center( faces[i].x + faces[i].width*0.5, faces[i].y + faces[i].height*0.5 );
        ellipse( image, center, Size( faces[i].width*0.5, faces[i].height*0.5), 0, 0, 360, Scalar( 255, 0, 255 ), 4, 8, 0 );
    }
    
    imshow( "Detected Face", image );
    
    waitKey(0);                   
    return 0;
}
------------

Capture Video from Camera

class VideoCapture - Class for video capturing from video files or cameras.

bool VideoCapture::open(const string& filename) // filename – name of the opened video file
bool VideoCapture::open(int device) // device – id of the opened video capturing device (i.e. a camera index)
bool VideoCapture::isOpened() // Returns true if video capturing has been initialized already
void VideoCapture::release() // Closes video file or capturing device
bool VideoCapture::grab() // Grabs the next frame from video file or capturing device
bool VideoCapture::retrieve(Mat& image, int channel=0) // Decodes and returns the grabbed video frame
The primary use of the function is in multi-camera environments, especially when the cameras do not have hardware synchronization. That is, you call VideoCapture::grab() for each camera and after that call the slower method VideoCapture::retrieve() to decode and get frame from each camera. This way the overhead on demosaicing or motion jpeg decompression etc. is eliminated and the retrieved frames from different cameras will be closer in time.
VideoCapture& VideoCapture::operator>>(Mat& image) // Grabs, decodes and returns the next video frame
double VideoCapture::get(int propId) // Returns the specified VideoCapture property
Parameters: 
       propId – Property identifier. It can be one of the following:
  • CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds or video capture timestamp.
  • CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
  • CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file: 0 - start of the film, 1 - end of the film.
  • CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
  • CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
  • CV_CAP_PROP_FPS Frame rate.
  • CV_CAP_PROP_FOURCC 4-character code of codec.
  • CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
  • CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
  • CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
  • CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
  • CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
  • CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
  • CV_CAP_PROP_HUE Hue of the image (only for cameras).
  • CV_CAP_PROP_GAIN Gain of the image (only for cameras).
  • CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
  • CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
  • CV_CAP_PROP_WHITE_BALANCE Currently not supported
  • CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)

bool VideoCapture::set(int propId, double value) // Sets a property in the VideoCapture

Parameters:
  • propId
    • CV_CAP_PROP_POS_MSEC Current position of the video file in milliseconds.
    • CV_CAP_PROP_POS_FRAMES 0-based index of the frame to be decoded/captured next.
    • CV_CAP_PROP_POS_AVI_RATIO Relative position of the video file: 0 - start of the film, 1 - end of the film.
    • CV_CAP_PROP_FRAME_WIDTH Width of the frames in the video stream.
    • CV_CAP_PROP_FRAME_HEIGHT Height of the frames in the video stream.
    • CV_CAP_PROP_FPS Frame rate.
    • CV_CAP_PROP_FOURCC 4-character code of codec.
    • CV_CAP_PROP_FRAME_COUNT Number of frames in the video file.
    • CV_CAP_PROP_FORMAT Format of the Mat objects returned by retrieve() .
    • CV_CAP_PROP_MODE Backend-specific value indicating the current capture mode.
    • CV_CAP_PROP_BRIGHTNESS Brightness of the image (only for cameras).
    • CV_CAP_PROP_CONTRAST Contrast of the image (only for cameras).
    • CV_CAP_PROP_SATURATION Saturation of the image (only for cameras).
    • CV_CAP_PROP_HUE Hue of the image (only for cameras).
    • CV_CAP_PROP_GAIN Gain of the image (only for cameras).
    • CV_CAP_PROP_EXPOSURE Exposure (only for cameras).
    • CV_CAP_PROP_CONVERT_RGB Boolean flags indicating whether images should be converted to RGB.
    • CV_CAP_PROP_WHITE_BALANCE Currently unsupported
    • CV_CAP_PROP_RECTIFICATION Rectification flag for stereo cameras (note: only supported by DC1394 v 2.x backend currently)
  • value – Value of the property.

Example 1:

-----------
#include "opencv2/opencv.hpp"
using namespace cv;

int main(int, char**)
{
    VideoCapture cap(0); // open the default camera
    if(!cap.isOpened())  // check if we succeeded
        return -1;

    namedWindow("Video",1);
    while(1)
    {
        Mat frame;
        cap >> frame;         // get a new frame from camera
        imshow("Video", frame);

        // Press 'c' to escape
        if(waitKey(30) == 'c') break;
    }
    return 0;
}
-----------

Example 2:

-----------
#include "opencv2/objdetect/objdetect.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
using namespace cv;

int main( int argc, const char** argv )
{
    CvCapture* capture;
    Mat frame;
    
    // Read the video stream
    capture = cvCaptureFromCAM( -1 );
    namedWindow("Video",CV_WINDOW_AUTOSIZE);
    if( capture )
    {
        while( true )
        {
            frame = cvQueryFrame( capture );
            imshow("Video",frame);

            // Press 'c' to escape
            int c = waitKey(10);
            if( (char)c == 'c' ) { break; }
        }
    }
    return 0;
}
-----------

Random number generator (Drawing colorful random lines)

class RNG - Random number generator.


RNG::next - Returns the next random number.
RNG::operator T - Returns the next random number of the specified type.
RNG::operator uchar()
RNG::operator ushort()
RNG::operator int()
RNG::operator unsigned int()
RNG::operator float()
RNG::operator double()
RNG::uniform - Returns the next random number sampled from the uniform distribution.
float RNG::uniform(float a, float b)
double RNG::uniform(double a, double b)
RNG::gaussian - Returns the next random number sampled from the Gaussian distribution.
double RNG::gaussian(double sigma)

RNG::fill - Fills arrays with random numbers.
void RNG::fill(InputOutputArray mat, int distType, InputArray a, InputArray b, bool saturateRange=false )


Example:
------------
// A default-constructed RNG uses a fixed default seed, so the sequence is reproducible.
RNG rng;

// Integer overload uniform(int a, int b) samples from [a, b);
// the range [0, 1) contains only 0, so this always produces 0
double a = rng.uniform(0, 1);

// produces double from [0, 1)
double a1 = rng.uniform((double)0, (double)1);

// produces float from [0, 1), then widened to double on assignment
double b = rng.uniform(0.f, 1.f);

// produces double from [0, 1)
double c = rng.uniform(0., 1.);

// may cause compiler error because of ambiguity:
//  RNG::uniform(0, (int)0.999999)? or RNG::uniform((double)0, 0.99999)?
double d = rng.uniform(0, 0.999999);
------------

The code provided below is a slight modification of the code given in the OpenCV documentation.

Example 1: ( Drawing Colorful Random Lines )

-------------
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>

using namespace cv;
using namespace std;

void Drawing_Random_Lines( Mat image, char* window_name, RNG rng, int NumOfLines, int windowHeight, int windowWidth );
static Scalar randomColor( RNG& rng );

int main( )
{    
    int windowHeight = 480, windowWidth=640;
    Mat image = Mat::zeros( windowHeight, windowWidth, CV_8UC3 );
    namedWindow( "Source", CV_WINDOW_AUTOSIZE );
    int n=1;
    while(1)
    {
    RNG rng(n);
    Drawing_Random_Lines(image, "Source", rng, 5, windowHeight, windowWidth);    
    imshow( "Source", image );
    waitKey(100);
    n++;
    }

    return(0);
}

/**
 * Draws NumOfLines random line segments on image, each with a random color
 * and a random thickness in [1, 10), refreshing window_name after each
 * segment so the lines appear one by one.
 *
 * @param image        canvas to draw on (modified in place)
 * @param window_name  name of an existing HighGUI window used for display
 * @param rng          random number generator supplying endpoints/colors
 * @param NumOfLines   number of segments to draw
 * @param windowHeight canvas height; y coordinates are drawn from [0, windowHeight)
 * @param windowWidth  canvas width; x coordinates are drawn from [0, windowWidth)
 */
void Drawing_Random_Lines( Mat image, char* window_name, RNG rng, int NumOfLines, int windowHeight, int windowWidth )
{
    int lineType = 8;   // 8-connected line
    Point pt1, pt2;

    for( int i = 0; i < NumOfLines; i++ )
    {
        // Random endpoints inside the window.
        pt1.x = rng.uniform( 0, windowWidth );
        pt1.y = rng.uniform( 0, windowHeight );
        pt2.x = rng.uniform( 0, windowWidth );
        pt2.y = rng.uniform( 0, windowHeight );

        // Fix: lineType was declared but unused; the call hard-coded 8.
        line( image, pt1, pt2, randomColor(rng), rng.uniform(1, 10), lineType );
        imshow( window_name, image );
    }
}

/** Returns a random BGR color drawn from rng (one 32-bit sample, 8 bits per channel). */
static Scalar randomColor( RNG& rng )
{
    unsigned value = (unsigned) rng;        // next random 32-bit number
    uchar blue  = value & 255;
    uchar green = (value >> 8) & 255;
    uchar red   = (value >> 16) & 255;
    return Scalar( blue, green, red );
}
-------------

Example 2: ( Drawing Colorful Random Lines )

-------------
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include <iostream>
#include <stdio.h>
#include <stdlib.h>

using namespace cv;
using namespace std;

void Drawing_Random_Lines( Mat image, char* window_name, RNG rng, int NumOfLines, int windowHeight, int windowWidth );
static Scalar randomColor( RNG& rng );

int main( )
{   
    int windowHeight = 480, windowWidth=640;
    Mat image = Mat::zeros( windowHeight, windowWidth, CV_8UC3 );
    namedWindow( "Source", CV_WINDOW_AUTOSIZE );
    int n=1;
    while(1)
    {
    RNG rng(n);
    Drawing_Random_Lines(image, "Source", rng, n, windowHeight, windowWidth);   
    imshow( "Source", image );
    waitKey(100);
    n++;
    }

    return(0);
}

/**
 * Draws NumOfLines random line segments on image, each with a random color
 * and a random thickness in [1, 10), refreshing window_name after each
 * segment so the lines appear one by one.
 *
 * @param image        canvas to draw on (modified in place)
 * @param window_name  name of an existing HighGUI window used for display
 * @param rng          random number generator supplying endpoints/colors
 * @param NumOfLines   number of segments to draw
 * @param windowHeight canvas height; y coordinates are drawn from [0, windowHeight)
 * @param windowWidth  canvas width; x coordinates are drawn from [0, windowWidth)
 */
void Drawing_Random_Lines( Mat image, char* window_name, RNG rng, int NumOfLines, int windowHeight, int windowWidth )
{
    int lineType = 8;   // 8-connected line
    Point pt1, pt2;

    for( int i = 0; i < NumOfLines; i++ )
    {
        // Random endpoints inside the window.
        pt1.x = rng.uniform( 0, windowWidth );
        pt1.y = rng.uniform( 0, windowHeight );
        pt2.x = rng.uniform( 0, windowWidth );
        pt2.y = rng.uniform( 0, windowHeight );

        // Fix: lineType was declared but unused; the call hard-coded 8.
        line( image, pt1, pt2, randomColor(rng), rng.uniform(1, 10), lineType );
        imshow( window_name, image );
    }
}

/** Returns a random BGR color drawn from rng (one 32-bit sample, 8 bits per channel). */
static Scalar randomColor( RNG& rng )
{
    unsigned value = (unsigned) rng;        // next random 32-bit number
    uchar blue  = value & 255;
    uchar green = (value >> 8) & 255;
    uchar red   = (value >> 16) & 255;
    return Scalar( blue, green, red );
}

-------------

Result:


Adding a Trackbar

int createTrackbar(const string& trackbarname, const string& winname, int* value, int count, TrackbarCallback onChange=0, void* userdata=0)

Parameters:
  • trackbarname – Name of the created trackbar.
  • winname – Name of the window that will be used as a parent of the created trackbar.
  • value – Optional pointer to an integer variable whose value reflects the position of the slider. Upon creation, the slider position is defined by this variable.
  • count – Maximal position of the slider. The minimal position is always 0.
  • onChange – Pointer to the function to be called every time the slider changes position. This function should be prototyped as void Foo(int,void*); , where the first parameter is the trackbar position and the second parameter is the user data (see the next parameter). If the callback is the NULL pointer, no callbacks are called, but only value is updated.
  • userdata – User data that is passed as is to the callback. It can be used to handle trackbar events without using global variables.
The code provided below is a slight modification of the code given in the OpenCV documentation.

Example:

-------------
#include <opencv/cv.h>
#include <opencv2/highgui/highgui.hpp>

using namespace cv;

/// Global Variables
const int alpha_slider_max = 100;
int alpha_slider;
Mat src1,src2,dst;

void on_trackbar( int, void* )
{
 double alpha;
 double beta;

 alpha = (double) alpha_slider/alpha_slider_max ;
 beta = ( 1.0 - alpha );

 addWeighted( src1, alpha, src2, beta, 0.0, dst);

 imshow( "Linear Blend", dst );
}

int main( )
{
 // Read image ( same size, same type )
 src1 = imread("image2.jpg",0);
 src2 = imread("image1.jpg",0);

 // Create Windows
 namedWindow("Linear Blend", 1);

 // Initialize values
 alpha_slider = 0;
 createTrackbar( "Alpha ", "Linear Blend", &alpha_slider,  alpha_slider_max, on_trackbar );

 // Initialize trackbar
 on_trackbar( 0, 0 );

 waitKey(0);
 return 0;
}
-------------

Creating matrix in different ways


Example:

----------------------------------
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int main( )
{

    // Load a color image from disk (CV_LOAD_IMAGE_COLOR -> 3-channel BGR).
    Mat src1;
    src1 = imread("lena.jpg", CV_LOAD_IMAGE_COLOR); 
    namedWindow( "Original image", CV_WINDOW_AUTOSIZE ); 
    imshow( "Original image", src1 ); 

    // Make a single-channel grayscale copy.
    Mat gray;
    cvtColor(src1, gray, CV_BGR2GRAY);
    namedWindow( "Result window", CV_WINDOW_AUTOSIZE ); 
    imshow( "Result window", gray );

    // Use the copy constructor.
    // NOTE: this copies only the header; src2 shares pixel data with src1.
    Mat src2(src1);
    namedWindow( "window2", CV_WINDOW_AUTOSIZE ); 
    imshow( "window2", src2 ); 

    // Assignment operator (also a shallow, header-only copy).
    Mat src3;
    src3 = src2; 
    namedWindow( "window3", CV_WINDOW_AUTOSIZE ); 
    imshow( "window3", src3 ); 

    // Selecting a region of interest using a rectangle
    // (a view into src1's data, not a copy).
    Mat src4 (src1, Rect(50, 50, 150, 150) ); 
    namedWindow( "window4", CV_WINDOW_AUTOSIZE ); 
    imshow( "window4", src4 ); 
    // Another way of doing the same: index the Mat with a Rect.
    Rect r(10, 10, 100, 100);
    Mat src41 = src1(r);
    namedWindow( "window4.1", CV_WINDOW_AUTOSIZE ); 
    imshow( "window4.1", src41 ); 

    // clone() function: a deep copy with its own pixel buffer.
    Mat src5 = src1.clone();
    namedWindow( "window5", CV_WINDOW_AUTOSIZE ); 
    imshow( "window5", src5 ); 

    // copyTo() function: deep copy into an existing (reallocated) Mat.
    Mat src6;
    src1.copyTo(src6);
    namedWindow( "window6", CV_WINDOW_AUTOSIZE ); 
    imshow( "window6", src6 ); 

    // Using Range(): copy row 1 (rowRange's end index is exclusive) into P.
    Mat P;
    src6.rowRange(1,2).copyTo(P);
    cout << "P = " << endl << " " << P(Range::all(),Range::all()) << endl << endl ;

    // Range::all() - "the whole sequence"
    // Range(a,b)   - half-open interval: includes a, excludes b, i.e. [a, b)
    // Copy {row : 0 to 4} and {col : 5 to 9} to matrix Q
    Mat Q; Q = gray(Range(0,5),Range(5,10));
    cout<< Q << endl << endl;

    // Make a black image from an existing image (sets every pixel to 0).
    src1 = Scalar(0);
    namedWindow( "window7", CV_WINDOW_AUTOSIZE ); 
    imshow( "window7", src1 ); 

    // Create a header for an already existing IplImage pointer.
    // NOTE(review): img is never released (cvReleaseImage); acceptable in a
    // short demo, but a leak in real code.
    IplImage* img = cvLoadImage("lena.jpg", 1);
    Mat mtx(img); // convert IplImage* -> Mat
    namedWindow( "window8", CV_WINDOW_AUTOSIZE ); 
    imshow( "window8", mtx );

    // Mat() constructor: 2x2, 3 channels, every pixel initialized to (0,0,255).
    Mat M(2,2, CV_8UC3, Scalar(0,0,255));
    cout << "M = " << endl << " " << M << endl << endl;

    // Use C/C++ arrays and initialize via constructor:
    // create a matrix with more than two dimensions (here 4x2x3).
    int sz[3] = {4,2,3};
    Mat L(3,sz, CV_8UC(1), Scalar::all(0));

    // create() function: (re)allocates the matrix to 4x4, 2 channels.
    // NOTE: create() does not initialize the data, so N prints whatever
    // happened to be in the allocated memory.
    Mat N;
    N.create(4,4, CV_8UC(2));
    cout << "N = "<< endl << " " << N << endl << endl;

    // Initializers: zeros(), ones(), eye(). Specify size and data type to use.
    Mat E = Mat::eye(4, 4, CV_64F);
    cout << "E = " << endl << " " << E << endl << endl;

    Mat O = Mat::ones(2, 2, CV_32F);
    cout << "O = " << endl << " " << O << endl << endl;

    Mat Z = Mat::zeros(3,3, CV_8UC1);
    cout << "Z = " << endl << " " << Z << endl << endl;

    // For small matrices you may use comma separated initializers
    // (this one is a 3x3 Laplacian-style kernel):
    Mat C = (Mat_<double>(3,3) << 0, -1, 0, -1, 5, -1, 0, -1, 0);
    cout << "C = " << endl << " " << C << endl << endl;

    // clone() on a row view gives an independent copy of that row.
    Mat RowClone = C.row(1).clone();
    cout << "RowClone = " << endl << " " << RowClone << endl << endl;

    // Fill out a matrix with random values using the randu() function. 
    // You need to give the lower and upper value for the random values:
    Mat R = Mat(3, 2, CV_8UC3);
    randu(R, Scalar::all(0), Scalar::all(255));
    cout << "R = " << endl << " " << R << endl << endl; 

    // Allocates a 30x40 floating-point matrix via the DataType traits class.
    Mat A(30, 40, DataType<float>::type);

    // A complex<double> matrix maps to depth CV_64F with 2 channels.
    Mat B = Mat_<std::complex<double> >(3, 3);
    // the statement below will print 6, 2 (that is, depth == CV_64F, channels == 2)
    cout << B.depth() << ", " << B.channels() << endl;
    
    waitKey(0);
    return 0;
}
----------------------------------

Change image type : Convert 8U to 32F or 64F

void Mat::convertTo(OutputArray m, int rtype, double alpha=1, double beta=0 )

Parameters:
  • m – output matrix; if it does not have a proper size or type before the operation, it is reallocated.
  • rtype – desired output matrix type or, rather, the depth since the number of channels are the same as the input has; if rtype is negative, the output matrix will have the same type as the input.
  • alpha – optional scale factor.
  • beta – optional delta added to the scaled values.
saturate_cast<> is applied at the end to avoid possible overflows:
m(x,y) = saturate_cast<rtype>( alpha * (*this)(x,y) + beta )

Steps:

  1. Load an image 
  2. Convert image type
  3. Show result

Functions:

Example:

-------------
#include "opencv2/core/core.hpp"
#include "opencv2/highgui/highgui.hpp"
#include "opencv2/imgproc/imgproc.hpp"
#include "iostream"

using namespace cv;
using namespace std;

int main( )
{

    // Load a 3-channel color image (type CV_8UC3).
    Mat src1;
    src1 = imread("lena.jpg", CV_LOAD_IMAGE_COLOR); 
    namedWindow( "Original image", CV_WINDOW_AUTOSIZE ); 
    imshow( "Original image", src1 ); 
    Mat dst,dst1;
    // Change the image depth from 8U to 32F. NOTE: the image is loaded in
    // color, so this is actually 8UC3 -> 32FC3 (convertTo changes only the
    // depth, never the channel count).
    src1.convertTo(dst, CV_32F);

    cout << "src1 intensity = " << (int) src1.at<Vec3b>(2,5)[0] << endl << "dst intensity = " << dst.at<Vec3f>(2,5)[0] <<endl << endl;
    // Both intensities hold the same value; only the depth differs (8U vs 32F).

    dst.convertTo(dst1,CV_8U,1.5,10); // back to 8U with scaling: dst1 = saturate_cast<uchar>(1.5*dst + 10)
    namedWindow( "dst1", CV_WINDOW_AUTOSIZE );  
    imshow( "dst1", dst1 );
    
    waitKey(0);                                         
    return 0;
}
--------------