Saturday 26 July 2014

Multimedia Development Assignment 2

 Assignment 1

In this assignment, I gained knowledge from the following three questions:
1. Apply a combination of neighborhood operation and point operation to an image
2. Read two images in jpg/png and merge them to form a new image of a character, by compositing one image onto an image with a green screen.
3. Create an application that can find the place where the sub image was taken from the first image, by comparing pixel values to detect the correct position.


4. You will apply a combination of neighborhood operations and point operations to an image. You will need to create an appropriate GUI for loading images and applying the above filters. The application will read a host image in jpg/png and produce a cartoon/sketch-like (black) output based on several combinations of image filtering processes.


 Output Of GrayScale - Neighborhood Operation 


Output Of Sketch & Edge Detection - Neighborhood Operation 





The figure above shows the output images of the neighborhood operation with an on-click function.
In this question I did the calculation by multiplying the sharpen and edge-detection masks. The final calculated mask has the values {-2,-2,0},{-2,8.25,2},{-2,-2,0}. This is the combined mask of the edge-detection and sharpen image-processing operations.

GUI Code : 


import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.*;
import java.awt.event.*;
import java.awt.image.*;
import java.awt.image.BufferedImage;
import java.io.*;
import java.io.File;
import java.io.IOException;
import java.awt.geom.*;

public class GUI extends JFrame{

 public JButton opt1,opt2,opt3,opt4;

    public GUI() {

        Container k = getContentPane();
        k.setLayout(new FlowLayout());
        opt1 = new JButton("Gray Scale");
        opt2 = new JButton("Black & White");
        opt3 = new JButton("Edge Detection");
        opt4 = new JButton("Sketch");


        k.add(opt1);
        k.add(opt2);
        k.add(opt3);
        k.add(opt4);

        opt1.addActionListener(new ActionListener(){
            public void actionPerformed(ActionEvent e){

            Frame p1 = new grayScale();
            p1.setVisible(true);
            p1.setSize(500,500);

            }
        });

        opt2.addActionListener(new ActionListener(){
            public void actionPerformed(ActionEvent e){

            Frame p1 = new blackWhite();
            p1.setVisible(true);
            p1.setSize(500,500);

            }
        });

        opt3.addActionListener(new ActionListener(){
            public void actionPerformed(ActionEvent e){

            Frame p1 = new edgeDetectInvert();
            p1.setVisible(true);
            p1.setSize(500,500);

            }
        });

        opt4.addActionListener(new ActionListener(){
            public void actionPerformed(ActionEvent e){

            Frame p1 = new sketch();
            p1.setVisible(true);
            p1.setSize(500,500);

            }
        });

    }


    public static void main(String[] args){

        GUI p = new GUI();
        p.setVisible(true);
        p.setSize(100,100);

        }

}


Sketch =

import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.*;
import java.awt.event.*;
import java.awt.image.*;
import java.awt.image.BufferedImage;
import java.io.*;
import java.io.File;
import java.io.IOException;
import java.awt.geom.*;

public class sketch extends Frame{

// im1: source image; crop1: black & white version (computed for the write-up,
// not drawn); crop2: raw edge-detection output; crop3: inverted edges — the
// "sketch" that paint() actually displays.
public BufferedImage im1,crop1,crop2,crop3;
public int w,h;
// 3x3 Laplacian-style edge-detection kernel.
float[] edgeKernel = {  0.0f,  -1.0f,  0.0f,
                         -1.0f,  4.0f,  -1.0f,
                         0.0f,  -1.0f,  0.0f};

    /**
     * Loads "success.jpg", then builds: (1) a thresholded black & white copy,
     * (2) an edge-detected copy, and (3) an inverted-edge "sketch" copy.
     */
    public sketch() {
            try{
        im1 = ImageIO.read(new File("success.jpg"));
        // ImageIO.read returns null (without throwing) when the file cannot
        // be decoded; fail here as an IOException instead of an uncaught NPE.
        if (im1 == null) {
            throw new IOException("could not decode success.jpg");
        }
        w = im1.getWidth();
        h = im1.getHeight();
        crop1 = new BufferedImage(w,h,BufferedImage.TYPE_INT_RGB);
        crop3 = new BufferedImage(w,h,BufferedImage.TYPE_INT_RGB);

        // Pass 1: threshold every pixel to pure black or white at mid grey.
        for(int y=0; y<h ; y++){
            for(int x=0; x<w ; x++){

                // unpack the ARGB channels of the current pixel
                int p = im1.getRGB(x,y);
                int a = (p>>24)&0xff;
                int r = (p>>16)&0xff;
                int g = (p>>8)&0xff;
                int b = p&0xff;

                // simple average as the grey level
                int avg = (r+g+b)/3;

                // threshold: dark pixels become black, the rest white
                if(avg <= 128){
                    r = 0; g = 0; b = 0;
                }else{
                    r = 255; g = 255; b = 255;
                }

                p = (a<<24) | (r<<16) | (g<<8) | b;
                crop1.setRGB(x,y,p);
            }
        }

        // Pass 2: edge detection by 3x3 convolution over the original image.
        Kernel kernel = new Kernel(3,3,edgeKernel);
        ConvolveOp cop = new ConvolveOp(kernel,ConvolveOp.EDGE_ZERO_FILL,null);
        crop2  = cop.filter(im1,null);

        // Pass 3: invert, so edges become dark lines on white (sketch look).
        for(int y=0; y<h ; y++){
            for(int x=0; x<w; x++){

                int p = crop2.getRGB(x,y);
                int a = (p>>24)&0xff;
                int r = 255 - ((p>>16)&0xff);
                int g = 255 - ((p>>8)&0xff);
                int b = 255 - (p&0xff);

                crop3.setRGB(x,y,(a<<24) | (r<<16) | (g<<8) | b);
            }
        }

        }catch(IOException e){
            e.printStackTrace();
        }
    }

    /** Draws the original and the sketch result side by side. */
    public void paint(Graphics g){
        // Guard: if loading failed the images are null and drawImage would NPE.
        if (im1 == null || crop3 == null) {
            return;
        }
        g.drawImage(im1,10,10,this);
        g.drawImage(crop3,im1.getWidth(this)+10,10,this);
    }

}



GrayScale -
import javax.imageio.ImageIO;
import javax.swing.*;
import java.awt.*;
import java.awt.event.*;
import java.awt.image.*;
import java.awt.image.BufferedImage;
import java.io.*;
import java.io.File;
import java.io.IOException;
import java.awt.geom.*;

public class grayScale extends Frame{

        // im1: the source image; crop1: its greyscale version.
        BufferedImage im1,crop1;
        int w,h;

    /**
     * Loads "success.jpg" and builds a greyscale copy by replacing each
     * pixel's R, G and B channels with their average.
     */
    public grayScale(){

        try{
            // retrieve the file from the working directory
            im1=ImageIO.read(new File("success.jpg"));
            // ImageIO.read returns null (without throwing) when the file
            // cannot be decoded; fail cleanly instead of with an NPE below.
            if (im1 == null) {
                throw new IOException("could not decode success.jpg");
            }
            // dimensions of the original image
            w = im1.getWidth();
            h = im1.getHeight();
            // destination for the processed image
            crop1 = new BufferedImage(w,h,BufferedImage.TYPE_INT_RGB);

        for(int y=0; y<h ; y++){
            for(int x=0; x<w ; x++){

                // unpack the ARGB channels of the current pixel
                int p = im1.getRGB(x,y);
                int a = (p>>24)&0xff;
                int r = (p>>16)&0xff;
                int g = (p>>8)&0xff;
                int b = p&0xff;

                // simple average as the grey level
                int avg = (r+g+b)/3;

                // repack with the same grey in all three colour channels
                p = (a<<24) | (avg<<16) | (avg<<8) | avg;
                crop1.setRGB(x,y,p);
            }
        }

        }catch(IOException e){
            e.printStackTrace();
        }
    }

    /** Draws the original and the greyscale result side by side. */
    public void paint(Graphics g){
            // Guard: if loading failed the images are null and drawImage would NPE.
            if (im1 == null || crop1 == null) {
                return;
            }
            g.drawImage(im1,10,10,this);
            g.drawImage(crop1,im1.getWidth(this)+10,10,this);
        }
}

Part 2 : NOT DONE.


In part 3, you will be performing histogram equalization for both greyscale images and colour images.





For GrayScale image =



Does colour equalization give you a better result on the original image compared to greyscale? Give a reason for your answer.

Yes. As we can clearly see from the images, the new image's contrast has been enhanced and its histogram has been equalized. One important thing to notice is that during histogram equalization the overall shape of the histogram changes, whereas in histogram stretching the overall shape of the histogram remains the same.
 



Assignment 2 - Part1



The second part is similar to the first part (video and image). The second part merges two videos together and runs them at the same time.

1. First create the basic HTML syntax.

2. Then we create 3 canvas, a radio button and a button, which is for the output when clicked.

3. After that, we will be putting the code for reading and drawing the image on the canvas, then combine the video with green screen with a another video as background together, which same as the previous part by combining video and image. This is video with video by just replacing the background video on the green screen video.

4. The code is shown here


var mp4Supported = false;
        var isPlaying = false;
        // Declared here (rather than created implicitly on first assignment
        // inside loadBackgroundVideo / StartBackground) so the script also
        // runs under strict mode.
        var isBackgroundVideo = false;
        var videoExt = "";
     

        function draw() {

            if (window.requestAnimationFrame) window.requestAnimationFrame(draw);
            // IE implementation
            else if (window.msRequestAnimationFrame) window.msRequestAnimationFrame(draw);
            // Firefox implementation
            else if (window.mozRequestAnimationFrame) window.mozRequestAnimationFrame(draw);
            // Chrome implementation
            else if (window.webkitRequestAnimationFrame) window.webkitRequestAnimationFrame(draw);
            // Other browsers that do not yet support feature
            else setTimeout(draw, 16.7);
            DrawVideoOnCanvas();
        }

        function Play() {
            if (!isPlaying) {
                document.getElementById("videodata").play();
                document.getElementById("Zombie").play();
                document.getElementById("PlayPause").value = "Pause";
                isPlaying = true;               
            }
            else {
                document.getElementById("videodata").pause();
                document.getElementById("Zombie").pause();
                document.getElementById("PlayPause").value = "Play";
                isPlaying = false;                
            }
            draw();
        }

        function DrawVideoOnCanvas() {

            var object = document.getElementById("videodata")

            var backgroundObject;

            if (isBackgroundVideo) {
                backgroundObject = document.getElementById("Zombie");
            }
            
            
            var width = object.width;
            var height = object.height;
            var canvas = document.getElementById("videoscreen");
            canvas.setAttribute('width', width);
            canvas.setAttribute('height', height);
            if (canvas.getContext) {
                var context = canvas.getContext('2d');                            //implement the green screen video and background video(canvas output)
                context.drawImage(backgroundObject, 0, 0, width, height);
                var imgBackgroundData = context.getImageData(0, 0, width, height);//merge green screen video to the canvas output
                context.drawImage(object, 0, 0, width, height);
                imgDataNormal = context.getImageData(0, 0, width, height); //implement the background video to the canvas output
                var imgData = context.createImageData(width, height);

                for (i = 0; i < imgData.width * imgData.height * 4; i += 4) {

                    var r = imgDataNormal.data[i + 0];
                    var g = imgDataNormal.data[i + 1];
                    var b = imgDataNormal.data[i + 2];
                    var a = imgDataNormal.data[i + 3];
                    // compare rgb levels for green and set alphachannel to 0;
                   
                    if (r <= 110 && b <= 110 && g >= 100) {        // green screen video RGB
                        a = 0;
                    }
                    if (a != 0) {
                        imgData.data[i + 0] = r;
                        imgData.data[i + 1] = g;
                        imgData.data[i + 2] = b;
                        imgData.data[i + 3] = a;
                    }
                }

             


        for (i = 0; i < imgData.width * imgData.height * 4; i += 4) { // background video of the RGB

            var r = imgData.data[i + 0];
            var g = imgData.data[i + 1];
            var b = imgData.data[i + 2];
            var a = imgData.data[i + 3];                
               if (a == 0) {
                            imgData.data[i + 0] = imgBackgroundData.data[i + 0];
                            imgData.data[i + 1] = imgBackgroundData.data[i + 1];
                            imgData.data[i + 2] = imgBackgroundData.data[i + 2];
                            imgData.data[i + 3] = imgBackgroundData.data[i + 3];
                            }                   
                }
                
                context.putImageData(imgData, 0, 0);
              
            }
        }      

        function SupportedVideoFormat() {

            var video = document.createElement("video");
        
            if (video.canPlayType('video/mp4')) {
                // it can play (maybe)!
                mp4Supported = true;
            }
        }

        function StartBackground() {

            SupportedVideoFormat();
        
            if (mp4Supported) {
                videoExt = ".mp4";
            }
            loadBackgroundVideo();
        }

        function loadBackgroundVideo() {

            var value = "";
            var radioObj = document.getElementsByName("background");
            if (!radioObj)
                return "";
            var radioLength = radioObj.length;
            if (radioLength == undefined)
                if (radioObj.checked)
                    value= radioObj.value;
                else
                    value= "";
            for (var i = 0; i < radioLength; i++) {
                if (radioObj[i].checked) {
                    value= radioObj[i].value;
                }
            }

            

            var backgroundType= value.split("/");
            if (backgroundType[0] == "videos") {
                isBackgroundVideo = true;
                var backgroundFileName = value + videoExt;
                document.getElementById("backgroundvideo").style.display = "inline";
                document.getElementById("videoBackgrounddata").src = backgroundFileName;
                document.getElementById("videoBackgrounddata").loop = true
                if (isPlaying)
                    document.getElementById("videoBackgrounddata").play();
            }
            
            else {
                isBackgroundVideo = false;
                document.getElementById("backgroundvideo").style.display = "none";
            }
        }

</script>

</head>
<body onload="StartBackground();">
<div id="wrapper" class="clearfix">
    <div id="output">
        <p>Canvas output</p>
        <canvas id="videoscreen" width="320" height="180"></canvas>
    </div>
    <div id="source">
        <p>Green screen video</p>
        <video id="videodata" loop="loop" preload="auto" width="320" height="180">
            <source src="Zombie.mp4" type='video/mp4' />
        </video>
    </div>
    <div id="backgroundvideo">
        <p>Background Video</p>
        <video id="Zombie" loop="loop" preload="auto" width="320" height="180" src="BackgroundVid.mp4">
        </video>
    </div>

</div>

<div id="wrapper2" class="clearfix">

    <div id="backgroundvideoselection">
    <p>Background video selection</p>
    <ul>
        <!-- id renamed: "Zombie" was already used by the background <video>
             above, and duplicate ids break getElementById lookups. -->
        <input type="radio" id="ZombieOption" name="background" value="videos" onclick="loadBackgroundVideo();" checked="checked" /><label for="ZombieOption">Zombie (Friendly Zombie)</label>
    </ul>
    </div>

    <div id="videocontrols">
        <p>Video controls</p>
        <input id="PlayPause" type="button" onclick="Play();" value="Play" />
    </div>
</div>
</body>
</html>

Output ;




Assignment 2 - Part 2

1) As you can see that the HTML 5 supports multimedia like audio and video. So to set the video in html5, we first set the video in the video tag.

<p> <font size="10" color="blue"> Background Video : </font> </p>
            <video id="sourceVideo" controls = "true">
            <source src="Dinosaur.mp4" />
            </video>


2) Then we create 1 hidden canvas and 1 display canvas to display the video.

<p> <font size="10" color="blue"> Output Effect : </font> </p>
            <canvas id="hiddenCanvas" width="1920" height="1200"> </canvas>
            <br />
            <br />
            <canvas id="displayCanvas" width="1920" height="1200"> </canvas>

3) Then we are going to set a variable for the canvas, and then draw it in the context. Here we are going to set another canvas that is the #hidden canvas. The hidden canvas is set, because we need to overlap the green image on another image, so that we can retrieve the object that is on the green screen.

// Grab the source <video> element plus the two canvases used by this part.
var sourceVideo = document.getElementById("sourceVideo");
       
        // hidden canvas: frames are drawn here so their pixels can be read
        var hiddenCanvas = document.getElementById("hiddenCanvas");
        var hiddenctx = hiddenCanvas.getContext ("2d");
       
        // display canvas: receives the chroma-keyed result
        var displayCanvas = document.getElementById("displayCanvas");
        var displayctx = displayCanvas.getContext ("2d");
       
        // start the per-frame analysis loop when playback begins
        sourceVideo.addEventListener('play', function(){runAnalysis();});

4) Now, we are going to set the controls of the video.

    sourceVideo.addEventListener('play', function(){runAnalysis();});
      
        var runAnalysis = function()
        {
      
            if(sourceVideo.paused || sourceVideo.ended)
            {
            return
            }

5) After that, we will request the browser to call upon the specific functions with the requestAnimationFrame, so that the browser will allow the user to perform/run Animation on the browser.

    frameConversion();
        if(window.requestAnimationFrame)
      
            {
            requestAnimationFrame(runAnalysis);
            }
      
        else
            {
            setTimeout(runAnalysis,0);
            }
          
     };

6) After that comes the hardest part, where we extract the object from the green-screen video and paste it onto another video with a different background. To do this, first we need to draw the object on the hidden canvas.

var frameConversion = function()
   
    {
   
    hiddenctx.drawImage(sourceVideo,0,0,sourceVideo.videoWidth, sourceVideo.videoHeight);
   
    var frame = hiddenctx.getImageData(0,0,sourceVideo.videoWidth, sourceVideo.videoHeight);

7) Then, we will start to set a variable for the frame length so that it will be getting all of the pixels while the videos is running

 var length = frame.data.length / 4;

8) After that we will loop through every pixel, and set the variable where g > 100, means if this is greater than 100, then the image is green and it will extract the object within the green image and paste it on another background.

for (var i = 0 ; i < length ; i++)
        {
        var r = frame.data [i*4+ 0];  //r
        var g = frame.data [i*4 + 1]; //g
        var b = frame.data [i*4 + 2]; //b
   
            if (g > 120 && r < 120 && b < 120)
            {
            frame.data[i * 4 + 3] = 0;
            }

        }

9) Once the manipulation is finished, then we will put the frame data back.

  displayctx.putImageData(frame, 0, 0);
   
    return
    }; 
 

Output:

Question 3
Create an application that can find the place in the first image from which the sub image was taken, by comparing pixel values to detect the correct position.
The result of this question is the ability to change the detected matching pixels in the original image to another colour. The way to implement this is:
1) Create three canvases: the first holds the main image, the second holds the sub image used to find matching pixels in the main image, and the third shows the result, where the matching pixels are recoloured.
2) Apply a click function and implement for-loops to scan both images for the comparison. The function is: apply.onclick = function()

<html>
<head>
<script>
// Loads the main image and the sub image onto two canvases; on "Find",
// recolours every pixel of the main image whose RGB value matches any pixel
// of the sub image, then shows the result on a third canvas.
function load()
{
var cd = document.getElementById('c1');
var co = cd.getContext('2d');

var cd1 = document.getElementById('c2');
var co1 = cd1.getContext('2d');

var cd2 = document.getElementById('c3');
var co2 = cd2.getContext('2d');

var img = new Image();
img.src = "field.jpg";

var img1 = new Image();
img1.src = "football.jpg";

img.onload = function()
{
co.drawImage(img,0,0);
};
// Each image is drawn from its own onload handler; the original drew img1
// inside img.onload, so the sub image could be drawn before it had loaded.
img1.onload = function()
{
co1.drawImage(img1,0,0);
};

// Explicit lookup instead of relying on the implicit "apply" global that
// browsers create from the element id.
document.getElementById('apply').onclick = function()
{
var img3=co.getImageData(0,0,cd.width,cd.height);
var pix1=img3.data;

var img4=co1.getImageData(0,0,cd1.width,cd1.height);
var pix2=img4.data;

// NOTE(review): this compares raw RGB values, not positions, so any colour
// that occurs anywhere in the sub image gets highlighted.
for (var i=0; i<pix1.length; i+=4)
{
for (var b=0; b<pix2.length; b+=4)
{
if(pix1[i]==pix2[b] && pix1[i+1]==pix2[b+1] && pix1[i+2]==pix2[b+2])
{
// paint the matching pixel red
pix1[i] = 255;
pix1[i+1] = 0;
pix1[i+2] = 0;
// further comparisons could only re-write the same values, so stop early
// (the original's else branch re-assigned each channel to itself — a no-op,
// removed)
break;
}
}

}

co2.putImageData(img3,0,0);

};

};

</script>
</head>
<body onload = "load()">
<table border = "1">
<tr>
<td><canvas id = "c1" width="375" height="281" style="border:solid 1px ;"></canvas></td>
<td><canvas id = "c2" width="52" height="46" style="border:solid 1px;"></canvas></td>
<td><canvas id = "c3" width="375" height="281" style="border:solid 1px;"></canvas></td>
</tr>
<tr>
<td><button id="apply">Find</button></td>
</tr>
</table>
</body>
</html>