
MATRIX MULTIPLICATION:

DRIVER:

import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.MultipleInputs;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.apache.hadoop.io.*;
import org.apache.hadoop.conf.*;
import org.apache.hadoop.fs.Path;
import java.io.*;
import java.util.*;

public class MatMul {

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf, "matrix multiplication");
        job.setJarByClass(MatMul.class);
        // The mappers emit Text/Text pairs; the reducer emits Text/LongWritable.
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(LongWritable.class);
        job.setReducerClass(MatMulReducer.class);
        // MultipleInputs binds one mapper to each input file, so no global
        // setMapperClass() call is needed (a second call would only override the first).
        MultipleInputs.addInputPath(job, new Path(args[0]), TextInputFormat.class, MatMulMapper1.class);
        MultipleInputs.addInputPath(job, new Path(args[1]), TextInputFormat.class, MatMulMapper2.class);
        FileOutputFormat.setOutputPath(job, new Path(args[2]));
        // A single reducer collects every cell of the product matrix.
        job.setNumReduceTasks(1);
        System.exit(job.waitForCompletion(true) ? 0 : 1);
    }
}
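
To submit the job, the three classes are packaged into a jar and run with hadoop jar (a sketch; the jar name is a placeholder and the argument paths depend on where the input files sit in HDFS):

hadoop jar matmul.jar MatMul <matrix A file> <matrix B file> <output directory>

Here args[0] and args[1] are the files read by MatMulMapper1 and MatMulMapper2, and args[2] names an output directory that must not already exist.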

MAPPER1:

import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.io.*;
import java.io.*;
public class MatMulMapper1 extends Mapper<LongWritable, Text, Text, Text> {
    // d flags whether the header line (rows,cols) has been read yet;
    // this assumes the whole file is handled by a single mapper instance.
    int d = 0, row = 0, col = 0;

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] wordsinline = line.split(",");
        if (d == 0) {
            // The first line of the file holds the dimensions of matrix A.
            row = Integer.parseInt(wordsinline[0]);
            col = Integer.parseInt(wordsinline[1]);
            d++;
        } else {
            // Every other line is an entry: row,column,value.
            int r = Integer.parseInt(wordsinline[0]);
            int c = Integer.parseInt(wordsinline[1]);
            int v = Integer.parseInt(wordsinline[2]);
            // Replicate the A entry to every column of the product.
            // NOTE: `col` (A's column count) is used here, which equals the
            // product's column count only when B has as many columns as A,
            // as in the sample 2x2 input below.
            for (int i = 1; i <= col; i++) {
                String outKey = r + "," + i;
                String outValue = r + "," + c + "," + v + ",0," + col;
                context.write(new Text(outKey), new Text(outValue));
            }
        }
    }
}
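
For example, with FILE1 below (col = 2), the entry 1,1,2 makes MatMulMapper1 emit the pairs (1,1 → 1,1,2,0,2) and (1,2 → 1,1,2,0,2); every entry of row r of A is sent to every product cell (r, j).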

MAPPER2:

import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.io.*;
import java.io.*;
public class MatMulMapper2 extends Mapper<LongWritable, Text, Text, Text> {
    // d flags whether the header line (rows,cols) has been read yet;
    // this assumes the whole file is handled by a single mapper instance.
    int d = 0, row = 0, col = 0;

    public void map(LongWritable key, Text value, Context context)
            throws IOException, InterruptedException {
        String line = value.toString();
        String[] wordsinline = line.split(",");
        if (d == 0) {
            // The first line of the file holds the dimensions of matrix B.
            row = Integer.parseInt(wordsinline[0]);
            col = Integer.parseInt(wordsinline[1]);
            d++;
        } else {
            // Every other line is an entry: row,column,value.
            int r = Integer.parseInt(wordsinline[0]);
            int c = Integer.parseInt(wordsinline[1]);
            int v = Integer.parseInt(wordsinline[2]);
            // Replicate the B entry to every row of the product.
            // NOTE: `row` (B's row count) is used here, which equals the
            // product's row count only when A has as many rows as B,
            // as in the sample 2x2 input below.
            for (int i = 1; i <= row; i++) {
                String outKey = i + "," + c;
                String outValue = r + "," + c + "," + v + ",1," + col;
                context.write(new Text(outKey), new Text(outValue));
            }
        }
    }
}
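
Likewise, with FILE2 below (row = 2), the entry 1,2,7 makes MatMulMapper2 emit (1,2 → 1,2,7,1,2) and (2,2 → 1,2,7,1,2); every entry of column c of B reaches every product cell (i, c).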

REDUCER:

import org.apache.hadoop.mapreduce.*;
import org.apache.hadoop.io.*;
import java.io.*;
import java.util.*;
public class MatMulReducer extends Reducer<Text, Text, Text, LongWritable> {
    LongWritable result = new LongWritable();

    public void reduce(Text key, Iterable<Text> values, Context context)
            throws IOException, InterruptedException {
        // Fixed-size buffers for one row of A and one column of B;
        // this assumes 1-based matrix indices no larger than 99.
        int[][] a = new int[100][100];
        int[][] b = new int[100][100];
        int row = 0, col = 0, val = 0, bit = 0, column = 0, column1 = 0, sum = 0;
        // The key names one cell (i,j) of the product matrix.
        String[] wordinline = key.toString().split(",");
        int i = Integer.parseInt(wordinline[0]);
        int j = Integer.parseInt(wordinline[1]);
        for (Text txt : values) {
            // Each value is row,col,val,bit,columns; bit 0 marks an A entry, bit 1 a B entry.
            String[] wordsinline = txt.toString().split(",");
            row = Integer.parseInt(wordsinline[0]);
            col = Integer.parseInt(wordsinline[1]);
            val = Integer.parseInt(wordsinline[2]);
            bit = Integer.parseInt(wordsinline[3]);
            if (bit == 0) {
                a[row][col] = val;
                column = Integer.parseInt(wordsinline[4]);   // column count of A
            } else {
                b[row][col] = val;
                column1 = Integer.parseInt(wordsinline[4]);  // column count of B
            }
        }
        // Only cells inside B's column range belong to the product.
        if (j <= column1) {
            // Dot product of row i of A with column j of B.
            for (int k = 1; k <= column; k++) {
                sum += a[i][k] * b[k][j];
            }
            result.set(sum);
            context.write(new Text("(" + i + "," + j + ")"), result);
        }
    }
}
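
As a concrete trace with the sample input below, the reducer called for key 1,2 receives row 1 of A (values 2 and 3, tagged with bit 0) and column 2 of B (values 7 and 9, tagged with bit 1), and writes (1,2) with 2*7 + 3*9 = 41.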

INPUT:
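
Each input file starts with a header line giving the matrix dimensions as rows,columns; every following line is a row,column,value triple with 1-based indices.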

FILE1:

2,2
1,1,2
1,2,3
2,1,4
2,2,5

FILE2:

2,2
1,1,6
1,2,7
2,1,8
2,2,9

OUTPUT:

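For the two sample matrices above, A = [[2,3],[4,5]] and B = [[6,7],[8,9]], the hand-computed product is [[36,41],[64,73]], so the reducer should produce (with TextOutputFormat's default tab between key and value):

(1,1)	36
(1,2)	41
(2,1)	64
(2,2)	73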