MapReduce opened the door to parallel computing and gave individual developers the ability to process big data. But using MapReduce well, that is, parallelizing an existing single-machine algorithm, is not easy. In many cases we have to ask whether the single-machine algorithm can be recast as matrix operations, which makes matrix operations the foundation of algorithm parallelization.
For example, two matrices stored as "row column value" triples, one non-zero entry per line:

1 1 1
1 2 2
1 3 3
2 1 4
2 2 5
3 1 7
3 2 8
3 3 9
4 1 10
4 2 11
4 3 12

1 1 10
1 2 15
2 2 2
3 1 11
3 2 9
In the Reduce phase two things need to be known: which element of the result matrix a group of values belongs to, and whether each value comes from matrix A or matrix B.
The first can be determined from the key, because in the Map phase the key is already constructed in the "row,column" form of the corresponding result-matrix element.
The second can be read directly from the value, because the Map phase also prefixes each value with a tag marking its source matrix.
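As a concrete illustration, using the 4-row matrix A and 2-column matrix B defined below: the entry A(1,4)=3 is emitted once per result column as <"1,1", "A:4,3"> and <"1,2", "A:4,3">, while the entry B(4,1)=3 is emitted once per result row as <"1,1", "B:4,3">, <"2,1", "B:4,3">, <"3,1", "B:4,3"> and <"4,1", "B:4,3">. The reducer for key "1,1" then matches the two sides on the inner index 4 and adds 3*3 to C(1,1).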
Only non-zero entries are stored, in three columns: the first column is the row in the original matrix, the second column is the column in the original matrix, and the third column is the value.
sm1.csv
1,1,1
1,4,3
2,1,2
2,2,5
2,4,4
3,4,1
4,1,4
4,2,7
4,3,1
4,4,2
sm2.csv
1,1,5
2,2,2
4,1,3
4,2,1
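Expanding the triples (and filling in the omitted zero entries), sm1.csv describes a 4x4 matrix A and sm2.csv a 4x2 matrix B:

A = 1 0 0 3        B = 5 0
    2 5 0 4            0 2
    0 0 0 1            0 0
    4 7 1 2            3 1

so the expected product C = A x B is:

14  3
22 14
 3  1
26 16

The MapReduce implementation follows.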
package org.conan.myhadoop.matrix;
import java.io.IOException;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
import org.conan.myhadoop.hdfs.HdfsDAO;
public class SparseMartrixMultiply {
public static class SparseMatrixMapper extends Mapper<LongWritable, Text, Text, Text> {
private String flag;// filename of the current split: m1 or m2
private int rowNum = 4;// number of rows of matrix A
private int colNum = 2;// number of columns of matrix B
@Override
protected void setup(Context context) throws IOException, InterruptedException {
FileSplit split = (FileSplit) context.getInputSplit();
flag = split.getPath().getName();// identify which input file this split comes from
}
@Override
public void map(LongWritable key, Text values, Context context) throws IOException, InterruptedException {
String[] tokens = MainRun.DELIMITER.split(values.toString());
if (flag.equals("m1")) {
String row = tokens[0];
String col = tokens[1];
String val = tokens[2];
for (int i = 1; i <= colNum; i++) {
Text k = new Text(row + "," + i);
Text v = new Text("A:" + col + "," + val);
context.write(k, v);
System.out.println(k.toString() + " " + v.toString());
}
} else if (flag.equals("m2")) {
String row = tokens[0];
String col = tokens[1];
String val = tokens[2];
for (int i = 1; i <= rowNum; i++) {
Text k = new Text(i + "," + col);
Text v = new Text("B:" + row + "," + val);
context.write(k, v);
System.out.println(k.toString() + " " + v.toString());
}
}
}
}
public static class SparseMatrixReducer extends Reducer<Text, Text, Text, IntWritable> {
@Override
public void reduce(Text key, Iterable<Text> values, Context context) throws IOException, InterruptedException {
Map<String, String> mapA = new HashMap<String, String>();
Map<String, String> mapB = new HashMap<String, String>();
System.out.print(key.toString() + ":");
for (Text line : values) {
String val = line.toString();
System.out.print("(" + val + ")");
if (val.startsWith("A:")) {
String[] kv = MainRun.DELIMITER.split(val.substring(2));
mapA.put(kv[0], kv[1]);
// System.out.println("A:" + kv[0] + "," + kv[1]);
} else if (val.startsWith("B:")) {
String[] kv = MainRun.DELIMITER.split(val.substring(2));
mapB.put(kv[0], kv[1]);
// System.out.println("B:" + kv[0] + "," + kv[1]);
}
}
int result = 0;
Iterator<String> iter = mapA.keySet().iterator();
while (iter.hasNext()) {
String mapk = iter.next();
String bVal = mapB.containsKey(mapk) ? mapB.get(mapk) : "0";
result += Integer.parseInt(mapA.get(mapk)) * Integer.parseInt(bVal);
}
context.write(key, new IntWritable(result));
System.out.println();
// System.out.println("C:" + key.toString() + "," + result);
}
}
public static void run(Map<String, String> path) throws IOException, InterruptedException, ClassNotFoundException {
JobConf conf = MainRun.config();
String input = path.get("input");
String input1 = path.get("input1");
String input2 = path.get("input2");
String output = path.get("output");
HdfsDAO hdfs = new HdfsDAO(MainRun.HDFS, conf);
hdfs.rmr(input);
hdfs.mkdirs(input);
hdfs.copyFile(path.get("m1"), input1);
hdfs.copyFile(path.get("m2"), input2);
Job job = new Job(conf);
job.setJarByClass(SparseMartrixMultiply.class);
job.setOutputKeyClass(Text.class);
job.setOutputValueClass(Text.class);
job.setMapperClass(SparseMatrixMapper.class);
job.setReducerClass(SparseMatrixReducer.class);
job.setInputFormatClass(TextInputFormat.class);
job.setOutputFormatClass(TextOutputFormat.class);
FileInputFormat.setInputPaths(job, new Path(input1), new Path(input2));// load both input datasets
FileOutputFormat.setOutputPath(job, new Path(output));
job.waitForCompletion(true);
}
}
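For the data files above, the reducer writes one line per result element in "i,j<TAB>value" form, so the job output (part-r-00000) should look like:

1,1	14
1,2	3
2,1	22
2,2	14
3,1	3
3,2	1
4,1	26
4,2	16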
Add the startup configuration for SparseMartrixMultiply:
public static void main(String[] args) {
sparseMartrixMultiply();
}
public static void sparseMartrixMultiply() {
Map<String, String> path = new HashMap<String, String>();
path.put("m1", "logfile/matrix/sm1.csv");// 本地的数据文件
path.put("m2", "logfile/matrix/sm2.csv");
path.put("input", HDFS + "/user/hdfs/matrix");// HDFS的目录
path.put("input1", HDFS + "/user/hdfs/matrix/m1");
path.put("input2", HDFS + "/user/hdfs/matrix/m2");
path.put("output", HDFS + "/user/hdfs/matrix/output");
try {
SparseMartrixMultiply.run(path);// launch the job
} catch (Exception e) {
e.printStackTrace();
}
System.exit(0);
}
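The launcher above lives in the project's driver class MainRun, which also provides the constants and helpers the job code references (MainRun.HDFS, MainRun.DELIMITER, MainRun.config()), plus the small HdfsDAO wrapper around the Hadoop FileSystem API. Their real definitions are in the linked source; a minimal sketch of what this job assumes, with the HDFS address and delimiter pattern as assumptions, could look like:

import java.util.regex.Pattern;
import org.apache.hadoop.mapred.JobConf;

// Hypothetical sketch of the MainRun members used by SparseMartrixMultiply.
// The NameNode URL and the delimiter are assumptions; see the linked project for the actual values.
public class MainRun {
    public static final String HDFS = "hdfs://192.168.1.210:9000";     // assumed NameNode address
    public static final Pattern DELIMITER = Pattern.compile("[\t,]");  // split input lines on tab or comma

    public static JobConf config() {
        JobConf conf = new JobConf(MainRun.class);
        conf.setJobName("SparseMartrixMultiply");
        return conf;
    }
}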
Note: the model and code above come from the following sources:
Computation model: http://blog.csdn.net/xyilu/article/details/9066973
Full code: http://blog.fens.me/hadoop-mapreduce-matrix/
Original post: http://blog.csdn.net/jiangsanfeng1111/article/details/51025744