This topic describes a secondary sort example for MapReduce.
Prerequisites
The environment required for testing is configured by following the Quick Start guide.
Preparations
Prepare the JAR package of the test program. In this example, the package is named mapreduce-examples.jar and is stored in the data\resources directory under the bin directory of the MaxCompute client.
Prepare the test tables and resource for SecondarySort.
Create the test tables.
CREATE TABLE ss_in(key BIGINT, value BIGINT);
CREATE TABLE ss_out(key BIGINT, value BIGINT);
Add the test resource.
-- Omit the -f overwrite flag the first time you add the resource.
add jar data\resources\mapreduce-examples.jar -f;
Use Tunnel to import the data.txt file in the bin directory of the MaxCompute client into the ss_in table. The default comma field delimiter of Tunnel matches the format of the file.
tunnel upload data.txt ss_in;
The data imported into the ss_in table is as follows.
1,2
2,1
1,1
2,2
Procedure
Run SecondarySort on the MaxCompute client.
jar -resources mapreduce-examples.jar -classpath data\resources\mapreduce-examples.jar
com.aliyun.odps.mapred.open.example.SecondarySort ss_in ss_out;
Expected results
After the job succeeds, the output table ss_out contains the following data. Because the map output key is sorted on both columns while partitioning and grouping use only the first column, rows that share a key appear with their values in ascending order.
+------------+------------+
| key | value |
+------------+------------+
| 1 | 1 |
| 1 | 2 |
| 2 | 1 |
| 2 | 2 |
+------------+------------+
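To confirm the result yourself, you can query the output table from the MaxCompute client, for example:
SELECT * FROM ss_out;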
Sample code
For POM dependency information, see the Precautions topic.
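For reference only, the following is a minimal sketch of the Maven dependency this example typically needs. The coordinates and version shown here are assumptions; use the exact values listed in Precautions.
<!-- Assumed coordinates and example version; verify against Precautions. -->
<dependency>
    <groupId>com.aliyun.odps</groupId>
    <artifactId>odps-sdk-mapred</artifactId>
    <version>0.40.10-public</version>
</dependency>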
package com.aliyun.odps.mapred.open.example;

import java.io.IOException;
import java.util.Iterator;

import com.aliyun.odps.data.Record;
import com.aliyun.odps.data.TableInfo;
import com.aliyun.odps.mapred.JobClient;
import com.aliyun.odps.mapred.MapperBase;
import com.aliyun.odps.mapred.ReducerBase;
import com.aliyun.odps.mapred.TaskContext;
import com.aliyun.odps.mapred.conf.JobConf;
import com.aliyun.odps.mapred.utils.InputUtils;
import com.aliyun.odps.mapred.utils.OutputUtils;
import com.aliyun.odps.mapred.utils.SchemaUtils;

/**
 * This is an example ODPS Map/Reduce application. It reads the input table,
 * which must contain two integers per record. The output is sorted by the
 * first and second number and grouped on the first number.
 */
public class SecondarySort {
  /**
   * Read two integers from each record and generate a key/value pair as
   * ((left, right), right).
   */
  public static class MapClass extends MapperBase {
    private Record key;
    private Record value;

    @Override
    public void setup(TaskContext context) throws IOException {
      key = context.createMapOutputKeyRecord();
      value = context.createMapOutputValueRecord();
    }

    @Override
    public void map(long recordNum, Record record, TaskContext context)
        throws IOException {
      long left = 0;
      long right = 0;
      if (record.getColumnCount() > 0) {
        left = (Long) record.get(0);
        if (record.getColumnCount() > 1) {
          right = (Long) record.get(1);
        }
        key.set(new Object[] { left, right });
        value.set(new Object[] { right });
        context.write(key, value);
      }
    }
  }

  /**
   * A reducer class that emits every (key, value) pair of a group, so the
   * output rows appear sorted by both columns.
   */
  public static class ReduceClass extends ReducerBase {
    private Record result = null;

    @Override
    public void setup(TaskContext context) throws IOException {
      result = context.createOutputRecord();
    }

    @Override
    public void reduce(Record key, Iterator<Record> values, TaskContext context)
        throws IOException {
      result.set(0, key.get(0));
      while (values.hasNext()) {
        Record value = values.next();
        result.set(1, value.get(0));
        context.write(result);
      }
    }
  }

  public static void main(String[] args) throws Exception {
    if (args.length != 2) {
      System.err.println("Usage: secondarysort <in> <out>");
      System.exit(2);
    }
    JobConf job = new JobConf();
    job.setMapperClass(MapClass.class);
    job.setReducerClass(ReduceClass.class);
    // Set multiple columns as the key.
    // Sort on both parts of the pair.
    job.setOutputKeySortColumns(new String[] { "i1", "i2" });
    // Partition based on the first part of the pair.
    job.setPartitionColumns(new String[] { "i1" });
    // Group based on the first part of the pair.
    job.setOutputGroupingColumns(new String[] { "i1" });
    // The map output is (LongPair, Long).
    job.setMapOutputKeySchema(SchemaUtils.fromString("i1:bigint,i2:bigint"));
    job.setMapOutputValueSchema(SchemaUtils.fromString("i2x:bigint"));
    InputUtils.addTable(TableInfo.builder().tableName(args[0]).build(), job);
    OutputUtils.addTable(TableInfo.builder().tableName(args[1]).build(), job);
    JobClient.runJob(job);
    System.exit(0);
  }
}
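The three column settings in main() are what implement the secondary sort: setOutputKeySortColumns orders records by both key columns, setPartitionColumns sends all records with the same first column to the same reducer, and setOutputGroupingColumns makes those records arrive in a single reduce() call, already sorted by the second column. As a sketch only, assuming your SDK version provides the JobConf.SortOrder enum, the value order could be reversed like this:
// Sketch: sort the first column ascending and the second descending.
job.setOutputKeySortColumns(new String[] { "i1", "i2" });
job.setOutputKeySortOrder(new JobConf.SortOrder[] {
    JobConf.SortOrder.ASC, JobConf.SortOrder.DESC });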