
We need to submit a YARN MapReduce job from inside a Java program. Compared with the usual approach of submitting a MapReduce job as a jar package on the command line, submitting from a program requires a few small changes, shown in the code below.
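One pitfall worth flagging before the full listing: when the client runs inside a long-lived Java process (a web application, for instance) instead of being launched via `hadoop jar`, `setJarByClass()` may fail to locate a jar containing the job classes, because they live under WEB-INF/classes rather than inside a jar. A minimal sketch of the usual workaround follows; it is not part of the original program, and the jar path "/opt/jobs/gemi-job.jar" is a hypothetical example.

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.mapreduce.Job;

public class ProgrammaticSubmitSketch {
    public static Job createJob() throws java.io.IOException {
        Configuration conf = new Configuration();
        // Point the client at the cluster's configuration files explicitly,
        // since there is no `hadoop jar` wrapper to set up the classpath.
        String rootPath = "/opt/hadoop-2.5.0/etc/hadoop/";
        conf.addResource(new Path(rootPath + "yarn-site.xml"));
        conf.addResource(new Path(rootPath + "core-site.xml"));
        conf.addResource(new Path(rootPath + "hdfs-site.xml"));
        conf.addResource(new Path(rootPath + "mapred-site.xml"));

        Job job = Job.getInstance(conf);
        // Hypothetical path: name a jar containing the job classes directly,
        // for cases where setJarByClass() cannot resolve one.
        job.setJar("/opt/jobs/gemi-job.jar");
        return job;
    }
}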
The MapReduce driver program follows. A few points worth noting:
1. The input format is set to WholeFileInputFormat, so input files are never split.
2. To control the reduce phase, the map output key is a composite key. Instead of the usual <key, value>, map output takes the form <TextPair, value>, where a TextPair has the form <key1, key2>.
3. To work with the composite key, a custom grouping comparator (GroupComparator) is defined. The grouping rule: as long as two TextPairs share the same key1 (key2 may differ), their records go to the same reduce call. Once all records with the same key1 arrive at the reducer, key2 serves as a tag identifying each individual record.
package web.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.WritableComparable;
import org.apache.hadoop.io.WritableComparator;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Partitioner;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.NullOutputFormat;

public class GEMIMain {

    public Job job;

    public GEMIMain() {
        job = null;
    }

    /**
     * Partitions map output by the first field of the composite key, so that
     * all records sharing key1 go to the same reducer.
     */
    public static class NamePartitioner extends
            Partitioner<TextPair, BytesWritable> {
        @Override
        public int getPartition(TextPair key, BytesWritable value,
                int numPartitions) {
            // Mask off the sign bit instead of using Math.abs():
            // Math.abs(Integer.MIN_VALUE) is still negative and would
            // produce an invalid partition number.
            return (key.getFirst().hashCode() * 127 & Integer.MAX_VALUE)
                    % numPartitions;
        }
    }

    /**
     * Grouping comparator: two TextPairs belong to the same group as long as
     * their first keys are equal. Their values are then collected into one
     * value iterator and passed to a single call of the Reducer's reduce method.
     */
    public static class GroupComparator extends WritableComparator {
        public GroupComparator() {
            super(TextPair.class, true);
        }

        @Override
        public int compare(WritableComparable a, WritableComparable b) {
            TextPair t1 = (TextPair) a;
            TextPair t2 = (TextPair) b;
            // Compare only the first field: records whose first fields are
            // equal fall into the same group (0 means equal).
            return t1.getFirst().compareTo(t2.getFirst());
        }
    }

    public boolean runJob(String[] args) throws IOException,
            ClassNotFoundException, InterruptedException {
        Configuration conf = new Configuration();
        // Store the output path in conf so the reduce function can read it.
        conf.set("outputPath", args[args.length - 1]);
        // The second-to-last element of args is the HDFS folder holding the
        // quality files produced by each run.
        conf.set("qualityFolder", args[args.length - 2]);
        // When running inside a server, obtain the web application's root
        // path; when debugging as a plain Java application, read the
        // configuration files under /opt/hadoop-2.5.0/etc/hadoop/.
        // MapReduceProgress mprogress = new MapReduceProgress();
        // String rootPath = mprogress.rootPath;
        String rootPath = "/opt/hadoop-2.5.0/etc/hadoop/";
        conf.addResource(new Path(rootPath + "yarn-site.xml"));
        conf.addResource(new Path(rootPath + "core-site.xml"));
        conf.addResource(new Path(rootPath + "hdfs-site.xml"));
        conf.addResource(new Path(rootPath + "mapred-site.xml"));

        this.job = Job.getInstance(conf);
        job.setJobName("Job name:" + args[0]);
        job.setJarByClass(GEMIMain.class);

        job.setMapperClass(GEMIMapper.class);
        job.setMapOutputKeyClass(TextPair.class);
        job.setMapOutputValueClass(BytesWritable.class);
        // Set the partitioner.
        job.setPartitionerClass(NamePartitioner.class);
        // After partitioning, group records by the first field of the key.
        job.setGroupingComparatorClass(GroupComparator.class);

        job.setReducerClass(GEMIReducer.class);
        job.setInputFormatClass(WholeFileInputFormat.class);
        job.setOutputFormatClass(NullOutputFormat.class);
        // job.setOutputKeyClass(NullWritable.class);
        // job.setOutputValueClass(Text.class);
        job.setNumReduceTasks(8);

        // Input paths: args[1] .. args[length - 3].
        for (int i = 1; i < args.length - 2; i++) {
            FileInputFormat.addInputPath(job, new Path(args[i]));
        }
        // The last element of args is the output path.
        FileOutputFormat.setOutputPath(job, new Path(args[args.length - 1]));

        return job.waitForCompletion(true);
    }

    public static void main(String[] args) throws ClassNotFoundException,
            IOException, InterruptedException {
        String[] inputPaths = new String[] { "normalizeJob",
                "hdfs://192.168.168.101:9000/user/hduser/red1/",
                "hdfs://192.168.168.101:9000/user/hduser/nir1/", "quality11111",
                "hdfs://192.168.168.101:9000/user/hduser/test" };
        GEMIMain test = new GEMIMain();
        boolean result = test.runJob(inputPaths);
        System.exit(result ? 0 : 1);
    }
}
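The driver above references GEMIMapper and GEMIReducer, whose source is not included in this post. As a rough sketch only — the type parameters are implied by the job configuration, but the method bodies and the key-splitting rule are assumptions — they would be shaped roughly like this:

import java.io.IOException;

import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.NullWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;

// Hypothetical sketch: the real GEMIMapper/GEMIReducer are not shown in the post.
public class GEMIMapper extends
        Mapper<Text, BytesWritable, TextPair, BytesWritable> {
    @Override
    protected void map(Text key, BytesWritable value, Context context)
            throws IOException, InterruptedException {
        // key is the file name emitted by WholeFileRecordReader. Deriving
        // key1 from the part before '_' is an assumed grouping rule; key2
        // is the full file name, used as a per-record tag.
        String[] parts = key.toString().split("_", 2);
        context.write(new TextPair(parts[0], key.toString()), value);
    }
}

public class GEMIReducer extends
        Reducer<TextPair, BytesWritable, NullWritable, NullWritable> {
    @Override
    protected void reduce(TextPair key, Iterable<BytesWritable> values,
            Context context) throws IOException, InterruptedException {
        // All records sharing key.getFirst() arrive in this one call. Because
        // the job uses NullOutputFormat, results would be written by hand to
        // the folder stored in conf ("outputPath").
        String outputPath = context.getConfiguration().get("outputPath");
        for (BytesWritable value : values) {
            // As the iterator advances, the framework updates the key object,
            // so key.getSecond() identifies the current record (the "tag"
            // role of key2 described earlier).
            String tag = key.getSecond().toString();
            // ... process this file's bytes ...
        }
    }
}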
The TextPair class follows:
package web.hadoop;

import java.io.DataInput;
import java.io.DataOutput;
import java.io.IOException;

import org.apache.hadoop.io.Text;
import org.apache.hadoop.io.WritableComparable;

public class TextPair implements WritableComparable<TextPair> {

    private Text first;
    private Text second;

    public TextPair() {
        set(new Text(), new Text());
    }

    public TextPair(String first, String second) {
        set(new Text(first), new Text(second));
    }

    public TextPair(Text first, Text second) {
        set(first, second);
    }

    public void set(Text first, Text second) {
        this.first = first;
        this.second = second;
    }

    public Text getFirst() {
        return first;
    }

    public Text getSecond() {
        return second;
    }

    @Override
    public void write(DataOutput out) throws IOException {
        first.write(out);
        second.write(out);
    }

    @Override
    public void readFields(DataInput in) throws IOException {
        first.readFields(in);
        second.readFields(in);
    }

    @Override
    public int hashCode() {
        return first.hashCode() * 163 + second.hashCode();
    }

    @Override
    public boolean equals(Object o) {
        if (o instanceof TextPair) {
            TextPair tp = (TextPair) o;
            return first.equals(tp.first) && second.equals(tp.second);
        }
        return false;
    }

    @Override
    public String toString() {
        return first + "\t" + second;
    }

    /**
     * A.compareTo(B) returns 0 if the two pairs are equal, a positive value
     * if A sorts after B, and a negative value if A sorts before B.
     */
    @Override
    public int compareTo(TextPair tp) {
        int cmp = first.compareTo(tp.first);
        if (cmp != 0) {
            return cmp;
        }
        // Equal first fields: fall back to the second field, giving an
        // ascending secondary sort.
        return second.compareTo(tp.second);
    }
}
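To see how TextPair's full ordering interacts with the GroupComparator defined earlier, here is a small standalone check. It is illustrative only, not from the original post; the key strings are made up.

package web.hadoop;

// Illustrative check: the full comparator sorts by (first, second), while the
// grouping comparator treats pairs with equal first fields as one group.
public class TextPairDemo {
    public static void main(String[] args) {
        TextPair a = new TextPair("file1", "red");
        TextPair b = new TextPair("file1", "nir");
        TextPair c = new TextPair("file2", "red");

        // Full comparator: a and b differ, so key2 still drives sort order...
        System.out.println(a.compareTo(b) != 0);      // true
        // ...but the grouping comparator sees them as equal, so their values
        // would share a single reduce() call.
        GEMIMain.GroupComparator group = new GEMIMain.GroupComparator();
        System.out.println(group.compare(a, b) == 0); // true
        System.out.println(group.compare(a, c) == 0); // false
    }
}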
The WholeFileInputFormat class follows; it ensures that input files are not split during the MapReduce run:
package web.hadoop;

import java.io.IOException;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.JobContext;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;

public class WholeFileInputFormat extends FileInputFormat<Text, BytesWritable> {

    @Override
    public RecordReader<Text, BytesWritable> createRecordReader(
            InputSplit split, TaskAttemptContext context) throws IOException,
            InterruptedException {
        return new WholeFileRecordReader();
    }

    @Override
    protected boolean isSplitable(JobContext context, Path filename) {
        // Never split a file: each map task reads exactly one whole file.
        return false;
    }
}
The WholeFileRecordReader class follows:
package web.hadoop;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.BytesWritable;
import org.apache.hadoop.io.IOUtils;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.InputSplit;
import org.apache.hadoop.mapreduce.RecordReader;
import org.apache.hadoop.mapreduce.TaskAttemptContext;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;

public class WholeFileRecordReader extends RecordReader<Text, BytesWritable> {

    private FileSplit fileSplit;
    private FSDataInputStream fis;
    private Text key = null;
    private BytesWritable value = null;
    private boolean processed = false;

    @Override
    public void initialize(InputSplit inputSplit, TaskAttemptContext context)
            throws IOException, InterruptedException {
        fileSplit = (FileSplit) inputSplit;
        Configuration job = context.getConfiguration();
        Path file = fileSplit.getPath();
        FileSystem fs = file.getFileSystem(job);
        fis = fs.open(file);
    }

    @Override
    public boolean nextKeyValue() {
        if (key == null) {
            key = new Text();
        }
        if (value == null) {
            value = new BytesWritable();
        }
        if (!processed) {
            // Read the entire file as a single record: the key is the file
            // name and the value is the file's raw bytes.
            byte[] content = new byte[(int) fileSplit.getLength()];
            Path file = fileSplit.getPath();
            key.set(file.getName());
            try {
                IOUtils.readFully(fis, content, 0, content.length);
                value.set(content, 0, content.length);
            } catch (IOException e) {
                e.printStackTrace();
            } finally {
                IOUtils.closeStream(fis);
            }
            processed = true;
            return true;
        }
        return false;
    }

    @Override
    public Text getCurrentKey() throws IOException, InterruptedException {
        return this.key;
    }

    @Override
    public BytesWritable getCurrentValue() throws IOException,
            InterruptedException {
        return this.value;
    }

    @Override
    public float getProgress() throws IOException, InterruptedException {
        // The whole file is one record, so progress is either 0 or 1.
        // (Returning fileSplit.getLength() here, as in the original draft,
        // would report a value far outside the expected [0, 1] range.)
        return processed ? 1.0f : 0.0f;
    }

    @Override
    public void close() throws IOException {
        // The input stream is already closed in nextKeyValue() once the file
        // has been read, so there is nothing left to release here.
    }
}