

Using MapReduce to count how many times each word appears in each file (a use of FileSplit), with the word as the key and the containing file plus count as the value


The code for step one is as follows:

package cn.toto.bigdata.mr.index;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.FileSplit;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexCreateStepOne {

    public static class IndexCreateMapper extends Mapper<LongWritable, Text, Text, IntWritable> {
        Text k = new Text();
        IntWritable v = new IntWritable(1);

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            String line = value.toString();
            String[] words = line.split(" ");
            FileSplit inputSplit = (FileSplit) context.getInputSplit();
            // Get the name of the file this line's words came from
            String fileName = inputSplit.getPath().getName();
            // Final output format: key = "word--fileName", value = 1
            for (String word : words) {
                k.set(word + "--" + fileName);
                context.write(k, v);
            }
        }
    }

    public static class IndexCreateReducer extends Reducer<Text, IntWritable, Text, IntWritable> {
        IntWritable v = new IntWritable();

        @Override
        protected void reduce(Text key, Iterable<IntWritable> values, Context context)
                throws IOException, InterruptedException {
            // Sum all the 1s for a given "word--fileName" key
            int count = 0;
            for (IntWritable value : values) {
                count += value.get();
            }
            v.set(count);
            context.write(key, v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // Tell the framework where the jar containing this program lives
        // job.setJar("c:/wordcount.jar");
        job.setJarByClass(IndexCreateStepOne.class);

        // Tell the framework which mapper and reducer classes to use
        job.setMapperClass(IndexCreateMapper.class);
        job.setReducerClass(IndexCreateReducer.class);
        job.setCombinerClass(IndexCreateReducer.class);

        // Tell the framework the mapper's and reducer's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(IntWritable.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(IntWritable.class);

        FileInputFormat.setInputPaths(job, new Path("E:/wordcount/inverindexinput/"));
        // Tell the framework where to write the results
        FileOutputFormat.setOutputPath(job, new Path("E:/wordcount/index-1/"));

        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
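A note on the FileSplit cast above: context.getInputSplit() only returns a FileSplit for plain file-based input formats such as the default TextInputFormat; with a split-combining format like CombineFileInputFormat the cast would throw a ClassCastException. A slightly more defensive variant of the file-name lookup, as a sketch (the exception message is illustrative):

import org.apache.hadoop.mapreduce.InputSplit;

// Guard the cast instead of assuming the split type.
InputSplit split = context.getInputSplit();
if (!(split instanceof FileSplit)) {
    throw new IOException("expected a FileSplit, got " + split.getClass().getName());
}
String fileName = ((FileSplit) split).getPath().getName();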

Preparation

1. The data files to process

b.txt contains lines of space-separated words; the other files, c.txt and d.txt, are similar.
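For illustration, suppose the three input files contain the following made-up words (only the space-separated format matters here):

b.txt:  hello tom hello jerry
c.txt:  hello jerry
d.txt:  tom jerry tom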


After running the first job, the output lists how many times each word appears in each file.
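With the made-up input above, the first job would write something like this to E:/wordcount/index-1/ (key and value separated by a tab):

hello--b.txt    2
hello--c.txt    1
jerry--b.txt    1
jerry--c.txt    1
jerry--d.txt    1
tom--b.txt      1
tom--d.txt      2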


Next, the second step: take the word as the key and the "file--count" pair as the value, and aggregate all the per-file counts for each word, as traced below.
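Concretely, the step-two mapper splits each step-one output line on the tab, splits the key on "--", and regroups the pieces; tracing one hypothetical line:

input line:    hello--b.txt    2
mapper emits:  key = "hello", value = "b.txt--2"

The full code for step two is below.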


package cn.toto.bigdata.mr.index;

import java.io.IOException;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.Mapper;
import org.apache.hadoop.mapreduce.Reducer;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;

public class IndexCreateStepTwo {

    public static class IndexCreateStepTwoMapper extends Mapper<LongWritable, Text, Text, Text> {
        Text k = new Text();
        Text v = new Text();

        @Override
        protected void map(LongWritable key, Text value, Context context)
                throws IOException, InterruptedException {
            // Each step-one output line looks like: "word--fileName<TAB>count"
            String line = value.toString();
            String[] fields = line.split("\t");
            String word_file = fields[0];
            String count = fields[1];
            String[] split = word_file.split("--");
            String word = split[0];
            String file = split[1];
            k.set(word);
            v.set(file + "--" + count);
            context.write(k, v);
        }
    }

    public static class IndexCreateStepTwoReducer extends Reducer<Text, Text, Text, Text> {
        Text v = new Text();

        @Override
        protected void reduce(Text key, Iterable<Text> values, Context context)
                throws IOException, InterruptedException {
            // Concatenate all "fileName--count" entries for this word
            StringBuffer sb = new StringBuffer();
            for (Text value : values) {
                sb.append(value.toString()).append(" ");
            }
            v.set(sb.toString());
            context.write(key, v);
        }
    }

    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        Job job = Job.getInstance(conf);

        // Tell the framework where the jar containing this program lives
        // job.setJar("c:/wordcount.jar");
        job.setJarByClass(IndexCreateStepTwo.class);

        // Tell the framework which mapper and reducer classes to use
        job.setMapperClass(IndexCreateStepTwoMapper.class);
        job.setReducerClass(IndexCreateStepTwoReducer.class);
        job.setCombinerClass(IndexCreateStepTwoReducer.class);

        // Tell the framework the mapper's and reducer's output key/value types
        job.setMapOutputKeyClass(Text.class);
        job.setMapOutputValueClass(Text.class);
        job.setOutputKeyClass(Text.class);
        job.setOutputValueClass(Text.class);

        FileInputFormat.setInputPaths(job, new Path("E:/wordcount/index-1/"));
        // Tell the framework where to write the results
        FileOutputFormat.setOutputPath(job, new Path("E:/wordcount/index-2/"));

        boolean res = job.waitForCompletion(true);
        System.exit(res ? 0 : 1);
    }
}
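Both jobs reuse their reducer as a combiner. In step one this is safe because the reduce function sums integers, and addition is associative and commutative. In step two it also happens to work: the combiner's output type (Text) matches its input type, and since the reducer only concatenates "file--count" tokens, pre-concatenating some of them on the map side leaves the final token set unchanged, though token order and spacing may vary. A sketch of the equivalence on made-up data:

without combiner:  reduce("hello", ["b.txt--2", "c.txt--1"])   ->  hello  b.txt--2 c.txt--1
with combiner:     combine("hello", ["b.txt--2", "c.txt--1"])  ->  "b.txt--2 c.txt--1 "
                   reduce("hello", ["b.txt--2 c.txt--1 "])     ->  hello  b.txt--2 c.txt--1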
Running the second job produces the final inverted index: one word per line, followed by every file it appears in and its count in that file.
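Continuing the made-up example, the final inverted index in E:/wordcount/index-2/ would look like:

hello   b.txt--2 c.txt--1
jerry   b.txt--1 c.txt--1 d.txt--1
tom     b.txt--1 d.txt--2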

Summary

Chaining these two MapReduce jobs builds a simple inverted index: step one uses FileSplit to tag every word with the name of the file it came from and counts occurrences per (word, file) pair, and step two regroups those per-file counts under each word.