Hadoop notes

I. Running WordCount

The Java code (written against the old org.apache.hadoop.mapred API):

package org.apache.hadoop.examples;

import java.io.IOException;
import java.util.Iterator;
import java.util.StringTokenizer;

import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IntWritable;
import org.apache.hadoop.io.LongWritable;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.mapred.FileInputFormat;
import org.apache.hadoop.mapred.FileOutputFormat;
import org.apache.hadoop.mapred.JobClient;
import org.apache.hadoop.mapred.JobConf;
import org.apache.hadoop.mapred.MapReduceBase;
import org.apache.hadoop.mapred.Mapper;
import org.apache.hadoop.mapred.OutputCollector;
import org.apache.hadoop.mapred.Reducer;
import org.apache.hadoop.mapred.Reporter;
import org.apache.hadoop.mapred.TextInputFormat;
import org.apache.hadoop.mapred.TextOutputFormat;

public class WordCount {

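    // Mapper: splits each input line into whitespace-separated tokens
    // and emits (word, 1) for every token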
    public static class Map extends MapReduceBase implements
            Mapper<LongWritable, Text, Text, IntWritable> {
        private final static IntWritable one = new IntWritable(1);
        private Text word = new Text();

        public void map(LongWritable key, Text value,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            String line = value.toString();
            StringTokenizer tokenizer = new StringTokenizer(line);
            while (tokenizer.hasMoreTokens()) {
                word.set(tokenizer.nextToken());
                output.collect(word, one);
            }
        }
    }

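    // Reducer: sums all the counts emitted for each word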
    public static class Reduce extends MapReduceBase implements
            Reducer<Text, IntWritable, Text, IntWritable> {
        public void reduce(Text key, Iterator<IntWritable> values,
                OutputCollector<Text, IntWritable> output, Reporter reporter)
                throws IOException {
            int sum = 0;
            while (values.hasNext()) {
                sum += values.next().get();
            }
            output.collect(key, new IntWritable(sum));
        }
    }

    public static void main(String[] args) throws Exception {
        JobConf conf = new JobConf(WordCount.class);
        conf.setJobName("wordcount");

        conf.setOutputKeyClass(Text.class);
        conf.setOutputValueClass(IntWritable.class);

        conf.setMapperClass(Map.class);
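        // summing is associative and commutative, so the reducer can
        // double as the combiner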
        conf.setCombinerClass(Reduce.class);
        conf.setReducerClass(Reduce.class);

        conf.setInputFormat(TextInputFormat.class);
        conf.setOutputFormat(TextOutputFormat.class);

        FileInputFormat.setInputPaths(conf, new Path(args[0]));
        FileOutputFormat.setOutputPath(conf, new Path(args[1]));

        JobClient.runJob(conf);
    }
}

First, compile it (adjust the jar versions to match your Hadoop installation):

javac -classpath ./share/hadoop/common/hadoop-common-2.7.6.jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.6.jar -d WordCount ./WordCount/WordCount.java

Then package the classes into a jar. Since -d WordCount put the compiled classes under WordCount/org/..., run this from inside the WordCount directory:

jar -cvf wordcount.jar org/*

Then copy wordcount.jar to the Hadoop working directory.

Next, under the Hadoop working directory, create an input directory (mkdir input) and a file inside it (vi file1) with the following content:

hello world
hello hadoop
hello mapreduce

Upload the file to the Hadoop distributed filesystem:

./bin/hadoop fs -put input/file* input


Then run the job:

./bin/hadoop jar wordcount.jar org.apache.hadoop.examples.WordCount input wordcount_output


Finally, view the results:

./bin/hadoop fs -cat wordcount_output/part-00000
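This job uses the old mapred API (JobClient.runJob), whose reducer output files are named part-00000; jobs written with the new mapreduce API produce part-r-00000 instead, so using part-* covers both. For the three-line file1 above, the expected output is:

hadoop	1
hello	3
mapreduce	1
world	1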

References:

http://cardyn.iteye.com/blog/1356361

https://blog.csdn.net/qichangleixin/article/details/43376587


II. Writing data to HDFS

The Java code:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;

import java.io.*;
import java.net.URI;

/**
 * blog: http://www.iteblog.com/
 * Date: 14-1-2
 * Time: 6:09 PM
 */
public class AppendContent {
    public static void main(String[] args) {
        String hdfs_path = "input/file1"; // HDFS path of the file to append to
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);

        String inpath = "./append.txt";
        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfs_path), conf);
            // input stream over the local file (inpath) whose contents will be appended
            InputStream in = new 
                  BufferedInputStream(new FileInputStream(inpath));
            OutputStream out = fs.append(new Path(hdfs_path));
            IOUtils.copyBytes(in, out, 4096, true);
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}  

Note the form of the HDFS path: a full hdfs://localhost:9000/input/ URI never worked for me, and I am not sure why. A relative path like input/file1 resolves against the user's HDFS home directory (/user/&lt;username&gt;/input/file1), whereas hdfs://localhost:9000/input/ points at /input under the HDFS root, which may simply not exist.
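A minimal debugging sketch, assuming the NameNode runs at hdfs://localhost:9000 (the class name ShowQualifiedPath is illustrative, not part of the original code): it prints the fully qualified URI that a relative path actually resolves to.

import java.net.URI;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

public class ShowQualifiedPath {
    public static void main(String[] args) throws Exception {
        Configuration conf = new Configuration();
        FileSystem fs = FileSystem.get(URI.create("hdfs://localhost:9000"), conf);
        // relative paths resolve against the user's HDFS home directory,
        // i.e. /user/<username>/input/file1
        System.out.println(fs.makeQualified(new Path("input/file1")));
    }
}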

Compile:

javac -classpath ./share/hadoop/common/hadoop-common-2.7.6.jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.6.jar -d ./classes ./my_append/AppendContent.java

Package (use -C so the class files sit at the root of the jar rather than under a classes/ prefix; otherwise hadoop jar cannot find AppendContent):

jar -cvf ./my_jar/append.jar -C ./classes .

Run:

./bin/hadoop jar ./my_jar/append.jar AppendContent

AppendContent is the name of the class to run.

Check the result:

./bin/hdfs dfs -cat input/*


Alternatively, the code can take the HDFS path as a command-line argument, which makes it easier to drive several processes at once:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;

import java.io.IOException;
import java.net.URI;

/**
 * blog: http://www.iteblog.com/
 * Date: 14-1-2
 * Time: 6:09 PM
 */
public class AppendContent {
    public static void main(String[] args) {
        // HDFS path of the file to append to, taken from the command line
        String hdfs_path = args[0];
        Configuration conf = new Configuration();
        conf.setBoolean("dfs.support.append", true);

        FileSystem fs = null;
        try {
            fs = FileSystem.get(URI.create(hdfs_path), conf);
            FSDataOutputStream out = fs.append(new Path(hdfs_path));

            // append ten 1 KB blocks of 'a' characters (10 KB in total)
            String s;
            for (int i = 0; i < 10; i++) {
                s = "";  // reset so each iteration writes exactly 1 KB
                for (int j = 0; j < 1024; j++) {
                    s += 'a';
                }
                int len = s.getBytes().length;
                out.write(s.getBytes(), 0, len);
            }
            out.close();  // flush and close the append stream
        } catch (IOException e) {
            e.printStackTrace();
        }
    }
}

Compile and package as above; run with the target HDFS file as the argument (each run appends ten 1 KB blocks of 'a', i.e. 10 KB):

./bin/hadoop jar ./my_jar/append.jar AppendContent input/file1


References:

https://blog.csdn.net/jameshadoop/article/details/24179413

https://blog.csdn.net/wypblog/article/details/17914021

A script to time seven appends. HDFS allows only a single writer per file at a time, which is why each process appends to its own file${i}:

#!/bin/bash

# start time (nanoseconds)
begin=$(date +%s%N)

for ((i=0; i<7; i++))
do
    {
        ./bin/hadoop jar ./my_jar/append.jar AppendContent input/file${i}
    } &   # background each job so the seven appends run in parallel; wait below depends on this
done

wait
# end time (nanoseconds)
end=$(date +%s%N)

use_tm=$(echo $end $begin | awk '{ print ($1 - $2) / 1000000000 }')
echo "elapsed time: ${use_tm}s"


III. Testing Java writes on ext3

The program:

import java.io.FileWriter;
import java.io.IOException;

public class Toext3 {
    public static void main(String[] args) {
        // local (ext3) file path, taken from the command line
        String ext3_path = args[0];
        FileWriter writer = null;
        try {
            // open once in append mode and reuse it across iterations,
            // so every block is flushed when the writer is closed
            writer = new FileWriter(ext3_path, true);
            String s;
            for (int i = 0; i < 10; i++) {
                s = "";  // each iteration writes one 1 KB block of 'b' characters
                for (int j = 0; j < 1024; j++) {
                    s += 'b';
                }
                writer.write(s);
                System.out.println(ext3_path);
            }
        } catch (IOException e) {
            e.printStackTrace();
        } finally {
            try {
                if (writer != null) {
                    writer.close();
                }
            } catch (IOException e) {
                e.printStackTrace();
            }
        }
    }
}

Compile (the Hadoop jars on the classpath are not actually needed here, since Toext3 only uses java.io, but they do no harm):

javac -classpath ./share/hadoop/common/hadoop-common-2.7.6.jar:./share/hadoop/mapreduce/hadoop-mapreduce-client-core-2.7.6.jar -d ./data/classes data/Toext3.java

Run:

java -cp ./data/classes/ Toext3 ./data/ext3/file0 

Here -cp points at the directory containing the class files, and Toext3 names the class to run. Each run appends ten 1 KB blocks, so the file grows by 10,240 bytes per invocation.
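As a side note, building each 1 KB string with += is quadratic in the block size, so part of the measured time is string concatenation rather than I/O. A minimal alternative sketch (Toext3Fast is a hypothetical name, not part of the original test) that pre-builds the block once:

import java.io.FileWriter;
import java.io.IOException;
import java.util.Arrays;

public class Toext3Fast {
    public static void main(String[] args) throws IOException {
        // pre-build the 1 KB block once so the timing reflects file I/O
        // rather than string concatenation
        char[] block = new char[1024];
        Arrays.fill(block, 'b');
        // try-with-resources flushes and closes the writer automatically
        try (FileWriter writer = new FileWriter(args[0], true)) {
            for (int i = 0; i < 10; i++) {
                writer.write(block);
            }
        }
    }
}

It compiles and runs the same way, e.g. java -cp ./data/classes/ Toext3Fast ./data/ext3/file0.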

