Hadoop (7) - HDFS Client API Operations

1. Client Environment Setup

Copy the compiled Hadoop distribution that matches your operating system to a path containing no Chinese characters.

Set the HADOOP_HOME environment variable, and add Hadoop's bin directory to Path.

Restart your computer.
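If the client later fails with an error complaining that HADOOP_HOME is not set or that winutils.exe cannot be found, Hadoop also accepts the install location via the hadoop.home.dir system property. Below is a minimal sketch for checking the setup from code, assuming the distribution was copied to D:\hadoop-2.7.2 (a hypothetical path, use your own):

public class HadoopHomeCheck {
    public static void main(String[] args) {
        // hadoop.home.dir is the system property Hadoop's Shell utility falls
        // back to when the HADOOP_HOME environment variable is not set.
        // D:\\hadoop-2.7.2 is an assumed install path - change it to yours.
        if (System.getenv("HADOOP_HOME") == null) {
            System.setProperty("hadoop.home.dir", "D:\\hadoop-2.7.2");
        }
        System.out.println("HADOOP_HOME     = " + System.getenv("HADOOP_HOME"));
        System.out.println("hadoop.home.dir = " + System.getProperty("hadoop.home.dir"));
    }
}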

 

2. HdfsClientDemo

Create a Maven project and add the following dependencies to pom.xml. If the import fails, try Reimport.

<dependencies>
        <dependency>
            <groupId>junit</groupId>
            <artifactId>junit</artifactId>
            <version>RELEASE</version>
        </dependency>
        <dependency>
            <groupId>org.apache.logging.log4j</groupId>
            <artifactId>log4j-core</artifactId>
            <version>2.8.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-common</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-client</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>org.apache.hadoop</groupId>
            <artifactId>hadoop-hdfs</artifactId>
            <version>2.7.2</version>
        </dependency>
        <dependency>
            <groupId>jdk.tools</groupId>
            <artifactId>jdk.tools</artifactId>
            <version>1.8</version>
            <scope>system</scope>
            <!-- Maven exposes environment variables as env.*; a bare
                 ${JAVA_HOME} would not resolve here -->
            <systemPath>${env.JAVA_HOME}/lib/tools.jar</systemPath>
        </dependency>
</dependencies>

Create a log4j.properties file under src/main/resources. (Hadoop 2.7.2 logs through Log4j 1.x, which it pulls in transitively, so this file uses the 1.x format even though the pom also lists log4j-core 2.x.)

log4j.rootLogger=INFO, stdout
log4j.appender.stdout=org.apache.log4j.ConsoleAppender
log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
log4j.appender.stdout.layout.ConversionPattern=%d %p [%c] - %m%n
# The file appender below is defined but unused; to enable it, add it to the
# root logger: log4j.rootLogger=INFO, stdout, logfile
log4j.appender.logfile=org.apache.log4j.FileAppender
log4j.appender.logfile.File=target/spring.log
log4j.appender.logfile.layout=org.apache.log4j.PatternLayout
log4j.appender.logfile.layout.ConversionPattern=%d %p [%c] - %m%n

Create a package and the HdfsClient class:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileStatus;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.net.URI;


public class HdfsClient {

    private static final String HADOOP_URI = "hdfs://hadoop100:9000";

    private Configuration configuration;
    private FileSystem fileSystem;


    @Before
    public void before() throws Exception {
        configuration = new Configuration();
        // The Configuration object mirrors Hadoop's configuration files
        // (core-site.xml and friends). For example, to set the replication
        // factor for files uploaded by this client to 1, the key is
        // dfs.replication (not "replication"):
        // configuration.set("dfs.replication", "1");
        fileSystem = FileSystem.get(new URI(HADOOP_URI), configuration, "nty");
    }

    @After
    public void after() throws Exception {
        fileSystem.close();
    }

    /**
     * Create a directory
     */
    @Test
    public void mkdir() throws Exception {
        fileSystem.mkdirs(new Path("/client_test"));
    }

    /**
     * Upload a file
     */
    @Test
    public void upload() throws Exception {
        fileSystem.copyFromLocalFile(new Path("d:\\Hadoop_test\\test1.txt"), new Path("/client_test"));
    }

    /**
     * Download a file
     */
    @Test
    public void download() throws Exception {
        // copyToLocalFile writes through the checksumming local file system, so a
        // .crc file appears next to the target; the four-argument overload
        // copyToLocalFile(delSrc, src, dst, useRawLocalFileSystem) can avoid that.
        fileSystem.copyToLocalFile(new Path("/client_test/test1.txt"), new Path("d:\\Hadoop_test\\test1_1.txt"));
    }

    /**
     * Delete a file or directory; the second argument enables recursive deletion
     */
    @Test
    public void delete() throws Exception {
        fileSystem.delete(new Path("/output"),true);
    }

    /**
     * Rename a file or directory
     */
    @Test
    public void rename() throws Exception {
        fileSystem.rename(new Path("/input"), new Path("/input_rename"));
    }

    /**
     * List the files and directories directly under a path
     */
    @Test
    public void liststatus() throws Exception {
        FileStatus[] fileStatuses = fileSystem.listStatus(new Path("/"));
        for(FileStatus fs : fileStatuses){
            System.out.println(fs.isDirectory() ? (fs.getPath().getName() + " is directory") : (fs.getPath().getName() + " is file"));
        }

    }
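
    /**
     * Recursive file listing - a sketch added for illustration, not part of
     * the original demo. listFiles returns only files (never directories),
     * walking the whole tree when the second argument is true.
     * Needs two extra imports: org.apache.hadoop.fs.LocatedFileStatus and
     * org.apache.hadoop.fs.RemoteIterator.
     */
    @Test
    public void listFilesRecursive() throws Exception {
        RemoteIterator<LocatedFileStatus> files = fileSystem.listFiles(new Path("/"), true);
        while (files.hasNext()) {
            LocatedFileStatus file = files.next();
            // Path and length are available for each file
            System.out.println(file.getPath() + "  len=" + file.getLen());
        }
    }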
    

}
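
The dfs.replication note in before() can be verified end to end. Below is a minimal sketch of an extra test method for the class above; it assumes the same hadoop100:9000 cluster, user nty, and the d:\Hadoop_test\test1.txt sample file used throughout this post.

    /**
     * Upload with a client-side replication override, then read the
     * replication factor back - a sketch, not part of the original demo.
     */
    @Test
    public void uploadWithReplication() throws Exception {
        Configuration conf = new Configuration();
        // Client-side setting: applies only to files written by this client
        conf.set("dfs.replication", "1");
        FileSystem fs = FileSystem.get(new URI(HADOOP_URI), conf, "nty");
        fs.copyFromLocalFile(new Path("d:\\Hadoop_test\\test1.txt"), new Path("/client_test"));
        // Should print 1 rather than the cluster default (usually 3)
        System.out.println(fs.getFileStatus(new Path("/client_test/test1.txt")).getReplication());
        fs.close();
    }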

 
