2022.10.2周总结
建造者模式、抽象工厂模式
使用 Java API 操作 HDFS 文件
package cn.itcast.hdfs;

import com.fasterxml.jackson.databind.ObjectMapper;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FSDataInputStream;
import org.apache.hadoop.fs.FSDataOutputStream;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.io.IOUtils;
import org.junit.After;
import org.junit.Before;
import org.junit.Test;

import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.nio.charset.StandardCharsets;

/**
 * Exercises for operating HDFS through the Java {@link FileSystem} API:
 * mkdir, upload, read, write, append, and copy between HDFS files.
 *
 * <p>Each test opens a connection in {@link #connect2HDFS()} and closes it
 * in {@link #close()}. Requires a reachable NameNode at hadoop102:8020.
 */
public class HDFSClientTest {

    private static Configuration conf = null;
    private static FileSystem fs = null;

    /** Shared JSON serializer; ObjectMapper is thread-safe and costly to build per call. */
    private static final ObjectMapper OBJECT_MAPPER = new ObjectMapper();

    /** Sample data kept from the original notes; not referenced by the tests below. */
    public static String a = "你好";

    /**
     * Initializes the connection to the HDFS cluster before each test.
     *
     * @throws IOException if the file system cannot be reached
     */
    @Before
    public void connect2HDFS() throws IOException {
        // Act as this user so the client has permission to operate on HDFS.
        System.setProperty("HADOOP_USER_NAME", "atguigu");
        conf = new Configuration();
        // Target file system and NameNode RPC address.
        conf.set("fs.defaultFS", "hdfs://hadoop102:8020");
        // Allow append() and keep writing on the original pipeline if a
        // DataNode fails (suitable for small dev clusters).
        conf.setBoolean("dfs.support.append", true);
        conf.set("dfs.client.block.write.replace-datanode-on-failure.policy", "NEVER");
        // BUGFIX: the key was misspelled "fe.hdfs.impl" and silently did nothing.
        conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
        conf.setBoolean("dfs.client.block.write.replace-datanode-on-failure.enable", true);
        fs = FileSystem.get(conf);
    }

    /**
     * Creates the directory /z if it does not already exist.
     */
    @Test
    public void mkdir() throws IOException {
        if (!fs.exists(new Path("/z"))) {
            fs.mkdirs(new Path("/z"));
        }
    }

    /**
     * Uploads a local file to HDFS (local → hdfs).
     */
    @Test
    public void putFile2HDFS() throws IOException {
        Path src = new Path("D:\\1.txt");          // local source
        Path dst = new Path("/z/hdfstest1.txt");   // HDFS destination
        fs.copyFromLocalFile(src, dst);
    }

    /**
     * Streams an HDFS file's content to stdout.
     */
    @Test
    public void text() throws IOException {
        // NOTE(review): other tests write under "/z/"; confirm this
        // "/itheima/" path actually exists on the cluster.
        FSDataInputStream in = fs.open(new Path("/itheima/hdfstest1.txt"));
        // Final 'true' tells IOUtils to close the stream when done.
        IOUtils.copyBytes(in, System.out, 4096, true);
    }

    /**
     * Creates (or overwrites) {@code path} with the given string content.
     *
     * <p>BUGFIX: the original closed the shared {@code fs} instead of the
     * output stream, leaking the stream and breaking any later HDFS call in
     * the same test (e.g. {@link #turn()}). The stream is now closed via
     * try-with-resources and the FileSystem is left open for {@link #close()}.
     *
     * @param path    HDFS file to create
     * @param content text to write (encoded as UTF-8)
     * @throws IOException on any HDFS failure
     */
    public void write(Path path, String content) throws IOException {
        try (FSDataOutputStream out = fs.create(path)) {
            out.write(content.getBytes(StandardCharsets.UTF_8));
            out.flush();
        }
    }

    /**
     * Reads and returns the first line of {@code path}, echoing it to stdout.
     *
     * <p>BUGFIX: the original leaked the input stream and used the deprecated
     * {@code FSDataInputStream.readLine()}; a UTF-8 BufferedReader is used
     * instead and both streams are closed via try-with-resources.
     *
     * @param path HDFS file to read
     * @return the first line, or {@code null} if the file is empty
     * @throws IOException on any HDFS failure
     */
    public String cat(Path path) throws IOException {
        try (FSDataInputStream in = fs.open(path);
             BufferedReader reader =
                     new BufferedReader(new InputStreamReader(in, StandardCharsets.UTF_8))) {
            String line = reader.readLine();
            System.out.println(line);
            return line;
        }
    }

    /**
     * Copies the first line of one HDFS file into another.
     */
    @Test
    public void turn() throws IOException {
        String firstLine = cat(new Path("/z/hdfstest1.txt"));
        write(new Path("/z/hdfstest2.txt"), firstLine);
    }

    /**
     * Appends one JSON-serialized line to an HDFS file, creating it first
     * if necessary.
     *
     * <p>BUGFIX: the original closed the FileSystem <em>before</em> closing
     * the output stream — and {@code path.getFileSystem(conf)} returns the
     * same cached instance as the static {@code fs}, so closing it here broke
     * {@link #close()}. Only the stream is closed now.
     */
    @Test
    public void write2hdfs() throws IOException {
        Object listContent = "张张张张张";
        String filePath = "/z/hdfstest1.txt";
        Path path = new Path(filePath);
        // Cached instance shared with the static `fs`; must NOT be closed here.
        FileSystem appendFs = path.getFileSystem(conf);
        if (!appendFs.exists(path)) {
            appendFs.createNewFile(path);
        }
        try (FSDataOutputStream output = appendFs.append(path)) {
            output.write(OBJECT_MAPPER.writeValueAsString(listContent)
                    .getBytes(StandardCharsets.UTF_8));
            output.write("\n".getBytes(StandardCharsets.UTF_8)); // trailing newline
        }
    }

    /**
     * Downloads a file from HDFS to the local disk (hdfs → local).
     * Kept disabled as in the original; note the src/dst arguments were
     * swapped there and are corrected below.
     */
    /*
    @Test
    public void getFile2Local() throws IOException {
        Path src = new Path("/z/hdfstest1.txt"); // HDFS source
        Path dst = new Path("D:\\1.txt");        // local destination
        fs.copyToLocalFile(src, dst);
    }
    */

    /**
     * Closes the HDFS connection after each test.
     *
     * @throws IOException if closing fails
     */
    @After
    public void close() throws IOException {
        if (fs != null) {
            fs.close();
        }
    }
}