
HDFS in Depth, Part 2: The Client-Side Java API

Author: 如果仲有听日 | Published 2018-06-25 14:23

    The earlier article 《Hadoop三:Hadoop Java API初探(完结)》 already explained why the Java API work moves from Eclipse on Windows to Eclipse + Maven on Linux, and included a demo program for setting up the development environment on Linux. (See: https://www.jianshu.com/p/dd13c1dba52d)

    This article walks through the HDFS client API in more detail.

    While writing the code, consult the official API documentation: https://hadoop.apache.org/docs/r2.9.1/api/


    1. Initialization code

    conf.set("fs.defaultFS", "hdfs://10.10.77.194:9000"); //这一行的作用很重要,因为new出来的conf文件仅仅存储的是hdfs配置文件默认值,必须要在客户端手动设置

    conf.set("dfs.replication", "2"); //另外需要根据实际情况设置每一个block的副本数量,我做实验设置的是2(配置文件中也是设置的2,跟配置文件保持同步就好)

    package com.gamebear.test1;

    import java.net.URI;
    import java.util.Iterator;
    import java.util.Map.Entry;

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.BlockLocation;   // needed by testLs() in section 6
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.LocatedFileStatus;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.fs.RemoteIterator;

    public class HadoopHdfs {

        static FileSystem fsObj = null;
        static Configuration conf = null;

        private static void init() throws Exception {
            // API reference: http://hadoop.apache.org/docs/stable/api/index.html
            conf = new Configuration();
            // Same keys as in core-site.xml: "10.10.77.194" is the NameNode host, "9000" is its client RPC port
            conf.set("fs.defaultFS", "hdfs://10.10.77.194:9000");
            conf.set("dfs.replication", "2");
            //conf.set("fs.hdfs.impl", "org.apache.hadoop.hdfs.DistributedFileSystem");
            System.out.println(conf.get("fs.hdfs.impl"));
            // Get a Hadoop FileSystem client object
            fsObj = FileSystem.get(conf);
            //fsObj = FileSystem.get(new URI("hdfs://10.10.77.194:9000"), conf, "root"); // alternative: pass the URI and the user ("root") explicitly
        }
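
    For completeness, a minimal driver to exercise these methods might look like the sketch below. The main() itself is my addition, not part of the original article; also note that several of the demo methods further down close fsObj themselves, so either run one demo per execution or move the close() call out of them.

    public static void main(String[] args) throws Exception {
        init();       // must run first so conf and fsObj are ready
        testConf();   // e.g. dump the effective configuration (section 2)
        // run one of the other testXxx() methods from the later sections here
    }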

    2. Viewing the default configuration parameters

    Note that some of these parameters take effect on the Hadoop server side, while others are client-side parameters.

    private static void testConf() throws Exception {
        Iterator<Entry<String, String>> it = conf.iterator();
        while (it.hasNext()) {
            Entry<String, String> ent = it.next();
            System.out.println(ent.getKey() + ":" + ent.getValue());
        }
    }

    Pay attention to the following defaults (a short sketch for querying them directly follows the list):

    dfs.blocksize: 134217728 // default is 128 MB; the minimum allowed is 1 MB
    dfs.replication: 3 // default is 3; my test cluster has only 2 DataNodes, so I set it to 2
    dfs.datanode.data.dir: file://${hadoop.tmp.dir}/dfs/data
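
    If you only care about a few keys rather than the full dump from testConf(), you can read them directly from the Configuration. A minimal sketch (the helper name testConfKeys is mine, not from the article):

    private static void testConfKeys() {
        // print the effective values of the parameters called out above
        System.out.println("dfs.blocksize = " + conf.get("dfs.blocksize"));
        System.out.println("dfs.replication = " + conf.get("dfs.replication"));
        System.out.println("dfs.datanode.data.dir = " + conf.get("dfs.datanode.data.dir"));
    }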

    3. Uploading and downloading files

    Under Eclipse on Linux there are no cross-platform issues (see the note after the download example for the Windows case).

    private static void testUpload() throws Exception {
        // copyFromLocalFile(localSrc, hdfsDst)
        fsObj.copyFromLocalFile(new Path("/kluter/hdpTestFile"), new Path("/json201710.tgz.copy"));
        System.out.println("upload finished!");
        fsObj.close();
    }

    private static void testDownload() throws Exception {
        // copyToLocalFile(hdfsSrc, localDst)
        fsObj.copyToLocalFile(new Path("/json201710.tgz.copy"), new Path("/kluter/hdpTestFileDl"));
        System.out.println("download finished!");
        fsObj.close();
    }
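
    If you ever do need to run this client from Windows, the four-argument overload of copyToLocalFile is the usual workaround: passing useRawLocalFileSystem = true writes through the raw local filesystem and skips the local .crc checksum files, which is the part that typically fails without the winutils native libraries. A sketch (my addition, not from the article):

    private static void testDownloadRaw() throws Exception {
        // copyToLocalFile(delSrc, hdfsSrc, localDst, useRawLocalFileSystem)
        fsObj.copyToLocalFile(false, new Path("/json201710.tgz.copy"),
                new Path("/kluter/hdpTestFileDl"), true);
        System.out.println("download finished!");
        fsObj.close();
    }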

    4. Creating nested directories

    private static void testMkDir() throws Exception {
        // creates the whole /aaa/bbb/ccc chain in one call, like mkdir -p
        boolean mkdirs = fsObj.mkdirs(new Path("/aaa/bbb/ccc"));
        System.out.println("mkdirs finished: " + mkdirs);
    }

    5. Deleting a directory recursively

    private static void testDel() throws Exception {
        // the second argument "true" deletes /aaa/bbb and everything under it
        boolean delBl = fsObj.delete(new Path("/aaa/bbb"), true);
        System.out.println(delBl);
    }

    6. Recursively listing file information under a directory

    private static void testLs() throws Exception {
        // the second argument "true" makes listFiles recurse into subdirectories
        RemoteIterator<LocatedFileStatus> fileLst = fsObj.listFiles(new Path("/"), true);
        while (fileLst.hasNext()) {
            LocatedFileStatus fileStatus = fileLst.next();
            System.out.println("blocksize: " + fileStatus.getBlockSize());
            System.out.println("owner: " + fileStatus.getOwner());
            System.out.println("Replication: " + fileStatus.getReplication());
            System.out.println("permission: " + fileStatus.getPermission());
            System.out.println("name: " + fileStatus.getPath().getName());
            BlockLocation[] blockLocations = fileStatus.getBlockLocations();
            for (BlockLocation bl : blockLocations) {
                System.out.println("block-len: " + bl.getLength() + "---" + "block-offset: " + bl.getOffset());
                String[] hosts = bl.getHosts();
                for (String host : hosts) {
                    System.out.println(host);
                }
            }
        }
    }

    The reason listFiles returns an Iterator rather than a List is that HDFS may hold hundreds of millions of files; materializing them all into a List on the client could exhaust its memory, and transferring the whole list over the network would take a long time.

    An iterator is just a way of pulling data: each call to next() fetches one file's metadata and only needs a small amount of memory, so no large buffer is ever held.

    The code also prints each file's per-block information; knowing which hosts hold each block is what lets MapReduce schedule computation close to the data.

    If you do not want a recursive listing, pass false as the second argument.

    7. Listing file information in a single directory

    private static void testLs2() throws Exception {
        // listStatus is not recursive: it returns the direct children of "/" as an array
        FileStatus[] listStatus = fsObj.listStatus(new Path("/"));
        for (FileStatus file : listStatus) {
            System.out.println("name: " + file.getPath().getName());
            System.out.println("path: " + file.getPath());
            System.out.println(file.isDirectory() ? "directory" : "file");
            System.out.println("----------------------------------------------");
        }
    }

    This approach is suitable when a single directory holds only a small number of files, since everything comes back in one array.

    8. Uploading and downloading files with streams

    Working with HDFS files through streams lets you read an arbitrary byte range starting at a given offset. The stream examples also need a few extra imports; see the note below.
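
    The stream methods below use classes that are not in the import list at the top of the article. The copy()/copyLarge() signatures used here match Apache Commons IO rather than Hadoop's own org.apache.hadoop.io.IOUtils (which offers copyBytes() instead), so I am assuming the following imports:

    import java.io.FileInputStream;
    import java.io.FileOutputStream;

    import org.apache.commons.io.IOUtils;          // copy()/copyLarge() used below
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FSDataOutputStream;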

    8.1 Uploading a file with a stream

    Upload the local file /kluter/streamTestFile to HDFS as /streamTestFile through a stream:

    private static void testStreamUpload() throws Exception {   // renamed so it does not clash with testUpload() in section 3
        FSDataOutputStream dfsOutStream = fsObj.create(new Path("/streamTestFile"), true);  // true = overwrite if the file exists
        FileInputStream inputStream = new FileInputStream("/kluter/streamTestFile");
        IOUtils.copy(inputStream, dfsOutStream);
        inputStream.close();
        dfsOutStream.close();   // closing the HDFS stream is what flushes and finalizes the file
    }

    8.2 Downloading a file with a stream

    private static void testStreamDownload() throws Exception {   // renamed so it does not clash with testDownload() in section 3
        FSDataInputStream inputStream = fsObj.open(new Path("/streamTestFile"));
        FileOutputStream dfsOutStream = new FileOutputStream("/kluter/downloadStreamFile");
        IOUtils.copy(inputStream, dfsOutStream);
        inputStream.close();
        dfsOutStream.close();
    }

    9. Reading an arbitrary range with a stream

    private static void testRandomAccess() throws Exception {
        FSDataInputStream inputStream = fsObj.open(new Path("/streamTestFile"));
        FileOutputStream outStream = new FileOutputStream("/kluter/downloadStreamFile");
        /**
         * Option 1: seek(100) skips the first 100 bytes, then copy the rest
         */
        // inputStream.seek(100);
        // IOUtils.copy(inputStream, outStream);
        /**
         * Option 2: copy under client control (you could also write your own while loop);
         * copyLarge(in, out, 100, 300) skips the first 100 bytes and then copies 300 bytes
         */
        IOUtils.copyLarge(inputStream, outStream, 100, 300);
        inputStream.close();
        outStream.close();
    }

    private static void testPrintScr() throws Exception {
        // stream the HDFS file straight to stdout
        FSDataInputStream inputStream = fsObj.open(new Path("/streamTestFile"));
        IOUtils.copy(inputStream, System.out);
        inputStream.close();
    }
