马士兵 day3: Developing HDFS with Java


Author: PC_Repair | Published 2018-10-16 10:51
    day3: Developing HDFS with Java

    hadoop.tmp.dir defaults to /tmp/hadoop-${user.name}. Because the /tmp directory is wiped when the system reboots, this should be changed to a persistent location.

    • vim /usr/local/hadoop/etc/hadoop/core-site.xml (both the namenode and the datanodes need this change)
    # add the following
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/var/hadoop</value>
    </property>
    

    After changing the hadoop.tmp.dir parameter on the namenode and datanodes, the namenode needs to be reformatted. On master, run: hdfs namenode -format (note: you will be prompted to confirm with Y).

    • Note: 马士兵 only changed the configuration file on master; leaving the datanodes unmodified will prevent them from starting properly.

    Disable permission checking while testing (an alternative that keeps it enabled is sketched after the restart step below). On the namenode: vim /usr/local/hadoop/etc/hadoop/hdfs-site.xml

    # add the following
    <property>
        <name>dfs.permissions.enabled</name>
        <value>false</value>
    </property>
    

    Restart the namenode for the change to take effect:

    hadoop-daemon.sh stop namenode
    hadoop-daemon.sh start namenode
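
    Instead of disabling permission checking, the Java client can also be obtained as a specific HDFS user. The following is a minimal sketch, not from the original article: it reuses the same cluster address as the class below, and the user name "root" is an assumption; substitute whichever user owns the target directories.

    import java.net.URI;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;

    public class HelloHDFSAsUser {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            // connect as the given user; "root" here is only an example user name
            FileSystem fs = FileSystem.get(new URI("hdfs://192.168.56.100:9000"), conf, "root");
            System.out.println(fs.exists(new Path("/")));
            fs.close();
        }
    }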
    

    Using the FileSystem class to read and write HDFS

    • Maven project, pom.xml configuration file
    <dependencies>
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-common</artifactId>
                <version>2.7.3</version>
            </dependency>
    
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-hdfs</artifactId>
                <version>2.7.3</version>
            </dependency>
    
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-mapreduce-client-core</artifactId>
                <version>2.7.3</version>
            </dependency>
    
            <dependency>
                <groupId>org.apache.hadoop</groupId>
                <artifactId>hadoop-client</artifactId>
                <version>2.7.3</version>
            </dependency>
    
            <dependency>
                <groupId>log4j</groupId>
                <artifactId>log4j</artifactId>
                <version>1.2.17</version>
            </dependency>
    </dependencies>
    
    
    • The HelloHDFS class
    package com.hadoop.hdfs;
    
    
    import java.io.FileInputStream;
    import org.slf4j.Logger;
    import org.slf4j.LoggerFactory;
    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataOutputStream;
    import org.apache.hadoop.fs.FileStatus;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;
    
    /**
     * Created by lujiafeng on 2018/10/6.
     */
    public class HelloHDFS {
        public static final Logger logger = LoggerFactory.getLogger(HelloHDFS.class);
    
        public static void main(String[] args) throws Exception {
    
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://192.168.56.100:9000");
            conf.set("dfs.replication", "2"); //上传文件复制份数,默认为3
            FileSystem fileSystem = FileSystem.get(conf);
    
            boolean success;
    
    //        success = fileSystem.mkdirs(new Path("/hadoopljf"));
    //        logger.info("mkdirs succeeded: " + success);
    //
    //        success = fileSystem.exists(new Path("/hadoopljf"));
    //        logger.info("path exists: " + success);

    //        success = fileSystem.delete(new Path("/hadoopljf"), true);
    //        logger.info("delete succeeded: " + success);

    //        success = fileSystem.exists(new Path("/hadoopljf"));
    //        logger.info("path exists: " + success);

            // upload a local file to HDFS
    //        FSDataOutputStream out = fileSystem.create(new Path("/test.data"), true);
    //        FileInputStream in = new FileInputStream("/Users/lujiafeng/GitRepositories/test.txt");
    //        IOUtils.copyBytes(in, out, 4096, true);
    
    //        FSDataOutputStream out = fileSystem.create(new Path("/test2.data"));
    //        FileInputStream in = new FileInputStream("/Users/lujiafeng/GitRepositories/test.txt");
    //        byte[] buf = new byte[4096];
    //        int len = in.read(buf);
    //        while (len != -1) {
    //            out.write(buf, 0, len);
    //            len = in.read(buf);
    //        }
    //        in.close();
    //        out.close();
    
            // list everything under the HDFS root directory
            FileStatus[] statuses = fileSystem.listStatus(new Path("/"));
            logger.info("entries under /: " + statuses.length);
            for (FileStatus status : statuses) {
                logger.info("path: " + status.getPath());
                logger.info("permission: " + status.getPermission());
                logger.info("replication: " + status.getReplication());
            }
    
        }
    }
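
    The class above only creates, deletes, uploads, and lists paths. To round out "read and write", here is a minimal read sketch that is not part of the original article; it reuses the same fs.defaultFS and assumes /test.data was uploaded by the commented-out code above.

    import org.apache.hadoop.conf.Configuration;
    import org.apache.hadoop.fs.FSDataInputStream;
    import org.apache.hadoop.fs.FileSystem;
    import org.apache.hadoop.fs.Path;
    import org.apache.hadoop.io.IOUtils;

    public class ReadHDFS {
        public static void main(String[] args) throws Exception {
            Configuration conf = new Configuration();
            conf.set("fs.defaultFS", "hdfs://192.168.56.100:9000");
            FileSystem fileSystem = FileSystem.get(conf);

            // open the file created by the upload example and copy it to stdout;
            // the final "true" closes both streams when the copy finishes
            FSDataInputStream in = fileSystem.open(new Path("/test.data"));
            IOUtils.copyBytes(in, System.out, 4096, true);
        }
    }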
    
