Hadoop Fully Distributed Installation

Author: 小左伯爵 | Published 2020-11-23 20:39

Cluster role assignment:

          NameNode   Secondary NameNode   DataNode
node01       *
node02                       *               *
node03                                       *
node04                                       *

1. Edit /etc/hosts

[root@localhost ~]# vim /etc/hosts
#add the following entries
192.168.52.81 node01
192.168.52.82 node02
192.168.52.83 node03
192.168.52.84 node04
#distribute the file to the other nodes
[root@localhost ~]# scp /etc/hosts root@192.168.52.82:/etc/hosts
[root@localhost ~]# scp /etc/hosts root@192.168.52.83:/etc/hosts
[root@localhost ~]# scp /etc/hosts root@192.168.52.84:/etc/hosts
#from node01, ping every other node to confirm they are all reachable
[root@localhost ~]# ping node02
[root@localhost ~]# ping node03
[root@localhost ~]# ping node04
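
To double-check that every hostname resolves before moving on, a short loop with getent (a standard glibc tool) can be run on any node:

#each line should print the expected 192.168.52.8x address
[root@localhost ~]# for host in node01 node02 node03 node04; do getent hosts $host; done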

2. Passwordless SSH login

node01 is the master node and must manage the other nodes over SSH, so node01 needs to distribute its public key to each of them.

#if ll -a in the home directory shows no .ssh directory, run ssh localhost first (on all four servers)
#on node01, generate a key pair
[root@localhost .ssh]# ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa
Generating public/private rsa key pair.
Your identification has been saved in /root/.ssh/id_rsa.
Your public key has been saved in /root/.ssh/id_rsa.pub.
The key fingerprint is:
7b:93:e5:67:bb:15:5e:c5:d2:0b:07:f6:be:e6:7d:46 root@localhost.localdomain
The key's randomart image is:
+--[ RSA 2048]----+
|             o   |
|            . oo |
|             ..o+|
|              +.o|
|        S   .  +.|
|         . +  . E|
|        . + . o=.|
|         . . ooo+|
|              o+o|
+-----------------+
#on node01, also authorize the key locally (start-dfs.sh reaches node01 itself over ssh)
[root@node01 .ssh]# cat id_rsa.pub >> authorized_keys
#distribute the public key to the other nodes
[root@localhost .ssh]# scp id_rsa.pub root@node02:`pwd`/node01.pub
#`pwd` expands to the current directory
#the copy is renamed node01.pub to mark it as node01's public key
The authenticity of host 'node02 (192.168.52.82)' can't be established.
RSA key fingerprint is de:e6:65:b3:e0:b3:4e:a7:ab:d1:f9:40:54:63:15:24.
Are you sure you want to continue connecting (yes/no)? yes
Warning: Permanently added 'node02' (RSA) to the list of known hosts.
root@node02's password: 
id_rsa.pub                                                                                     100%  408     0.4KB/s   00:00  
#on node02, append the contents of node01.pub to authorized_keys
[root@localhost .ssh]# ls -rlt
total 8
-rw-r--r--. 1 root root 391 Nov 23 06:49 known_hosts
-rw-r--r--. 1 root root 408 Nov 23 06:54 node01.pub
[root@localhost .ssh]# cat node01.pub >> authorized_keys
[root@localhost .ssh]# ls
authorized_keys  known_hosts  node01.pub
#from node01, ssh node02 now logs in without a password
[root@localhost .ssh]# ssh node02
Last login: Mon Nov 23 07:00:31 2020 from node01
#repeat the same steps for node03 and node04 (or use the sketch below)
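
The scp-and-append steps above can also be automated. Assuming ssh-copy-id is available (it ships with most OpenSSH packages), a minimal sketch run on node01:

#push the public key to every other node in one step;
#ssh-copy-id appends id_rsa.pub to the remote ~/.ssh/authorized_keys and fixes
#its permissions, prompting once for each root password
[root@localhost .ssh]# for host in node02 node03 node04; do ssh-copy-id -i ~/.ssh/id_rsa.pub root@$host; done
#verify: each command should print the remote hostname without asking for a password
[root@localhost .ssh]# for host in node02 node03 node04; do ssh $host hostname; done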

3. Copy the existing pseudo-distributed installation to node01

#run from /opt on the machine that hosts the original pseudo-distributed setup
[root@localhost opt]# scp -r hadoop/ root@192.168.52.81:/opt/

4. Edit core-site.xml on node01

[root@localhost hadoop]# vim /opt/hadoop/hadoop-3.2.1/etc/hadoop/core-site.xml 
#point fs.defaultFS at node01
#store HDFS data under /opt/hadoop/full
<configuration>
<property>
      <name>fs.defaultFS</name>
      <value>hdfs://node01:9000</value>
</property>
<property>
      <name>hadoop.tmp.dir</name>
      <value>/opt/hadoop/full</value>
</property>
</configuration>

5. Edit hdfs-site.xml on node01

[root@localhost hadoop]# vim /opt/hadoop/hadoop-3.2.1/etc/hadoop/hdfs-site.xml
#set the replication factor to 2
#run the Secondary NameNode on node02
<configuration>
<property>
        <name>dfs.replication</name>
        <value>2</value>
</property>
<property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>node02:9868</value>
</property>
</configuration>
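
Before distributing the configuration, it can be sanity-checked with hdfs getconf. The full path is used because the environment variables are only set in step 8; this assumes JAVA_HOME is already configured in hadoop-env.sh from the pseudo-distributed setup:

#confirm the values Hadoop actually picked up
[root@localhost hadoop]# /opt/hadoop/hadoop-3.2.1/bin/hdfs getconf -confKey fs.defaultFS
[root@localhost hadoop]# /opt/hadoop/hadoop-3.2.1/bin/hdfs getconf -confKey dfs.replication
[root@localhost hadoop]# /opt/hadoop/hadoop-3.2.1/bin/hdfs getconf -confKey dfs.namenode.secondary.http-address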

6. Edit workers

In Hadoop 3.x, the DataNode hosts are listed in etc/hadoop/workers (this file was named slaves in Hadoop 2.x).

[root@localhost hadoop]# vim /opt/hadoop/hadoop-3.2.1/etc/hadoop/workers
#replace the contents with
node02
node03
node04

7. Distribute the Hadoop directory from node01 to the other nodes

#from the /opt directory on node01
[root@localhost opt]# scp -r hadoop/ root@node02:`pwd`/
[root@localhost opt]# scp -r hadoop/ root@node03:`pwd`/
[root@localhost opt]# scp -r hadoop/ root@node04:`pwd`/
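
A quick way to spot-check that the copies landed intact is to compare a checksum of one configuration file across all four nodes; the four sums should match:

#compare core-site.xml on node01 and on each worker
[root@localhost opt]# md5sum hadoop/hadoop-3.2.1/etc/hadoop/core-site.xml
[root@localhost opt]# for host in node02 node03 node04; do ssh $host md5sum /opt/hadoop/hadoop-3.2.1/etc/hadoop/core-site.xml; done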

8. Configure the Hadoop environment variables

#on node01, edit /etc/profile and source it, then distribute it to the other nodes and source it there as well
[root@localhost opt]# vim /etc/profile
export JAVA_HOME=/opt/jdk/jdk1.8.0_144
export HADOOP_HOME=/opt/hadoop/hadoop-3.2.1
export PATH=$JAVA_HOME/bin:$HADOOP_HOME/bin:$HADOOP_HOME/sbin:$PATH
[root@localhost opt]# source /etc/profile

[root@localhost etc]# scp profile root@node02:`pwd`/
profile                                                                                        100% 1947     1.9KB/s   00:00    
[root@localhost etc]# scp profile root@node03:`pwd`/
profile                                                                                        100% 1947     1.9KB/s   00:00    
[root@localhost etc]# scp profile root@node04:`pwd`/
profile                                                                                        100% 1947     1.9KB/s   00:00   
#then run source /etc/profile on each node
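
Note that non-interactive ssh shells do not read /etc/profile automatically, so it is sourced explicitly when verifying remotely:

#confirm the hadoop command resolves on every node
[root@localhost etc]# hadoop version | head -1
[root@localhost etc]# for host in node02 node03 node04; do ssh $host 'source /etc/profile && hadoop version | head -1'; done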

9. Format the NameNode

#on node01
[root@localhost etc]# hdfs namenode -format
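
Formatting writes fresh namespace metadata under hadoop.tmp.dir (/opt/hadoop/full here). Assuming the default dfs.namenode.name.dir of ${hadoop.tmp.dir}/dfs/name, the generated clusterID can be inspected afterwards:

#the VERSION file records the clusterID created by the format
[root@localhost etc]# cat /opt/hadoop/full/dfs/name/current/VERSION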

10. Start the cluster

[root@node01 .ssh]# start-dfs.sh
WARNING: HADOOP_SECURE_DN_USER has been replaced by HDFS_DATANODE_SECURE_USER. Using value of HADOOP_SECURE_DN_USER.
Starting namenodes on [node01]
Starting datanodes
Starting secondary namenodes [node02]
#on node01, jps shows only the NameNode process (node01 runs no DataNode)
[root@node01 .ssh]# jps
3792 Jps
3487 NameNode
#check the listening ports: 9870 is the NameNode web UI, 9000 the fs.defaultFS RPC port
[root@node01 ~]# ss -nal
State       Recv-Q Send-Q                                Local Address:Port                                  Peer Address:Port 
LISTEN      0      128                                               *:35116                                            *:*     
LISTEN      0      128                                               *:9870                                             *:*     
LISTEN      0      128                                              :::111                                             :::*     
LISTEN      0      128                                               *:111                                              *:*     
LISTEN      0      128                                              :::22                                              :::*     
LISTEN      0      128                                               *:22                                               *:*     
LISTEN      0      128                                       127.0.0.1:631                                              *:*     
LISTEN      0      128                                             ::1:631                                             :::*     
LISTEN      0      100                                             ::1:25                                              :::*     
LISTEN      0      100                                       127.0.0.1:25                                               *:*     
LISTEN      0      128                                              :::59999                                           :::*     
LISTEN      0      128                                   192.168.52.81:9000                                             *:*  
#visit 192.168.52.81:9870 in a browser to reach the NameNode web UI
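
Beyond the web UI, two quick checks confirm the cluster is healthy:

#the report should list three live DataNodes (node02-node04)
[root@node01 ~]# hdfs dfsadmin -report
#node02 should be running both the SecondaryNameNode and a DataNode
[root@node01 ~]# ssh node02 'source /etc/profile && jps'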
