一.机器规划
二.部署完全分布式hadoop与zookeeper
三.修改配置文件并分发配置
vim hdfs-site.xml
<!-- Logical name of the HA nameservice; referenced by fs.defaultFS in core-site.xml -->
<property>
<name>dfs.nameservices</name>
<value>ns</value>
</property>
<!-- The two NameNode IDs that make up nameservice "ns" -->
<property>
<name>dfs.ha.namenodes.ns</name>
<value>nn1,nn2</value>
</property>
<!-- RPC endpoint of NameNode nn1 -->
<property>
<name>dfs.namenode.rpc-address.ns.nn1</name>
<value>bigguider22.com:8020</value>
</property>
<!-- RPC endpoint of NameNode nn2 -->
<property>
<name>dfs.namenode.rpc-address.ns.nn2</name>
<value>bigguider23.com:8020</value>
</property>
<!-- Web UI endpoint of NameNode nn1 -->
<property>
<name>dfs.namenode.http-address.ns.nn1</name>
<value>bigguider22.com:50070</value>
</property>
<!-- Web UI endpoint of NameNode nn2 -->
<property>
<name>dfs.namenode.http-address.ns.nn2</name>
<value>bigguider23.com:50070</value>
</property>
<!-- Quorum Journal Manager URI: the three JournalNodes that store shared edit logs -->
<property>
<name>dfs.namenode.shared.edits.dir</name>
<value>qjournal://bigguider22.com:8485;bigguider23.com:8485;bigguider24.com:8485/ns</value>
</property>
<!-- Local directory where each JournalNode keeps its edits -->
<property>
<name>dfs.journalnode.edits.dir</name>
<value>/root/modules/hadoop-2.5.0-cdh5.3.6/data/dfs/jn</value>
</property>
<!-- Proxy class HDFS clients use to locate the active NameNode for "ns" -->
<property>
<name>dfs.client.failover.proxy.provider.ns</name>
<value>org.apache.hadoop.hdfs.server.namenode.ha.ConfiguredFailoverProxyProvider</value>
</property>
<!-- Fence the failed NameNode over SSH during failover to prevent split-brain -->
<property>
<name>dfs.ha.fencing.methods</name>
<value>sshfence</value>
</property>
<!-- Private key used by sshfence to log into the other NameNode host -->
<property>
<name>dfs.ha.fencing.ssh.private-key-files</name>
<value>/root/.ssh/id_rsa</value>
</property>
<!-- Local storage for NameNode fsimage/edits -->
<property>
<name>dfs.namenode.name.dir</name>
<value>/root/modules/hadoop-2.5.0-cdh5.3.6/data/hdfs/name</value>
</property>
<!-- Local storage for DataNode blocks -->
<property>
<name>dfs.datanode.data.dir</name>
<value>/root/modules/hadoop-2.5.0-cdh5.3.6/data/hdfs/data</value>
</property>
<!-- Enable ZKFC-driven automatic failover (requires ha.zookeeper.quorum) -->
<property>
<name>dfs.ha.automatic-failover.enabled</name>
<value>true</value>
</property>
vim core-site.xml
<!-- Default filesystem points at the HA nameservice (not a single host) -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://ns</value>
</property>
<!-- ZooKeeper ensemble used by the ZKFailoverControllers for leader election -->
<property>
<name>ha.zookeeper.quorum</name>
<value>bigguider22.com:2181,bigguider23.com:2181,bigguider24.com:2181</value>
</property>
四.传送配置文件
# Distribute the updated config files to the other two cluster nodes.
for host in bigguider23.com bigguider24.com; do
  for conf in core-site.xml hdfs-site.xml; do
    scp -r "$conf" "$host":/root/modules/hadoop-2.5.0-cdh5.3.6/etc/hadoop/
  done
done
五.启动流程
- 启动zookeeper,再启动journalnode
# Run on all three nodes
bin/zkServer.sh start #start the ZooKeeper server
bin/zkServer.sh status #check server status (one leader, two followers expected)
hadoop-daemon.sh start journalnode
```
2. 格式化 namenode(只能格式化一次)
```sh
# Run on bigguider22.com only
# WARNING: format exactly once on first setup -- reformatting destroys metadata
hdfs namenode -format
hadoop-daemon.sh start namenode
- 同步元数据(hdfs namenode -bootstrapStandby)
# Run on bigguider23.com (the standby NameNode): copy metadata from the active NN
hdfs namenode -bootstrapStandby
- 初始化ZKFC状态
# Initialise the ZKFC znode in ZooKeeper -- run on ONE of the NameNode hosts only
hdfs zkfc -formatZK
- 启动zkfc
sbin/hadoop-daemon.sh start zkfc
- 启动HDFS各个服务
# bigguider22.com
# hdfs namenode -format   # only on very first setup; never re-run
hadoop-daemon.sh start namenode
hadoop-daemon.sh start datanode
yarn-daemon.sh start nodemanager
# bigguider23.com
hadoop-daemon.sh start namenode
hadoop-daemon.sh start datanode
yarn-daemon.sh start resourcemanager
yarn-daemon.sh start nodemanager
# bigguider24.com
hadoop-daemon.sh start datanode
yarn-daemon.sh start nodemanager
# FIX: do NOT run a SecondaryNameNode in an HA cluster -- the standby NameNode
# performs checkpointing, and running SNN alongside HA is an error per Hadoop docs.
# hadoop-daemon.sh start secondarynamenode
mr-jobhistory-daemon.sh start historyserver
网友评论