1. Introduction
2. Building the Production Cluster
Preparation
- Three Linux servers (three are used as the example here)
- JDK installed and configured on every server (do this yourself)
- A ZooKeeper cluster (required for HA mode); see the ZooKeeper setup guide
Configure hosts on all three servers (as the root user)
vim /etc/hosts
# hosts entries; replace with your actual IPs
192.168.32.4 hadoop0001
192.168.32.6 hadoop0002
192.168.32.5 hadoop0003
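A quick optional check: after saving /etc/hosts on each node, the hostnames should resolve to the addresses above, for example:
ping -c 1 hadoop0002    # should answer from 192.168.32.6
ping -c 1 hadoop0003    # should answer from 192.168.32.5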
Create the user group and the user
groupadd hadoop    # create the user group
useradd -m -d /home/hadoop -g hadoop hadoop    # create the user
passwd hadoop      # set the password
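To confirm the account looks right, a quick optional sanity check:
id hadoop    # should show the hadoop user with primary group hadoop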
Give the hadoop user root (sudo) privileges
vim /etc/sudoers
## Allow root to run any commands anywhere
root      ALL=(ALL)       ALL
## Allows people in group wheel to run all commands
%wheel    ALL=(ALL)       ALL
hadoop    ALL=(ALL)       NOPASSWD:ALL    # grant the hadoop user passwordless sudo; append as the last line
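A minimal way to verify the sudoers entry, assuming the line above was appended correctly: switch to the hadoop user and run a passwordless sudo command.
su - hadoop
sudo whoami    # should print root without asking for a password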
Write the cluster distribution script xsync
# Create the xsync file in the /home/hadoop/bin directory
cd /home/hadoop
mkdir bin
cd bin
vim xsync
# File contents:
#!/bin/bash
# 1. Check the number of arguments
if [ $# -lt 1 ]
then
    echo "Not Enough Arguments!"
    exit
fi
# 2. Loop over every machine in the cluster
for host in hadoop0001 hadoop0002 hadoop0003
do
    echo ==================== $host ====================
    # 3. Loop over every file/directory given and send each one
    for file in $@
    do
        # 4. Check whether the file exists
        if [ -e $file ]
        then
            # 5. Get the parent directory (resolving symlinks)
            pdir=$(cd -P $(dirname $file); pwd)
            # 6. Get the file name
            fname=$(basename $file)
            ssh $host "mkdir -p $pdir"
            rsync -av $pdir/$fname $host:$pdir
        else
            echo "$file does not exist!"
        fi
    done
done
# Make the script executable
chmod +x xsync
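The steps below call xsync by name, which assumes /home/hadoop/bin is on the hadoop user's PATH (most distributions add ~/bin via ~/.bash_profile); if it is not, a minimal sketch of the fix:
echo 'export PATH=$PATH:/home/hadoop/bin' >> /home/hadoop/.bashrc
source /home/hadoop/.bashrc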
Passwordless SSH between the servers
Log in to all three servers as the hadoop user and run on each of them:
ssh-keygen -t rsa
# Press Enter three times; two files are generated: id_rsa (private key) and id_rsa.pub (public key)
cd /home/hadoop/.ssh && ls
id_rsa id_rsa.pub
# Then, on each of the three servers, run the following to copy its public key into the authorized_keys file on hadoop0001
ssh-copy-id -i id_rsa.pub hadoop@hadoop0001
# Finally, on hadoop0001, distribute the authorized_keys file to /home/hadoop/.ssh/ on every node
xsync authorized_keys
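Once authorized_keys is in place on every node, logins between any two nodes should no longer prompt for a password; a quick check from hadoop0001:
ssh hadoop0002 hostname    # should print hadoop0002 without a password prompt
ssh hadoop0003 hostname    # should print hadoop0003 without a password prompt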
Hadoop fully distributed configuration (remember to switch to the hadoop user)
# Upload the Hadoop tarball to /home/hadoop/soft/ on hadoop0001
cd /home/hadoop/soft
# Extract it
tar -xzvf hadoop-3.2.2.tar.gz
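The later hdfs and mapred commands are easier to run if HADOOP_HOME is exported for the hadoop user; a minimal sketch, assuming the install path above (adjust the JAVA_HOME example to wherever your JDK actually lives):
cat >> /home/hadoop/.bashrc << 'EOF'
export HADOOP_HOME=/home/hadoop/soft/hadoop-3.2.2
export PATH=$PATH:$HADOOP_HOME/bin:$HADOOP_HOME/sbin
EOF
source /home/hadoop/.bashrc
# If start-dfs.sh later complains that JAVA_HOME is not set, also export it in
# etc/hadoop/hadoop-env.sh, e.g. export JAVA_HOME=/usr/java/jdk1.8.0 (hypothetical path)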
Edit the configuration
After extraction, go to /home/hadoop/soft/hadoop-3.2.2/etc/hadoop; all of the configuration files that need editing live in this directory.
- Edit core-site.xml
<configuration>
    <!-- Address of the HDFS NameNode (nn) -->
    <property>
        <name>fs.defaultFS</name>
        <value>hdfs://hadoop0001:8020</value>
    </property>
    <!-- Hadoop data storage directory; several directories can be listed, separated by commas; make sure the hadoop user has read/write permission -->
    <property>
        <name>hadoop.tmp.dir</name>
        <value>/home/hadoop/soft/hadoop-3.2.2/data</value>
    </property>
    <!-- Static user for the HDFS web UI -->
    <property>
        <name>hadoop.http.staticuser.user</name>
        <value>hadoop</value>
    </property>
</configuration>
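Because hadoop.tmp.dir above sits under the hadoop user's home directory, permissions are already fine; if you point it at another location instead, pre-create the directory and hand it to the hadoop user first (the /data/hadoop path here is only an example):
sudo mkdir -p /data/hadoop
sudo chown -R hadoop:hadoop /data/hadoop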
- Edit hdfs-site.xml
<configuration>
    <!-- NameNode web UI address -->
    <property>
        <name>dfs.namenode.http-address</name>
        <value>hadoop0001:9870</value>
    </property>
    <!-- SecondaryNameNode web UI address -->
    <property>
        <name>dfs.namenode.secondary.http-address</name>
        <value>hadoop0003:9870</value>
    </property>
</configuration>
- Edit yarn-site.xml
<configuration>
    <!-- Use the MapReduce shuffle auxiliary service -->
    <property>
        <name>yarn.nodemanager.aux-services</name>
        <value>mapreduce_shuffle</value>
    </property>
    <!-- ResourceManager host -->
    <property>
        <name>yarn.resourcemanager.hostname</name>
        <value>hadoop0002</value>
    </property>
    <!-- ResourceManager web UI address -->
    <property>
        <name>yarn.resourcemanager.webapp.address</name>
        <value>${yarn.resourcemanager.hostname}:8088</value>
    </property>
    <!-- Environment variables inherited by containers -->
    <property>
        <name>yarn.nodemanager.env-whitelist</name>
        <value>JAVA_HOME,HADOOP_COMMON_HOME,HADOOP_HDFS_HOME,HADOOP_CONF_DIR,CLASSPATH_PREPEND_DISTCACHE,HADOOP_YARN_HOME,HADOOP_MAPRED_HOME</value>
    </property>
    <!-- Enable log aggregation -->
    <property>
        <name>yarn.log-aggregation-enable</name>
        <value>true</value>
    </property>
    <!-- Log server (JobHistory) URL -->
    <property>
        <name>yarn.log.server.url</name>
        <value>http://hadoop0001:19888/jobhistory/logs</value>
    </property>
    <!-- Keep aggregated logs for 7 days -->
    <property>
        <name>yarn.log-aggregation.retain-seconds</name>
        <value>604800</value>
    </property>
    <!-- Scheduler; the Capacity Scheduler is the default -->
    <property>
        <name>yarn.resourcemanager.scheduler.class</name>
        <value>org.apache.hadoop.yarn.server.resourcemanager.scheduler.capacity.CapacityScheduler</value>
    </property>
    <!-- Number of threads the ResourceManager uses to handle scheduler requests; default 50 -->
    <property>
        <name>yarn.resourcemanager.scheduler.client.thread-count</name>
        <value>6</value>
    </property>
    <!-- Memory available to the NodeManager; default 8 GB, set to 4 GB here -->
    <property>
        <name>yarn.nodemanager.resource.memory-mb</name>
        <value>4096</value>
    </property>
    <!-- NodeManager CPU vcores; defaults to 8 when not auto-detected from the hardware, set to 4 here -->
    <property>
        <name>yarn.nodemanager.resource.cpu-vcores</name>
        <value>4</value>
    </property>
    <!-- Minimum container memory; default 1 GB -->
    <property>
        <name>yarn.scheduler.minimum-allocation-mb</name>
        <value>1024</value>
    </property>
    <!-- Maximum container memory; default 8 GB, set to 2 GB here -->
    <property>
        <name>yarn.scheduler.maximum-allocation-mb</name>
        <value>2048</value>
    </property>
    <!-- Minimum container vcores; default 1 -->
    <property>
        <name>yarn.scheduler.minimum-allocation-vcores</name>
        <value>1</value>
    </property>
    <!-- Maximum container vcores; default 4, set to 2 here -->
    <property>
        <name>yarn.scheduler.maximum-allocation-vcores</name>
        <value>2</value>
    </property>
    <!-- Virtual memory check; enabled by default, disabled here -->
    <property>
        <name>yarn.nodemanager.vmem-check-enabled</name>
        <value>false</value>
    </property>
</configuration>
- Edit mapred-site.xml
<configuration>
    <!-- Run MapReduce jobs on YARN -->
    <property>
        <name>mapreduce.framework.name</name>
        <value>yarn</value>
    </property>
    <!-- JobHistory server address -->
    <property>
        <name>mapreduce.jobhistory.address</name>
        <value>hadoop0001:10020</value>
    </property>
    <!-- JobHistory server web UI address -->
    <property>
        <name>mapreduce.jobhistory.webapp.address</name>
        <value>hadoop0001:19888</value>
    </property>
</configuration>
- Edit workers (one hostname per line; avoid trailing spaces and blank lines)
hadoop0001
hadoop0002
hadoop0003
- Distribute the Hadoop installation to all nodes (the other nodes need the full hadoop-3.2.2 directory, not just the etc configuration)
xsync /home/hadoop/soft/hadoop-3.2.2
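An optional check that the copy actually reached the other nodes (any file under the install tree will do):
ssh hadoop0002 "ls /home/hadoop/soft/hadoop-3.2.2/etc/hadoop/core-site.xml"
ssh hadoop0003 "ls /home/hadoop/soft/hadoop-3.2.2/etc/hadoop/core-site.xml"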
- Start the cluster
# Format the NameNode (only needed before the very first start); run on hadoop0001
hdfs namenode -format
# Start HDFS (on hadoop0001)
sbin/start-dfs.sh    # web UI: http://hadoop0001:9870
# Start YARN on the node where the ResourceManager is configured (hadoop0002)
sbin/start-yarn.sh    # web UI: http://hadoop0002:8088
# Start the JobHistory server on hadoop0001
mapred --daemon start historyserver    # web UI: http://hadoop0001:19888/jobhistory
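With HDFS, YARN and the history server all up, the bundled example job makes a handy end-to-end smoke test (a sketch, assuming HADOOP_HOME points at /home/hadoop/soft/hadoop-3.2.2 as above); the job should show up on the YARN web UI and, once finished, in the JobHistory UI:
hdfs dfsadmin -report    # should list all three DataNodes
hadoop jar $HADOOP_HOME/share/hadoop/mapreduce/hadoop-mapreduce-examples-3.2.2.jar pi 2 10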
- Common Hadoop cluster scripts
# Cluster start/stop script (covers HDFS, YARN and the JobHistory server): hadoop.sh
vim /home/hadoop/bin/hadoop.sh
#!/bin/bash
if [ $# -lt 1 ]
then
    echo "No args input... (usage: hadoop.sh start|stop)"
    exit
fi
case $1 in
"start")
    echo " =================== Starting the Hadoop cluster ==================="
    echo " --------------- starting hdfs ---------------"
    ssh hadoop0001 "/home/hadoop/soft/hadoop-3.2.2/sbin/start-dfs.sh"
    echo " --------------- starting yarn ---------------"
    ssh hadoop0002 "/home/hadoop/soft/hadoop-3.2.2/sbin/start-yarn.sh"
    echo " --------------- starting historyserver ---------------"
    ssh hadoop0001 "/home/hadoop/soft/hadoop-3.2.2/bin/mapred --daemon start historyserver"
;;
"stop")
    echo " =================== Stopping the Hadoop cluster ==================="
    echo " --------------- stopping historyserver ---------------"
    ssh hadoop0001 "/home/hadoop/soft/hadoop-3.2.2/bin/mapred --daemon stop historyserver"
    echo " --------------- stopping yarn ---------------"
    ssh hadoop0002 "/home/hadoop/soft/hadoop-3.2.2/sbin/stop-yarn.sh"
    echo " --------------- stopping hdfs ---------------"
    ssh hadoop0001 "/home/hadoop/soft/hadoop-3.2.2/sbin/stop-dfs.sh"
;;
*)
    echo "Input args error... (usage: hadoop.sh start|stop)"
;;
esac
# Script to list the Java processes on all three servers: jpsall
vim /home/hadoop/bin/jpsall
#!/bin/bash
for host in hadoop0001 hadoop0002 hadoop0003
do
echo =============== $host ===============
ssh $host jps
done
# Make the scripts executable
chmod +x hadoop.sh jpsall
# Distribute the /home/hadoop/bin directory
xsync /home/hadoop/bin
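After the bin directory has been distributed, day-to-day operation (as the hadoop user) boils down to:
hadoop.sh start    # start HDFS, YARN and the JobHistory server
jpsall             # list the Java processes on all three nodes
hadoop.sh stop     # stop everything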