1. Install the JDK
The default Ubuntu 16.04 repositories no longer include OpenJDK 7, so add the PPA yourself and install OpenJDK 8 instead:
# OpenJDK PPA (the openjdk-r team PPA, not an Oracle source)
sudo add-apt-repository ppa:openjdk-r/ppa
sudo apt-get update
sudo apt-get install openjdk-8-jdk
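Once the packages are installed, a quick check that the JDK is on the PATH:
java -version
It should report an OpenJDK 1.8 build.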
2. Create the hadoop group and user
sudo addgroup hadoop
sudo adduser --ingroup hadoop hduser
sudo adduser hduser sudo
su hduser
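A quick way to confirm the new account and its group memberships (a standard coreutils command, not part of the original steps):
id hduser
The output should list both the hadoop and sudo groups.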
3. Install the SSH server
sudo apt-get install openssh-server
4. Configure SSH
Switch to the hduser user and run the following commands:
ssh-keygen -t rsa -P ''
cat ~/.ssh/id_rsa.pub >> ~/.ssh/authorized_keys
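To verify that passwordless SSH to the local machine works (Hadoop's start scripts rely on it), try logging in; no password prompt should appear:
ssh localhost
exit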
5. Download Hadoop
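The commands below assume hadoop-2.7.3.tar.gz is already in the current directory. If it is not, one way to fetch it is from the Apache archive (any Apache mirror works equally well):
wget https://archive.apache.org/dist/hadoop/common/hadoop-2.7.3/hadoop-2.7.3.tar.gz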
sudo tar -zxvf hadoop-2.7.3.tar.gz -C /opt
cd /opt
sudo mv hadoop-2.7.3 hadoop
sudo chown -R hduser:hadoop hadoop
6. Configure the Hadoop environment
Edit the following files:
- .bashrc
vim ~/.bashrc
Append the following to .bashrc:
#Hadoop variables
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
export HADOOP_INSTALL=/opt/hadoop
export PATH=$PATH:$HADOOP_INSTALL/bin
export PATH=$PATH:$HADOOP_INSTALL/sbin
export HADOOP_MAPRED_HOME=$HADOOP_INSTALL
export HADOOP_COMMON_HOME=$HADOOP_INSTALL
export HADOOP_HDFS_HOME=$HADOOP_INSTALL
export YARN_HOME=$HADOOP_INSTALL
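After saving, reload the file so the new variables take effect in the current shell:
source ~/.bashrc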
- hadoop-env.sh
cd /opt/hadoop/etc/hadoop && vim hadoop-env.sh
Add the following three lines to hadoop-env.sh, and comment out the original export JAVA_HOME line:
export JAVA_HOME=/usr/lib/jvm/java-1.8.0-openjdk-amd64
export HADOOP_COMMON_LIB_NATIVE_DIR="/opt/hadoop/lib/native/"
export HADOOP_OPTS="$HADOOP_OPTS -Djava.library.path=/opt/hadoop/lib/"
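With JAVA_HOME and the PATH entries in place, the hadoop command should now resolve; a quick sanity check:
hadoop version
It should print the Hadoop 2.7.3 build information.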
7. Configure Hadoop
cd /opt/hadoop/etc/hadoop
Edit the following files:
- core-site.xml
Copy the following inside the <configuration> tags (fs.defaultFS is the current property name in Hadoop 2.x; the old fs.default.name is a deprecated alias):
<property>
    <name>fs.defaultFS</name>
    <value>hdfs://localhost:9000</value>
</property>
- yarn-site.xml
Copy the following inside the <configuration> tags:
<property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
</property>
<property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
</property>
- mapred-site.xml
mv mapred-site.xml.template mapred-site.xml
Copy the following inside the <configuration> tags:
<property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
</property>
- hdfs-site.xml
mkdir -p ~/mydata/hdfs/namenode && mkdir -p ~/mydata/hdfs/datanode
Copy the following inside the <configuration> tags (dfs.replication is 1 because this is a single-node cluster):
<property>
    <name>dfs.replication</name>
    <value>1</value>
</property>
<property>
    <name>dfs.namenode.name.dir</name>
    <value>file:/home/hduser/mydata/hdfs/namenode</value>
</property>
<property>
    <name>dfs.datanode.data.dir</name>
    <value>file:/home/hduser/mydata/hdfs/datanode</value>
</property>
8. Start and verify the services
1. Format the namenode
Before starting the Hadoop services for the first time, the namenode must be formatted:
hdfs namenode -format
2. Start the services
start-dfs.sh && start-yarn.sh
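Once the daemons are up, the web interfaces are another convenient check: the NameNode UI listens on http://localhost:50070 and the ResourceManager UI on http://localhost:8088 (the Hadoop 2.x default ports).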
3. Check the daemons with jps
jps
If everything went well, you will see output like the following (the PIDs will differ):
17785 SecondaryNameNode
17436 NameNode
17591 DataNode
18096 NodeManager
17952 ResourceManager
23635 Jps
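As a final end-to-end smoke test, you can run one of the bundled MapReduce examples; the jar path below assumes the /opt/hadoop layout from step 5:
hdfs dfs -mkdir -p /user/hduser
hadoop jar /opt/hadoop/share/hadoop/mapreduce/hadoop-mapreduce-examples-2.7.3.jar pi 2 5
If HDFS and YARN are wired up correctly, the job completes and prints an estimate of pi.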