在本地模式的基础上修改以下配置文件,将 Hadoop 配置为伪分布模式。
#cd training/hadoop-2.4.1/etc/hadoop/
#vi hdfs-site.xml(注意:以下 <property> 内容需添加到文件已有的 <configuration> 标签之内,后面几个配置文件同理)
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
#mkdir -p ~/training/hadoop/data(需要 -p 递归创建目录;此处以 root 用户操作,对应下面的 /root/training/hadoop/data)
#vi core-site.xml
<property>
<name>fs.defaultFS</name>
<value>hdfs://192.168.56.100:9000</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/root/training/hadoop/data</value>
</property>
#cp mapred-site.xml.template mapred-site.xml
#vi mapred-site.xml
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
#vi yarn-site.xml
<property>
<name>yarn.resourcemanager.hostname</name>
<value>192.168.56.100</value>
</property>
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
#cd ~/training
#hdfs namenode -format
#start-all.sh
#jps
2738 NameNode
2930 SecondaryNameNode
3059 ResourceManager
3145 NodeManager
3420 Jps
2815 DataNode
#hdfs dfsadmin -report
# hdfs dfs -mkdir /input
# hdfs dfs -put data/input/data.txt /input/data.txt
# hdfs dfs -ls -R /(-lsr 在 Hadoop 2.x 中已弃用,推荐使用 -ls -R)
# cd ~/training/hadoop-2.4.1/share/hadoop/mapreduce/
# hadoop jar hadoop-mapreduce-examples-2.4.1.jar wordcount /input/data.txt /output
#hdfs dfs -ls -R /
#hdfs dfs -cat /output/part-r-00000
#stop-all.sh
网友评论