1、Zookeeper
1)zoo.cfg
initLimit=10
syncLimit=5
dataDir=/opt/module/zookeeper-3.4.10/zkdata
dataLogDir=/opt/module/zookeeper-3.4.10/logs
clientPort=2181
server.1=hadoop102:2888:3888
server.2=hadoop103:2888:3888
server.3=hadoop104:2888:3888
maxClientCnxns=300
autopurge.snapRetainCount=20
autopurge.purgeInterval=48
tickTime=600000
minSessionTimeout=600000
maxSessionTimeout=6000000
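For the server.1/2/3 entries above to take effect, each node also needs a myid file inside dataDir whose content matches its server number. A minimal sketch using the paths from this config (run the matching line on each host):
[alex@hadoop102 zookeeper-3.4.10]$ echo 1 > /opt/module/zookeeper-3.4.10/zkdata/myid   # hadoop102 = server.1
[alex@hadoop103 zookeeper-3.4.10]$ echo 2 > /opt/module/zookeeper-3.4.10/zkdata/myid   # hadoop103 = server.2
[alex@hadoop104 zookeeper-3.4.10]$ echo 3 > /opt/module/zookeeper-3.4.10/zkdata/myid   # hadoop104 = server.3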
2)zkEnv.sh
if [ "x${ZOO_LOG_DIR}" = "x" ]
then
#ZOO_LOG_DIR="."
ZOO_LOG_DIR="/opt/module/zookeeper-3.4.10/logs"
fi
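With the log directory redirected, each node can be started and checked with the stock scripts (a sketch; run on every quorum host):
[alex@hadoop102 zookeeper-3.4.10]$ bin/zkServer.sh start
[alex@hadoop102 zookeeper-3.4.10]$ bin/zkServer.sh status   # across the three hosts, expect one leader and two followers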
2、Hadoop
1)core-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- NameNode address (HDFS entry point) -->
<property>
<name>fs.defaultFS</name>
<value>hdfs://hadoop102:9000</value>
</property>
<!-- Base directory for files Hadoop generates at runtime -->
<property>
<name>hadoop.tmp.dir</name>
<value>/opt/module/hadoop-2.8.2/data/tmp</value>
</property>
</configuration>
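fs.defaultFS here (hdfs://hadoop102:9000) is the address that hbase.rootdir in section 4 must agree with. After the file is distributed, it can be sanity-checked with hdfs getconf (a quick sketch):
[alex@hadoop102 hadoop-2.8.2]$ bin/hdfs getconf -confKey fs.defaultFS
hdfs://hadoop102:9000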
2)hdfs-site.xml
<?xml version="1.0" encoding="UTF-8"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- Number of HDFS block replicas -->
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<!-- SecondaryNameNode location -->
<property>
<name>dfs.namenode.secondary.http-address</name>
<value>hadoop104:50090</value>
</property>
<!-- Disable HDFS permission checking -->
<property>
<name>dfs.permissions.enabled</name>
<value>false</value>
</property>
</configuration>
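With dfs.replication=1 and the SecondaryNameNode pinned to hadoop104, the usual first-run sequence is to format the NameNode once and then start HDFS (a sketch, assuming the Hadoop home from hadoop-env.sh below):
[alex@hadoop102 hadoop-2.8.2]$ bin/hdfs namenode -format    # first start only; reformatting wipes existing metadata
[alex@hadoop102 hadoop-2.8.2]$ sbin/start-dfs.sh
[alex@hadoop102 hadoop-2.8.2]$ jps                          # expect NameNode + DataNode here, SecondaryNameNode on hadoop104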
3)hadoop-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
export HADOOP_LOG_DIR=/opt/module/hadoop-2.8.2/logs
4)mapred-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<!-- Put site-specific property overrides in this file. -->
<configuration>
<!-- Run MapReduce on YARN -->
<property>
<name>mapreduce.framework.name</name>
<value>yarn</value>
</property>
<!-- MapReduce JobHistory Server address; default port 10020 -->
<property>
<name>mapreduce.jobhistory.address</name>
<value>hadoop102:10020</value>
</property>
<!-- MapReduce JobHistory Server web UI address; default port 19888 -->
<property>
<name>mapreduce.jobhistory.webapp.address</name>
<value>hadoop102:19888</value>
</property>
</configuration>
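Since the JobHistory Server is bound to hadoop102, it is started there separately; the web UI then answers on the 19888 port configured above (a sketch):
[alex@hadoop102 hadoop-2.8.2]$ sbin/mr-jobhistory-daemon.sh start historyserver
# confirm at http://hadoop102:19888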
5)mapred-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
6)yarn-site.xml
<?xml version="1.0"?>
<!--
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License. See accompanying LICENSE file.
-->
<configuration>
<!-- How reducers fetch data (the shuffle service) -->
<property>
<name>yarn.nodemanager.aux-services</name>
<value>mapreduce_shuffle</value>
</property>
<!-- YARN ResourceManager host -->
<property>
<name>yarn.resourcemanager.hostname</name>
<value>hadoop103</value>
</property>
<!-- Enable log aggregation -->
<property>
<name>yarn.log-aggregation-enable</name>
<value>true</value>
</property>
<!-- Retain aggregated logs for 7 days -->
<property>
<name>yarn.log-aggregation.retain-seconds</name>
<value>604800</value>
</property>
<!-- Check for expired logs once a day -->
<property>
<name>yarn.log-aggregation.retain-check-interval-seconds</name>
<value>86400</value>
</property>
</configuration>
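Because yarn.resourcemanager.hostname is hadoop103, start-yarn.sh should be run on hadoop103 so the ResourceManager comes up on the intended host (a sketch; 8088 is the default ResourceManager web UI port):
[alex@hadoop103 hadoop-2.8.2]$ sbin/start-yarn.sh
# ResourceManager UI: http://hadoop103:8088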
7)yarn-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
8)slaves
hadoop102
hadoop103
hadoop104
3、Hive
1)hive-env.sh
export HADOOP_HOME=/opt/module/hadoop-2.8.2
export HIVE_CONF_DIR=/opt/module/apache-hive-1.2.1-bin/conf
2)hive-log4j.properties
hive.log.dir=/opt/module/apache-hive-1.2.1-bin/logs
hive.log.file=hive.log
3)hive-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>javax.jdo.option.ConnectionURL</name>
<value>jdbc:mysql://hadoop102:3306/metastore?createDatabaseIfNotExist=true</value>
<description>JDBC connect string for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionDriverName</name>
<value>com.mysql.jdbc.Driver</value>
<description>Driver class name for a JDBC metastore</description>
</property>
<property>
<name>javax.jdo.option.ConnectionUserName</name>
<value>root</value>
<description>username to use against metastore database</description>
</property>
<property>
<name>javax.jdo.option.ConnectionPassword</name>
<value>000000</value>
<description>password to use against metastore database</description>
</property>
</configuration>
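This points the metastore at MySQL on hadoop102, so the MySQL JDBC driver jar has to be copied into $HIVE_HOME/lib before the first launch; createDatabaseIfNotExist=true lets the metastore database be created on first use. A hedged sketch (the connector path and version below are placeholders, not from this setup):
[alex@hadoop102 apache-hive-1.2.1-bin]$ cp /opt/software/mysql-connector-java-x.y.z-bin.jar lib/   # placeholder jar name
[alex@hadoop102 apache-hive-1.2.1-bin]$ bin/hive   # first launch typically creates the metastore tables in MySQL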
4、HBase
1)hbase-env.sh
export JAVA_HOME=/opt/module/jdk1.8.0_144
export HBASE_CLASSPATH=/opt/module/hbase-1.2.5/conf
# Configure PermSize. Only needed in JDK7. You can safely remove it for JDK8+
#export HBASE_MASTER_OPTS="$HBASE_MASTER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
#export HBASE_REGIONSERVER_OPTS="$HBASE_REGIONSERVER_OPTS -XX:PermSize=128m -XX:MaxPermSize=128m"
export HBASE_LOG_DIR=/opt/module/hbase-1.2.5/logs
export HBASE_MANAGES_ZK=false
export HBASE_OPTS="$HBASE_OPTS -verbose:gc -Xloggc:$HBASE_LOG_DIR/hbase.gc.log -XX:ErrorFile=$HBASE_LOG_DIR/hs_err_pid.log -XX:+PrintGCDateStamps -XX:+PrintGCDetails -XX:+HeapDumpOnOutOfMemoryError -XX:+UseConcMarkSweepGC -XX:+UseParNewGC -XX:CMSInitiatingOccupancyFraction=70"
2)hbase-site.xml
<?xml version="1.0"?>
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<!--
/**
*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
-->
<configuration>
<!-- HBase root directory on HDFS; the port must match Hadoop's fs.defaultFS -->
<property>
<name>hbase.rootdir</name>
<value>hdfs://hadoop102:9000/hbase</value>
<description> </description>
</property>
<!-- Run in fully distributed mode -->
<property>
<name>hbase.cluster.distributed</name>
<value>true</value>
</property>
<!-- New in 0.98+; earlier versions had no .port property and defaulted to port 60000 -->
<property>
<name>hbase.master.port</name>
<value>16000</value>
</property>
<!-- ZooKeeper quorum node list -->
<property>
<name>hbase.zookeeper.quorum</name>
<value>hadoop102,hadoop103,hadoop104</value>
<!-- Hostnames only; no ports here -->
</property>
<property>
<name>hbase.zookeeper.property.clientPort</name>
<value>2181</value>
</property>
<!-- ZooKeeper data directory -->
<property>
<name>hbase.zookeeper.property.dataDir</name>
<value>/opt/module/zookeeper-3.4.10/zkdata</value>
</property>
<property>
<name>hbase.master.maxclockskew</name>
<value>180000</value>
<description>Time difference of regionserver from master</description>
</property>
<property>
<name>hbase.rpc.timeout</name>
<value>600000</value>
</property>
<property>
<name>hbase.client.operation.timeout</name>
<value>600000</value>
</property>
<property>
<name>hbase.client.scanner.timeout.period</name>
<value>600000</value>
</property>
<property>
<name>hbase.regionserver.lease.period</name>
<value>600000</value>
</property>
<property>
<name>phoenix.query.timeoutMs</name>
<value>600000</value>
</property>
<property>
<name>phoenix.query.keepAliveMs</name>
<value>600000</value>
</property>
<property>
<name>hbase.client.ipc.pool.type</name>
<value>RoundRobinPool</value>
</property>
<property>
<name>hbase.client.ipc.pool.size</name>
<value>10</value>
</property>
<property>
<name>hbase.coprocessor.abortonerror</name>
<value>false</value>
</property>
</configuration>
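With HBASE_MANAGES_ZK=false in hbase-env.sh, HBase relies on the external ZooKeeper quorum configured in section 1, so ZooKeeper and HDFS must already be running before HBase starts. A start-and-check sketch (16010 is the default master web UI port in HBase 1.x):
[alex@hadoop102 hbase-1.2.5]$ bin/start-hbase.sh
[alex@hadoop102 hbase-1.2.5]$ bin/hbase shell
hbase(main):001:0> status
# expect 1 active master on hadoop102 and 3 region servers; master UI at http://hadoop102:16010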
3)regionservers
hadoop102
hadoop103
hadoop104
5、Kylin
1)kylin.sh
[alex@hadoop102 kylin]$ vim bin/kylin.sh
export KYLIN_HOME=/opt/module/kylin
export HBASE_CLASSPATH_PREFIX=${KYLIN_HOME}/conf:${KYLIN_HOME}/lib/*:${KYLIN_HOME}/ext/*:${hive_dependency}:${HBASE_CLASSPATH_PREFIX}
export HBASE_CLASSPATH=${HBASE_CLASSPATH}:${hive_dependency}:${kafka_dependency}:${spark_dependency}
2)kylin_job_conf_inmem.xml
[alex@hadoop102 conf]$ pwd
/opt/module/kylin/conf
[alex@hadoop102 conf]$ vim kylin_job_conf_inmem.xml
<!-- Additional config for in-mem cubing, giving the mapper more memory -->
<property>
<name>mapreduce.map.memory.mb</name>
<value>1024</value>
<description></description>
</property>
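Once HDFS, YARN, ZooKeeper, HBase and Hive are all up and the variables from section 6 are loaded, Kylin can verify its environment and start (a sketch; check-env.sh and kylin.sh ship in Kylin's bin directory, and 7070 is the default web port):
[alex@hadoop102 kylin]$ bin/check-env.sh
[alex@hadoop102 kylin]$ bin/kylin.sh start
# web UI: http://hadoop102:7070/kylin (default login ADMIN/KYLIN)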
6、Environment Variables
[alex@hadoop102 kylin]$ sudo vim /etc/profile
##JAVA_HOME
export JAVA_HOME=/opt/module/jdk1.8.0_144
export CLASSPATH=.:$JAVA_HOME/lib/dt.jar:$JAVA_HOME/lib/tools.jar
export PATH=$PATH:$JAVA_HOME/bin
##HBASE_HOME
export HBASE_HOME=/opt/module/hbase-1.2.5
export PATH=$PATH:$HBASE_HOME/bin
##HIVE_HOME
export HIVE_HOME=/opt/module/apache-hive-1.2.1-bin
export PATH=$PATH:$HIVE_HOME/bin
##HADOOP_HOME
export HADOOP_HOME=/opt/module/hadoop-2.8.2
export PATH=$PATH:$HADOOP_HOME/bin
export CATALINA_HOME=/opt/module/kylin/tomcat
export HIVE_CONF_DIR=$HIVE_HOME/conf
export HCAT_HOME=$HIVE_HOME/hcatalog
export KYLIN_HOME=/opt/module/kylin
export hive_dependency=$HIVE_HOME/conf:$HIVE_HOME/lib/*:$HCAT_HOME/share/hcatalog/hive-hcatalog-core-1.1.0.jar
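After editing /etc/profile, reload it in the current shell and spot-check a few variables; note that the hcatalog jar named in hive_dependency should match the jar actually shipped with the installed Hive release (the 1.1.0 name above may need adjusting for apache-hive-1.2.1-bin). A quick sketch:
[alex@hadoop102 kylin]$ source /etc/profile
[alex@hadoop102 kylin]$ echo $KYLIN_HOME
/opt/module/kylin
[alex@hadoop102 kylin]$ hadoop version
[alex@hadoop102 kylin]$ java -version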