Installing CDH 5.13.0 from RPMs on SUSE

Author: 叫小七的剑客 | Published 2020-04-13 10:43

    Disable the firewall

    1. Check whether the SuSE firewall is set to start at boot
         chkconfig --list | grep firewall
    2. Disable the firewall services
         chkconfig SuSEfirewall2_init off
         chkconfig SuSEfirewall2_setup off
    

    Configure passwordless SSH login
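    A minimal sketch, assuming a single node named CDH1 and root's default key paths:

    ssh-keygen -t rsa -P '' -f ~/.ssh/id_rsa    # generate a key pair with an empty passphrase
    ssh-copy-id root@CDH1                       # append the public key to CDH1's authorized_keys
    ssh root@CDH1                               # should now log in without a password prompt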

    Configure a local zypper repository on SUSE

    mount -o loop SLES-11-SP3-DVD-x86_64-GM-DVD1.iso /media
    zypper ar file:///media local-sles
    zypper lr
    zypper clean 
    zypper ref 
    zypper install gcc 
    

    Install MySQL

     1. zypper install mysql
    

    2. Start MySQL
    service mysql start
    3. Enter MySQL and change the password

    Type mysql to open the client, then run:
    mysql>update mysql.user set password=password('123') where user="root";
    mysql>flush privileges;
    

    Grant privileges

    mysql>GRANT ALL PRIVILEGES ON *.* TO 'root'@'%' IDENTIFIED BY '123' WITH GRANT OPTION;
    mysql>FLUSH PRIVILEGES;
    

    4. Quit MySQL

    mysql>quit;
    

    5. Log back in to MySQL to verify

    mysql -uroot -p
    

    6. Make table names case-insensitive

    vim /etc/my.cnf
    Add the following under [mysqld]:
    lower_case_table_names=1
    

    7. Restart the MySQL service

    service mysql restart
    

    Install the JDK

    rpm -ivh jdk-8u111-linux-x64.rpm
    vim /etc/profile
    export JAVA_HOME=/usr/java/jdk1.8.0_111
    export JRE_HOME=$JAVA_HOME/jre
    export CLASSPATH=.:$JAVA_HOME/lib:$JRE_HOME/lib
    export PATH=$PATH:$JAVA_HOME/bin:$JRE_HOME/bin
    source /etc/profile
    java -version
    

    Install CDH

    Upload the installation packages to /opt.
    1. Install HDFS

    cd /opt/CDH5.13/cdh/5.13.0/RPMS/x86_64
    
    rpm -ivh zookeeper-3.4.5+cdh5.13.0+118-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-hdfs-namenode-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-hdfs-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-yarn-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-0.20-mapreduce-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-mapreduce-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-client-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm bigtop-jsvc-0.6.0+cdh5.13.0+911-1.cdh5.13.0.p0.34.sles11.x86_64.rpm  hadoop-yarn-resourcemanager-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm  hadoop-yarn-nodemanager-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hadoop-yarn-proxyserver-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm  hadoop-hdfs-datanode-2.6.0+cdh5.13.0+2639-1.cdh5.13.0.p0.34.sles11.x86_64.rpm ../noarch/avro-libs-1.7.6+cdh5.13.0+135-1.cdh5.13.0.p0.34.sles11.noarch.rpm ../noarch/parquet-format-2.1.0+cdh5.13.0+18-1.cdh5.13.0.p0.34.sles11.noarch.rpm ../noarch/parquet-1.5.0+cdh5.13.0+191-1.cdh5.13.0.p0.34.sles11.noarch.rpm ../noarch/bigtop-utils-0.7.0+cdh5.13.0+0-1.cdh5.13.0.p0.34.sles11.noarch.rpm
    

    2. Install ZooKeeper

    cd /opt/CDH5.13/cdh/5.13.0/RPMS/x86_64
    
    rpm -ivh zookeeper-server-3.4.5+cdh5.13.0+118-1.cdh5.13.0.p0.34.sles11.x86_64.rpm
    

    3. Install HBase

    cd /opt/CDH5.13/cdh/5.13.0/RPMS/x86_64
    
    rpm -ivh hbase-master-1.2.0+cdh5.13.0+411-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hbase-1.2.0+cdh5.13.0+411-1.cdh5.13.0.p0.34.sles11.x86_64.rpm hbase-regionserver-1.2.0+cdh5.13.0+411-1.cdh5.13.0.p0.34.sles11.x86_64.rpm 
    

    4. Install Hive

    cd /opt/CDH5.13/cdh/5.13.0/RPMS/noarch
    
    rpm -ivh hive-1.1.0+cdh5.13.0+1269-1.cdh5.13.0.p0.34.sles11.noarch.rpm hive-jdbc-1.1.0+cdh5.13.0+1269-1.cdh5.13.0.p0.34.sles11.noarch.rpm sentry-1.5.1+cdh5.13.0+410-1.cdh5.13.0.p0.34.sles11.noarch.rpm solr-4.10.3+cdh5.13.0+519-1.cdh5.13.0.p0.34.sles11.noarch.rpm bigtop-tomcat-0.7.0+cdh5.13.0+0-1.cdh5.13.0.p0.34.sles11.noarch.rpm   hive-metastore-1.1.0+cdh5.13.0+1269-1.cdh5.13.0.p0.34.sles11.noarch.rpm hive-server2-1.1.0+cdh5.13.0+1269-1.cdh5.13.0.p0.34.sles11.noarch.rpm 
    

    5. Install Spark

    cd /opt/CDH5.13/cdh/5.13.0/RPMS/noarch
     
    rpm -ivh spark-core-1.6.0+cdh5.13.0+530-1.cdh5.13.0.p0.34.sles11.noarch.rpm spark-python-1.6.0+cdh5.13.0+530-1.cdh5.13.0.p0.34.sles11.noarch.rpm spark-history-server-1.6.0+cdh5.13.0+530-1.cdh5.13.0.p0.34.sles11.noarch.rpm spark-worker-1.6.0+cdh5.13.0+530-1.cdh5.13.0.p0.34.sles11.noarch.rpm spark-master-1.6.0+cdh5.13.0+530-1.cdh5.13.0.p0.34.sles11.noarch.rpm flume-ng-1.6.0+cdh5.13.0+169-1.cdh5.13.0.p0.34.sles11.noarch.rpm kite-1.0.0+cdh5.13.0+145-1.cdh5.13.0.p0.34.sles11.noarch.rpm 
    

    6. Install Kafka

    cd /opt/kafka/3.0.0/RPMS/noarch
     
     rpm -ivh kafka-0.11.0+kafka3.0.0-1.3.0.0.p0.50.sles11.noarch.rpm kafka-mirror-maker-0.11.0+kafka3.0.0-1.3.0.0.p0.50.sles11.noarch.rpm kafka-server-0.11.0+kafka3.0.0-1.3.0.0.p0.50.sles11.noarch.rpm
    

    Edit the configuration files

    1. HDFS (a sketch of the key properties follows the file list)

    cd /etc/hadoop/conf
    
    core-site.xml
    hdfs-site.xml
    mapred-site.xml
    yarn-site.xml
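    A minimal single-node sketch of the key properties for these files, assuming the host is CDH1 and the directories created in the next step (all values are assumptions to adapt):

    <!-- core-site.xml -->
    <property>
      <name>fs.defaultFS</name>
      <value>hdfs://CDH1:8020</value>
    </property>

    <!-- hdfs-site.xml -->
    <property>
      <name>dfs.namenode.name.dir</name>
      <value>/mnt/drbd/hdfs/dfs/name</value>
    </property>
    <property>
      <name>dfs.datanode.data.dir</name>
      <value>/mnt/diskb/dfs</value>
    </property>

    <!-- mapred-site.xml -->
    <property>
      <name>mapreduce.framework.name</name>
      <value>yarn</value>
    </property>

    <!-- yarn-site.xml -->
    <property>
      <name>yarn.resourcemanager.hostname</name>
      <value>CDH1</value>
    </property>
    <property>
      <name>yarn.nodemanager.aux-services</name>
      <value>mapreduce_shuffle</value>
    </property>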
    

    2. Create the namenode directories and set ownership and permissions

    mkdir -p /mnt/drbd/hdfs/dfs/name
    chown -R hdfs:hdfs /mnt/drbd/hdfs/dfs/name
    chmod 700 /mnt/drbd/hdfs/dfs/name
    mkdir -p /var/hadoop/log/dfs/namesecondary
    chown -R hdfs:hdfs /var/hadoop/log/dfs/namesecondary
    mkdir -p /mnt/diskb/dfs
    chown -R hdfs:hdfs /mnt/diskb/dfs
    

    3. Format the namenode

    su - hdfs 
    hdfs namenode -format
    
    

    4. Start the namenode (as root)

    service hadoop-hdfs-namenode start
    

    5. Create the HDFS temp, log, and history directories

    su - hdfs 
    hadoop fs -mkdir -p /tmp
    hadoop fs -chmod -R 1777 /tmp
    hadoop fs -mkdir -p /var/log/hadoop-yarn
    hadoop fs -chown yarn:mapred /var/log/hadoop-yarn
    hadoop fs -mkdir -p /user/history
    hadoop fs -chmod -R 1777 /user/history
    hadoop fs -mkdir -p /user/history/done_intermediate
    hadoop fs -chown mapred:hadoop /user/history/done_intermediate
    hadoop fs -chmod -R 1777 /user/history/done_intermediate
    hadoop fs -mkdir -p /user/history/done
    hadoop fs -chown mapred:hadoop /user/history/done
    hadoop fs -chmod -R 750 /user/history/done
    hadoop fs -mkdir -p /user/mapreduce
    hadoop fs -chown mapreduce /user/mapreduce
    
    

    6. Start the remaining services

    service hadoop-yarn-nodemanager start
    service hadoop-yarn-resourcemanager start
    service hadoop-mapreduce-historyserver start
    service hadoop-yarn-proxyserver start
    service hadoop-hdfs-datanode start
    

    7. Check that the namenode started successfully

    service --status-all | grep -i hadoop
    

    Configure Hive

    1. Copy the MySQL JDBC driver jar into /usr/lib/hive/lib (see the sketch below).
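    A minimal sketch, assuming the connector jar is in the current directory (the jar name and version here are assumptions; use the one you downloaded):

    cp mysql-connector-java-5.1.40-bin.jar /usr/lib/hive/lib/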

    2. Create the metastore database in MySQL

    mysql -uroot -p123
    CREATE DATABASE metastore;
    USE metastore;
    
    SOURCE /usr/lib/hive/scripts/metastore/upgrade/mysql/hive-txn-schema-0.13.0.mysql.sql;
    SOURCE /usr/lib/hive/scripts/metastore/upgrade/mysql/hive-schema-1.1.0.mysql.sql; 
    
    

    3. Edit the Hive configuration file (a sketch of the key properties follows)

    cd /etc/hive/conf
    
    hive-site.xml
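    A minimal sketch of the metastore connection properties for hive-site.xml, matching the MySQL database, user, and password created above (the host name CDH1 and the thrift port are assumptions):

    <property>
      <name>javax.jdo.option.ConnectionURL</name>
      <value>jdbc:mysql://CDH1:3306/metastore</value>
    </property>
    <property>
      <name>javax.jdo.option.ConnectionDriverName</name>
      <value>com.mysql.jdbc.Driver</value>
    </property>
    <property>
      <name>javax.jdo.option.ConnectionUserName</name>
      <value>root</value>
    </property>
    <property>
      <name>javax.jdo.option.ConnectionPassword</name>
      <value>123</value>
    </property>
    <property>
      <name>hive.metastore.uris</name>
      <value>thrift://CDH1:9083</value>
    </property>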
    

    4. Create the Hive user directory on HDFS

    su - hdfs 
    hdfs dfs -mkdir /user/hive
    hdfs dfs -chmod a+w  /user/hive
    
    
    

    Configure ZooKeeper

    cd /etc/zookeeper/conf
    1. Edit zoo.cfg and append as the last line:

    server.1=CDH1:2888:3888
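    For context, a minimal single-node zoo.cfg sketch; the first three lines are typical package defaults (assumptions), and only the server.1 line is the addition described above:

    tickTime=2000
    dataDir=/var/lib/zookeeper
    clientPort=2181
    server.1=CDH1:2888:3888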
    

    2. Initialize ZooKeeper

    /etc/init.d/zookeeper-server init --myid=1
    

    3. Add the Java environment variables to /usr/lib/zookeeper/bin/zkServer.sh, as sketched below
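    A minimal sketch of the lines to add near the top of zkServer.sh, matching the JDK path set earlier:

    export JAVA_HOME=/usr/java/jdk1.8.0_111
    export PATH=$JAVA_HOME/bin:$PATH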

    4. Start ZooKeeper

    service zookeeper-server start
    

    5. Check that ZooKeeper started successfully

    service --status-all | grep zookeeper
    

    Configure HBase

    1. Edit hbase-site.xml (a sketch of the key properties follows the commands below)
    2. Create the HBase directory on HDFS

    su - hdfs 
    hdfs dfs -mkdir /hbase
    hdfs dfs -chown hbase /hbase
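    A minimal hbase-site.xml sketch for this single-node layout, assuming the namenode address hdfs://CDH1:8020 and the ZooKeeper instance configured above:

    <property>
      <name>hbase.rootdir</name>
      <value>hdfs://CDH1:8020/hbase</value>
    </property>
    <property>
      <name>hbase.cluster.distributed</name>
      <value>true</value>
    </property>
    <property>
      <name>hbase.zookeeper.quorum</name>
      <value>CDH1</value>
    </property>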
    
    

    3. Start the services:

    service hbase-master start
    service hbase-regionserver start
    

    4. Check that HBase started successfully

    service --status-all | grep -i hbase
    

    Configure Spark

    1. Edit the configuration files (a sketch follows the list below)

    cd /etc/spark/conf
    
    slaves
    spark-defaults.conf
    spark-env.sh
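    A minimal sketch of the three files, assuming one worker on CDH1, the standalone master on CDH1:7077, and an assumed event-log directory (the env variable name follows CDH's spark-env.sh template):

    # slaves -- one worker host per line
    CDH1

    # spark-defaults.conf
    spark.master            spark://CDH1:7077
    spark.eventLog.enabled  true
    spark.eventLog.dir      hdfs://CDH1:8020/user/spark/applicationHistory

    # spark-env.sh
    export STANDALONE_SPARK_MASTER_HOST=CDH1
    export JAVA_HOME=/usr/java/jdk1.8.0_111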
    

    2. Start the services:

    service spark-master start
    service spark-worker start  
    service spark-history-server start
    
    

    3. Check that Spark started successfully

    service --status-all | grep spark
    

    Configure Kafka

    1. Edit the configuration file (a sketch follows below)

        cd /etc/kafka/conf
        
        server.properties
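    A minimal server.properties sketch, assuming broker id 0, the ZooKeeper instance configured above, and an assumed log directory:

    broker.id=0
    listeners=PLAINTEXT://CDH1:9092
    log.dirs=/var/local/kafka/data
    zookeeper.connect=CDH1:2181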
    

    2. Start the service:

    service kafka-server start
    

    3. Check that Kafka started successfully

    service --status-all | grep kafka
    
