美文网首页IT技术篇
服务器安装(4.8.3)Docker Swarm 搭建 sol

服务器安装(4.8.3)Docker Swarm 搭建 sol

作者: 王滕辉 | 来源:发表于2021-05-14 18:32 被阅读0次

信息来源

第一步:构建ik-solr

# Remove any existing "solr" container ("docker rm" removes a container,
# not an image).
docker rm -f solr

# Pull the latest official solr image
docker pull solr:latest

# Create a named volume for solr's data directory
docker volume create solr_data

# Start a solr container to customize
docker run --name solr -p 8983:8983 -v solr_data:/var/solr  -d -t solr:latest

# Copy the IK analyzer, dataimporthandler and MySQL driver jars into the
# webapp's WEB-INF/lib
docker cp /mnt/cache/solr/load/ik-analyzer-8.3.0.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/ik-analyzer-solr7-7.x.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/solr-dataimporthandler-8.4.0.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/solr-dataimporthandler-extras-8.4.0.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/mysql-connector-java-8.0.22.jar solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/

# Create the classes directory as root inside the container
# (the following three lines are typed interactively in the container shell).
docker exec -it --user=root solr /bin/bash
cd server/solr-webapp/webapp/WEB-INF
mkdir classes
exit

# Copy the IK dictionaries and config files into the container.
# NOTE(review): the cluster notes later in this article say ik.conf and
# dynamicdic.txt must NOT live under classes/ — here dynamicdic.txt is copied
# to lib/ while ik.conf goes to classes/; confirm the intended single-node
# layout.
docker cp /mnt/cache/solr/load/IKAnalyzer.cfg.xml solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
docker cp /mnt/cache/solr/load/dynamicdic.txt solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/lib/
docker cp /mnt/cache/solr/load/ext.dic solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
docker cp /mnt/cache/solr/load/ik.conf solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/
docker cp /mnt/cache/solr/load/stopword.dic solr:/opt/solr/server/solr-webapp/webapp/WEB-INF/classes/

# Save the prepared container as a new image
docker commit solr solr-ik-mysql:1.0

# Single-node deployment ends here; continue below only for cluster mode.
docker rm -f solr
docker run --name solr -p 8983:8983  -v solr_data:/var/solr -e SOLR_HEAP=6G -d -t solr-ik-mysql:1.0

第二步

# Step 2: recreate the standalone solr container from the committed image
# (same commands as at the end of step 1).
docker rm -f solr
docker run --name solr -p 8983:8983  -v solr_data:/var/solr -e SOLR_HEAP=6G -d -t solr-ik-mysql:1.0

到此处docker 单机版的solr 搭建完,下面是swarm版

第三步 配置好 stacks

# docker-compose.yaml
# Swarm stack: 3-node ZooKeeper ensemble, 3 SolrCloud nodes, 3 Kafka brokers
# and the kafka-manager web UI. Every service is pinned to the node whose
# hostname is "manager".
#
# NOTE(review): under "docker stack deploy" the keys "restart",
# "container_name" and "depends_on" are ignored (they only apply to plain
# docker-compose); they are kept so the file works with both tools.
version: '3'

services:
  zoo1:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo1
    ports:
      # Port mappings are quoted: an unquoted digits:digits scalar can be
      # parsed as a base-60 integer by YAML 1.1 parsers.
      - "2181:2181"
    volumes:
      - /home/zk/zookeeper1/data:/data
      - /home/zk/zookeeper1/datalog:/datalog
      - /home/zk/zookeeper1/logs:/logs
    environment:
      # Values quoted so they are unambiguously strings.
      ZOO_MY_ID: "1"
      ZOO_SERVERS: "server.1=0.0.0.0:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  zoo2:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo2
    ports:
      - "2182:2181"
    volumes:
      - /home/zk/zookeeper2/data:/data
      - /home/zk/zookeeper2/datalog:/datalog
      - /home/zk/zookeeper2/logs:/logs
    environment:
      ZOO_MY_ID: "2"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=0.0.0.0:2888:3888;2181 server.3=zoo3:2888:3888;2181"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  zoo3:
    image: zookeeper:3.6.1
    restart: always
    hostname: zoo3
    ports:
      - "2183:2181"
    volumes:
      - /home/zk/zookeeper3/data:/data
      - /home/zk/zookeeper3/datalog:/datalog
      - /home/zk/zookeeper3/logs:/logs
    environment:
      ZOO_MY_ID: "3"
      ZOO_SERVERS: "server.1=zoo1:2888:3888;2181 server.2=zoo2:2888:3888;2181 server.3=0.0.0.0:2888:3888;2181"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  solr1:
    # NOTE(review): the build steps earlier commit the image as
    # solr-ik-mysql:1.0, but this stack references solr-ik:1.0 — confirm
    # which tag actually exists on the node.
    image: solr-ik:1.0
    ports:
      - "28983:8983"
    container_name: solr1
    restart: always
    volumes:
      - solr_cluster1:/var/solr
    environment:
      ZK_HOST: "zoo3:2181,zoo2:2181,zoo1:2181"
      SOLR_HEAP: "6G"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  solr2:
    image: solr-ik:1.0
    ports:
      - "28984:8983"
    container_name: solr2
    restart: always
    volumes:
      - solr_cluster2:/var/solr
    environment:
      ZK_HOST: "zoo3:2181,zoo2:2181,zoo1:2181"
      SOLR_HEAP: "6G"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  solr3:
    image: solr-ik:1.0
    ports:
      - "28985:8983"
    container_name: solr3
    restart: always
    volumes:
      - solr_cluster3:/var/solr
    environment:
      ZK_HOST: "zoo3:2181,zoo2:2181,zoo1:2181"
      SOLR_HEAP: "6G"
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  kafka0:
    hostname: kafka0
    container_name: kafka0
    image: wurstmeister/kafka:2.13-2.7.0
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "19092:9092"
    volumes:
      - "/home/kafka/k0:/kafka"
    environment:
      TZ: "Asia/Shanghai"
      KAFKA_BROKER_ID: "0"
      # Brokers advertise the host IP so external clients can reach them
      # through the published port.
      KAFKA_ZOOKEEPER_CONNECT: "192.168.0.105:2181"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://192.168.0.105:19092"
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9092"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  kafka1:
    hostname: kafka1
    container_name: kafka1
    image: wurstmeister/kafka:2.13-2.7.0
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "19093:9093"
    volumes:
      - "/home/kafka/k1:/kafka"
    environment:
      TZ: "Asia/Shanghai"
      KAFKA_BROKER_ID: "1"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.0.105:2181"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://192.168.0.105:19093"
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9093"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  kafka2:
    hostname: kafka2
    container_name: kafka2
    image: wurstmeister/kafka:2.13-2.7.0
    depends_on:
      - zoo1
      - zoo2
      - zoo3
    ports:
      - "19094:9094"
    volumes:
      - "/home/kafka/k2:/kafka"
    environment:
      TZ: "Asia/Shanghai"
      KAFKA_BROKER_ID: "2"
      KAFKA_ZOOKEEPER_CONNECT: "192.168.0.105:2181"
      KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://192.168.0.105:19094"
      KAFKA_LISTENERS: "PLAINTEXT://0.0.0.0:9094"
    deploy:
      placement:
        constraints:
          - node.hostname == manager

  kafka-manager:
    # Optional web management UI; reachable on the host at port 9005
    # (container port 9000), e.g. http://<host-ip>:9005
    image: sheepkiller/kafka-manager
    environment:
      ZK_HOSTS: "192.168.0.105:2181,192.168.0.105:2182,192.168.0.105:2183"
      APPLICATION_SECRET: "letmein"
    ports:
      - "9005:9000"
    expose:
      - "9000"
    depends_on:
      - kafka2
      - kafka1
      - kafka0
    deploy:
      placement:
        constraints:
          - node.hostname == manager

volumes:
  solr_cluster1:
  solr_cluster2:
  solr_cluster3:
  
image.png image.png

第四步 创建一个默认的collection

# Create a default collection (2 shards, replication factor 3) to use as a
# base from now on. The long hex string is the id of one of the solr
# containers.
docker exec -it 841d3120b488a749ad421df61328ce35bd16714875e4e858156bad81db3e2217  /opt/solr/bin/solr create_collection  -c zc_company -shards 2 -replicationFactor 3 -p 8983 


# Delete a collection
docker exec -it 841d3120b488a749ad421df61328ce35bd16714875e4e858156bad81db3e2217  /opt/solr/bin/solr delete  -c collection1   -p 8983 

841d3120b488a749ad421df61328ce35bd16714875e4e858156bad81db3e2217 为容器id

image.png

第五 集群配置文件

将resources目录下的IKAnalyzer.cfg.xml、ext.dic、stopword.dic放入solr服务的Jetty或Tomcat的webapp/WEB-INF/classes/目录下;

① IKAnalyzer.cfg.xml (IK默认的配置文件,用于配置自带的扩展词典及停用词典)
② ext.dic (默认的扩展词典)
③ stopword.dic (默认的停词词典)
注意:与单机版不同,ik.conf及dynamicdic.txt请不要放在classes目录下!

将resources目录下的ik.conf及dynamicdic.txt放入solr配置文件夹中,与solr的managed-schema文件同目录中;
配置Solr的managed-schema,添加ik分词器,示例如下;

<!-- IK analyzer field type for managed-schema: coarse-grained tokenization
     (useSmart=false) at index time, smart segmentation (useSmart=true) at
     query time, both lower-cased. conf="ik.conf" refers to the dynamic
     dictionary config placed next to managed-schema (NOT under
     WEB-INF/classes in cluster mode). -->
<fieldType name="text_ik" class="solr.TextField">
  <analyzer type="index">
      <tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="false" conf="ik.conf"/>
      <filter class="solr.LowerCaseFilterFactory"/>
  </analyzer>
  <analyzer type="query">
      <tokenizer class="org.wltea.analyzer.lucene.IKTokenizerFactory" useSmart="true" conf="ik.conf"/>
      <filter class="solr.LowerCaseFilterFactory"/>
  </analyzer>
</fieldType>

第六 提交配置到zk

比如我们修改managed-schema配置文件之后,根本不用登录zookeeper删除原有文件,文件会自动覆盖,这里直接上传即可,命令如下:


# Upload each changed config file to ZooKeeper; existing files are
# overwritten automatically, so nothing needs to be deleted first.
# NOTE(review): zkcli's "upconfig" is documented to take a config *directory*
# via -confdir; pointing it at a single file (with a "configname/filename"
# style -confname) is unusual — for single files "putfile" is the documented
# command. Confirm this works on the solr 8.9 image before relying on it.
docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf/managed-schema -confname zc_company/managed-schema

docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf/data-config.xml -confname zc_company/data-config.xml

docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf/dynamicdic.txt -confname zc_company/dynamicdic.txt

docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf/solrconfig.xml -confname zc_company/solrconfig.xml

docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf/stopword.dic -confname zc_company/stopword.dic

docker exec -it aeb104a4a4d1a73deecef94d8696d77e498acce4dbd33c90a02e2cd162c1c1f4 /opt/solr-8.9.0/server/scripts/cloud-scripts/zkcli.sh -zkhost zoo1:2181,zoo2:2181,zoo3:2181 -cmd upconfig -confdir /var/solr/data/conf/ik.conf -confname zc_company/ik.conf

备份恢复
参考: https://solr.apache.org/guide/6_6/making-and-restoring-backups.html

# Scheduled backup — must be run on every node.
# NOTE(review): "name=aaa." ends with a stray dot — likely a typo; also quote
# the URL when calling it with curl so the shell does not interpret "&".
http://192.168.0.105:28983/solr/zc_company/replication?command=backup&location=/var/solr/data/&name=aaa.
# Check backup status
http://192.168.0.105:28983/solr/zc_company/replication?command=details
# Restore from a named backup
http://192.168.0.105:28983/solr/zc_company/replication?command=restore&name=backup_name

导出(将 collection 中的全部文档导出为 CSV):
http://192.168.0.105:28983/solr/zc_company/select?q.op=OR&q=*%3A*&rows=100000000&start=0&wt=csv

相关文章

网友评论

    本文标题:服务器安装(4.8.3)Docker Swarm 搭建 sol

    本文链接:https://www.haomeiwen.com/subject/yziwrltx.html