美文网首页
2.5-ES的docker-compose安装配置(ES版本7.2)

2.5-ES的docker-compose安装配置(ES版本7.2)

作者: 落日彼岸 | 来源:发表于2020-04-05 16:40 被阅读0次

    一个集群两个节点

    • docker-compose.yaml
    # Single host, two ES nodes — container names must be unique.
    # The compose project name defaults to the parent directory name.
    version: '2.2'
    services:
      cerebro:
        image: lmenezes/cerebro:0.8.3
        container_name: cerebro
        ports:
          - "9000:9000"
        command:
          - -Dhosts.0.host=http://elasticsearch:9200
        networks:
          - es72net
      kibana:
        image: docker.elastic.co/kibana/kibana:7.2.0
        container_name: kibana72
        environment:
          - I18N_LOCALE=zh-CN
          - XPACK_GRAPH_ENABLED=true
          - TIMELION_ENABLED=true
          # Was ...="true": in list-form environment entries the quotes are kept
          # as part of the value, so Kibana received the literal string '"true"'.
          - XPACK_MONITORING_COLLECTION_ENABLED=true
        ports:
          - "5601:5601"
        networks:
          - es72net
      elasticsearch:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
        container_name: es72_01
        environment:
          - cluster.name=es-cluster
          - node.name=es72_01
          - bootstrap.memory_lock=true
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
          - discovery.seed_hosts=es72_01,es72_02
          - cluster.initial_master_nodes=es72_01,es72_02
        ulimits:
          memlock:
            soft: -1
            hard: -1
        volumes:  # mount local files
          - es72data1:/usr/share/elasticsearch/data
          - ~/docker-es-7.2:/home/docker-es
        ports:
          # Quoted: unquoted digit:digit values are subject to YAML 1.1
          # sexagesimal integer parsing; always quote port mappings.
          - "9200:9200"
        networks:
          - es72net
      elasticsearch2:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
        container_name: es72_02
        environment:
          - cluster.name=es-cluster
          - node.name=es72_02
          - bootstrap.memory_lock=true
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
          - discovery.seed_hosts=es72_01,es72_02
          - cluster.initial_master_nodes=es72_01,es72_02
        ulimits:
          memlock:
            soft: -1
            hard: -1
        volumes:  # mount local files
          - es72data2:/usr/share/elasticsearch/data
          - ~/docker-es-7.2:/home/docker-es
        networks:
          - es72net


    volumes:
      es72data1:
        driver: local
      es72data2:
        driver: local

    networks:
      es72net:
        driver: bridge
    
    
    • 执行命令 docker-compose up -d
    • 查看结果 curl elastic:changeme@localhost:9200

    一个集群三个节点(单机三节点集群)

    • 目录结构
    ├── README
    ├── es72data1                    es1数据目录
    ├── es72data2                    es2数据目录
    ├── config                       配置目录
    │   ├── es.yml                   es配置文件
    │   ├── es1.yml                  es1配置文件
    │   ├── es2.yml                  es2配置文件
    └── docker-compose.yaml          docker-compose配置文件
    
    • docker-compose.yaml
    # ES cluster — container names must be unique.
    # The compose project name defaults to the parent directory name.
    version: '2.2'
    services:
      cerebro:
        image: lmenezes/cerebro:0.8.3
        container_name: cerebro-cluster
        ports:
          - "9000:9000"
        command:
          - -Dhosts.0.host=http://elasticsearch:9200  # must match the ES master service name
        networks:
          - es72net
      kibana:
        image: docker.elastic.co/kibana/kibana:7.2.0
        container_name: kibana72-cluster
        environment:
          - ELASTICSEARCH_URL=http://elasticsearch:9200  # NOTE(review): reportedly ignored when changed — cause unknown
          - I18N_LOCALE=zh-CN
          - XPACK_GRAPH_ENABLED=true
          - TIMELION_ENABLED=true
          # Was ...="true": in list-form environment entries the quotes are kept
          # as part of the value, so Kibana received the literal string '"true"'.
          - XPACK_MONITORING_COLLECTION_ENABLED=true
        ports:
          - "5601:5601"
        networks:
          - es72net
      # For this local setup the master service must be named "elasticsearch",
      # otherwise Kibana cannot reach it at http://elasticsearch:9200.
      elasticsearch:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
        container_name: es72_00-cluster
        environment:
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
        ulimits:
          memlock:
            soft: -1
            hard: -1
          nofile:
            soft: 65536
            hard: 65536
        cap_add:
          - IPC_LOCK
        volumes:  # mount local files
          # - ./logs/es1/:/usr/share/elasticsearch/logs/:rw  # optionally mount logs for easier inspection
          # - ./es72data1:/usr/share/elasticsearch/data      # optionally mount the data directory too
          - ./config/es.yml:/usr/share/elasticsearch/config/elasticsearch.yml  # cluster config file
          - ~/docker-es-7.2:/home/docker-es
        ports:
          # Quoted: unquoted digit:digit values are subject to YAML 1.1
          # sexagesimal integer parsing; always quote port mappings.
          - "9200:9200"
          - "9300:9300"
        networks:
          - es72net
      es72_01:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
        container_name: es72_01-cluster
        environment:
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
        ulimits:
          memlock:
            soft: -1
            hard: -1
          nofile:
            soft: 65536
            hard: 65536
        cap_add:
          - IPC_LOCK
        volumes:  # mount local files
          # - ./logs/es1/:/usr/share/elasticsearch/logs/:rw  # optionally mount logs for easier inspection
          # - ./es72data1:/usr/share/elasticsearch/data      # optionally mount the data directory too
          - ./config/es1.yml:/usr/share/elasticsearch/config/elasticsearch.yml
          - ~/docker-es-7.2:/home/docker-es
        ports:
          - "9201:9200"
          - "9301:9300"
        networks:
          - es72net
      es72_02:
        image: docker.elastic.co/elasticsearch/elasticsearch:7.2.0
        container_name: es72_02-cluster
        environment:
          - "ES_JAVA_OPTS=-Xms512m -Xmx512m"
        ulimits:
          memlock:
            soft: -1
            hard: -1
          nofile:
            soft: 65536
            hard: 65536
        cap_add:
          - IPC_LOCK
        volumes:  # mount local files
          # - ./logs/es2/:/usr/share/elasticsearch/logs/:rw  # optionally mount logs for easier inspection
          # - ./es72data2:/usr/share/elasticsearch/data      # optionally mount the data directory too
          - ./config/es2.yml:/usr/share/elasticsearch/config/elasticsearch.yml
          - ~/docker-es-7.2:/home/docker-es
        ports:
          - "9202:9200"
          - "9302:9300"
        # Replaces legacy "links": services on the same network already resolve
        # each other by service name; depends_on keeps the start-up ordering.
        depends_on:
          - es72_01
        networks:
          - es72net

    volumes:
      es72data1:
        driver: local
      es72data2:
        driver: local

    networks:
      es72net:
        driver: bridge
    
    • es.yml
    # es72_00 — node names here must match the names used in docker-compose.yaml.
    cluster.name: elk-cluster
    node.name: es72_00-cluster
    node.master: true
    node.data: true

    network.host: es72_00-cluster
    # Ports on one machine cannot be shared. ES port mechanics: the default HTTP
    # port is 9200; if taken, ES probes upward within 9200-9300. The default
    # transport port is 9300; if taken, ES probes within 9300-9400. So a few
    # nodes on one host can run without explicit port configuration.
    http.port: 9200
    # Renamed from transport.tcp.port — that name is deprecated in 7.x and
    # removed in 8.x; transport.port is the 7.x setting.
    transport.port: 9300
    http.cors.enabled: true
    http.cors.allow-origin: "*"


    # Seed hosts are used for cluster discovery; any set of addresses that can
    # reach the master (directly or indirectly) works, but the usual practice
    # is to list the master-eligible nodes.
    discovery.seed_hosts: ["es72_00-cluster:9300", "es72_01-cluster:9300", "es72_02-cluster:9300"]
    # Legacy Zen setting; still accepted with a deprecation warning on 7.x.
    discovery.zen.ping_timeout: 5s

    bootstrap.memory_lock: true
    action.destructive_requires_name: true


    # master-eligible nodes
    cluster.initial_master_nodes: ["es72_00-cluster", "es72_01-cluster", "es72_02-cluster"]
    
    • es1.yml
    # es72_01-cluster — node names here must match the names used in docker-compose.yaml.
    cluster.name: elk-cluster
    node.name: es72_01-cluster
    node.master: true
    node.data: true

    network.host: es72_01-cluster
    # Ports on one machine cannot be shared. ES port mechanics: the default HTTP
    # port is 9200; if taken, ES probes upward within 9200-9300. The default
    # transport port is 9300; if taken, ES probes within 9300-9400. So a few
    # nodes on one host can run without explicit port configuration.
    http.port: 9200
    # Renamed from transport.tcp.port — that name is deprecated in 7.x and
    # removed in 8.x; transport.port is the 7.x setting.
    transport.port: 9300
    http.cors.enabled: true
    http.cors.allow-origin: "*"


    # Seed hosts are used for cluster discovery; any set of addresses that can
    # reach the master (directly or indirectly) works, but the usual practice
    # is to list the master-eligible nodes.
    discovery.seed_hosts: ["es72_00-cluster:9300", "es72_01-cluster:9300", "es72_02-cluster:9300"]
    # Legacy Zen setting; still accepted with a deprecation warning on 7.x.
    discovery.zen.ping_timeout: 5s

    bootstrap.memory_lock: true
    action.destructive_requires_name: true

    # Added: every master-eligible node should declare the same bootstrap list.
    # Previously only es.yml set this; if this node formed a cluster first, it
    # could bootstrap without a quorum of the intended masters.
    cluster.initial_master_nodes: ["es72_00-cluster", "es72_01-cluster", "es72_02-cluster"]
    
    • es2.yml
    # es72_02-cluster — node names here must match the names used in docker-compose.yaml.
    cluster.name: elk-cluster
    node.name: es72_02-cluster
    node.master: false
    node.data: true

    network.host: es72_02-cluster
    # Ports on one machine cannot be shared. ES port mechanics: the default HTTP
    # port is 9200; if taken, ES probes upward within 9200-9300. The default
    # transport port is 9300; if taken, ES probes within 9300-9400. So a few
    # nodes on one host can run without explicit port configuration.
    http.port: 9200
    # Renamed from transport.tcp.port — that name is deprecated in 7.x and
    # removed in 8.x; transport.port is the 7.x setting.
    transport.port: 9300
    http.cors.enabled: true
    http.cors.allow-origin: "*"


    # Seed hosts are used for cluster discovery; any set of addresses that can
    # reach the master (directly or indirectly) works, but the usual practice
    # is to list the master-eligible nodes.
    discovery.seed_hosts: ["es72_00-cluster:9300", "es72_01-cluster:9300", "es72_02-cluster:9300"]
    # Legacy Zen setting; still accepted with a deprecation warning on 7.x.
    discovery.zen.ping_timeout: 5s

    bootstrap.memory_lock: true
    action.destructive_requires_name: true
    
    • 执行命令 docker-compose up -d
    • 查看结果
      curl elastic:changeme@localhost:9200
      curl -u elastic:changeme 'localhost:9200/_cat/nodes'

    踩过的坑

    • 7.0版本后, discovery.zen.ping.unicast.hosts 配置改名为 discovery.seed_hosts, 在网上参考了几篇文章布的都是7.0之前的版本, 如果用的是老名称则需使用cluster.initial_master_nodes: ["es72"]主动声明master节点, 否则会报 master not discovered yet, this node has not previously joined a bootstrapped (v7+) cluster,and [cluster.initial_master_nodes] is empty on this node 错误无法构建集群, 详见文章Elasticsearch 集群协调迎来新时代

    相关文章

      网友评论

          本文标题:2.5-ES的docker-compose安装配置(ES版本7.2)

          本文链接:https://www.haomeiwen.com/subject/aewuphtx.html