
Seata 0.9.0 Client Integration

Author: 甯缺毋滥 | Published 2021-01-11 16:37

1. Setting up the Seata client

I created a new module that depends on Seata; the other microservices can then simply depend on this module. You also need to let Seata proxy the data source (a sketch follows); for the simplified setup flow, see the official docs at seata.io.
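
Because file.conf below leaves datasource.autoproxy switched off, the data source has to be proxied by hand. A minimal sketch of such a configuration class, assuming a Druid connection pool (Druid and all bean names here are assumptions, not from the original post):

    import javax.sql.DataSource;

    import com.alibaba.druid.pool.DruidDataSource;
    import io.seata.rm.datasource.DataSourceProxy;
    import org.springframework.boot.context.properties.ConfigurationProperties;
    import org.springframework.context.annotation.Bean;
    import org.springframework.context.annotation.Configuration;
    import org.springframework.context.annotation.Primary;

    @Configuration
    public class DataSourceProxyConfig {

        // bind spring.datasource.* onto a Druid pool (Druid is an assumption)
        @Bean
        @ConfigurationProperties(prefix = "spring.datasource")
        public DruidDataSource druidDataSource() {
            return new DruidDataSource();
        }

        // wrap the real pool in Seata's DataSourceProxy so branch transactions
        // and undo_log records are handled automatically in AT mode
        @Primary
        @Bean
        public DataSource dataSource(DruidDataSource druidDataSource) {
            return new DataSourceProxy(druidDataSource);
        }
    }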

1.1. The pom file

    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
    </dependency>
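
Note that the starter pulls in a transitive seata-all client whose version may differ from your 0.9.0 server. If the versions disagree, a common workaround is to exclude the bundled client and pin 0.9.0 explicitly; a sketch (verify the coordinates against your own dependency tree):

    <dependency>
        <groupId>com.alibaba.cloud</groupId>
        <artifactId>spring-cloud-starter-alibaba-seata</artifactId>
        <exclusions>
            <exclusion>
                <groupId>io.seata</groupId>
                <artifactId>seata-all</artifactId>
            </exclusion>
        </exclusions>
    </dependency>
    <dependency>
        <groupId>io.seata</groupId>
        <artifactId>seata-all</artifactId>
        <version>0.9.0</version>
    </dependency>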
    

1.2. Configure the yml file

    server:
      port: 19000
    spring:
      application:
        name: seata-client
      cloud:
        alibaba:
          seata:
            # note: this value must match service.vgroup_mapping.xxxx in the
            # server-side file.conf, otherwise the server cannot be found
            tx-service-group: my_test_tx_group
    

2. Configure the client .conf files

Add two new files under /src/main/resources: file.conf and registry.conf.

file.conf (the changes are in the service block: the vgroup_mapping and default.grouplist entries)

    transport {
      # tcp udt unix-domain-socket
      type = "TCP"
      #NIO NATIVE
      server = "NIO"
      #enable heartbeat
      heartbeat = true
      #thread factory for netty
      thread-factory {
        boss-thread-prefix = "NettyBoss"
        worker-thread-prefix = "NettyServerNIOWorker"
        server-executor-thread-prefix = "NettyServerBizHandler"
        share-boss-worker = false
        client-selector-thread-prefix = "NettyClientSelector"
        client-selector-thread-size = 1
        client-worker-thread-prefix = "NettyClientWorkerThread"
        # netty boss thread size,will not be used for UDT
        boss-thread-size = 1
        #auto default pin or 8
        worker-thread-size = 8
      }
      shutdown {
        # when destroy server, wait seconds
        wait = 3
      }
      serialization = "seata"
      compressor = "none"
    }
    
    service {
      #vgroup->rgroup
      # my_test_tx_group must match spring.cloud.alibaba.seata.tx-service-group,
      # i.e. the group value in the yml above; the mapped value defaults to "default"
      vgroup_mapping.my_test_tx_group = "default" 
      # set this to the seata server's address; using 127.0.0.1 is not recommended
      default.grouplist = "127.0.0.1:8091"  
      #degrade current not support
      enableDegrade = false
      #disable
      disable = false
      #unit ms,s,m,h,d represents milliseconds, seconds, minutes, hours, days, default permanent
      max.commit.retry.timeout = "-1"
      max.rollback.retry.timeout = "-1"
    }
    
    client {
      async.commit.buffer.limit = 10000
      lock {
        retry.internal = 10
        retry.times = 30
      }
      report.retry.count = 5
      tm.commit.retry.count = 1
      tm.rollback.retry.count = 1
    }
    
    transaction {
      undo.data.validation = true
      undo.log.serialization = "jackson"
      undo.log.save.days = 7
      #schedule delete expired undo_log in milliseconds
      undo.log.delete.period = 86400000
      undo.log.table = "undo_log"
    }
    
    support {
      ## spring
      spring {
        # auto proxy the DataSource bean
        datasource.autoproxy = false
      }
    }
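
The transaction block above refers to an undo_log table: every business database an RM writes to needs this table for AT-mode rollback. The schema below is the standard MySQL DDL shipped with Seata 0.x (double-check it against your exact distribution):

    CREATE TABLE `undo_log` (
      `id` BIGINT(20) NOT NULL AUTO_INCREMENT,
      `branch_id` BIGINT(20) NOT NULL,
      `xid` VARCHAR(100) NOT NULL,
      `context` VARCHAR(128) NOT NULL,
      `rollback_info` LONGBLOB NOT NULL,
      `log_status` INT(11) NOT NULL,
      `log_created` DATETIME NOT NULL,
      `log_modified` DATETIME NOT NULL,
      `ext` VARCHAR(100) DEFAULT NULL,
      PRIMARY KEY (`id`),
      UNIQUE KEY `ux_undo_log` (`xid`, `branch_id`)
    ) ENGINE = InnoDB DEFAULT CHARSET = utf8;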
    

registry.conf (the changes are the type setting and the nacos block at the top)

    registry {
      # file 、nacos 、eureka、redis、zk、consul、etcd3、sofa
      type = "nacos"    #修改为采用nacos为注册中心
    
      nacos {
        serverAddr = "nacos_ip:8848"  #nacos 地址
        namespace = ""
        cluster = "default"
      }
      eureka {
        serviceUrl = "http://localhost:53000/eureka"    
        application = "house-server"
        weight = "1"
      }
      redis {
        serverAddr = "localhost:6379"
        db = "0"
      }
      zk {
        cluster = "default"
        serverAddr = "127.0.0.1:2181"
        session.timeout = 6000
        connect.timeout = 2000
      }
      consul {
        cluster = "default"
        serverAddr = "127.0.0.1:8500"
      }
      etcd3 {
        cluster = "default"
        serverAddr = "http://localhost:2379"
      }
      sofa {
        serverAddr = "127.0.0.1:9603"
        application = "default"
        region = "DEFAULT_ZONE"
        datacenter = "DefaultDataCenter"
        cluster = "default"
        group = "SEATA_GROUP"
        addressWaitTime = "3000"
      }
      file {
        name = "file.conf"
      }
    }
    
    config {
      # file、nacos 、apollo、zk、consul、etcd3
      type = "file"
    
      nacos {
        serverAddr = "localhost"
        namespace = ""
      }
      consul {
        serverAddr = "127.0.0.1:8500"
      }
      apollo {
        app.id = "seata-server"
        apollo.meta = "http://192.168.1.204:8801"
      }
      zk {
        serverAddr = "127.0.0.1:2181"
        session.timeout = 6000
        connect.timeout = 2000
      }
      etcd3 {
        serverAddr = "http://localhost:2379"
      }
      file {
        name = "file.conf"
      }
    }
    

3. Startup

3.1. Error: "no available server to connect"

If this message shows up, the client cannot find the Seata server. Possible causes:

1. The value of spring.cloud.alibaba.seata.tx-service-group does not match the server side.
2. The Seata client and server versions do not match.
3. The server address is filled in incorrectly.
4. Anything else you will have to investigate yourself; the Seata documentation is sparse, so look at the underlying network requests and analyze their parameters.

3.2. Normal startup

When startup succeeds, you can see the Seata server's IP in the log.

3.3. The RMs (your business services) must also register with the same registry center as seata-server.
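
Once the client, the server, and the RMs all share one registry, a global transaction is opened by annotating the initiating (TM) method. A minimal sketch, assuming a hypothetical t_order table and a downstream stock-service endpoint (the table name, URL, and parameters are illustrative, not from the original post):

    import io.seata.spring.annotation.GlobalTransactional;
    import org.springframework.jdbc.core.JdbcTemplate;
    import org.springframework.stereotype.Service;
    import org.springframework.web.client.RestTemplate;

    @Service
    public class OrderService {

        private final JdbcTemplate jdbcTemplate;
        private final RestTemplate restTemplate;

        public OrderService(JdbcTemplate jdbcTemplate, RestTemplate restTemplate) {
            this.jdbcTemplate = jdbcTemplate;
            this.restTemplate = restTemplate;
        }

        // opens a global transaction: the local INSERT (a branch through the
        // DataSourceProxy) and the remote stock deduction commit or roll back together
        @GlobalTransactional(timeoutMills = 60000, name = "create-order")
        public void createOrder(long productId, int count) {
            jdbcTemplate.update(
                    "INSERT INTO t_order (product_id, count) VALUES (?, ?)", productId, count);
            // hypothetical downstream RM endpoint
            restTemplate.getForObject(
                    "http://stock-service/stock/deduct?productId={id}&count={n}",
                    Void.class, productId, count);
        }
    }

With spring-cloud-starter-alibaba-seata on the classpath, interceptors that propagate the XID across RestTemplate and Feign calls are auto-configured, so the downstream service enlists in the same global transaction.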
