以读写分离为例. 主数据源master负责写入,从数据源cluster负责读取
以上个项目为基础,添加如下依赖
<!-- AOP support: needed for the data-source routing aspect (DataSourceAspect) -->
<dependency>
<groupId>org.springframework.boot</groupId>
<artifactId>spring-boot-starter-aop</artifactId>
</dependency>
<!-- Common Java utility classes (StringUtils etc.) -->
<dependency>
<groupId>org.apache.commons</groupId>
<artifactId>commons-lang3</artifactId>
<version>3.4</version>
</dependency>
application.yml - 配置中添加master、cluster两个数据源
spring:
  datasource:
    name: shop
    master:
      # DataSourceBuilder creates a HikariCP pool by default in Spring Boot 2,
      # and Hikari's property is "jdbc-url" (plain "url" does not bind).
      # Also fixed typo: the MySQL Connector/J parameter is useSSL, not userSSL.
      jdbc-url: jdbc:mysql://127.0.0.1:3306/shop?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true&useSSL=false
      username: root
      password: root
      driver-class-name: com.mysql.jdbc.Driver
    cluster:
      # Druid exposes setUrl(), so "url" is the correct key here.
      url: jdbc:mysql://127.0.0.1:3306/shop?useUnicode=true&characterEncoding=UTF-8&allowMultiQueries=true&useSSL=false
      username: root
      password: root
      driver-class-name: com.mysql.jdbc.Driver
    # Method-name prefixes routed to the read / write data source (see DataSourceAspect).
    read: get,select,count,list,query,find
    write: add,create,update,delete,remove,insert
    # Druid connection-pool settings (injected via @Value in DataSourceConfig)
    druid:
      # Monitoring/statistics filters
      filters: stat,wall
      # Initial / minimum / maximum pool size
      initial-size: 1
      min-idle: 1
      max-active: 20
      # Max wait (ms) when borrowing a connection
      max-wait: 60000
      # Interval (ms) between idle-connection eviction runs
      time-between-eviction-runs-millis: 60000
      # Minimum time (ms) a connection must sit idle before it may be evicted
      min-evictable-idle-time-millis: 300000
      validation-query: SELECT 'x'
      test-while-idle: true
      test-on-borrow: false
      test-on-return: false
      # PSCache: recommended true for Oracle, false for MySQL (and for heavy sharding)
      pool-prepared-statements: false
      max-pool-prepared-statement-per-connection-size: 20

mybatis:
  mapper-locations: classpath:mybatis/mapper/*Mapper.xml
  type-aliases-package: com.littlebear.pojo

# pagehelper pagination plugin
pagehelper:
  helperDialect: mysql
  reasonable: true
  supportMethodsArguments: true
  params: count=countSql
  returnPageInfo: check
创建数据源类型的枚举DatabaseType,用来区分读和写
package com.littlebear.config;
/**
 * Logical data source roles used as routing keys by {@code DynamicDataSource}:
 * {@code master} handles writes, {@code cluster} handles reads.
 * Created by shenhelin on 2018/7/17-22:43
 **/
public enum DatabaseType {
    /** Primary data source — all writes go here. */
    master("write"),
    /** Replica data source — all reads go here. */
    cluster("read");

    // Role label for this data source ("write" or "read").
    private String name;

    DatabaseType(String name) {
        this.name = name;
    }

    public String getName() {
        return name;
    }

    public void setName(String name) {
        this.name = name;
    }

    @Override
    public String toString() {
        return new StringBuilder("DatabaseType{name='")
                .append(name)
                .append("'}")
                .toString();
    }
}
创建线程安全的DatabaseType容器,多数据源环境下必须保证数据源切换是线程安全的
package com.littlebear.config;
/**
 * Thread-safe holder for the {@link DatabaseType} chosen for the current thread.
 * {@code DataSourceAspect} stores the routing decision here before a mapper call,
 * and {@code DynamicDataSource} reads it back when a connection is requested.
 * Created by shenhelin on 2018/7/17-23:06
 **/
public class DatabaseContextHolder {
    // Per-thread storage: each worker thread only ever sees its own DatabaseType.
    private static final ThreadLocal<DatabaseType> contextHolder = new ThreadLocal<>();

    /** Records the data source type the current thread should use. */
    public static void setDatabaseType(DatabaseType type) {
        contextHolder.set(type);
    }

    /** @return the type set for the current thread, or null if none was set. */
    public static DatabaseType getDatabaseType() {
        return contextHolder.get();
    }

    /**
     * Removes the current thread's entry. Call this once the unit of work is done
     * so pooled (reused) threads do not leak a stale routing decision into the
     * next request that happens to run on the same thread.
     */
    public static void clearDatabaseType() {
        contextHolder.remove();
    }
}
创建动态数据源,实现数据源切换的功能就是自定义一个类扩展AbstractRoutingDataSource
抽象类,其实该类相当于数据源DataSource的路由中介,可以实现在项目运行时根据相应key值切换到对应的数据源DataSource上
package com.littlebear.config;
import org.springframework.jdbc.datasource.lookup.AbstractRoutingDataSource;
import java.util.Arrays;
import java.util.HashMap;
import java.util.List;
import java.util.Map;
/**
 * Routing DataSource: picks master or cluster per call based on the
 * DatabaseType stored for the current thread.
 */
public class DynamicDataSource extends AbstractRoutingDataSource {

    /**
     * Method-name prefixes per data source type, e.g.
     * {master=[add, create, ...], cluster=[get, select, ...]}.
     * Populated once at startup by DataSourceConfig#dataSource, then only read
     * (by DataSourceAspect).
     */
    static final Map<DatabaseType, List<String>> METHOD_TYPE_MAP = new HashMap<>();

    /**
     * Returns the lookup key for the data source the current thread should use.
     * Returning null makes AbstractRoutingDataSource fall back to the default target.
     */
    @Override
    protected Object determineCurrentLookupKey() {
        DatabaseType type = DatabaseContextHolder.getDatabaseType();
        // "logger" is inherited from AbstractRoutingDataSource:
        // protected final Log logger = LogFactory.getLog(getClass());
        logger.info("dataSource : " + type);
        return type;
    }

    /**
     * Registers the comma-separated method-name prefixes that route to the given type.
     *
     * @param type    data source the prefixes map to
     * @param content comma-separated prefixes (e.g. "get,select,count"); may be
     *                null or blank when the property is missing from configuration
     */
    void setMethodType(DatabaseType type, String content) {
        if (content == null || content.trim().isEmpty()) {
            // Property absent: register an empty list instead of throwing an NPE
            // on content.split(...) — callers then simply never match this type.
            METHOD_TYPE_MAP.put(type, Arrays.asList());
            return;
        }
        // Split on commas, tolerating whitespace around each entry.
        List<String> list = Arrays.asList(content.trim().split("\\s*,\\s*"));
        METHOD_TYPE_MAP.put(type, list);
    }
}
创建数据源配置类DataSourceConfig
DataSourceProperties
和@ConfigurationProperties(prefix = "spring.datasource.master")
配合使用,将配置文件中的数据自动封装到DataSourceProperties中
package com.littlebear.config;
import com.alibaba.druid.pool.DruidDataSource;
import lombok.extern.slf4j.Slf4j;
import org.apache.ibatis.session.SqlSessionFactory;
import org.mybatis.spring.SqlSessionFactoryBean;
import org.mybatis.spring.annotation.MapperScan;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.beans.factory.annotation.Qualifier;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.boot.autoconfigure.jdbc.DataSourceProperties;
import org.springframework.boot.context.properties.ConfigurationProperties;
import org.springframework.boot.jdbc.DataSourceBuilder;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.context.annotation.Primary;
import org.springframework.core.env.Environment;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.jdbc.datasource.DataSourceTransactionManager;
import org.springframework.transaction.annotation.EnableTransactionManagement;
import javax.sql.DataSource;
import java.sql.SQLException;
import java.util.HashMap;
import java.util.Map;
@Configuration
@MapperScan("com.littlebear.mapper")
@EnableTransactionManagement
@Slf4j
public class DataSourceConfig {
    // Environment gives convenient access to raw configuration-file properties.
    @Autowired
    private Environment env;
    // Root-level DataSourceProperties (binds spring.datasource.*).
    // NOTE(review): the yaml nests url/username/password under
    // spring.datasource.master and spring.datasource.cluster, so getUrl() etc.
    // on this object may be null — verify before relying on it below.
    @Autowired
    private DataSourceProperties properties;
    // Druid pool tuning values, injected from spring.datasource.druid.*.
    @Value("${spring.datasource.druid.filters}")
    private String filters;
    @Value("${spring.datasource.druid.initial-size}")
    private Integer initialSize;
    @Value("${spring.datasource.druid.min-idle}")
    private Integer minIdle;
    @Value("${spring.datasource.druid.max-active}")
    private Integer maxActive;
    @Value("${spring.datasource.druid.max-wait}")
    private Integer maxWait;
    @Value("${spring.datasource.druid.time-between-eviction-runs-millis}")
    private Long timeBetweenEvictionRunsMillis;
    @Value("${spring.datasource.druid.min-evictable-idle-time-millis}")
    private Long minEvictableIdleTimeMillis;
    @Value("${spring.datasource.druid.validation-query}")
    private String validationQuery;
    @Value("${spring.datasource.druid.test-while-idle}")
    private Boolean testWhileIdle;
    @Value("${spring.datasource.druid.test-on-borrow}")
    private boolean testOnBorrow;
    @Value("${spring.datasource.druid.test-on-return}")
    private boolean testOnReturn;
    @Value("${spring.datasource.druid.pool-prepared-statements}")
    private boolean poolPreparedStatements;
    @Value("${spring.datasource.druid.max-pool-prepared-statement-per-connection-size}")
    private Integer maxPoolPreparedStatementPerConnectionSize;
    /**
     * Master (write) data source, created quickly via Spring's DataSourceBuilder.
     * The {@code @ConfigurationProperties} annotation binds spring.datasource.master.*
     * onto the created pool (HikariCP by default in Spring Boot 2).
     * @return the write data source
     */
    @Bean(name = "masterDataSource")
    @Qualifier("masterDataSource")
    @ConfigurationProperties(prefix = "spring.datasource.master")
    public DataSource masterDataSource() {
        log.info("## masterDataSource");
        return DataSourceBuilder.create().build();
    }
    /**
     * Cluster (read) data source: a DruidDataSource built by hand, with connection
     * info taken from DataSourceProperties and pool tuning from the @Value fields.
     * @return the read data source
     * @throws SQLException if the Druid filters cannot be configured
     */
    @Bean(name = "clusterDataSource")
    @Qualifier("clusterDataSource")
    @ConfigurationProperties(prefix = "spring.datasource.cluster")
    public DataSource clusterDataSource() throws SQLException {
        log.info("## clusterDataSource");
        DruidDataSource dataSource = new DruidDataSource();
        dataSource.setFilters(filters);
        // NOTE(review): see the field comment on `properties` — these root-level
        // values may be null; presumably the @ConfigurationProperties binding with
        // prefix spring.datasource.cluster on this bean method supplies the real
        // url/username/password after this method returns — confirm at runtime.
        dataSource.setUrl(properties.getUrl());
        dataSource.setDriverClassName(properties.getDriverClassName());
        dataSource.setUsername(properties.getUsername());
        dataSource.setPassword(properties.getPassword());
        dataSource.setInitialSize(initialSize);
        dataSource.setMinIdle(minIdle);
        dataSource.setMaxActive(maxActive);
        dataSource.setMaxWait(maxWait);
        dataSource.setTimeBetweenEvictionRunsMillis(timeBetweenEvictionRunsMillis);
        dataSource.setMinEvictableIdleTimeMillis(minEvictableIdleTimeMillis);
        dataSource.setValidationQuery(validationQuery);
        dataSource.setTestWhileIdle(testWhileIdle);
        dataSource.setTestOnBorrow(testOnBorrow);
        dataSource.setTestOnReturn(testOnReturn);
        dataSource.setPoolPreparedStatements(poolPreparedStatements);
        dataSource.setMaxPoolPreparedStatementPerConnectionSize(maxPoolPreparedStatementPerConnectionSize);
        return dataSource;
    }
    /**
     * Builds the routing data source that wraps both pools.
     * Master uses HikariDataSource, cluster uses DruidDataSource.
     * @param master the write pool
     * @param cluster the read pool
     * @return routing data source keyed by DatabaseType
     */
    @Bean
    @Primary
    public DynamicDataSource dataSource(@Qualifier("masterDataSource") DataSource master,
                                        @Qualifier("clusterDataSource") DataSource cluster) {
        log.info("## dataSource");
        Map<Object, Object> targetDataSources = new HashMap<>();
        // {master : masterDataSource (write)}
        targetDataSources.put(DatabaseType.master, master);
        // {cluster : clusterDataSource (read)}
        targetDataSources.put(DatabaseType.cluster, cluster);
        DynamicDataSource dataSource = new DynamicDataSource();
        // setTargetDataSources is inherited from AbstractRoutingDataSource and takes
        // Map<Object, Object>, which is why the pools were put into a map above:
        // public void setTargetDataSources(Map<Object, Object> targetDataSources) {
        //     this.targetDataSources = targetDataSources;
        // }
        dataSource.setTargetDataSources(targetDataSources);
        // Fallback when no lookup key is set for the current thread: the cluster
        // (read) pool. (The original comment named "myTestDbDataSource" — wrong.)
        dataSource.setDefaultTargetDataSource(cluster);
        String write = env.getProperty("spring.datasource.write");
        // {master : add,create,update,delete,remove,insert}
        dataSource.setMethodType(DatabaseType.master, write);
        String read = env.getProperty("spring.datasource.read");
        // {cluster : get,select,count,list,query,find}
        dataSource.setMethodType(DatabaseType.cluster, read);
        return dataSource;
    }
    /* When defining a SqlSessionFactoryBean, the dataSource property is mandatory —
       it is the data source used to connect to the database. Other useful properties:
       mapperLocations: where the Mapper XML files live; may be omitted when each XML
           file sits next to its Mapper interface.
       configLocation: location of the MyBatis config file; if set, that file seeds the
           SqlSessionFactoryBuilder, but properties set afterwards override its values.
       typeAliasesPackage: package(s) containing the entity classes; the simple class
           name (without package) becomes the alias. Multiple packages may be separated
           by commas or semicolons (values must be fully-qualified package names).
       typeAliases: an array of classes to alias; the simple name becomes the alias
           unless the class carries @Alias, in which case that annotation's value is
           used (values must be fully-qualified class names). */
    @Bean
    public SqlSessionFactory sqlSessionFactory(@Qualifier("masterDataSource") DataSource myTestDbDataSource,
                                               @Qualifier("clusterDataSource") DataSource myTestDb2DataSource) throws Exception {
        log.info("## sqlSessionFactory");
        SqlSessionFactoryBean sqlSessionFactoryBean = new SqlSessionFactoryBean();
        /* Wire the routing data source into the session factory.
           setDataSource(DataSource) accepts a DynamicDataSource because:
           DynamicDataSource extends AbstractRoutingDataSource
           AbstractRoutingDataSource extends AbstractDataSource
           AbstractDataSource implements DataSource */
        sqlSessionFactoryBean.setDataSource(this.dataSource(myTestDbDataSource, myTestDb2DataSource));
        sqlSessionFactoryBean.setMapperLocations(new PathMatchingResourcePatternResolver().getResources(env.getProperty("mybatis.mapper-locations")));
        sqlSessionFactoryBean.setTypeAliasesPackage(env.getProperty("mybatis.type-aliases-package"));
        return sqlSessionFactoryBean.getObject();
    }
    // Transaction manager operating on the routing data source, so transactions
    // follow whichever physical data source the current thread routed to.
    @Bean
    public DataSourceTransactionManager transactionManager(DynamicDataSource dataSource) throws Exception {
        log.info("## transactionManager");
        return new DataSourceTransactionManager(dataSource);
    }
}
配置AOP,以mapper的接口为切点,根据接口的名称来切换数据源,所以接口名要规范,或者是把自己的方法名加到读写配置规则里
package com.littlebear.config;
import lombok.extern.slf4j.Slf4j;
import org.apache.commons.lang3.StringUtils;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.aspectj.lang.annotation.Pointcut;
import org.springframework.context.annotation.EnableAspectJAutoProxy;
import org.springframework.stereotype.Component;
import java.util.List;
/**
 * Routes each mapper call to the read or write data source by matching the
 * method name's prefix against the configured read/write prefix lists.
 * Interface method names must therefore follow the naming convention (or the
 * custom prefix must be added to the read/write rules in application.yml).
 */
@Aspect
@Component
@EnableAspectJAutoProxy(proxyTargetClass = true)
@Slf4j
public class DataSourceAspect {

    /** Pointcut: every method of every mapper interface under com.littlebear.mapper. */
    @Pointcut("execution(* com.littlebear.mapper..*.*(..))")
    public void aspect() {
    }

    /**
     * Runs before each mapper method: picks the DatabaseType whose registered
     * prefixes match the method name and stores it in DatabaseContextHolder,
     * where DynamicDataSource later reads it.
     * NOTE(review): the chosen type is never cleared afterwards, so on a pooled
     * thread an unmatched method inherits the previous call's routing — consider
     * clearing the holder in @After advice.
     *
     * @param point join point describing the intercepted mapper call
     */
    @Before("aspect()")
    public void before(JoinPoint point) {
        String className = point.getTarget().getClass().getName();
        String method = point.getSignature().getName();
        String args = StringUtils.join(point.getArgs(), ",");
        log.info("# className:{}, method:{}, args:{} ", className, method, args);
        try {
            outer:
            for (DatabaseType type : DatabaseType.values()) {
                List<String> values = DynamicDataSource.METHOD_TYPE_MAP.get(type);
                if (values == null) {
                    // No prefixes registered for this type (map not yet populated,
                    // or property missing): skip instead of throwing an NPE below.
                    continue;
                }
                for (String key : values) {
                    if (method.startsWith(key)) {
                        log.info("# method: {} key: {}", method, key);
                        // Prefix matched: record the routing decision for this thread.
                        DatabaseContextHolder.setDatabaseType(type);
                        DatabaseType types = DatabaseContextHolder.getDatabaseType();
                        log.info("# {} 方法使用的数据源为: {}", method, types);
                        break outer;
                    }
                }
            }
        } catch (Exception e) {
            // Routing is best-effort: log and let the call proceed on the default source.
            log.error(e.getMessage(), e);
        }
    }
}
项目启动时初始化顺序
可以看出add.do用的是write数据源,该数据源是使用HikariPool作为数据库连接池,
list.do用的是read数据源,该数据源是使用DruidPool作为数据库连接池
调用add.do / list.do后的日志
最终项目结构
网友评论