
Sharding-JDBC: A Simple Application

Author: Hystrix_Hu | Published 2017-05-10 17:41

    Sharding-JDBC is a simple sharding middleware: database and table sharding is done entirely inside the application, with no extra proxy service, so it can be brought into a project quickly.

    This article is a brief summary of how a project I took part in used Sharding-JDBC. Our usage was straightforward: we replaced the old hand-rolled table-routing code with Sharding-JDBC, integrated into JdbcTemplate.

    pom.xml
    <!-- Sharding-JDBC core module -->
            <dependency>
                <groupId>com.dangdang</groupId>
                <artifactId>sharding-jdbc-core</artifactId>
                <version>1.4.1</version>
            </dependency>
    
            <dependency>
                <groupId>com.dangdang</groupId>
                <artifactId>sharding-jdbc-config-spring</artifactId>
                <version>1.4.1</version>
            </dependency>
    

    First, add the Sharding-JDBC dependencies. Our project originally used 1.4.0, which under heavy concurrency occasionally produced Caused by: java.lang.IndexOutOfBoundsException: Index: 1, Size: 1; this appears to have been caused by some internal state being shared in 1.4.0, and upgrading to 1.4.1 made the problem disappear.

    spring-data.xml
    <?xml version="1.0" encoding="UTF-8"?>
    <beans xmlns="http://www.springframework.org/schema/beans"
        xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance" xmlns:context="http://www.springframework.org/schema/context"
        xmlns:rdb="http://www.dangdang.com/schema/ddframe/rdb"
        xmlns:tx="http://www.springframework.org/schema/tx"
        xsi:schemaLocation="http://www.springframework.org/schema/beans 
                            http://www.springframework.org/schema/beans/spring-beans.xsd
                            http://www.springframework.org/schema/context 
                            http://www.springframework.org/schema/context/spring-context.xsd 
                            http://www.dangdang.com/schema/ddframe/rdb 
                            http://www.dangdang.com/schema/ddframe/rdb/rdb.xsd 
                            http://www.springframework.org/schema/tx 
                            http://www.springframework.org/schema/tx/spring-tx-4.0.xsd
                            ">
                            
        <bean id="defaultDataSource" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close">      
            <property name="driverClassName">
                <value>${datasource.driverClassName}</value>
            </property>
            <property name="url">
                <value>${datasource.url}</value>
            </property>
            <property name="username">
                <value>${datasource.username}</value>
            </property>
            <property name="password">
                <value>${datasource.password}</value>
            </property>
            <property name="maxActive">
               <value>${datasource.maxActive}</value>
            </property>
            <property name="maxIdle">
                <value>${datasource.maxIdle}</value>
            </property>
            <property name="maxWait">
                <value>${datasource.maxWait}</value>
            </property>
            <property name="defaultAutoCommit">
                <value>${datasource.defaultAutoCommit}</value>
            </property>
        </bean>
        
        <bean id="readDataSource" class="org.apache.commons.dbcp.BasicDataSource" destroy-method="close">      
            <property name="driverClassName">
                <value>${datasource.readonly.driverClassName}</value>
            </property>
            <property name="url">
                <value>${datasource.readonly.url}</value>
            </property>
            <property name="username">
                <value>${datasource.readonly.username}</value>
            </property>
            <property name="password">
                <value>${datasource.readonly.password}</value>
            </property>
            <property name="maxActive">
                <value>${datasource.readonly.maxActive}</value>
            </property>
            <property name="maxIdle">
                <value>${datasource.readonly.maxIdle}</value>
            </property>
            <property name="maxWait">
                <value>${datasource.readonly.maxWait}</value>
            </property>
            <property name="defaultAutoCommit">
                <value>${datasource.readonly.defaultAutoCommit}</value>
            </property>
        </bean>
        
        <rdb:master-slave-data-source id="masterSlaveDataSource" master-data-source-ref="defaultDataSource" slave-data-sources-ref="readDataSource" />
    <!-- table-sharding strategy for user_ticket -->
        <rdb:strategy id="userTicketTableStrategy" sharding-columns="username"
            algorithm-class="com.common.sharding.SingleKeyHashShardingAlgorithm" />
    
        <rdb:data-source id="shardingDataSource">
            <rdb:sharding-rule data-sources="masterSlaveDataSource">
                <rdb:table-rules>
                    <rdb:table-rule logic-table="user_ticket" actual-tables="user_ticket_01,user_ticket_02,user_ticket_03,user_ticket_04,user_ticket_05,user_ticket_06,user_ticket_07,user_ticket_08,user_ticket_09,user_ticket_10,user_ticket_11,user_ticket_12,user_ticket_13,user_ticket_14,user_ticket_15,user_ticket_16,user_ticket_17,user_ticket_18,user_ticket_19,user_ticket_20"
                        table-strategy="userTicketTableStrategy" />
                </rdb:table-rules>
            </rdb:sharding-rule>
        </rdb:data-source>
    
        <!--JdbcTemplate -->
        <bean id="shardingJdbcTemplate" class="org.springframework.jdbc.core.JdbcTemplate">
            <property name="dataSource">
                <ref bean="shardingDataSource" />
            </property>
        </bean>
        
        <!--TransactionManager -->
        <bean name="shardingTransactionManager" class="org.springframework.jdbc.datasource.DataSourceTransactionManager">
            <property name="dataSource" ref="shardingDataSource"></property>
        </bean>
    
    <!-- Enable annotation-driven transactions: this declaration is required for @Transactional on classes/methods to take effect -->
        <tx:annotation-driven transaction-manager="shardingTransactionManager" proxy-target-class="true"/>
    </beans>
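
    The ${datasource.*} placeholders above are resolved from an external properties file through Spring's property-placeholder mechanism (the declaration is not shown in the snippet above). As a minimal sketch, assuming the file is named database.properties and sits on the classpath, a line like the following in the Spring context is enough:

    <context:property-placeholder location="classpath:database.properties" />

    The file itself simply maps each key to a JDBC setting, with datasource.* entries for the master data source and datasource.readonly.* entries for the read-only slave.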
    

    In our project we combined read-write splitting with table sharding inside a single database; sharding across multiple databases should work much the same way, and if anyone finds a good article on multi-database sharding, please leave a comment below. Using Sharding-JDBC boils down to wrapping the data source: configure the table-sharding strategy, wrap the original data source with it, and wire the wrapped data source into JdbcTemplate and the transaction manager.

                <rdb:table-rules>
                    <rdb:table-rule logic-table="user_ticket" actual-tables="user_ticket_01,user_ticket_02,user_ticket_03,user_ticket_04,user_ticket_05,user_ticket_06,user_ticket_07,user_ticket_08,user_ticket_09,user_ticket_10,user_ticket_11,user_ticket_12,user_ticket_13,user_ticket_14,user_ticket_15,user_ticket_16,user_ticket_17,user_ticket_18,user_ticket_19,user_ticket_20"
                        table-strategy="userTicketTableStrategy" />
                </rdb:table-rules>
    

    This rule presents the 20 physical tables as one logical table, user_ticket.

    <rdb:strategy id="userTicketTableStrategy" sharding-columns="username"
            algorithm-class="com.common.sharding.SingleKeyHashShardingAlgorithm" />
    

    The userTicketTableStrategy strategy partitions rows by a hash of the username column; the algorithm code is as follows:

    package com.common.sharding;
    
    import java.util.Collection;
    import java.util.HashSet;
    import java.util.Set;
    
    import com.dangdang.ddframe.rdb.sharding.api.ShardingValue;
    import com.dangdang.ddframe.rdb.sharding.api.strategy.table.SingleKeyTableShardingAlgorithm;
    
    /**
     * Table-sharding implementation based on the hash of a single column.
     *
     */
    public class SingleKeyHashShardingAlgorithm implements SingleKeyTableShardingAlgorithm<String> {
    
        /**
         * 
         * allActualTableNames: all physical table names; shardingValue: the value of the sharding key
         */
        @Override
        public String doEqualSharding(final Collection<String> allActualTableNames,
                final ShardingValue<String> shardingValue) {
            // logical table name
            String logicTableName = shardingValue.getLogicTableName();
            // derive the physical table name from the sharding value
            String actualTableName = logicTableName
                    + "_"
                    + String.format("%02d",
                            (Math.abs(shardingValue.getValue().hashCode()) % allActualTableNames.size()) + 1);
            if (allActualTableNames.contains(actualTableName)) {
                return actualTableName;
            }

            // no matching physical table means the sharding configuration is wrong
            throw new UnsupportedOperationException();
        }
    
        /**
         * Supports IN expressions on the sharding column.
         */
        @Override
        public Collection<String> doInSharding(Collection<String> allActualTableNames,
                ShardingValue<String> paramShardingValue) {
            // physical tables matched by the values of the IN expression
            Set<String> inValueTables = new HashSet<String>();
            Collection<String> inValues = paramShardingValue.getValues();

            String logicTableName = paramShardingValue.getLogicTableName();
            for (String value : inValues) {
                // use the same formula as doEqualSharding so both paths route identically
                String actualTableName = logicTableName
                        + "_"
                        + String.format("%02d", (Math.abs(value.hashCode()) % allActualTableNames.size()) + 1);
                if (allActualTableNames.contains(actualTableName)) {
                    inValueTables.add(actualTableName);
                }
            }

            if (inValueTables.isEmpty()) {
                throw new UnsupportedOperationException();
            }
    
            return inValueTables;
        }
    
        @Override
        public Collection<String> doBetweenSharding(Collection<String> allActualTableNames,
                ShardingValue<String> paramShardingValue) {
            // BETWEEN on the sharding column is not supported; implement it when the need arises
            throw new UnsupportedOperationException();
        }
    }
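
    To get a feel for the routing, the hash arithmetic can be checked on its own, outside of Sharding-JDBC. The class below is only an illustrative sketch (the sample usernames are made up); it prints which of the 20 physical tables a given username would land in:

    package com.common.sharding;

    /**
     * Stand-alone check of the routing formula used by SingleKeyHashShardingAlgorithm.
     */
    public class ShardingRoutingDemo {

        public static void main(String[] args) {
            int tableCount = 20; // user_ticket_01 .. user_ticket_20
            for (String username : new String[] {"alice", "bob", "carol"}) {
                // same formula as doEqualSharding: abs(hashCode) % tableCount, 1-based, zero-padded
                String suffix = String.format("%02d", (Math.abs(username.hashCode()) % tableCount) + 1);
                System.out.println(username + " -> user_ticket_" + suffix);
            }
        }
    }

    Because the formula depends only on username, every read and write for the same user always hits the same physical table.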
    
    

    Day-to-day usage is essentially the same as with a plain JdbcTemplate: just write user_ticket in place of all the partition tables.

    package com.base.dao;
    
    import java.sql.Types;
    import java.util.ArrayList;
    import java.util.Date;
    import java.util.List;
    
    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.jdbc.core.BeanPropertyRowMapper;
    import org.springframework.jdbc.core.JdbcTemplate;
    import org.springframework.stereotype.Repository;
    
    import com.base.bean.UserTicket;
    
    @Repository
    public class UserTicketDao {
        @Autowired
        private JdbcTemplate jdbcTemplate;
    
        /**
         * @param username the sharding column, so the query is routed to a single physical table
         * @param pageid   page number, starting from 1
         * @param pagesize number of rows per page
         * @return the user's tickets, newest first
         * @throws Exception
         */
        public List<UserTicket> getUserTicketDetail(String username, int pageid, int pagesize) throws Exception {
            StringBuilder sqlBuilder = new StringBuilder();
            sqlBuilder
                    .append("select * from user_ticket where username = ? and typecode='vod' and displaytime < now() order by id desc limit ?, ?");
            return jdbcTemplate.query(sqlBuilder.toString(), new Object[] {username, (pageid - 1) * pagesize, pagesize},
                    new int[] {Types.VARCHAR, Types.INTEGER, Types.INTEGER}, new BeanPropertyRowMapper<UserTicket>(
                            UserTicket.class));
        }
    
        /**
         * Updates a single ticket; username is kept in the WHERE clause so that
         * Sharding-JDBC can route the statement to exactly one physical table.
         * @param ut the ticket to update
         */
        public void updateUserTicket(UserTicket ut) {
            StringBuilder sqlBuilder = new StringBuilder();
            sqlBuilder
                    .append("update user_ticket set username=?, times=?, validtime=?, typecode=?, fromcode=?, createtime=?, usetime=?, channelid=?, status=?, displaytime=?, starttime=? where ticketno=? and username=?");
            Object[] args = new Object[] {ut.getUsername(), ut.getTimes(), ut.getValidtime(), ut.getTypecode(),
                    ut.getFromcode(), ut.getCreatetime(), ut.getUsetime(), ut.getChannelid(), ut.getStatus(),
                    ut.getDisplaytime(), ut.getStarttime(), ut.getTicketno(), ut.getUsername()};
            int[] types = new int[] {Types.VARCHAR, Types.INTEGER, Types.TIMESTAMP, Types.VARCHAR, Types.VARCHAR,
                    Types.TIMESTAMP, Types.TIMESTAMP, Types.INTEGER, Types.INTEGER, Types.TIMESTAMP, Types.TIMESTAMP,
                    Types.VARCHAR, Types.VARCHAR};
            jdbcTemplate.update(sqlBuilder.toString(), args, types);
        }
    
        /**
         * Inserts tickets one statement at a time: Sharding-JDBC does not support
         * JdbcTemplate's batch update operations, so a plain loop is used instead.
         * @param list the tickets to insert
         */
        public void batchSaveUserTicket(final List<UserTicket> list) {
            for (int k = 0; k < list.size(); k++) {
                StringBuffer sb = new StringBuffer();
                UserTicket ut = list.get(k);
                sb.append("insert into user_ticket(ticketno, username, validtime, typecode, fromcode, createtime, channelid, status, displaytime, starttime)");
                sb.append("values(");
                sb.append("'").append(ut.getTicketno()).append("'").append(",");
                sb.append("'").append(ut.getUsername()).append("'").append(",");
                sb.append("'").append(new SuperDate(ut.getValidtime()).getDateTimeString()).append("'").append(",");
                sb.append("'").append(ut.getTypecode()).append("'").append(",");
                sb.append("'").append(ut.getFromcode()).append("'").append(",");
                sb.append("'").append(new SuperDate(ut.getCreatetime()).getDateTimeString()).append("'").append(",");
                sb.append(ut.getChannelid()).append(",");
                sb.append(ut.getStatus()).append(",");
                sb.append("'").append(new SuperDate(ut.getDisplaytime()).getDateTimeString()).append("'").append(",");
                sb.append("'").append(new SuperDate(ut.getStarttime()).getDateTimeString()).append("'");
                sb.append(")");
                jdbcTemplate.update(sb.toString());
            }
        }
    }
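
    For completeness, a caller of this DAO can rely on the annotation-driven transaction support configured in spring-data.xml. The service below is a hypothetical sketch (class and method names are made up), just to show that nothing sharding-specific leaks into the business code:

    package com.base.service;

    import java.util.List;

    import org.springframework.beans.factory.annotation.Autowired;
    import org.springframework.stereotype.Service;
    import org.springframework.transaction.annotation.Transactional;

    import com.base.bean.UserTicket;
    import com.base.dao.UserTicketDao;

    @Service
    public class UserTicketService {

        @Autowired
        private UserTicketDao userTicketDao;

        // @Transactional is picked up by <tx:annotation-driven>, so the update runs
        // under shardingTransactionManager against the sharded data source.
        @Transactional
        public void updateTicket(UserTicket ticket) {
            userTicketDao.updateUserTicket(ticket);
        }

        public List<UserTicket> firstPage(String username) throws Exception {
            return userTicketDao.getUserTicketDetail(username, 1, 20);
        }
    }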
    

    Finally, a few pitfalls we hit during the migration. Although usage is basically the same as with a plain data source, two things need attention: 1. do not prefix table names with the database name; with plain JDBC it makes no difference whether the table is qualified, but with Sharding-JDBC a qualified table name causes an error (a small sketch of this follows below); 2. Sharding-JDBC does not support JdbcTemplate's batch update operations.
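
    A minimal sketch of the first pitfall (the schema name mydb and the status value are made up for illustration):

    package com.base.dao;

    import org.springframework.jdbc.core.JdbcTemplate;

    public class TableNamePitfallDemo {

        // Works: the unqualified logic table user_ticket is rewritten by Sharding-JDBC
        // to the matching physical table (user_ticket_01 .. user_ticket_20).
        public int updateWithLogicTable(JdbcTemplate shardingJdbcTemplate, String ticketno, String username) {
            return shardingJdbcTemplate.update(
                    "update user_ticket set status=1 where ticketno=? and username=?", ticketno, username);
        }

        // Errors out with the sharded data source: mydb.user_ticket does not match the
        // configured logic table, even though the same SQL is fine against a plain data source.
        public int updateWithQualifiedTable(JdbcTemplate shardingJdbcTemplate, String ticketno, String username) {
            return shardingJdbcTemplate.update(
                    "update mydb.user_ticket set status=1 where ticketno=? and username=?", ticketno, username);
        }
    }

    The second pitfall is presumably why batchSaveUserTicket above inserts rows one at a time instead of calling jdbcTemplate.batchUpdate.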

    Corrections and feedback are welcome.
