Data Source Monitoring - Monitoring Series, Part 1

Author: liaomengge | Published 2019-10-27 16:25

    Background:
    When configuring the data sources for production databases, how do we set the parameters sensibly and get a clearer picture of how the online services are actually running? Below we look at how to monitor the most commonly used data sources.

    1. Druid Monitoring

    Problem:
    As is well known, Alibaba Druid already ships with fairly complete database monitoring, but it has obvious drawbacks: values such as the pool's connection counts shown on the monitoring page are only point-in-time snapshots, the data is not persisted, and it cannot be integrated with the company's internal monitoring and alerting.
    Solution:
    Periodically poll Druid's built-in statistics and report them to the metrics backend:

    private class DruidStatsThread extends Thread {
    
        public DruidStatsThread(String name) {
            super(name);
            this.setDaemon(true);
        }
    
        @Override
        public void run() {
            long initialDelay = metricDruidProperties.getInitialDelay() * 1000;
            if (initialDelay > 0) {
                MwThreadUtil.sleep(initialDelay);
            }
            while (!this.isInterrupted()) {
                try {
                    try {
                        Set<DruidDataSource> druidDataSources =
                                DruidDataSourceStatManager.getDruidDataSourceInstances();
                        Optional.ofNullable(druidDataSources).ifPresent(val -> val.forEach(druidDataSource -> {
                            DruidDataSourceStatValue statValue = druidDataSource.getStatValueAndReset();
                            long maxWaitMillis = druidDataSource.getMaxWait(); // maximum time to wait for a connection
                            long waitThreadCount = statValue.getWaitThreadCount(); // threads currently waiting for a connection
                            long notEmptyWaitMillis = statValue.getNotEmptyWaitMillis(); // accumulated wait time when acquiring connections
                            long notEmptyWaitCount = statValue.getNotEmptyWaitCount(); // accumulated number of waits when acquiring connections

                            int maxActive = druidDataSource.getMaxActive(); // maximum number of active connections
                            int poolingCount = statValue.getPoolingCount(); // connections currently idle in the pool
                            int poolingPeak = statValue.getPoolingPeak(); // peak pool size
                            int activeCount = statValue.getActiveCount(); // connections currently in use
                            int activePeak = statValue.getActivePeak(); // peak number of active connections
    
                            if (Objects.nonNull(statsDClient)) {
                                URI jdbcUri = parseJdbcUrl(druidDataSource.getUrl());
                                Optional.ofNullable(jdbcUri).ifPresent(val2 -> {
                                    String host = StringUtils.replaceChars(val2.getHost(), '.', '_');
                                    String prefix = METRIC_DRUID_PREFIX + host + '.' + val2.getPort() + '.';
                                    statsDClient.recordExecutionTime(prefix + "maxWaitMillis", maxWaitMillis);
                                    statsDClient.recordExecutionTime(prefix + "waitThreadCount", waitThreadCount);
                                    statsDClient.recordExecutionTime(prefix + "notEmptyWaitMillis", notEmptyWaitMillis);
                                    statsDClient.recordExecutionTime(prefix + "notEmptyWaitCount", notEmptyWaitCount);
                                    statsDClient.recordExecutionTime(prefix + "maxActive", maxActive);
                                    statsDClient.recordExecutionTime(prefix + "poolingCount", poolingCount);
                                    statsDClient.recordExecutionTime(prefix + "poolingPeak", poolingPeak);
                                    statsDClient.recordExecutionTime(prefix + "activeCount", activeCount);
                                    statsDClient.recordExecutionTime(prefix + "activePeak", activePeak);
                                });
                            } else {
                                druidDataSource.logStats();
                            }
                        }));
                    } catch (Exception e) {
                        logger.error("druid stats exception", e);
                    }
                    TimeUnit.SECONDS.sleep(metricDruidProperties.getStatsInterval());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    logger.info("metric druid interrupt exit...");
                } catch (Exception e) {
                    logger.error("metric druid exception...", e);
                }
            }
        }
    }
    
    private URI parseJdbcUrl(String url) {
        if (StringUtils.isBlank(url) || !StringUtils.startsWith(url, "jdbc:")) {
            return null;
        }
        String cleanURI = url.substring(5);
        return URI.create(cleanURI);
    }
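
    The original post only shows the stats thread itself; how it gets started is not shown. Below is a minimal sketch of one way to wire it up inside the same enclosing class, assuming it is a Spring-managed bean (the lifecycle hooks and thread name are my assumptions, not the author's code):

    private DruidStatsThread druidStatsThread;

    @PostConstruct
    public void startDruidStats() {
        // start the daemon stats thread once the bean and its properties are ready
        druidStatsThread = new DruidStatsThread("metric-druid-stats");
        druidStatsThread.start();
    }

    @PreDestroy
    public void stopDruidStats() {
        // interrupt() wakes the sleeping thread, and the while (!isInterrupted()) loop then exits
        if (druidStatsThread != null) {
            druidStatsThread.interrupt();
        }
    }

    Note how parseJdbcUrl feeds the metric naming: a JDBC URL such as jdbc:mysql://10.2.3.4:3306/demo (a made-up example) produces the prefix METRIC_DRUID_PREFIX + "10_2_3_4.3306.", so every database instance gets its own metric namespace.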
    
    2. Hikari Monitoring

    Problem:
    For the HikariCP data source there is no unified monitoring handling out of the box, but it does expose the pool state through JMX MXBeans. In the same way as above, we can poll those beans and persist the metrics to the monitoring service.
    Solution:

    private class HikariStatsThread extends Thread {
    
        public HikariStatsThread(String name) {
            super(name);
            this.setDaemon(true);
        }
    
        @Override
        public void run() {
            long initialDelay = metricHikariProperties.getInitialDelay() * 1000;
            if (initialDelay > 0) {
                MwThreadUtil.sleep(initialDelay);
            }
            while (!this.isInterrupted()) {
                try {
                    // poll every registered HikariDataSource (collected elsewhere; see the sketch after this code)
                    Optional.ofNullable(hikariDataSources).ifPresent(val -> val.forEach(hikariDataSource -> {
                        URI jdbcUri = parseJdbcUrl(hikariDataSource.getJdbcUrl());
                        Optional.ofNullable(jdbcUri).ifPresent(val2 -> {
                            String host = StringUtils.replaceChars(val2.getHost(), '.', '_');
                            String prefix = METRIC_HIKARI_PREFIX + host + '.' + val2.getPort() + '.';
    
                            PoolStatBean poolStatBean = PoolStatBean.builder().build();
                            // runtime pool state exposed through Hikari's JMX pool MXBean
                            HikariPoolMXBean hikariPoolMXBean = hikariDataSource.getHikariPoolMXBean();
                            Optional.ofNullable(hikariPoolMXBean).ifPresent(val3 -> {
                                int activeConnections = val3.getActiveConnections();
                                int idleConnections = val3.getIdleConnections();
                                int totalConnections = val3.getTotalConnections();
                                int threadsAwaitingConnection = val3.getThreadsAwaitingConnection();
                                poolStatBean.setActiveConnections(activeConnections);
                                poolStatBean.setIdleConnections(idleConnections);
                                poolStatBean.setTotalConnections(totalConnections);
                                poolStatBean.setThreadsAwaitingConnection(threadsAwaitingConnection);
                            });
                            // static pool limits exposed through the config MXBean
                            HikariConfigMXBean hikariConfigMXBean = hikariDataSource.getHikariConfigMXBean();
                            Optional.ofNullable(hikariConfigMXBean).ifPresent(val3 -> {
                                int maximumPoolSize = val3.getMaximumPoolSize();
                                int minimumIdle = val3.getMinimumIdle();
                                poolStatBean.setMaximumPoolSize(maximumPoolSize);
                                poolStatBean.setMinimumIdle(minimumIdle);
                            });
                            statsPool(prefix, poolStatBean);
                        });
                    }));
                    TimeUnit.SECONDS.sleep(metricHikariProperties.getStatsInterval());
                } catch (InterruptedException e) {
                    Thread.currentThread().interrupt();
                    logger.info("metric hikari interrupt exit...");
                } catch (Exception e) {
                    logger.error("metric hikari exception...", e);
                }
            }
        }
    }
    
    private void statsPool(String prefix, PoolStatBean poolStatBean) {
        if (Objects.nonNull(statsDClient)) {
            statsDClient.recordExecutionTime(prefix + "activeConnections", poolStatBean.getActiveConnections());
            statsDClient.recordExecutionTime(prefix + "idleConnections", poolStatBean.getIdleConnections());
            statsDClient.recordExecutionTime(prefix + "totalConnections", poolStatBean.getTotalConnections());
            statsDClient.recordExecutionTime(prefix + "threadsAwaitingConnection",
                    poolStatBean.getThreadsAwaitingConnection());
            statsDClient.recordExecutionTime(prefix + "maximumPoolSize", poolStatBean.getMaximumPoolSize());
            statsDClient.recordExecutionTime(prefix + "minimumIdle", poolStatBean.getMinimumIdle());
            return;
        }
        StringBuilder sBuilder = new StringBuilder(16);
        sBuilder.append(prefix + "activeConnections => [" + poolStatBean.getActiveConnections() + "],");
        sBuilder.append(prefix + "idleConnections => [" + poolStatBean.getIdleConnections() + "],");
        sBuilder.append(prefix + "totalConnections => [" + poolStatBean.getTotalConnections() + "],");
        sBuilder.append(prefix + "threadsAwaitingConnection => [" + poolStatBean.getThreadsAwaitingConnection() + "],");
        sBuilder.append(prefix + "maximumPoolSize => [" + poolStatBean.getMaximumPoolSize() + "],");
        sBuilder.append(prefix + "minimumIdle => [" + poolStatBean.getMinimumIdle() + "]");
        logger.info(sBuilder.toString());
    }
    
    private URI parseJdbcUrl(String url) {
        if (StringUtils.isBlank(url) || !StringUtils.startsWith(url, "jdbc:")) {
            return null;
        }
        String cleanURI = url.substring(5);
        return URI.create(cleanURI);
    }
    
    @Data
    @Builder
    private static class PoolStatBean {
        private int activeConnections;
        private int idleConnections;
        private int totalConnections;
        private int threadsAwaitingConnection;
        private int maximumPoolSize;
        private int minimumIdle;
    }
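
    The hikariDataSources collection that the polling loop iterates over is not shown in the original post. One way to populate it, assuming the connection pools are Spring-managed DataSource beans (the field and setter below are my own sketch, not the author's code):

    private List<HikariDataSource> hikariDataSources;

    @Autowired
    public void setDataSources(Map<String, DataSource> dataSourceBeans) {
        // keep only the pools that are actually backed by HikariCP
        this.hikariDataSources = dataSourceBeans.values().stream()
                .filter(HikariDataSource.class::isInstance)
                .map(HikariDataSource.class::cast)
                .collect(Collectors.toList());
    }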
    

    Note: the above is just one possible solution; you could also collect these metrics with Prometheus and visualize them in Grafana or similar tools, as sketched below.
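
    For the Prometheus route with HikariCP specifically, here is a minimal sketch using HikariCP's built-in Micrometer support and Micrometer's Prometheus registry (the connection details are placeholders, and in a Spring Boot application Actuator can wire most of this up automatically):

    import com.zaxxer.hikari.HikariDataSource;
    import com.zaxxer.hikari.metrics.micrometer.MicrometerMetricsTrackerFactory;
    import io.micrometer.prometheus.PrometheusConfig;
    import io.micrometer.prometheus.PrometheusMeterRegistry;

    public class HikariPrometheusExample {

        public static void main(String[] args) {
            PrometheusMeterRegistry registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT);

            HikariDataSource dataSource = new HikariDataSource();
            dataSource.setJdbcUrl("jdbc:mysql://localhost:3306/demo"); // placeholder URL
            dataSource.setUsername("user");
            dataSource.setPassword("secret");
            // HikariCP registers pool gauges (active/idle/pending/total connections) with the registry
            dataSource.setMetricsTrackerFactory(new MicrometerMetricsTrackerFactory(registry));

            // the gauges appear once the pool starts (on the first getConnection());
            // registry.scrape() returns the Prometheus exposition text for a /metrics endpoint
            System.out.println(registry.scrape());
        }
    }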

