How can the SQL execution statistics and connection pool resource metrics collected by the Druid connection pool be persisted, so that operations staff can analyze and optimize the system? The following example has passed an initial test.
Step 1:
Create a MyDruidStatLogger class that extends DruidDataSourceStatLoggerAdapter and implements DruidDataSourceStatLogger. The full code is below. This example only receives the statistics and prints them to the console; a real application needs to implement an actual storage scheme (see the persistence sketch after the listing).
package com.andaily.web.context;

import static com.alibaba.druid.util.JdbcSqlStatUtils.rtrim;

import java.util.ArrayList;
import java.util.LinkedHashMap;
import java.util.Map;
import java.util.Properties;

import com.alibaba.druid.pool.DruidDataSourceStatLogger;
import com.alibaba.druid.pool.DruidDataSourceStatLoggerAdapter;
import com.alibaba.druid.pool.DruidDataSourceStatValue;
import com.alibaba.druid.stat.JdbcSqlStatValue;
import com.alibaba.druid.support.json.JSONUtils;
import com.alibaba.druid.support.logging.Log;
import com.alibaba.druid.support.logging.LogFactory;

public class MyDruidStatLogger extends DruidDataSourceStatLoggerAdapter implements DruidDataSourceStatLogger {

    private static final Log LOG = LogFactory.getLog(MyDruidStatLogger.class);

    private Log logger = LOG;

    public MyDruidStatLogger() {
        this.configFromProperties(System.getProperties());
    }

    @Override
    public void configFromProperties(Properties properties) {
        String property = properties.getProperty("druid.stat.loggerName");
        if (property != null && property.length() > 0) {
            setLoggerName(property);
        }
    }

    public Log getLogger() {
        return logger;
    }

    @Override
    public void setLoggerName(String loggerName) {
        logger = LogFactory.getLog(loggerName);
    }

    @Override
    public void setLogger(Log logger) {
        if (logger == null) {
            throw new IllegalArgumentException("logger can not be null");
        }
        this.logger = logger;
    }

    public boolean isLogEnable() {
        return true;
    }

    public void log(String value) {
        logger.info(value);
    }

    @Override
    public void log(DruidDataSourceStatValue statValue) {
        Map<String, Object> map = new LinkedHashMap<String, Object>();

        // Pool-level statistics; zero-valued counters are skipped to keep the JSON compact.
        map.put("url", statValue.getUrl());
        map.put("dbType", statValue.getDbType());
        map.put("name", statValue.getName());
        map.put("activeCount", statValue.getActiveCount());
        if (statValue.getActivePeak() > 0) {
            map.put("activePeak", statValue.getActivePeak());
            map.put("activePeakTime", statValue.getActivePeakTime());
        }
        map.put("poolingCount", statValue.getPoolingCount());
        if (statValue.getPoolingPeak() > 0) {
            map.put("poolingPeak", statValue.getPoolingPeak());
            map.put("poolingPeakTime", statValue.getPoolingPeakTime());
        }
        map.put("connectCount", statValue.getConnectCount());
        map.put("closeCount", statValue.getCloseCount());
        if (statValue.getWaitThreadCount() > 0) {
            map.put("waitThreadCount", statValue.getWaitThreadCount());
        }
        if (statValue.getNotEmptyWaitCount() > 0) {
            map.put("notEmptyWaitCount", statValue.getNotEmptyWaitCount());
        }
        if (statValue.getNotEmptyWaitMillis() > 0) {
            map.put("notEmptyWaitMillis", statValue.getNotEmptyWaitMillis());
        }
        if (statValue.getLogicConnectErrorCount() > 0) {
            map.put("logicConnectErrorCount", statValue.getLogicConnectErrorCount());
        }
        if (statValue.getPhysicalConnectCount() > 0) {
            map.put("physicalConnectCount", statValue.getPhysicalConnectCount());
        }
        if (statValue.getPhysicalCloseCount() > 0) {
            map.put("physicalCloseCount", statValue.getPhysicalCloseCount());
        }
        if (statValue.getPhysicalConnectErrorCount() > 0) {
            map.put("physicalConnectErrorCount", statValue.getPhysicalConnectErrorCount());
        }
        if (statValue.getExecuteCount() > 0) {
            map.put("executeCount", statValue.getExecuteCount());
        }
        if (statValue.getErrorCount() > 0) {
            map.put("errorCount", statValue.getErrorCount());
        }
        if (statValue.getCommitCount() > 0) {
            map.put("commitCount", statValue.getCommitCount());
        }
        if (statValue.getRollbackCount() > 0) {
            map.put("rollbackCount", statValue.getRollbackCount());
        }
        if (statValue.getPstmtCacheHitCount() > 0) {
            map.put("pstmtCacheHitCount", statValue.getPstmtCacheHitCount());
        }
        if (statValue.getPstmtCacheMissCount() > 0) {
            map.put("pstmtCacheMissCount", statValue.getPstmtCacheMissCount());
        }
        if (statValue.getStartTransactionCount() > 0) {
            map.put("startTransactionCount", statValue.getStartTransactionCount());
            map.put("transactionHistogram", rtrim(statValue.getTransactionHistogram()));
        }
        if (statValue.getConnectCount() > 0) {
            map.put("connectionHoldTimeHistogram", rtrim(statValue.getConnectionHoldTimeHistogram()));
        }
        if (statValue.getClobOpenCount() > 0) {
            map.put("clobOpenCount", statValue.getClobOpenCount());
        }
        if (statValue.getBlobOpenCount() > 0) {
            map.put("blobOpenCount", statValue.getBlobOpenCount());
        }
        if (statValue.getSqlSkipCount() > 0) {
            map.put("sqlSkipCount", statValue.getSqlSkipCount());
        }

        // Per-SQL statistics: one entry per statement tracked by the stat filter.
        ArrayList<Map<String, Object>> sqlList = new ArrayList<Map<String, Object>>();
        if (statValue.getSqlList().size() > 0) {
            for (JdbcSqlStatValue sqlStat : statValue.getSqlList()) {
                Map<String, Object> sqlStatMap = new LinkedHashMap<String, Object>();
                sqlStatMap.put("sql", sqlStat.getSql());
                if (sqlStat.getExecuteCount() > 0) {
                    sqlStatMap.put("executeCount", sqlStat.getExecuteCount());
                    sqlStatMap.put("executeMillisMax", sqlStat.getExecuteMillisMax());
                    sqlStatMap.put("executeMillisTotal", sqlStat.getExecuteMillisTotal());
                    sqlStatMap.put("executeHistogram", rtrim(sqlStat.getExecuteHistogram()));
                    sqlStatMap.put("executeAndResultHoldHistogram", rtrim(sqlStat.getExecuteAndResultHoldHistogram()));
                }
                long executeErrorCount = sqlStat.getExecuteErrorCount();
                if (executeErrorCount > 0) {
                    sqlStatMap.put("executeErrorCount", executeErrorCount);
                }
                int runningCount = sqlStat.getRunningCount();
                if (runningCount > 0) {
                    sqlStatMap.put("runningCount", runningCount);
                }
                int concurrentMax = sqlStat.getConcurrentMax();
                if (concurrentMax > 0) {
                    sqlStatMap.put("concurrentMax", concurrentMax);
                }
                if (sqlStat.getFetchRowCount() > 0) {
                    sqlStatMap.put("fetchRowCount", sqlStat.getFetchRowCount());
                    sqlStatMap.put("fetchRowCountMax", sqlStat.getFetchRowCountMax());
                    sqlStatMap.put("fetchRowHistogram", rtrim(sqlStat.getFetchRowHistogram()));
                }
                if (sqlStat.getUpdateCount() > 0) {
                    sqlStatMap.put("updateCount", sqlStat.getUpdateCount());
                    sqlStatMap.put("updateCountMax", sqlStat.getUpdateCountMax());
                    sqlStatMap.put("updateHistogram", rtrim(sqlStat.getUpdateHistogram()));
                }
                if (sqlStat.getInTransactionCount() > 0) {
                    sqlStatMap.put("inTransactionCount", sqlStat.getInTransactionCount());
                }
                if (sqlStat.getClobOpenCount() > 0) {
                    sqlStatMap.put("clobOpenCount", sqlStat.getClobOpenCount());
                }
                if (sqlStat.getBlobOpenCount() > 0) {
                    sqlStatMap.put("blobOpenCount", sqlStat.getBlobOpenCount());
                }
                sqlList.add(sqlStatMap);
            }
            map.put("sqlList", sqlList);
        }
        if (statValue.getKeepAliveCheckCount() > 0) {
            map.put("keepAliveCheckCount", statValue.getKeepAliveCheckCount());
        }

        String text = JSONUtils.toJSONString(map);
        // Console output only; replace with a real storage implementation in production.
        System.out.println("==============:" + text);
    }
}
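To actually persist the snapshots instead of printing them, one option is to write each JSON snapshot into a monitoring table. The following is a minimal sketch under stated assumptions: the druid_stat_log table, its columns, and the separate monitorDataSource are hypothetical names introduced here for illustration, not part of the original example.

package com.andaily.web.context;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.sql.Timestamp;

import javax.sql.DataSource;

import com.alibaba.druid.pool.DruidDataSourceStatValue;

// Minimal persistence sketch. Assumes a table such as:
//   CREATE TABLE druid_stat_log (log_time TIMESTAMP, pool_name VARCHAR(100), stat_json TEXT);
// Both the table and the monitorDataSource are hypothetical.
public class JdbcStatWriter {

    private final DataSource monitorDataSource; // a separate pool for the monitoring database

    public JdbcStatWriter(DataSource monitorDataSource) {
        this.monitorDataSource = monitorDataSource;
    }

    // Called from MyDruidStatLogger.log(...) with the JSON text built there.
    public void write(DruidDataSourceStatValue statValue, String statJson) {
        String sql = "INSERT INTO druid_stat_log (log_time, pool_name, stat_json) VALUES (?, ?, ?)";
        try (Connection conn = monitorDataSource.getConnection();
             PreparedStatement ps = conn.prepareStatement(sql)) {
            ps.setTimestamp(1, new Timestamp(System.currentTimeMillis()));
            ps.setString(2, statValue.getName());
            ps.setString(3, statJson);
            ps.executeUpdate();
        } catch (SQLException e) {
            // Catching here keeps a monitoring failure from affecting the application;
            // route this to your regular logging facility in real code.
            e.printStackTrace();
        }
    }
}

With such a writer in place, the System.out.println call at the end of log(...) would become something like statWriter.write(statValue, text).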
Step 2: configure the Spring beans
<bean id="myStatLogger" class="com.andaily.web.context.MyDruidStatLogger"/>

<bean id="dataSource" class="com.alibaba.druid.pool.DruidDataSource" init-method="init" destroy-method="close">
    <!-- Basic connection properties: url, username, password -->
    <property name="url" value="${jdbc.url}" />
    <property name="username" value="${jdbc.username}" />
    <property name="password" value="${jdbc.password}" />
    <!-- Initial, minimum idle, and maximum pool sizes -->
    <property name="initialSize" value="1" />
    <property name="minIdle" value="1" />
    <property name="maxActive" value="20" />
    <!-- Maximum wait time when acquiring a connection, in milliseconds -->
    <property name="maxWait" value="60000" />
    <!-- Interval between eviction runs that close idle connections, in milliseconds -->
    <property name="timeBetweenEvictionRunsMillis" value="60000" />
    <!-- Minimum time a connection must sit idle in the pool before it may be evicted, in milliseconds -->
    <property name="minEvictableIdleTimeMillis" value="300000" />
    <property name="validationQuery" value="SELECT 'x'" />
    <property name="testWhileIdle" value="true" />
    <property name="testOnBorrow" value="false" />
    <property name="testOnReturn" value="false" />
    <!-- Enable PSCache and set its size per connection -->
    <property name="poolPreparedStatements" value="true" />
    <property name="maxPoolPreparedStatementPerConnectionSize" value="20" />
    <!-- Filters for monitoring statistics; without "stat", no SQL statistics are collected -->
    <property name="filters" value="stat" />
    <!-- Push a statistics snapshot to the statLogger every 1000 ms -->
    <property name="timeBetweenLogStatsMillis" value="1000" />
    <property name="statLogger" ref="myStatLogger"/>
</bean>
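If the pool is wired in code rather than Spring XML, the same configuration is a handful of setter calls on DruidDataSource. A minimal sketch; the JDBC URL and credentials are placeholders:

package com.andaily.web.context;

import java.sql.SQLException;

import com.alibaba.druid.pool.DruidDataSource;

public class DataSourceFactory {

    // Placeholder connection settings; substitute your own.
    public static DruidDataSource create() throws SQLException {
        DruidDataSource ds = new DruidDataSource();
        ds.setUrl("jdbc:mysql://localhost:3306/test");
        ds.setUsername("user");
        ds.setPassword("pass");
        ds.setInitialSize(1);
        ds.setMinIdle(1);
        ds.setMaxActive(20);
        ds.setMaxWait(60000);
        ds.setFilters("stat");                  // required, or no SQL statistics are collected
        ds.setTimeBetweenLogStatsMillis(1000);  // push a stats snapshot to the logger every second
        ds.setStatLogger(new MyDruidStatLogger());
        ds.init();
        return ds;
    }
}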
After startup, the Druid monitoring JSON is printed to the console once per timeBetweenLogStatsMillis interval (every second with the configuration above; production systems usually choose a much longer interval, such as 60000 ms).
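To produce SQL statistics worth logging, run at least one query through the pool. A minimal driver sketch, reusing the hypothetical DataSourceFactory above; the SELECT 1 statement is just a stand-in:

package com.andaily.web.context;

import java.sql.Connection;
import java.sql.PreparedStatement;
import java.sql.ResultSet;

import com.alibaba.druid.pool.DruidDataSource;

public class StatLoggerDemo {

    public static void main(String[] args) throws Exception {
        DruidDataSource ds = DataSourceFactory.create();
        try (Connection conn = ds.getConnection();
             PreparedStatement ps = conn.prepareStatement("SELECT 1");
             ResultSet rs = ps.executeQuery()) {
            while (rs.next()) {
                // consuming the result ensures fetchRowCount is recorded
            }
        }
        // Give the stat timer a chance to fire at least once (interval is 1000 ms above).
        Thread.sleep(2000);
        ds.close();
    }
}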