logback.xml详解

<?xml version="1.0" encoding="utf-8"?>

<!-- scan:当此属性设置为true时,配置文件如果发生改变,将会被重新加载,默认值为true。 scanPeriod:设置监测配置文件是否有修改的时间间隔,如果没有给出时间单位,默认单位是毫秒当scan为true时,此属性生效。默认的时间间隔为1分钟。 
debug:当此属性设置为true时,将打印出logback内部日志信息,实时查看logback运行状态。默认值为false。 -->
<configuration scan="true" scanPeriod="10 seconds" debug="false"> 
  <!-- Timestamp captured once when the config is loaded; used to name the logger context -->
  <timestamp key="bySecond" datePattern="yyyy-MM-dd'T'HHmmss"/>
  <contextName>${bySecond}</contextName>
  <!-- address performance concern with jul-to-slf4j -->
  <contextListener class="ch.qos.logback.classic.jul.LevelChangePropagator">
    <resetJUL>true</resetJUL>
  </contextListener>
  <!-- To enable JMX Management -->
  <jmxConfigurator/>
  <!-- Root directory for all log output. On Linux this should be an absolute path. -->
  <property name="LOG_HOME" value="./logs"/>
  <!-- Application log file base name -->
  <property name="log.app" value="${LOG_HOME}/app"/>
  <!-- Access (ops monitoring) log base name -->
  <property name="log.access" value="${LOG_HOME}/access"/>
  <!-- Interface log base name -->
  <property name="log.interface" value="${LOG_HOME}/interface"/>
  <!-- ERROR-level log base name -->
  <property name="log.error" value="${LOG_HOME}/error"/>
  <!-- Remote (third-party call) log base name -->
  <property name="log.remote" value="${LOG_HOME}/remote"/>
  <!-- Maximum size of a single rolled file -->
  <property name="log.maxFileSize" value="10MB"/>
  <!-- Keep archives for 60 rolling periods -->
  <property name="log.maxHistory" value="60"/>
  <!-- Total size cap for all archived files: 30GB -->
  <property name="log.totalSizeCap" value="30GB"/>
  <!-- ch.qos.logback.core.ConsoleAppender writes to the console -->
  <appender name="stdout" class="ch.qos.logback.core.ConsoleAppender">
    <!-- %date: timestamp, %-5level: level left-padded to 5 chars,
         %logger{80}: logger name abbreviated to at most 80 chars (split on dots),
         %X{transactionId}: MDC value, %msg: message, %n: newline.
         Since logback 0.9.19 appenders take an <encoder>; the old <layout>
         form is deprecated and was replaced here. -->
    <encoder>
      <pattern>%date{yyyy-MM-dd HH:mm:ss} %-5level %logger{80} %X{transactionId} - %msg%n</pattern>
    </encoder>
  </appender>
  <!-- Rolling file appender: writes to a fixed file, then rolls it over
       to archive files when the rolling policy triggers -->
  <appender name="appLogAppender" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- Drop events below INFO (i.e. TRACE and DEBUG).
         ThresholdFilter has no onMatch/onMismatch properties (those belong to
         LevelFilter); setting them here caused configuration errors, so they
         were removed -->
    <filter class="ch.qos.logback.classic.filter.ThresholdFilter">
      <level>INFO</level>
    </filter>
    <!-- Active log file -->
    <File>${log.app}/app.log</File>
    <!-- true: append to an existing file; false: truncate it on startup -->
    <append>true</append>
    <!-- TimeBasedRollingPolicy both performs and triggers the roll-over -->
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <!-- %d{yyyy-MM-dd-HH}: roll every hour; %i: extra index used when the
           file exceeds maxFileSize within the same hour -->
      <fileNamePattern>${log.app}/app.%d{yyyy-MM-dd-HH}.%i.log</fileNamePattern>
      <!-- Keep at most this many rolling periods; older archives (and any
           directories created solely for archiving) are deleted -->
      <MaxHistory>${log.maxHistory}</MaxHistory>
      <!-- Size-based rolling must be configured through
           timeBasedFileNamingAndTriggeringPolicy here; a bare
           SizeBasedTriggeringPolicy would not work with this policy -->
      <timeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <maxFileSize>${log.maxFileSize}</maxFileSize>
      </timeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <!-- %date: timestamp, %-5level: padded level, %logger{80}: abbreviated
         logger name, %X{transactionId}: MDC value, %msg: message, %n: newline.
         The duplicate <layout> that used to follow this encoder was removed:
         an appender uses a single encoder, and the extra layout conflicted.
         charset added for consistency with the other file appenders. -->
    <encoder>
      <Pattern>%date{yyyy-MM-dd HH:mm:ss} %-5level [%logger{80} ] %X{transactionId} - %msg%n</Pattern>
      <charset>UTF-8</charset>
    </encoder>
  </appender>
  <!-- Ops monitoring (access) log -->
  <appender name="access" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- LevelFilter accepts exactly INFO and denies everything else
         (including WARN and ERROR — not just TRACE/DEBUG) -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
      <level>INFO</level>
      <onMatch>ACCEPT</onMatch>
      <onMismatch>DENY</onMismatch>
    </filter>
    <File>${log.access}/access.log</File>
    <!-- true: append to an existing file; false: truncate it on startup -->
    <append>true</append>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <FileNamePattern>${log.access}/access.log.%d{yyyy-MM-dd-HH}.%i</FileNamePattern>
      <!-- Added: without MaxHistory old archives were never deleted -->
      <MaxHistory>${log.maxHistory}</MaxHistory>
      <TimeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <MaxFileSize>${log.maxFileSize}</MaxFileSize>
      </TimeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <!-- Raw message only: access entries carry their own formatting -->
    <encoder>
      <Pattern>%msg%n</Pattern>
      <charset>UTF-8</charset>
    </encoder>
  </appender>
  <!-- Interface log: requests made to this service's own endpoints -->
  <appender name="interface" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- LevelFilter accepts exactly INFO and denies everything else
         (including WARN and ERROR — not just TRACE/DEBUG) -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
      <level>INFO</level>
      <onMatch>ACCEPT</onMatch>
      <onMismatch>DENY</onMismatch>
    </filter>
    <File>${log.interface}/interface.log</File>
    <!-- true: append to an existing file; false: truncate it on startup -->
    <append>true</append>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <FileNamePattern>${log.interface}/interface.log.%d{yyyy-MM-dd-HH}.%i</FileNamePattern>
      <!-- Added: without MaxHistory old archives were never deleted -->
      <MaxHistory>${log.maxHistory}</MaxHistory>
      <TimeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <MaxFileSize>${log.maxFileSize}</MaxFileSize>
      </TimeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <encoder>
      <Pattern>%date{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level [%logger{80} ] %X{transactionId} - %msg%n</Pattern>
      <charset>UTF-8</charset>
    </encoder>
  </appender>
  <!-- Aggregated error log: business errors plus system errors -->
  <appender name="errorlog" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- Accept only ERROR-level events -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
      <level>ERROR</level>
      <onMatch>ACCEPT</onMatch>
      <onMismatch>DENY</onMismatch>
    </filter>
    <File>${log.error}/error.log</File>
    <!-- true: append to an existing file; false: truncate it on startup -->
    <append>true</append>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <!-- ".%i" separator (was "-%i") for consistency with the other appenders -->
      <fileNamePattern>${log.error}/error.log.%d{yyyy-MM-dd-HH}.%i</fileNamePattern>
      <!-- Added: without MaxHistory old archives were never deleted -->
      <MaxHistory>${log.maxHistory}</MaxHistory>
      <TimeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <MaxFileSize>${log.maxFileSize}</MaxFileSize>
      </TimeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <encoder>
      <Pattern>%date{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level [%logger{80} ] %X{transactionId} - %msg%n</Pattern>
      <charset>UTF-8</charset>
    </encoder>
  </appender>
  <!-- Remote log: calls made to third-party services (dubbo, webservice) -->
  <appender name="remote" class="ch.qos.logback.core.rolling.RollingFileAppender">
    <!-- LevelFilter accepts exactly INFO and denies everything else
         (including WARN and ERROR — not just TRACE/DEBUG) -->
    <filter class="ch.qos.logback.classic.filter.LevelFilter">
      <level>INFO</level>
      <onMatch>ACCEPT</onMatch>
      <onMismatch>DENY</onMismatch>
    </filter>
    <File>${log.remote}/remote.log</File>
    <append>true</append>
    <rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
      <FileNamePattern>${log.remote}/remote.log.%d{yyyy-MM-dd-HH}.%i</FileNamePattern>
      <!-- Added: without MaxHistory old archives were never deleted -->
      <MaxHistory>${log.maxHistory}</MaxHistory>
      <TimeBasedFileNamingAndTriggeringPolicy class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
        <MaxFileSize>${log.maxFileSize}</MaxFileSize>
      </TimeBasedFileNamingAndTriggeringPolicy>
    </rollingPolicy>
    <encoder>
      <Pattern>%date{yyyy-MM-dd HH:mm:ss.SSS} [%t] %-5level [%logger{80} ] %X{transactionId} - %msg%n</Pattern>
      <charset>UTF-8</charset>
    </encoder>
  </appender>
  <!-- flume start -->  
  <!-- <appender name="log_to_flume_tracer"
class="com.gilt.logback.flume.FlumeLogstashV1Appender">
<flumeAgents> 172.30.0.52:5070 </flumeAgents>
<flumeProperties> connect-timeout=4000;
request-timeout=8000
</flumeProperties>
<reporterMaxThreadPoolSize>6</reporterMaxThreadPoolSize>
<reporterMaxQueueSize>100</reporterMaxQueueSize>
<batchSize>100</batchSize>
<reportingWindow>10</reportingWindow>
<additionalAvroHeaders> logType=log-type
</additionalAvroHeaders>
<application>smapleapp</application>
<layout class="ch.qos.logback.classic.PatternLayout">
<pattern>%msg%n</pattern>
</layout>
</appender>
<logger name="com.iboxpay.base.tracer.Node" level="INFO"
additivity="false">
<appender-ref ref="log_to_flume_tracer" />
</logger> -->  
  <!-- flume end -->  
  <!-- sql日志打印 -->  
  <!-- <appender name="SQL_FILE"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<rollingPolicy
class="ch.qos.logback.core.rolling.SizeAndTimeBasedRollingPolicy">
把按大小日期分割出来的文件压缩成zip格式
<fileNamePattern>${LOG_HOME}/sql_%d{yyyy-MM-dd-HH}.%i.log.zip
</fileNamePattern>
单个文件最大大小
<maxFileSize>${log.maxFileSize}</maxFileSize>
日志保存60天
<maxHistory>${log.maxHistory}</maxHistory>
压缩文件总共大小最大30G
<totalSizeCap>${log.totalSizeCap}</totalSizeCap>
</rollingPolicy>
<encoder charset="UTF-8">
<pattern>%d{yyyy-MM-dd HH:mm:ss.SSS} %level [%F:%L] %msg%n</pattern>
</encoder>
</appender> -->  
  <!-- A <logger> configures level/appenders for a logger-name prefix (package).
       Levels: TRACE < DEBUG < INFO < WARN < ERROR.
       additivity="false": use only this logger's appender-refs;
       additivity="true" (default): also write to the root logger's appenders. -->
  <!-- hibernate logger -->
  <!-- <logger name="org.hibernate.type.descriptor.sql.BasicBinder" level="TRACE">
<appender-ref ref="SQL_FILE" />
</logger>
<logger name="org.hibernate.type.descriptor.sql.BasicExtractor" level="DEBUG">
<appender-ref ref="SQL_FILE" />
</logger>
<logger name="org.hibernate.SQL" level="DEBUG">
<appender-ref ref="SQL_FILE" />
</logger>
<logger name="org.hibernate.type" level="INFO">
<appender-ref ref="SQL_FILE" />
</logger>
<logger name="org.hibernate.engine.QueryParameters" level="DEBUG">
<appender-ref ref="SQL_FILE" />
</logger>
<logger name="org.hibernate.engine.query.HQLQueryPlan" level="DEBUG">
<appender-ref ref="SQL_FILE" />
</logger> -->
  <!-- show bound parameters for Hibernate SQL -->
  <logger name="org.hibernate.type.descriptor.sql.BasicBinder" level="TRACE"/>
  <logger name="org.hibernate.type.descriptor.sql.BasicExtractor" level="DEBUG"/>
  <logger name="org.hibernate.SQL" level="DEBUG"/>
  <logger name="org.hibernate.engine.QueryParameters" level="DEBUG"/>
  <logger name="org.hibernate.engine.query.HQLQueryPlan" level="DEBUG"/>
  <logger name="org.jeecgframework" level="info" additivity="false">
    <appender-ref ref="appLogAppender"/>
  </logger>
  <!-- MyBatis log configuration.
       Fixed: the MyBatis root package is org.apache.ibatis, not com.apache.ibatis,
       so the old name matched no loggers and the TRACE setting was dead. -->
  <logger name="org.apache.ibatis" level="TRACE"/>
  <logger name="java.sql.Connection" level="DEBUG"/>
  <logger name="java.sql.Statement" level="DEBUG"/>
  <logger name="java.sql.PreparedStatement" level="DEBUG"/>
  <!--druid log configure -->  
  <!-- <appender name="Druid"
class="ch.qos.logback.core.rolling.RollingFileAppender">
<filter class="ch.qos.logback.classic.filter.LevelFilter">
<level>ERROR</level>
<onMatch>ACCEPT</onMatch>
<onMismatch>DENY</onMismatch>
</filter>
<File>${log.druid}/druid.log</File>
<rollingPolicy class="ch.qos.logback.core.rolling.TimeBasedRollingPolicy">
<FileNamePattern>
${log.druid}/druid.log.%d{yyyy-MM-dd-HH}.%i
</FileNamePattern>
<TimeBasedFileNamingAndTriggeringPolicy
class="ch.qos.logback.core.rolling.SizeAndTimeBasedFNATP">
<MaxFileSize>${log.maxFileSize}</MaxFileSize>
</TimeBasedFileNamingAndTriggeringPolicy>
</rollingPolicy>
<encoder>
<Pattern>%date{yyyy-MM-dd HH:mm:ss} %-5level [%logger{80} ] - %msg%n
</Pattern>
<charset>UTF-8</charset>
</encoder>
</appender>
<logger name="com.alibaba.druid" additivity="true">
<appender-ref ref="Druid" />
</logger> -->  
  <!-- root is the parent of every logger. A class maps to exactly one logger:
       either an explicitly defined <logger> or, by default, root. That logger's
       level and appender-refs decide what gets written where. -->
  <root level="info"> 
    <appender-ref ref="stdout"/>  
    <appender-ref ref="appLogAppender"/>  
    <appender-ref ref="access"/>  
    <appender-ref ref="interface"/>  
    <appender-ref ref="errorlog"/>  
    <appender-ref ref="remote"/> 
  </root> 
</configuration>

 

pom.xml文件增加包引入:

<!-- logback-classic/core 1.2.3 are affected by CVE-2021-42550;
     1.2.13 is the patched release on the 1.2.x line and is drop-in compatible -->
<dependency>
  <groupId>ch.qos.logback</groupId>
  <artifactId>logback-classic</artifactId>
  <version>1.2.13</version>
</dependency>
<dependency>
  <groupId>ch.qos.logback</groupId>
  <artifactId>logback-core</artifactId>
  <version>1.2.13</version>
</dependency>
<dependency>
  <groupId>ch.qos.logback</groupId>
  <artifactId>logback-access</artifactId>
  <version>1.2.13</version>
</dependency>
<dependency>
  <groupId>org.logback-extensions</groupId>
  <artifactId>logback-ext-spring</artifactId>
  <version>0.1.4</version>
</dependency>
View Code

web.xml文件配置:

<!-- Location of the logback configuration, read by LogbackConfigListener below -->
<context-param>
<param-name>logbackConfigLocation</param-name>
<param-value>classpath:logback.xml</param-value>
</context-param>
<!-- Initializes logback from logbackConfigLocation at webapp startup -->
<listener>
<listener-class>ch.qos.logback.ext.spring.web.LogbackConfigListener</listener-class>
</listener>
View Code

 

posted @ 2017-07-03 16:59  归去来2011  阅读(490)  评论(0)    收藏  举报