Kafka Source Code Analysis: LogManager

This post analyzes the LogManager in Kafka 0.8.2.

LogManager is the subsystem Kafka uses to manage log files; its source files live under the log directory.

We will walk through the LogManager source step by step, starting with the class initialization.

private def createLogManager(zkClient: ZkClient, brokerState: BrokerState): LogManager = {
  // This is the factory function called from KafkaServer.startup().
  val defaultLogConfig = LogConfig(segmentSize = config.logSegmentBytes,      // build the default LogConfig
                                   segmentMs = config.logRollTimeMillis,
                                   segmentJitterMs = config.logRollTimeJitterMillis,
                                   flushInterval = config.logFlushIntervalMessages,
                                   flushMs = config.logFlushIntervalMs.toLong,
                                   retentionSize = config.logRetentionBytes,
                                   retentionMs = config.logRetentionTimeMillis,
                                   maxMessageSize = config.messageMaxBytes,
                                   maxIndexSize = config.logIndexSizeMaxBytes,
                                   indexInterval = config.logIndexIntervalBytes,
                                   deleteRetentionMs = config.logCleanerDeleteRetentionMs,
                                   fileDeleteDelayMs = config.logDeleteDelayMs,
                                   minCleanableRatio = config.logCleanerMinCleanRatio,
                                   compact = config.logCleanupPolicy.trim.toLowerCase == "compact")
  val defaultProps = defaultLogConfig.toProps
  val configs = AdminUtils.fetchAllTopicConfigs(zkClient).mapValues(LogConfig.fromProps(defaultProps, _)) // read the per-topic log configurations from ZooKeeper
  val cleanerConfig = CleanerConfig(numThreads = config.logCleanerThreads,    // build the log cleaner (compaction) config
                                    dedupeBufferSize = config.logCleanerDedupeBufferSize,
                                    dedupeBufferLoadFactor = config.logCleanerDedupeBufferLoadFactor,
                                    ioBufferSize = config.logCleanerIoBufferSize,
                                    maxMessageSize = config.messageMaxBytes,
                                    maxIoBytesPerSecond = config.logCleanerIoMaxBytesPerSecond,
                                    backOffMs = config.logCleanerBackoffMs,
                                    enableCleaner = config.logCleanerEnable)
  // Create the LogManager instance itself.
  new LogManager(logDirs = config.logDirs.map(new File(_)).toArray,  // the log directories, i.e. log.dirs from the broker config file
                 topicConfigs = configs,                             // per-topic configs read from ZooKeeper
                 defaultConfig = defaultLogConfig,                   // default log config
                 cleanerConfig = cleanerConfig,                      // log compaction config
                 ioThreads = config.numRecoveryThreadsPerDataDir,    // number of recovery I/O threads per data directory
                 flushCheckMs = config.logFlushSchedulerIntervalMs,  // how often to check whether logs need flushing to disk
                 flushCheckpointMs = config.logFlushOffsetCheckpointIntervalMs, // how often to write the recovery-point checkpoint files
                 retentionCheckMs = config.logCleanupIntervalMs,     // how often the retention/cleanup task runs
                 scheduler = kafkaScheduler,                         // the scheduler created and started first in KafkaServer.startup(), used for background tasks
                 brokerState = brokerState,                          // broker state
                 time = time)
}

  The function above is the entry point where KafkaServer creates the LogManager object. Now let's look at the initialization of LogManager itself.

class LogManager(val logDirs: Array[File],           // class declaration; compare with the call site above, each parameter's type is explicit
                 val topicConfigs: Map[String, LogConfig],
                 val defaultConfig: LogConfig,
                 val cleanerConfig: CleanerConfig,
                 ioThreads: Int,
                 val flushCheckMs: Long,
                 val flushCheckpointMs: Long,
                 val retentionCheckMs: Long,
                 scheduler: Scheduler,
                 val brokerState: BrokerState,
                 private val time: Time) extends Logging {
  val RecoveryPointCheckpointFile = "recovery-point-offset-checkpoint" // default checkpoint file name
  val LockFile = ".lock"      // default lock file; after an unclean shutdown you can see this file left behind under each log dir
  val InitialTaskDelayMs = 30*1000 // initial delay for the background tasks
  private val logCreationOrDeletionLock = new Object
  private val logs = new Pool[TopicAndPartition, Log]() // `logs` holds the Log objects of all topic partitions; all later operations on logs go through it

  createAndValidateLogDirs(logDirs)   // create and validate the log directories
  private val dirLocks = lockLogDirs(logDirs) // lock each log directory by creating the lock file (the LockFile above)
  private val recoveryPointCheckpoints = logDirs.map(dir => (dir, new OffsetCheckpoint(new File(dir, RecoveryPointCheckpointFile)))).toMap      // one recovery-point checkpoint file per data directory, kept as a Map(File -> OffsetCheckpoint). The file is line oriented: the first line holds the version, the second line the number of entries, and every following line has the form "topic partition lastOffset", recording each partition's last checkpointed offset. recoveryPointCheckpoints is used later to look up the recovery point of every topic partition.
  loadLogs()  // build a Log object for every partition directory under every data directory and put it into `logs`

  // public, so we can access this from kafka.admin.DeleteTopicTest
  // If log compaction is enabled, create the LogCleaner (I have never used this feature myself).
  val cleaner: LogCleaner =
    if(cleanerConfig.enableCleaner)
      new LogCleaner(cleanerConfig, logDirs, logs, time = time)
    else
      null

  That covers the initialization. Now let's look at the concrete helper functions it uses.

private def createAndValidateLogDirs(dirs: Seq[File]) {   // the function used during initialization to create and validate the log dirs
    if(dirs.map(_.getCanonicalPath).toSet.size < dirs.size) // reject duplicate log directory entries
      throw new KafkaException("Duplicate log directory found: " + logDirs.mkString(", "))
    for(dir <- dirs) {
      if(!dir.exists) {  // create the directory if it does not exist
        info("Log directory '" + dir.getAbsolutePath + "' not found, creating it.")
        val created = dir.mkdirs()
        if(!created)
          throw new KafkaException("Failed to create data directory " + dir.getAbsolutePath)
      }
      if(!dir.isDirectory || !dir.canRead) // throw if it is not a directory or is not readable
        throw new KafkaException(dir.getAbsolutePath + " is not a readable log directory.")
    }
  }
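
  The duplicate check above compares canonical paths, so two entries that spell the same directory differently are still caught. A purely illustrative sketch (the paths are made up):

import java.io.File

// Both entries resolve to the same canonical path, so the Set collapses them
// and the size comparison used by createAndValidateLogDirs reports a duplicate.
val dirs = Seq(new File("/data1/kafka/logs"), new File("/data1/kafka/./logs"))
val hasDuplicates = dirs.map(_.getCanonicalPath).toSet.size < dirs.size // true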
  
  /**
   * Lock all the given directories
   */
  private def lockLogDirs(dirs: Seq[File]): Seq[FileLock] = {
    dirs.map { dir =>
      val lock = new FileLock(new File(dir, LockFile)) // create the lock file
      if(!lock.tryLock()) // try to acquire the lock
        throw new KafkaException("Failed to acquire lock on file .lock in " + lock.file.getParentFile.getAbsolutePath + 
                               ". A Kafka instance in another process or thread is using this directory.")
      lock // return the lock
    }
  }
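
  The FileLock used here is a small utility class in kafka.utils. As a hedged sketch of the underlying mechanism only, not Kafka's actual class, a directory lock built on java.nio file locks could look like this (names are illustrative):

import java.io.{File, RandomAccessFile}
import java.nio.channels.FileLock

// Sketch: open (or create) a ".lock" file inside the directory and try to take
// an OS-level exclusive lock on it; a second process attempting the same fails.
class DirLockSketch(dir: File) {
  private val lockFile = new File(dir, ".lock")
  lockFile.createNewFile()
  private val channel = new RandomAccessFile(lockFile, "rw").getChannel
  private var lock: FileLock = null

  def tryLock(): Boolean = {
    lock = channel.tryLock() // returns null if another process already holds the lock
    lock != null
  }

  def unlock(): Unit = {
    if (lock != null) lock.release()
    channel.close()
  }
}

  Because the lock is held by the running process, a broker that dies without cleaning up simply leaves the .lock file behind, which is why you can see it in the log dirs after an unclean shutdown.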

  The two functions above handle checking and locking the log directories. The loadLogs function below does the more involved work of loading all the logs under every log dir.

private def loadLogs(): Unit = {
    info("Loading logs.")

    val threadPools = mutable.ArrayBuffer.empty[ExecutorService]  // one thread pool per data directory
    val jobs = mutable.Map.empty[File, Seq[Future[_]]]  // the futures of the per-partition loading jobs, keyed by each directory's clean-shutdown file

    for (dir <- this.logDirs) {
      // Iterate over each data directory. log.dirs usually looks like
      // "/data1/kafka/logs,/data2/kafka/logs,/data3/kafka/logs", so dir is one individual data directory.
      val pool = Executors.newFixedThreadPool(ioThreads) // a fixed-size thread pool per data directory; since directories are normally spread over multiple disks, this is effectively one pool per disk
      threadPools.append(pool) // keep track of the pool so it can be shut down later

      val cleanShutdownFile = new File(dir, Log.CleanShutdownFile)
      // The marker file that tells us whether this data directory needs recovery. Log.CleanShutdownFile is defined in
      // Log.scala as ".kafka_cleanshutdown"; the File object also serves below as the key identifying this directory's jobs.

      if (cleanShutdownFile.exists) { // skip recovery for this directory if it was shut down cleanly, otherwise switch the broker state
        debug(
          "Found clean shutdown file. " +
          "Skipping recovery for all logs in data directory: " +
          dir.getAbsolutePath)
      } else {
        // log recovery itself is being performed by `Log` class during initialization
        brokerState.newState(RecoveringFromUncleanShutdown)
      }

      // A Map[TopicAndPartition, Long] produced by OffsetCheckpoint.read, which parses this directory's
      // recovery-point checkpoint file (the class and method are covered below).
      val recoveryPoints = this.recoveryPointCheckpoints(dir).read

      // Build the job set with a for comprehension: one runnable per partition directory.
      val jobsForDir = for {
        dirContent <- Option(dir.listFiles).toList
        logDir <- dirContent if logDir.isDirectory
      } yield {
        Utils.runnable { // Utils.runnable wraps this block as the run() body of a Runnable
          debug("Loading log '" + logDir.getName + "'")

          // The core work: derive the topic name and partition number from the directory name,
          // pick the per-topic config (or the default), and look up the recovery point for this partition.
          val topicPartition = Log.parseTopicPartitionName(logDir.getName) // topic and partition
          val config = topicConfigs.getOrElse(topicPartition.topic, defaultConfig) // config for this topic
          val logRecoveryPoint = recoveryPoints.getOrElse(topicPartition, 0L) // recovery point (last checkpointed offset)

          // Build the Log object from the recovery point and config, and put it into the global `logs` pool for later use.
          val current = new Log(logDir, config, logRecoveryPoint, scheduler, time)
          val previous = this.logs.put(topicPartition, current)

          if (previous != null) {
            throw new IllegalArgumentException(
              "Duplicate log directories found: %s, %s!".format(
                current.dir.getAbsolutePath, previous.dir.getAbsolutePath))
          }
        }
      }

      // Submit the runnables built above to this directory's thread pool and record the returned futures under its key.
      jobs(cleanShutdownFile) = jobsForDir.map(pool.submit).toSeq
    }

    // Finally wait for all jobs to finish, delete the clean-shutdown marker files, and shut down all worker pools.
    try {
      for ((cleanShutdownFile, dirJobs) <- jobs) {
        dirJobs.foreach(_.get) // wait for each job
        cleanShutdownFile.delete() // remove the marker file
      }
    } catch {
      case e: ExecutionException => {
        error("There was an error in one of the threads during logs loading: " + e.getCause)
        throw e.getCause
      }
    } finally {
      threadPools.foreach(_.shutdown()) // shut down the per-directory thread pools
    }

    info("Logs loading complete.")
  }

   That is the whole process by which every topic partition gets loaded. Next, let's look at some key helpers used by this function.
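
   One helper worth calling out is Log.parseTopicPartitionName. Partition directories are named "<topic>-<partition>", for example my-topic-0. The sketch below is not the exact Kafka implementation, just the idea: split on the last dash, since topic names may themselves contain dashes.

// Hypothetical helper illustrating the parsing idea; Kafka returns a TopicAndPartition rather than a tuple.
def parseTopicPartitionSketch(dirName: String): (String, Int) = {
  val index = dirName.lastIndexOf('-')
  (dirName.substring(0, index), dirName.substring(index + 1).toInt)
}

// parseTopicPartitionSketch("my-topic-0") == ("my-topic", 0)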

class OffsetCheckpoint(val file: File) extends Logging {       // the reader/writer class used above for the recovery-point checkpoint files
  private val lock = new Object()
  new File(file + ".tmp").delete() // try to delete any existing temp files for cleanliness
  file.createNewFile() // in case the file doesn't exist


  // Writes the checkpoint file. The logic is straightforward: write everything to a temp file, then swap it in.
  def write(offsets: Map[TopicAndPartition, Long]) {
    lock synchronized {
      // write to temp file and then swap with the existing file
      val temp = new File(file.getAbsolutePath + ".tmp")
      val fileOutputStream = new FileOutputStream(temp)
      val writer = new BufferedWriter(new OutputStreamWriter(fileOutputStream))
      try {
        // write the current version
        writer.write(0.toString)
        writer.newLine()
        // write the number of entries
        writer.write(offsets.size.toString)
        writer.newLine()
        // write the entries
        offsets.foreach { case (topicPart, offset) =>
          writer.write("%s %d %d".format(topicPart.topic, topicPart.partition, offset))
          writer.newLine()
        }
        // flush the buffer and then fsync the underlying file
        writer.flush()
        fileOutputStream.getFD().sync()
      } finally {
        writer.close()
      }
      // swap new offset checkpoint file with previous one
      if(!temp.renameTo(file)) {
        // renameTo() fails on Windows if the destination file exists.
        file.delete()
        if(!temp.renameTo(file))
          throw new IOException("File rename from %s to %s failed.".format(temp.getAbsolutePath, file.getAbsolutePath))
      }
    }
  }

  // The read() method called from loadLogs() to parse the checkpoint file.
  def read(): Map[TopicAndPartition, Long] = {
    lock synchronized {
      val reader = new BufferedReader(new FileReader(file))
      try {
        var line = reader.readLine()
        if(line == null)
          return Map.empty
        val version = line.toInt
        version match {
          case 0 =>
            line = reader.readLine()
            if(line == null)
              return Map.empty
            val expectedSize = line.toInt
            var offsets = Map[TopicAndPartition, Long]() // the map that is eventually returned
            line = reader.readLine()
            while(line != null) {
              val pieces = line.split("\\s+")
              if(pieces.length != 3)
                throw new IOException("Malformed line in offset checkpoint file: '%s'.".format(line))
              val topic = pieces(0)
              val partition = pieces(1).toInt
              val offset = pieces(2).toLong
              offsets += (TopicAndPartition(topic, partition) -> offset) // add each parsed line to the map
              line = reader.readLine()
            }
            if(offsets.size != expectedSize)
              throw new IOException("Expected %d entries but found only %d".format(expectedSize, offsets.size))
            offsets // returned here
          case _ =>
            throw new IOException("Unrecognized version of the highwatermark checkpoint file: " + version)
        }
      } finally {
        reader.close()
      }
    }
  }
}

  The class above lives under the server directory; it is used to read and write the checkpoint files.
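
  As a concrete illustration (the topic name and offsets are made up), a recovery-point-offset-checkpoint file produced by the write() method above contains the version, the entry count, and one "topic partition offset" line per partition:

0
2
my-topic 0 1523
my-topic 1 987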

  Now let's look at the startup function. It schedules three periodic tasks and is called from KafkaServer.startup().

def startup() {
    /* Schedule the cleanup task to delete old logs */
    if(scheduler != null) {
      info("Starting log cleanup with a period of %d ms.".format(retentionCheckMs))
      scheduler.schedule("kafka-log-retention",           //启动log保留任务.这个任务用来确定log保留多长时间和多大.
                         cleanupLogs,                     //在这个函数里做这些任务.
                         delay = InitialTaskDelayMs,      //任务开始定时
                         period = retentionCheckMs,       //保留时间
                         TimeUnit.MILLISECONDS)
      info("Starting log flusher with a default period of %d ms.".format(flushCheckMs))
      scheduler.schedule("kafka-log-flusher",            //将log刷新到磁盘的任务
                         flushDirtyLogs,                 //这个函数做主要工作
                         delay = InitialTaskDelayMs,     
                         period = flushCheckMs,          //刷新时间间隔
                         TimeUnit.MILLISECONDS)
      scheduler.schedule("kafka-recovery-point-checkpoint",  //检查log的检查点
                         checkpointRecoveryPointOffsets,     //这个函数主要做这些工作.
                         delay = InitialTaskDelayMs,         
                         period = flushCheckpointMs,         //检查点刷新时间
                         TimeUnit.MILLISECONDS)
    }
    if(cleanerConfig.enableCleaner)
      cleaner.startup()                                    //检查是否启用log归整压缩
  }
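
  The scheduler used here is backed by a scheduled thread pool, so the pattern behind these three schedule() calls is ordinary periodic scheduling. A rough non-Kafka sketch of that pattern (delay and period values are illustrative):

import java.util.concurrent.{Executors, TimeUnit}

object PeriodicTaskSketch extends App {
  val scheduler = Executors.newScheduledThreadPool(1)
  // Schedule a recurring task with an initial delay and a fixed period,
  // mirroring InitialTaskDelayMs and retentionCheckMs/flushCheckMs/flushCheckpointMs in LogManager.startup().
  scheduler.scheduleAtFixedRate(
    new Runnable { def run(): Unit = println("pretend this is cleanupLogs()") },
    100L,  // initial delay in ms (LogManager uses 30 s)
    500L,  // period in ms (LogManager uses the configured check intervals)
    TimeUnit.MILLISECONDS)
  Thread.sleep(2000) // let the task fire a few times for the demo
  scheduler.shutdown()
}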

 

  startup() adds these three tasks to the scheduler initialized earliest in KafkaServer.startup() (backed by a ScheduledThreadPoolExecutor), where they run periodically in the background. Now let's look at the worker functions behind the three tasks.

def checkpointRecoveryPointOffsets() {    // the worker function that periodically writes the checkpoint files
    this.logDirs.foreach(checkpointLogsInDir)
  }

  /**
   * Make a checkpoint for all logs in provided directory.
   */
  private def checkpointLogsInDir(dir: File): Unit = {
    val recoveryPoints = this.logsByDir.get(dir.toString) // look up all Log objects that live under this data directory
    if (recoveryPoints.isDefined) {
      this.recoveryPointCheckpoints(dir).write(recoveryPoints.get.mapValues(_.recoveryPoint)) // write the file via the OffsetCheckpoint.write method shown earlier
    }
  }

// The mappings below expose the logs pool (the Log objects built in loadLogs) in two convenient shapes.

def logsByTopicPartition = logs.toMap

/**
* Map of log dir to logs by topic and partitions in that dir
*/

// Group the Log objects by data directory.
private def logsByDir = {
  this.logsByTopicPartition.groupBy {
    case (_, log) => log.dir.getParent
  }
}
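
  For intuition, grouping by log.dir.getParent buckets every partition directory under its data directory. The paths below are made up:

import java.io.File

val partitionDirs = Seq("/data1/kafka/logs/my-topic-0", "/data2/kafka/logs/my-topic-1")
val byDataDir = partitionDirs.groupBy(p => new File(p).getParent)
// byDataDir has the keys "/data1/kafka/logs" and "/data2/kafka/logs",
// which is the shape checkpointLogsInDir relies on when it calls logsByDir.get(dir.toString).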

 

   The functions above refresh the checkpoint file in each data directory. Next, the worker functions for log retention.

private def cleanupExpiredSegments(log: Log): Int = {  // called by cleanupLogs
    val startMs = time.milliseconds
    log.deleteOldSegments(startMs - _.lastModified > log.config.retentionMs) // ultimately wraps the Log object's method for deleting old segments
  }

  /**
   *  Runs through the log removing segments until the size of the log
   *  is at least logRetentionSize bytes in size
   */
  private def cleanupSegmentsToMaintainSize(log: Log): Int = { // also called by cleanupLogs
    if(log.config.retentionSize < 0 || log.size < log.config.retentionSize) // nothing to do if size-based retention is disabled or the log is still under the limit
      return 0
    var diff = log.size - log.config.retentionSize
    def shouldDelete(segment: LogSegment) = {
      if(diff - segment.size >= 0) {
        diff -= segment.size
        true
      } else {
        false
      }
    }
    log.deleteOldSegments(shouldDelete) // again delegates to the Log object's delete-old-segments method
  }
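
  A small worked example of the size-based logic, with made-up numbers: if the log is 4 units over its retentionSize and its oldest segments are 2 units each, the shouldDelete predicate accepts exactly the first two segments.

var diff = 10L - 6L                        // log.size - retentionSize
val segmentSizes = Seq(2L, 2L, 2L, 2L, 2L) // segment sizes, oldest first
val toDelete = segmentSizes.takeWhile { size =>
  if (diff - size >= 0) { diff -= size; true } else false
}
// toDelete == Seq(2, 2): deleting the two oldest segments brings the log back under the limit.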

  /**
   * Delete any eligible logs. Return the number of segments deleted.
   */
  def cleanupLogs() {          // the main worker function; it calls the two helpers above. allLogs is the value collection of logs, i.e. all Log objects.
    debug("Beginning log cleanup...")
    var total = 0
    val startMs = time.milliseconds
    for(log <- allLogs; if !log.config.compact) {
      debug("Garbage collecting '" + log.name + "'")
      // One helper removes segments past the time-based retention, the other removes segments beyond the size limit.
      // Every topic is handled inside this single task.
      total += cleanupExpiredSegments(log) + cleanupSegmentsToMaintainSize(log)
    }
    debug("Log cleanup completed. " + total + " files deleted in " +
          (time.milliseconds - startMs) / 1000 + " seconds")
  }

  /**
   * Get all the partition logs
   */
  def allLogs(): Iterable[Log] = logs.values // allLogs is defined here

  Next, the flushDirtyLogs function, which flushes logs to disk.

private def flushDirtyLogs() = {        // the worker function that periodically flushes logs to disk
    debug("Checking for dirty logs to flush...")

    for ((topicAndPartition, log) <- logs) {  // iterate over every topic partition and its Log object
      try {
        val timeSinceLastFlush = time.milliseconds - log.lastFlushTime   // how long since the last flush
        debug("Checking if flush is needed on " + topicAndPartition.topic + " flush interval  " + log.config.flushMs +
              " last flushed " + log.lastFlushTime + " time since last flush: " + timeSinceLastFlush)
        if(timeSinceLastFlush >= log.config.flushMs)   // decide whether a flush is due
          log.flush                                    // if so, let the Log object's flush method do the work
      } catch {
        case e: Throwable =>
          error("Error flushing topic " + topicAndPartition.topic, e)
      }
    }
  }

 

That covers the main and core functionality of LogManager. A few functions such as createLog and deleteLog are not yet used inside the LogManager class itself; they will be analyzed when the management classes that use them come up.

As you can see, this object's main job is managing logs: it loads every log from files on disk into in-memory objects and wraps the management tasks around them, while the actual per-log work is done by the Log objects themselves. The Log source code will be analyzed in the next post.

 
