Kafka Source Code Analysis: Log

This post analyzes the source code of the Log class itself.

The Log class is the core abstraction for a single topic partition: all of a partition's basic management operations are performed through this object. The class lives in Log.scala, under the log directory of the source tree.

Log is essentially a managed collection of LogSegment objects. Let's start with the initialization code.

class Log(val dir: File,                           // Log instances are created by LogManager, as covered in the LogManager post; compare with that analysis.
          @volatile var config: LogConfig,
          @volatile var recoveryPoint: Long = 0L,
          scheduler: Scheduler,
          time: Time = SystemTime) extends Logging with KafkaMetricsGroup {

  import kafka.log.Log._    // bring in the companion object defined at the end of this file

  /* A lock that guards all modifications to the log */
  private val lock = new Object             // lock guarding all modifications to the log

  /* last time it was flushed */
  private val lastflushedTime = new AtomicLong(time.milliseconds)  // time of the last flush to disk; used throughout log management

  /* the actual segments of the log */
  // The map of every segment of this topic partition, keyed by base offset.
  // This collection drives the whole log-management process; all later operations rely on it.
  private val segments: ConcurrentNavigableMap[java.lang.Long, LogSegment] = new ConcurrentSkipListMap[java.lang.Long, LogSegment]
  loadSegments()   // load all of the partition's segments into `segments` and run a few file checks

  /* Calculate the offset of the next message */
  // activeSegment is the segment with the highest base offset, i.e. the newest, "active" one;
  // its next offset becomes the metadata for the next message to be appended.
  @volatile var nextOffsetMetadata = new LogOffsetMetadata(activeSegment.nextOffset(), activeSegment.baseOffset, activeSegment.size.toInt)

  val topicAndPartition: TopicAndPartition = Log.parseTopicPartitionName(name)  // topic name and partition, parsed from the directory name

  info("Completed load of log %s with log end offset %d".format(name, logEndOffset))

  val tags = Map("topic" -> topicAndPartition.topic, "partition" -> topicAndPartition.partition.toString)  // tag map for the metrics below

  // Everything below registers monitoring gauges through the metrics library.
  newGauge("NumLogSegments",
           new Gauge[Int] { def value = numberOfSegments },
           tags)

  newGauge("LogStartOffset",
           new Gauge[Long] { def value = logStartOffset },
           tags)

  newGauge("LogEndOffset",
           new Gauge[Long] { def value = logEndOffset },
           tags)

  newGauge("Size",
           new Gauge[Long] { def value = size },
           tags)

  /** The name of this log */
  def name = dir.getName()

 

  The code above is the initialization part of the Log class. Its most important job is to declare the handful of objects used throughout the class and to load the segment files into in-memory objects.
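  The segment map declared above is the structural heart of the class: segments are keyed by their base offset in a sorted skip-list map, so the segment that contains a given offset can be found with a floor lookup, and the active segment is simply the last entry. A minimal standalone sketch of that idea (plain strings stand in for LogSegment objects; the offsets and file names are made up):

import java.util.concurrent.ConcurrentSkipListMap

object SegmentLookupSketch extends App {
  // Pretend segments, keyed by base offset exactly as Log keys its LogSegments.
  val segments = new ConcurrentSkipListMap[java.lang.Long, String]()
  segments.put(0L, "00000000000000000000.log")
  segments.put(350L, "00000000000000000350.log")
  segments.put(900L, "00000000000000000900.log")

  // The "active" segment is the one with the highest base offset.
  println(segments.lastEntry().getValue)        // 00000000000000000900.log

  // floorEntry finds the segment containing a given offset:
  // the greatest base offset that is <= the requested offset.
  println(segments.floorEntry(725L).getValue)   // 00000000000000000350.log
}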

  Now let's look at the main loading function, loadSegments.

private def loadSegments() {
    // create the log directory if it doesn't exist
    dir.mkdirs()       // create the topic-partition directory if it does not already exist, as the comment above says
    
    // first do a pass through the files in the log directory and remove any temporary files 
    // and complete any interrupted swap operations
    for(file <- dir.listFiles if file.isFile) {            // this pass looks for segment files that should be cleaned up or deleted
      if(!file.canRead)
        throw new IOException("Could not read file " + file)
      val filename = file.getName
      if(filename.endsWith(DeletedFileSuffix) || filename.endsWith(CleanedFileSuffix)) {
        // if the file ends in .deleted or .cleaned, delete it
        file.delete()
      } else if(filename.endsWith(SwapFileSuffix)) {       // a leftover .swap file: delete or rename it depending on the case
        // we crashed in the middle of a swap operation, to recover:
        // if a log, swap it in and delete the .index file
        // if an index just delete it, it will be rebuilt
        val baseName = new File(Utils.replaceSuffix(file.getPath, SwapFileSuffix, ""))
        if(baseName.getPath.endsWith(IndexFileSuffix)) {
          file.delete()
        } else if(baseName.getPath.endsWith(LogFileSuffix)){
          // delete the index
          val index = new File(Utils.replaceSuffix(baseName.getPath, LogFileSuffix, IndexFileSuffix))
          index.delete()
          // complete the swap operation
          val renamed = file.renameTo(baseName)
          if(renamed)
            info("Found log file %s from interrupted swap operation, repairing.".format(file.getPath))
          else
            throw new KafkaException("Failed to rename file %s.".format(file.getPath))
        }
      }
    }

    // now do a second pass and load all the .log and .index files
    for(file <- dir.listFiles if file.isFile) {   // this pass loads the .log/.index files and checks that each index has a matching log
      val filename = file.getName
      if(filename.endsWith(IndexFileSuffix)) {
        // if it is an index file, make sure it has a corresponding .log file
        val logFile = new File(file.getAbsolutePath.replace(IndexFileSuffix, LogFileSuffix))
        if(!logFile.exists) {   // an index file with no matching log file is simply removed
          warn("Found an orphaned index file, %s, with no corresponding log file.".format(file.getAbsolutePath))
          file.delete()
        }
      } else if(filename.endsWith(LogFileSuffix)) {   // this is where LogSegment objects are created
        // if its a log file, load the corresponding log segment
        val start = filename.substring(0, filename.length - LogFileSuffix.length).toLong
        val hasIndex = Log.indexFilename(dir, start).exists  // does the corresponding index file exist?
        val segment = new LogSegment(dir = dir, 
                                     startOffset = start,
                                     indexIntervalBytes = config.indexInterval, 
                                     maxIndexSize = config.maxIndexSize,
                                     rollJitterMs = config.randomSegmentJitter,
                                     time = time)
        if(!hasIndex) {
          error("Could not find index file corresponding to log file %s, rebuilding index...".format(segment.log.file.getAbsolutePath))
          segment.recover(config.maxMessageSize)  // no index for this log file, so rebuild it via recover(); this is exactly what happens when an operator deletes a corrupt index file and lets Kafka rebuild it
        }
        segments.put(start, segment) // add the segment to the segment map
      }
    }

    if(logSegments.size == 0) {  // a brand-new topic partition with no segment files yet: create an empty segment starting at offset 0
      // no existing segments, create a new mutable segment beginning at offset 0
      segments.put(0L, new LogSegment(dir = dir,
                                     startOffset = 0,
                                     indexIntervalBytes = config.indexInterval, 
                                     maxIndexSize = config.maxIndexSize,
                                     rollJitterMs = config.randomSegmentJitter,
                                     time = time))
    } else {
      recoverLog()  // there are existing segments, so run recovery and bring the recovery point up to date
      // reset the index size of the currently active log segment to allow more entries
      activeSegment.index.resize(config.maxIndexSize)
    }

    // sanity check the index file of every segment to ensure we don't proceed with a corrupt segment
    for (s <- logSegments)
      s.index.sanityCheck()  // sanity-check each index file
  }

   Let's see what recoverLog actually does.

private def recoverLog() {
    // if we have the clean shutdown marker, skip recovery
    if(hasCleanShutdownFile) {    // hasCleanShutdownFile simply checks whether the clean-shutdown marker file exists
      this.recoveryPoint = activeSegment.nextOffset // if it does, just move the recovery point to the next offset of the active segment
      return
    }

    // okay we need to actually recovery this log
    val unflushed = logSegments(this.recoveryPoint, Long.MaxValue).iterator // all segments from the recovery point onwards, i.e. everything that may contain unflushed data
    while(unflushed.hasNext) { // recover each unflushed segment; if recovery has to truncate, drop all remaining segments
      val curr = unflushed.next 
      info("Recovering unflushed segment %d in log %s.".format(curr.baseOffset, name))
      val truncatedBytes = 
        try {
          curr.recover(config.maxMessageSize)
        } catch {
          case e: InvalidOffsetException => 
            val startOffset = curr.baseOffset
            warn("Found invalid offset during recovery for log " + dir.getName +". Deleting the corrupt segment and " +
                 "creating an empty one with starting offset " + startOffset)
            curr.truncateTo(startOffset)
        }
      if(truncatedBytes > 0) {
        // we had an invalid message, delete all remaining log
        warn("Corruption found in segment %d of log %s, truncating to offset %d.".format(curr.baseOffset, name, curr.nextOffset))
        unflushed.foreach(deleteSegment)
      }
    }
  }
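   The hasCleanShutdownFile helper referenced above is not shown in this post; judging from the CleanShutdownFile constant in the companion object, it is essentially a one-line existence check, roughly like the sketch below (the exact location of the marker file, the parent data directory, is stated from memory rather than taken from the code above):

  // Sketch: the .kafka_cleanshutdown marker is written by a clean broker shutdown;
  // if it is present there is no need to recover any segments.
  private def hasCleanShutdownFile = new File(dir.getParentFile, CleanShutdownFile).exists()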

    Most of the real work in this function is delegated to the same-named methods of LogSegment, which will be analyzed in a later post. Next, let's look at the other Log methods wrapped by LogManager: the two functions deleteOldSegments and flush.

  First, the deleteOldSegments function.

def deleteOldSegments(predicate: LogSegment => Boolean): Int = {  // called by LogManager to clean up old log data; the argument is a predicate (an anonymous function)
    // find any segments that match the user-supplied predicate UNLESS it is the final segment 
    // and it is empty (since we would just end up re-creating it
    val lastSegment = activeSegment // as noted above, activeSegment is the last (newest) segment
    // In LogManager the predicate checks the size and time retention limits. Walk the segments from oldest
    // to newest while the predicate holds, excluding the active segment unless it is non-empty
    // (deleting an empty active segment would just force us to re-create it).
    val deletable = logSegments.takeWhile(s => predicate(s) && (s.baseOffset != lastSegment.baseOffset || s.size > 0))
    val numToDelete = deletable.size // how many segments are going to be deleted
    if(numToDelete > 0) { // only do work if there is something to delete
      lock synchronized { // synchronized block
        // we must always have at least one segment, so if we are going to delete all the segments, create a new one first
        if(segments.size == numToDelete) // if every segment of this partition is about to be deleted, roll a new one first
          roll()
        // remove the segments for lookups
        deletable.foreach(deleteSegment(_)) // delete each matching segment
      }
    }
    numToDelete // return the number of deleted segments
  }
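  To make the predicate concrete, here is a rough sketch of how a caller such as LogManager builds it. This is a paraphrase from memory rather than the exact LogManager code; the helper names (cleanupExpired, cleanupToSize) and their parameters are made up for illustration:

import kafka.log.Log
import kafka.utils.Time

object RetentionSketch {
  // Time-based retention: delete every segment whose last modification is older than retentionMs.
  def cleanupExpired(log: Log, time: Time, retentionMs: Long): Int =
    log.deleteOldSegments(s => time.milliseconds - s.lastModified > retentionMs)

  // Size-based retention: delete from the oldest end until the partition fits under retentionBytes.
  def cleanupToSize(log: Log, retentionBytes: Long): Int = {
    if (retentionBytes < 0 || log.size < retentionBytes) return 0
    var diff = log.size - retentionBytes
    log.deleteOldSegments { s =>
      if (diff - s.size >= 0) { diff -= s.size; true } else false
    }
  }
}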

 

  The function above makes it clear what deleting segments involves. Below is a look at the helper functions it calls.

  The activeSegment, logSegments, and deleteSegment functions:

def activeSegment = segments.lastEntry.getValue  // used in many places above: simply the last segment in the segment map
  
  /**
   * All the log segments in this log ordered from oldest to newest
   */
  def logSegments: Iterable[LogSegment] = {    // called by deleteOldSegments above: just the values of the segment map
    import JavaConversions._
    segments.values
  }
  
  /**
   * Get all segments beginning with the segment that includes "from" and ending with the segment
   * that includes up to "to-1" or the end of the log (if to > logEndOffset)
   */
  def logSegments(from: Long, to: Long): Iterable[LogSegment] = {  // called by recoverLog to find the segments from the recovery point onwards
    import JavaConversions._
    lock synchronized {
      val floor = segments.floorKey(from)  // the greatest base offset <= from, i.e. the segment that contains "from"
      if(floor eq null)
        segments.headMap(to).values
      else
        segments.subMap(floor, true, to, false).values
    }
  }

  Next is deleteSegment, the main deletion function, which in turn calls another helper to remove the segment's files.

private def deleteSegment(segment: LogSegment) {
    info("Scheduling log segment %d for log %s for deletion.".format(segment.baseOffset, name))
    lock synchronized {   // synchronize on the log lock
      segments.remove(segment.baseOffset)  // remove the segment from the map
      asyncDeleteSegment(segment)   // delete the segment's files asynchronously
    }
  }
  
  /**
   * Perform an asynchronous delete on the given file if it exists (otherwise do nothing)
   * @throws KafkaStorageException if the file can't be renamed and still exists 
   */
  private def asyncDeleteSegment(segment: LogSegment) {
    segment.changeFileSuffixes("", Log.DeletedFileSuffix)
    def deleteSeg() {
      info("Deleting segment %d from log %s.".format(segment.baseOffset, name))
      segment.delete()
    }
    // The scheduler used here is the thread pool passed into the constructor, originally created during
    // KafkaServer startup; the wrapped LogSegment.delete is submitted to it for later execution.
    scheduler.schedule("delete-file", deleteSeg, delay = config.fileDeleteDelayMs)
  }

  The sections above covered the functions wrapped by LogManager.cleanupLogs. Now let's see how Log.flush, called from LogManager.flushDirtyLogs, works.

def flush(): Unit = flush(this.logEndOffset)  // this overload is the one LogManager calls

  /**
   * Flush log segments for all offsets up to offset-1
   * @param offset The offset to flush up to (non-inclusive); the new recovery point
   */
  def flush(offset: Long) : Unit = {  // this overload does the actual work
    if (offset <= this.recoveryPoint)  // nothing to do if everything up to this offset is already covered by the recovery point
      return
    debug("Flushing log '" + name + " up to offset " + offset + ", last flushed: " + lastFlushTime + " current time: " +
          time.milliseconds + " unflushed = " + unflushedMessages)
    for(segment <- logSegments(this.recoveryPoint, offset))  // every segment between the recovery point and the target offset
      segment.flush()   // LogSegment.flush writes it to disk; the details are covered in the LogSegment post
    lock synchronized {  // synchronized block
      if(offset > this.recoveryPoint) { 
        this.recoveryPoint = offset    // advance the recovery point after flushing
        lastflushedTime.set(time.milliseconds)  // record the flush time
      }
    }
  }

  That covers the core management and loading functions. Next come the read and write paths: read and append.

def append(messages: ByteBufferMessageSet, assignOffsets: Boolean = true): LogAppendInfo = {  // append messages to the end of the active segment
    val appendInfo = analyzeAndValidateMessageSet(messages) // validate the message set and build a LogAppendInfo
    
    // if we have any valid messages, append them to the log
    if(appendInfo.shallowCount == 0)  // nothing to append: return the info as-is
      return appendInfo
      
    // trim any invalid bytes or partial messages before appending it to the on-disk log
    var validMessages = trimInvalidBytes(messages, appendInfo)  // trim any trailing invalid or partial bytes from the message set

    try {
      // they are valid, insert them in the log
      lock synchronized {
        appendInfo.firstOffset = nextOffsetMetadata.messageOffset  // offset assignment starts at the current log end offset, i.e. the next offset of the active segment

        if(assignOffsets) {
          // assign offsets to the message set
          val offset = new AtomicLong(nextOffsetMetadata.messageOffset) // offset counter starting at the next offset
          try {
            validMessages = validMessages.assignOffsets(offset, appendInfo.codec) // let ByteBufferMessageSet assign an offset to every message
          } catch {
            case e: IOException => throw new KafkaException("Error in validating messages while appending to log '%s'".format(name), e)
          }
          appendInfo.lastOffset = offset.get - 1 // assignOffsets leaves the counter one past the last assigned offset, hence the -1
        } else {
          // we are taking the offsets we are given
          if(!appendInfo.offsetsMonotonic || appendInfo.firstOffset < nextOffsetMetadata.messageOffset)
            throw new IllegalArgumentException("Out of order offsets found in " + messages)
        }

        // re-validate message sizes since after re-compression some may exceed the limit
        for(messageAndOffset <- validMessages.shallowIterator) {
          if(MessageSet.entrySize(messageAndOffset.message) > config.maxMessageSize) { // check each message against the configured maximum message size
            // we record the original message set size instead of trimmed size
            // to be consistent with pre-compression bytesRejectedRate recording
            BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesRejectedRate.mark(messages.sizeInBytes)
            BrokerTopicStats.getBrokerAllTopicsStats.bytesRejectedRate.mark(messages.sizeInBytes)
            throw new MessageSizeTooLargeException("Message size is %d bytes which exceeds the maximum configured message size of %d."
              .format(MessageSet.entrySize(messageAndOffset.message), config.maxMessageSize))
          }
        }

        // check that the total message set size does not exceed config.segmentSize
        if(validMessages.sizeInBytes > config.segmentSize) {  // the whole message set must fit within a single segment
          throw new MessageSetSizeTooLargeException("Message set size is %d bytes which exceeds the maximum configured segment size of %d."
            .format(validMessages.sizeInBytes, config.segmentSize))
        }


        // maybe roll the log if this segment is full
        val segment = maybeRoll(validMessages.sizeInBytes)  // roll to a new segment if the current one is full

        // now append to the log
        segment.append(appendInfo.firstOffset, validMessages)  // the actual write, delegated to LogSegment.append

        // increment the log end offset
        updateLogEndOffset(appendInfo.lastOffset + 1) // advance the log end offset

        trace("Appended message set to log %s with first offset: %d, next offset: %d, and messages: %s"
                .format(this.name, appendInfo.firstOffset, nextOffsetMetadata.messageOffset, validMessages))

        if(unflushedMessages >= config.flush)  // flush to disk if enough messages have accumulated since the last flush
          flush()

        appendInfo
      }
    } catch {
      case e: IOException => throw new KafkaStorageException("I/O exception in append to log '%s'".format(name), e)
    }
  }
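   As a hedged usage sketch (not taken from the Kafka code itself): given an already-constructed Log, as LogManager builds them, appending a couple of messages and reading back the assigned offsets looks roughly like this. The class and package names are those of the 0.8.x code base being analyzed; the example strings are made up.

import kafka.log.Log
import kafka.message.{ByteBufferMessageSet, Message, NoCompressionCodec}

object AppendSketch {
  def appendExample(log: Log): Unit = {
    // Build an uncompressed message set with two messages.
    val messages = new ByteBufferMessageSet(
      NoCompressionCodec,
      new Message("hello".getBytes("UTF-8")),
      new Message("world".getBytes("UTF-8")))

    val info = log.append(messages)   // assignOffsets defaults to true, so the log assigns the offsets
    println("first offset: " + info.firstOffset + ", last offset: " + info.lastOffset)
  }
}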

   Since append involves message validation and related checks, many of these operations only make full sense alongside the message implementation under the message directory; Message itself will be analyzed in a later post. For now, here is a brief look at the corresponding validation helpers, analyzeAndValidateMessageSet and trimInvalidBytes.

private def analyzeAndValidateMessageSet(messages: ByteBufferMessageSet): LogAppendInfo = {
    var shallowMessageCount = 0
    var validBytesCount = 0
    var firstOffset, lastOffset = -1L
    var codec: CompressionCodec = NoCompressionCodec
    var monotonic = true
    for(messageAndOffset <- messages.shallowIterator) { // iterate over the (shallow) messages in the set
      // update the first offset if on the first message
      if(firstOffset < 0)
        firstOffset = messageAndOffset.offset // record the first offset
      // check that offsets are monotonically increasing
      if(lastOffset >= messageAndOffset.offset)  // offsets must be strictly increasing; otherwise mark the set as non-monotonic
        monotonic = false
      // update the last offset seen
      lastOffset = messageAndOffset.offset // remember the last offset seen

      val m = messageAndOffset.message  // the message itself

      // Check if the message sizes are valid.
      val messageSize = MessageSet.entrySize(m)
      if(messageSize > config.maxMessageSize) { // reject any message larger than the configured maximum
        BrokerTopicStats.getBrokerTopicStats(topicAndPartition.topic).bytesRejectedRate.mark(messages.sizeInBytes)
        BrokerTopicStats.getBrokerAllTopicsStats.bytesRejectedRate.mark(messages.sizeInBytes)
        throw new MessageSizeTooLargeException("Message size is %d bytes which exceeds the maximum configured message size of %d."
          .format(messageSize, config.maxMessageSize))
      }

      // check the validity of the message by checking CRC
      m.ensureValid() // verify the message CRC

      shallowMessageCount += 1 // count of validated (shallow) messages
      validBytesCount += messageSize // running total of valid bytes
      
      val messageCodec = m.compressionCodec  // record the compression codec, if any
      if(messageCodec != NoCompressionCodec)
        codec = messageCodec
    }
    // Return a LogAppendInfo describing the message set.
    LogAppendInfo(firstOffset, lastOffset, codec, shallowMessageCount, validBytesCount, monotonic)
  }

  /**
   * Trim any invalid bytes from the end of this message set (if there are any)
   * @param messages The message set to trim
   * @param info The general information of the message set
   * @return A trimmed message set. This may be the same as what was passed in or it may not.
   */
  private def trimInvalidBytes(messages: ByteBufferMessageSet, info: LogAppendInfo): ByteBufferMessageSet = {
    val messageSetValidBytes = info.validBytes
    if(messageSetValidBytes < 0)  // a negative valid-byte count means the set is corrupt
      throw new InvalidMessageSizeException("Illegal length of message set " + messageSetValidBytes + " Message set cannot be appended to log. Possible causes are corrupted produce requests")
    if(messageSetValidBytes == messages.sizeInBytes) {
      messages  // every byte is valid: return the set unchanged
    } else {
      // trim invalid bytes: shrink the buffer's limit to the validated size, dropping the tail
      val validByteBuffer = messages.buffer.duplicate()
      validByteBuffer.limit(messageSetValidBytes)
      new ByteBufferMessageSet(validByteBuffer)  // return the new, trimmed message set
    }
  }

 

  These two functions are the pre-processing helpers called from append. They rely on the message-level APIs, which will be analyzed in the Message post.

  The append path also performs another log-management task: rolling the log segment. maybeRoll compares the current segment against the configuration to decide whether a new segment needs to be created.

private def maybeRoll(messagesSize: Int): LogSegment = {
    val segment = activeSegment
    // Roll if the incoming messages would not fit in the current segment, or the segment is old enough
    // (non-empty and past segmentMs minus the roll jitter), or its index is full.
    if (segment.size > config.segmentSize - messagesSize ||
        segment.size > 0 && time.milliseconds - segment.created > config.segmentMs - segment.rollJitterMs ||
        segment.index.isFull) {
      debug("Rolling new log segment in %s (log_size = %d/%d, index_size = %d/%d, age_ms = %d/%d)."
            .format(name,
                    segment.size,
                    config.segmentSize,
                    segment.index.entries,
                    segment.index.maxEntries,
                    time.milliseconds - segment.created,
                    config.segmentMs - segment.rollJitterMs))
      roll()    // delegate the actual roll to roll()
    } else {
      segment   // no roll needed: keep appending to the current segment
    }
  }

  /**
   * Roll the log over to a new active segment starting with the current logEndOffset.
   * This will trim the index to the exact size of the number of entries it currently contains.
   * @return The newly rolled segment
   */
  def roll(): LogSegment = {
    val start = time.nanoseconds
    lock synchronized {
      val newOffset = logEndOffset                    // the new segment is named after the current log end offset
      val logFile = logFilename(dir, newOffset)       // .log file name
      val indexFile = indexFilename(dir, newOffset)   // .index file name
      for(file <- List(logFile, indexFile); if file.exists) {   // if leftover files with these names exist, delete them
        warn("Newly rolled segment file " + file.getName + " already exists; deleting it first")
        file.delete()
      }

      segments.lastEntry() match {
        case null =>
        case entry => entry.getValue.index.trimToValidSize()
      }
      val segment = new LogSegment(dir,               // create the new segment
                                   startOffset = newOffset,
                                   indexIntervalBytes = config.indexInterval,
                                   maxIndexSize = config.maxIndexSize,
                                   rollJitterMs = config.randomSegmentJitter,
                                   time = time)
      val prev = addSegment(segment)                  // add the new segment to the segment map
      if(prev != null)
        throw new KafkaException("Trying to roll a new log segment for topic partition %s with start offset %d while it already exists.".format(name, newOffset))

      // schedule an asynchronous flush of the old segment
      scheduler.schedule("flush-log", () => flush(newOffset), delay = 0L)   // submit a flush task to the scheduler

      info("Rolled new log segment for '" + name + "' in %.0f ms.".format((System.nanoTime - start) / (1000.0*1000.0)))

      segment
    }
  }

  That covers writing a batch of messages; now let's look at reading one.

def read(startOffset: Long, maxLength: Int, maxOffset: Option[Long] = None): FetchDataInfo = {
    trace("Reading %d bytes from offset %d in log %s of length %d bytes".format(maxLength, startOffset, name, size))

    // check if the offset is valid and in range
    val next = nextOffsetMetadata.messageOffset
    if(startOffset == next) // reading exactly at the log end offset: return an empty message set
      return FetchDataInfo(nextOffsetMetadata, MessageSet.Empty)
    
    var entry = segments.floorEntry(startOffset) // the segment whose base offset is <= startOffset
      
    // attempt to read beyond the log end offset is an error
    if(startOffset > next || entry == null)
      throw new OffsetOutOfRangeException("Request for offset %d but we only have log segments in the range %d to %d.".format(startOffset, segments.firstKey, next))
    
    // do the read on the segment with a base offset less than the target offset
    // but if that segment doesn't contain any messages with an offset greater than that
    // continue to read from successive segments until we get some messages or we reach the end of the log
    while(entry != null) {
      val fetchInfo = entry.getValue.read(startOffset, maxOffset, maxLength)  // delegate the actual read to LogSegment.read
      if(fetchInfo == null) {
        entry = segments.higherEntry(entry.getKey)
      } else {
        return fetchInfo  // found data: return it
      }
    }
    
    // okay we are beyond the end of the last segment with no data fetched although the start offset is in range,
    // this can happen when all messages with offset larger than start offsets have been deleted.
    // In this case, we will return the empty set with log end offset metadata
    FetchDataInfo(nextOffsetMetadata, MessageSet.Empty) // nothing found in range: return an empty set with log end offset metadata
  }
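   And a matching usage sketch of the read path (again not taken from the Kafka code; the messageSet field name on FetchDataInfo is stated from memory): fetch up to 64 KB of messages starting at some offset and walk the returned set.

import kafka.log.Log

object ReadSketch {
  def readExample(log: Log, startOffset: Long): Unit = {
    val fetch = log.read(startOffset, maxLength = 64 * 1024)   // maxOffset defaults to None
    for (messageAndOffset <- fetch.messageSet)                 // MessageSet is iterable over MessageAndOffset
      println("offset " + messageAndOffset.offset + ", payload bytes " + messageAndOffset.message.payloadSize)
  }
}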

   Reading messages depends on the LogSegment implementation, which will be analyzed in a later post.

That wraps up the main functionality and methods of the Log class, and with it the analysis of Log.

The constants and helper functions used by Log live in its companion object, which is pasted below for reference.

object Log {
  // constants and small helper functions
  /** a log file */
  val LogFileSuffix = ".log"
    
  /** an index file */
  val IndexFileSuffix = ".index"
    
  /** a file that is scheduled to be deleted */
  val DeletedFileSuffix = ".deleted"
    
  /** A temporary file that is being used for log cleaning */
  val CleanedFileSuffix = ".cleaned"
    
  /** A temporary file used when swapping files into the log */
  val SwapFileSuffix = ".swap"

  /** Clean shutdown file that indicates the broker was cleanly shutdown in 0.8. This is required to maintain backwards compatibility
    * with 0.8 and avoid unnecessary log recovery when upgrading from 0.8 to 0.8.1 */
  /** TODO: Get rid of CleanShutdownFile in 0.8.2 */
  val CleanShutdownFile = ".kafka_cleanshutdown"

  /**
   * Make log segment file name from offset bytes. All this does is pad out the offset number with zeros
   * so that ls sorts the files numerically.
   * @param offset The offset to use in the file name
   * @return The filename
   */
  def filenamePrefixFromOffset(offset: Long): String = {
    val nf = NumberFormat.getInstance()
    nf.setMinimumIntegerDigits(20)
    nf.setMaximumFractionDigits(0)
    nf.setGroupingUsed(false)
    nf.format(offset)
  }
  
  /**
   * Construct a log file name in the given dir with the given base offset
   * @param dir The directory in which the log will reside
   * @param offset The base offset of the log file
   */
  def logFilename(dir: File, offset: Long) = 
    new File(dir, filenamePrefixFromOffset(offset) + LogFileSuffix)
  
  /**
   * Construct an index file name in the given dir using the given base offset
   * @param dir The directory in which the log will reside
   * @param offset The base offset of the log file
   */
  def indexFilename(dir: File, offset: Long) = 
    new File(dir, filenamePrefixFromOffset(offset) + IndexFileSuffix)
  

  /**
   * Parse the topic and partition out of the directory name of a log
   */
  def parseTopicPartitionName(name: String): TopicAndPartition = {
    val index = name.lastIndexOf('-')
    TopicAndPartition(name.substring(0,index), name.substring(index+1).toInt)
  }
}
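As a quick illustration of these helpers, the printed values below follow directly from the code above (the directory path is made up):

import java.io.File
import kafka.log.Log

object LogNamingExample extends App {
  // 20-digit zero padding, so that `ls` sorts segment files numerically.
  println(Log.filenamePrefixFromOffset(42L))                 // 00000000000000000042
  println(Log.logFilename(new File("/tmp/my-topic-0"), 42L)) // /tmp/my-topic-0/00000000000000000042.log
  val tp = Log.parseTopicPartitionName("my-topic-0")         // splits on the last '-'
  println(tp.topic + " / " + tp.partition)                   // my-topic / 0
}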

 

 

 

 

 

 
