Using Curator, the Open-Source ZooKeeper Client Framework
CuratorFramework
The Curator framework provides a set of high-level APIs that simplify working with ZooKeeper: connection management with retries, recipes such as distributed locks, and fluent calls for creating and reading znodes. The helper object below uses it to manage a shared client, serialize jobs with distributed locks, and load and store Kafka offsets in ZooKeeper.
Without further ado, here is the code:
package com.donews.data.util

import java.util.concurrent.TimeUnit

import com.donews.data.Config
import kafka.common.TopicAndPartition
import org.apache.curator.framework.CuratorFrameworkFactory
import org.apache.curator.framework.recipes.locks.InterProcessMutex
import org.apache.curator.framework.recipes.nodes.PersistentEphemeralNode
import org.apache.curator.retry.ExponentialBackoffRetry
import org.slf4j.LoggerFactory

import scala.collection.JavaConversions._

/**
  * Created by reynold on 17-3-20.
  */
object ZookeeperHelper {

  val LOG = LoggerFactory.getLogger(ZookeeperHelper.getClass)

  // One shared Curator client for the whole application: it retries failed
  // operations with exponential backoff (1 s initial sleep, at most 3
  // retries) and roots every path under the "/reynold" namespace.
  val client = {
    val client = CuratorFrameworkFactory
      .builder
      .connectString(Config.ZOOKEEPER_CONNECT)
      .retryPolicy(new ExponentialBackoffRetry(1000, 3))
      .namespace("reynold")
      .build()
    client.start()
    client
  }
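  // NOTE: because of the namespace above, every path handed to this client
  // is resolved relative to "/reynold"; e.g. forPath("/kafkaOffsets") below
  // actually reads and writes the znode "/reynold/kafkaOffsets".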
  // Acquire an exclusive lock on `path` (a distributed, re-entrant
  // InterProcessMutex), run `body`, and always release the lock afterwards.
  // Blocks until the lock becomes available.
  def lock(path: String)(body: => Unit): Unit = {
    val lock = new InterProcessMutex(client, path)
    lock.acquire()
    try {
      body
    } finally {
      lock.release()
    }
  }
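  // Usage sketch ("/locks/demo" is an illustrative path, not one used by
  // this project): processes calling lock() with the same path execute
  // their bodies strictly one at a time, across JVMs and machines.
  //
  //   ZookeeperHelper.lock("/locks/demo") {
  //     // critical section: runs in at most one process at any moment
  //   }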
  // Like lock(), but give up if the lock cannot be acquired within
  // 10 seconds (i.e. another instance is already running), and report
  // via the return value whether `body` was executed.
  def tryDo(path: String)(body: => Unit): Boolean = {
    val lock = new InterProcessMutex(client, path)
    if (!lock.acquire(10, TimeUnit.SECONDS)) {
      LOG.info(s"could not acquire lock $path, another task is already running; exiting")
      return false
    }
    try {
      LOG.info("lock acquired, running")
      body
      true
    } finally {
      lock.release()
      LOG.info(s"released lock $path")
    }
  }

  // Create a znode (and any missing parents) if it does not exist yet.
  def ensurePathExists(path: String): Unit = {
    if (client.checkExists().forPath(path) == null) {
      client.create().creatingParentsIfNeeded().forPath(path)
    }
  }

  // Load Kafka offsets from ZooKeeper. Offsets stored under
  // /reynold/kafkaOffsets/<topic>/<partition> override the supplied defaults.
  def loadOffsets(topicSet: Set[String],
                  defaultOffset: Map[TopicAndPartition, Long]): Map[TopicAndPartition, Long] = {
    val kafkaOffsetPath = "/kafkaOffsets"
    ensurePathExists(kafkaOffsetPath)
    val offsets = for {
      // t iterates the topic znodes under /reynold/kafkaOffsets
      t <- client.getChildren.forPath(kafkaOffsetPath)
      if topicSet.contains(t)
      // p iterates the partition znodes, e.g. /reynold/kafkaOffsets/donews_website/0
      p <- client.getChildren.forPath(s"$kafkaOffsetPath/$t")
    } yield {
      // read the bytes stored in this partition znode
      val data = client.getData.forPath(s"$kafkaOffsetPath/$t/$p")
      // the payload is the offset as a decimal string; parse it into a Long
      val offset = new String(data).toLong
      (TopicAndPartition(t, Integer.parseInt(p)), offset)
    }
    defaultOffset ++ offsets.toMap
  }

  // Store Kafka offsets in ZooKeeper, one znode per topic/partition.
  def storeOffsets(offsets: Map[TopicAndPartition, Long]): Unit = {
    val kafkaOffsetPath = "/kafkaOffsets"
    if (client.checkExists().forPath(kafkaOffsetPath) == null) {
      client.create().creatingParentsIfNeeded().forPath(kafkaOffsetPath)
    }
    for ((tp, offset) <- offsets) {
      val data = String.valueOf(offset).getBytes
      val path = s"$kafkaOffsetPath/${tp.topic}/${tp.partition}"
      ensurePathExists(path)
      client.setData().forPath(path, data)
    }
  }

  def main(args: Array[String]): Unit = {
    // println(Config.ZOOKEEPER_CONNECT)
    // tryDo("/locks/test") {
    //   println("hello world")
    // }
    // val n = new PersistentEphemeralNode(client, PersistentEphemeralNode.Mode.EPHEMERAL,
    //   "/appstatistic/test", "hello".getBytes)
    // n.start()
    // client.setData().forPath("/appstatistic/test", "xxx".getBytes)
    // val kafkaParams = Map[String, String](
    //   "metadata.broker.list" -> "spark-slave03:9092,spark-slave04:9092,spark-slave05:9092"
    // )
    // val kafka = new KafkaClusterHelper(kafkaParams)
    // val offsets = kafka.getFromOffsets(kafkaParams, Set("donews"))
    // println(offsets)
    // storeOffsets(offsets)
    loadOffsets(Set("donews"), Map()).foreach(println(_))
    // val done = tryDo("/appstatistic/batchMainx") {
    //   println("it works")
    //   Thread.sleep(1000L * 35)
    // }
    // println(done)
  }
}
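To see how the pieces fit together, here is a minimal end-to-end sketch of a batch job that takes the lock, loads the last committed offsets, does its work, and commits the new positions. The object name OffsetCycleDemo, the lock path "/locks/offset-demo", and the `+ 100L` stand-in for real consumption progress are illustrative assumptions, not part of the project code above.

import kafka.common.TopicAndPartition
import com.donews.data.util.ZookeeperHelper

object OffsetCycleDemo {
  def main(args: Array[String]): Unit = {
    // Illustrative lock path; any znode path agreed on by all instances works.
    val ran = ZookeeperHelper.tryDo("/locks/offset-demo") {
      // Start from the stored offsets, falling back to 0 for partition 0.
      val fromOffsets = ZookeeperHelper.loadOffsets(
        Set("donews"),
        Map(TopicAndPartition("donews", 0) -> 0L))

      // ... consume messages starting at fromOffsets and track how far
      // processing got; the "+ 100L" below only simulates that progress ...
      val endOffsets = fromOffsets.map { case (tp, off) => tp -> (off + 100L) }

      // Persist the new positions so the next run resumes from them.
      ZookeeperHelper.storeOffsets(endOffsets)
    }
    if (!ran) println("another instance holds the lock; nothing to do")
  }
}

Because tryDo bounds the wait at 10 seconds and reports failure instead of blocking forever, an overlapping run (for example, a cron schedule firing while the previous batch is still working) exits cleanly rather than processing the same offsets twice.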