Vert.x SharedData (shared data)

Data types

There are four types in total:

synchronous shared maps (local)

asynchronous maps (local or cluster-wide)

asynchronous locks (local or cluster-wide)

asynchronous counters (local or cluster-wide)
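
All four types are obtained from the SharedData object returned by vertx.sharedData(). The sketch below only shows the entry points, assuming the Vert.x 3.x callback-style API; the class name and the map, lock, and counter names are illustrative.

import io.vertx.core.Vertx;
import io.vertx.core.shareddata.SharedData;

public class SharedDataEntryPoints {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();                 // use Vertx.clusteredVertx(...) for cluster-wide data
    SharedData sd = vertx.sharedData();

    // 1. synchronous shared map (always local)
    sd.getLocalMap("local-map");

    // 2. asynchronous map (local, or cluster-wide when clustered)
    sd.getAsyncMap("async-map", ar -> { /* use ar.result() */ });

    // 3. asynchronous lock
    sd.getLock("my-lock", ar -> { /* use ar.result() */ });

    // 4. asynchronous counter
    sd.getCounter("my-counter", ar -> { /* use ar.result() */ });
  }
}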

synchronous shared maps (local)

Data structure: Map<key, Map<key, value>>, implemented by the LocalMapImpl class. Note the scope: these maps are global to the Vertx instance, so call remove, clear, or close when the data's lifecycle ends or at an application shutdown point, to avoid memory leaks.

private final ConcurrentMap<String, LocalMap<?, ?>> maps;
private final String name;
private final ConcurrentMap<K, V> map = new ConcurrentHashMap<>(); // the underlying storage

LocalMapImpl(String name, ConcurrentMap<String, LocalMap<?, ?>> maps) {
    this.name = name;
    this.maps = maps;
}
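
A minimal local-map usage sketch, assuming a plain (non-clustered) Vertx instance; the class name, map name "config", and keys are illustrative. It also shows the cleanup calls mentioned above.

import io.vertx.core.Vertx;
import io.vertx.core.shareddata.LocalMap;

public class LocalMapExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    // the same name always resolves to the same map within this Vertx instance
    LocalMap<String, String> map = vertx.sharedData().getLocalMap("config");
    map.put("mode", "local");
    System.out.println(map.get("mode"));   // prints "local"

    // clean up when the data is no longer needed, as noted above
    map.remove("mode");
    map.close();                           // removes the map from the Vertx-wide registry
  }
}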

 

asynchronous maps (local or cluster-wide), cluster implementation: ZooKeeper

ZooKeeper is used as the centralized store, and values are serialized/deserialized on every access, so cluster mode is not particularly efficient.

public <K, V> void getAsyncMap(String name, Handler<AsyncResult<AsyncMap<K, V>>> resultHandler) {
    Objects.requireNonNull(name, "name");
    Objects.requireNonNull(resultHandler, "resultHandler");
    if (clusterManager == null) { // is a cluster manager configured?
      /**
        * local: Map<key, Map<key, Holder<V>>>
        * each Holder records its insertion time (in nanoseconds) and an optional TTL,
        * so the map does not grow without bound and eventually run out of memory
        */
      getLocalAsyncMap(name, resultHandler);
    } else {
     /**
       * look up the map by name; if it does not exist, create the ZK path for it
       */
      clusterManager.<K, V>getAsyncMap(name, ar -> {
        if (ar.succeeded()) {
          // Wrap it
          resultHandler.handle(Future.succeededFuture(new WrappedAsyncMap<K, V>(ar.result())));
        } else {
          resultHandler.handle(Future.failedFuture(ar.cause()));
        }
      });
    }
}
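
From the caller's side both branches look the same. A minimal sketch, assuming the Vert.x 3.x callback API; the class name, map name, and keys are illustrative.

import io.vertx.core.Vertx;
import io.vertx.core.shareddata.AsyncMap;

public class AsyncMapExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();   // pass a clustered instance to get the ZooKeeper-backed map

    vertx.sharedData().<String, String>getAsyncMap("sessions", ar -> {
      if (ar.succeeded()) {
        AsyncMap<String, String> map = ar.result();   // local or cluster-wide, same API
        map.put("user-1", "online", putRes -> {
          if (putRes.succeeded()) {
            map.get("user-1", getRes -> System.out.println(getRes.result()));   // prints "online"
          }
        });
      } else {
        ar.cause().printStackTrace();
      }
    });
  }
}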

 

Cluster mode:

public void put(K k, V v, Handler<AsyncResult<Void>> completionHandler) {
    put(k, v, Optional.empty(), completionHandler);
  }

  
  public void put(K k, V v, long timeout, Handler<AsyncResult<Void>> completionHandler) {
    put(k, v, Optional.of(timeout), completionHandler);
  }
  
  /**
    * Store a key/value pair
    */
  private void put(K k, V v, Optional<Long> timeoutOptional, Handler<AsyncResult<Void>> completionHandler) {
    assertKeyAndValueAreNotNull(k, v)                                        // neither key nor value may be null
      .compose(aVoid -> checkExists(k))                                      // check whether the key already exists
      .compose(checkResult -> checkResult ? setData(k, v) : create(k, v))    // update if it exists, otherwise create the node
      .compose(aVoid -> {
        // keyPath(k) serializes the key to bytes and Base64-encodes it into a ZK path
        JsonObject body = new JsonObject().put(TTL_KEY_BODY_KEY_PATH, keyPath(k));

        if (timeoutOptional.isPresent()) { // does this entry have a TTL?
          asyncMapTTLMonitor.addAsyncMapWithPath(keyPath(k), this);
          body.put(TTL_KEY_BODY_TIMEOUT, timeoutOptional.get());
        } else body.put(TTL_KEY_IS_CANCEL, true);

        // publish the TTL message to all nodes
        vertx.eventBus().publish(TTL_KEY_HANDLER_ADDRESS, body);

        Future<Void> future = Future.future();
        future.complete();
        return future;
      })
      .setHandler(completionHandler); // completion callback
  }
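
From the user's side the TTL is just the extra timeout argument of AsyncMap.put. A minimal sketch; the class name, map name, key, value, and 5-second TTL are illustrative.

import io.vertx.core.Vertx;

public class AsyncMapTtlExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();   // clustered instance => ZooKeeper-backed map

    vertx.sharedData().<String, String>getAsyncMap("sessions", ar -> {
      if (ar.succeeded()) {
        // the entry expires roughly 5 seconds after it is written (TTL in milliseconds)
        ar.result().put("token", "abc123", 5_000, putRes -> {
          if (putRes.succeeded()) {
            System.out.println("stored with a 5s TTL");
          }
        });
      }
    });
  }
}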


/**
  * Query the current value first, then delete
  */
public void remove(K k, Handler<AsyncResult<V>> asyncResultHandler) {
    assertKeyIsNotNull(k).compose(aVoid -> {
      Future<V> future = Future.future();
      get(k, future.completer()); // fetch the current value
      return future;
    }).compose(value -> {
      Future<V> future = Future.future();
      if (value != null) {
        return delete(k, value); // delete the entry
      } else {
        future.complete();
      }
      return future;
    }).setHandler(asyncResultHandler); // completion callback
}

/**
 * Fetch the value for a key
 */
public void get(K k, Handler<AsyncResult<V>> asyncResultHandler) {
    assertKeyIsNotNull(k) // the key must not be null
      .compose(aVoid -> checkExists(k)) // check whether the key exists
      .compose(checkResult -> {
        Future<V> future = Future.future();
        if (checkResult) {
          // read the current data from the Curator cache
          ChildData childData = curatorCache.getCurrentData(keyPath(k));
          if (childData != null && childData.getData() != null) {
            try {
              V value = asObject(childData.getData()); // deserialize the stored bytes
              future.complete(value);
            } catch (Exception e) {
              future.fail(e);
            }
          } else {
            future.complete();
          }
        } else {
          // key not present; complete with a null result
          future.complete();
        }
        return future;
      })
      .setHandler(asyncResultHandler); // completion callback
}
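
Caller-side get and remove look like the sketch below, under the same assumptions as the earlier examples (class name, map name, and key are illustrative).

import io.vertx.core.Vertx;
import io.vertx.core.shareddata.AsyncMap;

public class AsyncMapGetRemoveExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    vertx.sharedData().<String, String>getAsyncMap("sessions", ar -> {
      if (ar.succeeded()) {
        AsyncMap<String, String> map = ar.result();
        map.get("token", getRes -> {
          System.out.println("current value: " + getRes.result());     // null if the key is absent
          map.remove("token", rmRes ->
              System.out.println("removed value: " + rmRes.result())); // the old value, or null
        });
      }
    });
  }
}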

 

asynchronous locks (local or cluster-wide), cluster implementation: ZooKeeper

/**
  * Acquire a lock
  */
public void getLock(String name, Handler<AsyncResult<Lock>> resultHandler) {
    Objects.requireNonNull(name, "name");
    Objects.requireNonNull(resultHandler, "resultHandler");
    // default timeout: 10 seconds
    getLockWithTimeout(name, DEFAULT_LOCK_TIMEOUT, resultHandler);
}

  
public void getLockWithTimeout(String name, long timeout, Handler<AsyncResult<Lock>> resultHandler) {
    Objects.requireNonNull(name, "name");
    Objects.requireNonNull(resultHandler, "resultHandler");
    Arguments.require(timeout >= 0, "timeout must be >= 0");
    if (clusterManager == null) { // is a cluster manager configured?
      getLocalLock(name, timeout, resultHandler);
    } else {
      clusterManager.getLockWithTimeout(name, timeout, resultHandler);
    }
}
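
A minimal caller-side sketch, assuming the Vert.x 3.x callback API; the class name, lock name, and 2-second timeout are illustrative.

import io.vertx.core.Vertx;
import io.vertx.core.shareddata.Lock;

public class LockExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();   // clustered instance => ZooKeeper-backed lock

    // wait at most 2 seconds for the lock named "resource"
    vertx.sharedData().getLockWithTimeout("resource", 2_000, ar -> {
      if (ar.succeeded()) {
        Lock lock = ar.result();
        try {
          System.out.println("lock held");   // critical section
        } finally {
          lock.release();                    // always release, otherwise later acquirers time out
        }
      } else {
        System.out.println("could not acquire lock: " + ar.cause().getMessage());
      }
    });
  }
}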

 

Local mode:

/**
  * Release the lock
  */
public synchronized void release() {
    LockWaiter waiter = pollWaiters();
    if (waiter != null) {
      waiter.acquire(this); // hand the lock to the next waiter in the queue
    } else {
      owned = false;
    }
}

/**
  * Queue poll
  */
private LockWaiter pollWaiters() {
    // loop so that waiters whose getLock call has already timed out are skipped
    while (true) {
      LockWaiter waiter = waiters.poll();
      if (waiter == null) {
        return null;
      } else if (!waiter.timedOut) {
        return waiter;
      }
    }
}

/**
  * Acquire the lock
  * Ownership is tracked with a state flag, which is racy on its own, hence the synchronized block
  */
public void doAcquire(Context context, long timeout, Handler<AsyncResult<Lock>> resultHandler) {
    synchronized (this) {
      if (!owned) {
        // the lock is free, take ownership immediately
        owned = true;
        lockAcquired(context, resultHandler);
      } else {
        // add to the waiter queue and schedule a delayed timeout task (getLockTimeOut)
        waiters.add(new LockWaiter(this, context, timeout, resultHandler));
      }
    }
}
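
The mechanism above boils down to an owned flag plus a FIFO waiter queue, all guarded by synchronized. The sketch below is a stripped-down illustration of that idea only; it is not the Vert.x implementation and omits the timeout handling that LockWaiter adds.

import java.util.ArrayDeque;
import java.util.Queue;
import java.util.function.Consumer;

// Simplified illustration: one "owned" flag plus a FIFO queue of waiters.
class SimpleAsyncLock {
  private final Queue<Consumer<SimpleAsyncLock>> waiters = new ArrayDeque<>();
  private boolean owned;

  synchronized void acquire(Consumer<SimpleAsyncLock> onAcquired) {
    if (!owned) {
      owned = true;
      onAcquired.accept(this);   // the lock was free, acquire it immediately
    } else {
      waiters.add(onAcquired);   // otherwise queue the caller until release()
    }
  }

  synchronized void release() {
    Consumer<SimpleAsyncLock> next = waiters.poll();
    if (next != null) {
      next.accept(this);         // hand ownership straight to the next waiter
    } else {
      owned = false;
    }
  }
}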

 

Cluster mode:

/**
  * Built on the distributed lock recipe that ships with the ZooKeeper Curator client
  */
public void getLockWithTimeout(String name, long timeout, Handler<AsyncResult<Lock>> resultHandler) {
    ContextImpl context = (ContextImpl) vertx.getOrCreateContext(); // obtain the current context
    // run ordered blocking tasks on the internal blocking pool; a queue preserves FIFO order
    context.executeBlocking(() -> {
      ZKLock lock = locks.get(name);
      if (lock == null) {
        // create a non-reentrant inter-process mutex
        InterProcessSemaphoreMutex mutexLock = new InterProcessSemaphoreMutex(curator, ZK_PATH_LOCKS + name);
        lock = new ZKLock(mutexLock);
      }
      try {
        // try to acquire the lock, waiting at most timeout milliseconds
        if (lock.getLock().acquire(timeout, TimeUnit.MILLISECONDS)) {
          locks.putIfAbsent(name, lock);
          return lock;
        } else {
          throw new VertxException("Timed out waiting to get lock " + name);
        }
      } catch (Exception e) {
        throw new VertxException("get lock exception", e);
      }
    }, resultHandler);
}


public void release() {
  // release the lock on the worker pool
  vertx.executeBlocking(future -> {
    try {
      lock.release();
    } catch (Exception e) {
      log.error(e);
    }
    future.complete();
  }, false, null);
}
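
Outside Vert.x, the underlying Curator recipe can also be used directly, which makes the behaviour easier to see. A minimal sketch; the class name, ZooKeeper connection string, and lock path are illustrative.

import java.util.concurrent.TimeUnit;

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.locks.InterProcessSemaphoreMutex;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class CuratorLockSketch {
  public static void main(String[] args) throws Exception {
    CuratorFramework curator = CuratorFrameworkFactory.newClient(
        "localhost:2181", new ExponentialBackoffRetry(1000, 3));   // illustrative connection settings
    curator.start();

    // non-reentrant inter-process mutex, the same recipe the ZKLock above wraps
    InterProcessSemaphoreMutex mutex =
        new InterProcessSemaphoreMutex(curator, "/vertx/locks/resource");

    if (mutex.acquire(2, TimeUnit.SECONDS)) {   // wait at most 2 seconds
      try {
        System.out.println("distributed lock held");
      } finally {
        mutex.release();
      }
    } else {
      System.out.println("timed out waiting for the lock");
    }

    curator.close();
  }
}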

 

asynchronous counters (local or cluster-wide), cluster implementation: ZooKeeper

Local mode: counters are backed by AtomicLong

private void getLocalCounter(String name, Handler<AsyncResult<Counter>> resultHandler) {
    // get or create the counter; AsynchronousCounter is a wrapper around AtomicLong
    Counter counter = localCounters.computeIfAbsent(name, n -> new AsynchronousCounter(vertx));
    Context context = vertx.getOrCreateContext();
    context.runOnContext(v -> resultHandler.handle(Future.succeededFuture(counter)));
}
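
A minimal caller-side counter sketch, assuming the Vert.x 3.x callback API; the class name and counter name are illustrative. In cluster mode the same code transparently uses the ZooKeeper-backed counter described below.

import io.vertx.core.Vertx;
import io.vertx.core.shareddata.Counter;

public class CounterExample {
  public static void main(String[] args) {
    Vertx vertx = Vertx.vertx();

    vertx.sharedData().getCounter("requests", ar -> {
      if (ar.succeeded()) {
        Counter counter = ar.result();
        counter.incrementAndGet(incRes ->
            System.out.println("count = " + incRes.result()));   // 1 on the first increment
      }
    });
  }
}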

 

Cluster mode:

/**
 * Uses the DistributedAtomicLong recipe that ships with the ZooKeeper Curator client
 */
public void getCounter(String name, Handler<AsyncResult<Counter>> resultHandler) {
    // run the blocking task on the worker pool
    vertx.executeBlocking(future -> {
      try {
        Objects.requireNonNull(name);
        future.complete(new ZKCounter(name, retryPolicy));
      } catch (Exception e) {
        future.fail(new VertxException(e));
      }
    }, resultHandler);
}
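
For reference, the Curator recipe that ZKCounter delegates to can also be used on its own. A minimal sketch; the class name, connection string, and counter path are illustrative.

import org.apache.curator.framework.CuratorFramework;
import org.apache.curator.framework.CuratorFrameworkFactory;
import org.apache.curator.framework.recipes.atomic.AtomicValue;
import org.apache.curator.framework.recipes.atomic.DistributedAtomicLong;
import org.apache.curator.retry.ExponentialBackoffRetry;

public class CuratorCounterSketch {
  public static void main(String[] args) throws Exception {
    CuratorFramework curator = CuratorFrameworkFactory.newClient(
        "localhost:2181", new ExponentialBackoffRetry(1000, 3));  // illustrative connection settings
    curator.start();

    // the distributed atomic long recipe; the counter path is illustrative
    DistributedAtomicLong counter = new DistributedAtomicLong(
        curator, "/vertx/counters/requests", new ExponentialBackoffRetry(1000, 3));

    AtomicValue<Long> result = counter.increment();
    if (result.succeeded()) {
      System.out.println("count = " + result.postValue());
    }

    curator.close();
  }
}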

 
