Data Structures and Algorithms: ID Generation Algorithms

ID requirements:

Generated IDs can also serve as a mapping target for string user IDs.
  Uniqueness: every generated ID is unique across the entire system.
  Ordered and increasing: for a given user or business line, IDs increase numerically in a defined order.
  Time-embedded: the ID carries a timestamp, so one glance tells you which day a transaction happened.

Unique IDs

 MySQL's auto-increment primary key (auto_increment):
  IDs are unique across the whole system, numeric, trend-increasing, short, and fast to query.
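For illustration only, a minimal JDBC sketch of inserting a row and reading back the auto-increment key. The table t_order, its columns, and the connection settings are assumptions, not part of the original post.

import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;

public class AutoIncrementDemo {
    public static void main(String[] args) throws Exception {
        // Assumed table: CREATE TABLE t_order (id BIGINT PRIMARY KEY AUTO_INCREMENT, biz_no VARCHAR(64));
        try (Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost:3306/test", "root", "password");
             PreparedStatement ps = conn.prepareStatement(
                     "INSERT INTO t_order (biz_no) VALUES (?)",
                     Statement.RETURN_GENERATED_KEYS)) {
            ps.setString(1, "ORDER-20201208-0001");
            ps.executeUpdate();
            try (ResultSet keys = ps.getGeneratedKeys()) {
                if (keys.next()) {
                    // The database assigns the unique, increasing id.
                    System.out.println("generated id = " + keys.getLong(1));
                }
            }
        }
    }
}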

Distributed ID generation algorithms

Leaf: Meituan-Dianping's distributed ID generation system
    1) Leaf-segment scheme: generates globally unique, globally ordered IDs (see the segment sketch below);
    2) Leaf-snowflake scheme: generates globally unique, locally ordered IDs.
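To make the segment idea concrete, here is a minimal in-memory sketch, not the actual Leaf implementation: whenever the current segment is exhausted, a new range of `step` IDs is reserved from a simulated database counter. The class name SegmentIdGenerator and its fields are illustrative assumptions.

import java.util.concurrent.atomic.AtomicLong;

public class SegmentIdGenerator {
    // Simulates the max_id column that Leaf-segment keeps in a database table.
    private final AtomicLong dbMaxId = new AtomicLong(0);
    private final long step;     // how many IDs are reserved per segment
    private long current;        // next ID to hand out
    private long segmentEnd;     // exclusive upper bound of the current segment

    public SegmentIdGenerator(long step) {
        this.step = step;
        this.segmentEnd = 0;     // forces a fetch on first use
    }

    public synchronized long nextId() {
        if (current >= segmentEnd) {
            // Stands in for "UPDATE ... SET max_id = max_id + step" in the real scheme.
            segmentEnd = dbMaxId.addAndGet(step);
            current = segmentEnd - step;
        }
        return current++;
    }

    public static void main(String[] args) {
        SegmentIdGenerator gen = new SegmentIdGenerator(1000);
        for (int i = 0; i < 5; i++) {
            System.out.println(gen.nextId());   // prints 0, 1, 2, 3, 4
        }
    }
}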
Snowflake algorithm (Twitter): distributed unique IDs
   There are many distributed ID generation algorithms; Twitter's SnowFlake is one of the classics.
   The background of Twitter-Snowflake is straightforward: to handle tens of thousands of messages per second, every message had to be assigned a unique ID, those IDs needed to be roughly ordered so that Twitter could index and retrieve them, and IDs produced by different machines in Twitter's huge distributed system still had to differ from one another.
   Snowflake's logic is equally simple: it produces a 64-bit positive integer (1 sign bit, 41 bits of millisecond timestamp, 10 bits of worker identity split into data center and machine, and a 12-bit sequence), which is then shown as a decimal number.
UidGenerator is Baidu's open-source distributed ID generator, an implementation based on the snowflake algorithm.

Sample code

Code found online; try implementing it yourself when you have time.

import java.util.ArrayList;
import java.util.HashMap;
import java.util.Iterator;
import java.util.Map;

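/**
 * Twitter snowflake-style generator. Layout of the 64-bit ID, from high to low bits:
 * timestamp offset from START_STMP | 5-bit datacenterId | 5-bit machineId | 12-bit sequence.
 */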
public class SnowFlake {
    /**
     * Starting timestamp (custom epoch)
     */
    private final static long START_STMP = 1602298613000L;
    /**
     * datacenterId;   // data center ID
     * machineId;      // machine (worker) ID
     * sequence = 0L;  // sequence number within the current millisecond
     * lastStmp = -1L; // timestamp of the last generated ID
     */
    private long datacenterId;
    private long machineId;
    private long sequence = 0L;
    private long lastStmp = -1L;

    /**
     * Number of bits occupied by each part:
     * SEQUENCE_BIT = 12;   // bits for the sequence number
     * MACHINE_BIT = 5;     // bits for the machine ID
     * DATACENTER_BIT = 5;  // bits for the data center ID
     */
    private final static long SEQUENCE_BIT = 12;
    private final static long MACHINE_BIT = 5;
    private final static long DATACENTER_BIT = 5;

    /**
     * Maximum value of each part
     */
    private final static long MAX_SEQUENCE = -1L ^ (-1L << SEQUENCE_BIT);
    private final static long MAX_MACHINE_NUM = -1L ^ (-1L << MACHINE_BIT);
    private final static long MAX_DATACENTER_NUM = -1L ^ (-1L << DATACENTER_BIT);

    /**
     * Left shift applied to each part
     */
    private final static long MACHINE_LEFT = SEQUENCE_BIT;
    private final static long DATACENTER_LEFT = SEQUENCE_BIT + MACHINE_BIT;
    private final static long TIMESTMP_LEFT = DATACENTER_LEFT + DATACENTER_BIT;


    public SnowFlake(long datacenterId, long machineId) {
        if (datacenterId > MAX_DATACENTER_NUM || datacenterId < 0) {
            throw new IllegalArgumentException("datacenterId can't be greater than MAX_DATACENTER_NUM or less than 0");
        }
        if (machineId > MAX_MACHINE_NUM || machineId < 0) {
            throw new IllegalArgumentException("machineId can't be greater than MAX_MACHINE_NUM or less than 0");
        }
        this.datacenterId = datacenterId;
        this.machineId = machineId;
    }

    /**
     * Generate the next ID.
     * @return the next unique ID
     */
    public synchronized long nextId() {
        long currStmp = getNewstmp();
        if (currStmp < lastStmp) {
            throw new RuntimeException("Clock moved backwards.  Refusing to generate id");
        }
        if (currStmp == lastStmp) {
            // same millisecond: increment the sequence number
            sequence = (sequence + 1) & MAX_SEQUENCE;
            // the sequence for this millisecond is exhausted; wait for the next millisecond
            if (sequence == 0L) {
                currStmp = getNextMill();
            }
        } else {
            // new millisecond: reset the sequence number to 0
            sequence = 0L;
        }

        lastStmp = currStmp;
        return (currStmp - START_STMP) << TIMESTMP_LEFT
                | datacenterId << DATACENTER_LEFT
                | machineId << MACHINE_LEFT
                | sequence;
    }

    private long getNextMill() {
        long mill = getNewstmp();
        while (mill <= lastStmp) {
            mill = getNewstmp();
        }
        return mill;
    }

    private long getNewstmp() {
        return System.currentTimeMillis();
    }

//==============================Test=============================================
    /** Test */
    public static void main(String[] args) {
        SnowFlake idWorker = new SnowFlake(0, 0);
        for (int i = 0; i < 10; i++) {
            long id = idWorker.nextId();
            // System.out.println(Long.toBinaryString(id));
            System.out.println(id);
        }
        // ID-mapping:
        //   mem_cd --> Long   (memDirMap)
        //   Long   --> mem_cd (reverseMemDirMap)
        Map<String, Long> memDirMap = new HashMap<String, Long>();
        Map<Long, String> reverseMemDirMap = new HashMap<Long, String>();
        /**
         * SnowFlake algorithm instance
         */
        SnowFlake idWorkerMy = new SnowFlake(1, 1);
        ArrayList<String> objList = new ArrayList<String>();
        /**
         * Test data
         */
        objList.add("Mem2020111166");
        objList.add("Mem2020111166");
        objList.add("Mem2020111199");
        //
        long row_id;
        for (String mem_st : objList) {
            System.out.println(mem_st);
            if(!memDirMap.containsKey(mem_st)) {
                row_id = idWorkerMy.nextId();
                memDirMap.put(mem_st, row_id);
                reverseMemDirMap.put(row_id, mem_st);
            }
        }
        /**
         * Inspect the data
         */
        System.out.println("############");
        Iterator<Map.Entry<String, Long> > entries = memDirMap.entrySet().iterator();
        while (entries.hasNext()) {
            Map.Entry<String, Long>  entry = entries.next();
            System.out.println("遍历方法二 Key = " + entry.getKey() + ", Value = " + entry.getValue());
        }
        for(Map.Entry<Long, String> entry : reverseMemDirMap.entrySet()) {
            System.out.println("遍历方法一:key ="+entry.getKey()+" Value="+entry.getValue());
        }
    }

}
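
As a follow-up sketch (my own addition, not from the original post), the ID produced by the SnowFlake class above can be decoded back into its parts using the same bit widths: 12-bit sequence, 5-bit machine, 5-bit data center, and the remaining bits for the timestamp offset.

public class SnowFlakeDecoder {
    // Must match the constants in SnowFlake above.
    private static final long START_STMP = 1602298613000L;
    private static final long SEQUENCE_BIT = 12;
    private static final long MACHINE_BIT = 5;
    private static final long DATACENTER_BIT = 5;

    public static void main(String[] args) {
        long id = new SnowFlake(1, 3).nextId();
        long sequence = id & ((1L << SEQUENCE_BIT) - 1);
        long machineId = (id >> SEQUENCE_BIT) & ((1L << MACHINE_BIT) - 1);
        long datacenterId = (id >> (SEQUENCE_BIT + MACHINE_BIT)) & ((1L << DATACENTER_BIT) - 1);
        long timestamp = (id >> (SEQUENCE_BIT + MACHINE_BIT + DATACENTER_BIT)) + START_STMP;
        System.out.println("timestamp=" + timestamp
                + " datacenterId=" + datacenterId
                + " machineId=" + machineId
                + " sequence=" + sequence);
    }
}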

References:

Flink deduplication, part 4: exact dedup with bitmap: https://blog.csdn.net/u013516966/article/details/103951787/
Exact deduplication with RoaringBitmap: https://blog.csdn.net/lao000bei/article/details/105725579
https://blog.csdn.net/bjweimengshu/article/details/80162731
https://www.cnblogs.com/jiangxinlingdu/p/8440413.html