Part 2: Hands-on with Kafka Features Using Confluent.Kafka -- Code Only
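All of the snippets in this post are ASP.NET Core controller actions. They rely on a few shared members that the post itself never shows, so here is a minimal sketch of what they are assumed to look like; the field names `Configuration`, `distributedCache` and `topicName1` are inferred from how the actions use them, and the `OrderCreateDto` shape is purely hypothetical:

    using Confluent.Kafka;
    using Confluent.Kafka.Admin;
    using Microsoft.AspNetCore.Mvc;
    using Microsoft.Extensions.Caching.Distributed;
    using Microsoft.Extensions.Configuration;
    using System.Text.Json;

    [ApiController]
    [Route("[controller]/[action]")]
    public class KafkaController : ControllerBase
    {
        // "kafkaUrl" is assumed to hold the broker list, e.g. "localhost:9092"
        private readonly IConfiguration Configuration;

        // Used by the later consumer samples to persist consumed offsets
        private readonly IDistributedCache distributedCache;

        // Topic used throughout the producer/consumer samples (the name is an assumption)
        private readonly string topicName1 = "order-topic";

        public KafkaController(IConfiguration configuration, IDistributedCache distributedCache)
        {
            Configuration = configuration;
            this.distributedCache = distributedCache;
        }

        // ... the actions shown below go here ...
    }

    // Hypothetical order DTO; the real properties are not shown in the post
    public class OrderCreateDto
    {
        public string OrderId { get; set; }
        public decimal Amount { get; set; }
    }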

I. Topics and Partitions

1. Create a topic (one partition by default)

        /// <summary>
        /// Create a topic (one partition by default)
        /// </summary>
        /// <param name="topicName">Name of the topic</param>
        /// <returns></returns>
        [HttpPost]
        public async Task CreateTopic(string topicName)
        {
            AdminClientConfig adminClientConfig = new()
            {
                BootstrapServers = Configuration["kafkaUrl"],
            };

            // Dispose the admin client when the call completes
            using var adminClient = new AdminClientBuilder(adminClientConfig).Build();
            await adminClient.CreateTopicsAsync(new TopicSpecification[] {
                                         new TopicSpecification { Name = topicName }
                                       });
        }
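`CreateTopicsAsync` throws a `CreateTopicsException` if the topic already exists (or if anything else goes wrong). A hedged sketch of how the "already exists" case could be ignored while other errors still surface; treat the exact shape of `Results` as something to verify against your Confluent.Kafka package version:

            try
            {
                await adminClient.CreateTopicsAsync(new[] { new TopicSpecification { Name = topicName } });
            }
            catch (CreateTopicsException ex)
            {
                // Swallow "topic already exists", rethrow everything else (requires using System.Linq;)
                if (ex.Results.Any(r => r.Error.Code != ErrorCode.TopicAlreadyExists))
                {
                    throw;
                }
            }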

2. Create a topic with partitions (for when the topic does not yet exist)

        /// <summary>
        /// Create a topic with partitions (use when the topic does not yet exist)
        /// </summary>
        /// <param name="topicName">Name of the topic</param>
        /// <param name="PartitionCount">Number of partitions</param>
        /// <returns></returns>
        [HttpPost]
        public async Task CreateTopicPartition(string topicName, int PartitionCount)
        {
            AdminClientConfig adminClientConfig = new()
            {
                BootstrapServers = Configuration["kafkaUrl"],
            };

            // Await the admin call directly instead of blocking with .Wait()
            using var adminClient = new AdminClientBuilder(adminClientConfig).Build();
            await adminClient.CreateTopicsAsync(new TopicSpecification[] {
                    new TopicSpecification { Name = topicName, NumPartitions = PartitionCount }
                });
        }
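On a multi-broker cluster you would normally also set a replication factor; `TopicSpecification` exposes `ReplicationFactor` for that. A small variant (the value 3 is only an example and must not exceed the number of brokers):

            await adminClient.CreateTopicsAsync(new TopicSpecification[] {
                    new TopicSpecification { Name = topicName, NumPartitions = PartitionCount, ReplicationFactor = 3 }
                });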

3. Update the partition count (for an existing topic; Kafka only allows increasing it)

        /// <summary>
        /// Update the partition count (for an existing topic; the count can only be increased)
        /// </summary>
        /// <param name="topicName">Name of the topic</param>
        /// <param name="PartitionCount">Target number of partitions</param>
        /// <returns></returns>
        [HttpPost]
        public async Task PartitionUpdate(string topicName, int PartitionCount)
        {
            AdminClientConfig adminClientConfig = new()
            {
                BootstrapServers = Configuration["kafkaUrl"],
            };
            using var adminClient = new AdminClientBuilder(adminClientConfig).Build();
            await adminClient.CreatePartitionsAsync(new PartitionsSpecification[] {
                    new PartitionsSpecification { Topic = topicName, IncreaseTo = PartitionCount }
                });
        }
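To confirm the new partition count actually took effect, the admin client's metadata call can be queried afterwards. A minimal sketch, assuming the `GetMetadata(topic, timeout)` overload (requires `using System.Linq;`):

            using var adminClient = new AdminClientBuilder(new AdminClientConfig
            {
                BootstrapServers = Configuration["kafkaUrl"]
            }).Build();

            // Ask the broker for metadata about this one topic and print its current partition count
            var metadata = adminClient.GetMetadata(topicName, TimeSpan.FromSeconds(10));
            var topic = metadata.Topics.FirstOrDefault(t => t.Topic == topicName);
            Console.WriteLine($"Topic {topicName} currently has {topic?.Partitions.Count} partitions");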

 

II. Producers

1. Create an order - random partition

        /// <summary>
        /// Create an order - random partition (the partitioner picks the partition)
        /// </summary>
        [HttpPost]
        public async Task CreateOrder1(OrderCreateDto orderCreateDto)
        {
            var producerConfig = new ProducerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                MessageTimeoutMs = 50000,
                EnableIdempotence = true
            };
            var builder = new ProducerBuilder<string, string>(producerConfig);
            using (var producer = builder.Build())
            {
                try
                {
                    var orderJson = JsonSerializer.Serialize(orderCreateDto);
                    var dr = await producer.ProduceAsync(topicName1, new Message<string, string> { Key = "order", Value = orderJson });

                    Console.WriteLine($"Message {dr.Value} delivered to {dr.TopicPartitionOffset}");
                }
                catch (ProduceException<string, string> ex)
                {
                    Console.WriteLine($"Delivery failed: {ex.Error.Reason}");
                }
            }
        }
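`ProduceAsync` waits for one delivery report per call. For higher throughput, Confluent.Kafka also offers the fire-and-forget `Produce` overload with a delivery callback, combined with `Flush` before the producer is disposed. A minimal sketch, reusing `producerConfig` and the serialized `orderJson` from the action above:

            using var producer = new ProducerBuilder<string, string>(producerConfig).Build();

            // Queue the message and receive the delivery result via callback instead of awaiting each send
            producer.Produce(topicName1, new Message<string, string> { Key = "order", Value = orderJson },
                report =>
                {
                    if (report.Error.IsError)
                        Console.WriteLine($"Delivery failed: {report.Error.Reason}");
                    else
                        Console.WriteLine($"Delivered to {report.TopicPartitionOffset}");
                });

            // Block for up to 10 seconds for outstanding messages before disposing the producer
            producer.Flush(TimeSpan.FromSeconds(10));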

2. Create an order - specific partition

        /// <summary>
        /// Create an order - send to a specific partition
        /// </summary>
        [HttpPost]
        public async Task CreateOrder2(OrderCreateDto orderCreateDto)
        {
            var producerConfig = new ProducerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                MessageTimeoutMs = 50000
            };
            var builder = new ProducerBuilder<string, string>(producerConfig);
            using (var producer = builder.Build())
            {
                try
                {
                    var orderJson = JsonSerializer.Serialize(orderCreateDto);
                    // Target partition 2 of the topic explicitly
                    TopicPartition topicPartition = new(topicName1, new Partition(2));
                    var dr = await producer.ProduceAsync(topicPartition, new Message<string, string> { Key = "order", Value = orderJson });

                    Console.WriteLine($"Message {dr.Value} delivered to {dr.TopicPartitionOffset}");
                }
                catch (ProduceException<string, string> ex)
                {
                    Console.WriteLine($"Delivery failed: {ex.Error.Reason}");
                }
            }
        }
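One caveat for both producer samples: with the fixed key "order", the default partitioner hashes every message to the same partition, so the "random partition" variant only spreads messages across partitions if the keys differ (or are null). A sketch that keys each message by its order id instead, so all messages for one order stay on one partition while different orders spread out (`OrderId` is the hypothetical property from the DTO sketch at the top):

            var message = new Message<string, string>
            {
                // Distinct keys spread orders across partitions; messages for one order
                // still hash to the same partition, preserving per-order ordering
                Key = orderCreateDto.OrderId,
                Value = JsonSerializer.Serialize(orderCreateDto)
            };
            var dr = await producer.ProduceAsync(topicName1, message);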

 

III. Consumers

1. Automatic offset commit

 

        /// <summary>
        /// 01 Consumer -- automatic offset commit
        /// </summary>
        [HttpPost]
        public void OrderConsume1()
        {
            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = true      // commit offsets automatically
            };
            var builder = new ConsumerBuilder<string, string>(consumerConfig);
            using (var consumer = builder.Build())
            {
                // 1. Subscribe
                consumer.Subscribe(topicName1);
                while (true)
                {
                    try
                    {
                        // 2. Consume (auto commit); Consume returns a single message, hence the while loop
                        var result = consumer.Consume();
                        string key = result.Message.Key;
                        string value = result.Message.Value;
                        Console.WriteLine($"Consumed message: Key: {key}, Order: {value}");
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine($"Exception: Order: {e}");
                    }
                }
            }
        }
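The `while (true)` loop above never exits, so the consumer is never closed cleanly. A hedged sketch of the same loop driven by a CancellationToken, with a final `Close()` so the group rebalances promptly on shutdown (`stoppingToken` is assumed to come from, say, a BackgroundService or `HttpContext.RequestAborted`):

            using var consumer = new ConsumerBuilder<string, string>(consumerConfig).Build();
            consumer.Subscribe(topicName1);
            try
            {
                while (!stoppingToken.IsCancellationRequested)
                {
                    // Consume(CancellationToken) throws OperationCanceledException when cancellation is requested
                    var result = consumer.Consume(stoppingToken);
                    Console.WriteLine($"Consumed message: Key: {result.Message.Key}, Order: {result.Message.Value}");
                }
            }
            catch (OperationCanceledException)
            {
                // Expected during shutdown
            }
            finally
            {
                // Leave the group cleanly so partitions are reassigned without waiting for the session timeout
                consumer.Close();
            }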

 

2. Manual offset commit

 

        /// <summary>
        /// 02 Consumer -- manual offset commit
        /// </summary>
        [HttpPost]
        public void OrderConsume2()
        {
            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = false      // disable automatic offset commit
            };
            var builder = new ConsumerBuilder<string, string>(consumerConfig);
            using (var consumer = builder.Build())
            {
                // 1. Subscribe
                consumer.Subscribe(topicName1);
                while (true)
                {
                    try
                    {
                        // 2. Consume (offsets are not committed automatically); Consume returns one message, hence the loop
                        var result = consumer.Consume();
                        string key = result.Message.Key;
                        string value = result.Message.Value;
                        Console.WriteLine($"Consumed message: Key: {key}, Order: {value}");

                        // 3. Manually commit the offset back to Kafka
                        consumer.Commit(result);
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine($"Exception: Order: {e}");
                    }
                }
            }
        }
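Calling `Commit` after every message is safe but costs one broker round trip per message. Confluent.Kafka also supports keeping auto-commit enabled while disabling the automatic offset *store*: the code calls `StoreOffset` only after processing succeeds, and the background committer then only ever commits processed offsets. A minimal sketch of that variant:

            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = true,         // periodic background commit of *stored* offsets
                EnableAutoOffsetStore = false    // but we decide when an offset counts as processed
            };
            using var consumer = new ConsumerBuilder<string, string>(consumerConfig).Build();
            consumer.Subscribe(topicName1);
            while (true)
            {
                var result = consumer.Consume();
                Console.WriteLine($"Consumed message: Key: {result.Message.Key}, Order: {result.Message.Value}");

                // Mark this offset as processed; the auto-committer picks it up on its next interval
                consumer.StoreOffset(result);
            }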

 

3. Reset the offset -- causes repeated consumption

 

        /// <summary>
        /// 03 Reset the offset -- (messages get consumed repeatedly)
        /// </summary>
        [HttpPost]
        public void OrderConsume3()
        {
            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = true      // commit offsets automatically
            };
            var builder = new ConsumerBuilder<string, string>(consumerConfig);
            using (var consumer = builder.Build())
            {
                // 1. Subscribe
                consumer.Subscribe(topicName1);
                // 2. Reset the offset and start from 1 -- earlier messages are consumed again
                consumer.Assign(new TopicPartitionOffset(new TopicPartition(topicName1, 0), 1));
                while (true)
                {
                    try
                    {
                        // 3. Consume (auto commit)
                        var result = consumer.Consume();

                        // 4. Print the offset
                        Console.WriteLine($"Message offset: Offset: {result.Offset}");

                        // 5. Business logic
                        string key = result.Message.Key;
                        string value = result.Message.Value;
                        Console.WriteLine($"Consumed message: Key: {key}, Order: {value}");

                        // 6. Manual commit back to Kafka (not used here)
                        //consumer.Commit(result);
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine($"Exception: Order: {e}");
                    }
                }
            }
        }
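Instead of a hard-coded offset of 1, the `Offset` struct also exposes the special values `Offset.Beginning` and `Offset.End`, which are convenient for replaying a whole partition or skipping straight to new messages:

            // Replay partition 0 from the very beginning
            consumer.Assign(new TopicPartitionOffset(new TopicPartition(topicName1, 0), Offset.Beginning));

            // Or start at the end, ignoring everything already in the partition
            // consumer.Assign(new TopicPartitionOffset(new TopicPartition(topicName1, 0), Offset.End));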

 

4. Store the offset in a cache to avoid repeated consumption

 

        /// <summary>
        /// 04 Store the offset externally; here it is kept temporarily in the in-memory cache
        /// (in a real scenario it would live in Redis).
        /// Storing it in Redis has its own pitfall: the offset may be saved successfully while the
        /// business logic then fails; wrapping both in a DB transaction can solve that.
        /// </summary>
        [HttpPost]
        public void OrderConsume4()
        {
            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = false      // disable automatic offset commit
            };
            var builder = new ConsumerBuilder<string, string>(consumerConfig);
            using (var consumer = builder.Build())
            {
                // 1. Subscribe
                consumer.Subscribe(topicName1);

                // Read the last consumed offset from the cache
                string offset = distributedCache.GetString(topicName1);
                if (string.IsNullOrEmpty(offset))
                {
                    offset = "0";
                }

                // 2. Reset the offset to the next unprocessed message
                consumer.Assign(new TopicPartitionOffset(new TopicPartition(topicName1, 0), int.Parse(offset) + 1));
                while (true)
                {
                    try
                    {
                        // 3. Consume
                        var result = consumer.Consume();

                        // 4. Print the offset
                        Console.WriteLine($"Message offset: Offset: {result.Offset}");

                        // Save the offset to the cache
                        distributedCache.SetString(topicName1, result.Offset.Value.ToString());

                        // 5. Business logic
                        string key = result.Message.Key;
                        string value = result.Message.Value;
                        Console.WriteLine($"Consumed message: Key: {key}, Order: {value}");

                        // 6. Manually commit the offset back to Kafka
                        consumer.Commit(result);
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine($"Exception: Order: {e}");
                    }
                }
            }
        }

 

5. Consume from a random partition

 

        /// <summary>
        /// 05 Consume from a random partition
        /// (no specific partition is requested, yet messages are still consumed from whichever partition holds data)
        /// </summary>
        [HttpPost]
        public void OrderConsume5()
        {
            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = false      // disable automatic offset commit
            };
            var builder = new ConsumerBuilder<string, string>(consumerConfig);
            using (var consumer = builder.Build())
            {
                // 1. Subscribe
                consumer.Subscribe(topicName1);

                // Read the last consumed offset from the cache
                string offset = distributedCache.GetString(topicName1);
                if (string.IsNullOrEmpty(offset))
                {
                    offset = "0";
                }

                // 2. Reset the offset
                consumer.Assign(new TopicPartitionOffset(new TopicPartition(topicName1, 0), int.Parse(offset) + 1));
                while (true)
                {
                    try
                    {
                        // 3. Consume
                        var result = consumer.Consume();

                        // 4. Print the offset
                        Console.WriteLine($"Message offset: Offset: {result.Offset}");

                        // Save the offset to the cache
                        distributedCache.SetString(topicName1, result.Offset.Value.ToString());

                        // 5. Business logic
                        string key = result.Message.Key;
                        string value = result.Message.Value;
                        Console.WriteLine($"Consumed message: Key: {key}, Order: {value}");

                        // 6. Manually commit the offset back to Kafka
                        consumer.Commit(result);
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine($"Exception: Order: {e}");
                    }
                }
            }
        }

 

6. Consume from a specific partition

 

        /// <summary>
        /// 06 Consume from a specific partition
        /// </summary>
        [HttpPost]
        public void OrderConsume6()
        {
            var consumerConfig = new ConsumerConfig
            {
                BootstrapServers = Configuration["kafkaUrl"],
                AutoOffsetReset = AutoOffsetReset.Earliest,
                GroupId = "order",
                EnableAutoCommit = false      // disable automatic offset commit
            };
            var builder = new ConsumerBuilder<string, string>(consumerConfig);
            using (var consumer = builder.Build())
            {
                // 1. Subscribe
                consumer.Subscribe(topicName1);

                // Read the last consumed offset from the cache
                string offset = distributedCache.GetString(topicName1);
                if (string.IsNullOrEmpty(offset))
                {
                    offset = "0";
                }

                // 2. Reset the offset and consume from partition 2
                consumer.Assign(new TopicPartitionOffset(new TopicPartition(topicName1, 2), int.Parse(offset) + 1));
                while (true)
                {
                    try
                    {
                        // 3. Consume
                        var result = consumer.Consume();

                        // 4. Print the offset
                        Console.WriteLine($"Message offset: Offset: {result.Offset}");

                        // Save the offset to the cache
                        distributedCache.SetString(topicName1, result.Offset.Value.ToString());

                        // 5. Business logic
                        string key = result.Message.Key;
                        string value = result.Message.Value;
                        Console.WriteLine($"Consumed message: Key: {key}, Order: {value}");

                        // 6. Manually commit the offset back to Kafka
                        consumer.Commit(result);
                    }
                    catch (Exception e)
                    {
                        Console.WriteLine($"Exception: Order: {e}");
                    }
                }
            }
        }
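`Assign` also accepts a collection, so a single consumer can pin several partitions at once, each with its own starting offset. A small sketch (the offsets here are illustrative only):

            // Pin partitions 0 and 2 explicitly, each with its own starting position
            consumer.Assign(new[]
            {
                new TopicPartitionOffset(new TopicPartition(topicName1, 0), Offset.Beginning),
                new TopicPartitionOffset(new TopicPartition(topicName1, 2), int.Parse(offset) + 1)
            });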

  • Author: Yaopengfei (姚鹏飞)
  • Blog: http://www.cnblogs.com/yaopengfei/
  • Note 1: If you spot any mistakes, discussion is welcome; please keep it civil ^_^.
  • Note 2: When reposting this original article, please keep the link to the original or add my blog address at the top; otherwise I reserve the right to pursue legal liability.
 