.net core 下使用 Kafka(二)
上篇文章介绍了 Docker 下安装 Kafka,这篇文章介绍如何在 .NET Core 下使用 Kafka。
项目结构
KafkaCommom 类库项目
KafkaWebAPI .net core webapi 项目
KafKaWorker 为 work service 辅助角色服务
项目引用
Kafka 操作类库
Confluent.Kafka
微软分布式缓存 redis 扩展
Microsoft.Extensions.Caching.Redis
以下是 KafkaCommom 类库项目 的各个类
namespace KafkaCommom
{
    /// <summary>
    /// Binding target for the "KafkaOptions" configuration section.
    /// </summary>
    public class KafkaOptions
    {
        /// <summary>
        /// Kafka broker address list, e.g. "host1:9092,host2:9092".
        /// </summary>
        public string BootstrapServers { get; set; }
    }
}
namespace KafkaCommom
{
    /// <summary>
    /// Topic names shared by the producer (WebAPI) and consumer (Worker).
    /// </summary>
    public class KafkaTopic
    {
        // const instead of a public mutable static field: the name is a
        // compile-time constant and was never reassigned; a writable public
        // static could be silently overwritten at runtime.
        public const string Topic = "create-order";
    }
}
重点说明:消费者在消费消息时,需要向 Kafka 发送"消息已收到"的确认,类似 RabbitMQ 的 ack 机制。但存在一个问题:如果在发送消息确认时 Kafka 宕机,重启后会出现重复消费消息的情况。所以这里需要将消息偏移量 offset+1 存储到 Redis 里,用于偏移量重置,这样就不会出现消息被重复消费。
以下是代码
using Confluent.Kafka;
using Microsoft.Extensions.Caching.Distributed;
using Microsoft.Extensions.Logging;
using Microsoft.Extensions.Options;
using System;
using System.Threading;

namespace KafkaCommom
{
    /// <summary>
    /// Thin wrapper around Confluent.Kafka producing and consuming.
    /// Consumer progress is persisted in Redis (next offset to consume) so a
    /// restart resumes after the last processed message instead of relying on
    /// broker-side commits, which can replay messages if the broker dies
    /// between processing and commit.
    /// </summary>
    public class KafKaSevice
    {
        private readonly KafkaOptions _kafkaOptions;
        private readonly object _lock = new object();
        private readonly ILogger<KafKaSevice> _logger;
        // Redis-backed cache used to persist the consumer offset.
        private readonly IDistributedCache _distributedCache;

        // Lazily-built, cached configs (see Create*Connection below).
        private ProducerConfig _producerConfig;
        private ConsumerConfig _consumerConfig;

        // Round-robin partition counter shared across producer instances.
        private static int requestCount = 0;

        public KafKaSevice(IOptionsMonitor<KafkaOptions> optionsMonitor,
            ILogger<KafKaSevice> logger, IDistributedCache distributedCache)
        {
            this._kafkaOptions = optionsMonitor.CurrentValue;
            this._logger = logger;
            this._distributedCache = distributedCache;
        }

        /// <summary>
        /// Returns the cached producer configuration, building it once.
        /// BUG FIX: the original never assigned <c>_producerConfig</c>, so the
        /// null check and lock were ineffective and a fresh config was created
        /// on every call. Proper double-checked locking is used here.
        /// </summary>
        public ProducerConfig CreateProducerConnection()
        {
            if (_producerConfig == null)
            {
                lock (_lock)
                {
                    if (_producerConfig == null)
                    {
                        _producerConfig = new ProducerConfig
                        {
                            BootstrapServers = _kafkaOptions.BootstrapServers,
                        };
                    }
                }
            }
            return _producerConfig;
        }

        /// <summary>
        /// Returns the cached consumer configuration, building it once.
        /// Same double-checked-locking fix as <see cref="CreateProducerConnection"/>.
        /// </summary>
        public ConsumerConfig CreateConsumerConnection()
        {
            if (_consumerConfig == null)
            {
                lock (_lock)
                {
                    if (_consumerConfig == null)
                    {
                        _consumerConfig = new ConsumerConfig
                        {
                            BootstrapServers = _kafkaOptions.BootstrapServers,
                        };
                    }
                }
            }
            return _consumerConfig;
        }

        /// <summary>
        /// Synchronously sends <paramref name="msg"/> to the shared topic.
        /// Delivery failures are logged, not rethrown.
        /// </summary>
        /// <param name="msg">Message payload.</param>
        public void Send(string msg)
        {
            ProducerConfig producerConfig = this.CreateProducerConnection();
            producerConfig.MessageTimeoutMs = 5000;  // give up on delivery after 5s
            producerConfig.EnableIdempotence = true; // broker-side dedup: retries cannot duplicate messages
            var builder = new ProducerBuilder<string, string>(producerConfig);
            builder.SetDefaultPartitioner(RoundRobinPartitioner);
            using (var producer = builder.Build())
            {
                try
                {
                    // Blocking send; the delivery report carries the assigned
                    // topic/partition/offset.
                    var dr = producer.ProduceAsync(KafkaTopic.Topic,
                        new Message<string, string> { Key = "order-1", Value = msg })
                        .GetAwaiter().GetResult();
                    _logger.LogInformation("发送事件 {0} 到 {1} 成功", dr.Value, dr.TopicPartitionOffset);
                }
                catch (ProduceException<string, string> ex)
                {
                    _logger.LogError(ex, "发送事件到 {0} 失败,原因 {1} ", "order", ex.Error.Reason);
                }
            }
        }

        /// <summary>
        /// Blocking consume loop over partition 0 of the shared topic, resuming
        /// at the offset persisted in Redis. After each message is processed the
        /// NEXT offset (current + 1) is written back, so a crash before the
        /// write replays that message (at-least-once).
        /// BUG FIXES vs. original: Subscribe() was called inside the loop while
        /// also using Assign() (conflicting assignment modes); received messages
        /// were logged via LogError; auto-commit was left enabled despite the
        /// Redis-offset design; the offset was parsed as int (overflow risk).
        /// </summary>
        public void Reveice()
        {
            ConsumerConfig consumerConfig = this.CreateConsumerConnection();
            // Earliest: with no committed offset, start from the beginning;
            // Latest: with no committed offset, only consume newly produced data.
            // Mostly irrelevant here because Assign() pins an explicit offset.
            consumerConfig.AutoOffsetReset = AutoOffsetReset.Earliest;
            consumerConfig.GroupId = "order";
            // Offsets are tracked in Redis, not committed to the broker.
            consumerConfig.EnableAutoCommit = false;
            var builder = new ConsumerBuilder<string, string>(consumerConfig);

            // Next offset to consume, persisted by the previous run; 0 on first run.
            string redisValue = this._distributedCache.GetString(KafkaTopic.Topic);
            long nextOffset = 0;
            if (!string.IsNullOrEmpty(redisValue))
            {
                nextOffset = long.Parse(redisValue);
            }

            using (var consumer = builder.Build())
            {
                // Explicit assignment to partition 0 at the persisted offset.
                // NOTE(review): hard-coded single partition — confirm the topic
                // really has one partition, otherwise other partitions are never read.
                consumer.Assign(new TopicPartitionOffset(KafkaTopic.Topic, new Partition(0), nextOffset));
                while (true)
                {
                    var result = consumer.Consume();
                    _logger.LogInformation($"订单消息偏移量:Offset:{result.Offset}");
                    string key = result.Key;
                    string value = result.Value;
                    this._logger.LogInformation($" 收到消息了:{value} / offset:{result.Offset}/Partition: {result.Partition} ");
                    // Persist the next offset AFTER processing so a crash
                    // replays this message rather than skipping it.
                    _distributedCache.SetString(KafkaTopic.Topic, (result.Offset.Value + 1).ToString());
                }
            }
        }

        /// <summary>
        /// Round-robin partitioner: spreads messages evenly across partitions.
        /// BUG FIX: the unsynchronized requestCount++ is replaced with
        /// Interlocked.Increment (producers may run on multiple threads).
        /// </summary>
        /// <param name="topic">Topic being produced to.</param>
        /// <param name="partitionCount">Number of partitions in the topic.</param>
        /// <param name="keyData">Serialized message key (unused).</param>
        /// <param name="keyIsNull">Whether the key is null (unused).</param>
        /// <returns>The partition to write to.</returns>
        private Partition RoundRobinPartitioner(string topic, int partitionCount,
            ReadOnlySpan<byte> keyData, bool keyIsNull)
        {
            int count = Interlocked.Increment(ref requestCount) - 1;
            return new Partition(count % partitionCount);
        }
    }
}
KafkaWebAPI 项目
using KafkaCommom;
using Microsoft.Extensions.Configuration;

var builder = WebApplication.CreateBuilder(args);

// MVC controllers + Swagger.
// Learn more about configuring Swagger/OpenAPI at https://aka.ms/aspnetcore/swashbuckle
builder.Services.AddControllers();
builder.Services.AddEndpointsApiExplorer();
builder.Services.AddSwaggerGen();

// Bind Kafka settings and register the Redis-backed IDistributedCache that
// KafKaSevice uses for consumer-offset storage.
builder.Services.Configure<KafkaOptions>(builder.Configuration.GetSection("KafkaOptions"));
builder.Services.AddDistributedRedisCache(options =>
{
    options.Configuration = builder.Configuration.GetValue<string>("redis");
});
builder.Services.AddTransient<KafKaSevice>();

var app = builder.Build();

// Swagger UI only while developing.
if (app.Environment.IsDevelopment())
{
    app.UseSwagger();
    app.UseSwaggerUI();
}

app.UseHttpsRedirection();
app.UseAuthorization();
app.MapControllers();

app.Run();
using KafkaCommom;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Mvc;
using Microsoft.Extensions.Options;

namespace KafkaWebAPI.Controllers
{
    /// <summary>
    /// Test endpoint that publishes a message to Kafka via <see cref="KafKaSevice"/>.
    /// </summary>
    [Route("api/[controller]")]
    [ApiController]
    public class KafkaController : ControllerBase
    {
        private readonly KafKaSevice _kafKaSevice;

        public KafkaController(KafKaSevice kafKaSevice) => _kafKaSevice = kafKaSevice;

        /// <summary>
        /// Sends <paramref name="msg"/> to the Kafka topic and returns an empty string.
        /// NOTE(review): a side-effecting endpoint would conventionally be POST —
        /// kept as GET so existing callers keep working.
        /// </summary>
        [HttpGet]
        public string Get(string msg)
        {
            _kafKaSevice.Send(msg);
            return "";
        }
    }
}
KafKaWorker 项目
using KafkaCommom;
using KafKaWorker;
using Microsoft.Extensions.Caching.Distributed;

// BUG FIX: the original resolved IConfiguration by calling
// services.BuildServiceProvider() inside ConfigureServices, which builds a
// second service provider (duplicated singletons; flagged as ASP0000). The
// (context, services) overload exposes the host configuration directly.
IHost host = Host.CreateDefaultBuilder(args)
    .ConfigureServices((context, services) =>
    {
        IConfiguration configuration = context.Configuration;
        services.Configure<KafkaOptions>(configuration.GetSection("KafkaOptions"));
        services.AddHostedService<Worker>();
        services.AddTransient<KafKaSevice>();
        // Redis-backed IDistributedCache used by KafKaSevice for offset storage.
        string redisCon = configuration.GetValue<string>("redis");
        services.AddDistributedRedisCache(options =>
        {
            options.Configuration = redisCon;
        });
    })
    .Build();

await host.RunAsync();
using KafkaCommom;

namespace KafKaWorker
{
    /// <summary>
    /// Hosted service that runs the blocking Kafka consume loop.
    /// </summary>
    public class Worker : BackgroundService
    {
        private readonly ILogger<Worker> _logger;
        private readonly KafKaSevice _kafKaSevice;

        public Worker(ILogger<Worker> logger, KafKaSevice kafKaSevice)
        {
            _logger = logger;
            _kafKaSevice = kafKaSevice;
        }

        /// <summary>
        /// BUG FIX: the original called Reveice() synchronously. That method
        /// never returns (infinite consume loop), so ExecuteAsync blocked the
        /// host's startup path — no other hosted services would start and
        /// graceful shutdown would hang. The loop now runs on a dedicated
        /// long-running thread so startup completes immediately.
        /// </summary>
        protected override Task ExecuteAsync(CancellationToken stoppingToken)
        {
            return Task.Factory.StartNew(
                () => _kafKaSevice.Reveice(),
                stoppingToken,
                TaskCreationOptions.LongRunning,
                TaskScheduler.Default);
        }
    }
}