Kafka basic usage
1. Configure application.properties
server.port=8080
spring.kafka.bootstrap-servers=192.168.9.137:9092

# producer
spring.kafka.producer.retries=3
spring.kafka.producer.batch-size=16384
spring.kafka.producer.buffer-memory=33554432
spring.kafka.producer.acks=1
# custom key, read via @Value in KafkaConfig below
spring.kafka.producer.linger=10
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer

# consumer
spring.kafka.consumer.group-id=default-group
spring.kafka.consumer.max-poll-records=2000
spring.kafka.consumer.enable-auto-commit=false
spring.kafka.consumer.auto-offset-reset=earliest
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer

spring.kafka.listener.ack-mode=MANUAL_IMMEDIATE
2. Kafka configuration class

package com.example.kafkademo;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.AdminClientConfig;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.common.serialization.StringDeserializer;
import org.apache.kafka.common.serialization.StringSerializer;
import org.springframework.beans.factory.annotation.Value;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.kafka.annotation.EnableKafka;
import org.springframework.kafka.config.ConcurrentKafkaListenerContainerFactory;
import org.springframework.kafka.config.KafkaListenerContainerFactory;
import org.springframework.kafka.core.*;
import org.springframework.kafka.listener.ContainerProperties;

import java.util.HashMap;
import java.util.Map;

/**
 * @Author: John
 * @Date: 2021.11.3 16:27
 */
@Configuration
@EnableKafka
public class KafkaConfig {

    @Value("${spring.kafka.bootstrap-servers}")
    private String bootstrapServers;
    @Value("${spring.kafka.consumer.group-id}")
    private String groupId;
    @Value("${spring.kafka.consumer.enable-auto-commit}")
    private Boolean autoCommit;
    @Value("${spring.kafka.consumer.auto-offset-reset}")
    private String autoOffsetReset;
    @Value("${spring.kafka.consumer.max-poll-records}")
    private Integer maxPollRecords;
    @Value("${spring.kafka.producer.linger}")
    private int linger;
    @Value("${spring.kafka.producer.retries}")
    private Integer retries;
    @Value("${spring.kafka.producer.batch-size}")
    private Integer batchSize;
    @Value("${spring.kafka.producer.buffer-memory}")
    private Integer bufferMemory;

    // Reference: https://blog.csdn.net/tmeng521/article/details/90901925
    public Map<String, Object> producerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        // Number of retries on a failed send
        props.put(ProducerConfig.RETRIES_CONFIG, retries);
        // A batch is sent once it reaches batchSize bytes
        props.put(ProducerConfig.BATCH_SIZE_CONFIG, batchSize);
        // Linger time: when it elapses, the batch is sent even if it has not reached batchSize
        props.put(ProducerConfig.LINGER_MS_CONFIG, linger);
        // Total memory available for buffering unsent records
        props.put(ProducerConfig.BUFFER_MEMORY_CONFIG, bufferMemory);
        // Serializers
        props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class);
        // Producer acknowledgment: -1 and "all" both mean the message must be written
        // not only to the leader but also to the corresponding replicas.
        // For a single broker, "1" is recommended.
        props.put(ProducerConfig.ACKS_CONFIG, "-1");
        // Maximum size of a single request in bytes (default 1048576)
        props.put(ProducerConfig.MAX_REQUEST_SIZE_CONFIG, 10485760);
        // Broker response timeout: if the broker does not acknowledge within
        // 60 seconds, the send is considered failed
        props.put(ProducerConfig.REQUEST_TIMEOUT_MS_CONFIG, 60000);
        // Interceptors (value is the interceptor class)
        //props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, "com.te.handler.KafkaProducerInterceptor");
        // Compression algorithm (no compression by default)
        props.put(ProducerConfig.COMPRESSION_TYPE_CONFIG, "snappy");
        return props;
    }

    // A Kafka admin bean, the counterpart of RabbitMQ's rabbitAdmin; without
    // this bean an AdminClient cannot be used to create topics programmatically.
    @Bean
    public KafkaAdmin kafkaAdmin() {
        Map<String, Object> props = new HashMap<>();
        // Address of the Kafka brokers (not ZooKeeper)
        props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        return new KafkaAdmin(props);
    }

    // Kafka admin client; once registered as a Spring bean it can be injected and
    // used to create topics, e.g. topics with multiple replicas in a cluster.
    @Bean
    public AdminClient adminClient() {
        return AdminClient.create(kafkaAdmin().getConfigurationProperties());
    }

    @Bean
    public ProducerFactory<String, String> producerFactory() {
        return new DefaultKafkaProducerFactory<>(producerConfigs());
    }

    @Bean
    public KafkaTemplate<String, String> kafkaTemplate() {
        return new KafkaTemplate<>(producerFactory());
    }

    @Bean
    public Map<String, Object> consumerConfigs() {
        Map<String, Object> props = new HashMap<>();
        props.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, autoCommit);
        props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, autoOffsetReset);
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, maxPollRecords);
        // props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 180000);
        // props.put(ConsumerConfig.REQUEST_TIMEOUT_MS_CONFIG, 900000);
        // props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 900000);
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class);
        return props;
    }

    @Bean
    public KafkaListenerContainerFactory<?> batchFactory() {
        ConcurrentKafkaListenerContainerFactory<String, String> factory =
                new ConcurrentKafkaListenerContainerFactory<>();
        factory.setConsumerFactory(new DefaultKafkaConsumerFactory<>(consumerConfigs()));
        // Batch consumption; the batch size is bounded by ConsumerConfig.MAX_POLL_RECORDS_CONFIG
        factory.setBatchListener(true);
        // set the retry template
        // factory.setRetryTemplate(retryTemplate());
        factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
        return factory;
    }
}
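The batchFactory above enables batch consumption with manual offset commits, but no matching listener is shown. Below is a minimal sketch of one, reusing the my-replicated-topic topic and default-group group from the configuration; the KafkaConsumer class name itself is illustrative:

package com.example.kafkademo;

import java.util.List;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

@Component
public class KafkaConsumer {

    // Batch listener wired to the batchFactory bean above; each invocation
    // receives up to max-poll-records records from one poll.
    @KafkaListener(topics = "my-replicated-topic", groupId = "default-group",
            containerFactory = "batchFactory")
    public void listen(List<ConsumerRecord<String, String>> records, Acknowledgment ack) {
        for (ConsumerRecord<String, String> record : records) {
            System.out.println("partition=" + record.partition()
                    + " offset=" + record.offset() + " value=" + record.value());
        }
        // MANUAL ack mode: commit the batch's offsets only after processing
        // succeeds; if the listener throws before this call, the offsets stay
        // uncommitted and the batch is redelivered.
        ack.acknowledge();
    }
}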
3. Send messages and create topics via a controller

package com.example.kafkademo;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.CreateTopicsResult;
import org.apache.kafka.clients.admin.ListTopicsResult;
import org.apache.kafka.clients.admin.NewTopic;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.web.bind.annotation.RequestMapping;
import org.springframework.web.bind.annotation.RestController;

import java.util.Arrays;
import java.util.Set;
import java.util.concurrent.ExecutionException;

@RestController
public class KafkaController {

    private final static String TOPIC_NAME = "my-replicated-topic";

    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;

    @Autowired
    private AdminClient adminClient;

    // Send a message to partition 0 of the topic
    @RequestMapping("/send")
    public void send() {
        kafkaTemplate.send(TOPIC_NAME, 0, "key", "this is a msg");
    }

    // Create a topic with 1 partition and replication factor 1, then list all topic names
    @RequestMapping("/create_topic")
    public Set<String> createTopic() throws InterruptedException, ExecutionException {
        NewTopic topic = new NewTopic("topic.manual.create", 1, (short) 1);
        CreateTopicsResult topics = adminClient.createTopics(Arrays.asList(topic));
        // Give the broker a moment to finish creating the topic
        Thread.sleep(1000);
        ListTopicsResult listTopics = adminClient.listTopics();
        Set<String> topicSet = listTopics.names().get();
        for (String name : topicSet) {
            System.out.println(name);
        }
        return topicSet;
    }
}
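Because the producer factory sets acks=-1, a send is only acknowledged after the leader and its in-sync replicas have the record, and kafkaTemplate.send() is asynchronous. In spring-kafka 2.x (current when this was written), send() returns a ListenableFuture<SendResult>, so delivery can be confirmed with a callback; in spring-kafka 3.x it returns a CompletableFuture instead. A hedged sketch of an extra endpoint that could be added to KafkaController above (the /send_callback path and method name are illustrative):

    // Same send as /send, but logs the broker's acknowledgment
    // (spring-kafka 2.x ListenableFuture API assumed).
    @RequestMapping("/send_callback")
    public void sendWithCallback() {
        kafkaTemplate.send(TOPIC_NAME, 0, "key", "this is a msg")
                .addCallback(
                        result -> System.out.println("sent: partition="
                                + result.getRecordMetadata().partition()
                                + ", offset=" + result.getRecordMetadata().offset()),
                        ex -> System.err.println("send failed: " + ex.getMessage()));
    }

Hitting /send (or /send_callback) and then /create_topic in a browser exercises the producer and the AdminClient respectively; any message sent to my-replicated-topic should then show up in the batch listener's output.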