Kafka in Spring Boot

Preparation

  • Linux

    1. Disable the firewall
    2. Verify connectivity: telnet <IP> <port> (e.g. telnet 192.168.0.191 9092)
  • Dependencies

<dependency>
  <groupId>org.springframework.boot</groupId>
  <artifactId>spring-boot-starter-web</artifactId>
</dependency>
  
<dependency>
  <groupId>org.springframework.kafka</groupId>
  <artifactId>spring-kafka</artifactId>
</dependency>
  
<dependency>
  <groupId>log4j</groupId>
  <artifactId>log4j</artifactId>
  <version>1.2.17</version>
</dependency>
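
With spring-kafka on the classpath, Spring Boot can also auto-configure the client, so the raw-client code in the next section has a shorter Spring equivalent. A minimal sketch, assuming the broker address and topic used later in this post; the SpringKafkaDemo class name and the property values are illustrative, not part of the original setup:

// application.properties (assumed values, matching the broker used below):
// spring.kafka.bootstrap-servers=192.168.0.191:9092
// spring.kafka.consumer.group-id=group.demo

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class SpringKafkaDemo {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public SpringKafkaDemo(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    // Send a record to the "jpy" topic (key "kafka-demo", value "hello 123")
    public void send() {
        kafkaTemplate.send("jpy", "kafka-demo", "hello 123");
    }

    // Receive records from the "jpy" topic
    @KafkaListener(topics = "jpy", groupId = "group.demo")
    public void listen(String message) {
        System.out.println(message);
    }
}

Spring Boot defaults the key/value (de)serializers to the String variants, which is why no serializer configuration is needed here.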

Code

Producer
import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.common.serialization.StringSerializer;

public class ProducerFastStart {
    // Separate multiple brokers with commas
    private static final String brokerList = "192.168.0.191:9092";
    private static final String topic = "jpy";

    public static void main(String[] args) {
        Properties properties = new Properties();
        // 1. Set the key serializer
        //properties.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Set the number of retries
        properties.put(ProducerConfig.RETRIES_CONFIG, 10);

        // Set the value serializer
        //properties.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        properties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());

        // Set the cluster (bootstrap servers) address
        //properties.put("bootstrap.servers", brokerList);
        properties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(properties);
        // The full constructor takes up to 6 arguments (topic, partition, timestamp, key, value, headers)
        ProducerRecord<String, String> record = new ProducerRecord<>(topic, "kafka-demo", "hello 123");

        try {
            producer.send(record);
        } catch (Exception e) {
            e.printStackTrace();
        } finally {
            producer.close();
        }

    }
}
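
Note that send() is asynchronous and returns a Future, so the try/catch above only catches errors raised while queuing the record, not broker-side failures. A minimal sketch of a synchronous send and a callback-based send, assuming the same producer and record as above (this fragment would replace the try/finally block):

import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.RecordMetadata;

try {
    // Synchronous send: block until the broker acknowledges, then inspect the result
    RecordMetadata metadata = producer.send(record).get();
    System.out.println(metadata.topic() + "-" + metadata.partition() + ":" + metadata.offset());

    // Asynchronous send with a callback: exception is non-null only if this send failed
    producer.send(record, (meta, exception) -> {
        if (exception != null) {
            exception.printStackTrace();
        } else {
            System.out.println("async send ok, offset=" + meta.offset());
        }
    });
} catch (InterruptedException | ExecutionException e) {
    e.printStackTrace();
} finally {
    producer.close();
}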
Consumer
import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.serialization.StringDeserializer;

public class ConsumerFastStart {
    private static final String brokerList = "192.168.0.191:9092";
    private static final String topic = "jpy";
    private static final String groupId = "group.demo";

    public static void main(String[] args) {
        Properties properties = new Properties();
        // Key deserializer
        //properties.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Value deserializer
        //properties.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());

        // Cluster (bootstrap servers) address
        //properties.put("bootstrap.servers", brokerList);
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, brokerList);

        // Consumer group; an exception is thrown if it is not specified
        //properties.put("group.id", groupId);
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);

        // Create the consumer
        KafkaConsumer<String, String> consumer = new KafkaConsumer<>(properties);

        // Subscribe to the topic
        consumer.subscribe(Collections.singletonList(topic));
        // Poll for messages
        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.println(record.value());
            }
        }
    }
}
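
With the configuration above, enable.auto.commit defaults to true, so offsets are committed automatically in the background. If offsets should only advance after the records have actually been processed, a minimal sketch of manual commits, assuming the same consumer setup as above (only the changed lines are shown):

// Disable auto-commit so offsets only advance after records have been processed
properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, false);

while (true) {
    ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
    for (ConsumerRecord<String, String> record : records) {
        System.out.println(record.value());
    }
    // Commit the offsets returned by this poll before polling again
    consumer.commitSync();
}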

Common broker configuration parameters

config/server.properties

zookeeper.connect
  • For a ZooKeeper cluster, separate the addresses with commas
listeners
  • Listener list: the IP addresses and ports the broker binds to when serving clients; separate multiple entries with commas.

  • If a listener name is not a security protocol, listener.security.protocol.map must also be set.

  • Set the hostname to 0.0.0.0 to bind to all interfaces; leave the hostname empty to bind to the default interface.

    Examples: PLAINTEXT://myhost:9092,SSL://:9091 or CLIENT://0.0.0.0:9092,REPLICATION://localhost:9093

broker.id
  • Unique identifier of the broker. If not configured, it is generated automatically; configuring it explicitly and keeping it unique within the cluster is recommended. Default: -1
log.dirs
  • Directories where log (message) data is stored; if not set, log.dir is used. Configuring this explicitly is recommended
message.max.bytes
  • Maximum size of a single message the broker will accept. Default: 1000012 bytes, roughly 976.6 KB
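
For reference, a minimal server.properties sketch combining the parameters above; the host, ZooKeeper address, and log directory are assumptions chosen to match the broker address used earlier, not values from the original post:

# Unique broker id within the cluster
broker.id=0
# Listener the broker binds to
listeners=PLAINTEXT://192.168.0.191:9092
# Where message log data is stored
log.dirs=/tmp/kafka-logs
# ZooKeeper connection string (comma-separated for a cluster)
zookeeper.connect=localhost:2181
# Maximum size of a single message accepted by the broker (default)
message.max.bytes=1000012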