springboot整合kafka

maven配置:

        
<properties>
<java.version>1.8</java.version>
<oracle.version>11.2.0.4</oracle.version>
</properties>

<!--kafka-->
<dependency>
<groupId>org.springframework.kafka</groupId>
<artifactId>spring-kafka</artifactId>
<version>2.3.4.RELEASE</version>
</dependency>

<!--oracle-->
<dependency>
<groupId>com.oracle.database.jdbc</groupId>
<artifactId>ojdbc6</artifactId>
<version>${oracle.version}</version>
</dependency>

  

yml:

#省内集中调度平台资源查询
server:
  port: 9080

spring:
  datasource:
    driver-class-name: oracle.jdbc.OracleDriver
#    url: jdbc:oracle:thin:@10.216.86.211:1521/irmsdb
#    username: hbrmw6
#    password: ZIYhbrmw6.184
    # NOTE(review): plaintext default credentials are committed here — move them to
    # environment variables / a secret store before publishing this config.
    url: ${rmsdb_url:jdbc:oracle:thin:@10.110.74.166:1521/rmsdb}
    username: ${rmsdb_username:uirms_yn}
    password: ${rmsdb_password:uirms_yn}
  kafka:
    bootstrap-servers: 127.0.0.1:9092 # kafka集群信息
    producer: # 生产者配置
      retries: 3 # 设置大于0的值,则客户端会将发送失败的记录重新发送
      batch-size: 16384 #16K
      buffer-memory: 33554432 #32M
      acks: 1
      # 指定消息key和消息体的编解码方式
      key-serializer: org.apache.kafka.common.serialization.StringSerializer
      value-serializer: org.apache.kafka.common.serialization.StringSerializer
    consumer:
      group-id: zhTestGroup # 消费者组
      enable-auto-commit: false # 关闭自动提交
      auto-offset-reset: earliest # 当各分区下有已提交的offset时,从提交的offset开始消费;无提交的offset时,从头开始消费
      key-deserializer: org.apache.kafka.common.serialization.StringDeserializer
      value-deserializer: org.apache.kafka.common.serialization.StringDeserializer
    listener:
      # 当每一条记录被消费者监听器(ListenerConsumer)处理之后提交
      # RECORD
      # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后提交
      # BATCH
      # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后,距离上次提交时间大于TIME时提交
      # TIME
      # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后,被处理record数量大于等于COUNT时提交
      # COUNT
      # TIME | COUNT 有一个条件满足时提交
      # COUNT_TIME
      # 当每一批poll()的数据被消费者监听器(ListenerConsumer)处理之后, 手动调用Acknowledgment.acknowledge()后提交
      # MANUAL
      # 手动调用Acknowledgment.acknowledge()后立即提交,一般使用这种
      # MANUAL_IMMEDIATE
      ack-mode: manual_immediate

生产者:

package com.inspur.resource.module.kafka;

import com.alibaba.fastjson.JSON;
import com.inspur.resource.util.DataShareUtil;
import lombok.extern.slf4j.Slf4j;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.jdbc.core.JdbcTemplate;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.scheduling.annotation.Scheduled;
import org.springframework.stereotype.Component;

import javax.annotation.PostConstruct;
import java.util.Arrays;
import java.util.List;
import java.util.Map;

/**
 * Scheduled producer: polls the database for unprocessed access orders and
 * publishes each row to the "test" Kafka topic as a JSON string.
 *
 * @author :fengwenzhe
 * @date :Created in 2022/11/14 10:43
 */
@Component
@ConditionalOnProperty(value = "spring.profiles.active", havingValue = "kafka")
@Slf4j
public class KafkaTimer {
    @Autowired
    private KafkaTemplate<String, String> kafkaTemplate;
    @Autowired
    private JdbcTemplate jdbcTemplate;

    /**
     * Sends one Kafka message per pending access order (stateflag = '0').
     * Runs once at startup ({@code @PostConstruct}) and then every 20 seconds.
     *
     * NOTE(review): stateflag is never updated after a successful send, so the
     * same rows are re-published on every tick — confirm whether a downstream
     * consumer flips the flag; otherwise mark rows as sent here after the send
     * future completes.
     */
    @Scheduled(cron = "*/20 * * * * ?") // every 20 seconds
    //@Scheduled(cron = "0 */2 * * * ?") // every 2 minutes
    @PostConstruct
    public void loopSendKafka() {
        // Query for access-order rows that have not been published yet.
        String sql = "select * from RMS_KAFKA_ACCESS_ORDER where stateflag ='0' ";
        // queryForList never returns null; an empty result simply skips the loop.
        List<Map<String, Object>> list = this.jdbcTemplate.queryForList(sql);
        for (Map<String, Object> row : list) {
            // Parameterized logging avoids string concatenation when INFO is disabled.
            log.info("需要发送kafka消息的入网工单数据{}", row);
            kafkaTemplate.send("test", JSON.toJSONString(row));
        }
    }
}

消费者:

package com.inspur.resource.module.kafka;


import lombok.extern.slf4j.Slf4j;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.boot.autoconfigure.condition.ConditionalOnProperty;
import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.support.Acknowledgment;
import org.springframework.stereotype.Component;

import java.util.Optional;


@Component
@ConditionalOnProperty(value = "spring.profiles.active", havingValue = "kafka")
@Slf4j
public class KafkaConsumer {
    // The manual LoggerFactory field was removed: @Slf4j already generates `log`,
    // so keeping both duplicated the same logger under two names.

    /**
     * Consumes records from the "test" topic and logs each payload.
     * Group id defaults to the one configured in application.yml when the
     * {@code groupId} attribute is omitted.
     */
    @KafkaListener(topics = {"test"}, groupId = "${spring.kafka.consumer.group-id}")
    public void onMessage1(ConsumerRecord<?, ?> consumerRecord, Acknowledgment ack) {
        // value() may be null (e.g. tombstone records) — guard before logging.
        Object msg = consumerRecord.value();
        if (msg != null) {
            log.info("message:{}", msg);
        }
        // ack-mode is manual_immediate, so the offset is only committed when
        // acknowledge() is called; without it the record would be redelivered.
        // Acknowledge AFTER processing so that a processing failure leads to
        // redelivery (at-least-once) instead of silent message loss, which the
        // original ack-first ordering allowed.
        ack.acknowledge();
    }
}

 

posted @ 2022-11-14 16:45  陈扬天  阅读(276)  评论(0编辑  收藏  举报