kafka-producer Producer Example

The environment has already been deployed on the server.

The configuration file below is for reference only.

server:
  port: 1011
  servlet:
    context-path: /production

environment: dev
#environment: master

kafka-bootstrap-servers: 122.225.12.166:11456,122.225.12.166:11457,122.225.12.166:11458

topic-name: syncData
old-topic-name: oldSyncData

# table filter
subscribe-value: "mxalpha\\\\..*,mxalpha.b_quotationlist"

# canal address
canal-address: "192.168.8.146"
# canal port
canal-port: 11111

page: 1
# page size, 500 at most
size: 500

spring:
  datasource:
    dynamic:
      primary: db1 # default data source
      datasource:
        db1: # data source 1
          url:
          username:
          password:
          driver-class-name: com.mysql.cj.jdbc.Driver
      druid:
        initial-size: 1
        max-active: 20
        min-idle: 1
        max-wait: 60000
  autoconfigure:
    exclude: com.alibaba.druid.spring.boot.autoconfigure.DruidDataSourceAutoConfigure # disable Druid auto-configuration

topic-name must be kept consistent with the consumer side, otherwise the messages will never be received; a minimal consumer sketch follows.
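For reference only, a minimal consumer-side sketch, assuming the same topic-name (syncData) and bootstrap servers as the configuration above; the class name and group id are hypothetical, not part of the original project:

import java.time.Duration;
import java.util.Collections;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class SyncDataConsumer {
    public static void main(String[] args) {
        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG,
                "122.225.12.166:11456,122.225.12.166:11457,122.225.12.166:11458");
        props.put(ConsumerConfig.GROUP_ID_CONFIG, "syncDataGroup"); // hypothetical group id
        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG,
                "org.apache.kafka.common.serialization.StringDeserializer");

        try (KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props)) {
            consumer.subscribe(Collections.singletonList("syncData")); // must equal topic-name
            while (true) {
                ConsumerRecords<String, String> records = consumer.poll(Duration.ofSeconds(1));
                for (ConsumerRecord<String, String> record : records) {
                    System.out.println(record.value());
                }
            }
        }
    }
}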

subscribe-value controls which tables the producer listens to, i.e. whose change messages it intercepts.
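The value follows canal's subscribe filter syntax: a comma-separated list of Perl-style regular expressions. A few illustrative calls (connector is the CanalConnector created in the snippet below):

// canal subscribe filters are comma-separated, Perl-regex style
connector.subscribe(".*\\..*");                 // all tables in all schemas
connector.subscribe("mxalpha\\..*");            // every table in the mxalpha schema
connector.subscribe("mxalpha.b_quotationlist"); // a single table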

The canal ingestion method below can be used as a reference:

/**
     * canal ingestion method: pulls binlog events from canal and forwards them
     */
    public void run() {

        // create the connection
        CanalConnector connector = CanalConnectors.newSingleConnector(new InetSocketAddress(canalAddress,
                canalPort), "example", "", "");
        int batchSize = 1000;
        int emptyCount = 0;
        try {
            connector.connect();
            connector.subscribe(subscribeValue);
            connector.rollback();
            int totalEmptyCount = 120;
            while (emptyCount < totalEmptyCount) {
                Message message = connector.getWithoutAck(batchSize); // fetch up to batchSize entries without auto-ack
                long batchId = message.getId();
                int size = message.getEntries().size();
                if (batchId == -1 || size == 0) {
                    emptyCount++;
                    try {
                        Thread.sleep(1000);
                    } catch (InterruptedException e) {
                        e.printStackTrace();
                    }
                } else {
                    emptyCount = 0;
                    printEntry(message.getEntries());
                }
                connector.ack(batchId); // commit the acknowledgement
                // connector.rollback(batchId); // on failure, roll the batch back
            }
            logger.info("empty too many times, exit");
        } catch (Exception e) {
            // nothing in this try block throws InterruptedException/ExecutionException
            // (Thread.sleep is handled above), so catch broadly and log instead
            logger.error("canal sync error", e);
        } finally {
            connector.disconnect();
        }
    }
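The printEntry helper called above is not shown in the post. Below is a plausible sketch, modeled on the standard canal client example, that flattens each row change into a fastjson JSONObject; the Kafka hand-off at the end is assumed wiring, not confirmed code (assumes imports of java.util.List, com.alibaba.otter.canal.protocol.CanalEntry and com.alibaba.fastjson.JSONObject):

    private void printEntry(List<CanalEntry.Entry> entries) {
        for (CanalEntry.Entry entry : entries) {
            // skip transaction begin/end markers, they carry no row data
            if (entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONBEGIN
                    || entry.getEntryType() == CanalEntry.EntryType.TRANSACTIONEND) {
                continue;
            }
            CanalEntry.RowChange rowChange;
            try {
                rowChange = CanalEntry.RowChange.parseFrom(entry.getStoreValue());
            } catch (Exception e) {
                throw new RuntimeException("parse event error, data: " + entry, e);
            }
            String tableName = entry.getHeader().getTableName();
            String eventType = rowChange.getEventType().toString();
            for (CanalEntry.RowData rowData : rowChange.getRowDatasList()) {
                JSONObject json = new JSONObject();
                // the after-image columns hold the row state after INSERT/UPDATE
                for (CanalEntry.Column column : rowData.getAfterColumnsList()) {
                    json.put(column.getName(), column.getValue());
                }
                json.put("tabName", tableName);
                json.put("eventType", eventType);
                // sendJson(kafkaProducer, json); // assumed hand-off to the Kafka producer
            }
        }
    }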

Message sending (here it is an old-data sync built for a specific business requirement):

@GetMapping("sync4")
    public void getBAccountlist(){
        Properties props = new Properties();
        props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, kafkaServer);
        props.put(ProducerConfig.ACKS_CONFIG, "all");
        props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");

        // 2. build the interceptor chain
        List<String> interceptors = new ArrayList<>();
        interceptors.add("com.fumasoft.service.interceptor.CounterInterceptor");
        props.put(ProducerConfig.INTERCEPTOR_CLASSES_CONFIG, interceptors);
        // resolve the enterprise id
        if ("dev".equals(environment)) {
            cId = "1837412";
        }
        logger.info("producing messages cId: " + cId + " environment: " + environment + " page: " + page + " size: " + size);
        KafkaProducer<String, String> kafkaProducer = new KafkaProducer<>(props);

        //b_quotationlist
        page = 1;
        try {
            while (true){
                pageNo = (page - 1)*size;
                List<JSONObject> dataJson =bQuotationlistMapper.selectData(Integer.valueOf(cId), pageNo, size);
                if(dataJson == null || dataJson.isEmpty()){
                    break;
                }
                for (JSONObject quotationlist : dataJson) {
                    sendJson(kafkaProducer,
                            dataJson(quotationlist,"b_quotationlist"));
                }
                page+=1;
            }
        } catch (Exception e) {
            logger.error("b_quotationlist sync error", e);
        }

        // 4. close the producer
        kafkaProducer.close();
        page = 1;
    }

    // send a JSON payload to Kafka
    public void sendJson(KafkaProducer<String,String> kafkaProducer,JSONObject jsonObject){
        String jsonData = JSONObject.toJSONString(jsonObject);
        ProducerRecord<String, String> producerRecord = new ProducerRecord<>(oldTopicName, null, jsonData);
        kafkaProducer.send(producerRecord);
    }

    /**
     * @Author zh
     * @param dataJson
     * @param tabName table name
     * @return com.alibaba.fastjson.JSONObject
     **/
    public JSONObject dataJson(JSONObject dataJson,String tabName){
        dataJson.put("tabName",tabName);
        dataJson.put("eventType","INSERT");
        return dataJson;
    }
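The CounterInterceptor registered in the interceptor chain above is referenced only by its class name; what follows is a sketch of what such an interceptor typically looks like, assuming simple success/failure counting (only the class name comes from the post):

import java.util.Map;

import org.apache.kafka.clients.producer.ProducerInterceptor;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

public class CounterInterceptor implements ProducerInterceptor<String, String> {
    private long successCount = 0;
    private long errorCount = 0;

    @Override
    public ProducerRecord<String, String> onSend(ProducerRecord<String, String> record) {
        return record; // pass the record through unchanged
    }

    @Override
    public void onAcknowledgement(RecordMetadata metadata, Exception exception) {
        // called once per record from the producer I/O thread
        if (exception == null) {
            successCount++;
        } else {
            errorCount++;
        }
    }

    @Override
    public void close() {
        // runs when kafkaProducer.close() is called
        System.out.println("sent ok: " + successCount + ", failed: " + errorCount);
    }

    @Override
    public void configure(Map<String, ?> configs) {
        // no configuration needed for this sketch
    }
}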
