Calling Kafka from the Java API in broadcast mode

ZooKeeper and Kafka are already running.

The single-node server's IP is 192.168.80.128.

Add the following to Maven's pom.xml (kafka-clients provides the new producer API; kafka_2.11 provides the old high-level consumer API used below):

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka-clients</artifactId>
            <version>0.8.2.1</version>
        </dependency>

        <dependency>
            <groupId>org.apache.kafka</groupId>
            <artifactId>kafka_2.11</artifactId>
            <version>0.8.2.1</version>
        </dependency>

The code for KafkaProducerDemo is as follows:

package com.anjubao.weixin.web.weChat;

import java.util.Properties;
import java.util.concurrent.ExecutionException;

import org.apache.kafka.clients.producer.Callback;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;

/**
  @project:standardProject
  @class:KafkaProducerDemo.java
  @author:fuanyu E-mail:fuanyu163@163.com
  @date:2022-11-09 14:20:14
 */
public class KafkaProducerDemo {
    
    public static void main(String[] args) throws InterruptedException {
        /* 1. Connect to the cluster via a Properties config.
         * 2. Send data: topic "topic", String values.
         */
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.80.128:9092");
        // acks=0: the producer does not wait for any acknowledgement;
        // acks=1: the producer waits for the leader to write the record;
        // acks=all (or -1): the producer waits for all ISR replicas to write the record
        props.put("acks", "all");
        // number of retries on retriable send failures
        props.put("retries", 3);
        props.put("batch.size", 16384);
        props.put("linger.ms", 1);
        props.put("buffer.memory", 33554432);
        props.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        // maximum time to wait for a response after a send; the original used 30,
        // but 30 ms is far too short in practice - 30 s is more realistic
        props.put("request.timeout.ms", 30000);

        KafkaProducer<String, String> kafkaProducer =
                new KafkaProducer<String, String>(props);
        for (int i = 0; i < 10; i++) {
            // Sending needs a ProducerRecord; the minimal constructor is (String topic, V value)
            ProducerRecord<String, String> record =
                    new ProducerRecord<String, String>("topic", "order message " + i);
            // fire-and-forget: firstSend(kafkaProducer, record);
            // synchronous:     thirdSend(kafkaProducer, record);
            secondSend(kafkaProducer, record);
            Thread.sleep(100);
        }
        kafkaProducer.close();
    }

    // Fire-and-forget: send and ignore the result.
    private static void firstSend(KafkaProducer<String, String> kafkaProducer,
            ProducerRecord<String, String> record) {
        kafkaProducer.send(record);
    }

    // Asynchronous send: the callback fires once the broker responds.
    private static void secondSend(KafkaProducer<String, String> kafkaProducer,
            ProducerRecord<String, String> record) {
        kafkaProducer.send(record, new Callback() {
            @Override
            public void onCompletion(RecordMetadata metadata, Exception exception) {
                if (exception == null) {
                    System.out.println("message sent successfully");
                } else {
                    System.out.println("message send failed");
                }
            }
        });
    }
    
    // Synchronous send: get() blocks until the broker acknowledges the record.
    private static boolean thirdSend(KafkaProducer<String, String> kafkaProducer,
            ProducerRecord<String, String> record) {
        boolean result = false;
        try {
            RecordMetadata recordMetadata = kafkaProducer.send(record).get();
            if (recordMetadata != null) {
                result = true;
            }
        } catch (InterruptedException e) {
            e.printStackTrace();
        } catch (ExecutionException e) {
            e.printStackTrace();
        }
        return result;
    }

}
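
The demo's ProducerRecord carries only a value; with no key, the producer spreads records across partitions, so ordering across the topic is not guaranteed. ProducerRecord also has a (topic, key, value) constructor that routes all records with the same key to the same partition. A minimal sketch of that variant (my addition, not from the original post; the class name KeyedProducerSketch and the key "order-1" are made up), assuming the same broker and topic as above:

package com.anjubao.weixin.web.weChat;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;

public class KeyedProducerSketch {

    public static void main(String[] args) {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.80.128:9092");
        props.put("key.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");
        props.put("value.serializer",
                "org.apache.kafka.common.serialization.StringSerializer");

        KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
        try {
            for (int i = 0; i < 10; i++) {
                // records sharing the key "order-1" hash to the same partition,
                // so a consumer sees them in the order they were sent
                producer.send(new ProducerRecord<String, String>(
                        "topic", "order-1", "order message " + i));
            }
        } finally {
            // close() flushes any buffered records and releases network resources
            producer.close();
        }
    }
}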

The code for KafkaConsumerDemo is as follows:

package com.anjubao.weixin.web.weChat;

import java.util.List;
import java.util.Properties;
import java.util.UUID;
import java.util.concurrent.TimeUnit;

import kafka.consumer.Consumer;
import kafka.consumer.ConsumerConfig;
import kafka.consumer.ConsumerIterator;
import kafka.consumer.KafkaStream;
import kafka.consumer.Whitelist;
import kafka.javaapi.consumer.ConsumerConnector;
import kafka.message.MessageAndMetadata;

/**
  @project:standardProject
  @class:KafkaConsumerDemo.java
  @author:fuanyu E-mail:fuanyu163@163.com
  @date:2022-11-09 14:22:30
 */
public class KafkaConsumerDemo {
    
     
    public static void main(String[] args) throws Exception {
        Properties properties = new Properties();
        properties.put("zookeeper.connect", "192.168.80.128:2181");
        properties.put("auto.commit.enable", "true");
        properties.put("auto.commit.interval.ms", "60000");
        // Note: when several consumers start with the same groupId, only one of them
        // consumes each message; when each consumer has a different groupId, every
        // consumer receives every message - broadcast semantics.
        String groupId = UUID.randomUUID().toString();
        properties.put("group.id", groupId);

        ConsumerConfig consumerConfig = new ConsumerConfig(properties);
        ConsumerConnector javaConsumerConnector =
                Consumer.createJavaConsumerConnector(consumerConfig);
        System.out.println(groupId);

        // topic filter
        Whitelist whitelist = new Whitelist("topic");
        List<KafkaStream<byte[], byte[]>> partitions =
                javaConsumerConnector.createMessageStreamsByFilter(whitelist);

        if (partitions == null || partitions.isEmpty()) {
            System.out.println("empty!");
            TimeUnit.SECONDS.sleep(1);
        }

        // consume messages
        for (KafkaStream<byte[], byte[]> partition : partitions) {
            ConsumerIterator<byte[], byte[]> iterator = partition.iterator();
            while (iterator.hasNext()) {
                MessageAndMetadata<byte[], byte[]> next = iterator.next();
                System.out.println("partition:" + next.partition());
                System.out.println("offset:" + next.offset());
                System.out.println("received message: " + new String(next.message(), "utf-8"));
            }
        }
    }

}

 

After starting KafkaConsumerDemo twice and then running KafkaProducerDemo, both KafkaConsumerDemo consoles print the messages: because each consumer generated its own group id, every consumer receives every record, as summarised in the sketch below.
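
The broadcast behaviour comes entirely from the group.id choice. A minimal sketch of the two configurations (my addition; the helper name consumerProps and the group name "shared-group" are made up):

import java.util.Properties;
import java.util.UUID;

public class GroupIdSketch {

    // broadcast = true : each consumer process gets its own group, so every
    //                    process receives every message (the behaviour shown above).
    // broadcast = false: all processes share one group, so each message is
    //                    delivered to exactly one process in the group (queue semantics).
    static Properties consumerProps(boolean broadcast) {
        Properties props = new Properties();
        props.put("zookeeper.connect", "192.168.80.128:2181");
        props.put("auto.commit.enable", "true");
        props.put("group.id", broadcast ? UUID.randomUUID().toString() : "shared-group");
        return props;
    }
}

Passing either set of properties into ConsumerConfig as in KafkaConsumerDemo reproduces the corresponding behaviour.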

 
