Kafka Usage

Start Kafka:

sudo sh kafka-server-start.sh ../config/server.properties

Stop Kafka:

sudo sh kafka-server-stop.sh

List Kafka topics:

sudo sh kafka-topics.sh --zookeeper localhost:2181 --list   (related options: --describe, --create, --alter, --delete)

Change the number of partitions (note: Kafka only allows increasing a topic's partition count, never decreasing it):

 sudo sh kafka-topics.sh  --zookeeper localhost:2181 --alter --topic ono --partitions 3

Describe a specific topic:

 sudo sh kafka-topics.sh  --zookeeper localhost:2181 --describe  --topic hello
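
The same information is also available programmatically through the AdminClient API in kafka-clients (which talks to the brokers directly rather than to ZooKeeper). A minimal sketch, assuming a local broker and the topic "hello" from above:

package com.yangpeng.admin;

import java.util.Collections;
import java.util.Properties;
import java.util.Set;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.admin.TopicDescription;

public class KafkaAdminDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        // Placeholder broker address -- replace with your own.
        props.put("bootstrap.servers", "localhost:9092");

        try (AdminClient admin = AdminClient.create(props)) {
            // Equivalent of: kafka-topics.sh --list
            Set<String> topics = admin.listTopics().names().get();
            System.out.println("Topics: " + topics);

            // Equivalent of: kafka-topics.sh --describe --topic hello
            TopicDescription desc = admin.describeTopics(Collections.singleton("hello"))
                    .all().get().get("hello");
            System.out.println("Partitions of 'hello': " + desc.partitions().size());
        }
    }
}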


Basic usage of the Kafka producer in Java:

package com.yangpeng.producer;

import java.util.Properties;

import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;

public class KafkaProducerDemo {
    private final KafkaProducer<String, String> producer;
    private final Logger logger = LoggerFactory.getLogger(KafkaProducerDemo.class);

    public KafkaProducerDemo() {
        Properties kafkaProps = new Properties();
        // Broker address (masked in the original post).
        kafkaProps.put("bootstrap.servers", "***.**.**.30:9092");
        // Keys and values are plain strings, so use the String serializer for both.
        kafkaProps.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        kafkaProps.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
        producer = new KafkaProducer<String, String>(kafkaProps);
    }

    public void sendMessage() {
        ProducerRecord<String, String> record =
                new ProducerRecord<String, String>("test", "key", "this is value from topic 'test' ,is ok!");
        try {
            producer.send(record);
        } catch (Exception e) {
            logger.error("failed to send record", e);
        } finally {
            // Flush buffered records and release resources before the JVM exits.
            producer.close();
        }
    }

    public static void main(String[] args) {
        new KafkaProducerDemo().sendMessage();
    }
}
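
Note that producer.send() is asynchronous: the try/catch above only sees errors raised while the record is being enqueued locally. To observe broker-side failures, pass a Callback as the second argument (a sketch of the same send as above; requires the additional imports org.apache.kafka.clients.producer.Callback and org.apache.kafka.clients.producer.RecordMetadata):

// Asynchronous send with a callback: onCompletion fires once the broker
// acknowledges the record (metadata set) or the send fails (exception set).
producer.send(record, new Callback() {
    @Override
    public void onCompletion(RecordMetadata metadata, Exception exception) {
        if (exception != null) {
            logger.error("send failed", exception);
        } else {
            logger.info("sent to partition {} at offset {}",
                    metadata.partition(), metadata.offset());
        }
    }
});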

One thing to watch out for: if SLF4J complains that no logger binding can be found, add the following Maven dependencies:

<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-api</artifactId>
    <version>1.8.0-beta0</version>
</dependency>

<dependency>
    <groupId>org.slf4j</groupId>
    <artifactId>slf4j-simple</artifactId>
    <version>1.8.0-beta0</version>
</dependency>

 

Basic usage of the Kafka consumer:

package com.yangpeng.consumer;

import java.time.Duration;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.Properties;

import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;

public class KafkaConsumerDemo {
    Properties props = new Properties();

    public void initProps() {
        props.put("bootstrap.servers", "119.23.226.30:9092");
        props.put("group.id", "test");
        props.put("enable.auto.commit", "false"); // commit offsets manually, not automatically
        props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
        props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
    }

    public void receiveMsg() {
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);
        consumer.subscribe(Arrays.asList("test"));

        // Buffer records and commit offsets only after a minimum batch has been processed.
        final int minBatchSize = 200;
        List<ConsumerRecord<String, String>> buffer = new ArrayList<ConsumerRecord<String, String>>();

        while (true) {
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMinutes(1));
            for (ConsumerRecord<String, String> record : records) {
                buffer.add(record);
                System.out.println(record.toString());
            }
            if (buffer.size() >= minBatchSize) {
                // Commit only once the whole batch has been handled.
                consumer.commitAsync();
                buffer.clear();
            }
        }
    }

    public static void main(String[] args) {
        KafkaConsumerDemo demo = new KafkaConsumerDemo();
        demo.initProps();
        demo.receiveMsg();
    }
}
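commitAsync() does not retry on failure, so a common pattern is to use fast async commits inside the loop and one final blocking commitSync() on shutdown. A minimal sketch of that pattern for the consumer above (the running flag and process() handler are illustrative placeholders):

// Fast async commits in the loop, one sync commit on shutdown.
try {
    while (running) { // hypothetical shutdown flag
        ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(100));
        for (ConsumerRecord<String, String> record : records) {
            process(record); // hypothetical per-record handler
        }
        consumer.commitAsync(); // non-blocking, no retry on failure
    }
} finally {
    try {
        consumer.commitSync(); // blocking, retries until success or fatal error
    } finally {
        consumer.close();
    }
}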

Foolproof Spring Kafka configuration (application.properties):

spring.kafka.bootstrap-servers=192.168.1.180:9092
# Default consumer group
spring.kafka.consumer.group-id=0
# Key/value serializers and deserializers
spring.kafka.consumer.key-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.consumer.value-deserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.kafka.producer.key-serializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.producer.value-serializer=org.apache.kafka.common.serialization.StringSerializer
# Producer batch size, in bytes (not a message count)
spring.kafka.producer.batch-size=65536
# Memory available for buffering unsent records, in bytes
spring.kafka.producer.buffer-memory=524288
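
With these properties and the spring-kafka dependency in place, Spring Boot auto-configures a KafkaTemplate for producing, and @KafkaListener methods receive records without any manual poll loop. A minimal sketch (class and method names here are illustrative):

import org.springframework.kafka.annotation.KafkaListener;
import org.springframework.kafka.core.KafkaTemplate;
import org.springframework.stereotype.Component;

@Component
public class HelloKafka {

    private final KafkaTemplate<String, String> kafkaTemplate;

    public HelloKafka(KafkaTemplate<String, String> kafkaTemplate) {
        this.kafkaTemplate = kafkaTemplate;
    }

    // Producer side: KafkaTemplate is auto-configured from the properties above.
    public void send(String message) {
        kafkaTemplate.send("test", message);
    }

    // Consumer side: Spring polls in the background and invokes this per record.
    @KafkaListener(topics = "test")
    public void listen(String message) {
        System.out.println("received: " + message);
    }
}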

 

Note:

The old ConsumerOffsetChecker tool has been removed.

Use the following command instead:

bin/kafka-consumer-groups.sh --bootstrap-server 192.168.47.133:9092 --describe --group test-consumer-group

This prints, for each partition the group consumes, the current committed offset, the log-end offset, and the lag.
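
The same committed offsets can be read programmatically via AdminClient (available since Kafka 2.0; the broker address and group name below are taken from the command above):

import java.util.Map;
import java.util.Properties;

import org.apache.kafka.clients.admin.AdminClient;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;

public class GroupOffsetsDemo {
    public static void main(String[] args) throws Exception {
        Properties props = new Properties();
        props.put("bootstrap.servers", "192.168.47.133:9092");

        try (AdminClient admin = AdminClient.create(props)) {
            // Equivalent of: kafka-consumer-groups.sh --describe --group test-consumer-group
            Map<TopicPartition, OffsetAndMetadata> offsets = admin
                    .listConsumerGroupOffsets("test-consumer-group")
                    .partitionsToOffsetAndMetadata()
                    .get();
            offsets.forEach((tp, om) ->
                    System.out.println(tp + " -> committed offset " + om.offset()));
        }
    }
}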

 

View configuration:

sudo sh kafka-configs.sh   (run without arguments to print the supported entity types and options)

 
