@@ -6,6 +6,7 @@ import org.apache.kafka.common.serialization.StringDeserializer;

import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.time.Duration;
import java.util.*;

public class kafkaConsumerTest {

@@ -25,39 +26,123 @@ public class kafkaConsumerTest {
        return kafkaTimestamp;
    }

    protected static Properties initConfig() {
        Properties properties = new Properties();
        properties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.10.14.67:9092");
        properties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        properties.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        properties.put(ConsumerConfig.GROUP_ID_CONFIG, "dsideal-group");
        properties.put(ConsumerConfig.CLIENT_ID_CONFIG, "dsideal1");
        return properties;
    }

    public static void main(String[] args) throws ParseException {
        KafkaConsumer<String, String> kafkaConsumer = new KafkaConsumer<String, String>(initConfig());
        String topic = "test";
        kafkaConsumer.subscribe(Arrays.asList(topic));
        // Partition assignment happens inside poll(), so call poll() at least once
        // before calling seek().
        // Find out which partitions this consumer is currently assigned.
        Set<TopicPartition> assignment = new HashSet<>();
        while (assignment.size() == 0) {
            // Keep polling until partitions have been assigned.
            kafkaConsumer.poll(Duration.ofMillis(100));
            assignment = kafkaConsumer.assignment();
        }
        for (TopicPartition tp : assignment) {
            // Start consuming the current partition from offset 10.
            kafkaConsumer.seek(tp, 10);
        }
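
        // Note: seek() only changes the position used by the next poll(); nothing is
        // committed. For the new position to survive a restart, the offset still has
        // to be committed after processing (e.g. via commitSync()).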

        Properties props = new Properties();
        props.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "10.10.14.67:9092");
        String CONSUMER_GROUP_NAME = "dsideal_group";
        String TOPIC_NAME = "test";

        // Consumer group name.
        props.put(ConsumerConfig.GROUP_ID_CONFIG, CONSUMER_GROUP_NAME);
        // Whether offsets are committed automatically; the default is true.
        props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        // Interval between automatic offset commits.
        props.put(ConsumerConfig.AUTO_COMMIT_INTERVAL_MS_CONFIG, "1000");
        // props.put(ConsumerConfig.ENABLE_AUTO_COMMIT_CONFIG, "false");
        /*
        Where consumption starts when the group is new, or when a requested offset no longer exists:
        latest (default): consume only messages sent to the topic after this consumer started.
        earliest: consume from the beginning the first time, then continue from the committed
        offset. Note this differs from consumer.seekToBeginning(), which restarts from the
        beginning on every run.
        */
        //props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");
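        // Illustration (hypothetical group id, not in the original): a consumer started
        // with a brand-new group id and "earliest" replays the whole log once, then
        // resumes from the committed offset on later runs, whereas seekToBeginning()
        // rewinds on every run.
        //props.put(ConsumerConfig.GROUP_ID_CONFIG, "fresh-group");
        //props.put(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest");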
        /*
        Interval at which the consumer sends heartbeats to the broker. If a rebalance is in
        progress when a heartbeat arrives, the broker delivers the rebalance plan to the
        consumer in the heartbeat response, so this interval can be kept fairly short.
        */
        props.put(ConsumerConfig.HEARTBEAT_INTERVAL_MS_CONFIG, 1000);
        /*
        How long the broker can go without receiving a heartbeat before it considers the
        consumer dead and evicts it from the group; the consumer's partitions are then
        reassigned to other consumers. The default is 10 seconds.
        */
        props.put(ConsumerConfig.SESSION_TIMEOUT_MS_CONFIG, 10 * 1000);
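        // Rule of thumb from the Kafka documentation: heartbeat.interval.ms should be no
        // higher than one third of session.timeout.ms, as is the case here (1 s vs. 10 s).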

        // Maximum number of records returned by a single poll. Raise it if the consumer
        // processes records quickly; lower it if processing is slow.
        props.put(ConsumerConfig.MAX_POLL_RECORDS_CONFIG, 50);
        /*
        If the gap between two poll() calls exceeds this, the broker decides the consumer is
        too slow to keep up, evicts it from the group, and reassigns its partitions to other
        consumers.
        */
        props.put(ConsumerConfig.MAX_POLL_INTERVAL_MS_CONFIG, 30 * 1000);
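        // Taken together, these two settings leave a processing budget of roughly
        // 30 000 ms / 50 records = 600 ms per record before the consumer risks eviction.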

        props.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        props.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, StringDeserializer.class.getName());
        KafkaConsumer<String, String> consumer = new KafkaConsumer<String, String>(props);

        consumer.subscribe(Arrays.asList(TOPIC_NAME));
        // Consume a specific partition.
        //consumer.assign(Arrays.asList(new TopicPartition(TOPIC_NAME, 0)));
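        // Note: subscribe() and assign() are mutually exclusive on one consumer instance;
        // enabling the assign()-based examples below requires removing the subscribe()
        // call above, otherwise the consumer throws an IllegalStateException.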

        // Rewind: re-consume from the beginning of the partition.
        /*consumer.assign(Arrays.asList(new TopicPartition(TOPIC_NAME, 0)));
        consumer.seekToBeginning(Arrays.asList(new TopicPartition(TOPIC_NAME, 0)));*/

        // Consume from a specific offset.
        /*consumer.assign(Arrays.asList(new TopicPartition(TOPIC_NAME, 0)));
        consumer.seek(new TopicPartition(TOPIC_NAME, 0), 10);*/

        // Consume from a specific point in time.
        /*List<PartitionInfo> topicPartitions = consumer.partitionsFor(TOPIC_NAME);
        // Start from one hour ago.
        long fetchDataTime = new Date().getTime() - 1000 * 60 * 60;
        Map<TopicPartition, Long> map = new HashMap<>();
        for (PartitionInfo par : topicPartitions) {
            map.put(new TopicPartition(TOPIC_NAME, par.partition()), fetchDataTime);
        }
        Map<TopicPartition, OffsetAndTimestamp> parMap = consumer.offsetsForTimes(map);
        for (Map.Entry<TopicPartition, OffsetAndTimestamp> entry : parMap.entrySet()) {
            TopicPartition key = entry.getKey();
            OffsetAndTimestamp value = entry.getValue();
            if (key == null || value == null) continue;
            Long offset = value.offset();
            System.out.println("partition-" + key.partition() + "|offset-" + offset);
            System.out.println();
            // Seek to the offset resolved from the record timestamp.
            consumer.assign(Arrays.asList(key));
            consumer.seek(key, offset);
        }*/
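        // offsetsForTimes() returns a null OffsetAndTimestamp for any partition that has
        // no message with a timestamp at or after the requested time, which is why the
        // loop above null-checks every entry.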

        while (true) {
            ConsumerRecords<String, String> consumerRecords = kafkaConsumer.poll(Duration.ofMillis(2000));
            System.out.println("Number of records fetched by this poll: " + consumerRecords.count());
            System.out.println("Record set empty: " + consumerRecords.isEmpty());
            for (ConsumerRecord<String, String> consumerRecord : consumerRecords) {
                System.out.println("Consumed record key: " + consumerRecord.key() + ", value: " + consumerRecord.value() + ", offset: " + consumerRecord.offset());
            }

            /*
             * The poll() API performs a long poll to fetch messages.
             */
            ConsumerRecords<String, String> records = consumer.poll(Duration.ofMillis(1000));
            for (ConsumerRecord<String, String> record : records) {
                System.out.printf("Received record: partition = %d, offset = %d, key = %s, value = %s%n", record.partition(),
                        record.offset(), record.key(), record.value());
            }

            if (records.count() > 0) {
                // Manual synchronous commit: the current thread blocks until the commit
                // succeeds. Synchronous commit is the usual choice, since there is normally
                // no further logic to run after the commit.
                consumer.commitSync();
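
                // A minimal sketch (not in the original): commitSync() also accepts an
                // explicit offset map, committing a given record's offset + 1 for its
                // partition ("record" is a hypothetical in-scope ConsumerRecord):
                /*consumer.commitSync(Collections.singletonMap(
                        new TopicPartition(record.topic(), record.partition()),
                        new OffsetAndMetadata(record.offset() + 1)));*/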

                // Manual asynchronous commit: the committing thread is not blocked and can
                // continue with the rest of the program logic.
                /*consumer.commitAsync(new OffsetCommitCallback() {
                    @Override
                    public void onComplete(Map<TopicPartition, OffsetAndMetadata> offsets, Exception exception) {
                        if (exception != null) {
                            System.err.println("Commit failed for " + offsets);
                            System.err.println("Commit failed exception: " + exception);
                        }
                    }
                });*/
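
                // commitAsync() deliberately does not retry a failed commit: a late retry
                // could overwrite a newer offset committed in the meantime, which is why
                // the callback above only logs the failure.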
            }
        }
    }
}