Error producing to embedded Kafka

I'm trying to embed a Kafka server in my code. I've used the following example code to try to learn how to do so, but for some reason my producer can't send messages to the embedded server (it times out after 60 seconds). I'm using Kafka 0.8.2.2. Can someone tell me what I'm doing wrong?
import kafka.api.FetchRequest;
import kafka.api.FetchRequestBuilder;
import kafka.javaapi.FetchResponse;
import kafka.javaapi.TopicMetadata;
import kafka.javaapi.consumer.SimpleConsumer;
import kafka.javaapi.message.ByteBufferMessageSet;
import kafka.message.MessageAndOffset;
import kafka.producer.ProducerConfig;
import kafka.server.KafkaConfig;
import kafka.server.KafkaServer;
import kafka.utils.Time;
import kafka.utils.Utils;
import org.apache.curator.test.TestingServer;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerRecord;
import org.apache.kafka.clients.producer.RecordMetadata;
import java.io.File;
import java.nio.ByteBuffer;
import java.util.Properties;
public class KafkaLocalBroker {
public static final String TEST_TOPIC = "test-topic";
public KafkaConfig kafkaConfig;
public KafkaServer kafkaServer;
public TestingServer zookeeper;
public KafkaLocalBroker() throws Exception{
zookeeper = new TestingServer(true);
Properties props = new Properties();
props.put("zookeeper.connect", zookeeper.getConnectString());
props.put("broker.id", 0);
kafkaConfig = new KafkaConfig(props);
kafkaServer = new KafkaServer(kafkaConfig, new Time() {
public long nanoseconds() {
return System.nanoTime();
}
public long milliseconds() {
return System.currentTimeMillis();
}
public void sleep(long ms) {
try {
Thread.sleep(ms);
} catch(InterruptedException e){
// Do Nothing
}
}
});
kafkaServer.startup();
System.out.println("embedded kafka is up");
}
public void stop(){
kafkaServer.shutdown();
System.out.println("embedded kafka stop");
}
/**
* a main that tests the embedded kafka
* @param args
*/
public static void main(String[] args) {
KafkaLocalBroker kafkaLocalBroker = null;
//init kafka server and start it:
try {
kafkaLocalBroker = new KafkaLocalBroker();
} catch (Exception e){
}
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("acks", "all");
props.put("retries", 1);
props.put("batch.size", 16384);
props.put("linger.ms", 1);
props.put("buffer.memory", 33554432);
props.put("key.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
KafkaProducer<String, String> producer = new KafkaProducer<String, String>(props);
//send one message to local kafka server:
for (int i=0; i<10; i++){
ProducerRecord<String, String> data = new ProducerRecord<String, String>(TEST_TOPIC, "test-message" + i);
producer.send(data, (metadata, exception) -> {
if (exception != null) {
System.out.println("Failed to write log message: " + exception.getMessage());
} else {
System.out.println("Successful write to offset {} in partition {} on topic {}: " +
metadata.offset() + ", " + metadata.partition() + ", "+ metadata.topic());
}
});
}
//consume messages from Kafka:
SimpleConsumer consumer = new SimpleConsumer("localhost", 9092, 10000, 1024000, "clientId");
long offset = 0L;
while (offset < 160) { //this is an exit criterion just for this test so we are not stuck in an endless loop
// create a fetch request for the test topic, partition 0, the current offset, and a fetch size of 100000 bytes
FetchRequest fetchRequest = new FetchRequestBuilder().addFetch(TEST_TOPIC, 0, offset, 100000).build();//new FetchRequest(TEST_TOPIC, 0, offset, 1000000);
// get the message set from the consumer and print them out
FetchResponse messages = consumer.fetch(fetchRequest);
for(MessageAndOffset msg : messages.messageSet(TEST_TOPIC, 0)) {
ByteBuffer payload = msg.message().payload();
byte[] bytes = new byte[payload.limit()];
payload.get(bytes);
try {
System.out.println(new String(bytes, "UTF-8"));
} catch (Exception e){
}
// advance the offset after consuming each message
offset = msg.nextOffset();
}
}
producer.close();
//close the consumer
consumer.close();
//stop the kafka broker:
if(kafkaLocalBroker != null) {
kafkaLocalBroker.stop();
}
}
}
EDIT: I've included the exception returned from the producer below:
org.apache.kafka.common.errors.TimeoutException: Failed to update metadata after 60000 ms.

The properties used to create the Kafka producer are not valid for 0.8. Go through ProducerConfig and change the properties, or update your Kafka version.
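For reference, here is a minimal sketch of a 0.8-style producer using the old Scala producer API (kafka.javaapi.producer.Producer); the broker address and topic name are assumptions carried over from the question:
import kafka.javaapi.producer.Producer;
import kafka.producer.KeyedMessage;
import kafka.producer.ProducerConfig;
import java.util.Properties;

public class OldApiProducerSketch {
    public static void main(String[] args) {
        Properties props = new Properties();
        // the 0.8 producer expects metadata.broker.list, not bootstrap.servers
        props.put("metadata.broker.list", "localhost:9092"); // assumed broker address
        // and serializer.class instead of key.serializer/value.serializer
        props.put("serializer.class", "kafka.serializer.StringEncoder");
        props.put("request.required.acks", "1");
        Producer<String, String> producer = new Producer<String, String>(new ProducerConfig(props));
        producer.send(new KeyedMessage<String, String>("test-topic", "test-message"));
        producer.close();
    }
}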

Related

Kafka Consumer does not read data from Producer

My Kafka consumer doesn't read from my producer. I noticed that after calling the poll method, the code never reaches the "hello" print and there is no error message.
The code runs without errors, but it's as if it stops after the poll call.
Note: my producer works well. I created a consumer to test it.
Code:
public class ConsumerApp {
public static void main(String[] args) {
// Create Properties object for the consumer config settings
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> myconsumer = new KafkaConsumer<String, String>(props);
myconsumer.subscribe(Arrays.asList("test"));
try {
while (true) {
ConsumerRecords<String, String> records = myconsumer.poll(100);
System.out.println("hello");
// processing logic goes here
for (ConsumerRecord<String, String> record : records) {
// processing records
System.out.println(String.format("topic=%s, partition=%d, offset=%d, key=%s, value=%s",
record.topic(), record.partition(), record.offset(), record.key(), record.value()));
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
// Closing Consumer
myconsumer.close();
}
}
}
I found the solution: I hadn't set up a connection with the ZooKeeper server. Now that I have, my consumer reads the data! Here is the code:
public class ConsumerApp {
public static void main(String[] args) {
//Create Properties object for the consumer config settings
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("zookeeper.connect", "localhost:2181");
props.put("group.id", "console");
props.put("zookeeper.session.timeout.ms", "500");
props.put("zookeeper.sync.timeout.ms", "500");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer< String, String > myconsumer = new KafkaConsumer<String, String> (props);
myconsumer.subscribe(Collections.singletonList("test"));
try {
while(true){
ConsumerRecords<String, String> records = myconsumer.poll(100);
// processing logic goes here
for (ConsumerRecord<String, String> record : records) {
// processing records
System.out.printf("offset = %d, key = %s, value = %s\n",
record.offset(), record.key(), record.value());
}
}
} catch (Exception e) {
e.printStackTrace();
} finally {
// Closing Consumer
myconsumer.close();
}
}
}
A long time ago I was playing with this example and it worked well; try it:
Consumer:
package com.spnotes.kafka.simple;
import org.apache.kafka.clients.consumer.ConsumerConfig;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.common.errors.WakeupException;
import java.util.Arrays;
import java.util.Properties;
import java.util.Scanner;
/**
* Created by sunilpatil on 12/28/15.
*/
public class Consumer {
private static Scanner in;
public static void main(String[] argv)throws Exception{
if (argv.length != 2) {
System.err.printf("Usage: %s <topicName> <groupId>\n",
Consumer.class.getSimpleName());
System.exit(-1);
}
in = new Scanner(System.in);
String topicName = argv[0];
String groupId = argv[1];
ConsumerThread consumerRunnable = new ConsumerThread(topicName,groupId);
consumerRunnable.start();
String line = "";
while (!line.equals("exit")) {
line = in.next();
}
consumerRunnable.getKafkaConsumer().wakeup();
System.out.println("Stopping consumer .....");
consumerRunnable.join();
}
private static class ConsumerThread extends Thread{
private String topicName;
private String groupId;
private KafkaConsumer<String,String> kafkaConsumer;
public ConsumerThread(String topicName, String groupId){
this.topicName = topicName;
this.groupId = groupId;
}
public void run() {
Properties configProperties = new Properties();
configProperties.put(ConsumerConfig.BOOTSTRAP_SERVERS_CONFIG, "localhost:9092");
configProperties.put(ConsumerConfig.KEY_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer"); // String key deserializer to match KafkaConsumer<String, String>
configProperties.put(ConsumerConfig.VALUE_DESERIALIZER_CLASS_CONFIG, "org.apache.kafka.common.serialization.StringDeserializer");
configProperties.put(ConsumerConfig.GROUP_ID_CONFIG, groupId);
configProperties.put(ConsumerConfig.CLIENT_ID_CONFIG, "simple");
//Figure out where to start processing messages from
kafkaConsumer = new KafkaConsumer<String, String>(configProperties);
kafkaConsumer.subscribe(Arrays.asList(topicName));
//Start processing messages
try {
while (true) {
ConsumerRecords<String, String> records = kafkaConsumer.poll(100);
for (ConsumerRecord<String, String> record : records)
System.out.println(record.value());
}
}catch(WakeupException ex){
System.out.println("Exception caught " + ex.getMessage());
}finally{
kafkaConsumer.close();
System.out.println("After closing KafkaConsumer");
}
}
public KafkaConsumer<String,String> getKafkaConsumer(){
return this.kafkaConsumer;
}
}
}
Producer:
package com.spnotes.kafka.simple;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.ProducerConfig;
import org.apache.kafka.clients.producer.ProducerRecord;
import java.util.Properties;
import java.util.Scanner;
/**
* Created by sunilpatil on 12/28/15.
*/
public class Producer {
private static Scanner in;
public static void main(String[] argv)throws Exception {
if (argv.length != 1) {
System.err.println("Please specify 1 parameters ");
System.exit(-1);
}
String topicName = argv[0];
in = new Scanner(System.in);
System.out.println("Enter message(type exit to quit)");
//Configure the Producer
Properties configProperties = new Properties();
configProperties.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG,"localhost:9092");
configProperties.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer"); // String key serializer to match ProducerRecord<String, String>
configProperties.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG,"org.apache.kafka.common.serialization.StringSerializer");
org.apache.kafka.clients.producer.Producer<String, String> producer = new KafkaProducer<>(configProperties);
String line = in.nextLine();
while(!line.equals("exit")) {
//TODO: Make sure to use the ProducerRecord constructor that does not take a partition id
ProducerRecord<String, String> rec = new ProducerRecord<String, String>(topicName,line);
producer.send(rec);
line = in.nextLine();
}
in.close();
producer.close();
}
}
You can find another nice example here: https://www.codenotfound.com/spring-kafka-consumer-producer-example.html

Apache Kafka Producer Consumer API Issue

I am trying to create a simple Java API to test a Kafka producer and consumer. When I run the producer and consumer in separate terminals on my Mac, everything works fine. But when I try to connect to the Kafka server using the Java API code, I get this error:
Exception in thread "main" java.lang.NullPointerException
at kafkatest2.ProducerTest.main(ProducerTest.java:34)
Producer Code :
package kafkatest2;
import java.util.Properties;
import org.apache.kafka.clients.producer.KafkaProducer;
import org.apache.kafka.clients.producer.Producer;
import org.apache.kafka.clients.producer.ProducerRecord;
public class ProducerTest {
public static void main(String[] args) {
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("acks", "all");
props.put("retries", 0);
props.put("batch.size", 16384);
props.put("linger.ms", 1);
props.put("buffer.memory", 33554432);
props.put("key.serializer","org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer","org.apache.kafka.common.serialization.StringSerializer");
Producer<String, String> producer = null;
try {
producer = new KafkaProducer<>(props);
for (int i = 0; i < 10; i++) {
String msg = "Message " + i;
producer.send(new ProducerRecord<String, String>("tested", msg));
System.out.println("Sent:" + msg);
}
} catch (Exception e) {
e.printStackTrace();
} finally {
System.out.println("last");
producer.close();
}
}
}
Consumer Code :
package kafkatest2;
import java.util.Arrays;
import java.util.Properties;
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
public class ConsumerTest {
public static void main(String[] args) {
Properties props = new Properties();
props.put("zookeeper.connect", "127.0.0.1:2181");
props.put("group.id", "test-consumer-group");
props.put("enable.auto.commit", "true");
props.put("auto.commit.interval.ms", "1000");
props.put("auto.offset.reset", "earliest");
props.put("session.timeout.ms", "30000");
props.put("key.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
props.put("value.deserializer", "org.apache.kafka.common.serialization.StringDeserializer");
KafkaConsumer<String, String> kafkaConsumer1 = new KafkaConsumer<>(props);
kafkaConsumer1.subscribe(Arrays.asList("tested"));
while (true) {
ConsumerRecords<String, String> records = kafkaConsumer1.poll(10);
for (ConsumerRecord<String, String> record : records) {
System.out.println("Partition: " + record.partition() + " Offset: " + record.offset() + " Value: " + record.value() + " ThreadID: " + Thread.currentThread().getId());
}
}
}
}
Please let me know what I am missing. Is there some issue with the configuration values?
Thanks,
Vipul
The stack trace indicates an NPE in your producer code:
Exception in thread "main" java.lang.NullPointerException at
kafkatest2.ProducerTest.main(ProducerTest.java:34)
This can happen if producer = new KafkaProducer<>(props); throws an exception. If so, when you enter the finally block, the local variable producer will still be null, so calling producer.close() will throw an NPE. Simply wrap the call to close in an if (producer != null) check.
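A minimal sketch of that guard, applied to the producer code above:
Producer<String, String> producer = null;
try {
    producer = new KafkaProducer<>(props); // may throw, e.g. on a bad config
    // ... send records as before ...
} catch (Exception e) {
    e.printStackTrace();
} finally {
    System.out.println("last");
    if (producer != null) { // skip close() if the constructor threw
        producer.close();
    }
}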

Write in Topic in Kafka through Java Code

I am trying to write to a Kafka topic through Java. I have created the topic, but now I want to insert some data into it.
Thanks in advance.
Here's an example of a synchronous producer. It should work with Kafka 0.11 (and a few prior releases too):
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.serialization.LongSerializer;
import org.apache.kafka.common.serialization.StringSerializer;
import java.util.Properties;
public class MyKafkaProducer {
private final static String TOPIC = "my-example-topic";
private final static String BOOTSTRAP_SERVERS = "localhost:9092,localhost:9093,localhost:9094";
private static Producer<Long, String> createProducer() {
Properties props = new Properties();
props.put(ProducerConfig.BOOTSTRAP_SERVERS_CONFIG, BOOTSTRAP_SERVERS);
props.put(ProducerConfig.CLIENT_ID_CONFIG, "MyKafkaProducer");
props.put(ProducerConfig.KEY_SERIALIZER_CLASS_CONFIG, LongSerializer.class.getName());
props.put(ProducerConfig.VALUE_SERIALIZER_CLASS_CONFIG, StringSerializer.class.getName());
return new KafkaProducer<>(props);
}
static void runProducer(final int sendMessageCount) throws Exception {
final Producer<Long, String> producer = createProducer();
try {
for (long index = 1; index <= sendMessageCount; index++) {
final ProducerRecord<Long, String> record = new ProducerRecord<>(TOPIC, index, "Message " + index);
RecordMetadata metadata = producer.send(record).get();
System.out.printf("sent record(key=%s value='%s')" + " metadata(partition=%d, offset=%d)\n",
record.key(), record.value(), metadata.partition(), metadata.offset());
}
} finally {
producer.flush();
producer.close();
}
}
public static void main(String[] args) throws Exception {
if (args.length == 0) {
runProducer(5);
} else {
runProducer(Integer.parseInt(args[0]));
}
}
}
You may need to modify some of the hard-coded settings.
Reference: http://cloudurable.com/blog/kafka-tutorial-kafka-producer/index.html

Kafka custom partitioner error

I wrote a custom partitioner program; both classes are in the same project, but I am not sure why I am getting the error below. Please help me with this. The first program below is a Kafka producer. When I run it, I get an error saying the SensorPartitioner class could not be found.
error:
Exception in thread "main" org.apache.kafka.common.config.ConfigException: Invalid value SensorPartitioner for configuration partitioner.class: Class SensorPartitioner could not be found.
at org.apache.kafka.common.config.ConfigDef.parseType(ConfigDef.java:671)
at org.apache.kafka.common.config.ConfigDef.parse(ConfigDef.java:418)
at org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:56)
at org.apache.kafka.common.config.AbstractConfig.<init>(AbstractConfig.java:63)
at org.apache.kafka.clients.producer.ProducerConfig.<init>(ProducerConfig.java:338)
at org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:188)
at kafka.test.main(test.java:19)
package kafka;
import org.apache.kafka.clients.producer.*;
import java.io.BufferedReader;
import java.io.FileReader;
import java.util.Properties;
public class test {
public static void main(String[] args) throws Exception{
System.out.println("new");
String topicName = "partitionTopic";
String sCurrentLine;
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092,localhost:9093");
props.put("key.serializer","org.apache.kafka.common.serialization.StringSerializer");
props.put("value.serializer", "org.apache.kafka.common.serialization.StringSerializer");
props.put("partitioner.class", "SensorPartitioner");
props.put("speed.sensor.name", "core");
Producer<String, String> producer = new KafkaProducer <String, String>(props);
try{
BufferedReader br = null;
br = new BufferedReader(new FileReader("datagen_10.txt"));
//String arr1=" ";
while ((sCurrentLine = br.readLine()) != null) {
System.out.println(sCurrentLine);
String[] arr11 = sCurrentLine.split(",");
String key=arr11[0];
ProducerRecord<String, String> record = new ProducerRecord<String, String>(topicName,key,sCurrentLine);
RecordMetadata metadata = producer.send(record).get();
System.out.println("Message is sent to Partition no " + metadata.partition() + " and offset " + metadata.offset());
System.out.println("SynchronousProducer Completed with success.");
}
br.close();
}catch (Exception e) {
e.printStackTrace();
System.out.println("SynchronousProducer failed with an exception");
}finally{
producer.close();
}
}
}
The program below is the custom partitioner.
package kafka;
import java.util.*;
import org.apache.kafka.clients.producer.*;
import org.apache.kafka.common.*;
import org.apache.kafka.common.utils.*;
import org.apache.kafka.common.record.*;
public class SensorPartitioner implements Partitioner {
private String speedSensorName;
public void configure(Map<String, ?> configs) {
speedSensorName = configs.get("speed.sensor.name").toString();
}
public int partition(String topic, Object key, byte[] keyBytes, Object value, byte[] valueBytes, Cluster cluster) {
List<PartitionInfo> partitions = cluster.partitionsForTopic(topic);
int numPartitions = partitions.size();
int sp = (int)Math.abs(numPartitions*0.3);
int p=0;
if ( (keyBytes == null) || (!(key instanceof String)) )
throw new InvalidRecordException("All messages must have sensor name as key");
if ( ((String)key).equals(speedSensorName) )
p = Utils.toPositive(Utils.murmur2(valueBytes)) % sp;
else
p = Utils.toPositive(Utils.murmur2(keyBytes)) % (numPartitions-sp) + sp ;
System.out.println("Key = " + (String)key + " Partition = " + p );
return p;
}
public void close() {}
}
I also ran into the same problem, but found that SensorPartitioner is not a Kafka class; it is actually another class from the tutorial. Once you create the SensorPartitioner class, the problem is solved.
The class:
https://github.com/LearningJournal/ApacheKafkaTutorials/blob/master/ProducerExamples/SensorPartitioner.java
@vivman I had the same problem with the IDEA dev tools, and I fixed it by setting the full path in partitioner.class, like this: props.put("partitioner.class", "xx.xxx.x.SensorPartitioner");. In your program you can try modifying it to props.put("partitioner.class", "kafka.SensorPartitioner");.
Hope this helps.
Use the fully qualified name of the SensorPartitioner class, as below:
props.put("partitioner.class", "com.subpackage.subsubpackage.SensorPartitioner");

kafka consumer group is rebalancing

I am using Kafka 0.9 and the new Java consumer. I am polling inside a loop. I am getting a CommitFailedException because of a group rebalance when the code tries to execute consumer.commitSync(). Please note, I am setting session.timeout.ms to 30000 and heartbeat.interval.ms to 10000 on the consumer, and polling definitely happens within 30000 ms. Can anyone help me out? Please let me know if any other information is needed.
Here is the code :-
Properties props = new Properties();
props.put("bootstrap.servers", {allthreeservers});
props.put("group.id", groupId);
props.put("key.deserializer", StringDeserializer.class.getName());
props.put("value.deserializer", ObjectSerializer.class.getName());
props.put("auto.offset.reset", erlierst);
props.put("enable.auto.commit", false);
props.put("session.timeout.ms", 30000);
props.put("heartbeat.interval.ms", 10000);
props.put("request.timeout.ms", 31000);
props.put("kafka.consumer.topic.name", topic);
props.put("max.partition.fetch.bytes", 1000);
while (true) {
Boolean isPassed = true;
try {
ConsumerRecords<Object, Object> records = consumer.poll(1000);
if (records.count() > 0) {
ConsumeEventInThread consumerEventInThread = new ConsumeEventInThread(records, consumerService);
FutureTask<Boolean> futureTask = new FutureTask<>(consumerEventInThread);
executorServiceForAsyncKafkaEventProcessing.execute(futureTask);
try {
isPassed = (Boolean) futureTask.get(Long.parseLong(props.getProperty("session.timeout.ms")) - Long.parseLong("5000"), TimeUnit.MILLISECONDS);
} catch (Exception e) {
logger.warn("Time out after waiting till session time out");
}
consumer.commitSync();
logger.info("Successfully committed offset for topic " + Arrays.asList(props.getProperty("kafka.consumer.topic.name")));
}else{
logger.info("Failed to process consumed messages, will not Commit and consume again");
}
} catch (Exception e) {
logger.error("Unhandled exception in while consuming messages " + Arrays.asList(props.getProperty("kafka.consumer.topic.name")), e);
}
}
The CommitFailedException is thrown when the commit cannot be completed because the group has been rebalanced. This is the main thing we have to be careful of when using the Java client. Since all network IO (including heartbeating) and message processing is done in the foreground, it is possible for the session timeout to expire while a batch of messages is being processed. To handle this, you have two choices.
First you can adjust the session.timeout.ms setting to ensure that the handler has enough time to finish processing messages. You can then tune max.partition.fetch.bytes to limit the amount of data returned in a single batch, though you will have to consider how many partitions are in the subscribed topics.
The second option is to do message processing in a separate thread, but you will have to manage flow control to ensure that the threads can keep up.
You can set session.timeout.ms large enough that commit failures from rebalances are rare. The only drawback to this is a longer delay before partitions can be re-assigned in the event of a hard failure.
For more info, please see the Kafka documentation.
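For the first option, a minimal sketch of the tuning (the values are illustrative assumptions, not recommendations):
Properties props = new Properties();
// give the handler more headroom per poll cycle
props.put("session.timeout.ms", "60000");
// request.timeout.ms should stay above session.timeout.ms
props.put("request.timeout.ms", "61000");
// limit how much data a single fetch returns per partition
props.put("max.partition.fetch.bytes", "65536");
// on 0.10+ consumers, max.poll.records also caps what one poll() returns:
// props.put("max.poll.records", "100");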
This is a working example of the second option (processing in a separate thread).
----Worker code-----
import org.apache.kafka.clients.consumer.ConsumerRecord;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.Callable;
public class Worker implements Callable<Boolean> {
ConsumerRecord record;
public Worker(ConsumerRecord record) {
this.record = record;
}
public Boolean call() {
Map<String, Object> data = new HashMap<>();
try {
data.put("partition", record.partition());
data.put("offset", record.offset());
data.put("value", record.value());
Thread.sleep(10000);
System.out.println("Processing Thread---" + Thread.currentThread().getName() + " data: " + data);
return Boolean.TRUE;
} catch (Exception e) {
e.printStackTrace();
return Boolean.FALSE;
}
}
}
---------Execution code------------------
import org.apache.kafka.clients.consumer.ConsumerRecord;
import org.apache.kafka.clients.consumer.ConsumerRecords;
import org.apache.kafka.clients.consumer.KafkaConsumer;
import org.apache.kafka.clients.consumer.OffsetAndMetadata;
import org.apache.kafka.common.TopicPartition;
import org.apache.kafka.common.serialization.StringDeserializer;
import java.util.*;
import java.util.concurrent.*;
public class AsyncConsumer {
public static void main(String[] args) {
Properties props = new Properties();
props.put("bootstrap.servers", "localhost:9092");
props.put("group.id", "test-group");
props.put("key.deserializer", StringDeserializer.class.getName());
props.put("value.deserializer", StringDeserializer.class.getName());
props.put("enable.auto.commit", false);
props.put("session.timeout.ms", 30000);
props.put("heartbeat.interval.ms", 10000);
props.put("request.timeout.ms", 31000);
KafkaConsumer<String, String> consumer = new KafkaConsumer<>(props);
consumer.subscribe(Arrays.asList("Test1", "Test2"));
int poolSize=10;
ExecutorService es= Executors.newFixedThreadPool(poolSize);
CompletionService<Boolean> completionService=new ExecutorCompletionService<Boolean>(es);
try {
while (true) {
System.out.println("Polling................");
ConsumerRecords<String, String> records = consumer.poll(1000);
List<ConsumerRecord> recordList = new ArrayList();
for (ConsumerRecord<String, String> record : records) {
recordList.add(record);
if(recordList.size() ==poolSize){
int taskCount=poolSize;
//process it
recordList.forEach( recordTobeProcess -> completionService.submit(new Worker(recordTobeProcess)));
while(taskCount >0){
try {
Future<Boolean> futureResult = completionService.poll(1, TimeUnit.SECONDS);
if (futureResult != null) {
boolean result = futureResult.get().booleanValue();
taskCount = taskCount - 1;
}
}catch (Exception e) {
e.printStackTrace();
}
}
recordList.clear();
Map<TopicPartition,OffsetAndMetadata> commitOffset= Collections.singletonMap(new TopicPartition(record.topic(),record.partition()),
new OffsetAndMetadata(record.offset() + 1));
consumer.commitSync(commitOffset);
}
}
}
} finally {
consumer.close();
}
}
}
You need to follow some rules:
1) Pass a fixed number of records (for example 10) to ConsumeEventInThread.
2) Create more threads for processing instead of one thread, and submit all tasks to the CompletionService.
3) Poll all submitted tasks and verify them.
4) Then commit (use the commitSync overload that takes offsets instead of the no-argument one; see the sketch below).
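A minimal sketch of rule 4, mirroring the commit in the example above (record here stands for the last fully processed record):
// commit the position of the next record to read, hence the +1
Map<TopicPartition, OffsetAndMetadata> toCommit = Collections.singletonMap(
        new TopicPartition(record.topic(), record.partition()),
        new OffsetAndMetadata(record.offset() + 1));
consumer.commitSync(toCommit);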