Failed to deserialize data for topic to protobuf sink connector - apache-kafka

I can consume the produced Protobuf message data from the Kafka topic using a different tool such as Conduktor. However, when I try to poll data using the JdbcSinkConnector, it throws an exception like:
org.apache.kafka.common.errors.SerializationException: Error
deserializing Protobuf message
Please take a look at the following error detail, obtained by calling the Kafka Connect API:
URL: http://localhost:8083/connectors?expand=info&expand=status
JSON response and trace:
{
  "sink_postgres_03_proto": {
    "info": {
      "name": "sink_postgres_03_proto",
      "config": {
        "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
        "connection.password": "54321",
        "topics": "order-messages",
        "value.converter.schema.registry.url": "http://localhost:8081",
        "key.converter.schemas.enable": "false",
        "auto.evolve": "true",
        "connection.user": "postgres",
        "value.converter.schemas.enable": "true",
        "name": "sink_postgres_03_proto",
        "auto.create": "true",
        "connection.url": "jdbc:postgresql://localhost:5432/CallHistoryService",
        "value.converter": "io.confluent.connect.protobuf.ProtobufConverter",
        "insert.mode": "insert",
        "key.converter": "org.apache.kafka.connect.storage.StringConverter"
      },
      "tasks": [
        {
          "connector": "sink_postgres_03_proto",
          "task": 0
        }
      ],
      "type": "sink"
    },
    "status": {
      "name": "sink_postgres_03_proto",
      "connector": {
        "state": "RUNNING",
        "worker_id": "kafka-connect:8083"
      },
      "tasks": [
        {
          "id": 0,
          "state": "FAILED",
          "worker_id": "kafka-connect:8083",
"trace":"org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:196)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:122)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:472)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:226)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:198)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:185)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:235)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:834)\nCaused by: org.apache.kafka.connect.errors.DataException: Failed to deserialize data for topic order-messages to Protobuf: \n\tat io.confluent.connect.protobuf.ProtobufConverter.toConnectData(ProtobufConverter.java:123)\n\tat org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$1(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:146)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:180)\n\t... 
13 more\nCaused by: org.apache.kafka.common.errors.SerializationException: Error deserializing Protobuf message for id 1\nCaused by: java.net.ConnectException: Connection refused (Connection refused)\n\tat java.base/java.net.PlainSocketImpl.socketConnect(Native Method)\n\tat java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)\n\tat java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)\n\tat java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)\n\tat java.base/java.net.Socket.connect(Socket.java:609)\n\tat java.base/sun.net.NetworkClient.doConnect(NetworkClient.java:177)\n\tat java.base/sun.net.www.http.HttpClient.openServer(HttpClient.java:474)\n\tat java.base/sun.net.www.http.HttpClient.openServer(HttpClient.java:569)\n\tat java.base/sun.net.www.http.HttpClient.<init>(HttpClient.java:242)\n\tat java.base/sun.net.www.http.HttpClient.New(HttpClient.java:341)\n\tat java.base/sun.net.www.http.HttpClient.New(HttpClient.java:362)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:1253)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.plainConnect0(HttpURLConnection.java:1187)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:1081)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:1015)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1592)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1520)\n\tat java.base/java.net.HttpURLConnection.getResponseCode(HttpURLConnection.java:527)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.sendHttpRequest(RestService.java:272)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.httpRequest(RestService.java:352)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.getId(RestService.java:660)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.getId(RestService.java:642)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaByIdFromRegistry(CachedSchemaRegistryClient.java:217)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaBySubjectAndId(CachedSchemaRegistryClient.java:291)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaById(CachedSchemaRegistryClient.java:276)\n\tat io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufDeserializer.deserialize(AbstractKafkaProtobufDeserializer.java:117)\n\tat io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufDeserializer.deserializeWithSchemaAndVersion(AbstractKafkaProtobufDeserializer.java:235)\n\tat io.confluent.connect.protobuf.ProtobufConverter$Deserializer.deserialize(ProtobufConverter.java:163)\n\tat io.confluent.connect.protobuf.ProtobufConverter.toConnectData(ProtobufConverter.java:107)\n\tat org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$1(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:146)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:180)\n\tat 
org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:122)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:472)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:226)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:198)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:185)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:235)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:834)\n"
        }
      ],
      "type": "sink"
    }
  }
}
Please advise.
Thank you!

If you take the trace node and convert the \n and \t to newlines and tabs, you get a readable stack trace which shows the problem:
Caused by: org.apache.kafka.connect.errors.DataException: Failed to deserialize data for topic order-messages to Protobuf:
at io.confluent.connect.protobuf.ProtobufConverter.toConnectData(ProtobufConverter.java:123)
at org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)
at org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$1(WorkerSinkTask.java:495)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:146)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:180)
... 13 more
Caused by: org.apache.kafka.common.errors.SerializationException: Error deserializing Protobuf message for id 1
Caused by: java.net.ConnectException: Connection refused (Connection refused)
The error is java.net.ConnectException: Connection refused, which means that either (a) you've misconfigured the location of your Schema Registry, or (b) the Schema Registry is not running.
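Since the worker_id in the status above is kafka-connect:8083, Kafka Connect is presumably running in a container, and localhost inside that container refers to the container itself, not to your host machine. A quick way to check, assuming a Docker Compose setup where the registry runs as a service named schema-registry (both hostnames here are assumptions):
# run from inside the Kafka Connect container
curl http://localhost:8081/subjects          # Connection refused if the registry is not local to the container
curl http://schema-registry:8081/subjects    # succeeds if the registry is reachable by its service name
If the second call works, point the converter at that address instead:
"value.converter.schema.registry.url": "http://schema-registry:8081"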

Related

java.lang.NoSuchMethodError: org.apache.kafka.common.protocol.Readable.readArray([B)V

We started getting the exception below after upgrading spring-kafka to 2.8.9 and kafka-clients to 3.0.1. Please suggest a fix.
laris-default-group-id] Request joining group due to: consumer pro-actively leaving the group
2022-11-04--16-41-17-047 [T: U: D: Tx:/ URI: M:] [org.springframework.kafka.KafkaListenerEndpointContainer#9-0-C-1] ERROR org.springframework.kafka.listener.KafkaMessageListenerContainer - Stopping container due to an Error
java.lang.NoSuchMethodError: org.apache.kafka.common.protocol.Readable.readArray([B)V
at org.apache.kafka.common.message.SyncGroupResponseData.read(SyncGroupResponseData.java:173)
at org.apache.kafka.common.message.SyncGroupResponseData.<init>(SyncGroupResponseData.java:102)
at org.apache.kafka.common.requests.SyncGroupResponse.parse(SyncGroupResponse.java:61)
at org.apache.kafka.common.requests.AbstractResponse.parseResponse(AbstractResponse.java:135)
at org.apache.kafka.common.requests.AbstractResponse.parseResponse(AbstractResponse.java:109)
at org.apache.kafka.clients.NetworkClient.parseResponse(NetworkClient.java:720)
at org.apache.kafka.clients.NetworkClient.handleCompletedReceives(NetworkClient.java:865)
at org.apache.kafka.clients.NetworkClient.poll(NetworkClient.java:560)
at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:265)
at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:236)
at org.apache.kafka.clients.consumer.internals.ConsumerNetworkClient.poll(ConsumerNetworkClient.java:215)
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.joinGroupIfNeeded(AbstractCoordinator.java:427)
at org.apache.kafka.clients.consumer.internals.AbstractCoordinator.ensureActiveGroup(AbstractCoordinator.java:366)
at org.apache.kafka.clients.consumer.internals.ConsumerCoordinator.poll(ConsumerCoordinator.java:511)
at org.apache.kafka.clients.consumer.KafkaConsumer.updateAssignmentMetadataIfNeeded(KafkaConsumer.java:1262)
at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:1233)
at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:1166)
at brave.kafka.clients.TracingConsumer.poll(TracingConsumer.java:93)
at brave.kafka.clients.TracingConsumer.poll(TracingConsumer.java:87)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollConsumer(KafkaMessageListenerContainer.java:1529)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.doPoll(KafkaMessageListenerContainer.java:1519)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.pollAndInvoke(KafkaMessageListenerContainer.java:1343)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:1255)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.lang.Thread.run(Thread.java:748)
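A NoSuchMethodError like this almost always means mixed library versions on the runtime classpath: SyncGroupResponseData was compiled against a Readable interface that declares readArray(byte[]), but the Readable class actually loaded at runtime does not have it. A first step, assuming a Maven build, is to check which Kafka artifacts end up on the classpath and align kafka-clients with the version that spring-kafka 2.8.9 declares:
mvn dependency:tree -Dincludes=org.apache.kafka
# look for duplicate or overridden kafka-clients versions, then pin
# kafka-clients to the version spring-kafka 2.8.9 pulls in transitively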

Error in Confluent Kafka Source Connector Tasks (DatagenConnector) Data Serialization into Avro Format

I am trying to produce data with the Confluent Kafka DatagenConnector, converting the value into Avro format using the Confluent Schema Registry. The configuration I am using to create the source connector is:
{
  "connector.class": "io.confluent.kafka.connect.datagen.DatagenConnector",
  "kafka.topic": "inventories_un3",
  "quickstart": "inventory",
  "key.converter": "org.apache.kafka.connect.storage.StringConverter",
  "key.converter.schemas.enable": false,
  "value.converter": "io.confluent.connect.avro.AvroConverter",
  "value.converter.schema.registry.url": "http://schema-registry:8083",
  "value.converter.schemas.enable": true,
  "max.interval": 1000,
  "iterations": 10000000,
  "tasks.max": "1",
  "compatibility": "NONE",
  "auto.register.schemas": false,
  "use.latest.version": true
}
The schema registered is:
{"schema": "{\"type\":\"record\",\"name\":\"Payment\",\"namespace\":\"my.examples\",\"fields\":[{\"name\":\"id\",\"type\":\"long\"},{\"name\":\"quantity\",\"type\":\"long\"},{\"name\":\"productid\",\"type\":\"long\"}]}"}
I am getting the following error:
"id": 0,
"state": "FAILED",
"worker_id": "kafka-connect:8083",
"trace": "org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:206)\n\tat org
g.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:243)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:829)\nCaused by: org.apache.kafka.connect.errors.DataException: Failed to serialize Avro data from topic inventories_un3 :\n\tat io.confluent.connect.avro.AvroConverter.fromConnectData(AvroConverter.java:93)\n\tat org.apache.kafka.connect.storage.Converter.fromConnectData(Converter.java:63)
org.apache.kafka.connect.runtime.WorkerSourceTask.lambda$convertTransformedRecord$3(WorkerSourceTask.java:329)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:156)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:190)\n\t... 11 more\nCaused by: org.apache.kafka.common.errors.SerializationException: Error serializing Avro message\n\tat io.confluent.kafka.serializers.AbstractKafkaAvroSerializer.serializeImpl(AbstractKafkaAvroSerializer.java:154)\n\tat io.confluent.connect.avro.AvroConverter$Serializer.serialize(AvroConverter.java:153)\n\tat io.confluent.connect.avro.AvroConverter.fromConnectData(AvroConverter.java:86)\n\t... 15 more\nCaused by: java.net.ConnectException: Connection refused (Connection refused)\n\tat java.base/java.net.PlainSocketImpl.socketConnect(Native Method)\n\tat java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:412)\n\tat java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:255)\n\tat java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:237)\n\tat java.base/java.net.Socket.connect(Socket.java:615)\n\tat java.base/sun.net.NetworkClient.doConnect(NetworkClient.java:177)\n\tat java.base/sun.net.www.http.HttpClient.openServer(HttpClient.java:474)\n\tat java.base/sun.net.www.http.HttpClient.openServer(HttpClient.java:569)\n\tat java.base/sun.net.www.http.HttpClient.(HttpClient.java:242)\n\tat java.base/sun.net.www.http.HttpClient.New(HttpClient.java:341)\n\tat java.base/sun.net.www.http.HttpClient.New(HttpClient.java:362)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:1258)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.plainConnect0(HttpURLConnection.java:1192)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:1086)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:1020)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getOutputStream0(HttpURLConnection.java:1372)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getOutputStream(HttpURLConnection.java:1347)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.sendHttpRequest(RestService.java:268)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.httpRequest(RestService.java:367)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.registerSchema(RestService.java:544)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.registerSchema(RestService.java:532)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.registerSchema(RestService.java:490)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.registerAndGetId(CachedSchemaRegistryClient.java:257)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.register(CachedSchemaRegistryClient.java:366)
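As in the first question, the root cause buried in this trace is java.net.ConnectException: Connection refused, raised while the AvroConverter contacts the Schema Registry. One thing worth checking in the config above: it points the converter at http://schema-registry:8083, but 8083 is conventionally the Kafka Connect REST port (compare the worker_id), while Schema Registry listens on 8081 by default. A sketch of the corrected converter settings, assuming the default registry port:
"value.converter": "io.confluent.connect.avro.AvroConverter",
"value.converter.schema.registry.url": "http://schema-registry:8081"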

kafka-connect: Getting an error in distributed configuration for a Cassandra sink connector

I get a task error for a distributed configuration of a Cassandra sink connector. I ran the command:
curl -s localhost:8083/connectors/cassandraSinkConnector2/status | jq
to get the status:
{
  "name": "cassandraSinkConnector2",
  "connector": {
    "state": "RUNNING",
    "worker_id": "localhost:8083"
  },
  "tasks": [
    {
      "id": 0,
      "state": "FAILED",
      "worker_id": "localhost:8083",
"trace": "org.apache.kafka.common.KafkaException: Failed to construct kafka consumer\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:811)\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624)\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:605)\n\tat org.apache.kafka.connect.runtime.Worker.buildWorkerTask(Worker.java:505)\n\tat org.apache.kafka.connect.runtime.Worker.startTask(Worker.java:441)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder.startTask(DistributedHerder.java:865)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder.access$1600(DistributedHerder.java:110)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:880)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:876)\n\tat java.util.concurrent.FutureTask.run(FutureTask.java:266)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: org.apache.kafka.common.KafkaException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor ClassNotFoundException exception occurred\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:357)\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:332)\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:319)\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:701)\n\t... 12 more\nCaused by: java.lang.ClassNotFoundException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor\n\tat java.net.URLClassLoader.findClass(URLClassLoader.java:382)\n\tat java.lang.ClassLoader.loadClass(ClassLoader.java:424)\n\tat org.apache.kafka.connect.runtime.isolation.PluginClassLoader.loadClass(PluginClassLoader.java:104)\n\tat java.lang.ClassLoader.loadClass(ClassLoader.java:357)\n\tat java.lang.Class.forName0(Native Method)\n\tat java.lang.Class.forName(Class.java:348)\n\tat org.apache.kafka.common.utils.Utils.loadClass(Utils.java:338)\n\tat org.apache.kafka.common.utils.Utils.newInstance(Utils.java:327)\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:355)\n\t... 15 more\n"
    }
  ],
  "type": "sink"
}
Stack trace:
"trace": "org.apache.kafka.common.KafkaException: Failed to construct kafka consumer
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:811)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:605)
at org.apache.kafka.connect.runtime.Worker.buildWorkerTask(Worker.java:505)
at org.apache.kafka.connect.runtime.Worker.startTask(Worker.java:441)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder.startTask(DistributedHerder.java:865)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder.access$1600(DistributedHerder.java:110)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:880)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:876)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.common.KafkaException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor ClassNotFoundException exception occurred
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:357)
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:332)
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:319)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:701)
... 12 more
Caused by: java.lang.ClassNotFoundException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
at org.apache.kafka.connect.runtime.isolation.PluginClassLoader.loadClass(PluginClassLoader.java:104)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:348)
at org.apache.kafka.common.utils.Utils.loadClass(Utils.java:338)
at org.apache.kafka.common.utils.Utils.newInstance(Utils.java:327)
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:355)
... 15 more
You can find the configuration of the connector below.
{
  "name": "cassandraSinkConnector2",
  "config": {
    "connector.class": "io.confluent.connect.cassandra.CassandraSinkConnector",
    "tasks.max": "1",
    "topics": "appartenance_de",
    "cassandra.contact.points": "localhost",
    "cassandra.kcql": "INSERT INTO app_test SELECT * FROM app_de",
    "cassandra.port": "9042",
    "cassandra.keyspace": "dev_dkks",
    "cassandra.username": "superuser",
    "cassandra.password": "password",
    "cassandra.write.mode": "insert",
    "value.converter.schemas.enable": "true",
    "value.converter": "io.confluent.connect.avro.AvroConverter",
    "value.converter.schema.registry.url": "http://localhost:8081",
    "name": "cassandraSinkConnector2"
  },
  "tasks": [
    {
      "connector": "cassandraSinkConnector2",
      "task": 0
    }
  ],
  "type": "sink"
}
New error:
org.apache.kafka.connect.errors.ConnectException: Exiting WorkerSinkTask due to unrecoverable exception.
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:560)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:321)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:175)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:219)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.connect.errors.DataException: Record with a null key was encountered. This connector requires that records from Kafka contain the keys for the Cassandra table. Please use a transformation like org.apache.kafka.connect.transforms.ValueToKey to create a key with the proper fields.
at io.confluent.connect.cassandra.CassandraSinkTask.put(CassandraSinkTask.java:86)
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:538)
... 10 more
"
The root error is
java.lang.ClassNotFoundException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
The Monitoring Interceptors are part of Confluent Platform. You can either disable their use in your Kafka Connect worker config, or better, make sure that the /usr/share/java/monitoring-interceptors/monitoring-interceptors-5.2.1.jar JAR is available to your Kafka Connect worker.
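A minimal sketch of the first option, removing the interceptors from the worker configuration (the property names below are the standard worker-level client overrides; whether your installation sets them at all depends on how the worker was provisioned):
# connect-distributed.properties
# remove or comment out these lines so the worker stops loading the interceptors
#consumer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
#producer.interceptor.classes=io.confluent.monitoring.clients.interceptor.MonitoringProducerInterceptor
For the second option, the JAR can be put on the worker's classpath before startup, for example:
export CLASSPATH=/usr/share/java/monitoring-interceptors/monitoring-interceptors-5.2.1.jar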
The new error you're seeing is
org.apache.kafka.connect.errors.DataException:
Record with a null key was encountered. This connector requires that records from Kafka contain the keys for the Cassandra table.
Please use a transformation like org.apache.kafka.connect.transforms.ValueToKey to create a key with the proper fields.
I'd suggest using a Single Message Transform, as the error itself suggests, to correctly key your data.
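A minimal sketch of such a transform chain, added to the connector config; the field name id is a placeholder for whichever value field(s) should form the Cassandra key:
"transforms": "createKey,extractKey",
"transforms.createKey.type": "org.apache.kafka.connect.transforms.ValueToKey",
"transforms.createKey.fields": "id",
"transforms.extractKey.type": "org.apache.kafka.connect.transforms.ExtractField$Key",
"transforms.extractKey.field": "id"
ValueToKey copies the named value fields into a struct key; the optional ExtractField$Key step then flattens that struct down to a single primitive key.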

Spring Cloud Stream Kafka binder fails to publish to DLQ with a key

I'm getting the following exception when processing fails with @StreamListener and the Spring Cloud Stream Kafka binder tries to re-route messages to the DLQ. Using Spring Cloud Edgware.SR5.
org.springframework.messaging.MessageDeliveryException: failed to send Message to channel 'my.message.destination.my.message.group.errors'; nested exception is java.lang.ClassCastException: java.lang.String cannot be cast to [B
at org.springframework.integration.channel.AbstractMessageChannel.send(AbstractMessageChannel.java:451) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.integration.channel.AbstractMessageChannel.send(AbstractMessageChannel.java:375) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.messaging.core.GenericMessagingTemplate.doSend(GenericMessagingTemplate.java:115) ~[spring-messaging-4.3.19.RELEASE.jar:4.3.19.RELEASE]
at org.springframework.messaging.core.GenericMessagingTemplate.doSend(GenericMessagingTemplate.java:45) ~[spring-messaging-4.3.19.RELEASE.jar:4.3.19.RELEASE]
at org.springframework.messaging.core.AbstractMessageSendingTemplate.send(AbstractMessageSendingTemplate.java:105) ~[spring-messaging-4.3.19.RELEASE.jar:4.3.19.RELEASE]
at org.springframework.messaging.core.AbstractMessageSendingTemplate.send(AbstractMessageSendingTemplate.java:95) ~[spring-messaging-4.3.19.RELEASE.jar:4.3.19.RELEASE]
at org.springframework.integration.support.ErrorMessagePublisher.publish(ErrorMessagePublisher.java:155) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.integration.handler.advice.ErrorMessageSendingRecoverer.recover(ErrorMessageSendingRecoverer.java:83) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.retry.support.RetryTemplate.handleRetryExhausted(RetryTemplate.java:512) ~[spring-retry-1.2.2.RELEASE.jar:?]
at org.springframework.retry.support.RetryTemplate.doExecute(RetryTemplate.java:351) ~[spring-retry-1.2.2.RELEASE.jar:?]
at org.springframework.retry.support.RetryTemplate.execute(RetryTemplate.java:180) ~[spring-retry-1.2.2.RELEASE.jar:?]
at org.springframework.kafka.listener.adapter.RetryingAcknowledgingMessageListenerAdapter.onMessage(RetryingAcknowledgingMessageListenerAdapter.java:73) ~[spring-kafka-1.1.8.RELEASE.jar:?]
at org.springframework.kafka.listener.adapter.RetryingAcknowledgingMessageListenerAdapter.onMessage(RetryingAcknowledgingMessageListenerAdapter.java:39) ~[spring-kafka-1.1.8.RELEASE.jar:?]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeRecordListener(KafkaMessageListenerContainer.java:792) [spring-kafka-1.1.8.RELEASE.jar:?]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.invokeListener(KafkaMessageListenerContainer.java:736) [spring-kafka-1.1.8.RELEASE.jar:?]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.access$2100(KafkaMessageListenerContainer.java:246) [spring-kafka-1.1.8.RELEASE.jar:?]
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer$ListenerInvoker.run(KafkaMessageListenerContainer.java:1025) [spring-kafka-1.1.8.RELEASE.jar:?]
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511) [?:1.8.0_192]
at java.util.concurrent.FutureTask.run(FutureTask.java:266) [?:1.8.0_192]
at java.lang.Thread.run(Thread.java:748) [?:1.8.0_192]
Caused by: java.lang.ClassCastException: java.lang.String cannot be cast to [B
at org.springframework.cloud.stream.binder.kafka.KafkaMessageChannelBinder$4.handleMessage(KafkaMessageChannelBinder.java:360) ~[spring-cloud-stream-binder-kafka-1.3.3.RELEASE.jar:1.3.3.RELEASE]
at org.springframework.integration.dispatcher.BroadcastingDispatcher.invokeHandler(BroadcastingDispatcher.java:236) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.integration.dispatcher.BroadcastingDispatcher.dispatch(BroadcastingDispatcher.java:185) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.integration.channel.AbstractSubscribableChannel.doSend(AbstractSubscribableChannel.java:89) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
at org.springframework.integration.channel.AbstractMessageChannel.send(AbstractMessageChannel.java:425) ~[spring-integration-core-4.3.17.RELEASE.jar:4.3.17.RELEASE]
... 19 more
I tried producing messages from kafka-console-producer and figured out that this only happens when a Kafka key is used.
The relevant code snippets follow:
MyMessageConsumer.java:
@StreamListener(MyMessageSink.MY_MESSAGE_INPUT)
@Transactional
public void consumeMyMessage(@Header(KafkaHeaders.RECEIVED_MESSAGE_KEY) String myMessageId, @Payload MyMessage myMessage) {
    if (true) {
        throw new RuntimeException("MockRuntimeException");
    }
}
application.properties (for consumer):
spring.kafka.producer.keySerializer=org.apache.kafka.common.serialization.StringSerializer
spring.kafka.consumer.keyDeserializer=org.apache.kafka.common.serialization.StringDeserializer
spring.cloud.stream.bindings.my-message-in.destination=my.message.destination
spring.cloud.stream.bindings.my-message-in.group=my.message.group
spring.cloud.stream.bindings.my-message-in.content-type=application/json
spring.cloud.stream.bindings.my-message-in.consumer.headerMode=raw
spring.cloud.stream.bindings.my-message-in.consumer.partitioned=true
spring.cloud.stream.kafka.bindings.my-message-in.consumer.enableDlq=true
spring.cloud.stream.kafka.bindings.my-message-in.consumer.dlqName=my.message.destination.dlq
spring.cloud.stream.kafka.bindings.my-message-in.consumer.dlqProducerProperties.configuration.key.serializer=org.apache.kafka.common.serialization.StringSerializer
spring.cloud.stream.kafka.bindings.my-message-in.consumer.dlqProducerProperties.configuration.value.serializer=org.apache.kafka.common.serialization.StringSerializer
spring.cloud.stream.kafka.bindings.my-message-in.consumer.maxAttempts=3
application.properties (for producer):
spring.kafka.producer.keySerializer=org.apache.kafka.common.serialization.StringSerializer
spring.cloud.stream.bindings.my-message-out.destination=my.message.destination
spring.cloud.stream.bindings.my-message-out.content-type=application/json
spring.cloud.stream.bindings.my-message-out.producer.headerMode=raw
spring.cloud.stream.bindings.my-message-out.producer.partitionKeyExtractorClass=com.example.message.TransactionKeyExtractor
spring.cloud.stream.bindings.my-message-out.producer.partitionCount=80
Is there any way to get the DLQ re-routing to work with a message key?
Dead lettering with 1.3.x only supports Spring Cloud Stream's default key/value type (byte[]/byte[]).
Try upgrading to a more recent version.

How to Set Spring Kafka consumer max attempts when using Schema Registry

I am developing a Spring Boot server with Spring Kafka (1.3.2.RELEASE), Apache Avro (1.8.2) and io.confluent's Schema Registry (3.1.2). Every time the Kafka listener gets a Kafka message, it finds the schema id in the message and fetches the Avro schema from the registry server by that id. The problem is, if the schema registry server is down, my listener keeps trying to send HTTP requests to the registry server whenever it gets a message (and prints a large amount of error logs), and it blocks all subsequent Kafka messages since the offset won't move on.
16:56:41.541 ERROR KafkaMessageListenerContainer$ListenerConsumer - - org.springframework.kafka.KafkaListenerEndpointContainer#0-0-C-1 - Container exception
org.apache.kafka.common.errors.SerializationException: Error deserializing key/value for partition trade-0 at offset 810845
Caused by: org.apache.kafka.common.errors.SerializationException: Error deserializing Avro message for id 21
Caused by: java.net.ConnectException: Connection refused (Connection refused)
at java.net.PlainSocketImpl.socketConnect(Native Method)
at java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:350)
at java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:206)
at java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:188)
at java.net.SocksSocketImpl.connect(SocksSocketImpl.java:392)
at java.net.Socket.connect(Socket.java:589)
at java.net.Socket.connect(Socket.java:538)
at sun.net.NetworkClient.doConnect(NetworkClient.java:180)
at sun.net.www.http.HttpClient.openServer(HttpClient.java:463)
at sun.net.www.http.HttpClient.openServer(HttpClient.java:558)
at sun.net.www.http.HttpClient.<init>(HttpClient.java:242)
at sun.net.www.http.HttpClient.New(HttpClient.java:339)
at sun.net.www.http.HttpClient.New(HttpClient.java:357)
at sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:1202)
at sun.net.www.protocol.http.HttpURLConnection.plainConnect0(HttpURLConnection.java:1138)
at sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:1032)
at sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:966)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1546)
at sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1474)
at java.net.HttpURLConnection.getResponseCode(HttpURLConnection.java:480)
at io.confluent.kafka.schemaregistry.client.rest.RestService.sendHttpRequest(RestService.java:153)
at io.confluent.kafka.schemaregistry.client.rest.RestService.httpRequest(RestService.java:187)
at io.confluent.kafka.schemaregistry.client.rest.RestService.getId(RestService.java:323)
at io.confluent.kafka.schemaregistry.client.rest.RestService.getId(RestService.java:316)
at io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaByIdFromRegistry(CachedSchemaRegistryClient.java:63)
at io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getBySubjectAndID(CachedSchemaRegistryClient.java:118)
at io.confluent.kafka.serializers.AbstractKafkaAvroDeserializer.deserialize(AbstractKafkaAvroDeserializer.java:121)
at io.confluent.kafka.serializers.AbstractKafkaAvroDeserializer.deserialize(AbstractKafkaAvroDeserializer.java:92)
at io.confluent.kafka.serializers.KafkaAvroDeserializer.deserialize(KafkaAvroDeserializer.java:54)
at org.apache.kafka.common.serialization.ExtendedDeserializer$Wrapper.deserialize(ExtendedDeserializer.java:65)
at org.apache.kafka.common.serialization.ExtendedDeserializer$Wrapper.deserialize(ExtendedDeserializer.java:55)
at org.apache.kafka.clients.consumer.internals.Fetcher.parseRecord(Fetcher.java:918)
at org.apache.kafka.clients.consumer.internals.Fetcher.access$2600(Fetcher.java:93)
at org.apache.kafka.clients.consumer.internals.Fetcher$PartitionRecords.fetchRecords(Fetcher.java:1095)
at org.apache.kafka.clients.consumer.internals.Fetcher$PartitionRecords.access$1200(Fetcher.java:944)
at org.apache.kafka.clients.consumer.internals.Fetcher.fetchRecords(Fetcher.java:567)
at org.apache.kafka.clients.consumer.internals.Fetcher.fetchedRecords(Fetcher.java:528)
at org.apache.kafka.clients.consumer.KafkaConsumer.pollOnce(KafkaConsumer.java:1086)
at org.apache.kafka.clients.consumer.KafkaConsumer.poll(KafkaConsumer.java:1043)
at org.springframework.kafka.listener.KafkaMessageListenerContainer$ListenerConsumer.run(KafkaMessageListenerContainer.java:614)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.lang.Thread.run(Thread.java:748)
I have tried using a RetryTemplate to set the max attempts, but it didn't work; it seems the RetryTemplate only applies inside my listener method. I also didn't find any helpful config on Confluent's website.
For now I have replaced the KafkaAvroDeserializer with a CustomAvroDeserializer, which extends KafkaAvroDeserializer and overrides its deserialize method, wrapping the body in a try-catch, like this:
@Log4j
public class CustomAvroDeserializer extends KafkaAvroDeserializer {
    @Override
    public Object deserialize(String s, byte[] bytes) {
        try {
            return this.deserialize(bytes);
        } catch (Exception e) {
            // swallow the failure so the container can move past the record instead of retrying forever
            log.error("encountered a problem when deserializing a message with the schema registry", e);
            return null;
        }
    }
}
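Assuming Spring Boot builds the consumer and com.example stands in for the real package (a placeholder), the custom class can be wired in with:
spring.kafka.consumer.value-deserializer=com.example.CustomAvroDeserializer
Note that returning null from deserialize means the listener will receive records with a null payload and must tolerate them; newer spring-kafka versions also ship an ErrorHandlingDeserializer wrapper designed for exactly this situation.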