Keycloak PUT Group API failing for large payload sizes

We have been using Keycloak group attributes to store some large payload values, but we have noticed that adding a large payload like the example below:
{
  "id": "42f508af-03f1-42ee-aafa-721cd06a9962",
  "name": "test-group",
  "path": "/formsflow-analytics/test-group",
  "attributes": {
    "dashboards": [
      "[{'6': 'New Business License Application'}, {'7': 'Freedom Of Information Form'}, {'12': 'dashboard4'}, {'1': 'SAMPLE'}, {'2': 'SAMPL'}, {'5': 'New Business License Application'}, {'9': 'Freedom of Information and Protection of Privacy'},{'1': 'GROUP1'},{'2': 'SAMPLE1'}]"
    ]
  },
  "realmRoles": [],
  "clientRoles": {},
  "subGroups": [],
  "access": {
    "view": true,
    "manage": true,
    "manageMembership": true
  }
}
gives a 500 Internal Server Error with the response:
{"error":"unknown_error"}
Keycloak logs:
07:56:06,323 ERROR [io.undertow.request] (default task-14) UT005023: Exception handling request to /auth/admin/realms/forms-flow-ai/groups/2a74cd01-3b09-453b-ac5b-90ffa95e8c2a: org.keycloak.models.ModelException: org.hibernate.exception.DataException: could not execute statement
at org.keycloak.keycloak-model-jpa#14.0.0//org.keycloak.connections.jpa.PersistenceExceptionConverter.convert(PersistenceExceptionConverter.java:84)
at org.keycloak.keycloak-model-jpa#14.0.0//org.keycloak.connections.jpa.JpaExceptionConverter.convert(JpaExceptionConverter.java:31)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.transaction.JtaTransactionWrapper.lambda$handleException$0(JtaTransactionWrapper.java:65)
at java.base/java.util.stream.ReferencePipeline$3$1.accept(ReferencePipeline.java:195)
at java.base/java.util.HashMap$ValueSpliterator.forEachRemaining(HashMap.java:1675)
at java.base/java.util.stream.AbstractPipeline.copyInto(AbstractPipeline.java:484)
at java.base/java.util.stream.AbstractPipeline.wrapAndCopyInto(AbstractPipeline.java:474)
at java.base/java.util.stream.ForEachOps$ForEachOp.evaluateSequential(ForEachOps.java:150)
at java.base/java.util.stream.ForEachOps$ForEachOp$OfRef.evaluateSequential(ForEachOps.java:173)
at java.base/java.util.stream.AbstractPipeline.evaluate(AbstractPipeline.java:234)
at java.base/java.util.stream.ReferencePipeline.forEach(ReferencePipeline.java:497)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.transaction.JtaTransactionWrapper.handleException(JtaTransactionWrapper.java:67)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.transaction.JtaTransactionWrapper.commit(JtaTransactionWrapper.java:92)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.services.DefaultKeycloakTransactionManager.commit(DefaultKeycloakTransactionManager.java:136)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.services.filters.AbstractRequestFilter.close(AbstractRequestFilter.java:64)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.services.filters.AbstractRequestFilter.filter(AbstractRequestFilter.java:49)
at org.keycloak.keycloak-wildfly-extensions#14.0.0//org.keycloak.provider.wildfly.WildFlyRequestFilter.doFilter(WildFlyRequestFilter.java:39)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.core.ManagedFilter.doFilter(ManagedFilter.java:61)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.FilterHandler$FilterChainImpl.doFilter(FilterHandler.java:131)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.FilterHandler.handleRequest(FilterHandler.java:84)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.security.ServletSecurityRoleHandler.handleRequest(ServletSecurityRoleHandler.java:62)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletChain$1.handleRequest(ServletChain.java:68)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletDispatchingHandler.handleRequest(ServletDispatchingHandler.java:36)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.security.SecurityContextAssociationHandler.handleRequest(SecurityContextAssociationHandler.java:78)
at io.undertow.core#2.2.5.Final//io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.RedirectDirHandler.handleRequest(RedirectDirHandler.java:68)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.security.SSLInformationAssociationHandler.handleRequest(SSLInformationAssociationHandler.java:117)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.security.ServletAuthenticationCallHandler.handleRequest(ServletAuthenticationCallHandler.java:57)
at io.undertow.core#2.2.5.Final//io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43)
at io.undertow.core#2.2.5.Final//io.undertow.security.handlers.AbstractConfidentialityHandler.handleRequest(AbstractConfidentialityHandler.java:46)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.security.ServletConfidentialityConstraintHandler.handleRequest(ServletConfidentialityConstraintHandler.java:64)
at io.undertow.core#2.2.5.Final//io.undertow.security.handlers.AuthenticationMechanismsHandler.handleRequest(AuthenticationMechanismsHandler.java:60)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.security.CachedAuthenticatedSessionHandler.handleRequest(CachedAuthenticatedSessionHandler.java:77)
at io.undertow.core#2.2.5.Final//io.undertow.security.handlers.NotificationReceiverHandler.handleRequest(NotificationReceiverHandler.java:50)
at io.undertow.core#2.2.5.Final//io.undertow.security.handlers.AbstractSecurityContextAssociationHandler.handleRequest(AbstractSecurityContextAssociationHandler.java:43)
at io.undertow.core#2.2.5.Final//io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.security.jacc.JACCContextIdHandler.handleRequest(JACCContextIdHandler.java:61)
at io.undertow.core#2.2.5.Final//io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.deployment.GlobalRequestControllerHandler.handleRequest(GlobalRequestControllerHandler.java:68)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.SendErrorPageHandler.handleRequest(SendErrorPageHandler.java:52)
at io.undertow.core#2.2.5.Final//io.undertow.server.handlers.PredicateHandler.handleRequest(PredicateHandler.java:43)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler.handleFirstRequest(ServletInitialHandler.java:269)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler.access$100(ServletInitialHandler.java:78)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler$2.call(ServletInitialHandler.java:133)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler$2.call(ServletInitialHandler.java:130)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.core.ServletRequestContextThreadSetupAction$1.call(ServletRequestContextThreadSetupAction.java:48)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.core.ContextClassLoaderSetupAction$1.call(ContextClassLoaderSetupAction.java:43)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.security.SecurityContextThreadSetupAction.lambda$create$0(SecurityContextThreadSetupAction.java:105)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.deployment.UndertowDeploymentInfoService$UndertowThreadSetupAction.lambda$create$0(UndertowDeploymentInfoService.java:1530)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.deployment.UndertowDeploymentInfoService$UndertowThreadSetupAction.lambda$create$0(UndertowDeploymentInfoService.java:1530)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.deployment.UndertowDeploymentInfoService$UndertowThreadSetupAction.lambda$create$0(UndertowDeploymentInfoService.java:1530)
at org.wildfly.extension.undertow#23.0.2.Final//org.wildfly.extension.undertow.deployment.UndertowDeploymentInfoService$UndertowThreadSetupAction.lambda$create$0(UndertowDeploymentInfoService.java:1530)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler.dispatchRequest(ServletInitialHandler.java:249)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler.access$000(ServletInitialHandler.java:78)
at io.undertow.servlet#2.2.5.Final//io.undertow.servlet.handlers.ServletInitialHandler$1.handleRequest(ServletInitialHandler.java:99)
at io.undertow.core#2.2.5.Final//io.undertow.server.Connectors.executeRootHandler(Connectors.java:387)
at io.undertow.core#2.2.5.Final//io.undertow.server.HttpServerExchange$1.run(HttpServerExchange.java:841)
at org.jboss.threads#2.4.0.Final//org.jboss.threads.ContextClassLoaderSavingRunnable.run(ContextClassLoaderSavingRunnable.java:35)
at org.jboss.threads#2.4.0.Final//org.jboss.threads.EnhancedQueueExecutor.safeRun(EnhancedQueueExecutor.java:1990)
at org.jboss.threads#2.4.0.Final//org.jboss.threads.EnhancedQueueExecutor$ThreadBody.doRunTask(EnhancedQueueExecutor.java:1486)
at org.jboss.threads#2.4.0.Final//org.jboss.threads.EnhancedQueueExecutor$ThreadBody.run(EnhancedQueueExecutor.java:1377)
at org.jboss.xnio#3.8.4.Final//org.xnio.XnioWorker$WorkerThreadFactory$1$1.run(XnioWorker.java:1280)
at java.base/java.lang.Thread.run(Thread.java:829)
Caused by: org.hibernate.exception.DataException: could not execute statement
at org.hibernate#5.3.20.Final//org.hibernate.exception.internal.SQLStateConversionDelegate.convert(SQLStateConversionDelegate.java:118)
at org.hibernate#5.3.20.Final//org.hibernate.exception.internal.StandardSQLExceptionConverter.convert(StandardSQLExceptionConverter.java:42)
at org.hibernate#5.3.20.Final//org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:113)
at org.hibernate#5.3.20.Final//org.hibernate.engine.jdbc.spi.SqlExceptionHelper.convert(SqlExceptionHelper.java:99)
at org.hibernate#5.3.20.Final//org.hibernate.engine.jdbc.internal.ResultSetReturnImpl.executeUpdate(ResultSetReturnImpl.java:178)
at org.hibernate#5.3.20.Final//org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:3192)
at org.hibernate#5.3.20.Final//org.hibernate.persister.entity.AbstractEntityPersister.insert(AbstractEntityPersister.java:3706)
at org.hibernate#5.3.20.Final//org.hibernate.action.internal.EntityInsertAction.execute(EntityInsertAction.java:90)
at org.hibernate#5.3.20.Final//org.hibernate.engine.spi.ActionQueue.executeActions(ActionQueue.java:604)
at org.hibernate#5.3.20.Final//org.hibernate.engine.spi.ActionQueue.executeActions(ActionQueue.java:478)
at org.hibernate#5.3.20.Final//org.hibernate.event.internal.AbstractFlushingEventListener.performExecutions(AbstractFlushingEventListener.java:356)
at org.hibernate#5.3.20.Final//org.hibernate.event.internal.DefaultFlushEventListener.onFlush(DefaultFlushEventListener.java:39)
at org.hibernate#5.3.20.Final//org.hibernate.internal.SessionImpl.doFlush(SessionImpl.java:1472)
at org.hibernate#5.3.20.Final//org.hibernate.internal.SessionImpl.managedFlush(SessionImpl.java:512)
at org.hibernate#5.3.20.Final//org.hibernate.internal.SessionImpl.flushBeforeTransactionCompletion(SessionImpl.java:3310)
at org.hibernate#5.3.20.Final//org.hibernate.internal.SessionImpl.beforeTransactionCompletion(SessionImpl.java:2506)
at org.hibernate#5.3.20.Final//org.hibernate.engine.jdbc.internal.JdbcCoordinatorImpl.beforeTransactionCompletion(JdbcCoordinatorImpl.java:447)
at org.hibernate#5.3.20.Final//org.hibernate.resource.transaction.backend.jta.internal.JtaTransactionCoordinatorImpl.beforeCompletion(JtaTransactionCoordinatorImpl.java:352)
at org.hibernate#5.3.20.Final//org.hibernate.resource.transaction.backend.jta.internal.synchronization.SynchronizationCallbackCoordinatorNonTrackingImpl.beforeCompletion(SynchronizationCallbackCoordinatorNonTrackingImpl.java:47)
at org.hibernate#5.3.20.Final//org.hibernate.resource.transaction.backend.jta.internal.synchronization.RegisteredSynchronization.beforeCompletion(RegisteredSynchronization.java:37)
at org.wildfly.transaction.client#1.1.13.Final//org.wildfly.transaction.client.AbstractTransaction.performConsumer(AbstractTransaction.java:236)
at org.wildfly.transaction.client#1.1.13.Final//org.wildfly.transaction.client.AbstractTransaction.performConsumer(AbstractTransaction.java:247)
at org.wildfly.transaction.client#1.1.13.Final//org.wildfly.transaction.client.AbstractTransaction$AssociatingSynchronization.beforeCompletion(AbstractTransaction.java:292)
at org.jboss.jts//com.arjuna.ats.internal.jta.resources.arjunacore.SynchronizationImple.beforeCompletion(SynchronizationImple.java:76)
at org.jboss.jts//com.arjuna.ats.arjuna.coordinator.TwoPhaseCoordinator.beforeCompletion(TwoPhaseCoordinator.java:360)
at org.jboss.jts//com.arjuna.ats.arjuna.coordinator.TwoPhaseCoordinator.end(TwoPhaseCoordinator.java:91)
at org.jboss.jts//com.arjuna.ats.arjuna.AtomicAction.commit(AtomicAction.java:162)
at org.jboss.jts//com.arjuna.ats.internal.jta.transaction.arjunacore.TransactionImple.commitAndDisassociate(TransactionImple.java:1295)
at org.jboss.jts//com.arjuna.ats.internal.jta.transaction.arjunacore.BaseTransaction.commit(BaseTransaction.java:126)
at org.jboss.jts.integration//com.arjuna.ats.jbossatx.BaseTransactionManagerDelegate.commit(BaseTransactionManagerDelegate.java:94)
at org.wildfly.transaction.client#1.1.13.Final//org.wildfly.transaction.client.LocalTransaction.commitAndDissociate(LocalTransaction.java:78)
at org.wildfly.transaction.client#1.1.13.Final//org.wildfly.transaction.client.ContextTransactionManager.commit(ContextTransactionManager.java:71)
at org.keycloak.keycloak-services#14.0.0//org.keycloak.transaction.JtaTransactionWrapper.commit(JtaTransactionWrapper.java:90)
... 50 more
Caused by: org.postgresql.util.PSQLException: ERROR: value too long for type character varying(255)
at org.postgresql.jdbc#42.2.5//org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2440)
at org.postgresql.jdbc#42.2.5//org.postgresql.core.v3.QueryExecutorImpl.processResults(QueryExecutorImpl.java:2183)
at org.postgresql.jdbc#42.2.5//org.postgresql.core.v3.QueryExecutorImpl.execute(QueryExecutorImpl.java:308)
at org.postgresql.jdbc#42.2.5//org.postgresql.jdbc.PgStatement.executeInternal(PgStatement.java:441)
at org.postgresql.jdbc#42.2.5//org.postgresql.jdbc.PgStatement.execute(PgStatement.java:365)
at org.postgresql.jdbc#42.2.5//org.postgresql.jdbc.PgPreparedStatement.executeWithFlags(PgPreparedStatement.java:143)
at org.postgresql.jdbc#42.2.5//org.postgresql.jdbc.PgPreparedStatement.executeUpdate(PgPreparedStatement.java:120)
at org.jboss.ironjacamar.jdbcadapters#1.4.27.Final//org.jboss.jca.adapters.jdbc.WrappedPreparedStatement.executeUpdate(WrappedPreparedStatement.java:537)
at org.hibernate#5.3.20.Final//org.hibernate.engine.jdbc.internal.ResultSetReturnImpl.executeUpdate(ResultSetReturnImpl.java:175)
... 78 more

Look at the root cause in your backtrace:
Caused by: org.postgresql.util.PSQLException:
ERROR: value too long for type character varying(255)
The database column where that payload is saved has a maximum size of 255 characters, and your payload exceeds it. That makes sense: group attributes are not designed to store arbitrarily large data. They have limits, and in this case the limit is determined by the underlying DB column type, at most 255 characters.
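Because an attribute can hold multiple values and Keycloak stores each value in its own 255-character column, one workaround is to split the payload across several values, each under 255 characters, and reassemble them in the client. A minimal sketch of that idea, with illustrative chunk boundaries:
{
  "id": "42f508af-03f1-42ee-aafa-721cd06a9962",
  "name": "test-group",
  "attributes": {
    "dashboards": [
      "[{'6': 'New Business License Application'}, {'7': 'Freedom Of Information Form'}]",
      "[{'12': 'dashboard4'}, {'1': 'SAMPLE'}, {'2': 'SAMPL'}]"
    ]
  }
}
A cleaner option is to keep large payloads outside Keycloak (for example in your own database) and store only a reference in the attribute; widening the column in Keycloak's schema by hand is unsupported and tends to break on upgrades.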

Related

Failed to deserialize data for topic to protobuf sink connector

I can consume the produced Protobuf messages from the Kafka topic using tools like Conduktor. However, when I try to poll data using JdbcSinkConnector, it throws an exception like:
org.apache.kafka.common.errors.SerializationException: Error deserializing Protobuf message
Please take a look at the error detail below, from calling the Kafka Connect API:
URL: http://localhost:8083/connectors?expand=info&expand=status
JSON response and trace:
{
  "sink_postgres_03_proto": {
    "info": {
      "name": "sink_postgres_03_proto",
      "config": {
        "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
        "connection.password": "54321",
        "topics": "order-messages",
        "value.converter.schema.registry.url": "http://localhost:8081",
        "key.converter.schemas.enable": "false",
        "auto.evolve": "true",
        "connection.user": "postgres",
        "value.converter.schemas.enable": "true",
        "name": "sink_postgres_03_proto",
        "auto.create": "true",
        "connection.url": "jdbc:postgresql://localhost:5432/CallHistoryService",
        "value.converter": "io.confluent.connect.protobuf.ProtobufConverter",
        "insert.mode": "insert",
        "key.converter": "org.apache.kafka.connect.storage.StringConverter"
      },
      "tasks": [
        {
          "connector": "sink_postgres_03_proto",
          "task": 0
        }
      ],
      "type": "sink"
    },
    "status": {
      "name": "sink_postgres_03_proto",
      "connector": {
        "state": "RUNNING",
        "worker_id": "kafka-connect:8083"
      },
      "tasks": [
        {
          "id": 0,
          "state": "FAILED",
          "worker_id": "kafka-connect:8083",
"trace":"org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:196)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:122)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:472)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:226)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:198)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:185)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:235)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:834)\nCaused by: org.apache.kafka.connect.errors.DataException: Failed to deserialize data for topic order-messages to Protobuf: \n\tat io.confluent.connect.protobuf.ProtobufConverter.toConnectData(ProtobufConverter.java:123)\n\tat org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$1(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:146)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:180)\n\t... 
13 more\nCaused by: org.apache.kafka.common.errors.SerializationException: Error deserializing Protobuf message for id 1\nCaused by: java.net.ConnectException: Connection refused (Connection refused)\n\tat java.base/java.net.PlainSocketImpl.socketConnect(Native Method)\n\tat java.base/java.net.AbstractPlainSocketImpl.doConnect(AbstractPlainSocketImpl.java:399)\n\tat java.base/java.net.AbstractPlainSocketImpl.connectToAddress(AbstractPlainSocketImpl.java:242)\n\tat java.base/java.net.AbstractPlainSocketImpl.connect(AbstractPlainSocketImpl.java:224)\n\tat java.base/java.net.Socket.connect(Socket.java:609)\n\tat java.base/sun.net.NetworkClient.doConnect(NetworkClient.java:177)\n\tat java.base/sun.net.www.http.HttpClient.openServer(HttpClient.java:474)\n\tat java.base/sun.net.www.http.HttpClient.openServer(HttpClient.java:569)\n\tat java.base/sun.net.www.http.HttpClient.<init>(HttpClient.java:242)\n\tat java.base/sun.net.www.http.HttpClient.New(HttpClient.java:341)\n\tat java.base/sun.net.www.http.HttpClient.New(HttpClient.java:362)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getNewHttpClient(HttpURLConnection.java:1253)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.plainConnect0(HttpURLConnection.java:1187)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.plainConnect(HttpURLConnection.java:1081)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.connect(HttpURLConnection.java:1015)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getInputStream0(HttpURLConnection.java:1592)\n\tat java.base/sun.net.www.protocol.http.HttpURLConnection.getInputStream(HttpURLConnection.java:1520)\n\tat java.base/java.net.HttpURLConnection.getResponseCode(HttpURLConnection.java:527)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.sendHttpRequest(RestService.java:272)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.httpRequest(RestService.java:352)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.getId(RestService.java:660)\n\tat io.confluent.kafka.schemaregistry.client.rest.RestService.getId(RestService.java:642)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaByIdFromRegistry(CachedSchemaRegistryClient.java:217)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaBySubjectAndId(CachedSchemaRegistryClient.java:291)\n\tat io.confluent.kafka.schemaregistry.client.CachedSchemaRegistryClient.getSchemaById(CachedSchemaRegistryClient.java:276)\n\tat io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufDeserializer.deserialize(AbstractKafkaProtobufDeserializer.java:117)\n\tat io.confluent.kafka.serializers.protobuf.AbstractKafkaProtobufDeserializer.deserializeWithSchemaAndVersion(AbstractKafkaProtobufDeserializer.java:235)\n\tat io.confluent.connect.protobuf.ProtobufConverter$Deserializer.deserialize(ProtobufConverter.java:163)\n\tat io.confluent.connect.protobuf.ProtobufConverter.toConnectData(ProtobufConverter.java:107)\n\tat org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$1(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:146)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:180)\n\tat 
org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:122)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:495)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:472)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:226)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:198)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:185)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:235)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:834)\n"
        }
      ],
      "type": "sink"
    }
  }
}
Please advise.
Thank you!
If you take the trace node and convert the \n and \t to newlines and tabs, you get a readable stack trace which shows the problem:
Caused by: org.apache.kafka.connect.errors.DataException: Failed to deserialize data for topic order-messages to Protobuf:
at io.confluent.connect.protobuf.ProtobufConverter.toConnectData(ProtobufConverter.java:123)
at org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)
at org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$1(WorkerSinkTask.java:495)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:146)
at org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:180)
... 13 more
Caused by: org.apache.kafka.common.errors.SerializationException: Error deserializing Protobuf message for id 1
Caused by: java.net.ConnectException: Connection refused (Connection refused)
The error is java.net.ConnectException: Connection refused, which means that either (a) you've misconfigured the location of your Schema Registry, or (b) the Schema Registry is not running.
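A common cause when Kafka Connect runs in Docker is that localhost inside the worker container refers to the container itself, not to the host. In that case, point the converter at a hostname the worker can actually reach; a sketch of the relevant part of the connector config, assuming the registry is reachable as schema-registry (a hypothetical hostname, adjust to your environment):
{
  "value.converter": "io.confluent.connect.protobuf.ProtobufConverter",
  "value.converter.schema.registry.url": "http://schema-registry:8081"
}
You can verify reachability from inside the worker with a plain HTTP request to the registry's /subjects endpoint before restarting the connector.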

Debezium Postgres sink connector fails to insert values with type DATE

After setting up both source and sink connectors, I ran into problems with Postgres columns of type DATE:
ERROR: column "foo" is of type date but expression is of type integer
I checked the Avro schema and saw that column foo was serialized as io.debezium.time.Date:
{
  "default": null,
  "name": "foo",
  "type": [
    "null",
    {
      "connect.name": "io.debezium.time.Date",
      "connect.version": 1,
      "type": "int"
    }
  ]
}
What should I do so that the sink connector inserts these values correctly (as DATE, not INTEGER)?
Full stacktrace:
org.apache.kafka.connect.errors.ConnectException: Exiting WorkerSinkTask due to unrecoverable exception.
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:560)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:321)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:177)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:227)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.connect.errors.ConnectException: java.sql.SQLException: java.sql.BatchUpdateException: Batch entry 0 INSERT INTO "test_table" ("id","foo") VALUES (75046,18577) ON CONFLICT ("id") DO UPDATE SET "foo"=EXCLUDED."foo" was aborted: ERROR: column "foo" is of type date but expression is of type integer
Hint: You will need to rewrite or cast the expression.
Position: 249 Call getNextException to see other errors in the batch.
org.postgresql.util.PSQLException: ERROR: column "foo" is of type date but expression is of type integer
Hint: You will need to rewrite or cast the expression.
Position: 249
org.postgresql.util.PSQLException: ERROR: column "foo" is of type date but expression is of type integer
Hint: You will need to rewrite or cast the expression.
Position: 249
at io.confluent.connect.jdbc.sink.JdbcSinkTask.put(JdbcSinkTask.java:89)
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:538)
... 10 more
Caused by: java.sql.SQLException: java.sql.BatchUpdateException: Batch entry 0 INSERT INTO "test_table" ("id","foo") VALUES (75046,18577) ON CONFLICT ("id") DO UPDATE SET "foo"=EXCLUDED."foo" was aborted: ERROR: column "foo" is of type date but expression is of type integer
Hint: You will need to rewrite or cast the expression.
Position: 249 Call getNextException to see other errors in the batch.
org.postgresql.util.PSQLException: ERROR: column "foo" is of type date but expression is of type integer
Hint: You will need to rewrite or cast the expression.
Position: 249
org.postgresql.util.PSQLException: ERROR: column "foo" is of type date but expression is of type integer
Hint: You will need to rewrite or cast the expression.
Position: 249
... 12 more
Source config:
{
  "name": "dbz-source-test-1",
  "config": {
    "name": "dbz-source-test-1",
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "database.hostname": "some.host",
    "database.port": "5432",
    "database.user": "test_debezium",
    "database.password": "password",
    "database.dbname": "dbname",
    "plugin.name": "wal2json_rds",
    "slot.name": "wal2json_rds",
    "database.server.name": "server_test",
    "table.whitelist": "public.test_table",
    "transforms": "route",
    "transforms.route.type": "org.apache.kafka.connect.transforms.RegexRouter",
    "transforms.route.regex": "([^.]+)\\.([^.]+)\\.([^.]+)",
    "transforms.route.replacement": "dbz_source_$3",
    "topic.selection.strategy": "topic_per_table",
    "include.unknown.datatypes": true,
    "decimal.handling.mode": "double",
    "snapshot.mode": "never"
  }
}
Sink config:
{
  "name": "dbz-sink-test-1",
  "config": {
    "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
    "config.providers": "file",
    "config.providers.file.class": "org.apache.kafka.common.config.provider.FileConfigProvider",
    "config.providers.file.param.secrets": "/opt/mysecrets",
    "topics": "dbz_source_test_table",
    "connection.url": "someurl",
    "connection.user": "${file:/opt/mysecrets.properties:user}",
    "connection.password": "${file:/opt/mysecrets.properties:pass}",
    "transforms": "unwrap",
    "transforms.unwrap.type": "io.debezium.transforms.UnwrapFromEnvelope",
    "table.name.format": "dbz_source_",
    "insert.mode": "upsert",
    "pk.field": "id",
    "pk.mode": "record_value"
  }
}
I fixed the problem by switching the source connector's time.precision.mode config to connect:
When the time.precision.mode configuration property is set to connect, then the connector will use the predefined Kafka Connect logical types. This may be useful when consumers only know about the built-in Kafka Connect logical types and are unable to handle variable-precision time values.
After that, the serialization type becomes different:
{
  "default": null,
  "name": "foo",
  "type": [
    "null",
    {
      "connect.name": "org.apache.kafka.connect.data.Date",
      "connect.version": 1,
      "logicalType": "date",
      "type": "int"
    }
  ]
}
The sink connector is aware of the org.apache.kafka.connect.data.Date type and inserts it correctly.
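For reference, a minimal sketch of that change against the source config above (all other properties stay as they were):
{
  "name": "dbz-source-test-1",
  "config": {
    "connector.class": "io.debezium.connector.postgresql.PostgresConnector",
    "time.precision.mode": "connect"
  }
}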

kafka-connect : Getting error in distributed configuration for connector sink cassandra

I get a task error for a distributed configuration of a Cassandra sink connector. I ran the following command to get the status:
curl -s localhost:8083/connectors/cassandraSinkConnector2/status | jq
{
  "name": "cassandraSinkConnector2",
  "connector": {
    "state": "RUNNING",
    "worker_id": "localhost:8083"
  },
  "tasks": [
    {
      "id": 0,
      "state": "FAILED",
      "worker_id": "localhost:8083",
"trace": "org.apache.kafka.common.KafkaException: Failed to construct kafka consumer\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:811)\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624)\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:605)\n\tat org.apache.kafka.connect.runtime.Worker.buildWorkerTask(Worker.java:505)\n\tat org.apache.kafka.connect.runtime.Worker.startTask(Worker.java:441)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder.startTask(DistributedHerder.java:865)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder.access$1600(DistributedHerder.java:110)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:880)\n\tat org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:876)\n\tat java.util.concurrent.FutureTask.run(FutureTask.java:266)\n\tat java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)\n\tat java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)\n\tat java.lang.Thread.run(Thread.java:748)\nCaused by: org.apache.kafka.common.KafkaException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor ClassNotFoundException exception occurred\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:357)\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:332)\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:319)\n\tat org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:701)\n\t... 12 more\nCaused by: java.lang.ClassNotFoundException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor\n\tat java.net.URLClassLoader.findClass(URLClassLoader.java:382)\n\tat java.lang.ClassLoader.loadClass(ClassLoader.java:424)\n\tat org.apache.kafka.connect.runtime.isolation.PluginClassLoader.loadClass(PluginClassLoader.java:104)\n\tat java.lang.ClassLoader.loadClass(ClassLoader.java:357)\n\tat java.lang.Class.forName0(Native Method)\n\tat java.lang.Class.forName(Class.java:348)\n\tat org.apache.kafka.common.utils.Utils.loadClass(Utils.java:338)\n\tat org.apache.kafka.common.utils.Utils.newInstance(Utils.java:327)\n\tat org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:355)\n\t... 15 more\n"
    }
  ],
  "type": "sink"
}
Stack trace:
"trace": "org.apache.kafka.common.KafkaException: Failed to construct kafka consumer
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:811)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:624)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:605)
at org.apache.kafka.connect.runtime.Worker.buildWorkerTask(Worker.java:505)
at org.apache.kafka.connect.runtime.Worker.startTask(Worker.java:441)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder.startTask(DistributedHerder.java:865)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder.access$1600(DistributedHerder.java:110)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:880)
at org.apache.kafka.connect.runtime.distributed.DistributedHerder$13.call(DistributedHerder.java:876)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.common.KafkaException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor ClassNotFoundException exception occurred
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:357)
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:332)
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:319)
at org.apache.kafka.clients.consumer.KafkaConsumer.<init>(KafkaConsumer.java:701)
... 12 more
Caused by: java.lang.ClassNotFoundException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
at java.net.URLClassLoader.findClass(URLClassLoader.java:382)
at java.lang.ClassLoader.loadClass(ClassLoader.java:424)
at org.apache.kafka.connect.runtime.isolation.PluginClassLoader.loadClass(PluginClassLoader.java:104)
at java.lang.ClassLoader.loadClass(ClassLoader.java:357)
at java.lang.Class.forName0(Native Method)
at java.lang.Class.forName(Class.java:348)
at org.apache.kafka.common.utils.Utils.loadClass(Utils.java:338)
at org.apache.kafka.common.utils.Utils.newInstance(Utils.java:327)
at org.apache.kafka.common.config.AbstractConfig.getConfiguredInstances(AbstractConfig.java:355)
... 15 more
You can find the configuration of the connector below.
{
  "name": "cassandraSinkConnector2",
  "config": {
    "connector.class": "io.confluent.connect.cassandra.CassandraSinkConnector",
    "tasks.max": "1",
    "topics": "appartenance_de",
    "cassandra.contact.points": "localhost",
    "cassandra.kcql": "INSERT INTO app_test SELECT * FROM app_de",
    "cassandra.port": "9042",
    "cassandra.keyspace": "dev_dkks",
    "cassandra.username": "superuser",
    "cassandra.password": "password",
    "cassandra.write.mode": "insert",
    "value.converter.schemas.enable": "true",
    "value.converter": "io.confluent.connect.avro.AvroConverter",
    "value.converter.schema.registry.url": "http://localhost:8081",
    "name": "cassandraSinkConnector2"
  },
  "tasks": [
    {
      "connector": "cassandraSinkConnector2",
      "task": 0
    }
  ],
  "type": "sink"
}
New error:
org.apache.kafka.connect.errors.ConnectException: Exiting WorkerSinkTask due to unrecoverable exception.
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:560)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:321)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:224)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:192)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:175)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:219)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.connect.errors.DataException: Record with a null key was encountered. This connector requires that records from Kafka contain the keys for the Cassandra table. Please use a transformation like org.apache.kafka.connect.transforms.ValueToKey to create a key with the proper fields.
at io.confluent.connect.cassandra.CassandraSinkTask.put(CassandraSinkTask.java:86)
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:538)
... 10 more
"
The root error is
java.lang.ClassNotFoundException: io.confluent.monitoring.clients.interceptor.MonitoringConsumerInterceptor
The Monitoring Interceptors are part of Confluent Platform. You can either disable their use in your Kafka Connect worker config, or better, make sure that the /usr/share/java/monitoring-interceptors/monitoring-interceptors-5.2.1.jar JAR is available to your Kafka Connect worker.
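As an alternative to editing the worker config: on newer Connect versions (2.3+), and only if the worker sets connector.client.config.override.policy=All, you can clear the interceptor for this one connector by adding a consumer override to its config. A hedged sketch of the addition:
{
  "consumer.override.interceptor.classes": ""
}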
The new error you're seeing is
org.apache.kafka.connect.errors.DataException:
Record with a null key was encountered. This connector requires that records from Kafka contain the keys for the Cassandra table.
Please use a transformation like org.apache.kafka.connect.transforms.ValueToKey to create a key with the proper fields.
I'd suggest using a Single Message Transform, as the error suggests, to correctly key your data.
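For example, a sketch of what you might add to the sink connector config, assuming the record value contains an id field that should become the Cassandra key (the field name is illustrative):
{
  "transforms": "createKey",
  "transforms.createKey.type": "org.apache.kafka.connect.transforms.ValueToKey",
  "transforms.createKey.fields": "id"
}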

Kafka Connect sink tasks ignore tolerance limits

I am trying to ignore bad messages in a sink connector with the errors.tolerance: all option. Full connector configuration:
{
  "name": "crm_data-sink_pandora",
  "config": {
    "connector.class": "io.confluent.connect.jdbc.JdbcSinkConnector",
    "tasks.max": 6,
    "topics": "crm_account_detail,crm_account_on_competitors,crm_event,crm_event_participation",
    "connection.url": "jdbc:postgresql://dburl/service?prepareThreshold=0",
    "connection.user": "pandora.app",
    "connection.password": "*******",
    "dialect.name": "PostgreSqlDatabaseDialect",
    "insert.mode": "upsert",
    "pk.mode": "record_value",
    "pk.fields": "guid",
    "table.name.format": "pandora.${topic}",
    "errors.tolerance": "all",
    "errors.log.enable": true,
    "errors.log.include.messages": true,
    "errors.deadletterqueue.topic.name": "crm_data_deadletterqueue",
    "errors.deadletterqueue.context.headers.enable": true
  }
}
Target table DDL:
create table crm_event_participation
(
guid char(36) not null
constraint crm_event_participation_pkey
primary key,
created_on timestamp,
created_by_guid char(36),
modified_on timestamp,
modified_by_guid char(36),
process_listeners integer,
event_guid char(36),
event_response varchar(250),
note varchar(500),
is_from_group boolean,
contact_guid char(36),
target_item integer,
account_guid char(36),
employer_id integer
);
The connector starts successfully, but it fails when an error occurs (e.g. a missing field).
curl -X GET http://kafka-connect:9092/connectors/crm_data-sink_pandora/status:
{
  "name": "crm_data-sink_pandora",
  "connector": {
    "state": "RUNNING",
    "worker_id": "192.168.2.254:10900"
  },
  "tasks": [
    {
      "state": "FAILED",
      "trace":
"org.apache.kafka.connect.errors.ConnectException: Exiting WorkerSinkTask due to unrecoverable exception.
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:586)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:225)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:193)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:175)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:219)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Caused by: org.apache.kafka.connect.errors.ConnectException: Table \"pandora\".\"crm_event_participation\" is missing fields ([SinkRecordField{schema=Schema{STRING}, name='event_id', isPrimaryKey=false}, SinkRecordField{schema=Schema{STRING}, name='event_response_guid', isPrimaryKey=false}]) and auto-evolution is disabled
at io.confluent.connect.jdbc.sink.DbStructure.amendIfNecessary(DbStructure.java:140)
at io.confluent.connect.jdbc.sink.DbStructure.createOrAmendIfNecessary(DbStructure.java:73)
at io.confluent.connect.jdbc.sink.BufferedRecords.add(BufferedRecords.java:84)
at io.confluent.connect.jdbc.sink.JdbcDbWriter.write(JdbcDbWriter.java:65)
at io.confluent.connect.jdbc.sink.JdbcSinkTask.put(JdbcSinkTask.java:73)
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:564)
... 10 more",
"id": 0,
"worker_id": "192.168.2.254:10900"
}
...
]
}
Log with exception:
[2019-03-29 16:59:30,924] INFO Unable to find fields [SinkRecordField{schema=Schema{INT32}, name='process_listners', isPrimaryKey=false}] among column names [employer_id, modified_on, modified_by_guid, contact_guid, target_item, guid, created_on, process_listeners, event_guid, created_by_guid, is_from_group, account_guid, event_response, note] (io.confluent.connect.jdbc.sink.DbStructure)
[2019-03-29 16:59:30,924] ERROR WorkerSinkTask{id=crm_data-sink_pandora-1} Task threw an uncaught and unrecoverable exception. Task is being killed and will not recover until manually restarted. (org.apache.kafka.connect.runtime.WorkerSinkTask)
org.apache.kafka.connect.errors.ConnectException: Table "pandora"."crm_event_participation" is missing fields ([SinkRecordField{schema=Schema{INT32}, name='process_listners', isPrimaryKey=false}]) and auto-evolution is disabled
at io.confluent.connect.jdbc.sink.DbStructure.amendIfNecessary(DbStructure.java:140)
at io.confluent.connect.jdbc.sink.DbStructure.createOrAmendIfNecessary(DbStructure.java:73)
at io.confluent.connect.jdbc.sink.BufferedRecords.add(BufferedRecords.java:84)
at io.confluent.connect.jdbc.sink.JdbcDbWriter.write(JdbcDbWriter.java:65)
at io.confluent.connect.jdbc.sink.JdbcSinkTask.put(JdbcSinkTask.java:73)
at org.apache.kafka.connect.runtime.WorkerSinkTask.deliverMessages(WorkerSinkTask.java:564)
at org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:322)
at org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:225)
at org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:193)
at org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:175)
at org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:219)
at java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:511)
at java.util.concurrent.FutureTask.run(FutureTask.java:266)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
Please explain what could be wrong in the connector configuration. I use Kafka 2.0.0 and JdbcSinkConnector 5.1.0.
In your Kafka message you have a field process_listners, but no column with that name exists in your table. I think you have a typo: the table column is process_listeners, not process_listners.
The errors.tolerance property applies only to errors that occur while converting messages.
For more about errors.tolerance, see: kafka connect - jdbc sink sql exception
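If letting the connector change the table were acceptable, the JDBC sink's auto.evolve option would add missing columns instead of failing. A sketch of the addition; note that here it would create a column with the misspelled name process_listners, so fixing the typo at the source remains the better fix:
{
  "auto.evolve": "true"
}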

Issue with Oracle JDBC Source Connector

We have an Oracle source from which we need to get data, and we are facing errors in both Avro and JSON formats.
Connector file:
{
  "name": "LITERAL_VALUES",
  "config": {
    "connector.class": "io.confluent.connect.jdbc.JdbcSourceConnector",
    "key.serializer": "io.confluent.kafka.serializers.KafkaAvroSerializer",
    "value.serializer": "io.confluent.kafka.serializers.KafkaAvroSerializer",
    "connection.user": "<user>",
    "connection.password": "<Password>",
    "tasks.max": "1",
    "connection.url": "jdbc:oracle:thin:@<server>:<Port>/<Schema>",
    "mode": "bulk",
    "topic.prefix": "LITERAL_VALUES",
    "batch.max.rows": 1000,
    "numeric.mapping": "best_fit",
    "query": "SELECT abc from xyz"
  }
}
Error while consuming in Avro format:
DataException: Cannot deserialize type int64 as type float64
Error while consuming in JSON format:
WARN task [0_0] Skipping record due to deserialization error. topic=[LITERAL_VALUES_JSON] partition=[0] offset=[12823] (org.apache.kafka.streams.processor.internals.RecordDeserializer:86)
org.apache.kafka.common.errors.SerializationException: KsqlJsonDeserializer failed to deserialize data for topic: LITERAL_VALUES_JSON
Caused by: java.io.CharConversionException: Invalid UTF-32 character 0xf01ae03 (above 0x0010ffff) at char #1, byte #7)
at com.fasterxml.jackson.core.io.UTF32Reader.reportInvalid(UTF32Reader.java:195)
at com.fasterxml.jackson.core.io.UTF32Reader.read(UTF32Reader.java:158)
at com.fasterxml.jackson.core.json.ReaderBasedJsonParser._loadMore(ReaderBasedJsonParser.java:243)
I tried to create a connector file with the "table.whitelist" property and consume with KSQL:
Unable to verify the AVRO schema is compatible with KSQL. Subject not found. io.confluent.rest.exceptions.RestNotFoundException: Subject not found.
io.confluent.rest.exceptions.RestNotFoundException: Subject not found.
at io.confluent.kafka.schemaregistry.rest.exceptions.Errors.subjectNotFoundException(Errors.java:50)
I checked the REST schema:
{
  "subject": "RAW-LITERAL_VALUES-value",
  "version": 1,
  "id": 16,
"schema": "{\"type\":\"record\",\"name\":\"LITERAL_VALUES\",\"fields\":[{\"name\":\"LITERAL_ID\",\"type\":[\"null\",{\"type\":\"bytes\",\"scale\":127,\"precision\":64,\"connect.version\":1,\"connect.parameters\":{\"scale\":\"127\"},\"connect.name\":\"org.apache.kafka.connect.data.Decimal\",\"logicalType\":\"decimal\"}],\"default\":null},{\"name\":\"LITERAL_NAME\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"LITERAL_VALUE\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"SOURCE_SYSTEM_ID\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"SOURCE_SYSTEM_INSTANCE_ID\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"EFF_STRT_DT\",\"type\":[\"null\",{\"type\":\"long\",\"connect.version\":1,\"connect.name\":\"org.apache.kafka.connect.data.Timestamp\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"EFF_END_DT\",\"type\":[\"null\",{\"type\":\"long\",\"connect.version\":1,\"connect.name\":\"org.apache.kafka.connect.data.Timestamp\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"STRT_DT\",\"type\":[\"null\",{\"type\":\"long\",\"connect.version\":1,\"connect.name\":\"org.apache.kafka.connect.data.Timestamp\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"END_DT\",\"type\":[\"null\",{\"type\":\"long\",\"connect.version\":1,\"connect.name\":\"org.apache.kafka.connect.data.Timestamp\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"CRTD_BY\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"CRTD_DT\",\"type\":[\"null\",{\"type\":\"long\",\"connect.version\":1,\"connect.name\":\"org.apache.kafka.connect.data.Timestamp\",\"logicalType\":\"timestamp-millis\"}],\"default\":null},{\"name\":\"LST_UPD_BY\",\"type\":[\"null\",\"string\"],\"default\":null},{\"name\":\"LST_UPD_DT\",\"type\":[\"null\",{\"type\":\"long\",\"connect.version\":1,\"connect.name\":\"org.apache.kafka.connect.data.Timestamp\",\"logicalType\":\"timestamp-millis\"}],\"default\":null}],\"connect.name\":\"LITERAL_VALUES\"}"
}
Any help is highly appreciated.