I am running a topology of 3 server nodes and 1 client node. One of the server nodes is run inside the Kafka connector process. The client is not able to send any message to that Kafka connector Ignite node.
This is the exception:
SEVERE: Failed to read message [msg=GridIoMessage [plc=0, topic=null, topicOrd=-1, ordered=false, timeout=0, skipOnTimeout=false, msg=null], buf=java.nio.DirectByteBuffer[pos=4 lim=251 cap=32768], reader=DirectMessageReader [state=DirectMessageState [pos=0, stack=[StateItem [stream=DirectByteBufferStreamImplV2 [baseOff=140323292482432, arrOff=-1, tmpArrOff=0, valReadBytes=0, tmpArrBytes=0, msgTypeDone=false, msg=null, mapIt=null, it=null, arrPos=-1, keyDone=false, readSize=-1, readItems=0, prim=0, primShift=0, uuidState=0, uuidMost=0, uuidLeast=0, uuidLocId=0], state=0], null, null, null, null, null, null, null, null, null]], protoVer=3, lastRead=false], ses=GridSelectorNioSessionImpl [worker=DirectNioClientWorker [super=AbstractNioClientWorker [idx=3, bytesRcvd=251, bytesSent=0, bytesRcvd0=251, bytesSent0=0, select=true, super=GridWorker [name=grid-nio-worker-tcp-comm-3, igniteInstanceName=null, finished=false, heartbeatTs=1604581761046, hashCode=1782557810, interrupted=false, runner=grid-nio-worker-tcp-comm-3-#139]]], writeBuf=java.nio.DirectByteBuffer[pos=0 lim=32768 cap=32768], readBuf=java.nio.DirectByteBuffer[pos=4 lim=251 cap=32768], inRecovery=GridNioRecoveryDescriptor [acked=9, resendCnt=0, rcvCnt=7, sentCnt=9, reserved=true, lastAck=0, nodeLeft=false, node=TcpDiscoveryNode [id=3f63a8d7-8964-4a4b-89c1-124d8eaba14a, consistentId=3f63a8d7-8964-4a4b-89c1-124d8eaba14a, addrs=ArrayList [127.0.0.1, 172.20.50.222], sockAddrs=HashSet [/127.0.0.1:0, /172.20.50.222:0], discPort=0, order=4, intOrder=4, lastExchangeTime=1604581749120, loc=false, ver=8.7.10#20191227-sha1:c481441d, isClient=true], connected=false, connectCnt=1, queueLimit=4096, reserveCnt=2, pairedConnections=false], outRecovery=GridNioRecoveryDescriptor [acked=9, resendCnt=0, rcvCnt=7, sentCnt=9, reserved=true, lastAck=0, nodeLeft=false, node=TcpDiscoveryNode [id=3f63a8d7-8964-4a4b-89c1-124d8eaba14a, consistentId=3f63a8d7-8964-4a4b-89c1-124d8eaba14a, addrs=ArrayList [127.0.0.1, 172.20.50.222], sockAddrs=HashSet [/127.0.0.1:0, /172.20.50.222:0], discPort=0, order=4, intOrder=4, lastExchangeTime=1604581749120, loc=false, ver=8.7.10#20191227-sha1:c481441d, isClient=true], connected=false, connectCnt=1, queueLimit=4096, reserveCnt=2, pairedConnections=false], outboundMessagesQueueSizeMetric=o.a.i.i.processors.metric.impl.AtomicLongMetric#69a257d1, super=GridNioSessionImpl [locAddr=/172.20.52.38:54412, rmtAddr=/172.20.50.222:47100, createTime=1604581761046, closeTime=0, bytesSent=0, bytesRcvd=251, bytesSent0=0, bytesRcvd0=251, sndSchedTime=1604581761046, lastSndTime=1604581761046, lastRcvTime=1604581761046, readsPaused=false, filterChain=FilterChain[filters=[GridNioTracerFilter [tracer=GridProcessorAdapter []], GridNioCodecFilter [parser=o.a.i.i.util.nio.GridDirectParser#1766eecd, directMode=true], GridConnectionBytesVerifyFilter], accepted=false, markedForClose=false]]]
class org.apache.ignite.IgniteException: Invalid message type: -33
at org.apache.ignite.internal.managers.communication.GridIoMessageFactory.create(GridIoMessageFactory.java:1106)
at org.apache.ignite.spi.communication.tcp.TcpCommunicationSpi$5.create(TcpCommunicationSpi.java:2407)
at org.apache.ignite.internal.direct.stream.v2.DirectByteBufferStreamImplV2.readMessage(DirectByteBufferStreamImplV2.java:1175)
at org.apache.ignite.internal.direct.DirectMessageReader.readMessage(DirectMessageReader.java:335)
at org.apache.ignite.internal.managers.communication.GridIoMessage.readFrom(GridIoMessage.java:270)
at org.apache.ignite.internal.util.nio.GridDirectParser.decode(GridDirectParser.java:89)
at org.apache.ignite.internal.util.nio.GridNioCodecFilter.onMessageReceived(GridNioCodecFilter.java:112)
at org.apache.ignite.internal.util.nio.GridNioFilterAdapter.proceedMessageReceived(GridNioFilterAdapter.java:108)
at org.apache.ignite.internal.util.nio.GridConnectionBytesVerifyFilter.onMessageReceived(GridConnectionBytesVerifyFilter.java:87)
at org.apache.ignite.internal.util.nio.GridNioFilterAdapter.proceedMessageReceived(GridNioFilterAdapter.java:108)
at org.apache.ignite.internal.util.nio.GridNioServer$HeadFilter.onMessageReceived(GridNioServer.java:3681)
at org.apache.ignite.internal.util.nio.GridNioFilterChain.onMessageReceived(GridNioFilterChain.java:174)
at org.apache.ignite.internal.util.nio.GridNioServer$DirectNioClientWorker.processRead(GridNioServer.java:1360)
at org.apache.ignite.internal.util.nio.GridNioServer$AbstractNioClientWorker.processSelectedKeysOptimized(GridNioServer.java:2472)
at org.apache.ignite.internal.util.nio.GridNioServer$AbstractNioClientWorker.bodyInternal(GridNioServer.java:2239)
at org.apache.ignite.internal.util.nio.GridNioServer$AbstractNioClientWorker.body(GridNioServer.java:1880)
at org.apache.ignite.internal.util.worker.GridWorker.run(GridWorker.java:119)
at java.lang.Thread.run(Thread.java:748)
Direct type -33 is GridH2QueryRequest.
Are you sure that your Kafka connector node has ignite-indexing in its classpath? Try adding it explicitly.
I'm trying to create external access to Confluent Kafka from an AKS cluster. I've been able to connect to Control Center through an Ingress, but I can't get external access to Kafka working.
I added this to the Kafka spec:
listeners:
  external:
    externalAccess:
      type: loadBalancer
      loadBalancer:
        domain: lb.example.it
        advertisedPort: 39093
and it creates two load-balancer services. Then I added the external IPs to my /etc/hosts file:
20.31.10.27 kafka.lb.example.it
20.31.9.167 b0.lb.example.it
But when I create a Node.js producer, I don't know what to put in brokers:
const { Kafka } = require('kafkajs')
const kafka = new Kafka({
  clientId: 'my-app',
  brokers: ['kafka.lb.example.it:39093']
})
const producer = kafka.producer()
console.log("produced")
const asyncOperation = async () => {
  console.log("connecting")
  await producer.connect()
  console.log("connected")
  let i = 0
  try {
    while (true) {
      await producer.send({
        topic: 'topic_prova3',
        messages: [
          {
            key: JSON.stringify("hello"),
            value: JSON.stringify({ "NUM": i }),
          },
        ]
      })
      await producer.send({
        topic: 'topic_prova3',
        messages: [
          {
            value: JSON.stringify({ "NUMERO": i.toString() }),
          },
        ]
      })
      console.log("sended")
      i++
      await new Promise(resolve => setTimeout(resolve, 5000));
    }
  }
  catch (err) {
    console.error("error: " + err)
  }
  await producer.disconnect()
}
asyncOperation();
This is the log of the error:
{"level":"ERROR","timestamp":"2023-01-22T08:43:20.761Z","logger":"kafkajs","message":"[Connection] Connection error: connect ECONNREFUSED 127.0.0.2:9092","broker":"kafka-0.kafka.ckafka.svc.cluster.local:9092","clientId":"my-app","stack":"Error: connect ECONNREFUSED 127.0.0.2:9092\n at TCPConnectWrap.afterConnect [as oncomplete] (node:net:1495:16)"}
The broker should be kafka.ckafka.svc.cluster.local:9092 instead of kafka-0.kafka.ckafka.svc.cluster.local:9092, but the client switches to that address automatically.
The load balancer appears to be working (it's not throwing an unknown-host exception), but it returned a cluster-local broker address. That is standard Kafka behaviour on Kubernetes: the client uses the bootstrap address only for the initial metadata request, and the brokers then return their advertised listener addresses for every subsequent connection.
You need to configure, or connect through, the advertised.listeners entry that corresponds to the external LoadBalancer.
Regarding "from an AKS cluster": you should use Azure services to create a public DNS route rather than pointing /etc/hosts at one specific IP of your cluster for any given service, especially once you deploy multiple replicas of the Kafka pod.
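As a quick check, you can connect to the external bootstrap address and print the broker addresses the cluster advertises back; if they come back as cluster-internal DNS names, the external listener is not advertising a reachable address. This is only a diagnostic sketch using the confluent-kafka Python client for illustration (any client that exposes cluster metadata works), with the bootstrap address taken from the question:

from confluent_kafka.admin import AdminClient

# Connect to the external bootstrap address and request cluster metadata.
admin = AdminClient({"bootstrap.servers": "kafka.lb.example.it:39093"})
metadata = admin.list_topics(timeout=10)

for broker in metadata.brokers.values():
    # If these print *.svc.cluster.local names, advertised.listeners is not
    # configured for external access.
    print(f"broker {broker.id}: {broker.host}:{broker.port}")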
I am configuring PyFlink to connect to a Confluent Cloud Kafka cluster, using SASL/PLAIN. Below is the code snippet:
""" CREATE TABLE {0} (
`transaction_amt` BIGINT NOT NULL,
`event_id` VARCHAR(64) NOT NULL,
`event_time` TIMESTAMP(6) NOT NULL
)
WITH (
'connector' = 'kafka',
'topic' = '{1}',
'properties.bootstrap.servers' = '{2}',
'properties.group.id' = 'testGroupTFI',
'format' = 'json',
'json.timestamp-format.standard' = 'ISO-8601',
'properties.security.protocol' = 'SASL_SSL',
'properties.sasl.mechanism' = 'PLAIN',
'properties.sasl.jaas.config' = 'org.apache.kafka.common.security.plain.PlainLoginModule required username=\"{3}\" password=\"{4}\";'
) """.format(table_name, stream_name, broker, user, secret)
I am getting this error:
{
"applicationARN": "arn:aws:kinesisanalytics:us-east-2:xxxxxxxxxxx:application/sentiment",
"applicationVersionId": "13",
"locationInformation": "org.apache.flink.runtime.taskmanager.Task.transitionState(Task.java:973)",
"logger": "org.apache.flink.runtime.taskmanager.Task",
"message": "Source: TableSourceScan(table=[[default_catalog, default_database, input_table]], fields=[transaction_amt, event_id, event_time]) -> Sink: Sink(table=[default_catalog.default_database.output_table_msk], fields=[transaction_amt, event_id, event_time]) (3/12) (25a905455865731943be6aa60927a49c) switched from RUNNING to FAILED.",
"messageSchemaVersion": "1",
"messageType": "WARN",
"threadName": "Source: TableSourceScan(table=[[default_catalog, default_database, input_table]], fields=[transaction_amt, event_id, event_time]) -> Sink: Sink(table=[default_catalog.default_database.output_table_msk], fields=[transaction_amt, event_id, event_time]) (3/12)",
"throwableInformation": "org.apache.flink.kafka.shaded.org.apache.kafka.common.KafkaException: Failed to construct kafka producer\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:432)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:298)\n\tat org.apache.flink.streaming.connectors.kafka.internal.FlinkKafkaInternalProducer.<init>(FlinkKafkaInternalProducer.java:78)\n\tat org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.createProducer(FlinkKafkaProducer.java:1141)\n\tat org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.initProducer(FlinkKafkaProducer.java:1242)\n\tat org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.initNonTransactionalProducer(FlinkKafkaProducer.java:1238)\n\tat org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.beginTransaction(FlinkKafkaProducer.java:940)\n\tat org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.beginTransaction(FlinkKafkaProducer.java:99)\n\tat org.apache.flink.streaming.api.functions.sink.TwoPhaseCommitSinkFunction.beginTransactionInternal(TwoPhaseCommitSinkFunction.java:398)\n\tat org.apache.flink.streaming.api.functions.sink.TwoPhaseCommitSinkFunction.initializeState(TwoPhaseCommitSinkFunction.java:389)\n\tat org.apache.flink.streaming.connectors.kafka.FlinkKafkaProducer.initializeState(FlinkKafkaProducer.java:1111)\n\tat org.apache.flink.streaming.util.functions.StreamingFunctionUtils.tryRestoreFunction(StreamingFunctionUtils.java:185)\n\tat org.apache.flink.streaming.util.functions.StreamingFunctionUtils.restoreFunctionState(StreamingFunctionUtils.java:167)\n\tat org.apache.flink.streaming.api.operators.AbstractUdfStreamOperator.initializeState(AbstractUdfStreamOperator.java:96)\n\tat org.apache.flink.streaming.api.operators.StreamOperatorStateHandler.initializeOperatorState(StreamOperatorStateHandler.java:106)\n\tat org.apache.flink.streaming.api.operators.AbstractStreamOperator.initializeState(AbstractStreamOperator.java:258)\n\tat org.apache.flink.streaming.runtime.tasks.OperatorChain.initializeStateAndOpenOperators(OperatorChain.java:290)\n\tat org.apache.flink.streaming.runtime.tasks.StreamTask.lambda$beforeInvoke$0(StreamTask.java:474)\n\tat org.apache.flink.streaming.runtime.tasks.StreamTaskActionExecutor$SynchronizedStreamTaskActionExecutor.runThrowing(StreamTaskActionExecutor.java:92)\n\tat org.apache.flink.streaming.runtime.tasks.StreamTask.beforeInvoke(StreamTask.java:470)\n\tat org.apache.flink.streaming.runtime.tasks.StreamTask.invoke(StreamTask.java:529)\n\tat org.apache.flink.runtime.taskmanager.Task.doRun(Task.java:724)\n\tat org.apache.flink.runtime.taskmanager.Task.run(Task.java:549)\n\tat java.base/java.lang.Thread.run(Thread.java:829)\nCaused by: org.apache.flink.kafka.shaded.org.apache.kafka.common.KafkaException: javax.security.auth.login.LoginException: No LoginModule found for org.apache.kafka.common.security.plain.PlainLoginModule\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.network.SaslChannelBuilder.configure(SaslChannelBuilder.java:158)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.network.ChannelBuilders.create(ChannelBuilders.java:146)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.network.ChannelBuilders.clientChannelBuilder(ChannelBuilders.java:67)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.clients.ClientUtils.createChannelBuilder(ClientUtils.java:99)\n\tat 
org.apache.flink.kafka.shaded.org.apache.kafka.clients.producer.KafkaProducer.newSender(KafkaProducer.java:450)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.clients.producer.KafkaProducer.<init>(KafkaProducer.java:421)\n\t... 23 more\nCaused by: javax.security.auth.login.LoginException: No LoginModule found for org.apache.kafka.common.security.plain.PlainLoginModule\n\tat java.base/javax.security.auth.login.LoginContext.invoke(LoginContext.java:731)\n\tat java.base/javax.security.auth.login.LoginContext$4.run(LoginContext.java:672)\n\tat java.base/javax.security.auth.login.LoginContext$4.run(LoginContext.java:670)\n\tat java.base/java.security.AccessController.doPrivileged(Native Method)\n\tat java.base/javax.security.auth.login.LoginContext.invokePriv(LoginContext.java:670)\n\tat java.base/javax.security.auth.login.LoginContext.login(LoginContext.java:581)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.security.authenticator.AbstractLogin.login(AbstractLogin.java:60)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.security.authenticator.LoginManager.<init>(LoginManager.java:62)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.security.authenticator.LoginManager.acquireLoginManager(LoginManager.java:105)\n\tat org.apache.flink.kafka.shaded.org.apache.kafka.common.network.SaslChannelBuilder.configure(SaslChannelBuilder.java:147)\n\t... 28 more\n"
}
I suspect that SASL is not supported by the PyFlink SQL connector for 1.11 or 1.13. Is this correct? Is there a workaround I can use?
I've seen many questions about this problem, but in pretty much all those cases the solution was to whitelist the IP address. Even after doing so, I'm still receiving the error.
MongoServerSelectionError: connection <monitor> to xx.xx.xxx.xxx:27017 closed
Here is my connection string with credentials changed:
mongodb://username:password@shard.mongodb.net:27017,shard.mongodb.net:27017,shard.mongodb.net:27017/admin?ssl=true&replicaSet=atlas-shard0&readPreference=primary&connectTimeoutMS=10000&authSource=admin&authMechanism=SCRAM-SHA-1
I also allowed all IP addresses to access my database and turned off my firewall, and I'm still receiving the error when connecting.
Additionally, it appears I'm getting a more specific error before the initial one:
reason: TopologyDescription {
  type: 'ReplicaSetNoPrimary',
  setName: null,
  maxSetVersion: null,
  maxElectionId: null,
  servers: Map(3) {
    'xxx.mongodb.net:27017' => [ServerDescription],
    'xxx.mongodb.net:27017' => [ServerDescription],
    'xxx.mongodb.net:27017' => [ServerDescription]
  },
  stale: false,
  compatible: true,
  compatibilityError: null,
  logicalSessionTimeoutMinutes: null,
  heartbeatFrequencyMS: 10000,
  localThresholdMS: 15,
  commonWireVersion: null
}
I am able to connect via Studio 3T, but not through Node.js.
I am facing an issue while writing to MongoDB using the MongoDB Kafka sink connector. I am using MongoDB v5.0.3 and Strimzi Kafka v2.8.0. I have added p1/mongo-kafka-connect-1.7.0-all.jar and p2/mongodb-driver-core-4.5.0.jar to the Connect cluster plugin path. I created the connector with the following configuration:
{
  "name": "mongo-sink",
  "config": {
    "topics": "sinktest2",
    "connector.class": "com.mongodb.kafka.connect.MongoSinkConnector",
    "tasks.max": "1",
    "connection.uri": "mongodb://mm-0.mongoservice.st.svc.cluster.local:27017,mm-1.mongoservice.st.svc.cluster.local:27017",
    "database": "sinkdb",
    "collection": "sinkcoll",
    "mongo.errors.tolerance": "all",
    "mongo.errors.log.enable": true,
    "errors.log.include.messages": true,
    "errors.deadletterqueue.topic.name": "sinktest2.deadletter",
    "errors.deadletterqueue.context.headers.enable": true
  }
}
root@ubuntuserver-0:/persistent# curl http://localhost:8083/connectors/mongo-sink/status
{"name":"mongo-sink","connector":{"state":"RUNNING","worker_id":"localhost:8083"},"tasks":[{"id":0,"state":"RUNNING","worker_id":"localhost:8083"}],"type":"sink"}
When I check the status after creating the connector, it shows RUNNING, but as soon as I start sending records to the Kafka topic the connector runs into issues. The connector status then looks like this:
root@ubuntuserver-0:/persistent# curl http://localhost:8083/connectors/mongo-sink/status
{
"name":"mongo-sink",
"connector":{
"state":"RUNNING",
"worker_id":"localhost:8083"
},
"tasks":[
{
"id":0,
"state":"FAILED",
"worker_id":"localhost:8083",
"trace":"org.apache.kafka.connect.errors.ConnectException: Tolerance exceeded in error handler\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:206)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:132)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:496)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:473)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:328)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:232)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:201)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:182)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:231)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:829)\nCaused by: org.apache.kafka.connect.errors.DataException: Converting byte[] to Kafka Connect data failed due to serialization error: \n\tat org.apache.kafka.connect.json.JsonConverter.toConnectData(JsonConverter.java:324)\n\tat org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertValue(WorkerSinkTask.java:540)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$2(WorkerSinkTask.java:496)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:156)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:190)\n\t... 
13 more\nCaused by: org.apache.kafka.common.errors.SerializationException: com.fasterxml.jackson.core.io.JsonEOFException: Unexpected end-of-input: expected close marker for Object (start marker at [Source: (byte[])\"{ \"; line: 1, column: 1])\n at [Source: (byte[])\"{ \"; line: 1, column: 4]\nCaused by: com.fasterxml.jackson.core.io.JsonEOFException: Unexpected end-of-input: expected close marker for Object (start marker at [Source: (byte[])\"{ \"; line: 1, column: 1])\n at [Source: (byte[])\"{ \"; line: 1, column: 4]\n\tat com.fasterxml.jackson.core.base.ParserMinimalBase._reportInvalidEOF(ParserMinimalBase.java:664)\n\tat com.fasterxml.jackson.core.base.ParserBase._handleEOF(ParserBase.java:486)\n\tat com.fasterxml.jackson.core.base.ParserBase._eofAsNextChar(ParserBase.java:498)\n\tat com.fasterxml.jackson.core.json.UTF8StreamJsonParser._skipWSOrEnd2(UTF8StreamJsonParser.java:3033)\n\tat com.fasterxml.jackson.core.json.UTF8StreamJsonParser._skipWSOrEnd(UTF8StreamJsonParser.java:3003)\n\tat com.fasterxml.jackson.core.json.UTF8StreamJsonParser.nextFieldName(UTF8StreamJsonParser.java:989)\n\tat com.fasterxml.jackson.databind.deser.std.BaseNodeDeserializer.deserializeObject(JsonNodeDeserializer.java:249)\n\tat com.fasterxml.jackson.databind.deser.std.JsonNodeDeserializer.deserialize(JsonNodeDeserializer.java:68)\n\tat com.fasterxml.jackson.databind.deser.std.JsonNodeDeserializer.deserialize(JsonNodeDeserializer.java:15)\n\tat com.fasterxml.jackson.databind.ObjectMapper._readTreeAndClose(ObjectMapper.java:4270)\n\tat com.fasterxml.jackson.databind.ObjectMapper.readTree(ObjectMapper.java:2734)\n\tat org.apache.kafka.connect.json.JsonDeserializer.deserialize(JsonDeserializer.java:64)\n\tat org.apache.kafka.connect.json.JsonConverter.toConnectData(JsonConverter.java:322)\n\tat org.apache.kafka.connect.storage.Converter.toConnectData(Converter.java:87)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertValue(WorkerSinkTask.java:540)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.lambda$convertAndTransformRecord$2(WorkerSinkTask.java:496)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndRetry(RetryWithToleranceOperator.java:156)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execAndHandleError(RetryWithToleranceOperator.java:190)\n\tat org.apache.kafka.connect.runtime.errors.RetryWithToleranceOperator.execute(RetryWithToleranceOperator.java:132)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertAndTransformRecord(WorkerSinkTask.java:496)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.convertMessages(WorkerSinkTask.java:473)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.poll(WorkerSinkTask.java:328)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.iteration(WorkerSinkTask.java:232)\n\tat org.apache.kafka.connect.runtime.WorkerSinkTask.execute(WorkerSinkTask.java:201)\n\tat org.apache.kafka.connect.runtime.WorkerTask.doRun(WorkerTask.java:182)\n\tat org.apache.kafka.connect.runtime.WorkerTask.run(WorkerTask.java:231)\n\tat java.base/java.util.concurrent.Executors$RunnableAdapter.call(Executors.java:515)\n\tat java.base/java.util.concurrent.FutureTask.run(FutureTask.java:264)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1128)\n\tat java.base/java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:628)\n\tat java.base/java.lang.Thread.run(Thread.java:829)\n"
}
],
"type":"sink"
}
I am writing a sample JSON record to the Kafka topic:
./kafka-console-producer.sh --topic sinktest2 --bootstrap-server sample-kafka-kafka-bootstrap:9093 --producer.config /persistent/client.txt < /persistent/emp.json
emp.json is the following file:
{
  "employee": {
    "name": "abc",
    "salary": 56000,
    "married": true
  }
}
I don't see any logs in the connector pod, and no database or collection is being created in MongoDB.
Please help me resolve this issue. Thank you!
I think you are missing some configuration parameters, namely the converters and the schema settings.
Update your config to add the following:
"key.converter":"org.apache.kafka.connect.json.JsonConverter",
"value.converter":"org.apache.kafka.connect.json.JsonConverter",
"key.converter.schemas.enable": "false",
"value.converter.schemas.enable": "false",
If you are using Kafka Connect on Kubernetes, you can create the sink connector as shown below. Create a file named something like mongo-sink-connector.yaml:
apiVersion: kafka.strimzi.io/v1beta2
kind: KafkaConnector
metadata:
  name: mongodb-sink-connector
  labels:
    strimzi.io/cluster: my-connect-cluster
spec:
  class: com.mongodb.kafka.connect.MongoSinkConnector
  tasksMax: 2
  config:
    connection.uri: "mongodb://root:password@mongodb-0.mongodb-headless.default.svc.cluster.local:27017"
    database: test
    collection: sink
    topics: sink-topic
    key.converter: org.apache.kafka.connect.json.JsonConverter
    value.converter: org.apache.kafka.connect.json.JsonConverter
    key.converter.schemas.enable: false
    value.converter.schemas.enable: false
Execute the command:
$ kubectl apply -f mongo-sink-connector.yaml
You should see the output:
kafkaconnector.kafka.strimzi.io/mongodb-sink-connector created
Before starting the producer, check the status of the connector and verify that the topic has been created, as follows:
Status:
[kafka@my-connect-cluster-connect-5d47fb574-69xpv kafka]$ curl http://localhost:8083/connectors/mongodb-sink-connector/status
{"name":"mongodb-sink-connector","connector":{"state":"RUNNING","worker_id":"IP-ADDRESS:8083"},"tasks":[{"id":0,"state":"RUNNING","worker_id":"IP-ADDRESS:8083"},{"id":1,"state":"RUNNING","worker_id":"IP-ADDRESS:8083"}],"type":"sink"}
[kafka@my-connect-cluster-connect-5d47fb574-69xpv kafka]$
Check topic creation; you should see sink-topic:
[kafka@my-connect-cluster-connect-5d47fb574-69xpv kafka]$ bin/kafka-topics.sh --bootstrap-server my-cluster-kafka-bootstrap:9092 --list
__consumer_offsets
__strimzi-topic-operator-kstreams-topic-store-changelog
__strimzi_store_topic
connect-cluster-configs
connect-cluster-offsets
connect-cluster-status
sink-topic
Now, go to the Kafka server and run the producer:
[kafka@my-cluster-kafka-0 kafka]$ bin/kafka-console-producer.sh --broker-list my-cluster-kafka-bootstrap:9092 --topic sink-topic
Successful execution will show a > prompt where you can enter test data:
>{"employee": {"name": "abc", "salary": 56000, "married": true}}
>
In another terminal, connect to the Kafka server and start a consumer to verify the data:
[kafka@my-cluster-kafka-0 kafka]$ bin/kafka-console-consumer.sh --bootstrap-server localhost:9092 --topic sink-topic --from-beginning
{"employee": {"name": "abc", "salary": 56000, "married": true}}
If you see this data, everything is working fine. Now let's check MongoDB. Connect to your MongoDB server and check:
rs0:PRIMARY> use test
switched to db test
rs0:PRIMARY> show collections
sink
rs0:PRIMARY> db.sink.find()
{ "_id" : ObjectId("6234a4a0dad1a2638f57a6b2"), "employee" : { "name" : "abc", "salary" : NumberLong(56000), "married" : true } }
Et voilà!
You're hitting a serialization exception. I'll break the message out a bit:
com.fasterxml.jackson.core.io.JsonEOFException: Unexpected end-of-input:
expected close marker for Object (start marker at [Source: (byte[])"{ "; line: 1, column: 1])
at [Source: (byte[])"{ "; line: 1, column: 4]
Caused by: com.fasterxml.jackson.core.io.JsonEOFException:
Unexpected end-of-input: expected close marker for Object (start marker at [Source: (byte[])"{ "; line: 1, column: 1])
at [Source: (byte[])"{ "; line: 1, column: 4]
"expected close marker for Object" suggests to me that the parser is expecting to see the entire JSON object as one line, rather than pretty-printed.
{"employee": {"name": "abc", "salary": 56000, "married": true}}
I am following https://github.com/kaiwaehner/kafka-connect-iot-mqtt-connector-example to connect Mosquitto and Kafka with the MQTT source connector. The data sent by the Mosquitto publisher reaches both the Mosquitto subscriber and the Kafka consumer, but the key and value fields of the ConsumerRecord object in my Kafka consumer have some extra bytes prepended to them.
Below are the code snippets and the outputs I'm getting.
mqttPublisher.py
while v3 < 3:
    data3 = {
        "time": str(datetime.datetime.now().time()),
        "val": v3
    }
    client.publish("sensor/dist", json.dumps(data3), qos=2)
    v3 += 1
    time.sleep(2)
mqttSubscriber.py
def on_message_print(client, userdata, message):
    print(message.topic, message.payload)

subscribe.callback(on_message_print, "sensor/#", hostname="localhost")
kafkaConsumer.py
consumer = KafkaConsumer('mqtt.',
                         bootstrap_servers=['localhost:9092'])

for message in consumer:
    print(message)
Output: mqttSubscriber.py
sensor/dist b'{"time": "12:44:30.817462", "val": 0}'
sensor/dist b'{"time": "12:44:32.820040", "val": 1}'
sensor/dist b'{"time": "12:44:34.822657", "val": 2}'
Output: kafkaConsumer.py
ConsumerRecord(topic='mqtt.', partition=0, offset=225, timestamp=1545117270870, timestamp_type=0, key=b'\x00\x00\x00\x00\x01\x16sensor/dist', value=b'\x00\x00\x00\x00\x02J{"time": "12:44:30.817462", "val": 0}', headers=[('mqtt.message.id', b'0'), ('mqtt.qos', b'0'), ('mqtt.retained', b'false'), ('mqtt.duplicate', b'false')], checksum=None, serialized_key_size=17, serialized_value_size=43, serialized_header_size=62)
ConsumerRecord(topic='mqtt.', partition=0, offset=226, timestamp=1545117272821, timestamp_type=0, key=b'\x00\x00\x00\x00\x01\x16sensor/dist', value=b'\x00\x00\x00\x00\x02J{"time": "12:44:32.820040", "val": 1}', headers=[('mqtt.message.id', b'0'), ('mqtt.qos', b'0'), ('mqtt.retained', b'false'), ('mqtt.duplicate', b'false')], checksum=None, serialized_key_size=17, serialized_value_size=43, serialized_header_size=62)
ConsumerRecord(topic='mqtt.', partition=0, offset=227, timestamp=1545117274824, timestamp_type=0, key=b'\x00\x00\x00\x00\x01\x16sensor/dist', value=b'\x00\x00\x00\x00\x02J{"time": "12:44:34.822657", "val": 2}', headers=[('mqtt.message.id', b'0'), ('mqtt.qos', b'0'), ('mqtt.retained', b'false'), ('mqtt.duplicate', b'false')], checksum=None, serialized_key_size=17, serialized_value_size=43, serialized_header_size=62)
What is causing these extra bytes to be prepended in the Kafka consumer?
Thanks in advance.
As part of the demo, you're starting a Schema Registry:
Start Kafka Connect and dependencies (Kafka, Zookeeper, Schema Registry):
confluent start connect
If you look at the first 5 bytes, you'll see they start with a 0 magic byte, followed by four bytes holding the schema ID as a big-endian integer.
See the Schema Registry Wire Format documentation, and try doing a curl localhost:8081/subjects to see if it lists key and value subjects for your topic.
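To make that framing concrete, here is a small sketch (plain Python, standard library only) that splits the key bytes from the first ConsumerRecord above into the magic byte, the schema ID, and the remaining Avro-encoded payload:

import struct

def split_confluent_frame(raw: bytes):
    """Split a Confluent wire-format message into (magic byte, schema ID, payload).

    Layout: 1 magic byte (always 0) + 4-byte big-endian schema ID + Avro-encoded data.
    """
    magic, schema_id = struct.unpack(">bI", raw[:5])
    return magic, schema_id, raw[5:]

# Key bytes copied from the ConsumerRecord output above
key = b"\x00\x00\x00\x00\x01\x16sensor/dist"
# Prints (0, 1, b'\x16sensor/dist'): key schema ID 1, then the Avro-encoded string
# (varint length prefix followed by the UTF-8 topic name).
print(split_confluent_frame(key))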
If you don't want Avro, you need to edit your Kafka Connect properties file to use different converters, and avoid relying on confluent start for anything beyond getting Kafka and ZooKeeper running.
Or, if you want Python to deserialize the Avro, you can refer to the confluent-kafka-python repo on GitHub.
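For completeness, a minimal consumer sketch using confluent-kafka-python's Avro support; the broker and Schema Registry addresses are the usual Confluent demo defaults and are assumptions here (newer releases favour DeserializingConsumer with AvroDeserializer, but the idea is the same):

from confluent_kafka.avro import AvroConsumer

consumer = AvroConsumer({
    "bootstrap.servers": "localhost:9092",           # assumed demo default
    "group.id": "mqtt-avro-reader",
    "schema.registry.url": "http://localhost:8081",  # assumed demo default
    "auto.offset.reset": "earliest",
})
consumer.subscribe(["mqtt."])

msg = consumer.poll(10)
if msg is not None and msg.error() is None:
    # The 5-byte wire-format header is stripped and the Avro payload is
    # decoded against the Schema Registry automatically.
    print(msg.key(), msg.value())

consumer.close()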