ActorTestKit: the shutdown method does not work properly (Scala)

I have the following test:
import akka.actor.testkit.typed.scaladsl.{ActorTestKit, FishingOutcomes, TestProbe}
import akka.actor.typed.receptionist.Receptionist
import com.typesafe.config.ConfigFactory
import scala.concurrent.duration._

final class DetectorSpec extends BddSpec {

  private val sap = Container.sap()
  private val kafka = Container.kafka()

  sap.start()
  kafka.start()

  override def afterAll(): Unit = {
    sap.stop()
    kafka.stop()
  }

  private def withKafkaAndSapOnline(testCode: TestProbe[ServerEvent] => Unit): Unit = {
    val config = ConfigFactory.parseString(
      s"""akka.actor.default-dispatcher = {
           type = akka.testkit.CallingThreadDispatcherConfigurator
         }
         akka.actor.testkit.typed.single-expect-default = 0s
         akka.loglevel = DEBUG
         kafka {
           servers = "${kafka.getBootstrapServers}"
           zookeeper = "${kafka.getMappedPort(2181)}"
         }
         sap {
           server = "ws://${sap.getContainerIpAddress}:${sap.getMappedPort(8080)}"
         }""")
    val testKit = ActorTestKit("DetectorSystem1", config)
    testKit.spawn(DetectorSupervisor.create(), "DetectorSupervisor")
    val inbox = testKit.createTestProbe[ServerEvent]("Receiver")
    testKit.system.receptionist ! Receptionist.Register(ServerStateKey, inbox.ref)
    testCode(inbox)
    testKit.shutdownTestKit()
  }

  private def withKafkaAndSapOffline(testCode: (TestProbe[ServerEvent], TestProbe[ServerEvent]) => Unit): Unit = {
    val config = ConfigFactory.parseString(
      s"""akka.actor.default-dispatcher = {
           type = akka.testkit.CallingThreadDispatcherConfigurator
         }
         akka.actor.testkit.typed.single-expect-default = 0s
         akka.loglevel = DEBUG
         kafka {
           servers = "PLAINTEXT://localhost:9092"
           zookeeper = "2181"
         }
         sap {
           server = "ws://127.0.0.1"
         }""")
    val testKit = ActorTestKit("DetectorSystem2", config)
    testKit.spawn(DetectorSupervisor.create(), "DetectorSupervisor")
    val inbox1 = testKit.createTestProbe[ServerEvent]("Receiver1")
    val inbox2 = testKit.createTestProbe[ServerEvent]("Receiver2")
    testKit.system.receptionist ! Receptionist.Register(ServerStateKey, inbox1.ref)
    testKit.system.receptionist ! Receptionist.Register(ServerStateKey, inbox2.ref)
    testCode(inbox1, inbox2)
    testKit.shutdownTestKit()
  }

  private def withKafkaOfflineSapOnline(testCode: TestProbe[ServerEvent] => Unit): Unit = {
    val config = ConfigFactory.parseString(
      s"""akka.actor.default-dispatcher = {
           type = akka.testkit.CallingThreadDispatcherConfigurator
         }
         akka.actor.testkit.typed.single-expect-default = 0s
         akka.loglevel = DEBUG
         kafka {
           servers = "PLAINTEXT://localhost:9092"
           zookeeper = "2181"
         }
         sap {
           server = "ws://${sap.getContainerIpAddress}:${sap.getMappedPort(8080)}"
         }""")
    val testKit = ActorTestKit("DetectorSystem3", config)
    val inbox = testKit.createTestProbe[ServerEvent]("Receiver")
    testKit.spawn(DetectorSupervisor.create(), "DetectorSupervisor")
    testKit.system.receptionist ! Receptionist.Register(ServerStateKey, inbox.ref)
    testCode(inbox)
    testKit.shutdownTestKit()
  }

  private def withKafkaOnlineSapOffline(testCode: TestProbe[ServerEvent] => Unit): Unit = {
    val config = ConfigFactory.parseString(
      s"""akka.actor.default-dispatcher = {
           type = akka.testkit.CallingThreadDispatcherConfigurator
         }
         akka.actor.testkit.typed.single-expect-default = 0s
         akka.loglevel = DEBUG
         kafka {
           servers = "${kafka.getBootstrapServers}"
           zookeeper = "${kafka.getMappedPort(2181)}"
         }
         sap {
           server = "ws://127.0.0.1:8080"
         }""")
    val testKit = ActorTestKit("DetectorSystem4", config)
    testKit.spawn(DetectorSupervisor.create(), "DetectorSupervisor")
    val inbox = testKit.createTestProbe[ServerEvent]("Receiver")
    testKit.system.receptionist ! Receptionist.Register(ServerStateKey, inbox.ref)
    testCode(inbox)
    testKit.shutdownTestKit()
  }

  feature("Detect Kafka and SAP availability") {
    info("As a technical user, I want to be notified in real time, if Kafka and SAP are up and running or not.")

    scenario("SAP and Kafka are available") {
      withKafkaAndSapOnline { inbox =>
        Given("I am waiting for the current state message")
        When("I receive the state message")
        Then("it should contain `SAP and Kafka are online`")
        inbox.fishForMessage(5.seconds) {
          case ServerOfflineApproved => FishingOutcomes.continue
          case ServerOnlineApproved  => FishingOutcomes.complete
          case _                     => FishingOutcomes.fail("Unexpected message")
        }
      }
    }

    scenario("SAP is online and Kafka is offline") {
      withKafkaOfflineSapOnline { inbox =>
        Given("I am waiting for the current state message")
        When("I receive the state message")
        Then("it should contain `Kafka is offline`")
        inbox.fishForMessage(5.seconds) {
          case ServerOfflineApproved => FishingOutcomes.complete
          case _                     => FishingOutcomes.fail("Unexpected message")
        }
      }
    }

    scenario("SAP is offline and Kafka is online") {
      withKafkaOnlineSapOffline { inbox =>
        Given("I am waiting for the current state message")
        When("I receive the state message")
        Then("it should contain `SAP is offline`")
        inbox.fishForMessage(5.seconds) {
          case ServerOfflineApproved => FishingOutcomes.complete
          case _                     => FishingOutcomes.fail("Unexpected message")
        }
      }
    }

    scenario("SAP and Kafka are offline") {
      withKafkaAndSapOffline { (inbox1, inbox2) =>
        Given("I am registering two listeners")
        When("I receive the state message")
        Then("it should contain `Kafka and SAP are offline`")
        inbox1.fishForMessage(5.seconds) {
          case ServerOfflineApproved => FishingOutcomes.complete
          case _                     => FishingOutcomes.fail("Unexpected message")
        }
        inbox2.fishForMessage(5.seconds) {
          case ServerOfflineApproved => FishingOutcomes.complete
          case _                     => FishingOutcomes.fail("Unexpected message")
        }
      }
    }
  }
}
As you can see, after every test it shuts down the ActorTestKit. But sometimes the shutdown freezes; that is, I get the following messages:
[DEBUG] [07/19/2019 20:37:57.788] [DetectorSystem3-akka.actor.default-blocking-io-dispatcher-11] [akka://DetectorSystem3/system/IO-TCP/selectors/$a/0] Attempting connection to [localhost/127.0.0.1:32846]
[DEBUG] [07/19/2019 20:37:57.789] [DetectorSystem3-akka.io.pinned-dispatcher-2] [akka://DetectorSystem3/system/IO-TCP/selectors/$a/0] Connection established to [localhost:32846]
[INFO] [07/19/2019 20:37:58.807] [ScalaTest-run-running-DetectorSpec] [akka://DetectorSystem3/user/DetectorSupervisor/KafkaActor/KafkaStreamer] !!!!!!!!!!!!!!!!!!!!! Shutdown KafkaDetectorActor !!!!!!!!!!!!!!!!!!!!!
[INFO] [07/19/2019 20:37:58.810] [ScalaTest-run-running-DetectorSpec] [akka://DetectorSystem3/user/DetectorSupervisor/SapActor/SapStreamer] !!!!!!!!!!!!!!!!!!!!! Shutdown SapDetectorActor !!!!!!!!!!!!!!!!!!!!!
and the ActorTestKit never shuts down. It just keeps running and running...
How can I stop it?
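One way to turn the silent hang into a visible failure is to bound the shutdown explicitly. A minimal sketch, assuming the ActorTestKit.shutdown companion overload and the throw-on-shutdown-timeout setting from akka-actor-testkit-typed; verify both against your Akka version:

import scala.concurrent.duration._
import akka.actor.testkit.typed.scaladsl.ActorTestKit

// Sketch: instead of testKit.shutdownTestKit(), bound the shutdown so a hang
// fails fast instead of blocking the suite forever. The 10s value is
// illustrative; check the shutdown overload against your Akka version.
ActorTestKit.shutdown(testKit.system, 10.seconds, throwIfShutdownFails = true)

// Alternatively, via config (assumption: setting names from the testkit
// reference.conf):
//   akka.actor.testkit.typed.system-shutdown-default = 10s
//   akka.actor.testkit.typed.throw-on-shutdown-timeout = on

This does not remove the cause of the hang (the logs above suggest DetectorSystem3 still holds live TCP connections), but it turns the freeze into a test failure you can act on.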

Related

Akka committableOffset store in DB

I am a beginner in Akka and I have a problem statement to work with. I have an Akka flow that reads Kafka events from some topic and does some transformation before creating a committable offset for the message.
I am not sure of the best way to add an Akka sink on top of this code to store the transformed events in some DB.
def eventTransform: Flow[KafkaMessage, CommittableRecord[Either[Throwable, SomeEvent]], NotUsed]

def processEvents: Flow[KafkaMessage, ConsumerMessage.CommittableOffset, NotUsed] =
  Flow[KafkaMessage]
    .via(eventTransform)
    .filter { x =>
      x.value match {
        case Right(event: SomeEvent) => event.status != "running"
        case Left(_)                 => false
      }
    }
    .map(_.message.committableOffset)
This is my Akka source, which calls the flow above:
private val consumerSettings: ConsumerSettings[String, String] =
  ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
    .withGroupId(groupId)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

private val committerSettings: CommitterSettings = CommitterSettings(system)

private val control = new AtomicReference[Consumer.Control](Consumer.NoopControl)

private val restartableSource = RestartSource
  .withBackoff(restartSettings) { () =>
    Consumer
      .committableSource(consumerSettings, Subscriptions.topics(topicIn))
      .mapMaterializedValue(control.set)
      .via(processEvents) // calling the flow here
  }

restartableSource
  .toMat(Committer.sink(committerSettings))(Keep.both)
  .run()

def api(): Behavior[Message] =
  Behaviors
    .receive[Message] { (context, message) => // context is needed for pipeToSelf
      message match {
        case Stop =>
          context.pipeToSelf(control.get().shutdown())(_ => Stopped)
          Behaviors.same
        case Stopped =>
          Behaviors.stopped
      }
    }
    .receiveSignal {
      case (_, PostStop | PreRestart) =>
        control.get().shutdown()
        Behaviors.same
    }
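One common way to wire this (a sketch, not the definitive answer: saveToDb is a hypothetical DB call returning Future[Done], and the parallelism is arbitrary) is to persist each transformed event with mapAsync and only then emit its offset, so the existing Committer.sink(committerSettings) keeps doing the committing:

import scala.concurrent.{ExecutionContext, Future}
import akka.{Done, NotUsed}
import akka.kafka.ConsumerMessage
import akka.stream.scaladsl.Flow

// Sketch: persist each transformed event, then emit its offset, so the
// downstream Committer.sink only commits offsets of stored events.
// `saveToDb` is hypothetical; swap in the real DB call.
def processAndStore(saveToDb: SomeEvent => Future[Done])(
    implicit ec: ExecutionContext): Flow[KafkaMessage, ConsumerMessage.CommittableOffset, NotUsed] =
  Flow[KafkaMessage]
    .via(eventTransform)
    .mapConcat { rec =>
      rec.value match {
        case Right(event) if event.status != "running" =>
          (event, rec.message.committableOffset) :: Nil
        case _ => Nil // drop failures and "running" events, as processEvents does
      }
    }
    .mapAsync(parallelism = 4) { case (event, offset) =>
      saveToDb(event).map(_ => offset) // emit the offset only after the write
    }

Swapping .via(processEvents) for .via(processAndStore(saveToDb)) in restartableSource leaves the rest of the graph unchanged.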

How to load configuration for every test scenario?

I am using Akka and would like to test my actors. The test looks as follows:
import akka.testkit._
import com.sweetsoft._
import org.scalatest.Assertion
import com.typesafe.config.{Config, ConfigFactory}
import org.testcontainers.containers.KafkaContainer
import scala.concurrent.duration._

final class DetectorSpec extends BddSpec {

  private val sapMock = new SapMock()
    .withExposedPorts(8080)
  private val kafkaMock = new KafkaContainer()

  private val config: String => String => Config = kafka => sap =>
    ConfigFactory.parseString(
      s"""
         |kafka {
         |  servers = "$kafka"
         |}
         |
         |sap {
         |  server = "ws://$sap"
         |}
      """.stripMargin)

  private val listener1 = TestProbe()
  private val listener2 = TestProbe()
  private val detector = system.actorOf(DetectorSupervisor.props)

  after {
  }

  override def beforeAll(): Unit = {
  }

  override def afterAll(): Unit = {
    TestKit.shutdownActorSystem(system)
  }

  feature("Detect Kafka and SAP availability") {
    info("As a technical user, I want to be notified in real time, if Kafka and SAP are up and running or not.")

    scenario("SAP and Kafka are offline") {
      Given("I am registering two listeners")
      detector ! AddNewListener(listener1.ref)
      detector ! AddNewListener(listener2.ref)
      When("I receive the state message")
      val res1 = listener1.expectMsgPF[Assertion](2.second) _
      val res2 = listener2.expectMsgPF[Assertion](2.second) _
      Then("it should contain `Kafka and SAP are offline`")
      res1 {
        case status: ServerStatus =>
          status.health should be(ServersOffline)
      }
      res2 {
        case status: ServerStatus =>
          status.health should be(ServersOffline)
      }
    }

    scenario("SAP is online and Kafka is offline") {
      sapMock.start()
      Given("I am waiting for the current state message")
      detector ! AddNewListener(listener1.ref)
      When("I receive the state message")
      val res1 = listener1.expectMsgPF[Assertion](2.second) _
      Then("it should contain `Kafka is offline`")
      res1 {
        case status: ServerStatus =>
          sapMock.stop()
          status.health should be(ServersOffline)
      }
    }

    scenario("SAP is offline and Kafka is online") {
      Given("I am waiting for the current state message")
      When("I receive the state message")
      Then("it should contain `SAP is offline`")
      cancel()
    }

    scenario("SAP and Kafka are available") {
      Given("I am waiting for the current state message")
      When("I receive the state message")
      Then("it should contain `SAP and Kafka are online`")
      cancel()
    }
  }
}
As you can see, I am using Testcontainers to build the test environment.
I would like to start a container only in particular scenarios, and in those scenarios inject the matching configuration.
For example, the scenario scenario("SAP and Kafka are offline") needs a different configuration than scenario("SAP is online and Kafka is offline").
The question is: how do I load a different configuration for each scenario?
The Akka website shows how to load a configuration as follows:
import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

val customConf = ConfigFactory.parseString("""
  akka.actor.deployment {
    /my-service {
      router = round-robin-pool
      nr-of-instances = 3
    }
  }
""")
// ConfigFactory.load sandwiches customConfig between default reference
// config and default overrides, and then resolves it.
val system = ActorSystem("MySystem", ConfigFactory.load(customConf))
Why do I have to do it this way? The container port is only available once the container has started, and I do not want to start the container in every scenario.
The BddSpec class:
abstract class BddSpec extends TestKit(ActorSystem("PluggerSystem"))
  with AsyncFeatureSpecLike
  with Matchers
  with GivenWhenThen
  with BeforeAndAfter
  with BeforeAndAfterAll
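A sketch of one way around this, reusing the loan-fixture idea from the first question above: create the ActorSystem per scenario, after the needed container has started, so the mapped port can be baked into the config. The fixture name and config keys are illustrative, and BddSpec would then no longer hard-wire a single ActorSystem in its constructor:

// Sketch: start the container first, build an ActorSystem from its runtime
// port, lend both to the test, and tear everything down afterwards.
private def withSapOnline(testCode: (ActorSystem, TestProbe) => Unit): Unit = {
  sapMock.start()
  val conf = ConfigFactory.parseString(
    s"""
       |sap {
       |  server = "ws://${sapMock.getContainerIpAddress}:${sapMock.getMappedPort(8080)}"
       |}
    """.stripMargin)
  val system = ActorSystem("PluggerSystem", conf)
  try testCode(system, TestProbe()(system))
  finally {
    TestKit.shutdownActorSystem(system)
    sapMock.stop()
  }
}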

Websocket client does not receive data from Akka streams Source.queue

I am using an Akka Streams Source.queue as the source for websocket clients.
I read 10k records from a Kafka topic with the Kafka consumer API and offer them to the Source.queue with a buffer of 100k.
I am using a BroadcastHub for fan-out. The websocket client does not get any data, but I can see the Kafka records being enqueued from the offer result.
Appreciate any help.
def kafkaSourceQueue() = {
  val sourceQueue = Source.queue[String](100000, OverflowStrategy.dropHead)
  val (theQueue, queueSource) = sourceQueue.toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both).run
  val consumer = KafkaEventSource.initKafkaConsumer()
  try {
    while (true) {
      val records = consumer.poll(polltimeout.toMillis)
      for (record <- records.records(topic)) {
        // println(record.value())
        theQueue.offer(record.value()).onComplete {
          case Success(QueueOfferResult.Enqueued) =>
            println("enqueued")
          case _ => println("Failed to enqueue")
        }
      }
    }
  } finally {
    if (consumer != null) {
      println("consumer unsubscribed")
      consumer.unsubscribe()
    }
  }
  queueSource
}

private def logicStreamFlow: Flow[String, String, NotUsed] = {
  Flow.fromSinkAndSourceCoupled(Sink.ignore, kafkaSourceQueue())
}

def websocketFlow: Flow[Message, Message, NotUsed] = {
  Flow[Message]
    .map {
      case TextMessage.Strict(msg) => msg
      case _                       => throw new Exception("exception msg")
    }
    .via(logicStreamFlow)
    .map { msg: String => TextMessage.Strict(msg) }
}

lazy private val streamRoute =
  path("stream") {
    handleWebSocketMessages {
      websocketFlow
        .watchTermination() { (_, done) =>
          done.onComplete {
            case Success(_) =>
              log.info("Stream route completed successfully")
            case Failure(ex) =>
              log.error(s"Stream route completed with failure : $ex")
          }
        }
    }
  }

def startServer(): Unit = {
  bindingFuture = Http().bindAndHandle(wsRoutes, HOST, PORT)
  log.info(s"Server online at http://localhost:9000/")
}

def stopServer(): Unit = {
  bindingFuture
    .flatMap(_.unbind())
    .onComplete { _ =>
      system.terminate()
      log.info("terminated")
    }
}
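A likely cause, hedged: kafkaSourceQueue() never returns, because the while (true) poll loop runs on the calling thread before queueSource can be handed back, so the websocket flow never attaches to the hub. A minimal sketch of the same idea with the hub materialized once and the polling moved off the caller's thread (names reused from the question; the dedicated blocking execution context is an assumption):

import scala.concurrent.{ExecutionContext, Future}
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{BroadcastHub, Keep, Source}

// Sketch: materialize the queue and hub exactly once (assumes an implicit
// Materializer in scope, as in the question), then poll Kafka on a dedicated
// blocking execution context so the enclosing code can actually return.
lazy val (theQueue, queueSource) =
  Source.queue[String](100000, OverflowStrategy.dropHead)
    .toMat(BroadcastHub.sink(bufferSize = 256))(Keep.both)
    .run()

def startPolling()(implicit blockingEc: ExecutionContext): Future[Unit] =
  Future {
    val consumer = KafkaEventSource.initKafkaConsumer() // from the question
    try {
      while (true) {
        val records = consumer.poll(polltimeout.toMillis)
        for (record <- records.records(topic))
          theQueue.offer(record.value())
      }
    } finally consumer.unsubscribe()
  }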

Akka-http first websocket client only receives the data once from a kafka topic

I am using akka-http websockets to push messages from a Kafka topic to websocket clients.
For this purpose, I created a plain Kafka consumer (using the akka-streams-kafka connector) with the offset reset set to "earliest", so that every newly connecting websocket client gets all the data from the beginning.
The problem is that the first connected websocket client gets all the data, and the other clients (connecting after the first client has received everything) do not get any. The Kafka topic has one million records.
I am using the BroadcastHub from Akka Streams.
Appreciate any suggestions.
lazy private val kafkaPlainSource: Source[String, NotUsed] = {
  val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
    .withBootstrapServers(KAFKA_BROKERS)
    .withGroupId(UUID.randomUUID().toString)
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")
  val kafkaSource = Consumer.plainSource(consumerSettings, Subscriptions.topics(KAFKA_TOPIC))
    .mapAsync(PARALLELISM) { cr =>
      Future {
        cr.value
      }
    }
  kafkaSource.toMat(BroadcastHub.sink)(Keep.right).run
}

def logicFlow: Flow[String, String, NotUsed] =
  Flow.fromSinkAndSourceCoupled(Sink.ignore, kafkaPlainSource)

val websocketFlow: Flow[Message, Message, Any] = {
  Flow[Message]
    .mapConcat {
      case TextMessage.Strict(msg) => msg :: Nil
      case _ =>
        println("ignore streamed message")
        Nil // map could not return Unit here, so drop non-strict messages
    }
    .via(logicFlow)
    .map { msg: String => TextMessage.Strict(msg) }
}

lazy private val streamRoute =
  path("stream") {
    handleWebSocketMessages {
      websocketFlow
        .watchTermination() { (_, done) =>
          done.onComplete {
            case Success(_) =>
              log.info("Stream route completed successfully")
            case Failure(ex) =>
              log.error(s"Stream route completed with failure : $ex")
          }
        }
    }
  }

def startServer(): Unit = {
  bindingFuture = Http().bindAndHandle(wsRoutes, HOST, PORT)
  log.info(s"Server online at http://localhost:9000/")
}

def stopServer(): Unit = {
  bindingFuture
    .flatMap(_.unbind())
    .onComplete { _ =>
      system.terminate()
      log.info("terminated")
    }
}
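For what it's worth, this matches how BroadcastHub behaves: a consumer attached to the hub only receives elements emitted after it attached, so once the first client has drained the topic, later clients see nothing. A hedged sketch of one alternative, building a fresh Kafka source per websocket connection so each client replays from "earliest" under its own random group id (names reused from the question):

// Sketch: instead of one shared BroadcastHub, materialize a fresh Kafka
// source per websocket connection so each client replays the whole topic.
def perClientFlow(): Flow[Message, Message, Any] = {
  val consumerSettings = ConsumerSettings(system, new StringDeserializer, new StringDeserializer)
    .withBootstrapServers(KAFKA_BROKERS)
    .withGroupId(UUID.randomUUID().toString) // new group id => read from "earliest"
    .withProperty(ConsumerConfig.AUTO_OFFSET_RESET_CONFIG, "earliest")

  val source = Consumer
    .plainSource(consumerSettings, Subscriptions.topics(KAFKA_TOPIC))
    .map(cr => TextMessage.Strict(cr.value): Message)

  Flow.fromSinkAndSourceCoupled(Sink.ignore, source)
}

lazy private val perClientStreamRoute =
  path("stream") {
    handleWebSocketMessages(perClientFlow()) // materialized once per client
  }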

Apache Kafka: KafkaProducerActor throws Ask timeout exception

I am using the Cake Solutions Akka client for Scala and Kafka. I create a KafkaProducerActor and try to send a message using the ask pattern, returning a future and performing some operations on it, but every time I get an ask timeout exception. Below is my code:
class SimpleAkkaProducer(config: Config, system: ActorSystem) {

  private val producerConf = KafkaProducer.Conf(
    config,
    keySerializer = new StringSerializer,
    valueSerializer = new StringSerializer)

  val actorRef = system.actorOf(KafkaProducerActor.props(producerConf))

  def sendMessageWayOne(record: ProducerRecords[String, String]) = {
    actorRef ! record
  }

  def sendMessageWayTwo(record: ProducerRecords[String, String]) = {
    implicit val timeout = Timeout(100.seconds)
    val future = (actorRef ? record).mapTo[String]
    future onComplete {
      case Success(data) => println(s" >>>>>>>>>>>> ${data}")
      case Failure(ex)   => ex.printStackTrace()
    }
  }
}

object SimpleAkkaProducer {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("KafkaProducerActor")
    val config = ConfigFactory.defaultApplication()
    val simpleAkkaProducer = new SimpleAkkaProducer(config, system)
    val topic = config.getString("akka.topic")
    val messageOne = ProducerRecords.fromKeyValues[String, String](
      topic, Seq((Some("Topics"), "First Message")), None, None)
    simpleAkkaProducer.sendMessageWayOne(messageOne)
    simpleAkkaProducer.sendMessageWayTwo(messageOne)
  }
}
Following is the exception:
akka.pattern.AskTimeoutException: Ask timed out on [Actor[akka://KafkaProducerActor/user/$a#-1520717141]] after [100000 ms]. Sender[null] sent message of type "cakesolutions.kafka.akka.ProducerRecords".
at akka.pattern.PromiseActorRef$.$anonfun$apply$1(AskSupport.scala:604)
at akka.actor.Scheduler$$anon$4.run(Scheduler.scala:126)
at scala.concurrent.Future$InternalCallbackExecutor$.unbatchedExecute(Future.scala:864)
at scala.concurrent.BatchingExecutor.execute(BatchingExecutor.scala:109)
at scala.concurrent.BatchingExecutor.execute$(BatchingExecutor.scala:103)
at scala.concurrent.Future$InternalCallbackExecutor$.execute(Future.scala:862)
at akka.actor.LightArrayRevolverScheduler$TaskHolder.executeTask(LightArrayRevolverScheduler.scala:329)
at akka.actor.LightArrayRevolverScheduler$$anon$4.executeBucket$1(LightArrayRevolverScheduler.scala:280)
at akka.actor.LightArrayRevolverScheduler$$anon$4.nextTick(LightArrayRevolverScheduler.scala:284)
at akka.actor.LightArrayRevolverScheduler$$anon$4.run(LightArrayRevolverScheduler.scala:236)
at java.lang.Thread.run(Thread.java:745)
The producer actor only responds to the sender if you set the successResponse and failureResponse values in the ProducerRecords to something other than None. The successResponse value is sent back to the sender when the Kafka write succeeds, and the failureResponse value is sent back when the write fails.
Example:
val record = ProducerRecords.fromKeyValues[String, String](
  topic = topic,
  keyValues = Seq((Some("Topics"), "First Message")),
  successResponse = Some("success"),
  failureResponse = Some("failure")
)

val future = (actorRef ? record).mapTo[String]

future onComplete {
  case Success("success") => println("Send succeeded!")
  case Success("failure") => println("Send failed!")
  case Success(data)      => println(s"Send result: $data")
  case Failure(ex)        => ex.printStackTrace()
}