Materialising a graph within an actor - scala

I am trying to materialise a graph within an actor. This seems to work if any of the following is true:
The graph does not contain a broadcast (created with alsoTo), or
The same ActorMaterializer is used for each materialisation, or
The graph is materialised outside of an Actor
I have reduced it down to the following test cases:
import java.util.concurrent.{CountDownLatch, TimeUnit}
import akka.NotUsed
import akka.actor.{Actor, ActorSystem}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{RunnableGraph, Sink, Source}
import akka.testkit.{TestActorRef, TestKit}
import org.scalatest.{FlatSpecLike, Matchers}
class ActorFlowTest extends TestKit(ActorSystem("ActorFlowTest")) with Matchers with FlatSpecLike {
def createGraph(withBroadcast: Boolean) = {
if (withBroadcast) Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
else Source.empty.to(Sink.ignore)
}
case object Bomb
class FlowActor(
graph: RunnableGraph[NotUsed],
latch: CountDownLatch,
materializer: (ActorSystem) => ActorMaterializer
) extends Actor {
override def preStart(): Unit = {
graph.run()(materializer(context.system))
latch.countDown()
}
override def receive: Receive = {
case Bomb => throw new RuntimeException
}
}
"Without an actor" should "be able to materialize twice" in {
val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
val materializer1 = ActorMaterializer()(system)
val materializer2 = ActorMaterializer()(system)
graph.run()(materializer1)
graph.run()(materializer2) // Pass
}
"With a the same materializer" should "be able to materialize twice" in {
val graph = createGraph(withBroadcast = true)
val latch = new CountDownLatch(2)
val materializer = ActorMaterializer()(system)
val actorRef = TestActorRef(new FlowActor(graph, latch, _ => materializer))
verify(actorRef, latch) should be(true) // Pass
}
"With a new materializer but no broadcast" should "be able to materialize twice" in {
val graph = createGraph(withBroadcast = false)
val latch = new CountDownLatch(2)
def materializer(system: ActorSystem) = ActorMaterializer()(system)
val actorRef = TestActorRef(new FlowActor(graph, latch, materializer))
verify(actorRef, latch) should be(true) // Pass
}
"With a new materializer and a broadcast" should "be able to materialize twice" in {
val graph = createGraph(withBroadcast = true)
val latch = new CountDownLatch(2)
def materializer(system: ActorSystem) = ActorMaterializer()(system)
val actorRef = TestActorRef(new FlowActor(graph, latch, materializer))
verify(actorRef, latch) should be(true) // Fail
}
def verify(actorRef: TestActorRef[_], latch: CountDownLatch): Boolean = {
actorRef.start()
actorRef ! Bomb
latch.await(25, TimeUnit.SECONDS)
}
}
It seems that the last case always times out, with the following error in the log:
[ERROR] [07/05/2016 16:06:30.625] [ActorFlowTest-akka.actor.default-dispatcher-6] [akka://ActorFlowTest/user/$$c] Futures timed out after [20000 milliseconds]
akka.actor.PostRestartException: akka://ActorFlowTest/user/$$c: exception post restart (class java.lang.RuntimeException)
at akka.actor.dungeon.FaultHandling$$anonfun$6.apply(FaultHandling.scala:250)
at akka.actor.dungeon.FaultHandling$$anonfun$6.apply(FaultHandling.scala:248)
at akka.actor.dungeon.FaultHandling$$anonfun$handleNonFatalOrInterruptedException$1.applyOrElse(FaultHandling.scala:303)
at akka.actor.dungeon.FaultHandling$$anonfun$handleNonFatalOrInterruptedException$1.applyOrElse(FaultHandling.scala:298)
at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
at akka.actor.dungeon.FaultHandling$class.finishRecreate(FaultHandling.scala:248)
at akka.actor.dungeon.FaultHandling$class.faultRecreate(FaultHandling.scala:76)
at akka.actor.ActorCell.faultRecreate(ActorCell.scala:374)
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:464)
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:483)
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:282)
at akka.testkit.CallingThreadDispatcher.process$1(CallingThreadDispatcher.scala:243)
at akka.testkit.CallingThreadDispatcher.runQueue(CallingThreadDispatcher.scala:283)
at akka.testkit.CallingThreadDispatcher.systemDispatch(CallingThreadDispatcher.scala:191)
at akka.actor.dungeon.Dispatch$class.restart(Dispatch.scala:119)
at akka.actor.ActorCell.restart(ActorCell.scala:374)
at akka.actor.LocalActorRef.restart(ActorRef.scala:406)
at akka.actor.SupervisorStrategy.restartChild(FaultHandling.scala:365)
at akka.actor.OneForOneStrategy.processFailure(FaultHandling.scala:518)
at akka.actor.SupervisorStrategy.handleFailure(FaultHandling.scala:303)
at akka.actor.dungeon.FaultHandling$class.handleFailure(FaultHandling.scala:263)
at akka.actor.ActorCell.handleFailure(ActorCell.scala:374)
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:459)
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:483)
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:282)
at akka.dispatch.Mailbox.run(Mailbox.scala:223)
at akka.dispatch.Mailbox.exec(Mailbox.scala:234)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
Caused by: java.util.concurrent.TimeoutException: Futures timed out after [20000 milliseconds]
at scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:219)
at scala.concurrent.impl.Promise$DefaultPromise.result(Promise.scala:223)
at scala.concurrent.Await$$anonfun$result$1.apply(package.scala:190)
at akka.dispatch.MonitorableThreadFactory$AkkaForkJoinWorkerThread$$anon$3.block(ThreadPoolBuilder.scala:167)
at scala.concurrent.forkjoin.ForkJoinPool.managedBlock(ForkJoinPool.java:3640)
at akka.dispatch.MonitorableThreadFactory$AkkaForkJoinWorkerThread.blockOn(ThreadPoolBuilder.scala:165)
at scala.concurrent.Await$.result(package.scala:190)
at akka.stream.impl.ActorMaterializerImpl.actorOf(ActorMaterializerImpl.scala:207)
at akka.stream.impl.ActorMaterializerImpl$$anon$2.matGraph(ActorMaterializerImpl.scala:166)
at akka.stream.impl.ActorMaterializerImpl$$anon$2.materializeAtomic(ActorMaterializerImpl.scala:150)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:919)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:915)
at scala.collection.immutable.Set$Set1.foreach(Set.scala:94)
at akka.stream.impl.MaterializerSession.materializeModule(StreamLayout.scala:915)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:922)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:915)
at scala.collection.immutable.Set$Set4.foreach(Set.scala:200)
at akka.stream.impl.MaterializerSession.materializeModule(StreamLayout.scala:915)
at akka.stream.impl.MaterializerSession.materialize(StreamLayout.scala:882)
at akka.stream.impl.ActorMaterializerImpl.materialize(ActorMaterializerImpl.scala:182)
at akka.stream.impl.ActorMaterializerImpl.materialize(ActorMaterializerImpl.scala:80)
at akka.stream.scaladsl.RunnableGraph.run(Flow.scala:351)
at ActorFlowTest$FlowActor.preStart(ActorFlowTest.scala:40)
at akka.actor.Actor$class.postRestart(Actor.scala:566)
at ActorFlowTest$FlowActor.postRestart(ActorFlowTest.scala:33)
at akka.actor.Actor$class.aroundPostRestart(Actor.scala:504)
at ActorFlowTest$FlowActor.aroundPostRestart(ActorFlowTest.scala:33)
at akka.actor.dungeon.FaultHandling$class.finishRecreate(FaultHandling.scala:239)
... 25 more
I have tried explicitly terminating the ActorMaterializers, but that does not reproduce the problem.
A workaround is to create a closure around the ActorMaterializer in the Props, but if that materializer also came from another Actor I'm worried I will eventually run into similar problems.
Any idea why this happens? It obviously has something to do with the ActorMaterializer, but it is interesting that removing the broadcast also fixes it (even with a much more complicated graph).
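For reference, the workaround described above looks roughly like this (a sketch using the names from the test code; the materializer is created once outside the actor and captured by the closure passed via Props):

val sharedMaterializer = ActorMaterializer()(system)
// the FlowActor ignores its ActorSystem argument and always reuses the shared materializer
val props = Props(new FlowActor(graph, latch, _ => sharedMaterializer))
val flowActor = system.actorOf(props)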

This seems to be related to (or at least solved by) proper supervision. I created an extra Supervisor actor which, for demonstration purposes, just starts a single FlowActor in its preStart function and forwards the Bomb messages to it. The following tests execute successfully without any timeout exception:
import java.util.concurrent.{CountDownLatch, TimeUnit}
import akka.NotUsed
import akka.actor.Actor.Receive
import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorRef, ActorSystem, OneForOneStrategy, Props}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{RunnableGraph, Sink, Source}
import akka.testkit.{TestActorRef, TestKit}
import org.scalatest.{FlatSpecLike, Matchers}
import scala.concurrent.duration._
class ActorFlowTest extends TestKit(ActorSystem("TrikloSystem")) with Matchers with FlatSpecLike {
def createGraph(withBroadcast: Boolean) = {
if (withBroadcast) Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
else Source.empty.to(Sink.ignore)
}
case object Bomb
class Supervisor( graph: RunnableGraph[NotUsed],
latch: CountDownLatch,
materializer: (ActorSystem) => ActorMaterializer) extends Actor {
var actorRef: Option[ActorRef] = None
override def preStart(): Unit = {
actorRef = Some(context.actorOf(Props( new FlowActor(graph, latch, materializer))))
}
override def receive: Receive = {
case Bomb => actorRef.map( _ ! Bomb )
}
}
class FlowActor(
graph: RunnableGraph[NotUsed],
latch: CountDownLatch,
materializer: (ActorSystem) => ActorMaterializer
) extends Actor {
override def preStart(): Unit = {
graph.run()(materializer(context.system))
latch.countDown()
}
override def receive: Receive = {
case Bomb =>
throw new RuntimeException
}
}
"Without an actor" should "be able to materialize twice" in {
val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
val materializer1 = ActorMaterializer()(system)
val materializer2 = ActorMaterializer()(system)
graph.run()(materializer1)
graph.run()(materializer2) // Pass
}
"With a the same materializer" should "be able to materialize twice" in {
val graph = createGraph(withBroadcast = true)
val latch = new CountDownLatch(2)
val materializer = ActorMaterializer()(system)
val actorRef = TestActorRef(new Supervisor(graph, latch, _ => materializer))
verify(actorRef, latch) should be(true) // Pass
}
"With a new materializer but no broadcast" should "be able to materialize twice" in {
val graph = createGraph(withBroadcast = false)
val latch = new CountDownLatch(2)
def materializer(system: ActorSystem) = ActorMaterializer()(system)
val actorRef = TestActorRef(new Supervisor(graph, latch, materializer))
verify(actorRef, latch) should be(true) // Pass
}
"With a new materializer and a broadcast" should "be able to materialize twice" in {
val graph = createGraph(withBroadcast = true)
val latch = new CountDownLatch(2)
def materializer(system: ActorSystem) = ActorMaterializer()(system)
val actorRef = TestActorRef(new Supervisor(graph, latch, materializer))
verify(actorRef, latch) should be(true) // Pass
}
def verify(actorRef: TestActorRef[_], latch: CountDownLatch): Boolean = {
actorRef.start()
actorRef ! Bomb
latch.await(25, TimeUnit.SECONDS)
}
}

There are some misuses of the Akka TestKit in this test.
TestActorRef is a very special test construct in that it executes on the calling thread (the CallingThreadDispatcher), to allow for easy synchronous unit testing. Using a CountDownLatch in a synchronous test is strange, since every action happens on the same thread, so there is no need for inter-thread communication.
When you create an instance of TestActorRef it is started in that same call (you can see this by, for example, throwing an exception from the constructor or preStart and watching it end up in your test case).
Calling start() on the ActorRef is definitely not something you should do. TestActorRef's special nature gives you access to it, but you are essentially calling start() on an empty shell actor, not the actor you think you are interacting with (and even if it were that actor, it would still be wrong to ever call start() on it).
A proper (but not very useful, since there is no problem materializing a graph twice regardless of context or materializer) test of what you intend to test would drop the latch and look something like this:
class FlowActor(graph: RunnableGraph[NotUsed], materializer: (ActorSystem) => ActorMaterializer) extends Actor {
override def preStart(): Unit = {
graph.run()(materializer(context.system))
}
override def receive: Receive = Actor.emptyBehavior
}
"With a new materializer and a broadcast" should "be able to materialize twice" in {
val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
def materializer(system: ActorSystem) = ActorMaterializer()(system)
val actorRef1 = TestActorRef(new FlowActor(graph, materializer))
val actorRef2 = TestActorRef(new FlowActor(graph, materializer))
// we'd get an exception here if it was not possible to materialize
// since pre-start is run on the calling thread - the same thread
// that is executing the test case
}
I'd let the specific weirdness of this one go rather than digging deeper into the magic inside TestActorRef; the insights will be hard earned and will not be applicable to many cases beyond this specific one.
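For completeness, a rough sketch (not part of the answer above) of the same check written in the ordinary asynchronous TestKit style, where the actor runs on a regular dispatcher and reports back to a TestProbe. ReportingFlowActor is an illustrative variant of FlowActor; akka.actor.{ActorRef, Props} and akka.testkit.TestProbe are assumed in addition to the question's imports:

class ReportingFlowActor(graph: RunnableGraph[NotUsed], report: ActorRef) extends Actor {
  override def preStart(): Unit = {
    graph.run()(ActorMaterializer()(context.system)) // fresh materializer per actor
    report ! "materialized" // tell the test that materialization succeeded
  }
  override def receive: Receive = Actor.emptyBehavior
}

"A FlowActor" should "materialize a broadcast graph with a fresh materializer" in {
  val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
  val probe = TestProbe()
  system.actorOf(Props(new ReportingFlowActor(graph, probe.ref)))
  probe.expectMsg("materialized")
}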

Related

Hits more than total requests

So I started learning Scala, Akka actors, and Akka HTTP. I tried to implement a simple hit counter using Akka HTTP which counts every hit on the localhost page. I used the wrk tool to run 10 threads with 100 connections, after which there is a mismatch between the count and the total requests (as reported by wrk).
This is my code:
import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ContentTypes, HttpEntity}
import akka.http.scaladsl.server.Directives._
import akka.stream.ActorMaterializer
import scala.io.StdIn
object WebServer3 {
var number: Int = 0
final case class Inc()
class ActorClass extends Actor with ActorLogging {
def receive = {
case Inc => number = number + 1
}
}
def main(args: Array[String]) {
implicit val system = ActorSystem("my-system")
implicit val materializer = ActorMaterializer()
implicit val executionContext = system.dispatcher
val actor1 = system.actorOf(Props[ActorClass], "SimpleActor")
val route =
path("Counter") {
get {
actor1 ! Inc
complete(HttpEntity(ContentTypes.`text/html(UTF-8)`, s"<h1>You visited $number times</h1>"))
}
}
val bindingFuture = Http().bindAndHandle(route, "localhost", 8080)
println(s"Server online at http://localhost:8080/\nPress RETURN to stop...")
StdIn.readLine() // let it run until user presses return
bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
.onComplete(_ => system.terminate()) // and shutdown when done
}
}
Pardon my immature/amateurish coding skills. I am still learning and I know this has to do with concurrency. But I cannot find a solution yet. Please help.
Edit #1: I tried AtomicInteger too. That did not help.
Edit #2: I tried the complete akka-http way with ask and await too. That did not help either.
There are a few problems with your code.
You're defining a case class, final case class Inc(), but you're sending the companion object: actor1 ! Inc. Since you also match on the companion object with case Inc =>, your code still works, but it should not be done this way.
The other problem is that var number: Int = 0 is stored, modified, and read outside of the actor's boundaries, and I think this is why you see miscounts. An actor should only modify its own internal state.
I modified your code by introducing the ask pattern so the value can be retrieved from within the actor.
import akka.actor.{Actor, ActorLogging, ActorSystem, Props}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{ContentTypes, HttpEntity}
import akka.http.scaladsl.server.Directives._
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.util.Timeout
import scala.concurrent.duration._
import scala.io.StdIn
object WebServer3 {
final case object IncAndGet //not a case class anymore
class ActorClass extends Actor with ActorLogging {
private var number: Int = 0 //inner state must be private and not accessible from outside of an actor
def receive = {
case IncAndGet =>
number += 1
context.sender() ! number // send current value back to sender
}
}
def main(args: Array[String]) {
implicit val system = ActorSystem("my-system")
implicit val materializer = ActorMaterializer()
implicit val executionContext = system.dispatcher
implicit val timeout: Timeout = 2.seconds
val actor1 = system.actorOf(Props[ActorClass], "SimpleActor")
val route =
path("counter") {
get {
onSuccess((actor1 ? IncAndGet).mapTo[Int]) { number =>
complete(
HttpEntity(ContentTypes.`text/html(UTF-8)`,
s"<h1>You visited $number times</h1>"))
}
}
}
val bindingFuture = Http().bindAndHandle(route, "localhost", 8080)
println(s"Server online at http://localhost:8080/\nPress RETURN to stop...")
StdIn.readLine() // let it run until user presses return
val _ = bindingFuture
.flatMap(_.unbind()) // trigger unbinding from the port
}
}
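As a side note on edit #1: an AtomicInteger can also work, but only if the route increments and reads in a single atomic step, and does so per request. A sketch (counter is an illustrative name) that would replace the actor and the route in the original main, using the same imports as above:

import java.util.concurrent.atomic.AtomicInteger

val counter = new AtomicInteger(0)

val route =
  path("counter") {
    get {
      complete {
        // complete takes its argument by name, so this block runs for every request;
        // incrementAndGet is a single atomic read-modify-write
        val n = counter.incrementAndGet()
        HttpEntity(ContentTypes.`text/html(UTF-8)`, s"<h1>You visited $n times</h1>")
      }
    }
  }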

akka stream integrating akka-http web request call into stream

Getting started with Akka Streams, I want to perform a simple computation, extending the basic quickstart https://doc.akka.io/docs/akka/2.5/stream/stream-quickstart.html with a call to a RESTful web API:
val source: Source[Int, NotUsed] = Source(1 to 100)
source.runForeach(println)
already works nicely to print the numbers. But when trying to create an Actor to perform the HTTP request (is this actually necessary?) according to https://doc.akka.io/docs/akka/2.5.5/scala/stream/stream-integrations.html
import akka.pattern.ask
implicit val askTimeout = Timeout(5.seconds)
val words: Source[String, NotUsed] =
Source(List("hello", "hi"))
words
.mapAsync(parallelism = 5)(elem => (ref ? elem).mapTo[String])
// continue processing of the replies from the actor
.map(_.toLowerCase)
.runWith(Sink.ignore)
I cannot get it to compile, as the ? operator is not defined. As far as I know, this operator would only be defined inside an actor.
I also do not understand yet where exactly inside mapAsync my custom actor needs to be called.
Edit
https://blog.colinbreck.com/backoff-and-retry-error-handling-for-akka-streams/ contains at least parts of an example.
It looks like it is not mandatory to create an actor, i.e.
implicit val system = ActorSystem()
implicit val ec = system.dispatcher
implicit val materializer = ActorMaterializer()
val source = Source(List("232::03::14062::19965186", "232::03::14062::19965189"))
.map(cellKey => {
val splits = cellKey.split("::")
val mcc = splits(0)
val mnc = splits(1)
val lac = splits(2)
val ci = splits(3)
CellKeySource(cellKey, mcc, mnc, lac, ci)
})
.limit(2)
.mapAsyncUnordered(2)(ck => getResponse(ck.cellKey, ck.mobileCountryCode, ck.mobileNetworkCode, ck.locationArea, ck.cellKey)("<<myToken>>"))
def getResponse(cellKey: String, mobileCountryCode:String, mobileNetworkCode:String, locationArea:String, cellId:String)(token:String): Future[String] = {
RestartSource.withBackoff(
minBackoff = 10.milliseconds,
maxBackoff = 30.seconds,
randomFactor = 0.2,
maxRestarts = 2
) { () =>
val responseFuture: Future[HttpResponse] =
Http().singleRequest(HttpRequest(uri = s"https://www.googleapis.com/geolocation/v1/geolocate?key=${token}", entity = ByteString(
// TODO use proper JSON objects
s"""
|{
| "cellTowers": [
| "mobileCountryCode": $mobileCountryCode,
| "mobileNetworkCode": $mobileNetworkCode,
| "locationAreaCode": $locationArea,
| "cellId": $cellId,
| ]
|}
""".stripMargin)))
Source.fromFuture(responseFuture)
.mapAsync(parallelism = 1) {
case HttpResponse(StatusCodes.OK, _, entity, _) =>
Unmarshal(entity).to[String]
case HttpResponse(statusCode, _, _, _) =>
throw WebRequestException(statusCode.toString() )
}
}
.runWith(Sink.head)
.recover {
case _ => throw StreamFailedAfterMaxRetriesException()
}
}
val done: Future[Done] = source.runForeach(println)
done.onComplete(_ ⇒ system.terminate())
is already a (partial) answer to the question of how to integrate Akka Streams with Akka HTTP. However, it does not work: it only throws 400 errors and never terminates.
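A plausible cause of the 400 responses (an observation on the snippet above, not something confirmed in the thread) is the request itself: the cellTowers array contains bare key/value pairs instead of an object, and HttpRequest defaults to a GET while the geolocation endpoint expects a POST with a JSON entity. A sketch of a corrected request, reusing the variables from getResponse above and HttpMethods, ContentTypes, and HttpEntity from akka.http.scaladsl.model:

val json =
  s"""
     |{
     |  "cellTowers": [
     |    {
     |      "mobileCountryCode": $mobileCountryCode,
     |      "mobileNetworkCode": $mobileNetworkCode,
     |      "locationAreaCode": $locationArea,
     |      "cellId": $cellId
     |    }
     |  ]
     |}
  """.stripMargin

val responseFuture: Future[HttpResponse] =
  Http().singleRequest(HttpRequest(
    method = HttpMethods.POST, // the geolocation API expects a POST
    uri = s"https://www.googleapis.com/geolocation/v1/geolocate?key=$token",
    entity = HttpEntity(ContentTypes.`application/json`, json)
  ))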
I think you already found the API for calling the akka-http client.
Regarding your first code snippet that doesn't compile: I think there was some misunderstanding of the example itself. You expected the code in the example to work as copied, but the intention of the doc was only to demonstrate a concept: how you can delegate some long-running task out of the stream flow and then consume the result when it's ready. The ask call to an Akka actor was used for this because ask returns a Future. The authors of the doc probably just omitted the definition of the actor. You can try this example:
import java.lang.System.exit
import akka.NotUsed
import akka.actor.{Actor, ActorRef, ActorSystem, Props}
import akka.pattern.ask
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{Sink, Source}
import akka.util.Timeout
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.duration._
import scala.language.higherKinds
object App extends scala.App {
implicit val sys: ActorSystem = ActorSystem()
implicit val mat: ActorMaterializer = ActorMaterializer()
val ref: ActorRef = sys.actorOf(Props[Translator])
implicit val askTimeout: Timeout = Timeout(5.seconds)
val words: Source[String, NotUsed] = Source(List("hello", "hi"))
words
.mapAsync(parallelism = 5)(elem => (ref ? elem).mapTo[String])
.map(_.toLowerCase)
.runWith(Sink.foreach(println))
.onComplete(t => {
println(s"finished: $t")
exit(1)
})
}
class Translator extends Actor {
override def receive: Receive = {
case msg => sender() ! s"$msg!"
}
}
You must import the ask pattern from Akka:
import akka.pattern.ask
Edit: OK, sorry, I can see that you have already imported it. What is ref in your code? An ActorRef?

monitor system users on raspberry pi with akka actors

I've got a Raspberry Pi on my network with an LED strip attached to it.
My purpose is to create a jar file that will sit on the Pi, monitor system events such as logins and load average, and drive the LED based on those inputs.
To continuously monitor the logged-in users, I am trying to use Akka actors. Using the examples provided here, this is what I've gotten so far:
import com.pi4j.io.gpio.GpioFactory
import com.pi4j.io.gpio.RaspiPin
import sys.process._
import akka.actor.{Actor, Props, ActorSystem}
import scala.concurrent.duration._
val who :String = "who".!!
class Blinker extends Actor {
private def gpio = GpioFactory.getInstance
private def led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if who.contains("pi") => led.blink(250)
case x if who.contains("moocow") => println("falalalala")
}
val blinker = system.actorOf(Props(classOf[Blinker], this))
val cancellable = system.scheduler.schedule(
0 milliseconds,
50 milliseconds,
blinker,
who)
}
However, system is not recognised by my IDE (IntelliJ); it says "cannot resolve symbol".
I also have a main object like this:
object ledStrip {
def main(args: Array[String]): Unit = {
val blink = new Blinker
// blink.receive
}
}
In main, I'm not quite sure how to initialise the application.
Needless to say, this is my first time writing a Scala program.
Help?
Edit:
Here is the updated program after incorporating what Michal has said:
class Blinker extends Actor {
val who: String = "who".!!
private val gpio = GpioFactory.getInstance
private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if who.contains("pi") => led.blink(250)
case x if who.contains("moocow") => println("falalalala")
}
val system = ActorSystem()
}
object ledStrip extends Blinker {
def main(args: Array[String]): Unit = {
val blinker = system.actorOf(Props(classOf[Blinker], this))
import system.dispatcher
val cancellable =
system.scheduler.schedule(
50 milliseconds,
5000 milliseconds,
blinker,
who)
}
}
This program compiles fine, but throws the following error upon execution:
Exception in thread "main" java.lang.ExceptionInInitializerError
at ledStrip.main(ledStrip.scala)
Caused by: akka.actor.ActorInitializationException: You cannot create an instance of [ledStrip$] explicitly using the constructor (new). You have to use one of the 'actorOf' factory methods to create a new actor. See the documentation.
at akka.actor.ActorInitializationException$.apply(Actor.scala:194)
at akka.actor.Actor.$init$(Actor.scala:472)
at Blinker.<init>(ledStrip.scala:15)
at ledStrip$.<init>(ledStrip.scala:34)
at ledStrip$.<clinit>(ledStrip.scala)
... 1 more
Edit 2
Code that compiles and runs (behaviour is still not as desired: blink(1500) is never executed when user pi logs out from the shell):
object sysUser {
val who: String = "who".!!
}
class Blinker extends Actor {
private val gpio = GpioFactory.getInstance
private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if x.toString.contains("pi") => led.blink(50)
case x if x.toString.contains("moocow") => println("falalalala")
case _ => led.blink(1500)
}
}
object ledStrip {
def main(args: Array[String]): Unit = {
val system = ActorSystem()
val blinker = system.actorOf(Props[Blinker], "blinker")
import system.dispatcher
val cancellable =
system.scheduler.schedule(
50 milliseconds,
5000 milliseconds,
blinker,
sysUser.who)
}
}
Well, it looks like you haven't defined "system" anywhere. See this example for instance:
https://doc.akka.io/docs/akka/current/actors.html#here-is-another-example-that-you-can-edit-and-run-in-the-browser-
You'll find this line there:
val system = ActorSystem("pingpong")
That's what creates the ActorSystem and defines the val called "system", which you then call methods on.
In main, I don't think you want to create another instance with new Blinker; just use:
system.actorOf(Props[Blinker], "blinker")
(which you are already doing and putting it into the "blinker" val)
It seems to be just an Akka usage issue. A few things looked strange to me, so I changed them; see change1, change2, and change3 below, FYI.
class Blinker extends Actor {
val who: String = "who".!!
private val gpio = GpioFactory.getInstance
private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if who.contains("pi") => led.blink(250)
case x if who.contains("moocow") => println("falalalala")
}
}
object ledStrip { // change1
def main(args: Array[String]): Unit = {
val system = ActorSystem() // change2
val blinker = system.actorOf(Props(classOf[Blinker])) // change3
import system.dispatcher
val cancellable =
system.scheduler.schedule(
50 milliseconds,
5000 milliseconds,
blinker,
"tick") // the message content is ignored; Blinker's receive checks its own 'who' field
}
}
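A note on the remaining issue from Edit 2, which the answers above do not cover: sysUser.who is a val, so "who".!! is executed exactly once when the object is initialized, and the scheduler keeps delivering that same stale string, which is why blink(1500) is never reached after pi logs out. A sketch of one way around this, re-running the command inside the actor on every tick (Tick is an illustrative message; the pi4j calls assume the same dependency as in the question):

import akka.actor.{Actor, ActorSystem, Props}
import com.pi4j.io.gpio.{GpioFactory, RaspiPin}
import scala.concurrent.duration._
import sys.process._

case object Tick // its only job is to trigger a fresh check

class Blinker extends Actor {
  private val gpio = GpioFactory.getInstance
  private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
  def receive = {
    case Tick =>
      val who = "who".!! // re-run the command on every tick instead of reusing a stale value
      if (who.contains("pi")) led.blink(50)
      else if (who.contains("moocow")) println("falalalala")
      else led.blink(1500)
  }
}

object ledStrip {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem()
    val blinker = system.actorOf(Props[Blinker], "blinker")
    import system.dispatcher
    system.scheduler.schedule(50.milliseconds, 5000.milliseconds, blinker, Tick)
  }
}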

The Future is not complete?

import java.io.File
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Sink
import scala.concurrent.Future
object Executor extends App {
implicit val system = ActorSystem()
implicit val materializer = ActorMaterializer()
implicit val ec = system.dispatcher
import akka.stream.io._
val file = new File("res/AdviceAnimals.tsv")
import akka.stream.io.Implicits._
val foreach: Future[Long] = SynchronousFileSource(file)
.to( Sink.outputStream(()=>System.out))
.run()
foreach onComplete { v =>
println(s"the foreach is ${v.get}") // the will not be print
}
}
But if I change the Sink.outputStream(() => System.out) to Sink.ignore, the println(s"the foreach is ${v.get}") does print.
Can somebody explain why?
You are not waiting for the stream to complete; instead, your main method (the body of Executor) completes, and since the main method is done, the JVM is shut down.
What you want to do is block that thread and not exit the app before the future completes.
import scala.concurrent.{Await, Future}
import scala.concurrent.duration._
object Executor extends App {
// ...your stuff with streams...
val yourFuture: Future[Long] = ???
val result = Await.result(yourFuture, 5.seconds)
println(s"the foreach is ${result}")
// stop the actor system (or it will keep the app alive)
system.terminate()
}
Coincidentally, I created almost the same app for testing/playing with Akka Streams.
Could the imported implicits cause the problem?
This app works fine for me:
object PrintAllInFile extends App {
val file = new java.io.File("data.txt")
implicit val system = ActorSystem("test")
implicit val mat = ActorMaterializer()
implicit val ec = system.dispatcher
SynchronousFileSource(file)
.to(Sink.outputStream(() => System.out))
.run()
.onComplete(_ => system.shutdown())
}
Note the stopping of the ActorSystem in the 'onComplete'. Otherwise the app will not exit.

Improve this actor calling futures

I have an actor (Worker) which basically asks 3 other actors (Filter1, Filter2 and Filter3) for a result. If any of them returns false, it's unnecessary to wait for the others, like an "and" operation over the results. When a false response is received, a cancel message is sent to the actors to cancel the queued work and make the execution more efficient.
The filters aren't children of Worker; rather, they are a common pool of actors used by all Worker actors. I use an Agent to maintain the collection of cancelled Works. Before a particular work is processed, I check in the cancel agent whether that work was cancelled, and if so skip its execution. Cancel has a higher priority than Work, so it is always processed first.
The code is something like this.
Proxy, which creates the actor tree:
import scala.collection.mutable.HashSet
import scala.concurrent.ExecutionContext.Implicits.global
import com.typesafe.config.Config
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.ActorSystem
import akka.actor.PoisonPill
import akka.actor.Props
import akka.agent.Agent
import akka.routing.RoundRobinRouter
class Proxy extends Actor with ActorLogging {
val agent1 = Agent(new HashSet[Work])
val agent2 = Agent(new HashSet[Work])
val agent3 = Agent(new HashSet[Work])
val filter1 = context.actorOf(Props(Filter1(agent1)).withDispatcher("priorityMailBox-dispatcher")
.withRouter(RoundRobinRouter(24)), "filter1")
val filter2 = context.actorOf(Props(Filter2(agent2)).withDispatcher("priorityMailBox-dispatcher")
.withRouter(RoundRobinRouter(24)), "filter2")
val filter3 = context.actorOf(Props(Filter3(agent3)).withDispatcher("priorityMailBox-dispatcher")
.withRouter(RoundRobinRouter(24)), "filter3")
//val workerRouter = context.actorOf(Props[SerialWorker].withRouter(RoundRobinRouter(24)), name = "workerRouter")
val workerRouter = context.actorOf(Props(new Worker(filter1, filter2, filter3)).withRouter(RoundRobinRouter(24)), name = "workerRouter")
def receive = {
case w: Work =>
workerRouter forward w
}
}
Worker:
import scala.concurrent.Await
import scala.concurrent.ExecutionContext.Implicits.global
import scala.concurrent.Future
import scala.concurrent.duration.DurationInt
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.Props
import akka.actor.actorRef2Scala
import akka.pattern.ask
import akka.pattern.pipe
import akka.util.Timeout
import akka.actor.ActorRef
import akka.routing.RoundRobinRouter
import akka.agent.Agent
import scala.collection.mutable.HashSet
class Worker(filter1: ActorRef, filter2: ActorRef, filter3: ActorRef) extends Actor with ActorLogging {
implicit val timeout = Timeout(30.seconds)
def receive = {
case w:Work =>
val start = System.currentTimeMillis();
val futureF3 = (filter3 ? w).mapTo[Response]
val futureF2 = (filter2 ? w).mapTo[Response]
val futureF1 = (filter1 ? w).mapTo[Response]
val aggResult = Future.find(List(futureF3, futureF2, futureF1)) { res => !res.reponse }
Await.result(aggResult, timeout.duration) match {
case None =>
Nqueen.fact(10500000L)
log.info(s"[${w.message}] Procesado mensaje TRUE en ${System.currentTimeMillis() - start} ms");
sender ! WorkResponse(w, true)
case _ =>
filter1 ! Cancel(w)
filter2 ! Cancel(w)
filter3 ! Cancel(w)
log.info(s"[${w.message}] Procesado mensaje FALSE en ${System.currentTimeMillis() - start} ms");
sender ! WorkResponse(w, false)
}
}
}
and Filters:
import scala.collection.mutable.HashSet
import scala.util.Random
import akka.actor.Actor
import akka.actor.ActorLogging
import akka.actor.actorRef2Scala
import akka.agent.Agent
trait CancellableFilter { this: Actor with ActorLogging =>
//val canceledJobs = new HashSet[Int]
val agent: Agent[HashSet[Work]]
def cancelReceive: Receive = {
case Cancel(w) =>
agent.send(_ += w)
//log.info(s"[$t] El trabajo se cancelara (si llega...)")
}
def cancelled(w: Work): Boolean =
if (agent.get.contains(w)) {
agent.send(_ -= w)
true
} else {
false
}
}
abstract class Filter extends Actor with ActorLogging { this: CancellableFilter =>
val random = new Random(System.currentTimeMillis())
def response: Boolean
val timeToWait: Int
val timeToExecutor: Long
def receive = cancelReceive orElse {
case w:Work if !cancelled(w) =>
//log.info(s"[$t] Llego trabajo")
Thread.sleep(timeToWait)
Nqueen.fact(timeToExecutor)
val r = Response(response)
//log.info(s"[$t] Respondio ${r.reponse}")
sender ! r
}
}
object Filter1 {
def apply(agente: Agent[HashSet[Work]]) = new Filter with CancellableFilter {
val timeToWait = 74
val timeToExecutor = 42000000L
val agent = agente
def response = true //random.nextBoolean
}
}
object Filter2 {
def apply(agente: Agent[HashSet[Work]]) = new Filter with CancellableFilter {
val timeToWait = 47
val timeToExecutor = 21000000L
val agent = agente
def response = true //random.nextBoolean
}
}
object Filter3 {
def apply(agente: Agent[HashSet[Work]]) = new Filter with CancellableFilter {
val timeToWait = 47
val timeToExecutor = 21000000L
val agent = agente
def response = true //random.nextBoolean
}
}
Basically, I think the Worker code is ugly and I want to make it better. Could you help me improve it?
Another point I want to improve is the cancel message. As I don't know which of the filters are done, I need to Cancel all of them, so at least one cancel is redundant (since that filter's work is already completed).
It is minor, but why don't you store the filters as a sequence? filters.foreach(_ ! Cancel(w)) is nicer than
filter1 ! Cancel(w)
filter2 ! Cancel(w)
filter3 ! Cancel(w)
Same for other cases:
class Worker(filter1: ActorRef, filter2: ActorRef, filter3: ActorRef) extends Actor with ActorLogging {
private val filters = Seq(filter1, filter2, filter3)
implicit val timeout = Timeout(30.seconds)
def receive = {
case w:Work =>
val start = System.currentTimeMillis();
val futures = filters.map { f =>
(f ? w).mapTo[Response]
}
val aggResult = Future.find(futures) { res => !res.reponse }
Await.result(aggResult, timeout.duration) match {
case None =>
Nqueen.fact(10500000L)
log.info(s"[${w.message}] Procesado mensaje TRUE en ${System.currentTimeMillis() - start} ms");
sender ! WorkResponse(w, true)
case _ =>
filters.foreach(_ ! Cancel(w))
log.info(s"[${w.message}] Procesado mensaje FALSE en ${System.currentTimeMillis() - start} ms");
sender ! WorkResponse(w, false)
}
}
}
You may also consider writing the constructor as Worker(filters: ActorRef*) if you do not need to enforce exactly three filters. I think it is okay to send off one redundant cancel (the alternatives I can see are overly complicated). I'm not sure, but if the filters are created very quickly, their Random instances may get initialized with the same seed value.
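A further possible refinement, since the question asks how to make Worker nicer: the Await.result inside receive blocks the worker's thread. It can be avoided with the pipe pattern; a sketch below, assuming the same Work/Response/WorkResponse/Cancel types and the same imports as the question's Worker (logging and the Nqueen call are omitted):

class Worker(filters: ActorRef*) extends Actor with ActorLogging {
  import akka.pattern.{ask, pipe}
  import context.dispatcher

  implicit val timeout = Timeout(30.seconds)

  def receive = {
    case w: Work =>
      val replyTo = sender() // capture before the asynchronous callback runs
      val futures = filters.toList.map(f => (f ? w).mapTo[Response])
      Future.find(futures)(res => !res.reponse)
        .map {
          case None => WorkResponse(w, true)
          case Some(_) =>
            filters.foreach(_ ! Cancel(w))
            WorkResponse(w, false)
        }
        .pipeTo(replyTo) // reply without ever blocking inside receive
  }
}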