Camel routing to akka endpoint throws ActorNotRegisteredException - scala

I want to set a Camel endpoint to an Akka component, like akka://some-system/user/myactor.
I have the following code:
package rsmev

import akka.actor.{ActorRef, Actor, Props, ActorSystem}
import akka.camel.{CamelExtension, CamelMessage, Consumer}
import org.apache.camel.builder.RouteBuilder

object Frontend {
  def main(args: Array[String]) {
    val system = ActorSystem("my_system")
    val actor = system.actorOf(Props[ConsumerActor], "myconsumer")
    val context = CamelExtension(system).context
    context.addRoutes(new RouteBuilder() {
      override def configure(): Unit = {
        from("direct:start")
          .to("akka://my_system/user/myconsumer")
      }
    })
    context.start()
    Thread.sleep(10 * 1000)
    val producer = context.createProducerTemplate()
    producer.sendBody("direct:start", "HELLO!")
    Thread.sleep(10 * 1000)
  }
}

class ConsumerActor extends Actor {
  override def receive = {
    case _ => println("OK")
  }
}
When executing this code I get:
akka.camel.ActorNotRegisteredException: Actor [akka://my_system/user/myconsumer] doesn't exist
at akka.camel.internal.component.ActorProducer$$anonfun$actorFor$1.apply(ActorComponent.scala:175)
at akka.camel.internal.component.ActorProducer$$anonfun$actorFor$1.apply(ActorComponent.scala:175)
at scala.Option.getOrElse(Option.scala:121)
at akka.camel.internal.component.ActorProducer.actorFor(ActorComponent.scala:175)
at akka.camel.internal.component.ActorProducer.fireAndForget(ActorComponent.scala:172)
at akka.camel.internal.component.ActorProducer.processExchangeAdapter(ActorComponent.scala:143)
at akka.camel.internal.component.ActorProducer.process(ActorComponent.scala:120)
at org.apache.camel.processor.SendProcessor.process(SendProcessor.java:113)
at org.apache.camel.processor.RedeliveryErrorHandler.process(RedeliveryErrorHandler.java:416)
at org.apache.camel.processor.CamelInternalProcessor.process(CamelInternalProcessor.java:191)
at org.apache.camel.processor.CamelInternalProcessor.process(CamelInternalProcessor.java:191)
at org.apache.camel.component.direct.DirectProducer.process(DirectProducer.java:51)
at org.apache.camel.processor.CamelInternalProcessor.process(CamelInternalProcessor.java:191)
at org.apache.camel.processor.UnitOfWorkProducer.process(UnitOfWorkProducer.java:73)
at org.apache.camel.impl.ProducerCache$2.doInProducer(ProducerCache.java:375)
at org.apache.camel.impl.ProducerCache$2.doInProducer(ProducerCache.java:343)
at org.apache.camel.impl.ProducerCache.doInProducer(ProducerCache.java:233)
at org.apache.camel.impl.ProducerCache.sendExchange(ProducerCache.java:343)
at org.apache.camel.impl.ProducerCache.send(ProducerCache.java:184)
at org.apache.camel.impl.DefaultProducerTemplate.send(DefaultProducerTemplate.java:124)
at org.apache.camel.impl.DefaultProducerTemplate.sendBody(DefaultProducerTemplate.java:137)
at org.apache.camel.impl.DefaultProducerTemplate.sendBody(DefaultProducerTemplate.java:144)
at rsmev.Frontend$.main(Frontend.scala:17)
at rsmev.Frontend.main(Frontend.scala)
Exception in thread "main" org.apache.camel.CamelExecutionException: Exception occurred during execution on the exchange: Exchange[Message: HELLO!]
at org.apache.camel.util.ObjectHelper.wrapCamelExecutionException(ObjectHelper.java:1379)
at org.apache.camel.util.ExchangeHelper.extractResultBody(ExchangeHelper.java:623)
at org.apache.camel.impl.DefaultProducerTemplate.extractResultBody(DefaultProducerTemplate.java:467)
at org.apache.camel.impl.DefaultProducerTemplate.extractResultBody(DefaultProducerTemplate.java:463)
at org.apache.camel.impl.DefaultProducerTemplate.sendBody(DefaultProducerTemplate.java:139)
at org.apache.camel.impl.DefaultProducerTemplate.sendBody(DefaultProducerTemplate.java:144)
at rsmev.Frontend$.main(Frontend.scala:17)
at rsmev.Frontend.main(Frontend.scala)
Caused by: akka.camel.ActorNotRegisteredException: Actor [akka://my_system/user/myconsumer] doesn't exist
at akka.camel.internal.component.ActorProducer$$anonfun$actorFor$1.apply(ActorComponent.scala:175)
at akka.camel.internal.component.ActorProducer$$anonfun$actorFor$1.apply(ActorComponent.scala:175)
at scala.Option.getOrElse(Option.scala:121)
at akka.camel.internal.component.ActorProducer.actorFor(ActorComponent.scala:175)
at akka.camel.internal.component.ActorProducer.fireAndForget(ActorComponent.scala:172)
at akka.camel.internal.component.ActorProducer.processExchangeAdapter(ActorComponent.scala:143)
at akka.camel.internal.component.ActorProducer.process(ActorComponent.scala:120)
at org.apache.camel.processor.SendProcessor.process(SendProcessor.java:113)
at org.apache.camel.processor.RedeliveryErrorHandler.process(RedeliveryErrorHandler.java:416)
at org.apache.camel.processor.CamelInternalProcessor.process(CamelInternalProcessor.java:191)
at org.apache.camel.processor.CamelInternalProcessor.process(CamelInternalProcessor.java:191)
at org.apache.camel.component.direct.DirectProducer.process(DirectProducer.java:51)
at org.apache.camel.processor.CamelInternalProcessor.process(CamelInternalProcessor.java:191)
at org.apache.camel.processor.UnitOfWorkProducer.process(UnitOfWorkProducer.java:73)
at org.apache.camel.impl.ProducerCache$2.doInProducer(ProducerCache.java:375)
at org.apache.camel.impl.ProducerCache$2.doInProducer(ProducerCache.java:343)
at org.apache.camel.impl.ProducerCache.doInProducer(ProducerCache.java:233)
at org.apache.camel.impl.ProducerCache.sendExchange(ProducerCache.java:343)
at org.apache.camel.impl.ProducerCache.send(ProducerCache.java:184)
at org.apache.camel.impl.DefaultProducerTemplate.send(DefaultProducerTemplate.java:124)
at org.apache.camel.impl.DefaultProducerTemplate.sendBody(DefaultProducerTemplate.java:137)
... 3 more
I can't figure out why this happens, because the actor myconsumer is registered.

I finally figured out what the problem was: you don't need to start the Camel context. This is the right way to use it:
package rsmev

import akka.actor.{ActorRef, Actor, Props, ActorSystem}
import akka.camel.{CamelExtension, CamelMessage, Consumer}
import org.apache.camel.builder.RouteBuilder
import akka.camel._

object Frontend {
  def main(args: Array[String]) {
    val system = ActorSystem("system")
    val actor = system.actorOf(Props[ConsumerActor], "myconsumer")
    val context = CamelExtension(system).context
    context.addRoutes(new RouteBuilder() {
      override def configure(): Unit = {
        from("direct:start")
          .to("akka://system/user/myconsumer")
      }
    })
    Thread.sleep(5 * 1000)
    val producer = context.createProducerTemplate()
    producer.sendBody("direct:start", "HELLO!")
    Thread.sleep(10 * 1000)
  }
}

class ConsumerActor extends Actor {
  override def receive = {
    case _ => println("OK")
  }
}
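As an aside, akka-camel can also bind an actor to a Camel endpoint directly through the Consumer trait (already imported above), in which case no explicit RouteBuilder is needed. A minimal sketch, not part of the original post; the endpoint URI is just an illustration:
import akka.camel.{CamelMessage, Consumer}

// Sketch of the Consumer-based alternative: akka-camel registers the
// endpoint itself when this actor starts.
class DirectConsumerActor extends Consumer {
  def endpointUri = "direct:start"

  override def receive = {
    case msg: CamelMessage => println("OK: " + msg.body)
  }
}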

Related

How to bind Slick dependency with Lagom?

So, I have this dependency, which is used to create tables and interact with Postgres. Here is a sample class:
class ConfigTable {
  this: DBFactory =>

  import driver.api._

  implicit val configKeyMapper = MappedColumnType.base[ConfigKey, String](e => e.toString, s => ConfigKey.withName(s))

  val configs = TableQuery[ConfigMapping]

  class ConfigMapping(tag: Tag) extends Table[Config](tag, "configs") {
    def key = column[ConfigKey]("key")
    def value = column[String]("value")

    def * = (key, value) <> (Config.tupled, Config.unapply _)
  }

  /**
   * add config
   *
   * @param config
   * @return
   */
  def add(config: Config): Try[Config] = try {
    sync(db.run(configs += config)) match {
      case 1 => Success(config)
      case _ => Failure(new Exception("Unable to add config"))
    }
  } catch {
    case ex: PSQLException =>
      if (ex.getMessage.contains("duplicate key value")) Failure(new Exception("alt id already exists."))
      else Failure(new Exception(ex.getMessage))
  }

  def get(key: ConfigKey): Option[Config] = sync(db.run(configs.filter(x => x.key === key).result)).headOption

  def getAll(): Seq[Config] = sync(db.run(configs.result))
}

object ConfigTable extends ConfigTable with PSQLComponent
PSQLComponent is the abstraction for the database meta configuration:
import slick.jdbc.PostgresProfile

trait PSQLComponent extends DBFactory {
  val driver = PostgresProfile

  import driver.api.Database

  val db: Database = Database.forConfig("db.default")
}
DBFactory is again an abstraction:
import slick.jdbc.JdbcProfile

trait DBFactory {
  val driver: JdbcProfile

  import driver.api._

  val db: Database
}
application.conf:
db.default {
  driver = "org.postgresql.Driver"
  url = "jdbc:postgresql://localhost:5432/db"
  user = "user"
  password = "pass"
  hikaricp {
    minimumIdle = ${db.default.async-executor.minConnections}
    maximumPoolSize = ${db.default.async-executor.maxConnections}
  }
}

jdbc-defaults.slick.profile = "slick.jdbc.PostgresProfile$"

lagom.persistence.jdbc.create-tables.auto = false
I compile and publish this dependency to Nexus and am trying to use it in my Lagom microservice.
Here is the loader class:
class SlickExapleAppLoader extends LagomApplicationLoader {
  override def load(context: LagomApplicationContext): LagomApplication = new SlickExampleApp(context) {
    override def serviceLocator: ServiceLocator = NoServiceLocator
  }

  override def loadDevMode(context: LagomApplicationContext): LagomApplication = new SlickExampleApp(context) with LagomDevModeComponents {
  }

  override def describeService = Some(readDescriptor[SlickExampleLMSServiceImpl])
}

abstract class SlickExampleApp(context: LagomApplicationContext)
  extends LagomApplication(context)
    // No idea which to use or how; nothing clear from the docs either.
    // with ReadSideJdbcPersistenceComponents
    // with ReadSideSlickPersistenceComponents
    // with SlickPersistenceComponents
    with AhcWSComponents {

  wire[SlickExampleScheduler]
}
I'm trying to implement it in this scheduler:
class SlickExampleScheduler @Inject()(lmsService: LMSService,
                                      configuration: Configuration)(implicit ec: ExecutionContext) {

  val brofile = `SomeDomainObject`

  val gson = new Gson()
  val concurrency = Runtime.getRuntime.availableProcessors() * 10

  implicit val timeout: Timeout = 3.minute
  implicit val system: ActorSystem = ActorSystem("LMSActorSystem")
  implicit val materializer: ActorMaterializer = ActorMaterializer()

  // Getting ExceptionInInitializerError here for ConfigTable ===> ExceptionLine
  val schedulerImplDao = new SchedulerImplDao(ConfigTable)

  def hitLMSAPI = {
    println("=============>1")
    schedulerImplDao.doSomething()
  }

  system.scheduler.schedule(2.seconds, 2.seconds) {
    println("=============>")
    hitLMSAPI
  }
}
I'm not sure if this is the correct way, and if it's not, what the correct way of doing this is. It is a project requirement to keep the data models separate from the service, for the obvious reasons of reusability.
Exception Stack:
17:50:38.666 [info] akka.cluster.Cluster(akka://lms-impl-application) [sourceThread=ForkJoinPool-1-worker-1, akkaTimestamp=12:20:38.665UTC, akkaSource=akka.cluster.Cluster(akka://lms-impl-application), sourceActorSystem=lms-impl-application] - Cluster Node [akka.tcp://lms-impl-application@127.0.0.1:45805] - Started up successfully
17:50:38.707 [info] akka.cluster.Cluster(akka://lms-impl-application) [sourceThread=lms-impl-application-akka.actor.default-dispatcher-6, akkaTimestamp=12:20:38.707UTC, akkaSource=akka.cluster.Cluster(akka://lms-impl-application), sourceActorSystem=lms-impl-application] - Cluster Node [akka.tcp://lms-impl-application@127.0.0.1:45805] - No seed-nodes configured, manual cluster join required
java.lang.ExceptionInInitializerError
at com.slick.init.impl.SlickExampleScheduler.<init>(SlickExampleScheduler.scala:29)
at com.slick.init.impl.SlickExampleApp.<init>(SlickExapleAppLoader.scala:42)
at com.slick.init.impl.SlickExapleAppLoader$$anon$2.<init>(SlickExapleAppLoader.scala:17)
at com.slick.init.impl.SlickExapleAppLoader.loadDevMode(SlickExapleAppLoader.scala:17)
at com.lightbend.lagom.scaladsl.server.LagomApplicationLoader.load(LagomApplicationLoader.scala:76)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$5(LagomReloadableDevServerStart.scala:176)
at play.utils.Threads$.withContextClassLoader(Threads.scala:21)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$3(LagomReloadableDevServerStart.scala:173)
at scala.Option.map(Option.scala:163)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$2(LagomReloadableDevServerStart.scala:149)
at scala.util.Success.flatMap(Try.scala:251)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$1(LagomReloadableDevServerStart.scala:147)
at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:658)
at scala.util.Success.$anonfun$map$1(Try.scala:255)
at scala.util.Success.map(Try.scala:213)
at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: java.lang.NullPointerException
at com.example.db.models.LoginTable.<init>(LoginTable.scala:29)
at com.example.db.models.LoginTable$.<init>(LoginTable.scala:293)
at com.example.db.models.LoginTable$.<clinit>(LoginTable.scala)
... 24 more
This is how it is working:
abstract class SlickExampleApp(context: LagomApplicationContext) extends LagomApplication(context)
  with SlickPersistenceComponents with AhcWSComponents {

  override implicit lazy val actorSystem: ActorSystem = ActorSystem("LMSActorSystem")
  override lazy val materializer: ActorMaterializer = ActorMaterializer()
  override lazy val lagomServer = serverFor[SlickExampleLMSService](wire[SlickExampleLMSServiceImpl])

  lazy val externalService = serviceClient.implement[LMSService]

  override def connectionPool: ConnectionPool = new HikariCPConnectionPool(environment)

  override def jsonSerializerRegistry: JsonSerializerRegistry = new JsonSerializerRegistry {
    override def serializers: immutable.Seq[JsonSerializer[_]] = Vector.empty
  }

  val loginTable = wire[LoginTable]

  wire[SlickExampleScheduler]
}
One thing I'd like to report: the Lagom docs about the application.conf configuration for Slick are not correct; they misled me for two days. Then I dug into the library code, and this is how it goes:
private val readSideConfig = system.settings.config.getConfig("lagom.persistence.read-side.jdbc")
private val jdbcConfig = system.settings.config.getConfig("lagom.persistence.jdbc")

private val createTables = jdbcConfig.getConfig("create-tables")
val autoCreateTables: Boolean = createTables.getBoolean("auto")

// users can disable the usage of jndiDbName for userland read-side operations by
// setting the jndiDbName to null. In which case we fallback to slick.db.
// slick.db must be defined otherwise the application will fail to start
val db = {
  if (readSideConfig.hasPath("slick.jndiDbName")) {
    new InitialContext()
      .lookup(readSideConfig.getString("slick.jndiDbName"))
      .asInstanceOf[Database]
  } else if (readSideConfig.hasPath("slick.db")) {
    Database.forConfig("slick.db", readSideConfig)
  } else {
    throw new RuntimeException("Cannot start because read-side database configuration is missing. " +
      "You must define either 'lagom.persistence.read-side.jdbc.slick.jndiDbName' or 'lagom.persistence.read-side.jdbc.slick.db' in your application.conf.")
  }
}

val profile = DatabaseConfig.forConfig[JdbcProfile]("slick", readSideConfig).profile
The configuration it requires is quite different from the one suggested in the docs.
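Working backwards from that snippet, the read-side settings live under lagom.persistence.read-side.jdbc.slick. A sketch of what application.conf would then need, reconstructed from the library code above with placeholder connection values:
lagom.persistence.read-side.jdbc.slick {
  profile = "slick.jdbc.PostgresProfile$"
  db {
    driver = "org.postgresql.Driver"
    url = "jdbc:postgresql://localhost:5432/db"
    user = "user"
    password = "pass"
  }
}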

Spark Scala UDP receive on listening port

The example mentioned in
http://spark.apache.org/docs/latest/streaming-programming-guide.html
lets me receive data packets over a TCP stream, listening on port 9999:
import org.apache.spark._
import org.apache.spark.streaming._
import org.apache.spark.streaming.StreamingContext._ // not necessary since Spark 1.3

// Create a local StreamingContext with two working threads and batch interval of 1 second.
// The master requires 2 cores to prevent a starvation scenario.
val conf = new SparkConf().setMaster("local[2]").setAppName("NetworkWordCount")
val ssc = new StreamingContext(conf, Seconds(1))

// Create a DStream that will connect to hostname:port, like localhost:9999
val lines = ssc.socketTextStream("localhost", 9999)

// Split each line into words
val words = lines.flatMap(_.split(" "))

import org.apache.spark.streaming.StreamingContext._ // not necessary since Spark 1.3
// Count each word in each batch
val pairs = words.map(word => (word, 1))
val wordCounts = pairs.reduceByKey(_ + _)

// Print the first ten elements of each RDD generated in this DStream to the console
wordCounts.print()

ssc.start()             // Start the computation
ssc.awaitTermination()  // Wait for the computation to terminate
I am able to send data over TCP by creating a data server on my Linux system with:
$ nc -lk 9999
Question
I need to receive a stream from an Android phone streaming over UDP, but the Scala/Spark
val lines = ssc.socketTextStream("localhost", 9999)
receives ONLY TCP streams.
How can I receive UDP streams in a similarly simple manner using Scala+Spark and create a Spark DStream?
There isn't anything built in, but it's not too much work to get it done yourself. Here is a simple solution I made based on a custom UdpSocketInputDStream[T]:
import java.io._
import java.net.{ConnectException, DatagramPacket, DatagramSocket, InetAddress}

import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.StreamingContext
import org.apache.spark.streaming.dstream.ReceiverInputDStream
import org.apache.spark.streaming.receiver.Receiver

import scala.reflect.ClassTag
import scala.util.control.NonFatal

class UdpSocketInputDStream[T: ClassTag](
    _ssc: StreamingContext,
    host: String,
    port: Int,
    bytesToObjects: InputStream => Iterator[T],
    storageLevel: StorageLevel
) extends ReceiverInputDStream[T](_ssc) {

  def getReceiver(): Receiver[T] = {
    new UdpSocketReceiver(host, port, bytesToObjects, storageLevel)
  }
}

class UdpSocketReceiver[T: ClassTag](host: String,
                                     port: Int,
                                     bytesToObjects: InputStream => Iterator[T],
                                     storageLevel: StorageLevel) extends Receiver[T](storageLevel) {

  var udpSocket: DatagramSocket = _

  override def onStart(): Unit = {
    try {
      udpSocket = new DatagramSocket(port, InetAddress.getByName(host))
    } catch {
      case e: ConnectException =>
        restart(s"Error connecting to $port", e)
        return
    }

    // Start the thread that receives data over a connection
    new Thread("Udp Socket Receiver") {
      setDaemon(true)

      override def run() {
        receive()
      }
    }.start()
  }

  /** Create a socket connection and receive data until receiver is stopped */
  def receive() {
    try {
      val buffer = new Array[Byte](2048)

      // Create a packet to receive data into the buffer
      val packet = new DatagramPacket(buffer, buffer.length)
      udpSocket.receive(packet)

      val iterator = bytesToObjects(new ByteArrayInputStream(packet.getData, packet.getOffset, packet.getLength))
      // Now loop forever, waiting to receive packets and printing them.
      while (!isStopped() && iterator.hasNext) {
        store(iterator.next())
      }

      if (!isStopped()) {
        restart("Udp socket data stream had no more data")
      }
    } catch {
      case NonFatal(e) =>
        restart("Error receiving data", e)
    } finally {
      onStop()
    }
  }

  override def onStop(): Unit = {
    synchronized {
      if (udpSocket != null) {
        udpSocket.close()
        udpSocket = null
      }
    }
  }
}
In order to get StreamingContext to add a method on itself, we enrich it with an implicit class:
import org.apache.spark.streaming.dstream.InputDStream

object Implicits {

  implicit class StreamingContextOps(val ssc: StreamingContext) extends AnyVal {
    def udpSocketStream[T: ClassTag](host: String,
                                     port: Int,
                                     converter: InputStream => Iterator[T],
                                     storageLevel: StorageLevel): InputDStream[T] = {
      new UdpSocketInputDStream(ssc, host, port, converter, storageLevel)
    }
  }
}
And here is how you call it all:
import java.io.{BufferedReader, InputStream, InputStreamReader}
import java.nio.charset.StandardCharsets

import org.apache.spark.SparkContext
import org.apache.spark.storage.StorageLevel
import org.apache.spark.streaming.dstream.InputDStream
import org.apache.spark.streaming.{Seconds, StreamingContext}

import scala.reflect.ClassTag

object TestRunner {
  import Implicits._

  def main(args: Array[String]): Unit = {
    val sparkContext = new SparkContext("local[*]", "udpTest")
    val ssc = new StreamingContext(sparkContext, Seconds(4))

    val stream = ssc.udpSocketStream("localhost",
                                     3003,
                                     bytesToLines,
                                     StorageLevel.MEMORY_AND_DISK_SER_2)

    stream.print()

    ssc.start()
    ssc.awaitTermination()
  }

  def bytesToLines(inputStream: InputStream): Iterator[String] = {
    val dataInputStream = new BufferedReader(
      new InputStreamReader(inputStream, StandardCharsets.UTF_8))

    new NextIterator[String] {
      protected override def getNext(): String = {
        val nextValue = dataInputStream.readLine()
        if (nextValue == null) {
          finished = true
        }
        nextValue
      }

      protected override def close() {
        dataInputStream.close()
      }
    }
  }

  abstract class NextIterator[U] extends Iterator[U] {
    protected var finished = false
    private var gotNext = false
    private var nextValue: U = _
    private var closed = false

    override def next(): U = {
      if (!hasNext) {
        throw new NoSuchElementException("End of stream")
      }
      gotNext = false
      nextValue
    }

    override def hasNext: Boolean = {
      if (!finished) {
        if (!gotNext) {
          nextValue = getNext()
          if (finished) {
            closeIfNeeded()
          }
          gotNext = true
        }
      }
      !finished
    }

    def closeIfNeeded() {
      if (!closed) {
        closed = true
        close()
      }
    }

    protected def getNext(): U

    protected def close()
  }
}
Most of this code is taken from the SocketInputDStream[T] provided by Spark; I simply reused it. I also took the code for the NextIterator, which is used by bytesToLines; all it does is consume a line from the packet and transform it into a String. If you have more complex logic, you can provide your own implementation as the converter: InputStream => Iterator[T] parameter.
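For instance, a hypothetical converter (not part of the original answer) that emits each whole packet as one String, rather than splitting it into lines, could look like this; it would be passed to udpSocketStream in place of bytesToLines:
import java.io.InputStream
import java.nio.charset.StandardCharsets

import scala.io.Source

object PacketConverters {
  // Hypothetical converter: emit the entire packet payload as a single
  // String element instead of splitting it into lines.
  def bytesToWholePacket(inputStream: InputStream): Iterator[String] =
    Iterator.single(
      Source.fromInputStream(inputStream, StandardCharsets.UTF_8.name()).mkString)
}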
Testing it with a simple UDP packet:
echo -n "hello hello hello!" >/dev/udp/localhost/3003
Yields:
-------------------------------------------
Time: 1482676728000 ms
-------------------------------------------
hello hello hello!
Of course, this has to be tested further. It also has a hidden assumption that the buffer created for each DatagramPacket is 2048 bytes, which is perhaps something you'll want to change.
The problem with Yuval Itzchakov's solution is that the receiver handles one message and then restarts itself. Just replace the restart with a recursive call to receive, as shown below.
def receive() {
  try {
    val buffer = new Array[Byte](200000)

    // Create a packet to receive data into the buffer
    val packet = new DatagramPacket(buffer, buffer.length)
    udpSocket.receive(packet)

    val iterator = bytesToObjects(new ByteArrayInputStream(packet.getData, packet.getOffset, packet.getLength))
    // Now loop forever, waiting to receive packets and printing them.
    while (!isStopped() && iterator.hasNext) {
      store(iterator.next())
    }

    if (!isStopped()) {
      // restart("Udp socket data stream had no more data")
      receive()
    }
  } catch {
    case NonFatal(e) =>
      restart("Error receiving data", e)
  } finally {
    onStop()
  }
}
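A variant worth considering (my own sketch, not from either answer): loop with a while instead of recursing, so a long-lived receiver doesn't keep growing the call stack. It assumes the same UdpSocketReceiver fields as above (udpSocket, bytesToObjects):
def receive() {
  try {
    val buffer = new Array[Byte](200000)

    // Keep receiving packets until the receiver is stopped, without recursion.
    while (!isStopped()) {
      // A fresh packet per datagram so its length is reset every time.
      val packet = new DatagramPacket(buffer, buffer.length)
      udpSocket.receive(packet)

      val iterator = bytesToObjects(
        new ByteArrayInputStream(packet.getData, packet.getOffset, packet.getLength))
      while (!isStopped() && iterator.hasNext) {
        store(iterator.next())
      }
    }
  } catch {
    case NonFatal(e) =>
      restart("Error receiving data", e)
  } finally {
    onStop()
  }
}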

An akka streams function that creates a sink and a source that emits whatever the sink receives

The goal is to implement a function with this signature
def bindedSinkAndSource[A]: (Sink[A, Any], Source[A, Any]) = ???
where the returned source emits whatever the sink receives.
My primary goal is to implement a websocket forwarder by means of the handleWebSocketMessages directive.
The forwarder graph is:
leftReceiver ~> rightEmitter
leftEmitter <~ rightReceiver
where leftReceiver and leftEmitter are the in and out of the left endpoint's handler flow, and rightReceiver and rightEmitter are the in and out of the right endpoint's handler flow.
For example:
import akka.NotUsed
import akka.http.scaladsl.model.ws.Message
import akka.http.scaladsl.server.Directive.addByNameNullaryApply
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

def buildHandlers(): Route = {
  val (leftReceiver, rightEmitter) = bindedSinkAndSource[Message];
  val (rightReceiver, leftEmitter) = bindedSinkAndSource[Message];

  val leftHandlerFlow = Flow.fromSinkAndSource(leftReceiver, leftEmitter)
  val rightHandlerFlow = Flow.fromSinkAndSource(rightReceiver, rightEmitter)

  pathPrefix("leftEndpointChannel") {
    handleWebSocketMessages(leftHandlerFlow)
  } ~
  pathPrefix("rightEndpointChannel") {
    handleWebSocketMessages(rightHandlerFlow)
  }
}
All the ideas that came to me were frustrated by the fact that the handleWebSocketMessages(..) directive doesn't give access to the materialized value of the received flow.
I found a way to achieve the goal, but there could be shorter and easier ways. If you know one, please don't hesitate to add your knowledge.
import org.reactivestreams.Publisher
import org.reactivestreams.Subscriber
import org.reactivestreams.Subscription

import akka.NotUsed
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source

def bindedSinkAndSource[A]: (Sink[A, NotUsed], Source[A, NotUsed]) = {

  class Binder extends Subscriber[A] with Publisher[A] { binder =>
    var oUpStreamSubscription: Option[Subscription] = None;
    var oDownStreamSubscriber: Option[Subscriber[_ >: A]] = None;
    var pendingRequestFromDownStream: Option[Long] = None;
    var pendingCancelFromDownStream: Boolean = false;

    def onSubscribe(upStreamSubscription: Subscription): Unit = {
      this.oUpStreamSubscription match {
        case Some(_) => upStreamSubscription.cancel // rule 2-5
        case None =>
          this.oUpStreamSubscription = Some(upStreamSubscription);
          if (pendingRequestFromDownStream.isDefined) {
            upStreamSubscription.request(pendingRequestFromDownStream.get)
            pendingRequestFromDownStream = None
          }
          if (pendingCancelFromDownStream) {
            upStreamSubscription.cancel()
            pendingCancelFromDownStream = false
          }
      }
    }

    def onNext(a: A): Unit = {
      oDownStreamSubscriber.get.onNext(a)
    }

    def onComplete(): Unit = {
      oDownStreamSubscriber.foreach { _.onComplete() };
      this.oUpStreamSubscription = None
    }

    def onError(error: Throwable): Unit = {
      oDownStreamSubscriber.foreach { _.onError(error) };
      this.oUpStreamSubscription = None
    }

    def subscribe(downStreamSubscriber: Subscriber[_ >: A]): Unit = {
      assert(this.oDownStreamSubscriber.isEmpty);
      this.oDownStreamSubscriber = Some(downStreamSubscriber);
      downStreamSubscriber.onSubscribe(new Subscription() {

        def request(n: Long): Unit = {
          binder.oUpStreamSubscription match {
            case Some(usSub) => usSub.request(n);
            case None =>
              assert(binder.pendingRequestFromDownStream.isEmpty);
              binder.pendingRequestFromDownStream = Some(n);
          }
        };

        def cancel(): Unit = {
          binder.oUpStreamSubscription match {
            case Some(usSub) => usSub.cancel();
            case None =>
              assert(binder.pendingCancelFromDownStream == false);
              binder.pendingCancelFromDownStream = true;
          }
          binder.oDownStreamSubscriber = None
        }
      })
    }
  }

  val binder = new Binder;
  val receiver = Sink.fromSubscriber(binder);
  val emitter = Source.fromPublisher(binder);
  (receiver, emitter);
}
Note that the instance vars of the Binder class may suffer from concurrency problems if the sink and source this method creates are not fused later by the user. If they are not fused, all accesses to these variables should be enclosed in synchronized blocks. Another solution would be to ensure that the sink and the source are materialized in an execution context with a single thread.
Two days later, I discovered MergeHub and BroadcastHub. Using them, the answer is much shorter:
import akka.stream.Materializer

def bindedSinkAndSource[T](implicit sm: Materializer): (Sink[T, NotUsed], Source[T, NotUsed]) = {
  import akka.stream.scaladsl.BroadcastHub;
  import akka.stream.scaladsl.MergeHub;
  import akka.stream.scaladsl.Keep;

  MergeHub.source[T](perProducerBufferSize = 8).toMat(BroadcastHub.sink[T](bufferSize = 256))(Keep.both).run()
}
with the advantage that the returned sink and source can be materialized multiple times.
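To illustrate, here is a hypothetical usage sketch (assuming the MergeHub/BroadcastHub version of bindedSinkAndSource above is in scope): the pair is materialized once, and then both ends can be attached to any number of times:
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, Materializer}
import akka.stream.scaladsl.Source

object BindedPairDemo extends App {
  implicit val system: ActorSystem = ActorSystem("demo")
  implicit val mat: Materializer = ActorMaterializer()

  val (sink, source) = bindedSinkAndSource[String]

  // Two independent consumers can attach to the same source...
  source.runForeach(s => println(s"consumer 1 got: $s"))
  source.runForeach(s => println(s"consumer 2 got: $s"))

  // ...and several producers can feed the same sink.
  Source(List("a", "b")).runWith(sink)
  Source.single("c").runWith(sink)
}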

Materialising a graph within an actor

I am trying to materialise a graph within an actor. This seems to work if any of the following is true:
The graph does not contain a broadcast (created with alsoTo), or
The same ActorMaterializer is used for each materialisation, or
The graph is materialised outside of an Actor
I have reduced it down to the following test cases:
import java.util.concurrent.{CountDownLatch, TimeUnit}

import akka.NotUsed
import akka.actor.{Actor, ActorSystem}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{RunnableGraph, Sink, Source}
import akka.testkit.{TestActorRef, TestKit}
import org.scalatest.{FlatSpecLike, Matchers}

class ActorFlowTest extends TestKit(ActorSystem("ActorFlowTest")) with Matchers with FlatSpecLike {

  def createGraph(withBroadcast: Boolean) = {
    if (withBroadcast) Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
    else Source.empty.to(Sink.ignore)
  }

  case object Bomb

  class FlowActor(
      graph: RunnableGraph[NotUsed],
      latch: CountDownLatch,
      materializer: (ActorSystem) => ActorMaterializer
  ) extends Actor {

    override def preStart(): Unit = {
      graph.run()(materializer(context.system))
      latch.countDown()
    }

    override def receive: Receive = {
      case Bomb => throw new RuntimeException
    }
  }

  "Without an actor" should "be able to materialize twice" in {
    val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
    val materializer1 = ActorMaterializer()(system)
    val materializer2 = ActorMaterializer()(system)
    graph.run()(materializer1)
    graph.run()(materializer2) // Pass
  }

  "With the same materializer" should "be able to materialize twice" in {
    val graph = createGraph(withBroadcast = true)
    val latch = new CountDownLatch(2)
    val materializer = ActorMaterializer()(system)
    val actorRef = TestActorRef(new FlowActor(graph, latch, _ => materializer))
    verify(actorRef, latch) should be(true) // Pass
  }

  "With a new materializer but no broadcast" should "be able to materialize twice" in {
    val graph = createGraph(withBroadcast = false)
    val latch = new CountDownLatch(2)
    def materializer(system: ActorSystem) = ActorMaterializer()(system)
    val actorRef = TestActorRef(new FlowActor(graph, latch, materializer))
    verify(actorRef, latch) should be(true) // Pass
  }

  "With a new materializer and a broadcast" should "be able to materialize twice" in {
    val graph = createGraph(withBroadcast = true)
    val latch = new CountDownLatch(2)
    def materializer(system: ActorSystem) = ActorMaterializer()(system)
    val actorRef = TestActorRef(new FlowActor(graph, latch, materializer))
    verify(actorRef, latch) should be(true) // Fail
  }

  def verify(actorRef: TestActorRef[_], latch: CountDownLatch): Boolean = {
    actorRef.start()
    actorRef ! Bomb
    latch.await(25, TimeUnit.SECONDS)
  }
}
It seems that the last case will always time out, with the following error in the log:
[ERROR] [07/05/2016 16:06:30.625] [ActorFlowTest-akka.actor.default-dispatcher-6] [akka://ActorFlowTest/user/$$c] Futures timed out after [20000 milliseconds]
akka.actor.PostRestartException: akka://ActorFlowTest/user/$$c: exception post restart (class java.lang.RuntimeException)
at akka.actor.dungeon.FaultHandling$$anonfun$6.apply(FaultHandling.scala:250)
at akka.actor.dungeon.FaultHandling$$anonfun$6.apply(FaultHandling.scala:248)
at akka.actor.dungeon.FaultHandling$$anonfun$handleNonFatalOrInterruptedException$1.applyOrElse(FaultHandling.scala:303)
at akka.actor.dungeon.FaultHandling$$anonfun$handleNonFatalOrInterruptedException$1.applyOrElse(FaultHandling.scala:298)
at scala.runtime.AbstractPartialFunction.apply(AbstractPartialFunction.scala:36)
at akka.actor.dungeon.FaultHandling$class.finishRecreate(FaultHandling.scala:248)
at akka.actor.dungeon.FaultHandling$class.faultRecreate(FaultHandling.scala:76)
at akka.actor.ActorCell.faultRecreate(ActorCell.scala:374)
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:464)
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:483)
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:282)
at akka.testkit.CallingThreadDispatcher.process$1(CallingThreadDispatcher.scala:243)
at akka.testkit.CallingThreadDispatcher.runQueue(CallingThreadDispatcher.scala:283)
at akka.testkit.CallingThreadDispatcher.systemDispatch(CallingThreadDispatcher.scala:191)
at akka.actor.dungeon.Dispatch$class.restart(Dispatch.scala:119)
at akka.actor.ActorCell.restart(ActorCell.scala:374)
at akka.actor.LocalActorRef.restart(ActorRef.scala:406)
at akka.actor.SupervisorStrategy.restartChild(FaultHandling.scala:365)
at akka.actor.OneForOneStrategy.processFailure(FaultHandling.scala:518)
at akka.actor.SupervisorStrategy.handleFailure(FaultHandling.scala:303)
at akka.actor.dungeon.FaultHandling$class.handleFailure(FaultHandling.scala:263)
at akka.actor.ActorCell.handleFailure(ActorCell.scala:374)
at akka.actor.ActorCell.invokeAll$1(ActorCell.scala:459)
at akka.actor.ActorCell.systemInvoke(ActorCell.scala:483)
at akka.dispatch.Mailbox.processAllSystemMessages(Mailbox.scala:282)
at akka.dispatch.Mailbox.run(Mailbox.scala:223)
at akka.dispatch.Mailbox.exec(Mailbox.scala:234)
at scala.concurrent.forkjoin.ForkJoinTask.doExec(ForkJoinTask.java:260)
at scala.concurrent.forkjoin.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1339)
at scala.concurrent.forkjoin.ForkJoinPool.runWorker(ForkJoinPool.java:1979)
at scala.concurrent.forkjoin.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:107)
Caused by: java.util.concurrent.TimeoutException: Futures timed out after [20000 milliseconds]
at scala.concurrent.impl.Promise$DefaultPromise.ready(Promise.scala:219)
at scala.concurrent.impl.Promise$DefaultPromise.result(Promise.scala:223)
at scala.concurrent.Await$$anonfun$result$1.apply(package.scala:190)
at akka.dispatch.MonitorableThreadFactory$AkkaForkJoinWorkerThread$$anon$3.block(ThreadPoolBuilder.scala:167)
at scala.concurrent.forkjoin.ForkJoinPool.managedBlock(ForkJoinPool.java:3640)
at akka.dispatch.MonitorableThreadFactory$AkkaForkJoinWorkerThread.blockOn(ThreadPoolBuilder.scala:165)
at scala.concurrent.Await$.result(package.scala:190)
at akka.stream.impl.ActorMaterializerImpl.actorOf(ActorMaterializerImpl.scala:207)
at akka.stream.impl.ActorMaterializerImpl$$anon$2.matGraph(ActorMaterializerImpl.scala:166)
at akka.stream.impl.ActorMaterializerImpl$$anon$2.materializeAtomic(ActorMaterializerImpl.scala:150)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:919)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:915)
at scala.collection.immutable.Set$Set1.foreach(Set.scala:94)
at akka.stream.impl.MaterializerSession.materializeModule(StreamLayout.scala:915)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:922)
at akka.stream.impl.MaterializerSession$$anonfun$materializeModule$1.apply(StreamLayout.scala:915)
at scala.collection.immutable.Set$Set4.foreach(Set.scala:200)
at akka.stream.impl.MaterializerSession.materializeModule(StreamLayout.scala:915)
at akka.stream.impl.MaterializerSession.materialize(StreamLayout.scala:882)
at akka.stream.impl.ActorMaterializerImpl.materialize(ActorMaterializerImpl.scala:182)
at akka.stream.impl.ActorMaterializerImpl.materialize(ActorMaterializerImpl.scala:80)
at akka.stream.scaladsl.RunnableGraph.run(Flow.scala:351)
at ActorFlowTest$FlowActor.preStart(ActorFlowTest.scala:40)
at akka.actor.Actor$class.postRestart(Actor.scala:566)
at ActorFlowTest$FlowActor.postRestart(ActorFlowTest.scala:33)
at akka.actor.Actor$class.aroundPostRestart(Actor.scala:504)
at ActorFlowTest$FlowActor.aroundPostRestart(ActorFlowTest.scala:33)
at akka.actor.dungeon.FaultHandling$class.finishRecreate(FaultHandling.scala:239)
... 25 more
I have tried explicitly terminating the ActorMaterializers, but that doesn't reproduce the problem.
A workaround is to create a closure around the ActorMaterializer in the Props, but if this also came from another actor, I'm worried I will eventually run into similar problems.
Any idea why this is? Obviously it has something to do with the ActorMaterializer, but it is interesting that removing the broadcast also solves it (even with a much more complicated graph).
This seems to be related to (or at least solved through proper) supervision. I created an extra supervisor actor which, for demonstration purposes, just starts a single FlowActor in its preStart function and forwards the Bomb messages to it. The following tests execute successfully without any timeout exception:
import java.util.concurrent.{CountDownLatch, TimeUnit}

import akka.NotUsed
import akka.actor.Actor.Receive
import akka.actor.SupervisorStrategy._
import akka.actor.{Actor, ActorRef, ActorSystem, OneForOneStrategy, Props}
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.{RunnableGraph, Sink, Source}
import akka.testkit.{TestActorRef, TestKit}
import org.scalatest.{FlatSpecLike, Matchers}

import scala.concurrent.duration._

class ActorFlowTest extends TestKit(ActorSystem("TrikloSystem")) with Matchers with FlatSpecLike {

  def createGraph(withBroadcast: Boolean) = {
    if (withBroadcast) Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
    else Source.empty.to(Sink.ignore)
  }

  case object Bomb

  class Supervisor(graph: RunnableGraph[NotUsed],
                   latch: CountDownLatch,
                   materializer: (ActorSystem) => ActorMaterializer) extends Actor {

    var actorRef: Option[ActorRef] = None

    override def preStart(): Unit = {
      actorRef = Some(context.actorOf(Props(new FlowActor(graph, latch, materializer))))
    }

    override def receive: Receive = {
      case Bomb => actorRef.map(_ ! Bomb)
    }
  }

  class FlowActor(
      graph: RunnableGraph[NotUsed],
      latch: CountDownLatch,
      materializer: (ActorSystem) => ActorMaterializer
  ) extends Actor {

    override def preStart(): Unit = {
      graph.run()(materializer(context.system))
      latch.countDown()
    }

    override def receive: Receive = {
      case Bomb =>
        throw new RuntimeException
    }
  }

  "Without an actor" should "be able to materialize twice" in {
    val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
    val materializer1 = ActorMaterializer()(system)
    val materializer2 = ActorMaterializer()(system)
    graph.run()(materializer1)
    graph.run()(materializer2) // Pass
  }

  "With the same materializer" should "be able to materialize twice" in {
    val graph = createGraph(withBroadcast = true)
    val latch = new CountDownLatch(2)
    val materializer = ActorMaterializer()(system)
    val actorRef = TestActorRef(new Supervisor(graph, latch, _ => materializer))
    verify(actorRef, latch) should be(true) // Pass
  }

  "With a new materializer but no broadcast" should "be able to materialize twice" in {
    val graph = createGraph(withBroadcast = false)
    val latch = new CountDownLatch(2)
    def materializer(system: ActorSystem) = ActorMaterializer()(system)
    val actorRef = TestActorRef(new Supervisor(graph, latch, materializer))
    verify(actorRef, latch) should be(true) // Pass
  }

  "With a new materializer and a broadcast" should "be able to materialize twice" in {
    val graph = createGraph(withBroadcast = true)
    val latch = new CountDownLatch(2)
    def materializer(system: ActorSystem) = ActorMaterializer()(system)
    val actorRef = TestActorRef(new Supervisor(graph, latch, materializer))
    verify(actorRef, latch) should be(true) // Pass (was Fail without the supervisor)
  }

  def verify(actorRef: TestActorRef[_], latch: CountDownLatch): Boolean = {
    actorRef.start()
    actorRef ! Bomb
    latch.await(25, TimeUnit.SECONDS)
  }
}
There are some misuses of the Akka TestKit in this test.
TestActorRef is a very special test construct in that it executes on the calling thread (the CallingThreadDispatcher), to allow for easy synchronous unit testing. Using a CountDownLatch in a synchronous test is strange, since every action happens on the same thread, so there is no need for inter-thread communication.
When you create an instance of TestActorRef, it is started in that same call (you can see this by, for example, throwing an exception from the constructor or preStart and seeing it end up in your test case).
Calling start on the ActorRef is definitely not something you should do. TestActorRef's special nature gives you access to it, but you are essentially calling start on an empty shell actor, not the actor you think you are interacting with (and if it were that actor, it would still be wrong to ever call start() on it).
A proper test of what you intend to test (but not a very useful one, since there is no problem materializing a graph twice regardless of context or materializer) would drop the latch and look something like this:
class FlowActor(graph: RunnableGraph[NotUsed], materializer: (ActorSystem) => ActorMaterializer) extends Actor {
  override def preStart(): Unit = {
    graph.run()(materializer(context.system))
  }

  override def receive: Receive = Actor.emptyBehavior
}

"With a new materializer and a broadcast" should "be able to materialize twice" in {
  val graph = Source.empty.alsoTo(Sink.ignore).to(Sink.ignore)
  def materializer(system: ActorSystem) = ActorMaterializer()(system)

  val actorRef1 = TestActorRef(new FlowActor(graph, materializer))
  val actorRef2 = TestActorRef(new FlowActor(graph, materializer))

  // we'd get an exception here if it was not possible to materialize
  // since pre-start is run on the calling thread - the same thread
  // that is executing the test case
}
I'd just let the specific weirdnesses of this go instead of digging deeper into the magic of TestActorRef; any insights would be hard earned, and they would not be applicable to many cases beyond this specific one.

A Scala remote actor exception

I have Scala code like this for an echo service.
import scala.actors.Actor
import scala.actors.Actor._
import scala.actors.remote.RemoteActor._

class Echo extends Actor {
  def act() {
    alive(9010)
    register('myName, self)
    loop {
      react {
        case msg => println(msg)
      }
    }
  }
}

object EchoServer {
  def main(args: Array[String]): Unit = {
    val echo = new Echo
    echo.start
    println("Echo server started")
  }
}

EchoServer.main(null)
but it throws this exception:
java.lang.NoClassDefFoundError: Main$$anon$1$Echo$$anonfun$act$1
at Main$$anon$1$Echo.act((virtual file):16)
at scala.actors.Reaction.run(Reaction.scala:76)
at scala.actors.Actor$$anonfun$start$1.apply(Actor.scala:785)
at scala.actors.Actor$$anonfun$start$1.apply(Actor.scala:783)
at scala.actors.FJTaskScheduler2$$anon$1.run(FJTaskScheduler2.scala:160)
at scala.actors.FJTask$Wrap.run(Unknown Source)
at scala.actors.FJTaskRunner.scanWhileIdling(Unknown Source)
at scala.actors.FJTaskRunner.run(Unknown Source)
Caused by: java.lang.ClassNotFoundException: Main$$anon$1$Echo$$anonfun$act$1
at java.net.URLClassLoader$1.run(URLClassLoader.java:200)
at java.security.AccessController.doPrivileged(Native Method)
at java.net.URLClassLoader.findClass(URLClassLoader.java:188)
at java.lang.ClassLoader.loadClass(ClassLoader.java:307)
at java.lang.ClassLoader.loadClass(ClassLoader.java:252)
at java.lang.ClassLoader.loadClassInternal(ClassLoader.java:320)
... 8 more
I don't know what causes it. By the way, my Scala version is 2.7.5.
A ClassNotFoundException indicates that something that should have been compiled probably was not. How did you compile it? Manually, using scalac?
Try the following: rm *.class && scalac *.scala && scala EchoServer.
The following works:
EchoServer.scala
import scala.actors.Actor
import scala.actors.Actor._
import scala.actors.remote.RemoteActor._

class Echo extends Actor {
  def act() {
    alive(9010)
    register('myName, self)
    loop {
      react {
        case msg => println(msg)
      }
    }
  }
}

object EchoServer {
  def main(args: Array[String]): Unit = {
    val echo = new Echo
    echo.start
    println("Echo server started")
  }
}
Client.scala
import scala.actors.Actor._
import scala.actors.remote.Node
import scala.actors.remote.RemoteActor._

object Client extends Application {
  override def main(args: Array[String]) {
    if (args.length < 1) {
      println("Usage: scala Client [msg]")
      return
    }

    actor {
      val remoteActor = select(Node("localhost", 9010), 'myName)

      remoteActor !? args(0) match {
        case msg => println("Server's response is [" + msg + "]")
      }
    }
  }
}
Command line:
rm *.class && scalac *.scala && scala EchoServer
And in other terminal:
scala Client hello
You need to set the classloader on the remote actors.
Before the act() method, add the line:
RemoteActor.classLoader = getClass.getClassLoader
Why is setting the classloader necessary with Scala RemoteActors?
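Applied to the Echo actor above, that suggestion looks like this (a sketch):
import scala.actors.Actor
import scala.actors.Actor._
import scala.actors.remote.RemoteActor
import scala.actors.remote.RemoteActor._

class Echo extends Actor {
  // Make the remote actor machinery resolve classes (such as the anonymous
  // closure classes generated for act) with the same classloader that
  // loaded this class.
  RemoteActor.classLoader = getClass.getClassLoader

  def act() {
    alive(9010)
    register('myName, self)
    loop {
      react {
        case msg => println(msg)
      }
    }
  }
}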