akka {
  actor {
    provider = "akka.cluster.ClusterActorRefProvider"
  }
  remote {
    enabled-transports = ["akka.remote.netty.tcp"]
    netty.tcp {
      hostname = "127.0.0.1"
      port = 0
    }
  }
}
akka.cluster {
  seed-nodes = [
    "akka.tcp://MyCluster@127.0.0.1:2551",
    "akka.tcp://MyCluster@127.0.0.1:2552"
  ]
}
object AndromedaApiClusterActivator extends App {
  val system = ActorSystem("MyCluster", ConfigFactory.load())
  val clusterController = system.actorOf(Props[MyCluster], name = "MyCluster")
}

class MyCluster extends Actor {
  val log = Logging(context.system, this)
  val cluster = Cluster(context.system)

  override def preStart(): Unit = {
    cluster.subscribe(self, classOf[MemberEvent], classOf[UnreachableMember])
  }

  override def postStop(): Unit = {
    cluster.unsubscribe(self)
  }

  override def receive = {
    case x: MemberEvent => log.info("MemberEvent: {}", x)
    case x: UnreachableMember => log.info("UnreachableMember: {}", x)
  }
}
When I run it I get:
Association with remote system [akka.tcp://MyCluster@127.0.0.1:2552] has failed, address is now gated for [5000] ms. Reason: [Association failed with [akka.tcp://MyCluster@127.0.0.1:2552]] Caused by: [Connection refused: /127.0.0.1:2552]
Association with remote system [akka.tcp://MyCluster@127.0.0.1:2551] has failed, address is now gated for [5000] ms. Reason: [Association failed with [akka.tcp://MyCluster@127.0.0.1:2551]] Caused by: [Connection refused: /127.0.0.1:2551]
I cannot find an explanation. Any help?
You should start the two seed nodes first and then connect to them. To illustrate, I will create both systems inside one App, but you can run two instances of the App with different configs/ports specified on the command line.
object Main extends App {
  val system1 = ActorSystem("MyCluster1", ConfigFactory.load("node1.conf"))
  val system2 = ActorSystem("MyCluster2", ConfigFactory.load("node2.conf"))
  val clusterController = system1.actorOf(Props[MyCluster], name = "MyCluster1")
}
application.conf:
akka {
  actor {
    provider = "akka.cluster.ClusterActorRefProvider"
  }
  remote {
    enabled-transports = ["akka.remote.netty.tcp"]
    netty.tcp {
      hostname = "127.0.0.1"
      port = 2552
    }
  }
}
akka.cluster {
  seed-nodes = [
    "akka.tcp://MyCluster1@127.0.0.1:2552",
    "akka.tcp://MyCluster2@127.0.0.1:2553"
  ]
}
To start the other nodes, I suggest specifying a different config per node. node1.conf:
include "application"
akka.remote.netty.tcp.port = 2552
node2.conf:
include "application"
akka.remote.netty.tcp.port = 2553
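For completeness, here is a minimal sketch of a single launcher that picks the actor-system name and config file from the command line, so the same App can be started once per node. The Node object, its arguments, and the actor name are illustrative assumptions, not part of the original answer:

import akka.actor.{ActorSystem, Props}
import com.typesafe.config.ConfigFactory

// Hedged sketch: start as e.g. `Node MyCluster1 node1.conf` and `Node MyCluster2 node2.conf`.
object Node extends App {
  val Array(systemName, configName) = args           // system name and config resource from the command line
  val system = ActorSystem(systemName, ConfigFactory.load(configName))
  system.actorOf(Props[MyCluster], name = "clusterListener")
}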
Related
I am using the Jetty websocket client to connect to a websocket server. On every successful connection it creates around 35 file descriptors; of those 35, 32 are PIPEs. On termination these PIPEs are not closed.
class AMPWebSocketClient(remoteHost: String, remoteHandler: String) {
  val logger: Logger = LoggerFactory.getLogger(this.getClass)

  // 1. Create context
  private var masterAMPClient = new WebSocketClientContext()
    .setTrustStore(C.GLASS_KEY_STORE)
    .setPassword(C.DEFAULT_KEYSTORE_PASSWORD)
    .buildSecureClient

  private var client = createSession(remoteHost, remoteHandler)
  private var session: Session = _
  private var socket: MasterAMPFeeder = _

  private def createSession(remoteHost: String, remoteHandler: String): MasterAMPFeeder = {
    // 2. Connect to the WebSocket server
    var future = ""
    var success = false
    var generatedURI: String = "wss://" + remoteHost + "/decoder/" + remoteHandler
    socket = new MasterAMPFeeder()
    do {
      try {
        masterAMPClient.getPolicy.setIdleTimeout(TimeUnit.DAYS.toMillis(1))
        masterAMPClient.start()
        val destURI: URI = new URI(generatedURI)
        val request: ClientUpgradeRequest = new ClientUpgradeRequest()
        val future = masterAMPClient.connect(socket, destURI, request)
        logger.debug("Before connection")
        session = future.get(C.ONE_SECOND * 15, TimeUnit.SECONDS)
        session.setIdleTimeout(TimeUnit.DAYS.toMillis(1))
        logger.debug("Waiting for GlassDecoder to accept the connection")
        socket.getLatch().await()
        logger.debug("Connected to GlassDecoder")
      } catch {
        case e: Exception =>
          logger.debug("Not able to connect to WebSocket: {}", e.getStackTrace)
          logger.warn("Retrying to connect to: {}", generatedURI)
          Thread.sleep(10 * C.ONE_MILLISECOND)
      }
    } while (socket.isNotConnected)
    return socket
  }
This is what the actor section in my application.conf looks like:
  actor {
    provider = "akka.cluster.ClusterActorRefProvider"
    unstarted-push-timeout = 100s
    default-mailbox {
      mailbox-type = "akka.dispatch.SingleConsumerOnlyUnboundedMailbox"
      mailbox-push-timeout-time = 2s
    }
    default-dispatcher {
      type = Dispatcher
      executor = "fork-join-executor"
      fork-join-executor {
        parallelism-min = 16
        parallelism-factor = 4.0
        parallelism-max = 64
      }
      throughput = 1
    }
    job-manager-dispatcher {
      type = Dispatcher
      executor = "fork-join-executor"
      fork-join-executor {
        parallelism-min = 16
        parallelism-factor = 4.0
        parallelism-max = 64
      }
      throughput = 1
    }
  }
  remote {
    log-remote-lifecycle-events = on
    netty.tcp {
      hostname = "0.0.0.0"
      port = 2557
    }
  }
  extensions = [
    "akka.contrib.pattern.DistributedPubSubExtension"
  ]
  cluster {
    seed-nodes = [
      "akka.tcp://dispatcher@0.0.0.0:2557"
    ]
    auto-down-unreachable-after = 30s
  }
}

akka.contrib.cluster.pub-sub {
  name = dispatcherPubSubMediator
  role = ""
  routing-logic = round-robin
  gossip-interval = 1s
  removed-time-to-live = 120s
}
This is how I create actors -
val aRef = instances match {
  case 1 =>
    system.actorOf(Props[T].withDispatcher(dispatcher), name)
  case _ =>
    system.actorOf(
      ClusterRouterPool(
        AdaptiveLoadBalancingPool(SystemLoadAverageMetricsSelector),
        ClusterRouterPoolSettings(
          totalInstances = instances * 64, maxInstancesPerNode = instances,
          allowLocalRoutees = isLocal, useRole = None)
      ).props(Props[T]).withDispatcher(dispatcher), name)
}

ClusterReceptionistExtension(system).registerService(aRef)
The single-instance (local) creation works fine, but the cluster pool instantiation does not (no exception/error, but the constructor, preStart, etc. are never called).
Any help appreciated.
I set up a db in MongoDB called spotlight. To connect to that database, I use the following environment setup in DataSource.groovy:
grails {
  mongo {
    host = "localhost"
    port = 27017
  }
}
environments {
  development { // MongoDB instance running without --auth flag.
    grails {
      mongo {
        databaseName = "spotlight"
      }
    }
  }
  test {
    grails { // MongoDB instance running without --auth flag.
      mongo {
        databaseName = "spotlight"
      }
    }
  }
}
This is my unit test class:
@TestMixin(MongoDbTestMixin)
class BiographySpec extends Specification {

    def setup() {
    }

    def cleanup() {
    }

    void "given a bio then find() returns that bio object"() {
        given:
        mongoDomain([Biography])
        Biography bio = new Biography()
        def nameOf1stImage = "firstImage.png"
        def nameOf2ndImage = "secondImage.png"
        def nameInBio = "star"
        def descriptionOfBio = "a description"
        def id = 0
        bio.firstImage = nameOf1stImage
        bio.secondImage = nameOf2ndImage
        bio.name = nameInBio
        bio.description = descriptionOfBio
        bio.images = [nameOf1stImage, nameOf2ndImage]

        when:
        bio.save(flush: true)
        id = bio.id

        then:
        Biography bioFromDb = bio.get(id)
        bioFromDb.firstImage == nameOf1stImage
        bioFromDb.secondImage == nameOf2ndImage
        bioFromDb.name == nameInBio
        bioFromDb.description == descriptionOfBio
    }
}
I ran grails test-app, and Grails created a biography document in the test db, not the spotlight db. Is there anything wrong with the way I configure the environments settings in DataSource.groovy?
When I'm using Spray.io to develop a RESTful API, how should I structure my application?
I already saw this answer on how to split a Spray application, but I'm not satisfied with it, since it doesn't seem to use the "one actor per request" approach. Can I forward requests from the root actor to other actors in my application based on paths and, inside these actors, define the related routes?
Thanks
You can certainly forward requests from one actor to another, based on paths or whatever else. Check out my example project (which is a fork of a fork of an example project):
https://github.com/gangstead/spray-moviedb/blob/master/src/main/scala/com/example/routes/ApiRouter.scala
Relevant code from the main actor that receives all requests and routes them to other actors that handle each service:
def receive = runRoute {
  compressResponseIfRequested() {
    alwaysCache(simpleCache) {
      pathPrefix("movies") { ctx => asb.moviesRoute ! ctx } ~
      pathPrefix("people") { ctx => asb.peopleRoute ! ctx }
    } ~
    pathPrefix("login") { ctx => asb.loginRoute ! ctx } ~
    pathPrefix("account") { ctx => asb.accountRoute ! ctx }
  }
}
And for example the movies route:
def receive = runRoute {
  get {
    parameters('query, 'page ? 1).as(TitleSearchQuery) { query =>
      val titleSearchResults = ms.getTitleSearchResults(query)
      complete(titleSearchResults)
    } ~
    path(LongNumber) { movieId =>
      val movie = ms.getMovie(movieId)
      complete(movie)
    } ~
    path(LongNumber / "cast") { movieId =>
      val movieCast = ms.getMovieCast(movieId)
      complete(movieCast)
    } ~
    path(LongNumber / "trailers") { movieId =>
      val trailers = ms.getTrailers(movieId)
      complete(trailers)
    }
  }
}
I struggled a lot with creating my first full REST project. The examples I found were at a hello-world level... I read a few blogs and comments and decided to create an example project. It is based on Scala/Akka/Spray/MySQL.
It is a fully working example, with websockets to notify clients that data has changed, etc. You can check it out at https://github.com/vixxx123/scalasprayslickexample
Here is sample routing code from that project:
val personCreateHandler = actorRefFactory.actorOf(RoundRobinPool(2).props(Props[CreateActor]), s"${TableName}CreateRouter")
val personPutHandler = actorRefFactory.actorOf(RoundRobinPool(5).props(Props[UpdateActor]), s"${TableName}PutRouter")
val personGetHandler = actorRefFactory.actorOf(RoundRobinPool(20).props(Props[GetActor]), s"${TableName}GetRouter")
val personDeleteHandler = actorRefFactory.actorOf(RoundRobinPool(2).props(Props[DeleteActor]), s"${TableName}DeleteRouter")

val userRoute =
  pathPrefix("person") {
    pathEnd {
      get {
        ctx => personGetHandler ! GetMessage(ctx, None)
      } ~
      post {
        entity(as[Person]) { entity =>
          ctx => personCreateHandler ! CreateMessage(ctx, entity)
        }
      }
    } ~
    pathPrefix(IntNumber) { entityId =>
      pathEnd {
        get {
          ctx => personGetHandler ! GetMessage(ctx, Some(entityId))
        } ~ put {
          entity(as[Person]) { entity =>
            ctx => personPutHandler ! PutMessage(ctx, entity.copy(id = Some(entityId)))
          }
        } ~ delete {
          ctx => personDeleteHandler ! DeleteMessage(ctx, entityId)
        } ~ patch {
          ctx => personPutHandler ! PatchMessage(ctx, entityId)
        }
      }
    }
  }
And a sample from the create actor handler:
override def receive: Receive = {
  case CreateMessage(ctx, person) =>
    val localCtx = ctx
    connectionPool withSession { implicit session =>
      try {
        val resId = PersonsIdReturning += person
        val addedPerson = person.copy(id = Some(resId.asInstanceOf[Int]))
        localCtx.complete(addedPerson)
        publishAll(CreatePublishMessage(TableName, localCtx.request.uri + "/" + addedPerson.id.get, addedPerson))
        L.debug(s"Person create success")
      } catch {
        case e: Exception =>
          L.error(s"Ups cannot create person: ${e.getMessage}", e)
          localCtx.complete(e)
      }
    }
}
There are still two important things missing: OAuth2 and push notifications to a specific user/connection via websocket.
I put together the code below; the intent was to have a non-blocking server accept a connection and then pass off this connection to an actor for further processing. This works the first time through, but on the subsequent request the program freezes at conServ ! servSoc.accept. Any ideas why this is happening?
import java.net._
import java.io._
import java.nio._
import java.nio.channels._
import java.util._
import scala.actors.Actor
import scala.actors.Actor._

def test() = {
  var channel: ServerSocketChannel = null
  val isa: InetSocketAddress = new InetSocketAddress(23)

  val conServ = actor {
    react {
      case conn: Socket => {
        try {
          var pw: PrintWriter = new PrintWriter(conn.getOutputStream(), true)
          pw.println("Current time: " + new Date)
          pw.close
          conn.close
        } catch {
          case ioe: IOException => println("IOException: " + ioe.getMessage)
          case e: Exception => println("Exception: " + e.getMessage)
        }
      }
    }
  }

  try {
    channel = ServerSocketChannel.open
    channel.configureBlocking(false)
    channel.socket().bind(isa)
    var selector: Selector = Selector.open
    channel.register(selector, SelectionKey.OP_ACCEPT)
    println("** Server ready for requests **")
    while (true) {
      if (selector.select > 0) {
        var selKeys: Set[SelectionKey] = selector.selectedKeys
        var selIt: Iterator[SelectionKey] = selKeys.iterator
        while (selIt.hasNext) {
          var key: SelectionKey = selIt.next.asInstanceOf[SelectionKey]
          selIt.remove
          if (key.isAcceptable) {
            var ssc: ServerSocketChannel = key.channel.asInstanceOf[ServerSocketChannel]
            var servSoc: ServerSocket = ssc.socket
            try {
              conServ ! servSoc.accept
            } catch {
              case ioe: IOException => println(ioe.printStackTrace)
            }
          }
        }
      }
    }
  } catch {
    case ioe: IOException => println("Could not listen to port 23. " + ioe.printStackTrace)
    case e: Exception => println("Error: " + e.printStackTrace)
  }
}

test
test
Enclose your react in a loop block like this:
val conServ = actor {
  loop {
    react {
      // ...
    }
  }
}
What happens now is that your actor is started, processes the first message, and then never "reacts" again to process additional messages from its mailbox.
See An actor's act method that uses loop.
This is what an actor does, handling one message at a time. What you want is a separate thread to handle each request; for this you can try using Futures.
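A minimal sketch of that idea (not from the original answers; the handle helper and the use of the global execution context are my own assumptions), handing each accepted Socket to a Future so the acceptor loop is never blocked by a slow client:

import java.io.PrintWriter
import java.net.Socket
import java.util.Date
import scala.concurrent.Future
import scala.concurrent.ExecutionContext.Implicits.global

// Hypothetical helper: call this instead of `conServ ! servSoc.accept`.
def handle(conn: Socket): Future[Unit] = Future {
  // Each connection is served on a pool thread, so the select loop keeps running.
  val pw = new PrintWriter(conn.getOutputStream, true)
  try pw.println("Current time: " + new Date)
  finally {
    pw.close()
    conn.close()
  }
}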