Server code:
object EchoService {
  def route: Route = path("ws-echo") {
    get {
      handleWebSocketMessages(flow)
    }
  } ~ path("send-client") {
    get {
      sourceQueue.map { q =>
        println("Offering message from server")
        q.offer(BinaryMessage(ByteString("ta ta")))
      }
      complete("Sent from server successfully")
    }
  }

  val (source, sourceQueue) = {
    val p = Promise[SourceQueue[Message]]
    val s = Source.queue[Message](100, OverflowStrategy.backpressure).mapMaterializedValue { m =>
      p.trySuccess(m)
      m
    }
    (s, p.future)
  }

  val flow =
    Flow.fromSinkAndSourceMat(Sink.ignore, source)(Keep.right)
}
Client code:
object Client extends App {
  implicit val actorSystem = ActorSystem("akka-system")
  implicit val flowMaterializer = ActorMaterializer()
  import actorSystem.dispatcher

  val config = actorSystem.settings.config
  val interface = config.getString("app.interface")
  val port = config.getInt("app.port")

  // print each incoming strict text message
  val printSink: Sink[Message, Future[Done]] =
    Sink.foreach {
      case message: TextMessage.Strict =>
        println(message.text)
      case _ => println("received unknown message format")
    }

  val (source, sourceQueue) = {
    val p = Promise[SourceQueue[Message]]
    val s = Source.queue[Message](100, OverflowStrategy.backpressure).mapMaterializedValue { m =>
      p.trySuccess(m)
      m
    }
    (s, p.future)
  }

  val flow =
    Flow.fromSinkAndSourceMat(printSink, source)(Keep.right)

  val (upgradeResponse, sourceClosed) =
    Http().singleWebSocketRequest(WebSocketRequest("ws://localhost:8080/ws-echo"), flow)

  val connected = upgradeResponse.map { upgrade =>
    // just like a regular http request we can get 404 NotFound,
    // with a response body, that will be available from upgrade.response
    if (upgrade.response.status == StatusCodes.SwitchingProtocols || upgrade.response.status == StatusCodes.OK) {
      Done
    } else {
      throw new RuntimeException(s"Connection failed: ${upgrade.response.status}")
    }
  }

  connected.onComplete(println)
}
When I hit http://localhost:8080/send-client I see messages arriving at the client, but after a while, if I try to send to the client again, I don't see any messages on the client side. I also tried source.concatMat(Source.maybe)(Keep.right) but no luck.
Edit: I tested with a JS client; somehow the connection/flow gets closed on the server end. Is there any way to prevent this, and how can I listen for this event when using the akka-http WebSocket client?
Hi,
The reason it does not stay connected is that all HTTP connections have an idle-timeout enabled by default, to keep the system from leaking connections if clients disappear without any signal.
One way to overcome this limitation (and actually my recommended approach) is to inject keep-alive messages on the client side (messages that the server otherwise ignores, but which inform the underlying HTTP server that the connection is still live).
You can override the idle-timeouts in the HTTP server configuration to a larger value, but I don't recommend that.
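For reference, such an override is just a configuration change; a minimal sketch (the 120 s values are purely illustrative, and "infinite" disables the timeout entirely):

import akka.actor.ActorSystem
import com.typesafe.config.ConfigFactory

// Illustrative only: raise the server- and client-side idle timeouts
// instead of (or in addition to) injecting keep-alive messages.
val tweakedConfig = ConfigFactory.parseString(
  """
    |akka.http.server.idle-timeout = 120 s
    |akka.http.client.idle-timeout = 120 s
  """.stripMargin
).withFallback(ConfigFactory.load())

implicit val actorSystem = ActorSystem("akka-system", tweakedConfig)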
If you are using stream-based clients, injecting heartbeats when necessary is as simple as calling keepAlive and providing it with a time interval and a factory for the message you want to inject:
http://doc.akka.io/api/akka/2.4.7/index.html#akka.stream.scaladsl.Flow (see the keepAlive operator)
That combinator will make sure that no period longer than the given interval stays silent: it injects elements to keep this contract when necessary (and will not inject anything if there is enough background traffic).
-Endre
Thank you Endre, working snippet:
// on client side
val (source, sourceQueue) = {
  val p = Promise[SourceQueue[Message]]
  val s = Source.queue[Message](Int.MaxValue, OverflowStrategy.backpressure)
    .mapMaterializedValue { m =>
      p.trySuccess(m)
      m
    }
    .keepAlive(FiniteDuration(1, TimeUnit.SECONDS), () => TextMessage.Strict("Heart Beat"))
  (s, p.future)
}
Related
I am using akka-http. One of my routes interacts with an external service via the akka-http client-side API, and the HTTP request runs continuously; I am unable to make it work.
Here is my use case: I am interacting with a Janus server and doing a long-poll GET request. As soon as the server responds with a 'keepalive' or an 'event', I request again, and so on; the server keeps responding.
All of this happens inside an actor, and I have an akka-http route which initialises the first request.
Here is my code:
final case class CreateLongPollRequest(sessionId: BigInt)

class LongPollRequestActor(config: Config) extends Actor with ActorLogging {
  var senderRef: Option[ActorRef] = None

  def receive = {
    case CreateLongPollRequest(sessionId) =>
      senderRef = Some(sender())
      val uri: String = "localhost:8080/" + sessionId
      val request = HttpRequest(HttpMethods.GET, uri)
      val responseFuture = Http(context.system).singleRequest(request)
      responseFuture.onComplete {
        case Success(res) =>
          Unmarshal(res.entity.toStrict(40 seconds)).value.map { result =>
            val responseStr = result.data.utf8String
            log.info("Actor LongPollRequestActor: long poll responseStr {}", responseStr)
            senderRef match {
              case Some(ref) => ref ! responseStr
              case None => log.info("Actor LongPollRequestActor: sender ref is null")
            }
          }
        case Failure(e) => log.error(e, "long poll request failed")
      }
  }
}
final case class JanusLongPollRequest(sessionId: BigInt)

class JanusManagerActor(childMaker: List[ActorRefFactory => ActorRef]) extends Actor {
  var senderRef: Option[akka.actor.ActorRef] = None
  val longPollRequestActor = childMaker(1)(context)

  def receive: PartialFunction[Any, Unit] = {
    case JanusLongPollRequest(sessionId) =>
      senderRef = Some(sender)
      keepAlive(sessionId, senderRef)
  }

  def keepAlive(sessionId: BigInt, sender__Ref: Option[ActorRef]): Unit = {
    val senderRef = sender__Ref
    val future = ask(longPollRequestActor, CreateLongPollRequest(sessionId)).mapTo[String] //.pipeTo(sender)
    if (janus.equals("keepalive")) {
      val janusRequestResponse = Future {
        JanusSessionRequestResponse(janus = janus)
      }
      senderRef match {
        case Some(sender_ref) => janusRequestResponse.pipeTo(sender_ref)
      }
      keepAlive(sessionId, senderRef)
    } else if (janus.equals("event")) {
      // some fetching of values from the server
      val janusLongPollRequestResponse = Future {
        JanusLongPollRequestResponse(janus = janus, sender = sender, transaction = transaction, pluginData = Some(pluginData))
      }
      senderRef match {
        case Some(sender_ref) => janusLongPollRequestResponse.pipeTo(sender_ref)
      }
      keepAlive(sessionId, senderRef)
    }
  }
}

def createLongPollRequest: server.Route =
  path("create-long-poll-request") {
    post {
      entity(as[JsValue]) { json =>
        val sessionID = json.asJsObject.fields("sessionID").convertTo[String]
        val future = ask(janusManagerActor, JanusLongPollRequest(sessionID)).mapTo[JanusSessionRequestResponse]
        onComplete(future) {
          case Success(sessionDetails) =>
            log.info("janus long poll request created")
            val jsonResponse = JsObject("longpollDetails" -> sessionDetails.toJson)
            complete(OK, routeResponseMessage.getResponse(StatusCodes.OK.intValue, ServerMessages.JANUS_SESSION_CREATED, jsonResponse))
          case Failure(ex) =>
            failWith(ex)
        }
      }
    }
  }
Now, the above route createLongPollRequest worked fine the first time and I could see the response, but for the next attempts I get a dead letter as follows:
[INFO] [akkaDeadLetter][07/30/2021 12:13:53.587] [demo-Janus-ActorSystem-akka.actor.default-dispatcher-6] [akka://demo-Janus-ActorSystem/deadLetters] Message [com.ifkaar.lufz.janus.models.janus.JanusSessionRequestResponse] from Actor[akka://demo-Janus-ActorSystem/user/ActorManager/ManagerActor#-721316187] to Actor[akka://demo-Janus-ActorSystem/deadLetters] was not delivered. [4] dead letters encountered. If this is not an expected behavior then Actor[akka://demo-Janus-ActorSystem/deadLetters] may have terminated unexpectedly. This logging can be turned off or adjusted with configuration settings 'akka.log-dead-letters' and 'akka.log-dead-letters-during-shutdown'.
Probably this is what causes the issue after the first iteration:
responseFuture.pipeTo(sender())
Is there a way I can get a response in my akka-http route whenever my backend server responds?
The Actor should only reply once to the CreateLongPollRequest and it should only do this when it has valid data. If the poll fails the Actor should just issue another poll request.
It is difficult to give more help without the details of the Actor.
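A minimal sketch of that pattern (the materializer setup and actor name are assumed here, not taken from the original code): capture the sender once per request, reply exactly once when valid data arrives, and simply poll again on failure instead of replying.

import akka.actor.{Actor, ActorRef}
import akka.http.scaladsl.Http
import akka.http.scaladsl.model.{HttpMethods, HttpRequest}
import akka.http.scaladsl.unmarshalling.Unmarshal
import akka.stream.{ActorMaterializer, Materializer}
import scala.util.{Failure, Success}

class LongPollOnceActor extends Actor {
  import context.dispatcher
  // A Materializer is required by Unmarshal; on Akka 2.6+ Materializer(context) can be used instead.
  implicit val mat: Materializer = ActorMaterializer()(context.system)

  def receive: Receive = {
    case CreateLongPollRequest(sessionId) =>
      val replyTo = sender() // capture before any asynchronous callback runs
      poll(sessionId, replyTo)
  }

  private def poll(sessionId: BigInt, replyTo: ActorRef): Unit = {
    val request = HttpRequest(HttpMethods.GET, s"http://localhost:8080/$sessionId")
    Http(context.system).singleRequest(request)
      .flatMap(res => Unmarshal(res.entity).to[String])
      .onComplete {
        case Success(body) => replyTo ! body           // reply exactly once, with valid data
        case Failure(_)    => poll(sessionId, replyTo) // don't reply; just issue another poll
      }
  }
}

The key point is that the actor replies at most once per request, so later polls cannot target an ask actor that has already received its answer and shut down, which is what produces the dead letters above.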
I am a bit lost using the akka-http libraries to create a server. The communication I need to establish is as follows:
There is one server and n clients (n < 5)
Sometimes the clients send a command to the server, the server evaluates/delegates the command and answers the client
There are constant broadcast messages from the server to all clients
Given that:
my server needs to manage multiple 'sessions' that are connected via a websocket
Here is my websocket endpoint:
path("socket") {
handleWebSocketMessages(listen())
}
And here is the listen() method:
// stores offers to broadcast to all clients
private var offers: List[TextMessage => Unit] = List()

def listen(): Flow[Message, Message, NotUsed] = {
  val inbound: Sink[Message, Any] = Sink.foreach(m => /* handle the message */) // (*)
  val outbound: Source[Message, SourceQueueWithComplete[Message]] =
    Source.queue[Message](16, OverflowStrategy.fail)

  Flow.fromSinkAndSourceMat(inbound, outbound)((_, outboundMat) => {
    offers ::= outboundMat.offer
    NotUsed
  })
}

def sendText(text: String): Unit = {
  for (connection <- offers) connection(TextMessage.Strict(text))
}
With this approach I can register multiple clients and answer them using the sendText(text: String) method. But there is one big problem: how do I answer only a specific client after I have evaluated its command? (see (*))
[Another thing that's bugging me is that offers is a var, which seems wrong when programming in a purely FP way, but I can accept that if the rest is working]
Edit:
To elaborate, I basically need to be able to implement a method looking like this:
def onMessageReceived(m: Message, answer: TextMessage => Unit): Unit = {
  val response: TextMessage = handleMessage(m)
  answer(response)
}
But I cannot figure out on where to call this method in my websocket Flow.
I am not really sure if that is the way to go, but this seems to be working:
var actors: List[ActorRef] = Nil

private def wsFlow(implicit materializer: ActorMaterializer): Flow[ws.Message, ws.Message, NotUsed] = {
  val (actor, source) = Source.actorRef[String](10, akka.stream.OverflowStrategy.dropTail)
    .toMat(BroadcastHub.sink[String])(Keep.both)
    .run()
  actors = actor :: actors

  val wsHandler: Flow[ws.Message, ws.Message, NotUsed] =
    Flow[ws.Message]
      .merge(source)
      .map {
        case TextMessage.Strict(tm) => handleMessage(actor, tm)
        case _ => TextMessage.Strict("Ignored message!")
      }
  wsHandler
}
def broadcast(msg: String): Unit = {
  actors.foreach(_ ! TextMessage.Strict(msg))
}
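A variation on the same idea that keeps the per-connection handle explicit is to materialize a queue per connection and let the inbound sink reply through it. This is only a sketch; handleMessage is assumed to compute the reply text for one incoming frame:

import scala.concurrent.{ExecutionContext, Promise}
import akka.NotUsed
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.http.scaladsl.model.ws.{Message, TextMessage}

// One flow (and one queue) per connection, so replies go only to that client.
def perClientFlow(handleMessage: String => String)
                 (implicit ec: ExecutionContext): Flow[Message, Message, NotUsed] = {
  val queuePromise = Promise[SourceQueueWithComplete[Message]]()

  val outbound = Source
    .queue[Message](16, OverflowStrategy.dropHead)
    .mapMaterializedValue { q => queuePromise.trySuccess(q); q }

  val inbound = Sink.foreach[Message] {
    case TextMessage.Strict(text) =>
      // Answer exactly the client this flow instance was created for.
      queuePromise.future.foreach(_.offer(TextMessage(handleMessage(text))))
    case _ => () // ignore streamed/binary frames in this sketch
  }

  Flow.fromSinkAndSource(inbound, outbound)
}

handleWebSocketMessages(perClientFlow(handleMessage)) then gives every client its own outbound queue; broadcasting to all clients can still be layered on top, e.g. by also merging a shared BroadcastHub source into the outbound side.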
I started playing around with Scala and came across this boilerplate for a WebSocket chatroom in Scala.
It uses MergeHub.source() and BroadcastHub.sink() as the Source and Sink for sending messages to all connected clients.
The example works fine for exchanging messages as it is.
private val (chatSink, chatSource) = {
  // Don't log MergeHub$ProducerFailed as error if the client disconnects.
  // recoverWithRetries -1 is essentially "recoverWith"
  val source = MergeHub.source[WSMessage]
    .log("source")
    .recoverWithRetries(-1, { case _: Exception ⇒ Source.empty })
  val sink = BroadcastHub.sink[WSMessage]
  source.toMat(sink)(Keep.both).run()
}

private val userFlow: Flow[WSMessage, WSMessage, _] = {
  Flow.fromSinkAndSource(chatSink, chatSource)
}

def chat(): WebSocket = {
  WebSocket.acceptOrResult[WSMessage, WSMessage] {
    case rh if sameOriginCheck(rh) =>
      Future.successful(userFlow).map { flow =>
        Right(flow)
      }.recover {
        case e: Exception =>
          val msg = "Cannot create websocket"
          logger.error(msg, e)
          val result = InternalServerError(msg)
          Left(result)
      }
    case rejected =>
      logger.error(s"Request ${rejected} failed same origin check")
      Future.successful {
        Left(Forbidden("forbidden"))
      }
  }
}
I want to store the messages that are exchanged in the chatroom in a DB.
I tried adding map and fold functions to the source and sink to get hold of the messages that are sent, but I wasn't able to.
I tried adding a Flow stage between MergeHub and BroadcastHub like below:
val flow = Flow[WSMessage].map(element => println(s"Message: $element"))
source.via(flow).toMat(sink)(Keep.both).run()
But it throws a compilation error saying that toMat cannot be referenced with such a signature.
Can someone help, or point me to how I can get hold of the messages that are sent and store them in a DB?
Link for full template:
https://github.com/playframework/play-scala-chatroom-example
Let's look at your flow:
val flow = Flow[WSMessage].map(element => println(s"Message: $element"))
It takes elements of type WSMessage, and returns nothing (Unit). Here it is again with the correct type:
val flow: Flow[WSMessage, Unit, NotUsed] = Flow[WSMessage].map(element => println(s"Message: $element"))
This will clearly not work as the sink expects WSMessage and not Unit.
Here's how you can fix the above problem:
val flow = Flow[WSMessage].map { element =>
  println(s"Message: $element")
  element
}
Note that for persisting messages in the database, you will most likely want to use an async stage, roughly:
val flow = Flow[WSMessage].mapAsync(parallelism) { element =>
  println(s"Message: $element")
  // assuming DB.write() returns a Future[Unit]
  DB.write(element).map(_ => element)
}
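Putting it together with the hubs from the template, the persisting stage simply sits between MergeHub and BroadcastHub. DB.write is still an assumed method returning Future[Unit], the parallelism of 4 is arbitrary, and an implicit ExecutionContext and Materializer (both provided by Play) are required:

private val (chatSink, chatSource) = {
  val source = MergeHub.source[WSMessage]
    .log("source")
    .recoverWithRetries(-1, { case _: Exception => Source.empty })

  // Persist (and log) every message before it is broadcast to all clients.
  val persist = Flow[WSMessage].mapAsync(parallelism = 4) { element =>
    println(s"Message: $element")
    DB.write(element).map(_ => element) // assumed to return Future[Unit]
  }

  val sink = BroadcastHub.sink[WSMessage]
  source.via(persist).toMat(sink)(Keep.both).run()
}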
How to keep the client (web) connection in a memory variable and then send outgoing messages to the client (web) when needed?
I already have some simple code for pushing a message back to the client once the server receives a message from the client. How do I modify the code below to handle the outgoing messaging part?
implicit val actorSystem = ActorSystem("akka-system")
implicit val flowMaterializer = ActorMaterializer()
implicit val executionContext = actorSystem.dispatcher
val ip = "127.0.0.1"
val port = 32000
val route = get {
pathEndOrSingleSlash {
complete("Welcome to websocket server")
}
} ~
path("hello") {
get {
handleWebSocketMessages(echoService)
}
}
def sendMessageToClient(msg : String) {
// *** How to implement this?
// *** How to save the client connection when it is first connected?
// Then how to send message to this connection?
}
val echoService = Flow[Message].collect {
// *** Here the server push back messages when receiving msg from client
case tm : TextMessage => TextMessage(Source.single("Hello ") ++ tm.textStream)
case _ => TextMessage("Message type unsupported")
}
val binding = Http().bindAndHandle(route, ip, port)
You can look into tapping the sink flow via a .map call: inside the .map you can capture the value and then return the same message. For example:
Flow[Message].collect {
  case tm: TextMessage =>
    TextMessage(Source.single("Hello ") ++ tm.textStream.via(
      Flow[String].map { message => println(message) /* capture value here */; message }))
  case _ => TextMessage("Message type unsupported")
}
Now, if your intention is to process those values and send out values later, what you want is not a single source-to-sink flow but two separate streams for the sink and the source, for which you can use Flow.fromSinkAndSource, e.g.:
Flow.fromSinkAndSource[Message, Message](
  Flow[Message].collect { /* capture values */ }
    .to(Sink.ignore), // or send the stream to another sink for more processing
  source
)
In all likelihood, this source will be constructed either with the graph DSL or from a hand-rolled actor, or you can look into utilizing reusable helpers such as MergeHub.
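For example, a minimal single-client sketch based on Source.queue (using a Promise to capture the materialized queue; names are illustrative) could look like this:

import scala.concurrent.Promise
import akka.stream.OverflowStrategy
import akka.stream.scaladsl.{Flow, Sink, Source, SourceQueueWithComplete}
import akka.http.scaladsl.model.ws.{Message, TextMessage}

// Capture the queue when the flow is materialized (i.e. when the client connects);
// sendMessageToClient can then push to that connection at any later time.
val queuePromise = Promise[SourceQueueWithComplete[Message]]()

val outSource: Source[Message, _] =
  Source.queue[Message](16, OverflowStrategy.dropHead)
    .mapMaterializedValue { q => queuePromise.trySuccess(q); q }

def sendMessageToClient(msg: String): Unit =
  queuePromise.future.foreach(_.offer(TextMessage(msg))) // uses the implicit executionContext from above

val echoService: Flow[Message, Message, _] =
  Flow.fromSinkAndSource(
    Sink.foreach[Message](m => println(s"from client: $m")), // incoming side
    outSource                                                // outgoing side
  )

For more than one client you would keep one queue per connection (or switch to MergeHub/BroadcastHub) instead of a single Promise.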
I'm trying to implement a reverse HTTP proxy with Spray/Akka, but have run into trouble. I found that under some circumstances my proxy server keeps receiving data from the upstream server even after the client has disconnected.
Here's how I implement my Spray proxy directive (just a little modification to bthuillier's implementation):
trait ProxyDirectives {

  private def sending(f: RequestContext ⇒ HttpRequest)(implicit system: ActorSystem): Route = {
    val transport = IO(Http)(system)
    ctx ⇒ transport.tell(f(ctx), ctx.responder)
  }

  /**
   * Re-shape the original request, to match the destination server.
   */
  private def reShapeRequest(req: HttpRequest, uri: Uri): HttpRequest = {
    req.copy(
      uri = uri,
      headers = req.headers.map {
        case x: HttpHeaders.Host => HttpHeaders.Host(uri.authority.host.address, uri.authority.port)
        case x => x
      }
    )
  }

  /**
   * Proxy the request to the specified uri.
   */
  def proxyTo(uri: Uri)(implicit system: ActorSystem): Route = {
    sending(ctx => reShapeRequest(ctx.request, uri))
  }
}
This reverse proxy will work well if I put one proxy layer between the client and the server (that is, client <-> proxyTo <-> server), but it will have trouble if I put two layers between the client and the server. For example, if I've got the following simple Python HTTP server:
import socket
from threading import Thread, Semaphore
import time
from BaseHTTPServer import BaseHTTPRequestHandler, HTTPServer
from SocketServer import ThreadingMixIn


class MyHTTPHandler(BaseHTTPRequestHandler):
    protocol_version = 'HTTP/1.1'

    def do_GET(self):
        self.send_response(200)
        self.send_header('Transfer-Encoding', 'chunked')
        self.end_headers()
        for i in range(100):
            data = ('%s\n' % i).encode('utf-8')
            self.wfile.write(hex(len(data))[2:].encode('utf-8'))
            self.wfile.write(b'\r\n')
            self.wfile.write(data)
            self.wfile.write(b'\r\n')
            time.sleep(1)
        self.wfile.write(b'0\r\n\r\n')


class MyServer(ThreadingMixIn, HTTPServer):
    def server_bind(self):
        HTTPServer.server_bind(self)
        self.socket.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)

    def server_close(self):
        HTTPServer.server_close(self)


if __name__ == '__main__':
    server = MyServer(('127.0.0.1', 8080), MyHTTPHandler)
    server.serve_forever()
This basically does nothing but open a chunked response (long-running, so that we can examine the issue). And if I chain two layers of proxies in the following way:
class TestActor(val target: String)(implicit val system: ActorSystem) extends Actor
  with HttpService
  with ProxyDirectives {

  // we use the enclosing ActorContext's or ActorSystem's dispatcher for our Futures and Scheduler
  implicit private def executionContext = actorRefFactory.dispatcher

  // the HttpService trait defines only one abstract member, which
  // connects the services environment to the enclosing actor or test
  def actorRefFactory = context

  val serviceRoute: Route = {
    get {
      proxyTo(target)
    }
  }

  // runs the service routes.
  def receive = runRoute(serviceRoute) orElse handleTimeouts

  private def handleTimeouts: Receive = {
    case Timedout(x: HttpRequest) =>
      sender ! HttpResponse(StatusCodes.InternalServerError, "Request timed out.")
  }
}

object DebugMain extends App {
  val actorName = "TestActor"
  implicit val system = ActorSystem(actorName)

  // create and start our service actors
  val service = system.actorOf(
    Props { new TestActor("http://127.0.0.1:8080") },
    s"${actorName}Service"
  )
  val service2 = system.actorOf(
    Props { new TestActor("http://127.0.0.1:8081") },
    s"${actorName}2Service"
  )

  IO(Http) ! Http.Bind(service, "::0", port = 8081)
  IO(Http) ! Http.Bind(service2, "::0", port = 8082)
}
Use curl http://localhost:8082 to connect to the proxy server, and you will see that the Akka system keeps transferring data even after curl has been killed (you may turn on DEBUG-level logging to see the details).
How can I deal with this problem? Thanks.
Well, it turns out to be a very complex problem, and my solution takes nearly 100 lines of code.
Actually, the problem does not only exist when stacking two layers of proxies. With a single proxy layer the problem exists as well, but no log is printed, so I was not aware of it before.
The key problem is that when we use IO(Http) ! HttpRequest, we are actually using a host-level API of spray-can. The connections of the host-level API are managed by Spray's HttpManager, which is not accessible from our code. Thus we can do nothing with that connection, unless we send an Http.CloseAll to IO(Http), which causes all upstream connections to be closed.
(If anyone knows how to get the connection from HttpManager, please tell me.)
We have to use the connection-level API of spray-can for this situation. So I've come up with something like this:
/**
 * Proxy to upstream server, where the server response may be a long connection.
 *
 * @param uri    Target URI, where to proxy to.
 * @param system Akka actor system.
 */
def proxyToLongConnection(uri: Uri)(implicit system: ActorSystem): Route = {
  val io = IO(Http)(system)

  ctx => {
    val request = reShapeRequest(ctx.request, uri)

    // We've successfully opened a connection to upstream server, now start proxying data.
    actorRefFactory.actorOf {
      Props {
        new Actor with ActorLogging {
          private var upstream: ActorRef = null
          private val upstreamClosed = new AtomicBoolean(false)
          private val clientClosed = new AtomicBoolean(false)
          private val contextStopped = new AtomicBoolean(false)

          // Connect to the upstream server.
          {
            implicit val timeout = Timeout(FiniteDuration(10, TimeUnit.SECONDS))
            io ! Http.Connect(
              request.uri.authority.host.toString,
              request.uri.effectivePort,
              sslEncryption = request.uri.scheme == "https"
            )
            context.become(connecting)
          }

          def connecting: Receive = {
            case _: Http.Connected =>
              upstream = sender()
              upstream ! request
              context.unbecome() // Restore the context to [[receive]]

            case Http.CommandFailed(Http.Connect(address, _, _, _, _)) =>
              log.warning("Could not connect to {}", address)
              complete(StatusCodes.GatewayTimeout)(ctx)
              closeBothSide()

            case x: Http.ConnectionClosed =>
              closeBothSide()
          }

          override def receive: Receive = {
            case x: HttpResponse =>
              ctx.responder ! x.withAck(ContinueSend(0))
            case x: ChunkedMessageEnd =>
              ctx.responder ! x.withAck(ContinueSend(0))
            case x: ContinueSend =>
              closeBothSide()
            case x: Failure =>
              closeBothSide()
            case x: Http.ConnectionClosed =>
              closeBothSide()
            case x =>
              // Proxy everything else from server to the client.
              ctx.responder ! x
          }

          private def closeBothSide(): Unit = {
            if (upstream != null) {
              if (!upstreamClosed.getAndSet(true)) {
                upstream ! Http.Close
              }
            }
            if (!clientClosed.getAndSet(true)) {
              ctx.responder ! Http.Close
            }
            if (!contextStopped.getAndSet(true)) {
              context.stop(self)
            }
          }
        } // new Actor
      } // Props
    } // actorOf
  } // (ctx: RequestContext) => Unit
}
The code is a little long, and I suspect there should be a cleaner and simpler implementation (actually I'm not familiar with Akka). Nevertheless, this code works, so I'm putting this solution here. Feel free to post your own solution to this problem if you've found a better one.