I'm new to Alpakka/Akka Streams and I'm trying to set up a stream that transfers data between two SFTP servers, with my system in the middle. Here's the code:
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.alpakka.ftp.scaladsl.Sftp
import akka.stream.alpakka.ftp.{FtpCredentials, SftpSettings}
import akka.stream.scaladsl.Keep
import net.schmizz.sshj.{DefaultConfig, SSHClient}
import java.net.InetAddress
class StreamingSftpTransport {
implicit val system: ActorSystem = ActorSystem("dr-service")
implicit val materializer: ActorMaterializer = ActorMaterializer()
private val PORT = 22
private val USER = "testsftp"
private val CREDENTIALS = FtpCredentials.create(USER, "t3st123")
private val BASEPATH = s"/home/$USER"
private val FILE_NAME = "testfile"
// Set up the source system connection
private val SOURCE_HOSTNAME = "host1"
private val sourceSettings = SftpSettings.apply(host = InetAddress.getByName(SOURCE_HOSTNAME))
.withCredentials(CREDENTIALS)
.withPort(22)
private val sourceClient = new SSHClient(new DefaultConfig)
private val configuredSourceClient = Sftp(sourceClient)
// Set up the destination system connection
private val DEST_HOSTNAME = "host2"
private val destSettings = SftpSettings.apply(host = InetAddress.getByName(DEST_HOSTNAME))
.withCredentials(CREDENTIALS)
.withPort(22)
private val destClient = new SSHClient(new DefaultConfig)
private val configuredDestClient = Sftp(destClient)
/**
* Execute the stream from host1 to host2
*/
def doTransfer(): Unit = {
val source = configuredSourceClient.fromPath(s"$BASEPATH/$FILE_NAME", sourceSettings)
val sink = configuredDestClient.toPath(s"$BASEPATH/$FILE_NAME", destSettings)
val runnable = source.toMat(sink)(Keep.right).run()
}
}
I've called this from a unit test with new StreamingSftpTransport().doTransfer(), but it never attempts to connect. What am I doing wrong?
As suggested by artur in the comments on my question, I wasn't blocking on the future returned by run(), so the JVM was exiting before the connection could be established.
Adding the following line allowed the connections to be established:
Await.result(runnable, 180 seconds)
PS: Don't do this in production :)
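For completeness, the Await line needs these imports, and for real code a non-blocking alternative is sketched below (assuming runnable is the Future[IOResult] materialized by run() inside doTransfer):
import scala.concurrent.Await
import scala.concurrent.duration._

// Non-blocking alternative: react to completion instead of blocking the calling thread
import system.dispatcher
runnable.onComplete { result =>
  println(s"Transfer finished: $result")
  system.terminate() // shut the actor system down once the transfer has completed
}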
I've found several resources that describe how to configure ssl-config options in the application.conf file, and I've worked out how to access those settings using AkkaSSLConfig.get(). I've also seen that an HTTPS context can be created by passing an AkkaSSLConfig object to ConnectionContext.https().
Is it possible to use this for non-http servers? Is the context returned somehow specific to http? I'm trying to take advantage of ssl-config but it isn't clear to me that it provides any advantages for non-http servers and I don't see any convenient way of building a context from the ssl-config definition, in which case it seems I may as well define the context manually.
Lastly, any examples of building the context for non-http servers are difficult to find. It seems the process may be the same as for http servers, but I'm finding that examples often include the use of classes/methods that have 'http' in the name. If anyone knows of a good example I'd be very appreciative.
import java.io.{File, FileInputStream}
import java.security.{KeyStore, SecureRandom}
import akka.actor.ActorSystem
import akka.http.scaladsl.Http.ServerBinding
import akka.http.scaladsl.model.{HttpResponse, StatusCodes}
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.http.scaladsl.server.Directives.pathSingleSlash
import akka.http.scaladsl.{ConnectionContext, Http}
import akka.stream.{ActorMaterializer, TLSClientAuth}
import com.typesafe.sslconfig.akka.AkkaSSLConfig
import com.typesafe.sslconfig.ssl.{KeyManagerConfig, KeyManagerFactoryWrapper, KeyStoreConfig, SSLConfigFactory, SSLConfigSettings}
import javax.net.ssl.{SSLContext, TrustManagerFactory}
import scala.concurrent.{ExecutionContext, Future}
object Test extends App{
implicit val actorSystem: ActorSystem = ActorSystem("test")
implicit val materializer: ActorMaterializer = ActorMaterializer()
implicit val executionContext: ExecutionContext = actorSystem.dispatcher
val ksConfig: KeyStoreConfig = KeyStoreConfig.apply(data = None,
filePath = Some("/Users/mshaik/testApp/src/main/resources/keystore/localhost.p12")
).withPassword(Some("test"))
val kmConfig: KeyManagerConfig = KeyManagerConfig().withKeyStoreConfigs(List(ksConfig))
val sslConfigSettings: SSLConfigSettings = SSLConfigFactory.defaultConfig.withKeyManagerConfig(kmConfig)
val akkaSSLConfig: AkkaSSLConfig = AkkaSSLConfig.get(actorSystem).withSettings(sslConfigSettings)
val ks: KeyStore = KeyStore.getInstance("PKCS12")
ks.load(new FileInputStream(new File(ksConfig.filePath.get)), ksConfig.password.get.toCharArray)
val kmf: KeyManagerFactoryWrapper = akkaSSLConfig.buildKeyManagerFactory(sslConfigSettings)
kmf.init(ks, ksConfig.password.get.toCharArray)
val tmf: TrustManagerFactory = TrustManagerFactory.getInstance("SunX509")
tmf.init(ks)
val sslContext: SSLContext = SSLContext.getInstance("TLS")
sslContext.init(kmf.getKeyManagers, tmf.getTrustManagers, new SecureRandom)
val ctx: ConnectionContext = ConnectionContext.https(sslContext,
sslConfig = Some(akkaSSLConfig),
clientAuth = Some(TLSClientAuth.Want)
)
var bindingFuture: Future[ServerBinding] = _
Http().setDefaultServerHttpContext(ctx)
val route: Route = pathSingleSlash {
get {
complete(HttpResponse(StatusCodes.OK, entity = "Welcome to base path!"))
}
}
try{
bindingFuture = Http().bindAndHandle(route, "localhost", 8085, connectionContext = ctx)
println( s"Server online at https://localhost:8085/")
} catch {
case ex: Exception =>
println(this.getClass, ex.getMessage, ex)
materializer.shutdown()
actorSystem.terminate()
}
}
I believe the answer to my question is that there isn't much use in thoroughly configuring TLS options within ssl-config when creating a non-HTTP TLS connection.
Not a single example I found shows how to define the keystore and truststore parameters within the config and then use those settings to create the SSLContext object (all the examples configure the keystore/truststore manually, in code). Ultimately I found ssl-config wasn't useful for storing these configurations. The only thing I still find it useful for is obtaining the lists of default ciphers and default protocols (and hence I still use it in my code).
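For what it's worth, reading those values out of ssl-config looks roughly like this (a sketch, given an implicit ActorSystem; enabledCipherSuites and enabledProtocols are Options that are empty when the config leaves them at the JVM defaults):
val sslConfig = AkkaSSLConfig.get(system)
val defaultCiphers: Option[Seq[String]] = sslConfig.config.enabledCipherSuites
val defaultProtocols: Option[Seq[String]] = sslConfig.config.enabledProtocols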
For reference, below is what I ended up doing to configure the context and initial session structure and create the TCP server. This is very similar to other examples found in the documentation as well as some responses here on SO. Some differences in this response: 1) it requires client certificates, 2) it is for a server (as opposed to a client), 3) it shows how to use the factory methods to create the TLS BidiFlow (note the Tcp().bindTls call), and 4) it lets you pass in the Flow that will handle the incoming communications.
// Imports added for completeness; StrictLogging here is assumed to come from scala-logging
import java.io.FileInputStream
import java.security.{KeyStore, SecureRandom}
import javax.net.ssl.{KeyManagerFactory, SSLContext, TrustManagerFactory}
import akka.NotUsed
import akka.actor.ActorSystem
import akka.stream.{ActorMaterializer, TLSClientAuth, TLSProtocol}
import akka.stream.scaladsl.{Flow, Source, Tcp}
import akka.util.ByteString
import com.typesafe.scalalogging.StrictLogging
import com.typesafe.sslconfig.akka.AkkaSSLConfig
import scala.concurrent.Future
object TcpServerBindTls extends StrictLogging {
def apply(hostInterface: String, tcpPort: Int, handler: Flow[ByteString, ByteString, NotUsed])(implicit system: ActorSystem, materializer: ActorMaterializer) = {
val sslContext = buildSSLContext
val firstSession = prepareFirstSession(sslContext)
val connections: Source[Tcp.IncomingConnection, Future[Tcp.ServerBinding]] = Tcp().bindTls(hostInterface, tcpPort, sslContext, firstSession)
connections runForeach { connection =>
logger.info(s"New connection: ${connection}")
connection.handleWith(handler)
}
}
def prepareFirstSession(sslContext: SSLContext)(implicit system: ActorSystem) = {
val sslConfig = AkkaSSLConfig.get(system);
val config = sslConfig.config;
val defaultParams = sslContext.getDefaultSSLParameters();
val defaultProtocols = defaultParams.getProtocols();
val defaultCiphers = defaultParams.getCipherSuites();
val clientAuth = TLSClientAuth.need
defaultParams.setProtocols(defaultProtocols)
defaultParams.setCipherSuites(defaultCiphers)
val firstSession = new TLSProtocol.NegotiateNewSession(None, None, None, None)
.withCipherSuites(defaultCiphers: _*)
.withProtocols(defaultProtocols: _*)
.withParameters(defaultParams)
firstSession
}
def buildSSLContext: SSLContext = {
val bufferedSource = io.Source.fromFile("/path/to/password/file")
val keyStorePassword = bufferedSource.getLines.mkString
bufferedSource.close
val keyStore = KeyStore.getInstance("PKCS12");
val keyStoreLocation = "/path/to/keystore/file/server.p12"
val keyStoreFIS = new FileInputStream(keyStoreLocation)
keyStore.load(keyStoreFIS, keyStorePassword.toCharArray())
val trustStore = KeyStore.getInstance("PKCS12");
val trustStoreLocation = "/path/to/truststore/file/truststore.p12" // placeholder path, like the keystore above
val trustStoreFIS = new FileInputStream(trustStoreLocation)
trustStore.load(trustStoreFIS, keyStorePassword.toCharArray())
val kmf = KeyManagerFactory.getInstance("SunX509")
kmf.init(keyStore, keyStorePassword.toCharArray())
val tmf = TrustManagerFactory.getInstance("SunX509")
tmf.init(trustStore)
val sslContext = SSLContext.getInstance("TLS")
sslContext.init(kmf.getKeyManagers, tmf.getTrustManagers, new SecureRandom())
sslContext
}
}
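For context, a hypothetical way to wire this up with a trivial echo handler might look like the following (handler, actor system name, interface and port are all made up; imports as above):
implicit val system: ActorSystem = ActorSystem("tls-server")
implicit val materializer: ActorMaterializer = ActorMaterializer()

// Echo whatever bytes arrive straight back to the client
val echoHandler: Flow[ByteString, ByteString, NotUsed] = Flow[ByteString]

TcpServerBindTls("0.0.0.0", 9443, echoHandler)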
I have a Raspberry Pi on my network with an LED strip attached to it.
My goal is to create a jar file that will sit on the Pi, monitor system events such as logins and load average, and drive the LEDs based on those inputs.
To continuously monitor the logged-in users, I am trying to use Akka actors. Using the examples provided here, this is what I've got so far:
import com.pi4j.io.gpio.GpioFactory
import com.pi4j.io.gpio.RaspiPin
import sys.process._
import akka.actor.{Actor, Props, ActorSystem}
import scala.concurrent.duration._
val who :String = "who".!!
class Blinker extends Actor {
private def gpio = GpioFactory.getInstance
private def led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if who.contains("pi") => led.blink(250)
case x if who.contains("moocow") => println("falalalala")
}
val blinker = system.actorOf(Props(classOf[Blinker], this))
val cancellable = system.scheduler.schedule(
0 milliseconds,
50 milliseconds,
blinker,
who)
}
However, system is not recognised by my IDE (IntelliJ); it says "cannot resolve symbol".
I also have a main object like this:
object ledStrip {
def main(args: Array[String]): Unit = {
val blink = new Blinker
// blink.receive
}
}
In main, I'm not quite sure how to initialise the application.
Needless to say, this is my first time writing a Scala program.
Help?
Edit:
Here is the updated program after incorporating what Michal has said
class Blinker extends Actor {
val who: String = "who".!!
private val gpio = GpioFactory.getInstance
private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if who.contains("pi") => led.blink(250)
case x if who.contains("moocow") => println("falalalala")
}
val system = ActorSystem()
}
object ledStrip extends Blinker {
def main(args: Array[String]): Unit = {
val blinker = system.actorOf(Props(classOf[Blinker], this))
import system.dispatcher
val cancellable =
system.scheduler.schedule(
50 milliseconds,
5000 milliseconds,
blinker,
who)
}
}
This program compiles fine, but throws the following error upon execution:
Exception in thread "main" java.lang.ExceptionInInitializerError
    at ledStrip.main(ledStrip.scala)
Caused by: akka.actor.ActorInitializationException: You cannot create an instance of [ledStrip$] explicitly using the constructor (new). You have to use one of the 'actorOf' factory methods to create a new actor. See the documentation.
    at akka.actor.ActorInitializationException$.apply(Actor.scala:194)
    at akka.actor.Actor.$init$(Actor.scala:472)
    at Blinker.<init>(ledStrip.scala:15)
    at ledStrip$.<init>(ledStrip.scala:34)
    at ledStrip$.<clinit>(ledStrip.scala)
    ... 1 more
Edit 2
Code that compiles and runs (behaviour is still not as desired: blink(1500) is never executed when user "pi" logs out from the shell).
object sysUser {
val who: String = "who".!!
}
class Blinker extends Actor {
private val gpio = GpioFactory.getInstance
private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if x.toString.contains("pi") => led.blink(50)
case x if x.toString.contains("moocow") => println("falalalala")
case _ => led.blink(1500)
}
}
object ledStrip {
def main(args: Array[String]): Unit = {
val system = ActorSystem()
val blinker = system.actorOf(Props[Blinker], "blinker")
import system.dispatcher
val cancellable =
system.scheduler.schedule(
50 milliseconds,
5000 milliseconds,
blinker,
sysUser.who)
}
}
Well, it looks like you haven't defined "system" anywhere. See this example for instance:
https://doc.akka.io/docs/akka/current/actors.html#here-is-another-example-that-you-can-edit-and-run-in-the-browser-
you'll find this line there:
val system = ActorSystem("pingpong")
That's what creates the ActorSystem and defines the val called "system", which you then call methods on.
In main, I don't think you want to create another instance with "new Blinker"; just use:
system.actorOf(Props[Blinker], "blinker")
(which you are already doing and putting it into the "blinker" val)
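Putting those two points together, the main might look roughly like this (a sketch, with the actor system name made up):
object ledStrip {
  def main(args: Array[String]): Unit = {
    val system = ActorSystem("pingpong")
    val blinker = system.actorOf(Props[Blinker], "blinker")
    // schedule messages to blinker from here via system.scheduler
  }
}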
It seems to be just an Akka usage issue. A few things in your code looked strange to me, so I changed them; see change1, change2 and change3 in the code below, FYI.
class Blinker extends Actor {
val who: String = "who".!!
private val gpio = GpioFactory.getInstance
private val led = gpio.provisionDigitalOutputPin(RaspiPin.GPIO_08)
def receive = {
case x if who.contains("pi") => led.blink(250)
case x if who.contains("moocow") => println("falalalala")
}
}
object ledStrip { // change1
def main(args: Array[String]): Unit = {
val system = ActorSystem() // change2
val blinker = system.actorOf(Props(classOf[Blinker])) // change3
import system.dispatcher
val cancellable =
system.scheduler.schedule(
50 milliseconds,
5000 milliseconds,
blinker,
"who".!!) // change4: 'who' is a member of Blinker and not visible here, so run the command directly
}
}
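Regarding the behaviour noted in Edit 2 (blink(1500) never firing once user "pi" logs out): one likely cause is that sysUser.who is a val, so the who command runs exactly once at startup and the scheduler keeps re-sending that same snapshot. A minimal sketch of a variation that re-runs the command on every tick, assuming that is the intent:
object sysUser {
  def who: String = "who".!!  // def rather than val: the command is executed on every access
}

// In main: schedule a block instead of a fixed message, so sysUser.who is
// re-evaluated each time the scheduler fires
val cancellable = system.scheduler.schedule(50.milliseconds, 5000.milliseconds) {
  blinker ! sysUser.who
}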
Imagine a scenario where the connection IP becomes unreachable. In this case, QuickFIX/J will try to reconnect automagically every 30s or so, as configured by the ReconnectInterval parameter. How do I avoid this behaviour?
Your application class should extend ApplicationExtended instead of Application. Then you can override the canLogon method; if you return false, QuickFIX/J will not try to log on.
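A rough sketch of that approach (this assumes your QuickFIX/J version ships quickfix.ApplicationExtended and the no-op quickfix.ApplicationAdapter base class; the flag name is made up):
import quickfix.{ApplicationAdapter, ApplicationExtended, SessionID}

class MyFixApplication extends ApplicationAdapter with ApplicationExtended {
  @volatile private var logonAllowed = true

  // Returning false here makes QuickFIX/J skip the logon attempt when it reconnects
  override def canLogon(sessionID: SessionID): Boolean = logonAllowed

  override def onBeforeSessionReset(sessionID: SessionID): Unit = ()

  def disableLogon(): Unit = logonAllowed = false
}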
The idea is to retrieve the ReconnectInterval parameter from QuickFIX/J and create a separate thread that kills the Session if and only if the Session is not yet logged on.
For this to work, our thread must obviously fire before the QuickFIX/J thread tries to reconnect. In other words, if you configured ReconnectInterval=30, you have to fire the aforementioned thread before that interval elapses and close all initiators. This way, QuickFIX/J never actually ends up retrying the connection.
import java.io.InputStream
import java.util.Locale
import scala.util.control.NonFatal
import quickfix._
import quickfix.field._
class SimpleConnection(val configInputStream: InputStream,
val messageFactory: quickfix.MessageFactory)
extends MessageCracker
with quickfix.Application {
private val locale = Locale.getDefault.getCountry
private val settings = new SessionSettings(configInputStream)
private val storeFactory = new FileStoreFactory(settings)
private val loggerFactory = new QuickfixLoggerFactory(settings)
private var initiatorOption: Option[SocketInitiator] = None
private var sessionOption : Option[SessionID] = None
private var senderSeqOption: Option[Int] = None
private var targetSeqOption: Option[Int] = None
override def onLogout(sessionId: SessionID): Unit = {
log.info("onLogout called for %s".format(sessionId))
initiatorOption.foreach(initiator => initiator.stop(true))
fireDisconnectedState // inform listeners
initiatorOption = None
sessionOption = None
}
override def onCreate(sessionId: SessionID): Unit = {
log.info("onCreate called for %s".format(sessionId))
val session = Session.lookupSession(sessionId)
val interval = settings.getLong(session.getSessionID, "ReconnectInterval")
if(interval <= 10)
log.error("ReconnectInterval should be at least 10secs.")
else {
import java.util.concurrent.Executors
import scala.concurrent.ExecutionContext
val executor = ExecutionContext.fromExecutor(Executors.newSingleThreadExecutor)
val monitor = new Runnable {
override def run(): Unit = {
val sleep = (interval-5)*1000
Thread.sleep(sleep)
if(!session.isLoggedOn) {
log.warn("Killing QuickFix/J session before reconnection.")
onLogout(session.getSessionID)
}
}
}
executor.execute(monitor)
}
senderSeqOption.foreach(session.setNextSenderMsgSeqNum(_))
targetSeqOption.foreach(session.setNextTargetMsgSeqNum(_))
senderSeqOption = None
targetSeqOption = None
}
}
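For reference, ReconnectInterval is a plain session setting in the QuickFIX/J configuration that SessionSettings reads; an illustrative fragment (all values made up):
[default]
ConnectionType=initiator
ReconnectInterval=30
HeartBtInt=30

[session]
BeginString=FIX.4.4
SenderCompID=MYSENDER
TargetCompID=THEIRTARGET
SocketConnectHost=fix.example.com
SocketConnectPort=9876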
I wrote the following class for indexing documents in ElasticSearch:
import java.net.InetAddress
import com.typesafe.config.ConfigFactory
import org.elasticsearch.client.transport.TransportClient
import org.elasticsearch.common.settings.Settings
import org.elasticsearch.common.transport.InetSocketTransportAddress
import play.api.libs.json.{JsString, JsValue}
/**
* Created by liana on 12/07/16.
*/
class ElasticSearchConnector {
private var transportClient: TransportClient = null
private val host = "localhost"
private val port = 9300
private val cluster = "elasticsearch"
private val indexName = "tests"
private val docType = "test"
def configElasticSearch(): Unit =
{
val settings = Settings.settingsBuilder().put("cluster.name", cluster).build()
transportClient = new TransportClient(settings)
transportClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host), port.toInt))
}
def putText(json: String, id: Int): String =
{
val response = transportClient.prepareIndex(indexName, docType, id)
.setSource(json)
.get()
val responseId = response.getId
responseId
}
}
Then I use it as follows:
val json = """val jsonString =
{
"title": "Elastic",
"price": 2000,
"author":{
"first": "Zachary",
"last": "Tong";
}
}"""
val ec = new ElasticSearchConnector()
ec.configElasticSearch()
val id = ec.putText(json, 1)
System.out.println(id)
This is the error message I got:
Error:(28, 23) constructor TransportClient in class TransportClient
cannot be accessed in class ElasticSearchConnector
transportClient = new TransportClient(settings)
What is wrong here?
In the Elasticsearch API the TransportClient class has no public constructor, only a private one, so you cannot "new" up an instance of TransportClient directly. The API uses the Builder pattern fairly heavily, so in order to create an instance of the TransportClient you will need to do something like:
val settings = Settings.settingsBuilder().put("cluster.name", cluster).build()
val transportClient = TransportClient.builder().settings(settings).build()
transportClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host), port.toInt))
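Applied to the class above, configElasticSearch would then look something like this (a sketch against the 2.x transport client API):
def configElasticSearch(): Unit =
{
  val settings = Settings.settingsBuilder().put("cluster.name", cluster).build()
  transportClient = TransportClient.builder().settings(settings).build()
  transportClient.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(host), port))
}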
I have two spray http clients, such as the following:
val pipelineFoo: HttpRequest => Future[Foo] = (
sendReceive
~> unmarshal[Message.Foo])
val pipelineBar: HttpRequest => Future[Bar] = (
sendReceive
~> unmarshal[Message.Bar])
def execFoo(h: String, p: Int): Future[Foo] = {
val uri = Uri.from(scheme = "http", host = h, port = p, path = "/foo")
pipelineFoo(Get(uri))
}
def execBar(h: String, p: Int): Future[Bar] = {
val uri = Uri.from(scheme = "http", host = h, port = p, path = "/bar")
pipelineBar(Get(uri))
}
I would like to have the foo request retry several times with a long timeout, and have the bar request not retry and use a short timeout (say 1 second). How can I achieve this in spray? (Sorry if this is somewhere in the documentation, but I've been unable to find it; I've only found documentation on setting such config parameters globally.)
This shouldn't be too difficult. sendReceive can actually take more parameters. For example, here is the signature for one of the alternatives:
def sendReceive(transport: ActorRef)(implicit ec: ExecutionContext, futureTimeout: Timeout): SendReceive
I use this myself for similar scenarios, where I need a larger number of retries and longer timeouts when hitting an external service versus one of our internal ones.
Here's an example of my pipeline that I use:
lazy val pipeline: HttpRequest => Future[HttpResponse] = (
addCredentials(BasicHttpCredentials(clientConnection.credentials._1, clientConnection.credentials._2))
~> addHeader(`User-Agent`(ProductVersion("<YOUR NAME HERE>", "<YOUR VERSION HERE>", "http://github.com/<WHEREVER YOUR PROJECT IS>"), ProductVersion("spray-client", "1.3.1", "http://spray.io")))
~> logRequest(log)
~> sendReceive(clientConnection.connection)(clientConnection.context, clientConnection.timeout)
~> decode(Deflate)
~> decode(Gzip)
)
The clientConnection is nothing special; it's just a case class I made that can be filled in manually in code or from some config in your application.conf.
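For illustration, such a case class and two pipelines with different per-request timeouts could look roughly like this (every name below is an assumption, not part of the answer; fooConnector and barConnector would be host-connector ActorRefs obtained via IO(Http) ? HostConnectorSetup(...), and the Message.Foo/Message.Bar unmarshallers come from the question):
import akka.actor.ActorRef
import akka.util.Timeout
import scala.concurrent.{ExecutionContext, Future}
import scala.concurrent.duration._
import spray.client.pipelining._
import spray.http.HttpRequest

// Hypothetical shape of the connection holder
case class ClientConnection(
  connection: ActorRef,            // a spray host connector to send requests through
  credentials: (String, String),
  context: ExecutionContext,
  timeout: Timeout
)

// foo: generous timeout; bar: fail fast after one second
val fooPipeline: HttpRequest => Future[Message.Foo] =
  sendReceive(fooConnector)(system.dispatcher, Timeout(60.seconds)) ~> unmarshal[Message.Foo]
val barPipeline: HttpRequest => Future[Message.Bar] =
  sendReceive(barConnector)(system.dispatcher, Timeout(1.second)) ~> unmarshal[Message.Bar]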
Two years later, but maybe still useful for other people.
We had the same need, and we based our solution on a copy/paste of the Spray connector files.
import akka.actor.{ActorRef, ActorSystem}
import akka.io.IO
import akka.pattern.ask
import com.typesafe.config.Config
import spray.can.Http
import spray.can.Http.HostConnectorSetup
import spray.can.client.HostConnectorSettings
import spray.client.pipelining.sendReceive
import spray.http.Uri.Host
import spray.http.{HttpRequest, HttpResponse, Uri}
import scala.concurrent.duration._
import scala.concurrent.{ExecutionContextExecutor, Future}
case class HttpCustomSettings(
requestTimeout: Duration,
maxRetries: Int,
maxConnections: Int
)
/**
* Implements a new HTTP client on top of Akka IO and spray HTTP
* to provide a way for callers to set client parameters on a per-request basis instead
* of globally in application.conf
*
* This client defaults all its configuration with the one set in spray.conf
* see spray.can.client and spray.can.host-connector
* But you can override some of them on demand
* - maxRetries
* - requestTimeout
* - maxConnections
*/
class HttpClient(actorSystem: ActorSystem, config: Config) {
private implicit val context: ActorSystem = actorSystem
private implicit val dispatcher: ExecutionContextExecutor = actorSystem.dispatcher
private val HTTP = "http"
private val HTTPS = "https"
private val defaultSettings: HostConnectorSettings =
HostConnectorSettings.fromSubConfig(config.getConfig("spray.can"))
//not configurable since this timeout has little to no use practically
//this timeout DOES NOT kill the open connection
//http://kamon.io/teamblog/2014/11/02/understanding-spray-client-timeout-settings/
private implicit val clientFutureTimeout: akka.util.Timeout = 5.seconds
def send(
request: HttpRequest,
customSettings: Option[HttpCustomSettings] = None
): Future[HttpResponse] = {
val pipeline: Future[HttpRequest ⇒ Future[HttpResponse]] =
pipelineForUri(request.uri, customSettings)
pipeline.flatMap(send ⇒ send(request))
}
/**
* To understand this method better:
* @see http://kamon.io/assets/img/diagrams/spray-client-actors.png
* @see [[spray.can.HttpManager]]
* @see [[spray.can.client.HttpHostConnector]]
* @see [[spray.can.Http]]
*/
private def pipelineForUri(
uri: Uri,
customSettings: Option[HttpCustomSettings]
): Future[HttpRequest ⇒ Future[HttpResponse]] = {
for {
Http.HostConnectorInfo(connector, _) ← IO(Http) ? connectorSetup(uri, customSettings)
} yield sendReceive(connector)
}
private def connectorSetup(
uri: Uri,
customSettings: Option[HttpCustomSettings]
): HostConnectorSetup = {
require(
uri.scheme == HTTP || uri.scheme == HTTPS,
s"Not a valid $HTTP URI scheme: '${uri.scheme}' in '$uri'. (Did you forget $HTTP:// ?)"
)
val connector: HostConnectorSetup = HostConnectorSetup(
uri.authority.host.toString,
uri.effectivePort,
sslEncryption = uri.scheme == HTTPS
)
customSettings match {
case Some(custom) ⇒ connector.copy(settings = Option(mapCustomSettings(defaultSettings, custom)))
case None ⇒ connector.copy(settings = Option(defaultSettings))
}
}
private def mapCustomSettings(
settings: HostConnectorSettings,
customSettings: HttpCustomSettings
): HostConnectorSettings = {
settings.copy(
maxRetries = customSettings.maxRetries,
maxConnections = customSettings.maxConnections,
connectionSettings = settings.connectionSettings.copy(requestTimeout = customSettings.requestTimeout)
)
}
}
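Hypothetical usage, tying this back to the original foo/bar question (host, port and numbers are made up; actorSystem and config are whatever you construct the client with):
import scala.concurrent.Future
import scala.concurrent.duration._
import spray.client.pipelining.Get
import spray.http.HttpResponse

val client = new HttpClient(actorSystem, config)

// foo: several retries and a long timeout; bar: no retries and a short timeout
val fooSettings = HttpCustomSettings(requestTimeout = 60.seconds, maxRetries = 5, maxConnections = 4)
val barSettings = HttpCustomSettings(requestTimeout = 1.second, maxRetries = 0, maxConnections = 4)

val fooResponse: Future[HttpResponse] = client.send(Get("http://some-host:8080/foo"), Some(fooSettings))
val barResponse: Future[HttpResponse] = client.send(Get("http://some-host:8080/bar"), Some(barSettings))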