How to unit test gauge metrics in Flink - Scala

I have a DataStream in Flink and I generate my own metrics using a gauge in a ProcessFunction.
As these metrics are important for my activity, I would like to unit test them once the flow is executed.
Unfortunately, I didn't find a way to implement a proper test reporter.
Here is a simple piece of code illustrating my issue.
I have two concerns with this code:
how do I trigger the gauge?
how do I get the reporter instantiated by env.execute?
Here is the sample:
import java.util.concurrent.atomic.AtomicInteger
import org.apache.flink.api.scala.metrics.ScalaGauge
import org.apache.flink.configuration.{ConfigConstants, Configuration}
import org.apache.flink.metrics.reporter.AbstractReporter
import org.apache.flink.metrics.{Gauge, Metric, MetricConfig}
import org.apache.flink.streaming.api.TimeCharacteristic
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}
import org.apache.flink.util.Collector
import org.scalatest.FunSuite
import org.scalatest.Matchers._
import org.scalatest.PartialFunctionValues._
import scala.collection.JavaConverters._
import scala.collection.mutable
/* Test based on Flink test example https://ci.apache.org/projects/flink/flink-docs-master/dev/stream/testing.html */
class MultiplyByTwo extends ProcessFunction[Long, Long] {
override def processElement(data: Long, context: ProcessFunction[Long, Long]#Context, collector: Collector[Long]): Unit = {
collector.collect(data * 2L)
}
val nbrCalls = new AtomicInteger(0)
override def open(parameters: Configuration): Unit = {
getRuntimeContext.getMetricGroup
.addGroup("counter")
.gauge[Int, ScalaGauge[Int]]("call" , ScalaGauge[Int]( () => nbrCalls.get()))
}
}
// create a testing sink
class CollectSink extends SinkFunction[Long] {
override def invoke(value: Long): Unit = {
synchronized {
CollectSink.values.add(value)
}
}
}
object CollectSink {
val values: java.util.ArrayList[Long] = new java.util.ArrayList[Long]()
}
class StackOverflowTestReporter extends AbstractReporter {
var gaugesMetrics : mutable.Map[String, String] = mutable.Map[String, String]()
override def open(metricConfig: MetricConfig): Unit = {}
override def close(): Unit = {}
override def filterCharacters(s: String): String = s
def report(): Unit = {
gaugesMetrics = this.gauges.asScala.map(t => (metricValue(t._1), t._2))
}
private def metricValue(m: Metric): String = {
m match {
case g: Gauge[_] => g.getValue.toString
case _ => ""
}
}
}
class StackOverflowTest extends FunSuite with StreamingMultipleProgramsTestBase {
def createConfigForReporter(reporterName : String) : Configuration = {
val cfg : Configuration = new Configuration()
cfg.setString(ConfigConstants.METRICS_REPORTER_PREFIX + reporterName + "." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, classOf[StackOverflowTestReporter].getName)
cfg
}
test("test_metrics") {
val env = StreamExecutionEnvironment.createLocalEnvironment(
StreamExecutionEnvironment.getDefaultLocalParallelism,
createConfigForReporter("reporter"))
// configure your test environment
env.setParallelism(1)
env.setStreamTimeCharacteristic(TimeCharacteristic.ProcessingTime)
// values are collected in a static variable
CollectSink.values.clear()
// create a stream of custom elements and apply transformations
env.fromElements[Long](1L, 21L, 22L)
.process(new MultiplyByTwo())
.addSink(new CollectSink())
// execute
env.execute()
// verify your results
CollectSink.values should have length 3
CollectSink.values should contain (2L)
CollectSink.values should contain (42L)
CollectSink.values should contain (44L)
//verify gauge counter
//pseudo code ...
val testReporter : StackOverflowTestReporter = _ // how to get testReporter instantiate in env
testReporter.gaugesMetrics should have size 1
testReporter.gaugesMetrics should contain key "count.call"
testReporter.gaugesMetrics.valueAt("count.call") should be equals("3")
}
}
Solution thanks to Chesnay Schepler
import java.util.concurrent.atomic.AtomicInteger
import org.apache.flink.api.common.time.Time
import org.apache.flink.api.scala.metrics.ScalaGauge
import org.apache.flink.configuration.{ConfigConstants, Configuration}
import org.apache.flink.metrics.reporter.MetricReporter
import org.apache.flink.metrics.{Metric, MetricConfig, MetricGroup}
import org.apache.flink.streaming.api.functions.ProcessFunction
import org.apache.flink.streaming.api.functions.sink.SinkFunction
import org.apache.flink.streaming.api.scala.{StreamExecutionEnvironment, _}
import org.apache.flink.test.util.MiniClusterResource
import org.apache.flink.util.Collector
import org.scalatest.Matchers._
import org.scalatest.PartialFunctionValues._
import org.scalatest.{BeforeAndAfterAll, FunSuite}
import scala.collection.mutable
/* Test based on Flink test example https://ci.apache.org/projects/flink/flink-docs-master/dev/stream/testing.html */
class MultiplyByTwo extends ProcessFunction[Long, Long] {
override def processElement(data: Long, context: ProcessFunction[Long, Long]#Context, collector: Collector[Long]): Unit = {
nbrCalls.incrementAndGet()
collector.collect(data * 2L)
}
val nbrCalls = new AtomicInteger(0)
override def open(parameters: Configuration): Unit = {
getRuntimeContext.getMetricGroup
.addGroup("counter")
.gauge[Int, ScalaGauge[Int]]("call" , ScalaGauge[Int]( () => nbrCalls.get()))
}
}
// create a testing sink
class CollectSink extends SinkFunction[Long] {
import CollectSink._
override def invoke(value: Long): Unit = {
synchronized {
values.add(value)
}
}
}
object CollectSink {
val values: java.util.ArrayList[Long] = new java.util.ArrayList[Long]()
}
class StackOverflowTestReporter extends MetricReporter {
import StackOverflowTestReporter._
override def open(metricConfig: MetricConfig): Unit = {}
override def close(): Unit = {}
override def notifyOfAddedMetric(metric: Metric, metricName: String, group: MetricGroup) : Unit = {
metric match {
case gauge: ScalaGauge[_] => {
// drop the scope components that are meaningless for this test; with the default task scope format (<host>.taskmanager.<tm_id>.<job_name>.<task_name>.<subtask_index>) these seem to be the first 6 items
val gaugeKey = group.getScopeComponents.toSeq.drop(6).mkString(".") + "." + metricName
gaugesMetrics(gaugeKey) = gauge.asInstanceOf[ScalaGauge[Int]]
}
case _ =>
}
}
override def notifyOfRemovedMetric(metric: Metric, metricName: String, group: MetricGroup): Unit = {}
}
object StackOverflowTestReporter {
var gaugesMetrics : mutable.Map[String, ScalaGauge[Int]] = mutable.Map[String, ScalaGauge[Int]]()
}
class StackOverflowTest extends FunSuite with BeforeAndAfterAll{
val miniClusterResource : MiniClusterResource = buildMiniClusterResource()
override def beforeAll(): Unit = {
CollectSink.values.clear()
StackOverflowTestReporter.gaugesMetrics.clear()
miniClusterResource.before()
}
override def afterAll(): Unit = {
miniClusterResource.after()
}
def createConfigForReporter() : Configuration = {
val cfg : Configuration = new Configuration()
cfg.setString(ConfigConstants.METRICS_REPORTER_PREFIX + "reporter" + "." + ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX, classOf[StackOverflowTestReporter].getName)
cfg
}
def buildMiniClusterResource() : MiniClusterResource = new MiniClusterResource(
new MiniClusterResource.MiniClusterResourceConfiguration(
createConfigForReporter(), 1, 1, Time.milliseconds(50L)))
test("test_metrics") {
val env = StreamExecutionEnvironment.getExecutionEnvironment
env.fromElements[Long](1L, 21L, 22L)
.process(new MultiplyByTwo())
.addSink(new CollectSink())
env.execute()
CollectSink.values should have length 3
CollectSink.values should contain (2L)
CollectSink.values should contain (42L)
CollectSink.values should contain (44L)
//verify gauge counter
val gaugeValues = StackOverflowTestReporter.gaugesMetrics.map(t => (t._1, t._2.getValue()))
gaugeValues should have size 1
gaugeValues should contain ("counter.call" -> 3)
}
}

Your best bet is to use a MiniClusterResource to explicitly start a cluster before the job and configure a reporter that checks for specific metrics and exposes them through static fields.
@Rule
public final MiniClusterResource clusterResource = new MiniClusterResource(
new MiniClusterResourceConfiguration.Builder()
.setConfiguration(getConfig())
.build());
private static Configuration getConfig() {
Configuration config = new Configuration();
config.setString(
ConfigConstants.METRICS_REPORTER_PREFIX +
"myTestReporter." +
ConfigConstants.METRICS_REPORTER_CLASS_SUFFIX,
MyTestReporter.class.getName());
return config;
}
public static class MyTestReporter implements MetricReporter {
static volatile Gauge<?> myGauge = null;
@Override
public void open(MetricConfig metricConfig) {
}
@Override
public void close() {
}
@Override
public void notifyOfAddedMetric(Metric metric, String name, MetricGroup metricGroup) {
if ("myMetric".equals(name)) {
myGauge = (Gauge<?>) metric;
}
}
@Override
public void notifyOfRemovedMetric(Metric metric, String s, MetricGroup metricGroup) {
}
}

Related

How to eval code that uses InterfaceStability annotation (that fails with "illegal cyclic reference involving class InterfaceStability")?

I want to dynamically generate some Kafka Streams code while the program is running, but I get an exception when I compile the following Kafka Streams code with the Scala toolbox eval:
val toolbox = runtimeMirror(getClass.getClassLoader).mkToolBox()
val code =
"""
|import org.apache.kafka.streams.scala.kstream.KStream
|import org.apache.kafka.streams.scala.StreamsBuilder
|
|class ClassA {
| def createStream(): KStream[String, String] = {
| import org.apache.kafka.streams.scala.ImplicitConversions._
| import org.apache.kafka.streams.scala.Serdes._
| new StreamsBuilder().stream("TestTopic")
| }
|}
|scala.reflect.classTag[ClassA].runtimeClass
""".stripMargin
toolbox.eval(toolbox.parse(code))
errors:
reflective compilation has failed:
illegal cyclic reference involving class InterfaceStability
scala.tools.reflect.ToolBoxError: reflective compilation has failed:
illegal cyclic reference involving class InterfaceStability
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl$ToolBoxGlobal.throwIfErrors(ToolBoxFactory.scala:331)
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl$ToolBoxGlobal.wrapInPackageAndCompile(ToolBoxFactory.scala:213)
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl$ToolBoxGlobal.compile(ToolBoxFactory.scala:267)
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl.$anonfun$compile$13(ToolBoxFactory.scala:444)
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl$withCompilerApi$.apply(ToolBoxFactory.scala:370)
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl.compile(ToolBoxFactory.scala:437)
at scala.tools.reflect.ToolBoxFactory$ToolBoxImpl.eval(ToolBoxFactory.scala:459)
It seems that the Kafka InterfaceStability class is annotated with itself:
package org.apache.kafka.common.annotation;
@InterfaceStability.Evolving
public class InterfaceStability {
...
}
Scala version:"2.12.8"
kafkaVersion: "2.1.0"
Sbt version : "1.2.7"
After switching to the API below, which uses the scala.tools.nsc package directly, I can compile successfully.
Refer to https://eknet.org/main/dev/runtimecompilescala.html [broken link]
import scala.reflect.internal.util.{AbstractFileClassLoader, BatchSourceFile}
import scala.reflect.io.{AbstractFile, VirtualDirectory}
import scala.reflect.runtime
import scala.reflect.runtime.universe
import scala.reflect.runtime.universe._
import scala.tools.nsc.{Global, Settings}
import scala.collection.mutable
import java.security.MessageDigest
import java.math.BigInteger
import java.io.File
class Compiler(targetDir: Option[File]) {
val target = targetDir match {
case Some(dir) => AbstractFile.getDirectory(dir)
case None => new VirtualDirectory("(memory)", None)
}
val classCache = mutable.Map[String, Class[_]]()
private val settings = new Settings()
settings.deprecation.value = true // enable detailed deprecation warnings
settings.unchecked.value = true // enable detailed unchecked warnings
settings.outputDirs.setSingleOutput(target)
settings.usejavacp.value = true
private val global = new Global(settings)
private lazy val run = new global.Run
val classLoader = new AbstractFileClassLoader(target, this.getClass.getClassLoader)
/**Compiles the code as a class into the class loader of this compiler.
*
* @param code
* @return
*/
def compile(code: String) = {
val className = classNameForCode(code)
findClass(className).getOrElse {
val sourceFiles = List(new BatchSourceFile("(inline)", wrapCodeInClass(className, code)))
run.compileSources(sourceFiles)
findClass(className).get
}
}
/** Compiles the source string into the class loader and
* evaluates it.
*
* @param code
* @tparam T
* @return
*/
def eval[T](code: String): T = {
val cls = compile(code)
cls.getConstructor().newInstance().asInstanceOf[() => Any].apply().asInstanceOf[T]
}
def findClass(className: String): Option[Class[_]] = {
synchronized {
classCache.get(className).orElse {
try {
val cls = classLoader.loadClass(className)
classCache(className) = cls
Some(cls)
} catch {
case e: ClassNotFoundException => None
}
}
}
}
protected def classNameForCode(code: String): String = {
val digest = MessageDigest.getInstance("SHA-1").digest(code.getBytes)
"sha"+new BigInteger(1, digest).toString(16)
}
/*
* Wrap source code in a new class with an apply method.
*/
private def wrapCodeInClass(className: String, code: String) = {
"class " + className + " extends (() => Any) {\n" +
" def apply() = {\n" +
code + "\n" +
" }\n" +
"}\n"
}
}
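For illustration, here is a small usage sketch of the Compiler above with the KStream snippet from the question (the topic name "TestTopic" comes from the question; the sketch assumes the kafka-streams-scala artifacts are on the runtime classpath):
import org.apache.kafka.streams.scala.kstream.KStream
// Compile into an in-memory virtual directory (pass Some(dir) to keep the class files on disk).
val compiler = new Compiler(None)
// The code string becomes the body of the generated apply() method,
// so its last expression is the value returned by eval.
val stream: KStream[String, String] = compiler.eval[KStream[String, String]](
  """
    |import org.apache.kafka.streams.scala.ImplicitConversions._
    |import org.apache.kafka.streams.scala.Serdes._
    |new org.apache.kafka.streams.scala.StreamsBuilder().stream[String, String]("TestTopic")
  """.stripMargin)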

How to bind Slick dependency with Lagom?

So, I have this dependency which is used to create tables and interact with Postgres. Here is a Sample Class:
class ConfigTable {
this: DBFactory =>
import driver.api._
implicit val configKeyMapper = MappedColumnType.base[ConfigKey, String](e => e.toString, s => ConfigKey.withName(s))
val configs = TableQuery[ConfigMapping]
class ConfigMapping(tag: Tag) extends Table[Config](tag, "configs") {
def key = column[ConfigKey]("key")
def value = column[String]("value")
def * = (key, value) <> (Config.tupled, Config.unapply _)
}
/**
* add config
*
* @param config
* @return
*/
def add(config: Config): Try[Config] = try {
sync(db.run(configs += config)) match {
case 1 => Success(config)
case _ => Failure(new Exception("Unable to add config"))
}
} catch {
case ex: PSQLException =>
if (ex.getMessage.contains("duplicate key value")) Failure(new Exception("alt id already exists."))
else Failure(new Exception(ex.getMessage))
}
def get(key: ConfigKey): Option[Config] = sync(db.run(configs.filter(x => x.key === key).result)).headOption
def getAll(): Seq[Config] = sync(db.run(configs.result))
}
object ConfigTable extends ConfigTable with PSQLComponent
PSQLComponent is the Abstraction for Database meta configuration:
import slick.jdbc.PostgresProfile
trait PSQLComponent extends DBFactory {
val driver = PostgresProfile
import driver.api.Database
val db: Database = Database.forConfig("db.default")
}
DBFactory is again an abstraction:
import slick.jdbc.JdbcProfile
trait DBFactory {
val driver: JdbcProfile
import driver.api._
val db: Database
}
application.conf:
db.default {
driver = "org.postgresql.Driver"
url = "jdbc:postgresql://localhost:5432/db"
user = "user"
password = "pass"
hikaricp {
minimumIdle = ${db.default.async-executor.minConnections}
maximumPoolSize = ${db.default.async-executor.maxConnections}
}
}
jdbc-defaults.slick.profile = "slick.jdbc.PostgresProfile$"
lagom.persistence.jdbc.create-tables.auto=false
I compile and publish this dependency to Nexus, and I'm trying to use it in my Lagom microservice.
Here is the loader class:
class SlickExapleAppLoader extends LagomApplicationLoader {
override def load(context: LagomApplicationContext): LagomApplication = new SlickExampleApp(context) {
override def serviceLocator: ServiceLocator = NoServiceLocator
}
override def loadDevMode(context: LagomApplicationContext): LagomApplication = new SlickExampleApp(context) with LagomDevModeComponents {
}
override def describeService = Some(readDescriptor[SlickExampleLMSServiceImpl])
}
abstract class SlickExampleApp(context: LagomApplicationContext)
extends LagomApplication(context)
// No idea which of these to use or how; the docs aren't clear about it either.
// with ReadSideJdbcPersistenceComponents
// with ReadSideSlickPersistenceComponents
// with SlickPersistenceComponents
with AhcWSComponents {
wire[SlickExampleScheduler]
}
I'm trying to implement it in this scheduler:
class SlickExampleScheduler @Inject()(lmsService: LMSService,
configuration: Configuration)(implicit ec: ExecutionContext) {
val brofile = `SomeDomainObject`
val gson = new Gson()
val concurrency = Runtime.getRuntime.availableProcessors() * 10
implicit val timeout: Timeout = 3.minute
implicit val system: ActorSystem = ActorSystem("LMSActorSystem")
implicit val materializer: ActorMaterializer = ActorMaterializer()
// Getting ExceptionInInitializerError here for ConfigTable ===> ExceptionLine
val schedulerImplDao = new SchedulerImplDao(ConfigTable)
def hitLMSAPI = {
println("=============>1")
schedulerImplDao.doSomething()
}
system.scheduler.schedule(2.seconds, 2.seconds) {
println("=============>")
hitLMSAPI
}
}
I'm not sure if this is the correct way, and if it's not, what the correct way of doing this would be. It is a project requirement to keep the data models separate from the service, for the obvious reason of re-usability.
Exception Stack:
17:50:38.666 [info] akka.cluster.Cluster(akka://lms-impl-application) [sourceThread=ForkJoinPool-1-worker-1, akkaTimestamp=12:20:38.665UTC, akkaSource=akka.cluster.Cluster(akka://lms-impl-application), sourceActorSystem=lms-impl-application] - Cluster Node [akka.tcp://lms-impl-application@127.0.0.1:45805] - Started up successfully
17:50:38.707 [info] akka.cluster.Cluster(akka://lms-impl-application) [sourceThread=lms-impl-application-akka.actor.default-dispatcher-6, akkaTimestamp=12:20:38.707UTC, akkaSource=akka.cluster.Cluster(akka://lms-impl-application), sourceActorSystem=lms-impl-application] - Cluster Node [akka.tcp://lms-impl-application@127.0.0.1:45805] - No seed-nodes configured, manual cluster join required
java.lang.ExceptionInInitializerError
at com.slick.init.impl.SlickExampleScheduler.<init>(SlickExampleScheduler.scala:29)
at com.slick.init.impl.SlickExampleApp.<init>(SlickExapleAppLoader.scala:42)
at com.slick.init.impl.SlickExapleAppLoader$$anon$2.<init>(SlickExapleAppLoader.scala:17)
at com.slick.init.impl.SlickExapleAppLoader.loadDevMode(SlickExapleAppLoader.scala:17)
at com.lightbend.lagom.scaladsl.server.LagomApplicationLoader.load(LagomApplicationLoader.scala:76)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$5(LagomReloadableDevServerStart.scala:176)
at play.utils.Threads$.withContextClassLoader(Threads.scala:21)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$3(LagomReloadableDevServerStart.scala:173)
at scala.Option.map(Option.scala:163)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$2(LagomReloadableDevServerStart.scala:149)
at scala.util.Success.flatMap(Try.scala:251)
at play.core.server.LagomReloadableDevServerStart$$anon$1.$anonfun$get$1(LagomReloadableDevServerStart.scala:147)
at scala.concurrent.Future$.$anonfun$apply$1(Future.scala:658)
at scala.util.Success.$anonfun$map$1(Try.scala:255)
at scala.util.Success.map(Try.scala:213)
at scala.concurrent.Future.$anonfun$map$1(Future.scala:292)
at scala.concurrent.impl.Promise.liftedTree1$1(Promise.scala:33)
at scala.concurrent.impl.Promise.$anonfun$transform$1(Promise.scala:33)
at scala.concurrent.impl.CallbackRunnable.run(Promise.scala:64)
at java.util.concurrent.ForkJoinTask$RunnableExecuteAction.exec(ForkJoinTask.java:1402)
at java.util.concurrent.ForkJoinTask.doExec(ForkJoinTask.java:289)
at java.util.concurrent.ForkJoinPool$WorkQueue.runTask(ForkJoinPool.java:1056)
at java.util.concurrent.ForkJoinPool.runWorker(ForkJoinPool.java:1692)
at java.util.concurrent.ForkJoinWorkerThread.run(ForkJoinWorkerThread.java:157)
Caused by: java.lang.NullPointerException
at com.example.db.models.LoginTable.<init>(LoginTable.scala:29)
at com.example.db.models.LoginTable$.<init>(LoginTable.scala:293)
at com.example.db.models.LoginTable$.<clinit>(LoginTable.scala)
... 24 more
This is how it is working:
abstract class SlickExampleApp(context: LagomApplicationContext) extends LagomApplication(context)
with SlickPersistenceComponents with AhcWSComponents {
override implicit lazy val actorSystem: ActorSystem = ActorSystem("LMSActorSystem")
override lazy val materializer: ActorMaterializer = ActorMaterializer()
override lazy val lagomServer = serverFor[SlickExampleLMSService](wire[SlickExampleLMSServiceImpl])
lazy val externalService = serviceClient.implement[LMSService]
override def connectionPool: ConnectionPool = new HikariCPConnectionPool(environment)
override def jsonSerializerRegistry: JsonSerializerRegistry = new JsonSerializerRegistry {
override def serializers: immutable.Seq[JsonSerializer[_]] = Vector.empty
}
val loginTable = wire[LoginTable]
wire[SlickExampleScheduler]
}
> One thing I'd like to report: the Lagom docs about the application.conf configuration of Slick are not correct; they misled me for two days, until I dug into the library code and found how it actually works:
private val readSideConfig = system.settings.config.getConfig("lagom.persistence.read-side.jdbc")
private val jdbcConfig = system.settings.config.getConfig("lagom.persistence.jdbc")
private val createTables = jdbcConfig.getConfig("create-tables")
val autoCreateTables: Boolean = createTables.getBoolean("auto")
// users can disable the usage of jndiDbName for userland read-side operations by
// setting the jndiDbName to null. In which case we fallback to slick.db.
// slick.db must be defined otherwise the application will fail to start
val db = {
if (readSideConfig.hasPath("slick.jndiDbName")) {
new InitialContext()
.lookup(readSideConfig.getString("slick.jndiDbName"))
.asInstanceOf[Database]
} else if (readSideConfig.hasPath("slick.db")) {
Database.forConfig("slick.db", readSideConfig)
} else {
throw new RuntimeException("Cannot start because read-side database configuration is missing. " +
"You must define either 'lagom.persistence.read-side.jdbc.slick.jndiDbName' or 'lagom.persistence.read-side.jdbc.slick.db' in your application.conf.")
}
}
val profile = DatabaseConfig.forConfig[JdbcProfile]("slick", readSideConfig).profile
The configuration it requires is quite different from the one suggested in the docs.
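For reference, a configuration shaped like the following is what the code above ends up reading when no JNDI name is configured (a sketch only; the keys are inferred from the snippet, and the driver/url/credentials are the illustrative values from the question):
lagom.persistence.jdbc.create-tables.auto = false
lagom.persistence.read-side.jdbc {
  slick {
    # read by DatabaseConfig.forConfig[JdbcProfile]("slick", readSideConfig)
    profile = "slick.jdbc.PostgresProfile$"
    # read by Database.forConfig("slick.db", readSideConfig) when slick.jndiDbName is not set
    db {
      driver = "org.postgresql.Driver"
      url = "jdbc:postgresql://localhost:5432/db"
      user = "user"
      password = "pass"
    }
  }
}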

How to get defined Arbitrary?

I am using ScalaTest and ScalaCheck. I wrote a custom generator and an Arbitrary instance as follows:
import java.time.LocalDateTime
import org.scalacheck._
import org.scalatest.PropSpec
import org.scalatest.prop.Checkers
import Gen._
import Arbitrary.arbitrary
class AuthJwtSpec extends PropSpec with Checkers {
private val start = LocalDateTime.now.atZone(java.time.ZoneId.systemDefault()).toEpochSecond
private val end = LocalDateTime.now.plusDays(2).atZone(java.time.ZoneId.systemDefault()).toEpochSecond
private val pickTime = Gen.choose(start, end)
private val authUser: Arbitrary[AuthUser] =
Arbitrary {
for {
u <- arbitrary[String]
p <- arbitrary[String]
} yield AuthUser(u, p)
}
property("Generate JWT token.") {
check(Prop.forAll(authUser, pickTime) {(r1: AuthUser, r2: Long) =>
???
})
}
}
The problem is that r1 has type Arbitrary[AuthUser], but I need AuthUser. How do I get it?
As I see it, the problem is with the authUser field - it should be Gen[AuthUser], not Arbitrary[AuthUser]:
import java.time.LocalDateTime
import org.scalacheck._
import org.scalatest._
import prop._
case class AuthUser(u: String, p: String)
class AuthJwtSpec extends PropSpec with Checkers with PropertyChecks {
private val start = LocalDateTime.now.atZone(java.time.ZoneId.systemDefault()).toEpochSecond
private val end = LocalDateTime.now.plusDays(2).atZone(java.time.ZoneId.systemDefault()).toEpochSecond
private val pickTime: Gen[Long] = Gen.choose(start, end)
// authUser should be Gen[AuthUser], not Arbitrary[AuthUser]
private val authUser: Gen[AuthUser] =
for {
u <- Arbitrary.arbitrary[String]
p <- Arbitrary.arbitrary[String]
} yield AuthUser(u, p)
property("Generate JWT token.") {
val prop = Prop.forAll(authUser, pickTime) {(user: AuthUser, time: Long) =>
println(s"User: $user")
println(s"Time: $time")
// Property checks must always return true or false
true
}
check(prop)
}
}
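If you prefer to keep an Arbitrary instance, note that Arbitrary is just a wrapper around a Gen: you can unwrap it with .arbitrary, or make it implicit so the parameterless forAll overload resolves it. A minimal sketch, reusing the authUser generator defined in the answer above (the property body is only a placeholder):
// Making the Arbitrary implicit lets forAll derive the AuthUser parameter.
implicit val arbAuthUser: Arbitrary[AuthUser] = Arbitrary(authUser)
property("Generate JWT token (implicit Arbitrary).") {
  check(Prop.forAll { (user: AuthUser) =>
    // a property must evaluate to a Boolean (or a Prop)
    user.u.length >= 0
  })
}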

An akka streams function that creates a sink and a source that emits whatever the sink receives

The goal is to implement a function with this signature
def bindedSinkAndSource[A]:(Sink[A, Any], Source[A, Any]) = ???
where the returned source emits whatever the sink receives.
My primary goal is to implement a websocket forwarder by means of the handleWebSocketMessages directive.
The forwarder graph is:
leftReceiver ~> rightEmitter
leftEmitter <~ rightReceiver
where leftReceiver and leftEmitter are the in and out of the left endpoint handler flow; and rightReceiver and rightEmitter are the in and out of the right endpoint handler flow.
For example:
import akka.NotUsed
import akka.http.scaladsl.model.ws.Message
import akka.http.scaladsl.server.Directive.addByNameNullaryApply
import akka.http.scaladsl.server.Directives._
import akka.http.scaladsl.server.Route
import akka.stream.scaladsl.Flow
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
def buildHandlers(): Route = {
val (leftReceiver, rightEmitter) = bindedSinkAndSource[Message];
val (rightReceiver, leftEmitter) = bindedSinkAndSource[Message];
val leftHandlerFlow = Flow.fromSinkAndSource(leftReceiver, leftEmitter)
val rightHandlerFlow = Flow.fromSinkAndSource(rightReceiver, rightEmitter)
pathPrefix("leftEndpointChannel") {
handleWebSocketMessages(leftHandlerFlow)
} ~
pathPrefix("rightEndpointChannel") {
handleWebSocketMessages(rightHandlerFlow)
}
}
All the ideas that came to me were frustrated by the fact that the handleWebSocketMessages(..) directive doesn't give access to the materialized value of the received flow.
I found a way to achieve the goal, but there could be shorter and easier ways. If you know one, please don't hesitate to add your knowledge.
import org.reactivestreams.Publisher
import org.reactivestreams.Subscriber
import org.reactivestreams.Subscription
import akka.NotUsed
import akka.stream.scaladsl.Sink
import akka.stream.scaladsl.Source
def bindedSinkAndSource[A]: (Sink[A, NotUsed], Source[A, NotUsed]) = {
class Binder extends Subscriber[A] with Publisher[A] { binder =>
var oUpStreamSubscription: Option[Subscription] = None;
var oDownStreamSubscriber: Option[Subscriber[_ >: A]] = None;
var pendingRequestFromDownStream: Option[Long] = None;
var pendingCancelFromDownStream: Boolean = false;
def onSubscribe(upStreamSubscription: Subscription): Unit = {
this.oUpStreamSubscription match {
case Some(_) => upStreamSubscription.cancel // rule 2-5
case None =>
this.oUpStreamSubscription = Some(upStreamSubscription);
if (pendingRequestFromDownStream.isDefined) {
upStreamSubscription.request(pendingRequestFromDownStream.get)
pendingRequestFromDownStream = None
}
if (pendingCancelFromDownStream) {
upStreamSubscription.cancel()
pendingCancelFromDownStream = false
}
}
}
def onNext(a: A): Unit = {
oDownStreamSubscriber.get.onNext(a)
}
def onComplete(): Unit = {
oDownStreamSubscriber.foreach { _.onComplete() };
this.oUpStreamSubscription = None
}
def onError(error: Throwable): Unit = {
oDownStreamSubscriber.foreach { _.onError(error) };
this.oUpStreamSubscription = None
}
def subscribe(downStreamSubscriber: Subscriber[_ >: A]): Unit = {
assert(this.oDownStreamSubscriber.isEmpty);
this.oDownStreamSubscriber = Some(downStreamSubscriber);
downStreamSubscriber.onSubscribe(new Subscription() {
def request(n: Long): Unit = {
binder.oUpStreamSubscription match {
case Some(usSub) => usSub.request(n);
case None =>
assert(binder.pendingRequestFromDownStream.isEmpty);
binder.pendingRequestFromDownStream = Some(n);
}
};
def cancel(): Unit = {
binder.oUpStreamSubscription match {
case Some(usSub) => usSub.cancel();
case None =>
assert(binder.pendingCancelFromDownStream == false);
binder.pendingCancelFromDownStream = true;
}
binder.oDownStreamSubscriber = None
}
})
}
}
val binder = new Binder;
val receiver = Sink.fromSubscriber(binder);
val emitter = Source.fromPublisher(binder);
(receiver, emitter);
}
Note that the instance vars of the Binder class may suffer concurrency problems if the sink and source this method creates are not fused later by the user. If that is not the case, all the accesses to these variables should be enclosed inside synchronized zones. Another solution would be to ensure that the sink and the source are materialized in an execution context with a single thread.
Two days later I discovered MergeHub and BroadcastHub. Using them, the answer is much shorter:
import akka.stream.Materializer
def bindedSinkAndSource[T](implicit sm: Materializer): (Sink[T, NotUsed], Source[T, NotUsed]) = {
import akka.stream.scaladsl.BroadcastHub;
import akka.stream.scaladsl.MergeHub;
import akka.stream.scaladsl.Keep;
MergeHub.source[T](perProducerBufferSize = 8).toMat(BroadcastHub.sink[T](bufferSize = 256))(Keep.both).run()
}
with the advantage that the returned sink and source can be materialized multiple times.
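For illustration, a small usage sketch of this MergeHub/BroadcastHub variant (the actor system name and the printed labels are placeholders); every element pushed into the returned sink is delivered to every materialization of the returned source:
import akka.actor.ActorSystem
import akka.stream.ActorMaterializer
import akka.stream.scaladsl.Source
implicit val system: ActorSystem = ActorSystem("forwarder-demo")
implicit val materializer: ActorMaterializer = ActorMaterializer()
val (sink, source) = bindedSinkAndSource[Int]
source.runForeach(n => println(s"first subscriber got: $n"))
source.runForeach(n => println(s"second subscriber got: $n")) // the source can be materialized again
Source(1 to 3).runWith(sink)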

Play 2.4 + i18n: Using a database instead of property files for internationalization

What I did works fine (at least it looks like it does), but I am not convinced this is the best way to do it...
Basically, I wanted to have my i18n translations in a database instead of property files, so that users can easily edit them and a cache can serve the updated translations to other users within a short period of time - I use an Akka Actor to read from the database and build a cache used by the MessagesApi (before, I always needed to redeploy whenever the property files changed).
Basically, is what I did totally the wrong way to do it?
TranslationActor.scala :
class TranslationActor extends Actor {
def receive = {
case _ => {
Logger.info("Starting to cache the translations")
TranslationActor.tempCache = ListMap.empty
var translations: ListMap[String, String] = ListMap.empty
for (acceptedLanguage <- TranslationActor.acceptedLanguages) {
val translationLanguageId: Long = TranslationLanguage.findByCode(acceptedLanguage).get.id
val languageTranslations: Seq[Translation] = Translation.findAllByLanguageId(translationLanguageId)
translations = new ListMap[String, String]
for (languageTranslation <- languageTranslations) {
val tag = EnglishTranslation.findById(languageTranslation.englishTranslationId).get.tag
var returnedTranslation: String = languageTranslation.translation
if (returnedTranslation.isEmpty) {
returnedTranslation = tag
}
translations += tag -> new CacheValue(new Locale(acceptedLanguage), returnedTranslation).stringVar
}
TranslationActor.tempCache += acceptedLanguage -> translations
}
TranslationActor.cache = TranslationActor.tempCache
Logger.info("Finished to cache the translations")
}
}
}
object TranslationActor {
var acceptedLanguages: Seq[String] = Seq("fr", "en")
var cache: ListMap[String, ListMap[String, String]] = ListMap.empty
var tempCache: ListMap[String, ListMap[String, String]] = ListMap.empty
}
class CacheValue(locale: Locale, string: String) {
val created: Long = System.currentTimeMillis
var messageFormat: MessageFormat = null
var localeVar: Locale = locale
var stringVar: String = string
def isOlderThan(period: Long): Boolean = {
(System.currentTimeMillis - created) > (period * 1000)
}
def getMessageFormat: MessageFormat = {
if (messageFormat == null) {
if (stringVar != null) {
messageFormat = new MessageFormat(stringVar, localeVar)
} else {
messageFormat = new MessageFormat("", localeVar)
}
}
messageFormat
}
}
ManageTranslationDaemon.scala :
@Singleton
class ManageTranslationDaemon @Inject() (actorSystem: ActorSystem, applicationLifecycle: ApplicationLifecycle) {
Logger.info("Scheduling the translation daemon")
val translationActor = actorSystem.actorOf(Props(new TranslationActor()))
actorSystem.scheduler.schedule(1 seconds, 30 minutes, translationActor, "translationDaemon")
applicationLifecycle.addStopHook { () =>
Logger.info("Shutting down translation daemon")
Future.successful(actorSystem.shutdown())
}
}
TranslationGuiceConfiguration.scala : (a com.google.inject.AbstractModule)
class TranslationGuiceConfiguration extends AbstractModule {
def configure() : Unit = {
bind(classOf[ManageTranslationDaemon]).asEagerSingleton()
}
}
Then I extended parts of the DefaultMessagesApi (by looking at the code of MessagesApi) in
MessagesPersoApi.scala :
class MessagesPersoApi #Inject() (environment: Environment, configuration: Configuration, langs: Langs) extends DefaultMessagesApi(environment: Environment, configuration: Configuration, langs: Langs) {
private def joinPaths(first: Option[String], second: String) = first match {
case Some(parent) => new java.io.File(parent, second).getPath
case None => second
}
override protected def loadMessages(langCode: String): Map[String, String] = {
TranslationActor.cache.getOrElse(langCode, loadMessagesFromFile("messages." + langCode))
}
protected def loadMessagesFromFile(langCode: String): Map[String, String] = {
import scala.collection.JavaConverters._
environment.classLoader.getResources(joinPaths(messagesPrefix, langCode)).asScala.toList
.filterNot(url => Resources.isDirectory(environment.classLoader, url)).reverse
.map { messageFile =>
Messages.parse(Messages.UrlMessageSource(messageFile), messageFile.toString).fold(e => throw e, identity)
}.foldLeft(Map.empty[String, String]) {_ ++ _}
}
override protected def loadAllMessages: Map[String, Map[String, String]] = {
langs.availables.map(_.code).map { lang =>
(lang, loadMessages(lang))
}.toMap
.+("default" -> loadMessagesFromFile("messages"))
.+("default.play" -> loadMessagesFromFile("messages.default"))
}
}
And finally created a module (play.api.inject.Module)
MessagesPersoModule.scala :
class MessagesPersoModule extends Module {
def bindings(environment: Environment, configuration: Configuration) = {
Seq(
bind[Langs].to[DefaultLangs],
bind[MessagesApi].to[MessagesPersoApi]
)
}
}
And at the end, used it in my application.conf :
play.modules.disabled += "play.api.i18n.I18nModule"
play.modules.enabled += "modules.MessagesPersoModule"
play.modules.enabled += "modules.TranslationGuiceConfiguration"
Does that actually make sense? It seemed to me that it was a bit "complicated" to write. Is there an easier way to implement the same logic with less code and fewer classes?
Thanks,
Yoann