How can I reload the Akka scheduler when the Play framework restarts - scala

I'm currently developing an application using the Play-Scala framework and an Akka actor to send an email whenever a CRUD action against the database happens. I have a problem when the server restarts.
Is there any way to reload the Akka scheduler when the Play framework restarts, or to persist the schedules so that they run again when the app starts back up? This is my code, for example:
@Singleton
class SendEmailSaveDraftActor @Inject()(system: ActorSystem,
                                        config: Configuration,
                                        mailService: MailService)(implicit exec: ExecutionContext) extends Actor {

  val userSchedules: mutable.HashMap[String, Seq[Cancellable]] = mutable.HashMap()

  private val emailAdmin = config.getString("email.admin").getOrElse("")
  private val interval24Hours = config.getInt("batch.saveDarf24.extractIntervalHours").getOrElse(1)
  private val interval72Hours = config.getInt("batch.saveDarf72.extractIntervalHours").getOrElse(1)
  private val interval14Days = config.getInt("batch.saveDarf14.extractIntervalDays").getOrElse(1)

  def receive = {
    case s: SendEmailSaveDrafModel =>
      userSchedules.get(s.userName) match {
        case Some(schedules) =>
          schedules.map(_.cancel())
          userSchedules.update(s.userName, getSchedules(s))
        case None =>
          userSchedules += (s.userName -> getSchedules(s))
      }
    case userName: String =>
      userSchedules.get(userName) match {
        case Some(schedules) =>
          schedules.map(_.cancel())
        case None =>
      }
  }

  def getSchedules(s: SendEmailSaveDrafModel): Seq[Cancellable] = {
    val schedule1 = system.scheduler.scheduleOnce(5 minutes) {
      mailService Send EmailSaveDraft24Hours(to = Seq(s.to), bccEmails = Seq(emailAdmin), id = s.id, orderNo = s.orderNo, designId = s.designId)
    }
    val schedule2 = system.scheduler.scheduleOnce(10 minutes) {
      mailService Send EmailSaveDraft72Hours(to = Seq(s.to), bccEmails = Seq(emailAdmin), id = s.id, orderNo = s.orderNo, designId = s.designId)
    }
    val schedule3 = system.scheduler.scheduleOnce(15 minutes) {
      mailService Send EmailSaveDraft14Days(to = Seq(s.to), bccEmails = Seq(emailAdmin), id = s.id, orderNo = s.orderNo, designId = s.designId)
    }
    Seq(schedule1, schedule2, schedule3)
  }
}

I am using the Akka scheduler as shown in the code below.
Each user can log in to my application and create a scheduler for a background task; I am using the Akka scheduler for this purpose.
public Cancellable buildScheduler(String actorName, SchedulerActorMessage message, long interval, TimeUnit timeUnit, long initialDelay, String actorMapKey) {
    ActorRef daemonRef = actorSystem.actorOf(Props.create(SchedulerActor.class), actorName);
    Cancellable cancellableActor = actorSystem.scheduler().schedule(FiniteDuration.apply(initialDelay, timeUnit),
            FiniteDuration.apply(interval, timeUnit), daemonRef, message,
            actorSystem.dispatcher(), ActorRef.noSender());
    actorMap.put(actorMapKey, cancellableActor);
    return cancellableActor;
}
I store the necessary information in the DB, and when the application starts I start the scheduler again in the global class.
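Below is a minimal sketch of that approach for the question's actor, assuming a hypothetical ScheduleRepository that is written to on every CRUD event and a send-email actor bound through Play's AkkaGuiceSupport under the name "send-email-save-draft-actor" (both names are illustrative, not from the original code):

import javax.inject.{Inject, Named, Singleton}
import akka.actor.ActorRef
import com.google.inject.AbstractModule
import scala.concurrent.{ExecutionContext, Future}

// Hypothetical store that keeps enough data (user, recipients, ids, original fire time)
// to rebuild each pending schedule after a restart.
trait ScheduleRepository {
  def findPending(): Future[Seq[SendEmailSaveDrafModel]]
}

// Eagerly instantiated when Play starts, so pending schedules are re-registered
// after every restart instead of being lost with the old ActorSystem.
@Singleton
class ScheduleReloader @Inject()(repository: ScheduleRepository,
                                 @Named("send-email-save-draft-actor") sendEmailActor: ActorRef)
                                (implicit ec: ExecutionContext) {
  repository.findPending().foreach { models =>
    models.foreach(sendEmailActor ! _) // the actor rebuilds its Cancellables
  }
}

class ScheduleReloaderModule extends AbstractModule {
  override def configure(): Unit =
    bind(classOf[ScheduleReloader]).asEagerSingleton()
}

The module would still have to be enabled via play.modules.enabled in application.conf (package name as appropriate), and the remaining delay should be recomputed from the persisted fire time rather than always reusing the original 5/10/15 minutes.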

Related

Akka gRPC + Slick application causes "IllegalStateException: Cannot initialize ExecutionContext; AsyncExecutor already shut down"

I am trying to develop a gRPC server with Akka-gRPC and Slick. I also use Airframe for DI.
The source code is here.
The issue is that the server fails when it receives a request while running as a gRPC server.
If it doesn't start as a gRPC server, but just reads resources from the database, the process succeeds.
What is the difference?
The following code reads objects from the database with Slick.
The ...Component objects are Airframe designs; they are used by the main module.
trait UserRepository {
  def getUser: Future[Seq[Tables.UsersRow]]
}

class UserRepositoryImpl(val profile: JdbcProfile, val db: JdbcProfile#Backend#Database) extends UserRepository {
  import profile.api._
  def getUser: Future[Seq[Tables.UsersRow]] = db.run(Tables.Users.result)
}

trait UserResolveService {
  private val repository = bind[UserRepository]
  def getAll: Future[Seq[Tables.UsersRow]] =
    repository.getUser
}

object userServiceComponent {
  val design = newDesign
    .bind[UserResolveService]
    .toSingleton
}
The gRPC server source code follows.
trait UserServiceImpl extends UserService {
  private val userResolveService = bind[UserResolveService]
  private val system: ActorSystem = bind[ActorSystem]
  implicit val ec: ExecutionContextExecutor = system.dispatcher

  override def getAll(in: GetUserListRequest): Future[GetUserListResponse] = {
    userResolveService.getAll.map(us =>
      GetUserListResponse(
        us.map(u =>
          myapp.proto.user.User(
            1,
            "t_horikoshi@example.com",
            "t_horikoshi",
            myapp.proto.user.User.UserRole.Admin
          )
        )
      )
    )
  }
}

trait GRPCServer {
  private val userServiceImpl = bind[UserServiceImpl]
  implicit val system: ActorSystem = bind[ActorSystem]

  def run(): Future[Http.ServerBinding] = {
    implicit def ec: ExecutionContext = system.dispatcher

    val service: PartialFunction[HttpRequest, Future[HttpResponse]] =
      UserServiceHandler.partial(userServiceImpl)
    val reflection: PartialFunction[HttpRequest, Future[HttpResponse]] =
      ServerReflection.partial(List(UserService))

    // Akka HTTP 10.1 requires adapters to accept the new actors APIs
    val bound = Http().bindAndHandleAsync(
      ServiceHandler.concatOrNotFound(service, reflection),
      interface = "127.0.0.1",
      port = 8080,
      settings = ServerSettings(system)
    )
    bound.onComplete {
      case Success(binding) =>
        system.log.info(
          s"gRPC Server online at http://${binding.localAddress.getHostName}:${binding.localAddress.getPort}/"
        )
      case Failure(ex) =>
        system.log.error(ex, "occurred error")
    }
    bound
  }
}
object grpcComponent {
  val design = newDesign
    .bind[UserServiceImpl]
    .toSingleton
    .bind[GRPCServer]
    .toSingleton
}
The main module follows.
object Main extends App {
  val conf = ConfigFactory
    .parseString("akka.http.server.preview.enable-http2 = on")
    .withFallback(ConfigFactory.defaultApplication())
  val system = ActorSystem("GRPCServer", conf)
  val dbConfig: DatabaseConfig[JdbcProfile] =
    DatabaseConfig.forConfig[JdbcProfile](path = "mydb")

  val design = newDesign
    .bind[JdbcProfile]
    .toInstance(dbConfig.profile)
    .bind[JdbcProfile#Backend#Database]
    .toInstance(dbConfig.db)
    .bind[UserRepository]
    .to[UserRepositoryImpl]
    .bind[ActorSystem]
    .toInstance(system)
    .add(userServiceComponent.design)
    .add(grpcComponent.design)

  design.withSession(s =>
    // Await.result(s.build[UserResolveService].getUser, Duration.Inf)) // success
    // Await.result(s.build[UserServiceImpl].getAll(GetUserListRequest()), Duration.Inf)) // success
    s.build[GRPCServer].run() // causes IllegalStateException when a request is received
  )
}
When UserResolveService and UserServiceImpl are called directly, the process of loading an object from the database succeeds.
However, when running the application as a gRPC server, an error occurs when a request is received.
I was thinking about it all day, but I couldn't resolve it.
Will you please help me resolve it?
It is resolved: when executing an async process, the gRPC server has to be started with newSession.
I fixed it like that.
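For reference, a minimal sketch of what that fix can look like, assuming Airframe's Session API (design.newSession, session.start, session.shutdown); the shutdown-hook wiring is illustrative:

object Main extends App {
  // ... same design as above ...

  // Keep the session open for the lifetime of the server instead of letting
  // withSession close it (and shut down bound lifecycles) as soon as the block returns.
  val session = design.newSession
  session.start
  session.build[GRPCServer].run()

  // Close the session only when the JVM is going down (illustrative).
  sys.addShutdownHook(session.shutdown)
}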

Play + Akka - Join the cluster and ask actor on another ActorSystem

I am able to make the Play app join the existing Akka cluster and then make an ask call to an actor running on another ActorSystem and get results back. But I am having trouble with a couple of things:
I see the message below in the logs when Play tries to join the cluster. I suspect that Play is starting its own Akka cluster? I am really not sure what it means.
Could not register Cluster JMX MBean with name=akka:type=Cluster as it is already registered. If you are running multiple clusters in the same JVM, set 'akka.cluster.jmx.multi-mbeans-in-same-jvm = on' in config
Right now I'm re-initializing the ActorSystem every time a request comes to the controller, which I know is not the right way to do it. I am new to Scala, Akka, and Play, and I'm having difficulty figuring out how to make it a singleton service and inject it into my controller.
So far I have got this:
class DataRouter @Inject()(controller: DataController) extends SimpleRouter {
  val prefix = "/v1/data"
  override def routes: Routes = {
    case GET(p"/ip/$datatype") =>
      controller.get(datatype)
    case POST(p"/ip/$datatype") =>
      controller.process
  }
}

case class RangeInput(start: String, end: String)

object RangeInput {
  implicit val implicitWrites = new Writes[RangeInput] {
    def writes(range: RangeInput): JsValue = {
      Json.obj(
        "start" -> range.start,
        "end" -> range.end
      )
    }
  }
}

@Singleton
class DataController @Inject()(cc: ControllerComponents)(implicit exec: ExecutionContext) extends AbstractController(cc) {
  private val logger = Logger("play")
  implicit val timeout: Timeout = 115.seconds

  private val form: Form[RangeInput] = {
    import play.api.data.Forms._
    Form(
      mapping(
        "start" -> nonEmptyText,
        "end" -> text
      )(RangeInput.apply)(RangeInput.unapply)
    )
  }

  def get(datatype: String): Action[AnyContent] = Action.async { implicit request =>
    logger.info(s"show: datatype = $datatype")
    logger.trace(s"show: datatype = $datatype")
    //val r: Future[Result] = Future.successful(Ok("hello " + datatype ))
    val config = ConfigFactory.parseString("akka.cluster.roles = [gateway]").
      withFallback(ConfigFactory.load())
    implicit val system: ActorSystem = ActorSystem(SharedConstants.Actor_System_Name, config)
    implicit val materializer: ActorMaterializer = ActorMaterializer()
    implicit val executionContext = system.dispatcher
    val ipData = system.actorOf(
      ClusterRouterGroup(RandomGroup(Nil), ClusterRouterGroupSettings(
        totalInstances = 100, routeesPaths = List("/user/getipdata"),
        allowLocalRoutees = false, useRoles = Set("static"))).props())
    val res: Future[String] = (ipData ? datatype).mapTo[String]
    //val res: Future[List[Map[String, String]]] = (ipData ? datatype).mapTo[List[Map[String,String]]]
    val futureResult: Future[Result] = res.map { list =>
      Ok(Json.toJson(list))
    }
    futureResult
  }

  def process: Action[AnyContent] = Action.async { implicit request =>
    logger.trace("process: ")
    processJsonPost()
  }

  private def processJsonPost[A]()(implicit request: Request[A]): Future[Result] = {
    logger.debug(request.toString())
    def failure(badForm: Form[RangeInput]) = {
      Future.successful(BadRequest("Test"))
    }
    def success(input: RangeInput) = {
      val r: Future[Result] = Future.successful(Ok("hello " + Json.toJson(input)))
      r
    }
    form.bindFromRequest().fold(failure, success)
  }
}
akka {
  log-dead-letters = off
  log-dead-letters-during-shutdown = off
  actor {
    provider = "akka.cluster.ClusterActorRefProvider"
  }
  remote {
    log-remote-lifecycle-events = off
    enabled-transports = ["akka.remote.netty.tcp"]
    netty.tcp {
      hostname = ${myhost}
      port = 0
    }
  }
  cluster {
    seed-nodes = [
      "akka.tcp://MyCluster@localhost:2541"
    ]
  }
  seed-nodes = ${?SEEDNODE}
}
Answers
Refer to this URL: https://www.playframework.com/documentation/2.6.x/ScalaAkka#Built-in-actor-system-name has details about configuring the actor system name.
You should not initialize an actor system on every request; use the actor system Play injects for you. If you wish to customize the actor system, you should do it by modifying the Akka configuration. For that,
you should create your own ApplicationLoader extending GuiceApplicationLoader and override the builder method to supply your own Akka configuration. The rest is taken care of by Play, which injects this actor system into your application for you.
Refer to the URL below:
https://www.playframework.com/documentation/2.6.x/ScalaDependencyInjection#Advanced:-Extending-the-GuiceApplicationLoader
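A minimal sketch of the injected approach described above, reusing the router-group settings from the question; the cluster role and actor-system name would then live in application.conf rather than being set per request:

import javax.inject.{Inject, Singleton}
import akka.actor.{ActorRef, ActorSystem}
import akka.cluster.routing.{ClusterRouterGroup, ClusterRouterGroupSettings}
import akka.pattern.ask
import akka.routing.RandomGroup
import akka.util.Timeout
import play.api.libs.json.Json
import play.api.mvc._
import scala.concurrent.ExecutionContext
import scala.concurrent.duration._

@Singleton
class DataController @Inject()(cc: ControllerComponents, system: ActorSystem)
                              (implicit exec: ExecutionContext) extends AbstractController(cc) {

  implicit val timeout: Timeout = 115.seconds

  // Built once against Play's injected ActorSystem, instead of creating a new
  // ActorSystem (and joining the cluster again) on every request.
  private val ipData: ActorRef = system.actorOf(
    ClusterRouterGroup(RandomGroup(Nil), ClusterRouterGroupSettings(
      totalInstances = 100, routeesPaths = List("/user/getipdata"),
      allowLocalRoutees = false, useRoles = Set("static"))).props())

  def get(datatype: String): Action[AnyContent] = Action.async { implicit request =>
    (ipData ? datatype).mapTo[String].map(res => Ok(Json.toJson(res)))
  }
}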

Insert into postgres using slick in a non blocking way

class Employee(tag: Tag) extends Table[table_types.user](tag, "EMPLOYEE") {
  def employeeID = column[Int]("EMPLOYEE_ID")
  def empName = column[String]("NAME")
  def startDate = column[String]("START_DATE")
  def * = (employeeID, empName, startDate)
}

object employeeHandle {
  def insert(emp: Employee): Future[Any] = {
    val dao = new SlickPostgresDAO
    val db = dao.db
    // `employee` is assumed to be a TableQuery[Employee] defined elsewhere
    val insertdb = DBIO.seq(employee += (emp))
    db.run(insertdb)
  }
}
Insert a million employee records into the database:
object Hello extends App {
  val employees = List[*1 million employee list*]
  for (employee <- employees) {
    employeeHandle.insert(employee)
    *Code to connect to rest api to confirm entry*
  }
}
However, when I run the above code I soon run out of connections to Postgres. How can I do it in parallel (in a non-blocking way) while making sure I don't run out of connections to Postgres?
I think you don't need to do it in parallel; I don't see how that would solve it. Instead you could simply create the connection before you start that loop and pass it to employeeHandle.insert(db, employee).
Something like this (I don't know Scala):
object Hello extends App {
  val dao = new SlickPostgresDAO
  val db = dao.db
  val employees = List[*1 million employee list*]
  for (employee <- employees) {
    employeeHandle.insert(db, employee)
    *Code to connect to rest api to confirm entry*
  }
}
Almost all the examples of Slick inserts I have come across use blocking to fulfil the results. It would be nice to have one that doesn't.
My take on it:
object Hello extends App {
  val employees = List[*1 million employee list*]
  val groupedList = employees.grouped(10).toList

  insertTests()

  // Insert one group of 10 at a time; Await keeps at most 10 inserts in flight,
  // so the connection pool is never exhausted.
  def insertTests(l: List[List[Employee]] = groupedList): Unit = {
    val ins = l.head
    val futures = ins.map { emp => employeeHandle.insert(emp) }
    val seq = Future.sequence(futures)
    Await.result(seq, Duration.Inf)
    if (l.tail.nonEmpty) insertTests(l.tail)
  }
}
Also, the DAO and connection setup in employeeHandle should live outside the insert method:
object employeeHandle {
  val dao = new SlickPostgresDAO
  val db = dao.db
  def insert(emp: Employee): Future[Any] = {
    val insertdb = DBIO.seq(employee += (emp))
    db.run(insertdb)
  }
}
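For a variant with no Await at all, one option (a sketch, not code from the thread) is to use Slick's ++= bulk insert per group and chain the groups so that only one batch is ever in flight. Here `employee` is assumed to be a TableQuery[Employee], `table_types.user` is the row type from the question's table, and the object name employeeBatchHandle is illustrative:

import slick.jdbc.PostgresProfile.api._
import scala.concurrent.{ExecutionContext, Future}

object employeeBatchHandle {
  val dao = new SlickPostgresDAO
  val db = dao.db
  val employee = TableQuery[Employee] // assumed; the question references `employee` without defining it

  // One round trip per batch via Slick's `++=` bulk insert.
  def insertBatch(batch: Seq[table_types.user]): Future[Option[Int]] =
    db.run(employee ++= batch)
}

object Hello extends App {
  implicit val ec: ExecutionContext = ExecutionContext.global

  val employees: Seq[table_types.user] = ??? // the one-million-record list

  // Chain the batches so only one is in flight at a time: no blocking, and the
  // number of open connections stays bounded by the pool regardless of list size.
  val done: Future[Unit] =
    employees.grouped(1000).foldLeft(Future.successful(())) { (acc, batch) =>
      acc.flatMap(_ => employeeBatchHandle.insertBatch(batch)).map(_ => ())
    }
}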

Kafka tests failing intermittently if not starting/stopping kafka each time

I'm trying to run some integration tests for a data stream using an embedded Kafka cluster. When executing all the tests in an environment other than my local one, the tests fail due to some internal state that isn't removed properly.
I can get all the tests passing in the non-local environment when I start/stop the Kafka cluster before/after each test, but I only want to start and stop the cluster once, at the beginning and at the end of the test suite.
I tried to remove the local streams state, but that didn't seem to work:
override protected def afterEach(): Unit = KStreamTestUtils.purgeLocalStreamsState(properties)
Is there a way to get my suite of tests running without having to start/stop the cluster each time?
The relevant classes are right below.
class TweetStreamProcessorSpec extends FeatureSpec
  with MockFactory with GivenWhenThen with Eventually with BeforeAndAfterEach with BeforeAndAfterAll {

  val CLUSTER: EmbeddedKafkaCluster = new EmbeddedKafkaCluster
  val TEST_TOPIC: String = "test_topic"
  val properties = new Properties()

  override def beforeAll(): Unit = {
    CLUSTER.start()
    CLUSTER.createTopic(TEST_TOPIC, 1, 1)
  }

  override def afterAll(): Unit = CLUSTER.stop()

  // if uncommenting these lines the tests work
  // override def afterEach(): Unit = CLUSTER.stop()
  // override protected def beforeEach(): Unit = CLUSTER.start()

  def createProducer: KafkaProducer[String, TweetEvent] = {
    val properties = Map(
      KEY_SERIALIZER_CLASS_CONFIG -> classOf[StringSerializer].getName,
      VALUE_SERIALIZER_CLASS_CONFIG -> classOf[ReflectAvroSerializer[TweetEvent]].getName,
      BOOTSTRAP_SERVERS_CONFIG -> CLUSTER.bootstrapServers(),
      SCHEMA_REGISTRY_URL_CONFIG -> CLUSTER.schemaRegistryUrlForcedToLocalhost()
    )
    new KafkaProducer[String, TweetEvent](properties)
  }

  def kafkaConsumerSettings: KafkaConfig = {
    val bootstrapServers = CLUSTER.bootstrapServers()
    val schemaRegistryUrl = CLUSTER.schemaRegistryUrlForcedToLocalhost()
    val zookeeper = CLUSTER.zookeeperConnect()
    KafkaConfig(
      ConfigFactory.parseString(
        s"""
        akka.kafka.bootstrap.servers = "$bootstrapServers"
        akka.kafka.schema.registry.url = "$schemaRegistryUrl"
        akka.kafka.zookeeper.servers = "$zookeeper"
        akka.kafka.topic-name = "$TEST_TOPIC"
        akka.kafka.consumer.kafka-clients.key.deserializer = org.apache.kafka.common.serialization.StringDeserializer
        akka.kafka.consumer.kafka-clients.value.deserializer = ${classOf[ReflectAvroDeserializer[TweetEvent]].getName}
        akka.kafka.consumer.kafka-clients.client.id = client1
        akka.kafka.consumer.wakeup-timeout=20s
        akka.kafka.consumer.max-wakeups=10
        """).withFallback(ConfigFactory.load()).getConfig("akka.kafka")
    )
  }

  feature("Logging tweet data from kafka topic") {
    scenario("log id and payload when consuming a update tweet event") {
      publishEventsToKafka(List(upTweetEvent))
      val logger = Mockito.mock(classOf[Logger])
      val pipeline = new TweetStreamProcessor(kafkaConsumerSettings, logger)
      pipeline.start
      eventually(timeout(Span(5, Seconds))) {
        Mockito.verify(logger, Mockito.times(1)).info(s"updating tweet uuid=${upTweetEvent.getUuid}, payload=${upTweetEvent.getPayload}")
      }
      pipeline.stop
    }

    scenario("log id when consuming a delete tweet event") {
      publishEventsToKafka(List(delTweetEvent))
      val logger = Mockito.mock(classOf[Logger])
      val pipeline = new TweetStreamProcessor(kafkaConsumerSettings, logger)
      pipeline.start
      eventually(timeout(Span(5, Seconds))) {
        Mockito.verify(logger, Mockito.times(1)).info(s"deleting tweet uuid=${delTweetEvent.getUuid}")
      }
      pipeline.stop
    }
  }
}
class TweetStreamProcessor(kafkaConfig: KafkaConfig, logger: Logger)
  extends Lifecycle with TweetStreamProcessor with Logging {

  private var control: Control = _
  private val valueDeserializer: Option[Deserializer[TweetEvent]] = None
  // ...

  def tweetsSource(implicit mat: Materializer): Source[CommittableMessage[String, TweetEvent], Control] =
    Consumer.committableSource(tweetConsumerSettings, Subscriptions.topics(kafkaConfig.topicName))

  override def start: Future[Unit] = {
    control = tweetsSource(materializer)
      .mapAsync(1) { msg =>
        logTweetEvent(msg.record.value())
          .map(_ => msg.committableOffset)
      }
      .batch(max = 20, first => CommittableOffsetBatch.empty.updated(first)) { (batch, elem) =>
        batch.updated(elem)
      }
      .mapAsync(3)(_.commitScaladsl())
      .to(Sink.ignore)
      .run()
    Future.successful(())
  }

  override def stop: Future[Unit] = {
    control.shutdown()
      .map(_ => ())
  }
}
Any help with this would be much appreciated. Thanks in advance.
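One way to isolate the scenarios without restarting the cluster is to give each one its own topic (and, through the kafka-clients section of the config above, its own client.id/group.id), so committed offsets and leftover records from one test are never seen by the next. A sketch, where the helper and the parameterised kafkaConsumerSettingsFor are illustrative names, not from the thread:

import java.util.UUID

// Sketch: create a fresh topic per scenario so state never leaks between tests.
// `kafkaConsumerSettings` would need to take the topic name as a parameter.
def withFreshTopic(test: String => Unit): Unit = {
  val topic = s"test_topic_${UUID.randomUUID().toString.take(8)}"
  CLUSTER.createTopic(topic, 1, 1)
  test(topic)
}

// Usage inside a scenario (illustrative):
// withFreshTopic { topic =>
//   publishEventsToKafka(List(upTweetEvent))                         // publish to `topic`
//   val pipeline = new TweetStreamProcessor(kafkaConsumerSettingsFor(topic), logger)
//   ...
// }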

Akka-Cluster-Sharding: local ShardRegion(system).shardRegion(_)

I have a master actor responsible for initializing some worker actors (there are two types of worker actors, namely ParamServer actors and DataShard actors). For example, say I initiated 20 DataShard actors via ClusterSharding(system).start(_,_,_,_,_) and after that I want to send some message to all DataShard actors (say, case object ReadyToProcess). I read that I can send messages to entities in Akka Cluster Sharding via the local ShardRegion(system).shardRegion(_). Will the local shardRegion(_) send to all DataShards or just one? How can I send messages to all DataShard actors?
The master class is given below:
class Master(ports: Seq[String],
             dataSet: Seq[Example],
             dataPerReplica: Int,
             layerDimensions: Seq[Int],
             activation: ActivationFunction,
             activationFunctionDer: ActivationFunction,
             learningRate: Double) extends Actor with ActorLogging {

  val dataShards = dataSet.grouped(dataPerReplica).toSeq
  val numLayers = layerDimensions.size
  var numShardsFinished = 0

  ports foreach { port =>
    val config = ConfigFactory.parseString("akka.remote.netty.tcp.port=" + port).withFallback(ConfigFactory.load())
    val clusterSystem = ActorSystem("ClusterSystem", config)

    val paramServerRegions: Array[ActorRef] = new Array[ActorRef](numLayers - 1)
    for (i <- 0 to numLayers - 2) {
      paramServerRegions(i) = ClusterSharding(clusterSystem).start(
        typeName = ParamServer.shardName,
        entityProps = ParamServer.props(i, dataShards.size, learningRate, NeuralNetworkOps.randomMatrix(layerDimensions(i + 1), layerDimensions(i) + 1)),
        settings = ClusterShardingSettings(clusterSystem),
        extractEntityId = ParamServer.extractEntityId,
        extractShardId = ParamServer.extractShardId
      )
    }

    // create actors for each data shard/replica. Each replica needs to know about all parameter shards because they will
    // be reading from them and updating them
    val dataShardRegions: Array[ActorRef] = new Array[ActorRef](dataShards.size)
    for (i <- 0 until dataShards.size) { // `until` avoids the off-by-one of the original `0 to dataShards.size`
      dataShardRegions(i) = ClusterSharding(clusterSystem).start(
        typeName = DataShard.shardName,
        entityProps = DataShard.props(i, clusterSystem, dataShards(i), activation, activationFunctionDer, paramServerRegions),
        settings = ClusterShardingSettings(clusterSystem),
        extractEntityId = ParamServer.extractEntityId,
        extractShardId = ParamServer.extractShardId
      )
    }
  }

  def receive: Receive = {
    case Start =>
      val shardRegionSender = ClusterSharding(context.system).shardRegion(DataShard.shardName)
      println("Tomosha boshlandi") // "The show has started"
      shardRegionSender ! ReadyToProcess

    case ShardDone(id) =>
      numShardsFinished += 1
      log.info("")
      if (numShardsFinished == dataShards.size) {
        context.parent ! JobDone
        context.stop(self)
      }
  }
}
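On the question itself: a shard region routes every message to exactly one entity, chosen by extractEntityId, so the Start handler above reaches a single DataShard, not all of them. To reach all of them you send one envelope per known entity id through the same region; a sketch inside Master, where DataShardEnvelope and the matching extractEntityId are assumptions rather than code from the question:

// Sketch (illustrative names): an envelope the DataShard extractEntityId would unwrap.
case class DataShardEnvelope(entityId: Int, payload: Any)

// Inside Master: one message per entity id, all sent through the region ActorRef.
def broadcastToDataShards(msg: Any): Unit = {
  val region = ClusterSharding(context.system).shardRegion(DataShard.shardName)
  dataShards.indices.foreach(id => region ! DataShardEnvelope(id, msg))
}

Alternatively, Akka's Distributed Pub/Sub lets each started entity subscribe to a topic so a single Publish reaches all of them.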