Add a wait period before each retry in my Scala code

I have a Spark connector notebook, "Export Tables To Database", that writes Spark table data to an Azure SQL database. I have a master notebook that calls that Spark connector notebook to write many tables in parallel. If a copy fails, a retry section in the master notebook retries the export. However, this causes duplicates in my database because the original failed attempt doesn't cancel its connection immediately. I want to add a wait period before each retry. How do I do that?
// Imports needed by the snippets below
import java.util.concurrent.Executors
import scala.concurrent.{Await, ExecutionContext, Future}
import scala.concurrent.duration._
import scala.util.{Failure, Success, Try}
import scala.util.control.NonFatal

//// These next four classes and functions export data directly to the Azure SQL database via the Spark connectors.
// The next two functions handle retries: if exporting a table fails, it will be retried.
def tryNotebookRun(path: String, timeout: Int, parameters: Map[String, String] = Map.empty[String, String]): Try[Any] = {
  Try(
    if (parameters.nonEmpty) {
      dbutils.notebook.run(path, timeout, parameters)
    }
    else {
      dbutils.notebook.run(path, timeout)
    }
  )
}

def runWithRetry(path: String, timeout: Int, parameters: Map[String, String] = Map.empty[String, String], maxRetries: Int = 3) = {
  var numRetries = 0
  // I want to add a wait period here
  while (numRetries < maxRetries) {
    tryNotebookRun(path, timeout, parameters) match {
      case Success(_) => numRetries = maxRetries
      case Failure(_) => numRetries = numRetries + 1
    }
  }
}
case class NotebookData(path: String, timeout: Int, parameters: Map[String, String] = Map.empty[String, String])

def parallelNotebooks(notebooks: Seq[NotebookData]): Future[Seq[Any]] = {
  val numNotebooksInParallel = 5
  // This code limits the number of parallel notebooks.
  implicit val ec = ExecutionContext.fromExecutor(Executors.newFixedThreadPool(numNotebooksInParallel))
  val ctx = dbutils.notebook.getContext()

  Future.sequence(
    notebooks.map { notebook =>
      Future {
        dbutils.notebook.setContext(ctx)
        runWithRetry(notebook.path, notebook.timeout, notebook.parameters)
      }
      .recover {
        case NonFatal(e) => s"ERROR: ${e.getMessage}"
      }
    }
  )
}
//// Create a sequence of tables to be written out in parallel
val notebooks = Seq(
  NotebookData("Export Tables To Database", 0, Map("client" -> client, "scope" -> scope, "secret" -> secret, "schema" -> "test", "dbTable" -> "table1")),
  NotebookData("Export Tables To Database", 0, Map("client" -> client, "scope" -> scope, "secret" -> secret, "schema" -> "test", "dbTable" -> "table2"))
)

val res = parallelNotebooks(notebooks)
Await.result(res, 3000000 seconds) // this is a blocking call.
res.value

Adding Thread.sleep before each retry was the solution:
def runWithRetry(path: String, timeout: Int, parameters: Map[String, String] = Map.empty[String, String], maxRetries: Int = 2) = {
  var numRetries = 0
  while (numRetries < maxRetries) {
    tryNotebookRun(path, timeout, parameters) match {
      case Success(_) => numRetries = maxRetries
      case Failure(_) => {
        // wait 30 seconds so the failed connection can close before the next attempt
        Thread.sleep(30000)
        numRetries = numRetries + 1
      }
    }
  }
}
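If you want the wait to grow with each failed attempt instead of staying fixed, the same loop can use exponential backoff. This is only a sketch of a variation on runWithRetry, assuming the 30-second base delay from the answer above; the doubling factor and the done flag are choices of this sketch, not something from the original notebook:

def runWithBackoff(path: String, timeout: Int, parameters: Map[String, String] = Map.empty[String, String], maxRetries: Int = 3, baseDelayMs: Long = 30000L) = {
  var numRetries = 0
  var done = false
  while (!done && numRetries < maxRetries) {
    tryNotebookRun(path, timeout, parameters) match {
      case Success(_) => done = true
      case Failure(_) =>
        // wait baseDelayMs, then 2x, 4x, ... before each successive retry
        Thread.sleep(baseDelayMs * (1L << numRetries))
        numRetries = numRetries + 1
    }
  }
}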

Related

Why does the stream never get triggered?

I have the following stream that never reaches the map after flatMapConcat.
private def stream[A](ref: ActorRef[ServerHealthStreamer])(implicit system: ActorSystem[A]): KillSwitch = {
  implicit val materializer = ActorMaterializer()
  implicit val dispatcher = materializer.executionContext

  system.log.info("=============> Start KafkaDetectorStream <=============")

  val addr = system
    .settings
    .config
    .getConfig("kafka")
    .getString("servers")

  val sink: Sink[ServerHealthEvent, NotUsed] =
    ActorSink.actorRefWithAck[ServerHealthEvent, ServerHealthStreamer, Ack](
      ref = ref,
      onCompleteMessage = Complete,
      onFailureMessage = Fail.apply,
      messageAdapter = Message.apply,
      onInitMessage = Init.apply,
      ackMessage = Ack)

  Source.tick(1.seconds, 5.seconds, NotUsed)
    .flatMapConcat(_ => Source.fromFuture(health(addr)))
    .map {
      case true =>
        KafkaActiveConfirmed
      case false =>
        KafkaInactiveConfirmed
    }
    .viaMat(KillSwitches.single)(Keep.right)
    .to(sink)
    .run()
}
private def health(server: String)(implicit executor: ExecutionContext): Future[Boolean] = {
  val props = new Properties
  props.put(AdminClientConfig.BOOTSTRAP_SERVERS_CONFIG, server)
  props.put(AdminClientConfig.CONNECTIONS_MAX_IDLE_MS_CONFIG, "10000")
  props.put(AdminClientConfig.REQUEST_TIMEOUT_MS_CONFIG, "5000")

  Future {
    AdminClient
      .create(props)
      .listTopics()
      .names()
      .get()
  }.map(_ => true)
    .recover {
      case _: Throwable => false
    }
}
What I mean is that this part:
.map {
  case true =>
    KafkaActiveConfirmed
  case false =>
    KafkaInactiveConfirmed
}
never gets executed and I do not know the reason. The method health executes as expected.
Try adding .log between flatMapConcat and map to see the emitted elements. log can also log errors and stream cancellation.
https://doc.akka.io/docs/akka/current/stream/operators/Source-or-Flow/log.html
Note that .log uses an implicit logger.
Your .flatMapConcat(_ => Source.fromFuture(health(addr))) also seems tricky;
try .mapAsyncUnordered(1)(_ => health(addr)) instead.
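Applied to the source section from the question, those two suggestions would look roughly like this (a sketch only; the "health-check" log label is an arbitrary name):

Source.tick(1.seconds, 5.seconds, NotUsed)
  .mapAsyncUnordered(1)(_ => health(addr))
  .log("health-check") // uses an implicit LoggingAdapter if one is in scope
  .map {
    case true  => KafkaActiveConfirmed
    case false => KafkaInactiveConfirmed
  }
  .viaMat(KillSwitches.single)(Keep.right)
  .to(sink)
  .run()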

How does one measure throughput of Akka WebSocket stream?

I am new to Akka and developed a sample Akka WebSocket server that streams a file's contents to clients using BroadcastHub (based on a sample from the Akka docs).
How can I measure the throughput (messages/second), assuming the clients consume as fast as the server sends?
// file source
val fileSource = FileIO.fromPath(Paths.get(path))

// Akka file source
val theFileSource = fileSource
  .toMat(BroadcastHub.sink)(Keep.right)
  .run

// Akka Kafka file source
lazy val kafkaSourceActorStream = {
  val (kafkaSourceActorRef, kafkaSource) = Source.actorRef[String](Int.MaxValue, OverflowStrategy.fail)
    .toMat(BroadcastHub.sink)(Keep.both).run()

  Consumer.plainSource(consumerSettings, Subscriptions.topics("perf-test-topic"))
    .runForeach(record => kafkaSourceActorRef ! record.value().toString)
}
def logicFlow: Flow[String, String, NotUsed] = Flow.fromSinkAndSource(Sink.ignore, theFileSource)

val websocketFlow: Flow[Message, Message, Any] = {
  Flow[Message]
    .collect {
      case TextMessage.Strict(msg) => Future.successful(msg)
      case _ => println("ignore streamed message")
    }
    .mapAsync(parallelism = 2)(identity)
    .via(logicFlow)
    .map { msg: String => TextMessage.Strict(msg) }
}

val fileRoute =
  path("file") {
    handleWebSocketMessages(websocketFlow)
  }
}
def startServer(): Unit = {
  bindingFuture = Http().bindAndHandle(wsRoutes, HOST, PORT)
  log.info(s"Server online at http://localhost:9000/")
}

def stopServer(): Unit = {
  bindingFuture
    .flatMap(_.unbind())
    .onComplete { _ =>
      system.terminate()
      log.info("terminated")
    }
}
// ws client
def connectToWebSocket(url: String) = {
  println("Connecting to websocket: " + url)

  val (upgradeResponse, closed) = Http().singleWebSocketRequest(WebSocketRequest(url), websocketFlow)

  val connected = upgradeResponse.flatMap { upgrade =>
    if (upgrade.response.status == StatusCodes.SwitchingProtocols) {
      println("Web socket connection success")
      Future.successful(Done)
    } else {
      println(s"Web socket connection failed with error: ${upgrade.response.status}")
      throw new RuntimeException(s"Web socket connection failed: ${upgrade.response.status}")
    }
  }

  connected.onComplete { msg =>
    println(msg)
  }
}
def websocketFlow: Flow[Message, Message, _] = {
  Flow.fromSinkAndSource(printFlowRate, Source.maybe)
}

lazy val printFlowRate =
  Flow[Message]
    .alsoTo(fileSink("output.txt"))
    .via(flowRate(1.seconds))
    .to(Sink.foreach(rate => println(s"$rate")))

def flowRate(sampleTime: FiniteDuration) =
  Flow[Message]
    .conflateWithSeed(_ => 1) { case (acc, _) => acc + 1 }
    .zip(Source.tick(sampleTime, sampleTime, NotUsed))
    .map(_._1.toDouble / sampleTime.toUnit(SECONDS))

def fileSink(file: String): Sink[Message, Future[IOResult]] = {
  Flow[Message]
    .map {
      case TextMessage.Strict(msg) => msg
      case TextMessage.Streamed(stream) => stream.runFold("")(_ + _).flatMap(msg => Future.successful(msg))
    }
    .map(s => ByteString(s + "\n"))
    .toMat(FileIO.toFile(new File(file)))(Keep.right)
}
You could attach a throughput-measuring stream to your existing stream. Here is an example, inspired by this answer, that prints the number of integers that are emitted from the upstream source every second:
val rateSink = Flow[Int]
  .conflateWithSeed(_ => 0){ case (acc, _) => acc + 1 }
  .zip(Source.tick(1.second, 1.second, NotUsed))
  .map(_._1)
  .toMat(Sink.foreach(i => println(s"$i elements/second")))(Keep.right)
In the following example, we attach the above sink to a source that emits the integers 1 to 10 million. To prevent the rate-measuring stream from interfering with the main stream (which, in this case, simply converts every integer to a string and returns the last string processed as part of the materialized value), we use wireTapMat:
val (rateFut, mainFut) = Source(1 to 10000000)
  .wireTapMat(rateSink)(Keep.right)
  .map(_.toString)
  .toMat(Sink.last[String])(Keep.both)
  .run() // (Future[Done], Future[String])

rateFut onComplete {
  case Success(x) => println(s"rateFut completed: $x")
  case Failure(_) =>
}

mainFut onComplete {
  case Success(s) => println(s"mainFut completed: $s")
  case Failure(_) =>
}
Running the above sample prints something like the following:
0 elements/second
2597548 elements/second
3279052 elements/second
mainFut completed: 10000000
3516141 elements/second
607254 elements/second
rateFut completed: Done
If you don't need a reference to the materialized value of rateSink, use wireTap instead of wireTapMat. For example, attaching rateSink to your WebSocket flow could look like the following:
val websocketFlow: Flow[Message, Message, Any] = {
  Flow[Message]
    .wireTap(rateSink) // <---
    .collect {
      case TextMessage.Strict(msg) => Future.successful(msg)
      case _ => println("ignore streamed message")
    }
    .mapAsync(parallelism = 2)(identity)
    .via(logicFlow)
    .map { msg: String => TextMessage.Strict(msg) }
}
wireTap is defined on both Source and Flow.
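One small detail when attaching it to the WebSocket flow (an observation added here, not part of the original answer): rateSink above is typed for Int, so at that point you need a Message-typed variant, for example:

val msgRateSink = Flow[Message]
  .conflateWithSeed(_ => 0){ case (acc, _) => acc + 1 }
  .zip(Source.tick(1.second, 1.second, NotUsed))
  .map(_._1)
  .toMat(Sink.foreach(i => println(s"$i messages/second")))(Keep.right)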
Where I last worked I implemented a performance benchmark of this nature.
Basically, it meant creating a simple client app that consumes messages from the websocket and outputs some metrics. The natural choice was to implement the client using akka-http client-side support for websockets. See:
https://doc.akka.io/docs/akka-http/current/client-side/websocket-support.html#singlewebsocketrequest
Then we used the micrometer library to expose metrics to Prometheus, which was our tool of choice for reporting and charting.
https://github.com/micrometer-metrics
https://micrometer.io/docs/concepts#_meters
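As a rough illustration of that approach (a sketch only; the meter name and the PrometheusMeterRegistry wiring are assumptions, not code from that benchmark), the client can increment a Micrometer counter for every WebSocket message it consumes and let Prometheus compute the rate:

import akka.http.scaladsl.model.ws.Message
import akka.stream.scaladsl.Sink
import io.micrometer.prometheus.{PrometheusConfig, PrometheusMeterRegistry}

val registry = new PrometheusMeterRegistry(PrometheusConfig.DEFAULT)
val received = registry.counter("ws.messages.received") // query rate(ws_messages_received_total[1m]) in Prometheus

// Use this as the sink side of the client's websocketFlow; registry.scrape() returns the /metrics text to expose
val countingSink: Sink[Message, _] = Sink.foreach[Message](_ => received.increment())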

Empty Iterator: Asynchronous Cassandra write

I am trying to implement asynchronous Cassandra writes on objects (not an RDD) using TableWriter. Code snippet below:
class CassandraOperations[T] extends Serializable with Logging {

  /**
   * Saves the data from an object or an Iterator of objects to a Cassandra table asynchronously. Uses the specified column names.
   * You can check whether this action has completed via a callback on the Future.
   */
  def saveToCassandraAsync(
      cc: CassandraConnector,
      keyspaceName: String,
      tableName: String,
      columns: ColumnSelector = AllColumns,
      data: Iterator[T],
      writeConf: WriteConf = WriteConf(ttl = TTLOption.constant(80000)))(implicit rwf: RowWriterFactory[T]): Future[Unit] = {

    implicit val ec = ExecutionContext.global
    val writer = TableWriter(cc, keyspaceName, tableName, columns, writeConf)
    val futureAction = Future(writer.write(TaskContext.get(), data: Iterator[T]))
    futureAction
  }
}
And then wait using:
Await.result(resultFuture, TIMEOUT seconds)
The data is available when execution reaches the write call on this line:
val futureAction = Future(writer.write(TaskContext.get(), data: Iterator[T]))
But data is empty by the time execution reaches the body of def write(taskContext: TaskContext, data: Iterator[T]):
def write(taskContext: TaskContext, data: Iterator[T]) {
  val updater = OutputMetricsUpdater(taskContext, writeConf)
  connector.withSessionDo { session =>
    val protocolVersion = session.getCluster.getConfiguration.getProtocolOptions.getProtocolVersion
    val rowIterator = new CountingIterator(data)
    val stmt = prepareStatement(session).setConsistencyLevel(writeConf.consistencyLevel)
    val queryExecutor = new QueryExecutor(
      session,
      writeConf.parallelismLevel,
      Some(updater.batchFinished(success = true, _, _, _)),
      Some(updater.batchFinished(success = false, _, _, _)))
    val routingKeyGenerator = new RoutingKeyGenerator(tableDef, columnNames)
    val batchType = if (isCounterUpdate) Type.COUNTER else Type.UNLOGGED
    val boundStmtBuilder = new BoundStatementBuilder(
      rowWriter,
      stmt,
      protocolVersion = protocolVersion,
      ignoreNulls = writeConf.ignoreNulls)
    val batchStmtBuilder = new BatchStatementBuilder(
      batchType,
      routingKeyGenerator,
      writeConf.consistencyLevel)
    val batchKeyGenerator = batchRoutingKey(session, routingKeyGenerator) _
    val batchBuilder = new GroupingBatchBuilder(
      boundStmtBuilder,
      batchStmtBuilder,
      batchKeyGenerator,
      writeConf.batchSize,
      writeConf.batchGroupingBufferSize,
      rowIterator)
    val rateLimiter = new RateLimiter((writeConf.throughputMiBPS * 1024 * 1024).toLong, 1024 * 1024)

    logDebug(s"Writing data partition to $keyspaceName.$tableName in batches of ${writeConf.batchSize}.")

    for (stmtToWrite <- batchBuilder) {
      queryExecutor.executeAsync(stmtToWrite)
      assert(stmtToWrite.bytesCount > 0)
      rateLimiter.maybeSleep(stmtToWrite.bytesCount)
    }

    queryExecutor.waitForCurrentlyExecutingTasks()

    if (!queryExecutor.successful)
      throw new IOException(s"Failed to write statements to $keyspaceName.$tableName.")

    val duration = updater.finish() / 1000000000d
    logInfo(f"Wrote ${rowIterator.count} rows to $keyspaceName.$tableName in $duration%.3f s.")
    if (boundStmtBuilder.logUnsetToNullWarning) {
      logWarning(boundStmtBuilder.UnsetToNullWarning)
    }
  }
}
So I see an empty iterator.
Please advise on what the issue could be.
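One property worth ruling out here (a general observation about Scala, not a confirmed diagnosis of the snippet above): an Iterator can only be traversed once, so anything that consumes data before Future(writer.write(...)) actually runs, such as logging it, sizing it, or iterating it for validation, leaves an empty iterator for the write. A minimal illustration:

val it = Iterator(1, 2, 3)
println(it.size)    // 3 -- but computing the size consumes the iterator
println(it.isEmpty) // true: nothing is left for a later consumer such as writer.write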

Why does my Akka data stream stop processing a huge file (~250,000 lines of strings) but work for a small file?

My stream works for a smaller file of 1,000 lines but stops when I test it on a large file of ~12 MB and ~250,000 lines. I tried applying backpressure with a buffer and throttling it, and still the same thing happens...
Here is my data streamer:
class UserDataStreaming(usersFile: File) {

  implicit val system = ActorSystemContainer.getInstance().getSystem
  implicit val materializer = ActorSystemContainer.getInstance().getMaterializer

  def startStreaming() = {
    val graph = RunnableGraph.fromGraph(GraphDSL.create() {
      implicit builder =>
        val usersSource = builder.add(Source.fromIterator(() => usersDataLines)).out
        val stringToUserFlowShape: FlowShape[String, User] = builder.add(csvToUser)
        val averageAgeFlowShape: FlowShape[User, (String, Int, Int)] = builder.add(averageUserAgeFlow)
        val averageAgeSink = builder.add(Sink.foreach(averageUserAgeSink)).in

        usersSource ~> stringToUserFlowShape ~> averageAgeFlowShape ~> averageAgeSink

        ClosedShape
    })

    graph.run()
  }

  val usersDataLines = scala.io.Source.fromFile(usersFile, "ISO-8859-1").getLines().drop(1)

  val csvToUser = Flow[String].map(_.split(";").map(_.trim)).map(csvLinesArrayToUser)

  def csvLinesArrayToUser(line: Array[String]) = User(line(0), line(1), line(2))

  def averageUserAgeSink[usersSource](source: usersSource) {
    source match {
      case (age: String, count: Int, totalAge: Int) => println(s"age = $age; Average reader age is: ${Try(totalAge/count).getOrElse(0)} count = $count and total age = $totalAge")
      case bad => println(s"Bad case: $bad")
    }
  }

  def averageUserAgeFlow = Flow[User].fold(("", 0, 0)) {
    (nums: (String, Int, Int), user: User) =>
      var counter: Option[Int] = None
      var totalAge: Option[Int] = None
      val ageInt = Try(user.age.substring(1, user.age.length - 1).toInt)
      if (ageInt.isSuccess) {
        counter = Some(nums._2 + 1)
        totalAge = Some(nums._3 + ageInt.get)
      }
      else {
        counter = Some(nums._2 + 0)
        totalAge = Some(nums._3 + 0)
      }
      //println(counter.get)
      (user.age, counter.get, totalAge.get)
  }
}
Here is my Main:
object Main {
  def main(args: Array[String]): Unit = {
    implicit val system = ActorSystemContainer.getInstance().getSystem
    implicit val materializer = ActorSystemContainer.getInstance().getMaterializer

    val usersFile = new File("data/BX-Users.csv")
    println(usersFile.length())

    val userDataStreamer = new UserDataStreaming(usersFile)
    userDataStreamer.startStreaming()
  }
}
It's possible that there is an error related to one row of your CSV file. In that case, the stream fails and stops. Try defining the parsing flow with a supervision strategy, along these lines:
Flow[String]
  .map(line => csvLinesArrayToUser(line.split(";").map(_.trim))) // parsing that may throw on a malformed row
  .withAttributes(ActorAttributes.supervisionStrategy {
    case ex: Throwable =>
      log.error("Error parsing row event: {}", ex)
      Supervision.Resume
  })
In this case the exception is captured and the stream ignores the failing row and continues.
If you use Supervision.Stop, the stream stops.
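If you want to resume only on the failures a malformed CSV row can realistically produce, and still stop on anything unexpected, a named decider keeps that decision in one place. This is a sketch, not part of the original answer; the exception types are assumptions about what the parsing code may throw:

import akka.stream.{ActorAttributes, Supervision}

// Resume on typical CSV parsing failures; stop the stream on anything else
val csvDecider: Supervision.Decider = {
  case _: NumberFormatException          => Supervision.Resume
  case _: ArrayIndexOutOfBoundsException => Supervision.Resume
  case _                                 => Supervision.Stop
}

// Attach it to the question's csvToUser flow (or to the whole graph via the materializer settings)
val safeCsvToUser = csvToUser.withAttributes(ActorAttributes.supervisionStrategy(csvDecider))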

BulkLoading to Phoenix using Spark

I was trying to code some utilities to bulk load data through HFiles from Spark RDDs.
I was following the pattern of CSVBulkLoadTool from Phoenix. I managed to generate some HFiles and load them into HBase, but I can't see the rows using sqlline (they are visible via the hbase shell, though). I would be more than grateful for any suggestions.
BulkPhoenixLoader.scala:
class BulkPhoenixLoader[A <: ImmutableBytesWritable : ClassTag, T <: KeyValue : ClassTag](rdd: RDD[(A, T)]) {

  def createConf(tableName: String, inConf: Option[Configuration] = None): Configuration = {
    val conf = inConf.map(HBaseConfiguration.create).getOrElse(HBaseConfiguration.create())
    val job: Job = Job.getInstance(conf, "Phoenix bulk load")

    job.setMapOutputKeyClass(classOf[ImmutableBytesWritable])
    job.setMapOutputValueClass(classOf[KeyValue])

    // initialize credentials to possibly run in a secure env
    TableMapReduceUtil.initCredentials(job)
    val htable: HTable = new HTable(conf, tableName)

    // Auto configure partitioner and reducer according to the Main Data table
    HFileOutputFormat2.configureIncrementalLoad(job, htable)
    conf
  }

  def bulkSave(tableName: String, outputPath: String, conf: Option[Configuration]) = {
    val configuration: Configuration = createConf(tableName, conf)
    rdd.saveAsNewAPIHadoopFile(
      outputPath,
      classOf[ImmutableBytesWritable],
      classOf[Put],
      classOf[HFileOutputFormat2],
      configuration)
  }
}
ExtendedProductRDDFunctions.scala:
class ExtendedProductRDDFunctions[A <: scala.Product](data: org.apache.spark.rdd.RDD[A]) extends
    ProductRDDFunctions[A](data) with Serializable {

  def toHFile(tableName: String,
              columns: Seq[String],
              conf: Configuration = new Configuration,
              zkUrl: Option[String] = None): RDD[(ImmutableBytesWritable, KeyValue)] = {
    val config = ConfigurationUtil.getOutputConfiguration(tableName, columns, zkUrl, Some(conf))
    val tableBytes = Bytes.toBytes(tableName)
    val encodedColumns = ConfigurationUtil.encodeColumns(config)
    val jdbcUrl = zkUrl.map(getJdbcUrl).getOrElse(getJdbcUrl(config))
    val conn = DriverManager.getConnection(jdbcUrl)

    val query = QueryUtil.constructUpsertStatement(tableName,
      columns.toList.asJava,
      null)
    data.flatMap(x => mapRow(x, jdbcUrl, encodedColumns, tableBytes, query))
  }

  def mapRow(product: Product,
             jdbcUrl: String,
             encodedColumns: String,
             tableBytes: Array[Byte],
             query: String): List[(ImmutableBytesWritable, KeyValue)] = {
    val conn = DriverManager.getConnection(jdbcUrl)
    val preparedStatement = conn.prepareStatement(query)

    val columnsInfo = ConfigurationUtil.decodeColumns(encodedColumns)
    columnsInfo.zip(product.productIterator.toList).zipWithIndex.foreach(setInStatement(preparedStatement))
    preparedStatement.execute()

    val uncommittedDataIterator = PhoenixRuntime.getUncommittedDataIterator(conn, true)
    val hRows = uncommittedDataIterator.asScala.filter(kvPair =>
      Bytes.compareTo(tableBytes, kvPair.getFirst) == 0
    ).flatMap(kvPair => kvPair.getSecond.asScala.map(
      kv => {
        val byteArray = kv.getRowArray.slice(kv.getRowOffset, kv.getRowOffset + kv.getRowLength - 1) :+ 1.toByte
        (new ImmutableBytesWritable(byteArray, 0, kv.getRowLength), kv)
      }))

    conn.rollback()
    conn.close()
    hRows.toList
  }

  def setInStatement(statement: PreparedStatement): (((ColumnInfo, Any), Int)) => Unit = {
    case ((c, v), i) =>
      if (v != null) {
        // Both Java and Joda dates used to work in 4.2.3, but now they must be java.sql.Date
        val (finalObj, finalType) = v match {
          case dt: DateTime => (new Date(dt.getMillis), PDate.INSTANCE.getSqlType)
          case d: util.Date => (new Date(d.getTime), PDate.INSTANCE.getSqlType)
          case _ => (v, c.getSqlType)
        }
        statement.setObject(i + 1, finalObj, finalType)
      } else {
        statement.setNull(i + 1, c.getSqlType)
      }
  }

  private def getIndexTables(conn: Connection, qualifiedTableName: String): List[(String, String)] = {
    val table: PTable = PhoenixRuntime.getTable(conn, qualifiedTableName)
    val tables = table.getIndexes.asScala.map(x => x.getIndexType match {
      case IndexType.LOCAL => (x.getTableName.getString, MetaDataUtil.getLocalIndexTableName(qualifiedTableName))
      case _ => (x.getTableName.getString, x.getTableName.getString)
    }).toList
    tables
  }
}
I load the generated HFiles with the HBase utility tool as follows:
hbase org.apache.hadoop.hbase.mapreduce.LoadIncrementalHFiles path/to/hfile tableName
You could just convert your CSV file to an RDD of Product and use the .saveToPhoenix method. This is generally how I load CSV data into Phoenix.
Please see: https://phoenix.apache.org/phoenix_spark.html
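A rough sketch of that approach, assuming the phoenix-spark module is on the classpath and that the table name, column names, and ZooKeeper URL below are placeholders rather than values from the question:

import org.apache.spark.SparkContext
import org.apache.phoenix.spark._ // adds saveToPhoenix to RDDs of Product

val sc: SparkContext = ??? // your existing SparkContext (placeholder)

// Each tuple is a Product whose fields line up with the listed columns
val rows = sc.textFile("data/input.csv")
  .map(_.split(","))
  .map(cols => (cols(0).toLong, cols(1), cols(2)))

rows.saveToPhoenix(
  "OUTPUT_TABLE",             // placeholder Phoenix table
  Seq("ID", "COL1", "COL2"),  // placeholder column names
  zkUrl = Some("zookeeper-host:2181"))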