Datetime: Calculating working hours between dates - Scala

I have 3 Scala functions that work well: toEnd, toStart and jourOuvree.
I wrote another function, equals, that computes the number of working hours between 2 dates by calling those 3 functions.
All the functions live in a Scala object, but when I run equals I get an error, shown below:
^
I need to apply the function to my DataFrame to compute the number of hours between 2 dates.
I'm not very familiar with the java.time API.
Can someone help me resolve these errors?

I rewrote your solution. See comments inline.
import java.time._
import java.time.temporal.ChronoUnit
import scala.annotation.tailrec
import scala.concurrent.duration._
object Demo extends App {
val start = LocalTime.of(9, 0)
val midEnd = LocalTime.of(13, 0)
val midStart = LocalTime.of(14, 0)
val end = LocalTime.of(18, 0)
val firstHalf = start.until(midEnd, ChronoUnit.MILLIS).millis
val secondHalf = midStart.until(end, ChronoUnit.MILLIS).millis
implicit class formatter(d: FiniteDuration) {
def withMinutes = {
val l = d.toMinutes
s"${l / 60}:${l % 60}"
}
def withSeconds = s"${d.toHours}:${d.toMinutes % 60}:${d.toSeconds % 60}"
}
// check if day is working. Could be easily adjusted to check not only weekend but holidays as well
def isWorkingDay(d: LocalDate) = !Set(DayOfWeek.SATURDAY, DayOfWeek.SUNDAY).contains(d.getDayOfWeek)
// check if current day is working day and if not switch to next start of day/previous end of day
// depending on the asc parameter
@tailrec
def adjust(t: LocalDateTime, asc: Boolean = true): LocalDateTime = {
if (!isWorkingDay(t.toLocalDate) && asc) adjust(t.toLocalDate.plusDays(1).atTime(start), asc)
else if (!isWorkingDay(t.toLocalDate) && !asc) adjust(t.toLocalDate.minusDays(1).atTime(end), asc)
else t
}
// working time remaining from t until the end of the working day
def toEnd(t: LocalTime) = {
if (t.isBefore(start)) firstHalf + secondHalf
else if (t.isBefore(midEnd)) t.until(midEnd, ChronoUnit.MILLIS).millis + secondHalf
else if (t.isBefore(midStart)) secondHalf
else if (t.isBefore(end)) t.until(end, ChronoUnit.MILLIS).millis
else 0.hours
}
// working time elapsed from the start of the working day until t
def toStart(t: LocalTime) = {
if (t.isBefore(start)) 0.hours
else if (t.isBefore(midEnd)) start.until(t, ChronoUnit.MILLIS).millis
else if (t.isBefore(midStart)) firstHalf
else if (t.isBefore(end)) firstHalf + midStart.until(t, ChronoUnit.MILLIS).millis
else firstHalf + secondHalf
}
// count the number of working days between two dates.
// if dates are the same - means 0 days
def jourOuvree(d1: LocalDate, d2: LocalDate): Int = {
@tailrec
def count(n: LocalDate, acc: Int): Int = {
if (n.isEqual(d2) || n.isAfter(d2)) acc
else if (!isWorkingDay(n)) count(n.plusDays(1), acc)
else count(n.plusDays(1), acc + 1)
}
count(d1, 0)
}
// evaluate the duration of working time between two date/times
// take into account that start/end could be non-working and
// adjust
def toEquals(rd1: LocalDateTime, rd2: LocalDateTime) = {
val d1 = adjust(rd1)
val d2 = adjust(rd2, asc = false)
// if both ends fall on the same day, just compute the difference between the hours
if (d1.isAfter(d2)) 0.hours
else if (d1.toLocalDate.isEqual(d2.toLocalDate)) {
toEnd(d1.toLocalTime) - toEnd(d2.toLocalTime)
}
else {
// full working days strictly between the two calendar dates, 8 hours each
toEnd(d1.toLocalTime) + jourOuvree(d1.toLocalDate.plusDays(1), d2.toLocalDate) * 8.hours + toStart(d2.toLocalTime)
}
}
val thursdayStart = LocalDate.of(2018, 3, 1).atTime(start)
val thursdayEnd = LocalDate.of(2018, 3, 1).atTime(end)
val fridayStart = LocalDate.of(2018, 3, 2).atTime(start)
val fridayEnd = LocalDate.of(2018, 3, 2).atTime(end)
val saturdayStart = LocalDate.of(2018, 3, 3).atTime(start)
val saturdayEnd = LocalDate.of(2018, 3, 3).atTime(end)
val sundayStart = LocalDate.of(2018, 3, 4).atTime(start)
val sundayEnd = LocalDate.of(2018, 3, 4).atTime(end)
val mondayStart = LocalDate.of(2018, 3, 5).atTime(start)
val mondayEnd = LocalDate.of(2018, 3, 5).atTime(end)
println(toEquals(thursdayStart, sundayStart).withSeconds)
println(toEquals(thursdayStart, saturdayStart).withSeconds)
println(toEquals(thursdayStart, mondayStart).withSeconds)
println(toEquals(thursdayStart, mondayEnd).withSeconds)
println(toEquals(thursdayStart, thursdayEnd).withSeconds)
// formatter to use to parse string input
import java.time.format.DateTimeFormatter
val DATE_TIME_FORMATTER = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
println(toEquals(LocalDateTime.parse("2018-03-01 11:22:33", DATE_TIME_FORMATTER), LocalDateTime.parse("2018-03-02 11:22:33", DATE_TIME_FORMATTER)).withSeconds)
}
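To apply this to a DataFrame, as the question asks, one option is to wrap toEquals in a UDF. A minimal sketch, assuming Spark 2.x, that the helpers above are moved out of the App body into a plain object (hypothetically called WorkingTime here, so executors can initialise it safely), and a DataFrame df with string columns start_ts and end_ts in the same yyyy-MM-dd HH:mm:ss format (the column names are made up for the example):
import java.time.LocalDateTime
import java.time.format.DateTimeFormatter
import org.apache.spark.sql.functions.{col, udf}

// number of working hours between the two timestamps, as a fractional Double
val workingHoursUdf = udf { (s: String, e: String) =>
  // build the formatter inside the closure: DateTimeFormatter is not Serializable
  val fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd HH:mm:ss")
  WorkingTime.toEquals(LocalDateTime.parse(s, fmt), LocalDateTime.parse(e, fmt)).toMillis / 3600000.0
}

val withHours = df.withColumn("working_hours", workingHoursUdf(col("start_ts"), col("end_ts")))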


How to calculate 'n'-day date intervals in a functional style?

I am trying to calculate intervals of 'n' days from a start date to an end date. The function signature would take start_date, end_date and interval as arguments and return a collection of the (start, end) days of the given intervals.
Example: start_date: 2018-01-01, end_date: 2018-02-20, interval: 20
Expected Output:
2018-01-01 , 2018-01-20 (20 days)
2018-01-21 , 2018-02-09 (20 days)
2018-02-10 , 2018-02-20 (remaining)
I tried to write it in Scala, but I don't feel it's a proper functional style of writing.
case class DateContainer(period: String, range: (LocalDate, LocalDate))
def generateDates(startDate: String, endDate: String,interval:Int): Unit = {
import java.time._
var lstDDateContainer = List[DateContainer]()
var start = LocalDate.parse(startDate)
val end = LocalDate.parse(endDate)
import java.time.temporal._
var futureMonth = ChronoUnit.DAYS.addTo(start, interval)
var i = 1
while (end.isAfter(futureMonth)) {
lstDDateContainer = DateContainer("P" + i, (start, futureMonth)):: lstDDateContainer
start=futureMonth
futureMonth = ChronoUnit.DAYS.addTo(futureMonth, interval)
i += 1
}
lstDDateContainer= DateContainer("P" + i, (start, end))::lstDDateContainer
lstDDateContainer.foreach(println)
}
generateDates("2018-01-01", "2018-02-20",20)
Could anyone help me write this in a functional style?
I offer a solution that produces a slightly different result than given in the question but could be easily modified to get the desired answer:
//Preliminaries
import java.time.LocalDate
import java.time.format.DateTimeFormatter
import java.time.temporal.ChronoUnit
val fmt = DateTimeFormatter.ofPattern("yyyy-MM-dd")
val startDate ="2018-01-01"
val endDate = "2018-02-21"
val interval = 20L
val d1 = LocalDate.parse(startDate, fmt)
val d2 = LocalDate.parse(endDate, fmt)
//The main code
Stream.continually(interval)
.scanLeft((d1, d1.minusDays(1), interval)) ((x,y) => {
val finDate = x._2.plusDays(y)
if(finDate.isAfter(d2))
(x._2.plusDays(1), d2, ChronoUnit.DAYS.between(x._2, d2))
else
(x._2.plusDays(1), x._2.plusDays(y), y)
}).takeWhile(d => d._3 > 0).drop(1).toList
Result:
(2018-01-01,2018-01-20,20)
(2018-01-21,2018-02-09,20)
(2018-02-10,2018-02-21,12)
The idea is to scan a 3-tuple through a stream of interval values and stop when no days remain; since scanLeft also emits the initial seed tuple, the result is trimmed with drop(1).
Use the java.time library to generate the dates and Stream.iterate() to generate the sequence of intervals.
import java.time.LocalDate
def generateDates( startDate :LocalDate
, endDate :LocalDate
, dayInterval :Int ) :Unit = {
val intervals =
Stream.iterate((startDate, startDate plusDays dayInterval-1)){
case (_,lastDate) =>
val nextDate = lastDate plusDays dayInterval
(lastDate plusDays 1, if (nextDate isAfter endDate) endDate
else nextDate)
}.takeWhile(_._1 isBefore endDate)
println(intervals.mkString("\n"))
}
usage:
generateDates(LocalDate.parse("2018-01-01"), LocalDate.parse("2018-02-20"), 20)
// (2018-01-01,2018-01-20)
// (2018-01-21,2018-02-09)
// (2018-02-10,2018-02-20)
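Side note (not part of the original answer): on Scala 2.13+, Stream is deprecated and LazyList.iterate is a drop-in replacement for the body above:
val intervals =
  LazyList.iterate((startDate, startDate.plusDays(dayInterval - 1))) { case (_, lastDate) =>
    val nextDate = lastDate.plusDays(dayInterval)
    (lastDate.plusDays(1), if (nextDate.isAfter(endDate)) endDate else nextDate)
  }.takeWhile { case (first, _) => first.isBefore(endDate) }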
Something like (Untested):
import java.time.LocalDate
import java.time.temporal.ChronoUnit
def dates(startDate: LocalDate, endDate: LocalDate, dayInterval: Int): List[(LocalDate, LocalDate, Int)] = {
if(startDate.isAfter(endDate)) {
Nil
}
else {
val nextStart = startDate.plusDays(dayInterval)
if(nextStart.isAfter(endDate)) {
List((startDate, endDate, ChronoUnit.DAYS.between(startDate, endDate).toInt))
}
else {
(startDate, nextStart, dayInterval) :: dates(nextStart, endDate, dayInterval)
}
}
}
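A quick sanity check of the fixed version against the question's example (still untested, per the author's caveat); note that consecutive ranges here share their boundary date:
dates(LocalDate.parse("2018-01-01"), LocalDate.parse("2018-02-20"), 20)
// expected result:
// List((2018-01-01,2018-01-21,20), (2018-01-21,2018-02-10,20), (2018-02-10,2018-02-20,10))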
If you're open to using Joda for date-time manipulations, here's what I use:
import org.joda.time.{DateTime, Days}
// given from & to dates, find no of days elapsed in between (integer)
def getDaysInBetween(from: DateTime, to: DateTime): Int = Days.daysBetween(from, to).getDays
def getDateSegments(from: DateTime, to: DateTime, interval: Int): Seq[(DateTime, DateTime)] = {
// no of days between from & to dates
val days: Int = getDaysInBetween(from, to) + 1
// no of segments (date ranges) between to & from dates
val segments: Int = days / interval
// (remaining) no of days in last range
val remainder: Int = days % interval
// last date-range
val remainderRanges: Seq[(DateTime, DateTime)] =
if (remainder != 0) from -> from.plusDays(remainder - 1) :: Nil
else Nil
// all (remaining) date-ranges + last date-range
(0 until segments).map { segment: Int =>
to.minusDays(segment * interval + interval - 1) -> to.minusDays(segment * interval)
} ++ remainderRanges
}

Generating a recommendation model for a large dataset using Spark

I'm trying to generate a simple ALS model using the Spark documentation here.
My first file (ratings.csv) has 20 million UserID,MovID,Rating rows and can be downloaded here.
The testing data is a subset of ratings.csv and can be downloaded here.
The test file has just the UserID and MovieID columns.
So to create the training data we have to filter ratings.csv.
The following code works fine for a smaller case of 100,000 UserID,MovID ratings, but I am not able to generate the model for the big case.
Please help with a pointer.
/**
* Created by echoesofconc on 3/8/17.
*/
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel
import org.apache.spark.mllib.recommendation.Rating
import java.io._
import scala.collection.mutable.ListBuffer
object Prateek_Agrawal_task1 {
def dropheader(data: RDD[String]): RDD[String] = {
data.mapPartitionsWithIndex((idx, lines) => {
if (idx == 0) {
lines.drop(1)
}
lines
})
}
def create_training(ratings_split: RDD[Array[String]], ratings_testing: Array[Array[String]]) = {
ratings_split.filter(x => {
ratings_testing.exists(y =>
(x(0) == y(0) && x(1) == y(1))
) == false
})
}
def create_testing(ratings_split: RDD[Array[String]], ratings_testing: Array[Array[String]]) = {
ratings_split.filter(x => {
ratings_testing.exists(y =>
(x(0) == y(0) && x(1) == y(1))
) == true
})
}
def create_model(ratings_train:RDD[Array[String]],rank:Int,numIterations:Int ):org.apache.spark.mllib.recommendation.MatrixFactorizationModel={
val ratings = ratings_train.map(_ match { case Array(user,item,rate,temp) =>
Rating(user.toInt, item.toInt, rate.toDouble)
})
val model = ALS.train(ratings, rank, numIterations, 0.01)
return model
}
def print_results(final_predictions_adjusted:RDD[((Int, Int), Double)])={
val rating_range=final_predictions_adjusted.map(x=>(x._2.toInt,1)).reduceByKey(_+_).sortByKey()
val rating_range_till_4=rating_range.map{x=>
var temp=x
if (x._1==5){temp=(4,x._2)}
temp
}.reduceByKey(_+_)
rating_range_till_4.sortByKey().foreach { x =>
if(x._1==0)
printf(">=0 and <1: " + x._2+"\n")
if(x._1==1)
printf(">=1 and <2: " + x._2+"\n")
if(x._1==2)
printf(">=2 and <3: " + x._2+"\n")
if(x._1==3)
printf(">=3 and <4: " + x._2+"\n")
if(x._1==4)
printf(">=4 " + x._2+"\n")
if(x._1==5)
printf("=5 " + x._2+"\n")
}
}
case class User_mov_rat(UserID: Int, MovieID:Int, Pred_rating: Double)
def print_outputfile(final_predictions_adjusted:RDD[((Int, Int), Double)])={
val writer = new FileWriter(new File("./output.txt" ))
writer.write("UserID,MovieID,Pred_rating\n")
final_predictions_adjusted.collect().foreach(x=>{writer.write(x._1._1+","+x._1._2+","+x._2+"\n")})
writer.close()
}
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("Prateek_Agrawal_task1").setMaster("local[2]")
val sc = new SparkContext(conf)
val file = "/Users/echoesofconc/Documents/USC_courses/INF553/ml-20m/ratings.csv"
val test = "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/testing_20m.csv"
val data = sc.textFile(file, 2).cache()
val data_test = sc.textFile(test, 2).cache()
// Drop Header
val data_wo_header=dropheader(data).persist()
val data_test_wo_header=dropheader(data_test).persist()
// Create Training and testing data of the format (User ID, MovID, Rating, Time)
val ratings_split = data_wo_header.map(line => line.split(",")).persist()
data_wo_header.unpersist()
data.unpersist()
val ratings_testing = data_test_wo_header.map(line => line.split(",")).collect()
data_test_wo_header.unpersist()
data_test.unpersist()
val ratings_train = create_training(ratings_split, ratings_testing).persist()
val ratings_test=create_testing(ratings_split, ratings_testing)
ratings_split.unpersist()
ratings_test.unpersist()
// Create the model using rating_train the training data
val rank = 1
val numIterations = 10
val model=create_model(ratings_train,rank,numIterations)
ratings_train.unpersist()
// Average user,Rating from training this is for cases which are there in test but not rated by any user in training
val user_avgrat=ratings_test.map(_ match { case Array(user, mov, rate, temp) =>(user.toInt, (rate.toDouble,1.0))}).reduceByKey((x,y)=>(x._1 + y._1, x._2 + y._2)).mapValues{ case (sum, count) => (1.0 * sum) / count }
// Predict user_mov ratings
val user_mov = data_test_wo_header.map(_.split(',') match { case Array(user, mov) =>
(user.toInt,mov.toInt)
})
val predictions =
model.predict(user_mov).map { case Rating(user, mov, rate) =>
((user, mov), rate)
}
// Combine Predictions and unpredicted user,Movies due to them being individual. Going forward we need to improve the accuracy for these predictions
val user_mov_rat=user_mov.map(x=>(x,0.0))
val predictions_unpredicted_combined= predictions.union(user_mov_rat).reduceByKey(_+_).map(x=>(x._1._1,(x._1._2,x._2)))
// Combine average rating and predictions+unpredicted values
val avg_rating_predictions_unpredicted_combined=predictions_unpredicted_combined.join(user_avgrat)
// Generate final predictions RDD
val final_predictions=avg_rating_predictions_unpredicted_combined.map{x=>
var temp=((x._1,x._2._1._1),x._2._2)
if(x._2._1._2==0.0){temp=((x._1,x._2._1._1),x._2._2)}
if(x._2._1._2!=0.0){temp=((x._1,x._2._1._1),x._2._1._2)}
temp
}
// Adjust for ratings above 5.0 and below 0.0
val final_predictions_adjusted=final_predictions.map{x=>
var temp=x
if (x._2>5.0){temp=(x._1,5.0)}
if (x._2<0.0){temp=(x._1,0.0)}
temp
}
val ratesAndPreds = ratings_test.map(_ match { case Array(user, mov, rate, temp) => ((user.toInt,mov.toInt),rate.toDouble)}).join(final_predictions_adjusted)
val MSE = ratesAndPreds.map { case ((user, product), (r1, r2)) =>
val err = (r1 - r2)
err * err
}.mean()
val RMSE=math.sqrt(MSE)
// Print output.txt
print_outputfile(final_predictions_adjusted)
// Print the predictionresults
print_results(final_predictions_adjusted.sortByKey())
print(RMSE+"\n")
}
}
In case someone thinks I should be doing a regex match: I have tried that approach, but it doesn't seem to be the bottleneck.
I only need to complete the create-model part, which is where I am stuck for the big dataset. Can somebody help?
EDIT:
Another approach I tried, using broadcast variables, is much faster. But it has been running for 12 hours with no sign of progress. On the Spark UI, somehow the whole RDD (ratings.csv, ~500 MB) is not cached; only around 64 MB with 2.5 million lines is processed initially. I am using --executor-memory -8g. I have modified the create_training and create_testing functions:
/**
* Created by echoesofconc on 3/8/17.
*/
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.Rating
import java.io._
object Prateek_Agrawal_task2 {
def dropheader(data: RDD[String]): RDD[String] = {
data.mapPartitionsWithIndex((idx, lines) => {
if (idx == 0) {
lines.drop(1)
}
lines
})
}
def create_training(data_wo_header: RDD[String], data_test_wo_header: RDD[String],sc:SparkContext): RDD[String] = {
val rdd2array = sc.broadcast(data_test_wo_header.collect())
val training_set = data_wo_header.filter{
case(x) => rdd2array.value.filter(y => x.indexOf(y.toString())==0).length == 0
}
return training_set
}
def create_test(data_wo_header: RDD[String], data_test_wo_header: RDD[String],sc:SparkContext): RDD[String] = {
val rdd2array = sc.broadcast(data_test_wo_header.collect())
val training_set = data_wo_header.filter{
case(x) => rdd2array.value.filter(y => x.indexOf(y.toString())==0).length != 0
}
return training_set
}
def create_model(ratings_train:RDD[String],rank:Int,numIterations:Int ):org.apache.spark.mllib.recommendation.MatrixFactorizationModel={
val ratings = ratings_train.map(_.split(',') match { case Array(user, item, rate, timestamp) =>
Rating(user.toInt, item.toInt, rate.toDouble)
})
val model = ALS.train(ratings, rank, numIterations, 0.01)
return model
}
def print_results(final_predictions_adjusted:RDD[((Int, Int), Double)])={
val rating_range=final_predictions_adjusted.map(x=>(x._2.toInt,1)).reduceByKey(_+_).sortByKey()
val rating_range_till_4=rating_range.map{x=>
var temp=x
if (x._1==5){temp=(4,x._2)}
temp
}.reduceByKey(_+_)
rating_range_till_4.sortByKey().foreach { x =>
if(x._1==0)
printf(">=0 and <1: " + x._2+"\n")
if(x._1==1)
printf(">=1 and <2: " + x._2+"\n")
if(x._1==2)
printf(">=2 and <3: " + x._2+"\n")
if(x._1==3)
printf(">=3 and <4: " + x._2+"\n")
if(x._1==4)
printf(">=4 " + x._2+"\n")
if(x._1==5)
printf("=5 " + x._2+"\n")
}
}
case class User_mov_rat(UserID: Int, MovieID:Int, Pred_rating: Double)
def print_outputfile(final_predictions_adjusted:RDD[((Int, Int), Double)])={
val writer = new FileWriter(new File("./output.txt" ))
writer.write("UserID,MovieID,Pred_rating\n")
final_predictions_adjusted.collect().foreach(x=>{writer.write(x._1._1+","+x._1._2+","+x._2+"\n")})
writer.close()
}
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("Prateek_Agrawal_task1").setMaster("local[2]")
val sc = new SparkContext(conf)
val file = "/Users/echoesofconc/Documents/USC_courses/INF553/ml-latest-small/ratings.csv"
val test = "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/testing_small.csv"
// val file = "/Users/echoesofconc/Documents/USC_courses/INF553/ml-20m/ratings.csv"
// val test = "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/testing_20m.csv"
val data = sc.textFile(file, 2).persist()
val data_test = sc.textFile(test, 2).persist()
// Drop Header
val data_wo_header=dropheader(data)
val data_test_wo_header=dropheader(data_test)
// Create training and testing data of the format (User ID, MovID, Rating, Time)
val ratings_train=create_training(data_wo_header,data_test_wo_header,sc).persist()
val ratings_test=create_test(data_wo_header,data_test_wo_header,sc)
// val ratings_test=create_test(data_wo_header,data_test_wo_header,sc)
// data_test_wo_header.unpersist()
// data_test.unpersist()
//// data.unpersist()
//// data_test.unpersist()
// Create the model using rating_train the training data
val rank = 1
val numIterations = 10
val model=create_model(ratings_train,rank,numIterations)
// ratings_train.unpersist()
// model.save(sc, "target/tmp/myCollaborativeFilter")
// val Model = MatrixFactorizationModel.load(sc, "/Users/echoesofconc/myCollaborativeFilter")
// Average user,Rating from training
val user_avgrat=ratings_test.map(_.split(",") match { case Array(user, mov, rate, temp) =>(user.toInt, (rate.toDouble,1.0))}).reduceByKey((x,y)=>(x._1 + y._1, x._2 + y._2)).mapValues{ case (sum, count) => (1.0 * sum) / count }
//data
// Predict user_mov ratings
val user_mov = data_test_wo_header.map(_.split(',') match { case Array(user, mov) =>
(user.toInt,mov.toInt)
})
val predictions =
model.predict(user_mov).map { case Rating(user, mov, rate) =>
((user, mov), rate)
}
// Combine Predictions and unpredicted user,Movies due to them being individual. Going forward we need to improve the accuracy for these predictions
val user_mov_rat=user_mov.map(x=>(x,0.0))
val predictions_unpredicted_combined= predictions.union(user_mov_rat).reduceByKey(_+_).map(x=>(x._1._1,(x._1._2,x._2)))
// Combine average rating and predictions+unpredicted values
val avg_rating_predictions_unpredicted_combined=predictions_unpredicted_combined.join(user_avgrat)
// Generate final predictions RDD
val final_predictions=avg_rating_predictions_unpredicted_combined.map{x=>
var temp=((x._1,x._2._1._1),x._2._2)
if(x._2._1._2==0.0){temp=((x._1,x._2._1._1),x._2._2)}
if(x._2._1._2!=0.0){temp=((x._1,x._2._1._1),x._2._1._2)}
temp
}
// Adjust for ratings above 5.0 and below 0.0
val final_predictions_adjusted=final_predictions.map{x=>
var temp=x
if (x._2>5.0){temp=(x._1,5.0)}
if (x._2<0.0){temp=(x._1,0.0)}
temp
}
val ratesAndPreds = ratings_test.map(_.split(",") match { case Array(user, mov, rate, temp) => ((user.toInt,mov.toInt),rate.toDouble)}).join(final_predictions_adjusted)
val MSE = ratesAndPreds.map { case ((user, product), (r1, r2)) =>
val err = (r1 - r2)
err * err
}.mean()
val RMSE=math.sqrt(MSE)
// Print output.txt
print_outputfile(final_predictions_adjusted)
// Print the predictionresults
print_results(final_predictions_adjusted.sortByKey())
print(RMSE+"\n")
}
}
This worked out fine. It uses subtractByKey (a key-based anti-join) to create the testing and training data:
/**
* Created by echoesofconc on 3/8/17.
*/
import org.apache.spark.SparkConf
import org.apache.spark.SparkContext
import org.apache.spark.rdd.RDD
import org.apache.spark.mllib.recommendation.ALS
import org.apache.spark.mllib.recommendation.Rating
import org.apache.spark.mllib.recommendation.MatrixFactorizationModel
import java.io._
object Prateek_Agrawal_task1 {
def dropheader(data: RDD[String]): RDD[String] = {
data.mapPartitionsWithIndex((idx, lines) => {
if (idx == 0) {
lines.drop(1)
}
lines
})
}
def create_training(ratings_split: RDD[Array[String]], ratings_testing: Array[Array[String]]) = {
ratings_split.filter(x => {
ratings_testing.exists(y =>
(x(0) == y(0) && x(1) == y(1))
) == false
})
}
def create_testing(ratings_split: RDD[Array[String]], ratings_testing: Array[Array[String]]) = {
ratings_split.filter(x => {
ratings_testing.exists(y =>
(x(0) == y(0) && x(1) == y(1))
) == true
})
}
def create_model(ratings_train:RDD[((String, String), (String, String))],rank:Int,numIterations:Int ):org.apache.spark.mllib.recommendation.MatrixFactorizationModel={
val ratings = ratings_train.map(_ match { case ((user,item),(rate,temp)) =>
Rating(user.toInt, item.toInt, rate.toDouble)
})
val model = ALS.train(ratings, rank, numIterations, 0.01)
return model
}
def print_results(final_predictions_adjusted:RDD[((Int, Int), Double)])={
val rating_range=final_predictions_adjusted.map(x=>(x._2.toInt,1)).reduceByKey(_+_).sortByKey()
val rating_range_till_4=rating_range.map{x=>
var temp=x
if (x._1==5){temp=(4,x._2)}
temp
}.reduceByKey(_+_)
rating_range_till_4.sortByKey().foreach { x =>
if(x._1==0)
printf(">=0 and <1: " + x._2+"\n")
if(x._1==1)
printf(">=1 and <2: " + x._2+"\n")
if(x._1==2)
printf(">=2 and <3: " + x._2+"\n")
if(x._1==3)
printf(">=3 and <4: " + x._2+"\n")
if(x._1==4)
printf(">=4 " + x._2+"\n")
if(x._1==5)
printf("=5 " + x._2+"\n")
}
}
case class User_mov_rat(UserID: Int, MovieID:Int, Pred_rating: Double)
def print_outputfile(final_predictions_adjusted:RDD[((Int, Int), Double)])={
val writer = new FileWriter(new File("./output.txt" ))
writer.write("UserID,MovieID,Pred_rating\n")
final_predictions_adjusted.collect().foreach(x=>{writer.write(x._1._1+","+x._1._2+","+x._2+"\n")})
writer.close()
}
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setAppName("Prateek_Agrawal_task1").setMaster("local[2]")
val sc = new SparkContext(conf)
// val file = "/Users/echoesofconc/Documents/USC_courses/INF553/ml-latest-small/ratings.csv"
// val test = "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/testing_small.csv"
val file = "/Users/echoesofconc/Documents/USC_courses/INF553/ml-20m/ratings.csv"
val test = "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/testing_20m.csv"
val data = sc.textFile(file, 2).cache()
val data_test = sc.textFile(test, 2).cache()
// Drop Header
// val data_wo_header=dropheader(data).persist()
// val data_test_wo_header=dropheader(data_test).persist()
// Create training and testing data of the format (User ID, MovID, Rating, Time)
val data_wo_header=dropheader(data).map(_.split(",")).map(x=>((x(0),x(1)),(x(2),x(3))))
val data_test_wo_header=dropheader(data_test).map(_.split(",")).map(x=>((x(0),x(1)),1))
val ratings_train=data_wo_header.subtractByKey(data_test_wo_header)
val ratings_test=data_wo_header.subtractByKey(ratings_train)
data_test_wo_header.unpersist()
data_wo_header.unpersist()
data.unpersist()
data_test.unpersist()
// val ratings_split = data_wo_header.map(line => line.split(",")).persist()
// data_wo_header.unpersist()
// data.unpersist()
// val ratings_testing = data_test_wo_header.map(line => line.split(",")).collect()
// data_test_wo_header.unpersist()
// data_test.unpersist()
//
// val ratings_train = create_training(ratings_split, ratings_testing).persist()
// val ratings_test=create_testing(ratings_split, ratings_testing)
// ratings_split.unpersist()
// ratings_test.unpersist()
// Create the model using rating_train the training data
val rank = 1
val numIterations = 10
// val model=create_model(ratings_train,rank,numIterations)
//
// model.save(sc, "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/myCollaborativeFilter")
val model = MatrixFactorizationModel.load(sc, "/Users/echoesofconc/Documents/USC_courses/INF553/Prateek_Agrawal_hw3/myCollaborativeFilter")
// Average user,Rating from training
val user_avgrat=ratings_train.map(_ match { case ((user, mov), (rate, temp)) =>(user.toInt, (rate.toDouble,1.0))}).reduceByKey((x,y)=>(x._1 + y._1, x._2 + y._2)).mapValues{ case (sum, count) => (1.0 * sum) / count }
ratings_train.unpersist()
// Predict user_mov ratings
val user_mov = data_test_wo_header.map(_ match { case ((user, mov),temp) =>
(user.toInt,mov.toInt)
})
val predictions =
model.predict(user_mov).map { case Rating(user, mov, rate) =>
((user, mov), rate)
}
// Combine Predictions and unpredicted user,Movies due to them being individual. Going forward we need to improve the accuracy for these predictions
val user_mov_rat=user_mov.map(x=>(x,0.0))
val predictions_unpredicted_combined= predictions.union(user_mov_rat).reduceByKey(_+_).map(x=>(x._1._1,(x._1._2,x._2)))
// Combine average rating and predictions+unpredicted values
val avg_rating_predictions_unpredicted_combined=predictions_unpredicted_combined.join(user_avgrat)
// Generate final predictions RDD
val final_predictions=avg_rating_predictions_unpredicted_combined.map{x=>
var temp=((x._1,x._2._1._1),x._2._2)
if(x._2._1._2==0.0){temp=((x._1,x._2._1._1),x._2._2)}
if(x._2._1._2!=0.0){temp=((x._1,x._2._1._1),x._2._1._2)}
temp
}
// Adjust for ratings above 5.0 and below 0.0
val final_predictions_adjusted=final_predictions.map{x=>
var temp=x
if (x._2>5.0){temp=(x._1,5.0)}
if (x._2<0.0){temp=(x._1,0.0)}
temp
}
// final_predictions_adjusted.count()
val ratesAndPreds_map = ratings_test.map(_ match { case ((user, mov), (rate, temp)) => ((user.toInt,mov.toInt),rate.toDouble)})
val ratesAndPreds=ratesAndPreds_map.join(final_predictions_adjusted)
val MSE = ratesAndPreds.map { case ((user, product), (r1, r2)) =>
val err = (r1 - r2)
err * err
}.mean()
val RMSE=math.sqrt(MSE)
// Print output.txt
print_outputfile(final_predictions_adjusted)
// Print the predictionresults
print_results(final_predictions_adjusted.sortByKey())
print(RMSE+"\n")
}
}
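For readers skimming past the long listing, the change that made the 20-million-row case tractable is the key-based split. A minimal sketch of just that idea (assuming the MovieLens CSV layout userId,movieId,rating,timestamp, as above):
// Key both files by (userId, movieId) and let Spark compute the set difference,
// instead of collecting the test rows and scanning them once per training record.
val ratingsByKey = dropheader(data).map(_.split(",")).map(x => ((x(0), x(1)), (x(2), x(3))))
val testKeys     = dropheader(data_test).map(_.split(",")).map(x => ((x(0), x(1)), 1))
val ratingsTrain = ratingsByKey.subtractByKey(testKeys)       // rows whose key is not in the test file
val ratingsTest  = ratingsByKey.subtractByKey(ratingsTrain)   // rows whose key is in the test file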

Sorting a DStream and taking the top N

I have some DStream in Spark Scala and I want to sort it then take the top N.
The problem is that whenever I try to run it I get NotSerializableException and the exception message says:
This is because the DStream object is being referred to from within the closure.
I don't know how to solve it.
Here is my attempt:
package com.badrit.realtime
import java.util.Date
import com.badrit.drivers.UnlimitedSpaceTimeDriver
import com.badrit.model.{CellBuilder, DataReader, Trip}
import com.badrit.utility.Printer
import org.apache.spark.SparkConf
import org.apache.spark.rdd.RDD
import org.apache.spark.streaming.dstream.{DStream, InputDStream}
import org.apache.spark.streaming.{Duration, Milliseconds, StreamingContext}
import scala.collection.mutable
object StreamingDriver {
val appName: String = "HotSpotRealTime"
val hostName = "localhost"
val port = 5050
val constrains = UnlimitedSpaceTimeDriver.constrains;
var streamingRate = 1;
var windowSize = 8;
var slidingInterval = 2;
val cellBuilder = new CellBuilder(constrains)
val inputFilePath = "/home/ahmedelgamal/Downloads/green_tripdata_2015-02.csv"
def prepareTestData(sparkStreamCtx: StreamingContext): InputDStream[Trip] = {
val sparkCtx = sparkStreamCtx.sparkContext
val textFile: RDD[String] = sparkCtx.textFile(inputFilePath)
val data: RDD[Trip] = new DataReader().getTrips(textFile)
val groupedData = data.filter(_.pickup.date.before(new Date(2015, 1, 2, 0, 0, 0)))
.groupBy(trip => trip.pickup.date.getMinutes).sortBy(_._1).map(_._2).collect()
printf("Grouped Data Count is " + groupedData.length)
var dataQueue: mutable.Queue[RDD[Trip]] = mutable.Queue.empty;
groupedData.foreach(trips => dataQueue += sparkCtx.makeRDD(trips.toArray))
printf("\n\nTest Queue size is " + dataQueue.size)
groupedData.zipWithIndex.foreach { case (trips: Iterable[Trip], index: Int) => {
println("Items List " + index)
val passengers: Array[Int] = trips.map(_.passengers).toArray
val cnt = passengers.length
println("Sum is " + passengers.sum)
println("Cnt is " + cnt)
val passengersRdd = sparkCtx.parallelize(passengers)
println("Mean " + passengersRdd.mean())
println("Stdv" + passengersRdd.stdev())
}
}
sparkStreamCtx.queueStream(dataQueue, true)
}
def cellCreator(trip: Trip) = cellBuilder.cellForCarStop(trip.pickup)
def main(args: Array[String]) {
if (args.length < 1) {
streamingRate = 1;
windowSize = 3 //2 hours 60 * 60 * 1000L
slidingInterval = 2 //0.5 hour 60 * 60 * 1000L
}
else {
streamingRate = args(0).toInt;
windowSize = args(1).toInt
slidingInterval = args(2).toInt
}
val sparkConf = new SparkConf().setAppName(appName).setMaster("local[*]")
val sparkStreamCtx = new StreamingContext(sparkConf, Milliseconds(streamingRate))
sparkStreamCtx.sparkContext.setLogLevel("ERROR")
sparkStreamCtx.checkpoint("/tmp")
val data: InputDStream[Trip] = prepareTestData(sparkStreamCtx)
val dataWindow = data.window(new Duration(windowSize), new Duration(slidingInterval))
//my main problem lies in the following line
val newDataWindow = dataWindow.transform(rdd => sparkStreamCtx.sparkContext.parallelize(rdd.take(10)))
newDataWindow.print
sparkStreamCtx.start()
sparkStreamCtx.awaitTerminationOrTimeout(1000)
}
}
I'm open to other ways of sorting a DStream and taking its top N besides my approach.
You can use the transform method on the DStream, sort the underlying RDD, take its first n elements into a list, and then filter the original RDD to keep only the elements contained in that list.
val n = 10
val topN = result.transform(rdd =>{
val list = rdd.sortBy(_._1).take(n)
rdd.filter(list.contains)
})
topN.print
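One caveat about the snippet above: sortBy is ascending by default, so it keeps the n smallest keys. If "top N" means the largest, sort in descending order (same assumption that result is a DStream of key/value pairs):
val topN = result.transform { rdd =>
  val list = rdd.sortBy(_._1, ascending = false).take(n)
  rdd.filter(list.contains)
}
topN.print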

org.apache.spark.SparkException: Task not serializable (scala)

I am new to Scala as well as to Spark. Please help me resolve this issue.
In the spark-shell, when I load the functions below individually, they run without any exception. When I copy these functions into a Scala object and load that file in the spark-shell, they throw a "task not serializable" exception in the processbatch function when trying to parallelize.
The code is below:
import org.apache.spark.sql.Row
import org.apache.log4j.Logger
import org.apache.spark.sql.hive.HiveContext
object Process {
val hc = new HiveContext(sc)
def processsingle(wait: Int, patient: org.apache.spark.sql.Row, visits: Array[org.apache.spark.sql.Row]) : String = {
var out = new StringBuilder()
val processStart = getTimeInMillis()
for( x <- visits ) {
out.append(", " + x.getAs("patientid") + ":" + x.getAs("visitid"))
}
out.toString()
}
def processbatch(batch: Int, wait: Int, patients: Array[org.apache.spark.sql.Row], visits: Array[org.apache.spark.sql.Row]) = {
val out = sc.parallelize(patients, batch).map( r=> processsingle(wait, r, visits.filter(f=> f.getAs("patientid") == r.getAs("patientid")))).collect()
for(x <- out) println(x)
}
def processmeasures(fetch: Int, batch: Int, wait: Int) = {
val patients = hc.sql("SELECT patientid FROM tableName1 order by p_id").collect()
val visit = hc.sql("SELECT patientid, visitid FROM tableName2")
val count = patients.length
val fetches = if(count % fetch > 0) (count / fetch + 1) else (count / fetch)
for(i <- 0 to fetches.toInt-1){
val startFetch = i*fetch
val endFetch = math.min((i+1)*fetch, count.toInt)-1
val fetchSize = endFetch - startFetch + 1
val fetchClause = "patientid >= " + patients(startFetch).get(0) + " and patientid <= " + patients(endFetch).get(0)
val fetchVisit = visit.filter( fetchClause ).collect()
val batches = if(fetchSize % batch > 0) (fetchSize / batch + 1) else (fetchSize / batch)
for(j <- 0 to batches.toInt-1){
val startBatch = j*batch
val endBatch = math.min((j+1)*batch, fetch.toInt)-1
println(s"Batch from $startBatch to $endBatch");
val batchVisits = fetchVisit.filter(g => g.getAs[Long]("patientid") >= patients(i*fetch + startBatch).getLong(0) && g.getAs[Long]("patientid") <= patients(math.min(i*fetch + endBatch + 1, endFetch)).getLong(0))
processbatch(batch, wait, patients.slice(i*fetch + startBatch, i*fetch + endBatch + 1), batchVisits)
}
}
println("Processing took " + getExecutionTime(processStart) + " millis")
}
}
You should make the Process object Serializable:
object Process extends Serializable {
...
}
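For context on why this helps: the closure passed to sc.parallelize(patients, batch).map(...) calls processsingle, which is a method of Process, so the closure captures the whole Process object and Spark has to serialize it; since Process is not Serializable, the task fails. Extending Serializable is the simplest fix. An alternative sketch (untested, assuming patientid and visitid are Longs as in processmeasures) is to build the per-record function locally so it only closes over its own parameters:
def processbatch(batch: Int, wait: Int, patients: Array[org.apache.spark.sql.Row], visits: Array[org.apache.spark.sql.Row]) = {
  // local function: it closes over `visits` only, not over the enclosing object
  val single = (r: org.apache.spark.sql.Row) => {
    val mine = visits.filter(v => v.getAs[Long]("patientid") == r.getAs[Long]("patientid"))
    mine.map(v => v.getAs[Long]("patientid") + ":" + v.getAs[Long]("visitid")).mkString(", ")
  }
  val out = sc.parallelize(patients, batch).map(single).collect()
  out.foreach(println)
}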

Spark KMeans clustering: program blocks?

When I use the Apache Spark Scala API to run KMeans clustering, my program is as follows:
import org.apache.spark.SparkContext
import org.apache.spark.SparkContext._
import org.apache.spark.util.Vector
object KMeans {
def closestPoint(p: Vector, centers: Array[Vector]) = {
var index = 0
var bestIndex = 0
var closest = Double.PositiveInfinity
for(i <- 0 until centers.length) {
var tempDist = p.squaredDist(centers(i))
if(tempDist < closest) {
closest = tempDist
bestIndex = i
}
}
bestIndex
}
def parseVector(line: String): Vector = {
new Vector(line.split("\\s+").map(s => s.toDouble))
}
def main(args: Array[String]): Unit = {
System.setProperty("hadoop.home.dir", "F:/OpenSoft/hadoop-2.2.0")
val sc = new SparkContext("local", "kmeans cluster",
"G:/spark-0.9.0-incubating-bin-hadoop2",
SparkContext.jarOfClass(this.getClass()))
val lines = sc.textFile("G:/testData/synthetic_control.data.txt") // RDD[String]
val count = lines.count
val data = lines.map(parseVector _) // RDD[Vector]
data.foreach(println)
val K = 6
val convergeDist = 0.1
val kPoint = data.takeSample(withReplacement = false, K, 42) // Array[Vector]
kPoint.foreach(println)
var tempDist = 1.0
while(tempDist > convergeDist) {
val closest = data.map(p => (closestPoint(p, kPoint), (p, 1)))
val pointStat = closest.reduceByKey{case ((x1, y1), (x2, y2)) =>
(x1+x2, y1+y2)}
val newKPoint = pointStat.map{pair => (
pair._1,pair._2._1/pair._2._2)}.collectAsMap()
tempDist = 0.0
for(i <- 0 until K) {
tempDist += kPoint(i).squaredDist(newKPoint(i))
}
for(newP <- newKPoint) {
kPoint(newP._1) = newP._2
}
println("Finish iteration (delta=" + tempDist + ")")
}
println("Finish centers: ")
kPoint.foreach(println)
System.exit(0)
}
When I run it in local mode, the log info is as follows:
..................
14/03/31 11:29:15 INFO HadoopRDD: Input split: hdfs://hadoop-01:9000/data/synthetic_control.data:0+288374
The program then blocks and does not continue running.
Can anyone help me?