I use Spark 2.0.1 and Scala 2.11.
This is a question related to user-defined aggregate functions (UDAFs) in Spark. I'm using the example answer provided here to frame my question:
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{Row, Column}
object DummyUDAF extends UserDefinedAggregateFunction {
def inputSchema = new StructType().add("x", StringType)
def bufferSchema = new StructType()
.add("buff", ArrayType(LongType))
.add("buff2", ArrayType(DoubleType))
def dataType = new StructType()
.add("xs", ArrayType(LongType))
.add("ys", ArrayType(DoubleType))
def deterministic = true
def initialize(buffer: MutableAggregationBuffer) = {}
def update(buffer: MutableAggregationBuffer, input: Row) = {}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {}
def evaluate(buffer: Row) = (Array(1L, 2L, 3L), Array(1.0, 2.0, 3.0))
}
I'm able to return multiple Maps instead of an Array easily, but not able to mutate the map in the update method.
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{Row, Column}
import scala.collection.mutable.Map
object DummyUDAF extends UserDefinedAggregateFunction {
def inputSchema = new StructType().add("x", DoubleType).add("y", IntegerType)
def bufferSchema = new StructType()
.add("buff", MapType(DoubleType, IntegerType))
.add("buff2", MapType(DoubleType, IntegerType))
def dataType = new StructType()
.add("xs", MapType(DoubleType, IntegerType))
.add("ys", MapType(DoubleType, IntegerType))
def deterministic = true
def initialize(buffer: MutableAggregationBuffer) = {
buffer(0) = scala.collection.mutable.Map[Double,Int]()
buffer(1) = scala.collection.mutable.Map[Double,Int]()
}
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
buffer(0).asInstanceOf[Map[Double,Int]](input.getDouble(0)) = input.getInt(1)
buffer(1).asInstanceOf[Map[Double,Int]](input.getDouble(0)*10) = input.getInt(1)*10
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {
buffer1(0).asInstanceOf[Map[Double,Int]] ++= buffer2(0).asInstanceOf[Map[Double,Int]]
buffer1(1).asInstanceOf[Map[Double,Int]] ++= buffer2(1).asInstanceOf[Map[Double,Int]]
}
//def evaluate(buffer: Row) = (Map(1.0->10,2.0->20), Map(10.0->100,11.0->110))
def evaluate(buffer: Row) = (buffer(0).asInstanceOf[Map[Double,Int]], buffer(1).asInstanceOf[Map[Double,Int]])
}
This compiles fine, but gives a runtime error:
val df = Seq((1.0, 1), (2.0, 2)).toDF("k", "v")
df.select(DummyUDAF($"k", $"v")).show(1, false)
org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 70.0 failed 4 times, most recent failure: Lost task 1.3 in stage 70.0 (TID 204, 10.91.252.25): java.lang.ClassCastException: scala.collection.immutable.Map$EmptyMap$ cannot be cast to scala.collection.mutable.Map
Another solution discussed here suggests that this could be a problem caused by using MapType in the StructType. However, when I try out the solution mentioned there, I still get the same error.
val distudaf = new DistinctValues
val df = Seq(("a", "a1"), ("a", "a1"), ("a", "a2"), ("b", "b1"), ("b", "b2"), ("b", "b3"), ("b", "b1"), ("b", "b1")).toDF("col1", "col2")
df.groupBy("col1").agg(distudaf($"col2").as("DV")).show
org.apache.spark.SparkException: Job aborted due to stage failure: Task 1 in stage 22.0 failed 4 times, most recent failure: Lost task 1.3 in stage 22.0 (TID 100, 10.91.252.25): java.lang.ClassCastException: scala.collection.immutable.Map$EmptyMap$ cannot be cast to scala.collection.mutable.Map
My preference would be to mutate the Map, given that I expect the Map to be huge, and making a copy and re-assigning may lead to performance/memory bottlenecks.
My limited understanding of UDAFs is that you should only set what you want to be (semantically) updated, i.e. take what is already set in the MutableAggregationBuffer, combine it with what you want to add, and assign it back (which will call update(i: Int, value: Any): Unit under the covers).
Your code could look as follows:
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val newBuffer0 = buffer(0).asInstanceOf[Map[Double, Int]]
buffer(0) = newBuffer0 + (input.getDouble(0) -> input.getInt(1))
val newBuffer1 = buffer(1).asInstanceOf[Map[Double, Int]]
buffer(1) = newBuffer1 + (input.getDouble(0) * 10 -> input.getInt(1) * 10)
}
The complete DummyUDAF could be as follows:
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.types._
import org.apache.spark.sql.functions.udf
import org.apache.spark.sql.{Row, Column}
object DummyUDAF extends UserDefinedAggregateFunction {
def inputSchema = new StructType().add("x", DoubleType).add("y", IntegerType)
def bufferSchema = new StructType()
.add("buff", MapType(DoubleType, IntegerType))
.add("buff2", MapType(DoubleType, IntegerType))
def dataType = new StructType()
.add("xs", MapType(DoubleType, IntegerType))
.add("ys", MapType(DoubleType, IntegerType))
def deterministic = true
def initialize(buffer: MutableAggregationBuffer) = {
buffer(0) = Map[Double,Int]()
buffer(1) = Map[Double,Int]()
}
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val newBuffer0 = buffer(0).asInstanceOf[Map[Double, Int]]
buffer(0) = newBuffer0 + (input.getDouble(0) -> input.getInt(1))
val newBuffer1 = buffer(1).asInstanceOf[Map[Double, Int]]
buffer(1) = newBuffer1 + (input.getDouble(0) * 10 -> input.getInt(1) * 10)
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {
buffer1(0) = buffer1(0).asInstanceOf[Map[Double,Int]] ++ buffer2(0).asInstanceOf[Map[Double,Int]]
buffer1(1) = buffer1(1).asInstanceOf[Map[Double,Int]] ++ buffer2(1).asInstanceOf[Map[Double,Int]]
}
//def evaluate(buffer: Row) = (Map(1.0->10,2.0->20), Map(10.0->100,11.0->110))
def evaluate(buffer: Row) = (buffer(0).asInstanceOf[Map[Double,Int]], buffer(1).asInstanceOf[Map[Double,Int]])
}
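With this version in place, the call from the question should run without hitting the ClassCastException:
val df = Seq((1.0, 1), (2.0, 2)).toDF("k", "v")
df.select(DummyUDAF($"k", $"v")).show(false)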
Late to the party. I just discovered that one can use
override def bufferSchema: StructType = StructType(List(
StructField("map", ObjectType(classOf[mutable.Map[String, Long]]))
))
to use mutable.Map in a buffer.
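For what it's worth, here is a minimal sketch of that approach as a word-count style aggregation. The object and field names are mine, not from any library, and whether ObjectType buffers are accepted may depend on your Spark version:
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import scala.collection.mutable

object CountByKeyUDAF extends UserDefinedAggregateFunction {
  def inputSchema: StructType = new StructType().add("key", StringType)
  // ObjectType lets the buffer hold an arbitrary JVM object, here a mutable.Map
  def bufferSchema: StructType = StructType(List(
    StructField("map", ObjectType(classOf[mutable.Map[String, Long]]))
  ))
  def dataType: DataType = MapType(StringType, LongType)
  def deterministic: Boolean = true
  def initialize(buffer: MutableAggregationBuffer): Unit =
    buffer(0) = mutable.Map.empty[String, Long]
  def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
    val m = buffer.getAs[mutable.Map[String, Long]](0)
    val k = input.getString(0)
    m(k) = m.getOrElse(k, 0L) + 1L // mutate in place instead of copying the whole map
    buffer(0) = m
  }
  def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
    val m1 = buffer1.getAs[mutable.Map[String, Long]](0)
    val m2 = buffer2.getAs[mutable.Map[String, Long]](0)
    m2.foreach { case (k, v) => m1(k) = m1.getOrElse(k, 0L) + v }
    buffer1(0) = m1
  }
  def evaluate(buffer: Row): Any =
    buffer.getAs[mutable.Map[String, Long]](0).toMap
}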
I have a DF looking like this:
time,channel,value
0,foo,5
0,bar,23
100,foo,42
...
I want a DF like this:
time,foo,bar
0,5,23
100,42,...
In Spark 2, I did it with a UDAF like this:
case class ColumnBuilderUDAF(channels: Seq[String]) extends UserDefinedAggregateFunction {
@transient lazy val inputSchema: StructType = StructType {
StructField("channel", StringType, nullable = false) ::
StructField("value", DoubleType, nullable = false) ::
Nil
}
@transient lazy val bufferSchema: StructType = StructType {
channels
.toList
.indices
.map(i => StructField("c%d".format(i), DoubleType, nullable = false))
}
@transient lazy val dataType: DataType = bufferSchema
@transient lazy val deterministic: Boolean = false
def initialize(buffer: MutableAggregationBuffer): Unit = channels.indices.foreach(buffer(_) = Double.NaN)
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val channel = input.getAs[String](0)
val p = channels.indexOf(channel)
if (p >= 0 && p < channels.length) {
val v = input.getAs[Double](1)
if (!v.isNaN) {
buffer(p) = v
}
}
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit =
channels
.indices
.foreach { i =>
val v2 = buffer2.getAs[Double](i)
if ((!v2.isNaN) && buffer1.getAs[Double](i).isNaN) {
buffer1(i) = v2
}
}
def evaluate(buffer: Row): Any =
new GenericRowWithSchema(channels.indices.map(buffer.getAs[Double]).toArray, dataType.asInstanceOf[StructType])
}
which I use like this:
val cb = ColumnBuilderUDAF(Seq("foo", "bar"))
val dfColumnar = df.groupBy($"time").agg(cb($"channel", $"value") as "c")
and then I rename c.c0, c.c1, etc. to foo, bar, etc.
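The renaming step is roughly the following sketch (getField pulls the positional fields out of the struct column c; the channel names here match the example above):
val channels = Seq("foo", "bar")
val dfRenamed = dfColumnar.select(
  $"time" +: channels.zipWithIndex.map { case (name, i) => $"c".getField(s"c$i").as(name) }: _*
)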
In Spark 3, UserDefinedAggregateFunction is deprecated and Aggregator should be used instead. So I began to port it like this:
case class ColumnBuilder(channels: Seq[String]) extends Aggregator[(String, Double), Array[Double], Row] {
lazy val bufferEncoder: Encoder[Array[Double]] = Encoders.javaSerialization[Array[Double]]
lazy val zero: Array[Double] = channels.map(_ => Double.NaN).toArray
def reduce(b: Array[Double], a: (String, Double)): Array[Double] = {
val index = channels.indexOf(a._1)
if (index >= 0 && !a._2.isNaN) b(index) = a._2
b
}
def merge(b1: Array[Double], b2: Array[Double]): Array[Double] = {
(0 until b1.length.min(b2.length)).foreach(i => if (b1(i).isNaN) b1(i) = b2(i))
b1
}
def finish(reduction: Array[Double]): Row =
new GenericRowWithSchema(reduction.map(x => x: Any), outputEncoder.schema)
def outputEncoder: Encoder[Row] = ??? // what goes here?
}
I don't know how to implement the Encoder[Row], as Spark does not provide a pre-defined one. If I try a straightforward approach like this:
val outputEncoder: Encoder[Row] = new Encoder[Row] {
val schema: StructType = StructType(channels.map(StructField(_, DoubleType, nullable = false)))
val clsTag: ClassTag[Row] = classTag[Row]
}
I get a ClassCastException because outputEncoder actually has to be an ExpressionEncoder.
So, how do I implement this correctly? Or do I still have to use the deprecated UDAF?
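(For what it's worth, the only way I know of to get an ExpressionEncoder[Row] for a fixed schema is the catalyst-internal RowEncoder, so something like the sketch below might type-check, depending on the Spark 3.x minor version; I'm not sure whether relying on an internal API is the intended approach.)
import org.apache.spark.sql.{Encoder, Row}
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}

// Assumption: RowEncoder.apply(schema) returns an ExpressionEncoder[Row] (Spark 3.0-3.4).
def outputEncoder: Encoder[Row] =
  RowEncoder(StructType(channels.map(StructField(_, DoubleType, nullable = false))))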
You can do it with groupBy and pivot:
import spark.implicits._
import org.apache.spark.sql.functions._
val df = Seq(
(0, "foo", 5),
(0, "bar", 23),
(100, "foo", 42)
).toDF("time", "channel", "value")
df.groupBy("time")
.pivot("channel")
.agg(first("value"))
.show(false)
Output:
+----+----+---+
|time|bar |foo|
+----+----+---+
|100 |null|42 |
|0 |23 |5 |
+----+----+---+
I want to do clickstream sessionization on a Spark DataFrame. Let's say I have loaded a DataFrame that has events from multiple sessions, with the following schema -
And I want to aggregate (stitch) the sessions, like this -
I have explored UDAFs and window functions, but could not understand how to use them for this specific use case. I know that partitioning the data by session id puts an entire session's data in a single partition, but how do I aggregate those events?
The idea is to aggregate all the events specific to each session as a single output record.
You can use collect_set:
def process(implicit spark: SparkSession) = {
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.collect_set
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
val seq = Seq(Row(1, 1, "startTime=1549270909"), Row(1, 1, "endTime=1549270913"))
val rdd = spark.sparkContext.parallelize(seq)
val df1 = spark.createDataFrame(rdd, StructType(List(StructField("sessionId", IntegerType, false), StructField("userId", IntegerType, false), StructField("session", StringType, false))))
df1.groupBy("sessionId").agg(collect_set("session"))
}
That gives you:
+---------+------------------------------------------+
|sessionId|collect_set(session) |
+---------+------------------------------------------+
|1 |[startTime=1549270909, endTime=1549270913]|
+---------+------------------------------------------+
as output.
If you need more complex logic, it can be included in a UDAF like the following:
class YourComplexLogicStrings extends UserDefinedAggregateFunction {
override def inputSchema: StructType = StructType(StructField("input", StringType) :: Nil)
override def bufferSchema: StructType = StructType(StructField("pair", StringType) :: Nil)
override def dataType: DataType = StringType
override def deterministic: Boolean = true
override def initialize(buffer: MutableAggregationBuffer): Unit = buffer(0) = ""
override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val b = buffer.getAs[String](0)
val i = input.getAs[String](0)
buffer(0) = { if(b.isEmpty) b + i else b + " + " + i }
}
override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
val b1 = buffer1.getAs[String](0)
val b2 = buffer2.getAs[String](0)
if(!b1.isEmpty)
buffer1(0) = (b1) ++ "," ++ (b2)
else
buffer1(0) = b2
}
override def evaluate(buffer: Row): Any = {
val yourString = buffer.getAs[String](0)
// Compute your logic and return another String
yourString
}
}
def process0(implicit spark: SparkSession) = {
import org.apache.spark.sql.Row
import org.apache.spark.sql.functions.col
import org.apache.spark.sql.types.{IntegerType, StringType, StructField, StructType}
val agg0 = new YourComplexLogicStrings()
val seq = Seq(Row(1, 1, "startTime=1549270909"), Row(1, 1, "endTime=1549270913"))
val rdd = spark.sparkContext.parallelize(seq)
val df1 = spark.createDataFrame(rdd, StructType(List(StructField("sessionId", IntegerType, false), StructField("userId", IntegerType, false), StructField("session", StringType, false))))
df1.groupBy("sessionId").agg(agg0(col("session")))
}
It gives:
+---------+---------------------------------------+
|sessionId|yourcomplexlogicstrings(session) |
+---------+---------------------------------------+
|1 |startTime=1549270909,endTime=1549270913|
+---------+---------------------------------------+
Note that you could include very complex logic using Spark SQL functions directly if you want to avoid UDAFs.
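For instance, here is a UDAF-free sketch of the same stitching on df1 from the snippets above, using collect_list and concat_ws:
import org.apache.spark.sql.functions.{collect_list, concat_ws}

// collect_list gathers all session entries per sessionId; concat_ws joins the
// resulting array with "," into a single string.
df1.groupBy("sessionId")
  .agg(concat_ws(",", collect_list("session")).as("session"))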
I am trying to use a custom HashMap implementation as a UserDefinedType instead of MapType in Spark. The code works fine in Spark 1.5.2 but gives a java.lang.ClassCastException: scala.collection.immutable.HashMap$HashMap1 cannot be cast to org.apache.spark.sql.catalyst.util.MapData exception in Spark 1.6.2.
The code:
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
import scala.collection.immutable.HashMap
class Test extends UserDefinedAggregateFunction {
def inputSchema: StructType =
StructType(Array(StructField("input", StringType)))
def bufferSchema = StructType(Array(StructField("top_n", CustomHashMapType)))
def dataType: DataType = CustomHashMapType
def deterministic = true
def initialize(buffer: MutableAggregationBuffer): Unit = {
buffer(0) = HashMap.empty[String, Long]
}
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val buff0 = buffer.getAs[HashMap[String, Long]](0)
buffer(0) = buff0.updated("test", buff0.getOrElse("test", 0L) + 1L)
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
buffer1(0) = buffer1.
getAs[HashMap[String, Long]](0)
.merged(buffer2.getAs[HashMap[String, Long]](0))({ case ((k, v1), (_, v2)) => (k, v1 + v2) })
}
def evaluate(buffer: Row): Any = {
buffer(0)
}
}
private case object CustomHashMapType extends UserDefinedType[HashMap[String, Long]] {
override def sqlType: DataType = MapType(StringType, LongType)
override def serialize(obj: Any): Map[String, Long] =
obj.asInstanceOf[Map[String, Long]]
override def deserialize(datum: Any): HashMap[String, Long] = {
datum.asInstanceOf[Map[String, Long]] ++: HashMap.empty[String, Long]
}
override def userClass: Class[HashMap[String, Long]] = classOf[HashMap[String, Long]]
}
The wrapper class to run the UDAF:
import org.apache.spark.sql.SQLContext
import org.apache.spark.{SparkConf, SparkContext}
object TestJob {
def main(args: Array[String]): Unit = {
val conf = new SparkConf().setMaster("local[4]").setAppName("DataStatsExecution")
val sc = new SparkContext(conf)
val sqlContext = new SQLContext(sc)
import sqlContext.implicits._
val df = sc.parallelize(Seq(1,2,3,4)).toDF("col")
val udaf = new Test()
val outdf = df.agg(udaf(df("col")))
outdf.show
}
}
When I run the above code in Spark 1.6.2, I get the following exception:
Caused by: java.lang.ClassCastException: scala.collection.immutable.HashMap$HashMap1 cannot be cast to org.apache.spark.sql.catalyst.util.MapData
at org.apache.spark.sql.catalyst.expressions.BaseGenericInternalRow$class.getMap(rows.scala:50)
at org.apache.spark.sql.catalyst.expressions.GenericMutableRow.getMap(rows.scala:248)
at org.apache.spark.sql.catalyst.expressions.JoinedRow.getMap(JoinedRow.scala:115)
at org.apache.spark.sql.catalyst.expressions.GeneratedClass$SpecificMutableProjection.apply(Unknown Source)
at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$31.apply(AggregationIterator.scala:345)
at org.apache.spark.sql.execution.aggregate.AggregationIterator$$anonfun$31.apply(AggregationIterator.scala:344)
at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:154)
at org.apache.spark.sql.execution.aggregate.SortBasedAggregationIterator.next(SortBasedAggregationIterator.scala:29)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
at scala.collection.Iterator$$anon$11.next(Iterator.scala:328)
at org.apache.spark.shuffle.sort.BypassMergeSortShuffleWriter.write(BypassMergeSortShuffleWriter.java:149)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:73)
at org.apache.spark.scheduler.ShuffleMapTask.runTask(ShuffleMapTask.scala:41)
at org.apache.spark.scheduler.Task.run(Task.scala:89)
at org.apache.spark.executor.Executor$TaskRunner.run(Executor.scala:227)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1149)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:624)
at java.lang.Thread.run(Thread.java:748)
I have found that the HashMap implementation is much faster than the available Spark MapType implementation. Are there any changes that can be made to run the code in Spark 1.6.2, or is there a possible alternative?
I have a DataFrame of two columns, ID of type Int and Vec of type Vector (org.apache.spark.mllib.linalg.Vector).
The DataFrame looks as follows:
ID,Vec
1,[0,0,5]
1,[4,0,1]
1,[1,2,1]
2,[7,5,0]
2,[3,3,4]
3,[0,8,1]
3,[0,0,1]
3,[7,7,7]
....
I would like to do a groupBy($"ID") then apply an aggregation on the rows inside each group by summing the vectors.
The desired output of the above example would be:
ID,SumOfVectors
1,[5,2,7]
2,[10,8,4]
3,[7,15,9]
...
The available aggregation functions will not work, e.g. df.groupBy($"ID").agg(sum($"Vec")) will lead to a ClassCastException.
How can I implement a custom aggregation function that allows me to sum vectors or arrays, or perform any other custom operation?
Spark >= 3.0
You can use Summarizer with sum
import org.apache.spark.ml.stat.Summarizer
df
.groupBy($"id")
.agg(Summarizer.sum($"vec").alias("vec"))
Spark < 3.0
Personally I wouldn't bother with UDAFs. They are more than verbose and not exactly fast (see Spark UDAF with ArrayType as bufferSchema performance issues). Instead I would simply use reduceByKey / foldByKey:
import org.apache.spark.sql.Row
import breeze.linalg.{DenseVector => BDV}
import org.apache.spark.ml.linalg.{Vector, Vectors}
def dv(values: Double*): Vector = Vectors.dense(values.toArray)
val df = spark.createDataFrame(Seq(
(1, dv(0,0,5)), (1, dv(4,0,1)), (1, dv(1,2,1)),
(2, dv(7,5,0)), (2, dv(3,3,4)),
(3, dv(0,8,1)), (3, dv(0,0,1)), (3, dv(7,7,7)))
).toDF("id", "vec")
val aggregated = df
.rdd
.map{ case Row(k: Int, v: Vector) => (k, BDV(v.toDense.values)) }
.foldByKey(BDV.zeros[Double](3))(_ += _)
.mapValues(v => Vectors.dense(v.toArray))
.toDF("id", "vec")
aggregated.show
// +---+--------------+
// | id| vec|
// +---+--------------+
// | 1| [5.0,2.0,7.0]|
// | 2|[10.0,8.0,4.0]|
// | 3|[7.0,15.0,9.0]|
// +---+--------------+
And just for comparison a "simple" UDAF. Required imports:
import org.apache.spark.sql.expressions.{MutableAggregationBuffer,
UserDefinedAggregateFunction}
import org.apache.spark.ml.linalg.{Vector, Vectors, SQLDataTypes}
import org.apache.spark.sql.types.{StructType, ArrayType, DoubleType}
import org.apache.spark.sql.Row
import scala.collection.mutable.WrappedArray
Class definition:
class VectorSum (n: Int) extends UserDefinedAggregateFunction {
def inputSchema = new StructType().add("v", SQLDataTypes.VectorType)
def bufferSchema = new StructType().add("buff", ArrayType(DoubleType))
def dataType = SQLDataTypes.VectorType
def deterministic = true
def initialize(buffer: MutableAggregationBuffer) = {
buffer.update(0, Array.fill(n)(0.0))
}
def update(buffer: MutableAggregationBuffer, input: Row) = {
if (!input.isNullAt(0)) {
val buff = buffer.getAs[WrappedArray[Double]](0)
val v = input.getAs[Vector](0).toSparse
for (i <- v.indices) {
buff(i) += v(i)
}
buffer.update(0, buff)
}
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {
val buff1 = buffer1.getAs[WrappedArray[Double]](0)
val buff2 = buffer2.getAs[WrappedArray[Double]](0)
for ((x, i) <- buff2.zipWithIndex) {
buff1(i) += x
}
buffer1.update(0, buff1)
}
def evaluate(buffer: Row) = Vectors.dense(
buffer.getAs[Seq[Double]](0).toArray)
}
And an example usage:
df.groupBy($"id").agg(new VectorSum(3)($"vec") alias "vec").show
// +---+--------------+
// | id| vec|
// +---+--------------+
// | 1| [5.0,2.0,7.0]|
// | 2|[10.0,8.0,4.0]|
// | 3|[7.0,15.0,9.0]|
// +---+--------------+
See also: How to find mean of grouped Vector columns in Spark SQL?.
I suggest the following (it works on Spark 2.0.2 onward). It could probably be optimized further, but it is quite clean; the one thing you have to know in advance is the vector size when you create the UDAF instance:
import org.apache.spark.ml.linalg._
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
class VectorAggregate(val numFeatures: Int)
extends UserDefinedAggregateFunction {
private type B = Map[Int, Double]
def inputSchema: StructType = StructType(StructField("vec", new VectorUDT()) :: Nil)
def bufferSchema: StructType =
StructType(StructField("agg", MapType(IntegerType, DoubleType)) :: Nil)
def initialize(buffer: MutableAggregationBuffer): Unit =
buffer.update(0, Map.empty[Int, Double])
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val zero = buffer.getAs[B](0)
input match {
case Row(DenseVector(values)) => buffer.update(0, values.zipWithIndex.foldLeft(zero){case (acc,(v,i)) => acc.updated(i, v + acc.getOrElse(i,0d))})
case Row(SparseVector(_, indices, values)) => buffer.update(0, values.zip(indices).foldLeft(zero){case (acc,(v,i)) => acc.updated(i, v + acc.getOrElse(i,0d))}) }}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
val zero = buffer1.getAs[B](0)
buffer1.update(0, buffer2.getAs[B](0).foldLeft(zero){case (acc,(i,v)) => acc.updated(i, v + acc.getOrElse(i,0d))})}
def deterministic: Boolean = true
def evaluate(buffer: Row): Any = {
val Row(agg: B) = buffer
val indices = agg.keys.toArray.sorted
Vectors.sparse(numFeatures,indices,indices.map(agg)).compressed
}
def dataType: DataType = new VectorUDT()
}
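Usage would then look something like this (assuming the same df as in the previous answer and 3-element vectors):
val vagg = new VectorAggregate(3)
df.groupBy($"id").agg(vagg($"vec").as("vec")).show(false)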
With pyspark 3.0.0, which is my version, you can use Summarizer to do it easily. Your column needs to be of type DenseVector.
from pyspark.ml.stat import Summarizer
sdf.groupBy("ID").agg(Summarizer.mean(sdf.Vec)).show()
Note: Summarizer has no avg method in pyspark, but you can use the mean method.