How can I convert the output of my VectorAssembler to a dense vector rather than a sparse vector?
val featureIndexer = new VectorAssembler().setInputCols(Array("feature1","feature2","feature3")).setOutputCol("indexedFeatures")
training_set_combined = training_set_combined.na.fill(-9999)
testing_set_combined = testing_set_combined.na.fill(-9999)
// training
val assembler = new VectorAssembler().setInputCols(feature_names.toArray).setOutputCol("features")
def get_param(): mutable.HashMap[String, Any] = {
val params = new mutable.HashMap[String, Any]()
params += "eta" -> 0.1f
params += "num_round" -> 150
params += "missing" -> -999
params += "subsample" -> 1
params += "objective" -> "binary:logistic"
return params
}
val xgb = new XGBoostClassifier(get_param().toMap).setLabelCol("label").setFeaturesCol("features")
val pipeline = new Pipeline().setStages(Array(assembler, xgb))
val xgbclassifier = pipeline.fit(training_set_combined)
I am looking to convert the VectorAssembler output to a dense vector.
Here is the DenseVector converter Transformer implementation:
import org.apache.spark.ml.Transformer
import org.apache.spark.ml.linalg.SQLDataTypes
import org.apache.spark.ml.param.shared.{HasInputCols, HasOutputCols}
import org.apache.spark.ml.param.{ParamMap, Params}
import org.apache.spark.ml.util.{DefaultParamsReadable, DefaultParamsWritable, Identifiable}
import org.apache.spark.sql.types.StructType
import org.apache.spark.sql.{DataFrame, Dataset}
import org.apache.spark.sql.functions._
class DenseVectorConverter(val uid: String) extends Transformer with Params
with HasInputCols with HasOutputCols with DefaultParamsWritable {
def this() = this(Identifiable.randomUID("denseVectorConverter"))
/** @group setParam */
def setInputCols(value: Array[String]): this.type = set(inputCols, value)
/** @group setParam */
def setOutputCols(value: Array[String]): this.type = set(outputCols, value)
def validateAndTransformSchema(schema: StructType): StructType = {
require($(inputCols).length == $(inputCols).distinct.length, s"inputCols contains" +
s" duplicates: (${$(inputCols).mkString(", ")})")
require($(outputCols).length == $(outputCols).distinct.length, s"outputCols contains" +
s" duplicates: (${$(outputCols).mkString(", ")})")
require($(inputCols).length == $(outputCols).length, s"inputCols(${$(inputCols).length})" +
s" and outputCols(${$(outputCols).length}) should have the same length")
$(inputCols).zip($(outputCols)).foldLeft(schema) { (schema, inOutCol) =>
val inputField = schema(inOutCol._1)
require(inputField.dataType == SQLDataTypes.VectorType, s"Expected datatype of input col ${inputField.name} to be" +
s" vector but found ${inputField.dataType}")
schema.add(inOutCol._2, inputField.dataType, inputField.nullable, inputField.metadata)
}
}
def transformSchema(schema: StructType): StructType = validateAndTransformSchema(schema)
def copy(extra: ParamMap): DenseVectorConverter = defaultCopy(extra)
override def transform(dataset: Dataset[_]): DataFrame = {
transformSchema(dataset.schema, logging = true)
val sparseToDense =
udf((v: org.apache.spark.ml.linalg.Vector) => v.toDense)
$(inputCols).zip($(outputCols)).foldLeft(dataset.toDF()) { (df, inputColOutputCol) =>
df.withColumn(inputColOutputCol._2,
sparseToDense(col(inputColOutputCol._1)));
}
}
}
object DenseVectorConverter extends DefaultParamsReadable[DenseVectorConverter] {
override def load(path: String): DenseVectorConverter = super.load(path)
}
I tested it using the test case below:
import org.apache.spark.ml.linalg.Vectors
val data = Array(
Vectors.sparse(5, Seq((1, 1.0), (3, 7.0))),
Vectors.dense(2.0, 0.0, 3.0, 4.0, 5.0),
Vectors.dense(4.0, 0.0, 0.0, 6.0, 7.0)
)
val df = sqlContext.createDataFrame(data.map(Tuple1.apply)).toDF("features")
df.show(false)
// +---------------------+
// |features |
// +---------------------+
// |(5,[1,3],[1.0,7.0]) |
// |[2.0,0.0,3.0,4.0,5.0]|
// |[4.0,0.0,0.0,6.0,7.0]|
// +---------------------+
val denseVectorConverter = new DenseVectorConverter()
.setInputCols(Array("features"))
.setOutputCols(Array("features_dense"))
denseVectorConverter.transform(df).show(false)
// +---------------------+---------------------+
// |features |features_dense |
// +---------------------+---------------------+
// |(5,[1,3],[1.0,7.0]) |[0.0,1.0,0.0,7.0,0.0]|
// |[2.0,0.0,3.0,4.0,5.0]|[2.0,0.0,3.0,4.0,5.0]|
// |[4.0,0.0,0.0,6.0,7.0]|[4.0,0.0,0.0,6.0,7.0]|
// +---------------------+---------------------+
Now your modified code should look like this:
val featureIndexer = new VectorAssembler().setInputCols(Array("feature1","feature2","feature3")).setOutputCol("indexedFeatures")
training_set_combined = training_set_combined.na.fill(-9999)
testing_set_combined = testing_set_combined.na.fill(-9999)
// training
val assembler = new VectorAssembler().setInputCols(feature_names.toArray).setOutputCol("features")
val denseVectorConverter = new DenseVectorConverter()
.setInputCols(Array("features"))
.setOutputCols(Array("features_dense"))
def get_param(): mutable.HashMap[String, Any] = {
val params = new mutable.HashMap[String, Any]()
params += "eta" -> 0.1f
params += "num_round" -> 150
params += "missing" -> -999
params += "subsample" -> 1
params += "objective" -> "binary:logistic"
return params
}
val xgb = new XGBoostClassifier(get_param().toMap).setLabelCol("label").setFeaturesCol("features_dense")
val pipeline = new Pipeline().setStages(Array(assembler, denseVectorConverter, xgb))
val xgbclassifier = pipeline.fit(training_set_combined)
I've modified your code; please test it.
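For completeness, a minimal sketch of scoring the held-out set with the fitted pipeline. The selected column names are assumptions (they follow the default Spark ML output conventions that XGBoost4J-Spark uses):
val predictions = xgbclassifier.transform(testing_set_combined)
// The fitted PipelineModel re-applies the assembler, the dense-vector converter,
// and the XGBoost model to the test data in one call.
predictions.select("features_dense", "prediction").show(5, truncate = false)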
I have a DF looking like this:
time,channel,value
0,foo,5
0,bar,23
100,foo,42
...
I want a DF like this:
time,foo,bar
0,5,23
100,42,...
In Spark 2, I did it with a UDAF like this:
case class ColumnBuilderUDAF(channels: Seq[String]) extends UserDefinedAggregateFunction {
@transient lazy val inputSchema: StructType = StructType {
StructField("channel", StringType, nullable = false) ::
StructField("value", DoubleType, nullable = false) ::
Nil
}
@transient lazy val bufferSchema: StructType = StructType {
channels
.toList
.indices
.map(i => StructField("c%d".format(i), DoubleType, nullable = false))
}
@transient lazy val dataType: DataType = bufferSchema
@transient lazy val deterministic: Boolean = false
def initialize(buffer: MutableAggregationBuffer): Unit = channels.indices.foreach(buffer(_) = Double.NaN)
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val channel = input.getAs[String](0)
val p = channels.indexOf(channel)
if (p >= 0 && p < channels.length) {
val v = input.getAs[Double](1)
if (!v.isNaN) {
buffer(p) = v
}
}
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit =
channels
.indices
.foreach { i =>
val v2 = buffer2.getAs[Double](i)
if ((!v2.isNaN) && buffer1.getAs[Double](i).isNaN) {
buffer1(i) = v2
}
}
def evaluate(buffer: Row): Any =
new GenericRowWithSchema(channels.indices.map(buffer.getAs[Double]).toArray, dataType.asInstanceOf[StructType])
}
which I use like this:
val cb = ColumnBuilderUDAF(Seq("foo", "bar"))
val dfColumnar = df.groupBy($"time").agg(cb($"channel", $"value") as "c")
and then, I rename c.c0, c.c1 etc. to foo, bar etc.
In Spark 3, UDAF is deprecated and Aggregator should be used instead. So I began to port it like this:
case class ColumnBuilder(channels: Seq[String]) extends Aggregator[(String, Double), Array[Double], Row] {
lazy val bufferEncoder: Encoder[Array[Double]] = Encoders.javaSerialization[Array[Double]]
lazy val zero: Array[Double] = channels.map(_ => Double.NaN).toArray
def reduce(b: Array[Double], a: (String, Double)): Array[Double] = {
val index = channels.indexOf(a._1)
if (index >= 0 && !a._2.isNaN) b(index) = a._2
b
}
def merge(b1: Array[Double], b2: Array[Double]): Array[Double] = {
(0 until b1.length.min(b2.length)).foreach(i => if (b1(i).isNaN) b1(i) = b2(i))
b1
}
def finish(reduction: Array[Double]): Row =
new GenericRowWithSchema(reduction.map(x => x: Any), outputEncoder.schema)
def outputEncoder: Encoder[Row] = ??? // what goes here?
}
I don't know how to implement the Encoder[Row], as Spark does not have a pre-defined one. If I simply take a straightforward approach like this:
val outputEncoder: Encoder[Row] = new Encoder[Row] {
val schema: StructType = StructType(channels.map(StructField(_, DoubleType, nullable = false)))
val clsTag: ClassTag[Row] = classTag[Row]
}
I get a ClassCastException because outputEncoder actually has to be an ExpressionEncoder.
So, how do I implement this correctly? Or do I still have to use the deprecated UDAF?
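For reference, a hedged sketch of one way the missing encoder is often supplied: RowEncoder from the internal org.apache.spark.sql.catalyst.encoders package wraps a schema into the ExpressionEncoder that Spark expects at runtime. It is not a public API and its signature has shifted between Spark 3.x releases, so treat it as an assumption to verify against your version; the answer below sidesteps the problem entirely with pivot.
import org.apache.spark.sql.{Encoder, Row}
import org.apache.spark.sql.catalyst.encoders.RowEncoder
import org.apache.spark.sql.types.{DoubleType, StructField, StructType}
// Inside ColumnBuilder: derive the output schema from the channel names and
// wrap it with RowEncoder (internal API; verify against your Spark version).
def outputEncoder: Encoder[Row] =
RowEncoder(StructType(channels.map(StructField(_, DoubleType, nullable = false))))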
You can do it with groupBy and pivot:
import spark.implicits._
import org.apache.spark.sql.functions._
val df = Seq(
(0, "foo", 5),
(0, "bar", 23),
(100, "foo", 42)
).toDF("time", "channel", "value")
df.groupBy("time")
.pivot("channel")
.agg(first("value"))
.show(false)
Output:
+----+----+---+
|time|bar |foo|
+----+----+---+
|100 |null|42 |
|0 |23 |5 |
+----+----+---+
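If the channel names are known up front (as they are here, since ColumnBuilderUDAF took them as a parameter), passing them to pivot skips the extra job Spark otherwise runs to discover the distinct values and also fixes the column order. A short sketch using the same example data:
// "foo" and "bar" mirror the example channels above.
df.groupBy("time")
.pivot("channel", Seq("foo", "bar"))
.agg(first("value"))
.show(false)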
I would like to calculate the mode for multiple columns at the same time in Spark and use the calculated values to impute missing values in a DataFrame. I found how to calculate e.g. a mean, but I think a mode is more complex.
Here is a mean calculation:
val multiple_mean = df.na.fill(df.columns.zip(
df.select(intVars.map(mean(_)): _*).first.toSeq
).toMap)
I am able to calculate a mode in a brute-force way:
var list = ArrayBuffer.empty[Float]
for(column <- df.columns){
list += df.select(column).groupBy(col(column)).count().orderBy(desc("count")).first.toSeq(0).asInstanceOf[Float]
}
val multiple_mode = df.na.fill(df.columns.zip(list.toSeq).toMap)
Which way would be the best if we consider performance?
Thank you for any help.
You could use a UserDefinedAggregateFunction. The code below was tested on Spark 1.6.2.
First, create a class which extends UserDefinedAggregateFunction.
import org.apache.spark.sql.Row
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
class ModeUDAF extends UserDefinedAggregateFunction{
override def dataType: DataType = StringType
override def inputSchema: StructType = new StructType().add("input", StringType)
override def deterministic: Boolean = true
override def bufferSchema: StructType = new StructType().add("mode", MapType(StringType, LongType))
override def initialize(buffer: MutableAggregationBuffer): Unit = {
buffer(0) = Map.empty[Any, Long]
}
override def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val buff0 = buffer.getMap[Any, Long](0)
val inp = input.get(0)
buffer(0) = buff0.updated(inp, buff0.getOrElse(inp, 0L) + 1L)
}
override def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
val mp1 = buffer1.getMap[Any, Long](0)
val mp2 = buffer2.getMap[Any, Long](0)
buffer1(0) = mp1 ++ mp2.map { case (k, v) => k -> (v + mp1.getOrElse(k, 0L)) }
}
override def evaluate(buffer: Row): Any = {
lazy val st = buffer.getMap[Any, Long](0).toStream
val mode = st.foldLeft(st.head){case (e, s) => if (s._2 > e._2) s else e}
mode._1
}
}
Afterwards you can use it with your DataFrame in the following manner.
val modeColumnList = List("some", "column", "names") // or df.columns.toList
val modeAgg = new ModeUDAF()
val aggCols = modeColumnList.map(c => modeAgg(df(c)))
val aggregatedModeDF = df.agg(aggCols.head, aggCols.tail: _*)
aggregatedModeDF.show()
You can also use .collect on the final DataFrame to bring the result into a Scala data structure.
Note: The performance of this solution depends on the cardinality of the input column.
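To close the loop with the imputation goal in the question, a minimal sketch. It assumes the columns in modeColumnList are string-typed (ModeUDAF as written aggregates strings) and relies on the aggregated row lining up with modeColumnList, which it does because df.agg preserves the order of aggCols:
// Collect the one-row result of per-column modes and hand it to na.fill,
// which replaces nulls column by column.
val modeRow = aggregatedModeDF.first()
val fillValues = modeColumnList.zip(modeRow.toSeq).toMap
val imputedDF = df.na.fill(fillValues)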
I have a DataFrame of two columns, ID of type Int and Vec of type Vector (org.apache.spark.mllib.linalg.Vector).
The DataFrame looks like follow:
ID,Vec
1,[0,0,5]
1,[4,0,1]
1,[1,2,1]
2,[7,5,0]
2,[3,3,4]
3,[0,8,1]
3,[0,0,1]
3,[7,7,7]
....
I would like to do a groupBy($"ID") then apply an aggregation on the rows inside each group by summing the vectors.
The desired output of the above example would be:
ID,SumOfVectors
1,[5,2,7]
2,[10,8,4]
3,[7,15,9]
...
The built-in aggregation functions will not work; e.g. df.groupBy($"ID").agg(sum($"Vec")) will lead to a ClassCastException.
How to implement a custom aggregation function that allows me to do the sum of vectors or arrays or any other custom operation?
Spark >= 3.0
You can use Summarizer with sum
import org.apache.spark.ml.stat.Summarizer
df
.groupBy($"id")
.agg(Summarizer.sum($"vec").alias("vec"))
Spark <= 3.0
Personally I wouldn't bother with UDAFs. They are more than verbose and not exactly fast (see Spark UDAF with ArrayType as bufferSchema performance issues). Instead I would simply use reduceByKey / foldByKey:
import org.apache.spark.sql.Row
import breeze.linalg.{DenseVector => BDV}
import org.apache.spark.ml.linalg.{Vector, Vectors}
def dv(values: Double*): Vector = Vectors.dense(values.toArray)
val df = spark.createDataFrame(Seq(
(1, dv(0,0,5)), (1, dv(4,0,1)), (1, dv(1,2,1)),
(2, dv(7,5,0)), (2, dv(3,3,4)),
(3, dv(0,8,1)), (3, dv(0,0,1)), (3, dv(7,7,7)))
).toDF("id", "vec")
val aggregated = df
.rdd
.map{ case Row(k: Int, v: Vector) => (k, BDV(v.toDense.values)) }
.foldByKey(BDV.zeros[Double](3))(_ += _)
.mapValues(v => Vectors.dense(v.toArray))
.toDF("id", "vec")
aggregated.show
// +---+--------------+
// | id| vec|
// +---+--------------+
// | 1| [5.0,2.0,7.0]|
// | 2|[10.0,8.0,4.0]|
// | 3|[7.0,15.0,9.0]|
// +---+--------------+
And just for comparison, a "simple" UDAF. Required imports:
import org.apache.spark.sql.expressions.{MutableAggregationBuffer,
UserDefinedAggregateFunction}
import org.apache.spark.ml.linalg.{Vector, Vectors, SQLDataTypes}
import org.apache.spark.sql.types.{StructType, ArrayType, DoubleType}
import org.apache.spark.sql.Row
import scala.collection.mutable.WrappedArray
Class definition:
class VectorSum (n: Int) extends UserDefinedAggregateFunction {
def inputSchema = new StructType().add("v", SQLDataTypes.VectorType)
def bufferSchema = new StructType().add("buff", ArrayType(DoubleType))
def dataType = SQLDataTypes.VectorType
def deterministic = true
def initialize(buffer: MutableAggregationBuffer) = {
buffer.update(0, Array.fill(n)(0.0))
}
def update(buffer: MutableAggregationBuffer, input: Row) = {
if (!input.isNullAt(0)) {
val buff = buffer.getAs[WrappedArray[Double]](0)
val v = input.getAs[Vector](0).toSparse
for (i <- v.indices) {
buff(i) += v(i)
}
buffer.update(0, buff)
}
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row) = {
val buff1 = buffer1.getAs[WrappedArray[Double]](0)
val buff2 = buffer2.getAs[WrappedArray[Double]](0)
for ((x, i) <- buff2.zipWithIndex) {
buff1(i) += x
}
buffer1.update(0, buff1)
}
def evaluate(buffer: Row) = Vectors.dense(
buffer.getAs[Seq[Double]](0).toArray)
}
And an example usage:
df.groupBy($"id").agg(new VectorSum(3)($"vec") alias "vec").show
// +---+--------------+
// | id| vec|
// +---+--------------+
// | 1| [5.0,2.0,7.0]|
// | 2|[10.0,8.0,4.0]|
// | 3|[7.0,15.0,9.0]|
// +---+--------------+
See also: How to find mean of grouped Vector columns in Spark SQL?
I suggest the following (it works on Spark 2.0.2 onward). It could probably be optimized further, but it's very nice. One thing you have to know in advance is the vector size when you create the UDAF instance.
import org.apache.spark.ml.linalg._
import org.apache.spark.sql.expressions.{MutableAggregationBuffer, UserDefinedAggregateFunction}
import org.apache.spark.sql.types._
class VectorAggregate(val numFeatures: Int)
extends UserDefinedAggregateFunction {
private type B = Map[Int, Double]
def inputSchema: StructType = StructType(StructField("vec", SQLDataTypes.VectorType) :: Nil)
def bufferSchema: StructType =
StructType(StructField("agg", MapType(IntegerType, DoubleType)) :: Nil)
def initialize(buffer: MutableAggregationBuffer): Unit =
buffer.update(0, Map.empty[Int, Double])
def update(buffer: MutableAggregationBuffer, input: Row): Unit = {
val zero = buffer.getAs[B](0)
input match {
case Row(DenseVector(values)) =>
buffer.update(0, values.zipWithIndex.foldLeft(zero) { case (acc, (v, i)) => acc.updated(i, v + acc.getOrElse(i, 0d)) })
case Row(SparseVector(_, indices, values)) =>
buffer.update(0, values.zip(indices).foldLeft(zero) { case (acc, (v, i)) => acc.updated(i, v + acc.getOrElse(i, 0d)) })
}
}
def merge(buffer1: MutableAggregationBuffer, buffer2: Row): Unit = {
val zero = buffer1.getAs[B](0)
buffer1.update(0, buffer2.getAs[B](0).foldLeft(zero) { case (acc, (i, v)) => acc.updated(i, v + acc.getOrElse(i, 0d)) })
}
def deterministic: Boolean = true
def evaluate(buffer: Row): Any = {
val Row(agg: B) = buffer
val indices = agg.keys.toArray.sorted
Vectors.sparse(numFeatures,indices,indices.map(agg)).compressed
}
def dataType: DataType = SQLDataTypes.VectorType
}
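A hedged usage sketch, mirroring the earlier VectorSum example (it assumes the same df with an id column and 3-element vectors in vec, and that spark.implicits._ is in scope):
// numFeatures must match the vector size, here 3.
df.groupBy($"id").agg(new VectorAggregate(3)($"vec") as "vec").show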
With PySpark 3.0.0, which is my version, you can use Summarizer to do it easily. Your column needs to be of type DenseVector.
from pyspark.ml.stat import Summarizer
sdf.groupBy("ID").agg(Summarizer.mean(sdf.Vec)).show()
Note: there is no avg function on Summarizer, but you can use the mean method.
I don't know if it is possible, but I'd like my mapPartitions to split the variable "a" into two lists: a list l that stores all the numbers, and another list, say b, that stores all the words, with something like a.mapPartitions((p, v) => { val l = p.toList; val b = v.toList; ... }).
For example, in my for loop, l(i) = 1 and b(i) = "score".
import scala.io.Source
import org.apache.spark.rdd.RDD
import scala.collection.mutable.ListBuffer
val a = sc.parallelize(List(("score",1),("chicken",2),("magnacarta",2)) )
a.mapPartitions(p =>{val l = p.toList;
val ret = new ListBuffer[Int]
val words = new ListBuffer[String]
for(i<-0 to l.length-1){
words+= b(i)
ret += l(i)
}
ret.toList.iterator
}
)
Spark is a distributed computing engine; you can perform operations on partitioned data across the nodes of the cluster. You then need a reduce() method that performs a summary operation.
Please see this code that should do what you want:
import org.apache.spark.SparkContext
import org.apache.spark.SparkConf
object SimpleApp {
class MyResponseObj(var numbers: List[Int] = List[Int](), var words: List[String] = List[String]()) extends java.io.Serializable{
def +=(str: String, int: Int) = {
numbers = numbers :+ int
words = words :+ str
this
}
def +=(other: MyResponseObj) = {
numbers = numbers ++ other.numbers
words = words ++ other.words
this
}
}
def main(args: Array[String]) {
val conf = new SparkConf().setAppName("Simple Application").setMaster("local[2]")
val sc = new SparkContext(conf)
val a = sc.parallelize(List(("score", 1), ("chicken", 2), ("magnacarta", 2)))
val myResponseObj = a.mapPartitions[MyResponseObj](it => {
var myResponseObj = new MyResponseObj()
it.foreach {
case (str :String, int :Int) => myResponseObj += (str, int)
case _ => println("unexpected data")
}
Iterator(myResponseObj)
}).reduce( (myResponseObj1, myResponseObj2) => myResponseObj1 += myResponseObj2 )
println(myResponseObj.words)
println(myResponseObj.numbers)
}
}
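If the data is small enough to collect, a simpler hedged sketch of the same split (this pulls everything to the driver, so it only fits toy-sized RDDs like the example):
// unzip turns the collected (String, Int) pairs into two aligned lists.
val (words, numbers) = a.collect().toList.unzip
println(words)   // List(score, chicken, magnacarta)
println(numbers) // List(1, 2, 2)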