Spark distinct followed by join giving IndexOutOfBoundsException - scala

I'm trying to join two DataFrames, A and B. B has a distinct operation right before the join, and one of B's columns is joined against two columns in A. This specific combination throws an IndexOutOfBoundsException. Has anyone run into this before?
Details below. Thanks in advance!
Environment:
spark-shell standalone mode
Spark version 2.3.1
Code:
val df1 = Seq((1, "one", "one"), (2, "two", "two")).toDF("key1", "val11", "val12")
val df2 = Seq(("one", "first"), ("one", "first"), ("two", "second")).toDF("key2", "val2")
val df3 = df2.distinct
val df4 = df1.join(df3, col("val11") === col("key2") and col("val12") === col("key2"))
df4.show(false)
Exception:
java.lang.IndexOutOfBoundsException: -1
at scala.collection.LinearSeqOptimized$class.apply(LinearSeqOptimized.scala:65)
at scala.collection.immutable.List.apply(List.scala:84)
at org.apache.spark.sql.execution.exchange.EnsureRequirements$$anonfun$reorder$1.apply(EnsureRequirements.scala:233)
at org.apache.spark.sql.execution.exchange.EnsureRequirements$$anonfun$reorder$1.apply(EnsureRequirements.scala:231)
at scala.collection.mutable.ResizableArray$class.foreach(ResizableArray.scala:59)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:48)
at org.apache.spark.sql.execution.exchange.EnsureRequirements.reorder(EnsureRequirements.scala:231)
at org.apache.spark.sql.execution.exchange.EnsureRequirements.org$apache$spark$sql$execution$exchange$EnsureRequirements$$reorderJoinKeys(EnsureRequirements.scala:255)
at org.apache.spark.sql.execution.exchange.EnsureRequirements$$anonfun$org$apache$spark$sql$execution$exchange$EnsureRequirements$$reorderJoinPredicates$1.applyOrElse(EnsureRequirements.scala:277)
at org.apache.spark.sql.execution.exchange.EnsureRequirements$$anonfun$org$apache$spark$sql$execution$exchange$EnsureRequirements$$reorderJoinPredicates$1.applyOrElse(EnsureRequirements.scala:273)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:288)
at org.apache.spark.sql.execution.exchange.EnsureRequirements.org$apache$spark$sql$execution$exchange$EnsureRequirements$$reorderJoinPredicates(EnsureRequirements.scala:273)
at org.apache.spark.sql.execution.exchange.EnsureRequirements$$anonfun$apply$1.applyOrElse(EnsureRequirements.scala:302)
at org.apache.spark.sql.execution.exchange.EnsureRequirements$$anonfun$apply$1.applyOrElse(EnsureRequirements.scala:294)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformUp$1.apply(TreeNode.scala:289)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:288)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$3.apply(TreeNode.scala:286)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformUp(TreeNode.scala:286)
at org.apache.spark.sql.execution.exchange.EnsureRequirements.apply(EnsureRequirements.scala:294)
at org.apache.spark.sql.execution.exchange.EnsureRequirements.apply(EnsureRequirements.scala:37)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$prepareForExecution$1.apply(QueryExecution.scala:87)
at org.apache.spark.sql.execution.QueryExecution$$anonfun$prepareForExecution$1.apply(QueryExecution.scala:87)
at scala.collection.LinearSeqOptimized$class.foldLeft(LinearSeqOptimized.scala:124)
at scala.collection.immutable.List.foldLeft(List.scala:84)
at org.apache.spark.sql.execution.QueryExecution.prepareForExecution(QueryExecution.scala:87)
at org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:77)
at org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:77)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:3249)
at org.apache.spark.sql.Dataset.head(Dataset.scala:2484)
at org.apache.spark.sql.Dataset.take(Dataset.scala:2698)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:254)
at org.apache.spark.sql.Dataset.show(Dataset.scala:725)
at org.apache.spark.sql.Dataset.show(Dataset.scala:702)
... 49 elided
Update: working solution, thanks @1pluszara!
val df1 = Seq((1, "one", "one"), (2, "two", "two")).toDF("key1", "val11", "val12")
val df2 = Seq(("one", "first"), ("one", "first"), ("two", "second")).toDF("key2", "val2")
val df3 = spark.createDataFrame(df2.rdd.distinct, df2.schema)
val df4 = df1.join(df3, col("val11") === col("key2") and col("val12") === col("key2"))
df4.show(false)

Tried this, too (Row needs an import in the shell):
import org.apache.spark.sql.Row

val df3 = df2.rdd.distinct().map {
  case Row(key2: String, val2: String) => (key2, val2)
}.toDF("key2", "val2")
val df4 = df1.join(df3, col("val11") === col("key2") and col("val12") === col("key2"))
df4.show(false)
Output:
+----+-----+-----+----+------+
|key1|val11|val12|key2|val2 |
+----+-----+-----+----+------+
|2 |two |two |two |second|
|1 |one |one |one |first |
+----+-----+-----+----+------+
But I'm not sure how the execution works internally for the DataFrame version.
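For what it's worth, the stack trace points at EnsureRequirements reordering the join keys during physical planning: the join condition references the same right-side column (key2) in both equalities, and the distinct adds an aggregate whose output partitioning that reordering has to match. A hedged workaround that stays entirely in the DataFrame API (my own sketch, not from the accepted solution): since both val11 and val12 must equal key2, rows where val11 != val12 can never match, so the join can be pre-filtered down to a single join key.
// Sketch only: logically equivalent to the two-key join for this condition,
// and it avoids referencing key2 twice in the join predicate.
val df4b = df1
  .filter(col("val11") === col("val12"))
  .join(df3, col("val11") === col("key2"))
df4b.show(false)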

Related

Transform a dataframe for the minHashLSH in spark

I have this data frame:
val df = (
spark
.createDataFrame(
Seq((1L, 2L), (1L, 5L), (1L,8L), (2L,4L), (2L,6L), (2L,8L))
)
.toDF("A","B")
.groupBy("A")
.agg(collect_list("B").alias("B"))
)
And I would like to transform it to the following form:
val dfTransformed =
(
spark
.createDataFrame(
Seq(
(1, Vectors.sparse(9, Seq((2, 1.0), (5,1.0), (8,1.0)))),
(2, Vectors.sparse(9, Seq((4, 1.0), (6,1.0), (8,1.0))))
)
).toDF("A", "B")
)
I want to do this so that I can use the MinHashLSH transformation (https://spark.apache.org/docs/2.2.3/api/scala/index.html#org.apache.spark.ml.feature.MinHashLSH).
I have tried with a UDF as follows but without success:
def f(x:Array[Long]) = Vectors.sparse(9, x.map(p => (p.toInt,1.0)).toSeq)
val udff = udf((x:Array[Long]) => f(x))
val dfTransformed = df.withColumn("transformed", udff(col("B"))).show()
Could anyone help me, please?
Use Seq for the UDF parameter, not Array. Spark hands array columns to a Scala UDF as a Seq (a WrappedArray), so an Array[Long] parameter fails at runtime:
import org.apache.spark.ml.linalg.Vectors

def f(x: Seq[Long]) = Vectors.sparse(9, x.map(p => (p.toInt, 1.0)))
val udff = udf((x: Seq[Long]) => f(x))
val dfTransformed = df.withColumn("transformed", udff(col("B")))
dfTransformed.show(false)
+---+---------+-------------------------+
|A |B |transformed |
+---+---------+-------------------------+
|1 |[2, 5, 8]|(9,[2,5,8],[1.0,1.0,1.0])|
|2 |[4, 6, 8]|(9,[4,6,8],[1.0,1.0,1.0])|
+---+---------+-------------------------+
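As a follow-up sketch (not part of the original answer; column names assumed from the example above), the transformed column can then be fed straight into MinHashLSH:
import org.apache.spark.ml.feature.MinHashLSH

// Fit the LSH model on the sparse vectors and append the hash column.
val mh = new MinHashLSH()
  .setNumHashTables(3)
  .setInputCol("transformed")
  .setOutputCol("hashes")
val model = mh.fit(dfTransformed)
model.transform(dfTransformed).show(false)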

How to null a struct in Scala spark when all values in the struct are null?

I have a Spark Scala DataFrame with a struct column, and I want null instead of a struct object when all values in the struct are null.
val someDF = Seq(
(8, null,null),
(64, "mouse", "s"),
(-27, "horse", "e")
).toDF("a", "b", "c")
def make_week_struct(week: String): Column = {
  val summary = struct($"b", $"c").alias(s"wks_${week}_jrny")
  summary
}
val week1_summary = make_week_struct("1")
var dd = someDF.select($"a",week1_summary)
display(dd)
Sample Data
a b c
8 null null
64 mouse s
-27 horse e
Current Output
a wks_1_jrny
8 object:{b:null, c:null}
64 object:{b:"mouse", c:"s"}
-27 object:{b:"horse", c:"e"}
Expected Output
a wks_1_jrny
8 null
64 object:{b:"mouse", c:"s"}
-27 object:{b:"horse", c:"e"}
You can also use the to_json function and filter out empty JSON objects ({}):
scala> dd
  .withColumn("wks_1_jrny",
    when(
      to_json($"wks_1_jrny") =!= "{}", // keep the struct only when it is not an empty JSON object
      $"wks_1_jrny"
    )
  )
  .show(false)
+---+----------+
|a |wks_1_jrny|
+---+----------+
|8 |null |
|64 |[mouse,s] |
|-27|[horse,e] |
+---+----------+
This should also work:
import org.apache.spark.sql.functions._
import org.apache.spark.sql.types.StructType
import spark.implicits._

val df = List(
  (None, None),
  (None, Some("abc")),
  (Some(1), Some("xyz"))
).toDF("id", "name")

val structCols = Seq("id", "name")
val dataStruct = struct(structCols.map(col): _*)
val emptyStruct = struct(
  df.schema.fields
    .filter(f => structCols.contains(f.name))
    .map(f => lit(null).cast(f.dataType).as(f.name)): _*
)

df
  .select(when(dataStruct.equalTo(emptyStruct), lit(null: StructType)).otherwise(dataStruct).as("col"))
  .show(false)
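A hypothetical adaptation of that idea back to the question's dd DataFrame (names taken from the question above): build an all-null struct with the same schema as wks_1_jrny and null the column out when they match.
import org.apache.spark.sql.types.StructType

// Schema of the existing struct column, and an all-null struct of the same shape.
val weekType = dd.schema("wks_1_jrny").dataType.asInstanceOf[StructType]
val emptyWeek = struct(weekType.fields.map(f => lit(null).cast(f.dataType).as(f.name)): _*)

val ddNulled = dd.withColumn("wks_1_jrny",
  when($"wks_1_jrny".equalTo(emptyWeek), lit(null).cast(weekType)).otherwise($"wks_1_jrny"))
ddNulled.show(false)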

Split string in Dataframe using Scala on Spark

I have a logfile which has 100+ columns, out of which I only need two, '_raw' and '_time', so I loaded the logfile as a "csv" DataFrame.
Step 1:
scala> val log = spark.read.format("csv").option("inferSchema", "true").option("header", "true").load("soa_prod_diag_10_jan.csv")
log: org.apache.spark.sql.DataFrame = [ARRAffinity: string, CoordinatorNonSecureURL: string ... 126 more fields]
Step 2:
I registered the DF as temp table
log.createOrReplaceTempView("logs")
Step 3: I extracted my two required columns '_raw' and '_time'
scala> val sqlDF = spark.sql("select _raw, _time from logs")
sqlDF: org.apache.spark.sql.DataFrame = [_raw: string, _time: string]
scala> sqlDF.show(1, false)
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----+
|_raw |_time|
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----+
|[2019-01-10T23:59:59.998-06:00] [xx_yyy_zz_sss_ra10] [ERROR] [OSB-473003] [oracle.osb.statistics.statistics] [tid: [ACTIVE].ExecuteThread: '28' for queue: 'weblogic.kernel.Default (self-tuning)'] [userId: <anonymous>] [ecid: 92b39a8b-8234-4d19-9ac7-4908dc79c5ed-0000bd0b,0] [partition-name: DOMAIN] [tenant-name: GLOBAL] Aggregation Server Not Available. Failed to get remote aggregator[[|null |
+----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------+-----+
only showing top 1 row
My requirement:
I need to split the string in the '_raw' column to produce
[2019-01-10T23:59:59.998-06:00] [xx_yyy_zz_sss_ra10] [ERROR] [OSB-473003] [oracle.osb.statistics.statistics] [ecid: 92b39a8b-8234-4d19-9ac7-4908dc79c5ed-0000bd0b] with column names a, b, c, d, e, f respectively
Also remove all null values from both '_raw' and '_time'
Your answers will be appreciated :)
You can use the split function to split _raw by spaces. This returns an array, and you can then extract the values from that array. You can also use the regexp_extract function to extract values from the log message. Both approaches are shown below. I hope this is helpful.
//Creating Test Data
val df = Seq("[2019-01-10T23:59:59.998-06:00] [xx_yyy_zz_sss_ra10] [ERROR] [OSB-473003] [oracle.osb.statistics.statistics] [tid: [ACTIVE].ExecuteThread: '28' for queue: 'weblogic.kernel.Default (self-tuning)'] [userId: <anonymous>] [ecid: 92b39a8b-8234-4d19-9ac7-4908dc79c5ed-0000bd0b,0] [partition-name: DOMAIN] [tenant-name: GLOBAL] Aggregation Server Not Available. Failed to get remote aggregator[[")
.toDF("_raw")
val splitDF = df.withColumn("split_raw_arr", split($"_raw", " "))
  .withColumn("A", $"split_raw_arr"(0))
  .withColumn("B", $"split_raw_arr"(1))
  .withColumn("C", $"split_raw_arr"(2))
  .withColumn("D", $"split_raw_arr"(3))
  .withColumn("E", $"split_raw_arr"(4))
  .drop("_raw", "split_raw_arr")
splitDF.show(false)
+-------------------------------+--------------------+-------+------------+----------------------------------+
|A |B |C |D |E |
+-------------------------------+--------------------+-------+------------+----------------------------------+
|[2019-01-10T23:59:59.998-06:00]|[xx_yyy_zz_sss_ra10]|[ERROR]|[OSB-473003]|[oracle.osb.statistics.statistics]|
+-------------------------------+--------------------+-------+------------+----------------------------------+
val extractedDF = df
  .withColumn("a", regexp_extract($"_raw", "\\[(.*?)\\]",1))
  .withColumn("b", regexp_extract($"_raw", "\\[(.*?)\\] \\[(.*?)\\]",2))
  .withColumn("c", regexp_extract($"_raw", "\\[(.*?)\\] \\[(.*?)\\] \\[(.*?)\\]",3))
  .withColumn("d", regexp_extract($"_raw", "\\[(.*?)\\] \\[(.*?)\\] \\[(.*?)\\] \\[(.*?)\\]",4))
  .withColumn("e", regexp_extract($"_raw", "\\[(.*?)\\] \\[(.*?)\\] \\[(.*?)\\] \\[(.*?)\\] \\[(.*?)\\]",5))
  .withColumn("f", regexp_extract($"_raw", "(?<=ecid: )(.*?)(?=,)",1))
  .drop("_raw")
extractedDF.show(false)
+-----------------------------+------------------+-----+----------+--------------------------------+---------------------------------------------+
|a |b |c |d |e |f |
+-----------------------------+------------------+-----+----------+--------------------------------+---------------------------------------------+
|2019-01-10T23:59:59.998-06:00|xx_yyy_zz_sss_ra10|ERROR|OSB-473003|oracle.osb.statistics.statistics|92b39a8b-8234-4d19-9ac7-4908dc79c5ed-0000bd0b|
+-----------------------------+------------------+-----+----------+--------------------------------+---------------------------------------------+
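The question also asks to remove rows with nulls in '_raw' and '_time'; a short sketch (assuming the sqlDF DataFrame from the question) would be to drop those rows before extracting:
// Drop rows where either _raw or _time is null, then apply the split/regexp logic above.
val cleanedDF = sqlDF.na.drop(Seq("_raw", "_time"))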

Spark: reduce/aggregate by key

I am new to Spark and Scala, so I have no idea how this kind of problem is called (which makes searching for it pretty hard).
I have data of the following structure:
[(date1, (name1, 1)), (date1, (name1, 1)), (date1, (name2, 1)), (date2, (name3, 1))]
In some way, this has to be reduced/aggregated to:
[(date1, [(name1, 2), (name2, 1)]), (date2, [(name3, 1)])]
I know how to do reduceByKey on a list of key-value pairs, but this particular problem is a mystery to me.
Thanks in advance!
Using your data, here it is step-wise:
val rdd1 = sc.makeRDD(Array( ("d1",("A",1)), ("d1",("A",1)), ("d1",("B",1)), ("d2",("E",1)) ),2)
val rdd2 = rdd1.map(x => ((x._1, x._2._1), x._2._2))
val rdd3 = rdd2.groupByKey
val rdd4 = rdd3.map {
  case (str, nums) => (str, nums.sum)
}
val rdd5 = rdd4.map(x => (x._1._1, (x._1._2, x._2))).groupByKey
rdd5.collect
returns:
res28: Array[(String, Iterable[(String, Int)])] = Array((d2,CompactBuffer((E,1))), (d1,CompactBuffer((A,2), (B,1))))
Better approach avoiding groupByKey is as follows:
val rdd1 = sc.makeRDD(Array( ("d1",("A",1)), ("d1",("A",1)), ("d1",("B",1)), ("d2",("E",1)) ),2)
val rdd2 = rdd1.map(x => ((x._1, x._2._1), x._2._2)) // (K, V) shape so reduceByKey can sum the V part
val rdd3 = rdd2.reduceByKey(_+_)
val rdd4 = rdd3.map(x => (x._1._1, (x._1._2, x._2))).groupByKey // Necessary Shuffle
rdd4.collect
As I stated in the comments, this can be done with DataFrames for structured data, so run the below:
// The RDD approach above should be enough, but here is the DataFrame alternative.
import org.apache.spark.sql.expressions._
import org.apache.spark.sql.functions._
val rddA = sc.makeRDD(Array( ("d1","A",1), ("d1","A",1), ("d1","B",1), ("d2","E",1) ),2)
val dfA = rddA.toDF("c1", "c2", "c3")
val dfB = dfA
.groupBy("c1", "c2")
.agg(sum("c3").alias("sum"))
dfB.show
returns:
+---+---+---+
| c1| c2|sum|
+---+---+---+
| d1| A| 2|
| d2| E| 1|
| d1| B| 1|
+---+---+---+
But you can do the following to approximate the CompactBuffer result shown above.
import org.apache.spark.sql.functions.{col, udf}
case class XY(x: String, y: Long)
val xyTuple = udf((x: String, y: Long) => XY(x, y))
val dfC = dfB
.withColumn("xy", xyTuple(col("c2"), col("sum")))
.drop("c2")
.drop("sum")
dfC.printSchema
dfC.show
// Then ... this gives you the CompactBuffer answer but from a DF-perspective
val dfD = dfC.groupBy(col("c1")).agg(collect_list(col("xy")))
dfD.show
returns (some renaming and possibly sorting still required):
+---+----------------+
| c1|collect_list(xy)|
+---+----------------+
| d2| [[E, 1]]|
| d1|[[A, 2], [B, 1]]|
+---+----------------+
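A possible simplification of the DataFrame route (my own sketch, not part of the answer above) is to skip the UDF and case class and build the pairs with the built-in struct function:
import org.apache.spark.sql.functions.{col, collect_list, struct, sum}

// Sum per (date, name), then collect the (name, sum) pairs per date.
val dfE = dfA
  .groupBy("c1", "c2")
  .agg(sum("c3").alias("sum"))
  .groupBy("c1")
  .agg(collect_list(struct(col("c2"), col("sum"))).alias("pairs"))
dfE.show(false)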

Cannot merge two DataFrames in Scala Spark

I've been trying to append one DataFrame to another in Scala. The append operation in this case simply adds a new column of the same size alongside the existing column; no key matching is involved. Both DataFrames have the same shape (5 rows and 1 column each).
scala> val coefficients = lrModel.coefficients.toArray.toSeq.toDF("coefficients")
coefficients: org.apache.spark.sql.DataFrame = [coefficients: double]
scala> coefficients.show()
+--------------------+
| coefficients|
+--------------------+
| -59525.0697785032|
| 6957.836000531959|
| 314.2998010755629|
|-0.37884289844065666|
| -1758.154438149325|
+--------------------+
scala> val tvalues = trainingSummary.tValues.toArray.drop(1).toSeq.toDF("t-values")
tvalues: org.apache.spark.sql.DataFrame = [t-values: double]
scala> tvalues.show()
+-------------------+
| t-values|
+-------------------+
| 1.8267249911295418|
| 100.35507390273406|
| -8.768588605222108|
|-0.4656738230173362|
| 10.48091833711012|
+-------------------+
The join() function runs and I can even get the schema, but when I try to display the values of the new DF I get this error:
scala> val outputModelDF1 = coefficients.join(tvalues)
outputModelDF1: org.apache.spark.sql.DataFrame = [coefficients: double, t-values: double]
scala> outputModelDF1.printSchema()
root
|-- coefficients: double (nullable = false)
|-- t-values: double (nullable = false)
scala> outputModelDF1.show()
org.apache.spark.sql.AnalysisException: Detected cartesian product for INNER join between logical plans
Project [value#359 AS coefficients#361]
+- LocalRelation [value#359]
and
Project [value#368 AS t-values#370]
+- LocalRelation [value#368]
Join condition is missing or trivial.
Use the CROSS JOIN syntax to allow cartesian products between these relations.;
at org.apache.spark.sql.catalyst.optimizer.CheckCartesianProducts$$anonfun$apply$20.applyOrElse(Optimizer.scala:1080)
at org.apache.spark.sql.catalyst.optimizer.CheckCartesianProducts$$anonfun$apply$20.applyOrElse(Optimizer.scala:1077)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$2.apply(TreeNode.scala:267)
at org.apache.spark.sql.catalyst.trees.CurrentOrigin$.withOrigin(TreeNode.scala:70)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:266)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$1.apply(TreeNode.scala:272)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$1.apply(TreeNode.scala:272)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:272)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$1.apply(TreeNode.scala:272)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$transformDown$1.apply(TreeNode.scala:272)
at org.apache.spark.sql.catalyst.trees.TreeNode$$anonfun$4.apply(TreeNode.scala:306)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapProductIterator(TreeNode.scala:187)
at org.apache.spark.sql.catalyst.trees.TreeNode.mapChildren(TreeNode.scala:304)
at org.apache.spark.sql.catalyst.trees.TreeNode.transformDown(TreeNode.scala:272)
at org.apache.spark.sql.catalyst.trees.TreeNode.transform(TreeNode.scala:256)
at org.apache.spark.sql.catalyst.optimizer.CheckCartesianProducts.apply(Optimizer.scala:1077)
at org.apache.spark.sql.catalyst.optimizer.CheckCartesianProducts.apply(Optimizer.scala:1062)
at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:85)
at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1$$anonfun$apply$1.apply(RuleExecutor.scala:82)
at scala.collection.IndexedSeqOptimized$class.foldl(IndexedSeqOptimized.scala:57)
at scala.collection.IndexedSeqOptimized$class.foldLeft(IndexedSeqOptimized.scala:66)
at scala.collection.mutable.WrappedArray.foldLeft(WrappedArray.scala:35)
at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:82)
at org.apache.spark.sql.catalyst.rules.RuleExecutor$$anonfun$execute$1.apply(RuleExecutor.scala:74)
at scala.collection.immutable.List.foreach(List.scala:381)
at org.apache.spark.sql.catalyst.rules.RuleExecutor.execute(RuleExecutor.scala:74)
at org.apache.spark.sql.execution.QueryExecution.optimizedPlan$lzycompute(QueryExecution.scala:78)
at org.apache.spark.sql.execution.QueryExecution.optimizedPlan(QueryExecution.scala:78)
at org.apache.spark.sql.execution.QueryExecution.sparkPlan$lzycompute(QueryExecution.scala:84)
at org.apache.spark.sql.execution.QueryExecution.sparkPlan(QueryExecution.scala:80)
at org.apache.spark.sql.execution.QueryExecution.executedPlan$lzycompute(QueryExecution.scala:89)
at org.apache.spark.sql.execution.QueryExecution.executedPlan(QueryExecution.scala:89)
at org.apache.spark.sql.Dataset.withAction(Dataset.scala:2832)
at org.apache.spark.sql.Dataset.head(Dataset.scala:2153)
at org.apache.spark.sql.Dataset.take(Dataset.scala:2366)
at org.apache.spark.sql.Dataset.showString(Dataset.scala:245)
at org.apache.spark.sql.Dataset.show(Dataset.scala:644)
at org.apache.spark.sql.Dataset.show(Dataset.scala:603)
at org.apache.spark.sql.Dataset.show(Dataset.scala:612)
... 52 elided
Any idea how to deal with it and how to simply merge these two DFs together?
UPDATE 1
I should have stated the desired format of the output that I want to achieve. Please see below:
+--------------------+--------------------+
| coefficients| t-values|
+--------------------+--------------------+
| -59525.0697785032| 1.8267249911295418|
| 6957.836000531959| 100.35507390273406|
| 314.2998010755629| -8.768588605222108|
|-0.37884289844065666| -0.4656738230173362|
| -1758.154438149325|  10.48091833711012|
+--------------------+--------------------+
UPDATE 2
Unfortunately, the following approach using withColumn() didn't work.
scala> val outputModelDF1 = coefficients.withColumn("t-values", tvalues("t-values"))
org.apache.spark.sql.AnalysisException: resolved attribute(s) t-values#119 missing from coefficients#113 in operator !Project [coefficients#113, t-values#119 AS t-values#130];;
!Project [coefficients#113, t-values#119 AS t-values#130]
+- Project [value#111 AS coefficients#113]
+- LocalRelation [value#111]
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$class.failAnalysis(CheckAnalysis.scala:39)
at org.apache.spark.sql.catalyst.analysis.Analyzer.failAnalysis(Analyzer.scala:91)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:347)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$$anonfun$checkAnalysis$1.apply(CheckAnalysis.scala:78)
at org.apache.spark.sql.catalyst.trees.TreeNode.foreachUp(TreeNode.scala:127)
at org.apache.spark.sql.catalyst.analysis.CheckAnalysis$class.checkAnalysis(CheckAnalysis.scala:78)
at org.apache.spark.sql.catalyst.analysis.Analyzer.checkAnalysis(Analyzer.scala:91)
at org.apache.spark.sql.execution.QueryExecution.assertAnalyzed(QueryExecution.scala:52)
at org.apache.spark.sql.Dataset$.ofRows(Dataset.scala:66)
at org.apache.spark.sql.Dataset.org$apache$spark$sql$Dataset$$withPlan(Dataset.scala:2872)
at org.apache.spark.sql.Dataset.select(Dataset.scala:1153)
at org.apache.spark.sql.Dataset.withColumn(Dataset.scala:1908)
... 52 elided
One approach would be to create key columns in the dataframes for the join using monotonicallyIncreasingId:
val df1 = Seq(
(-59525.0697785032), (6957.836000531959), (314.2998010755629), (-0.37884289844065666), (-1758.154438149325)
).toDF("coefficients")
val df2 = Seq(
(1.8267249911295418), (100.35507390273406), (-8.768588605222108), (-0.4656738230173362), (10.48091833711012)
).toDF("t-values")
val df1R = df1.withColumn("rowid", monotonicallyIncreasingId)
val df2R = df2.withColumn("rowid", monotonicallyIncreasingId)
val dfJoined = df1R.join(df2R, Seq("rowid"))
dfJoined.show
+-----+--------------------+-------------------+
|rowid| coefficients| t-values|
+-----+--------------------+-------------------+
| 0| -59525.0697785032| 1.8267249911295418|
| 1| 6957.836000531959| 100.35507390273406|
| 2| 314.2998010755629| -8.768588605222108|
| 3|-0.37884289844065666|-0.4656738230173362|
| 4| -1758.154438149325| 10.48091833711012|
+-----+--------------------+-------------------+
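One caveat (my addition, not part of the answer): monotonically increasing IDs are only guaranteed to be increasing and unique, not consecutive, so on multi-partition data the generated row IDs of the two DataFrames may not line up. A hedged alternative sketch using zipWithIndex, which assigns true positional indices:
import org.apache.spark.sql.{DataFrame, Row}
import org.apache.spark.sql.types.{LongType, StructField, StructType}

// Append a positional rowid column via the underlying RDD.
def withRowId(df: DataFrame): DataFrame = {
  val rowsWithId = df.rdd.zipWithIndex.map { case (row, id) => Row.fromSeq(row.toSeq :+ id) }
  spark.createDataFrame(rowsWithId, StructType(df.schema.fields :+ StructField("rowid", LongType, nullable = false)))
}

val dfJoined2 = withRowId(df1).join(withRowId(df2), Seq("rowid")).drop("rowid")
dfJoined2.show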