I'm migrating Scala code to PySpark. The original code snippet looks like this:
df.select( $"aa.*",
when($"bb".isNotNull, $"cc".multiply($"bb")).otherwise($"cc")
and my PySpark code is this:
df.select( "aa.*",
when(col("bb").isNotNull, col("cc") * col("bb")).otherwise(col("cc"))
And I get this error:
6 .select(
----> 7 col("clr.*"), when(col("bb").isNotNull, col("cc") * col("bb")).otherwise(col("cc")))
/usr/hdp/current/spark2-client/python/pyspark/sql/functions.py in when(condition, value)
708 sc = SparkContext._active_spark_context
709 if not isinstance(condition, Column):
--> 710 raise TypeError("condition should be a Column")
711 v = value._jc if isinstance(value, Column) else value
712 jc = sc._jvm.functions.when(condition._jc, v)
TypeError: condition should be a Column
Please help me with:
Explain why this logic works in Scala but not in Python
Suggest an improvement
Thanks!
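For reference, the most likely culprit is isNotNull: in Scala, $"bb".isNotNull is a method call, but in Python col("bb").isNotNull without parentheses is only a bound method object rather than a Column, which is exactly what triggers "condition should be a Column". A minimal sketch of the equivalent PySpark select (the alias name is illustrative):
from pyspark.sql.functions import col, when

result = df.select(
    "aa.*",
    # isNotNull() must be called as a method in Python
    when(col("bb").isNotNull(), col("cc") * col("bb")).otherwise(col("cc")).alias("cc_scaled")
)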
Related
I am new to PySpark and just started with it by following this
I have adapted the code to run on my local system. Below is how I am setting up PySpark:
# Import PySpark related modules
import findspark
findspark.init()
import pyspark
import os
import sys
os.environ['PYSPARK_PYTHON'] = 'python'
os.environ['PYSPARK_DRIVER_PYTHON'] = 'jupyter'
from pyspark.rdd import RDD
from pyspark.sql import Row
from pyspark.sql import DataFrame
from pyspark.sql import SparkSession
from pyspark.sql import SQLContext
from pyspark.sql import functions
from pyspark.sql.functions import (lit, desc, col, size, array_contains,
                                   isnan, udf, hour, array_min, array_max, countDistinct)
from pyspark.sql.types import *
# Assumed imports for the statistics helpers used further below
import statistics as stats
import numpy as np
import pandas as pd
MAX_MEMORY = '15G'
# Initialize a spark session.
conf = pyspark.SparkConf().setMaster("local[*]") \
.set('spark.executor.heartbeatInterval', 10000) \
.set('spark.network.timeout', 10000) \
.set("spark.core.connection.ack.wait.timeout", "20000") \
.set("spark.executor.memory", MAX_MEMORY) \
.set("spark.driver.memory", MAX_MEMORY) \
.set("spark.driver.maxResultSize", MAX_MEMORY) \
.set("spark.sql.execution.arrow.pyspark.enabled", "true") \
.set("spark.memory.offHeap.enabled","true") \
.set("spark.memory.offHeap.size","10g") \
.set("spark.network.timeout", "300000")
def init_spark():
    spark = SparkSession \
        .builder \
        .appName("Pyspark guide") \
        .config(conf=conf) \
        .getOrCreate()
    return spark
spark = init_spark()
filename_data = 'endomondoHR.json'
# Load the main data set into pyspark data frame
df = spark.read.json(filename_data, mode="DROPMALFORMED")
print('Data frame type: ' + str(type(df)))
While executing the code in the cell below
# Helper function to calculate statistic(s) of column_name from a tuple x of
# (sport, records list of the column); the stats to calculate are also given as an input
def calculate_stats(x, column_name, stat_list):
    sport, records_list = x
    stat_dict = {'sport': sport}
    if 'min' in stat_list:
        min_stat = min(records_list)
        stat_dict.update({'min ' + column_name: min_stat})
    if 'max' in stat_list:
        max_stat = max(records_list)
        stat_dict.update({'max ' + column_name: max_stat})
    if 'mean' in stat_list:
        average_stat = stats.mean(records_list)
        stat_dict.update({'mean ' + column_name: average_stat})
    if 'stdev' in stat_list:
        std_stat = stats.stdev(records_list)
        stat_dict.update({'stdev ' + column_name: std_stat})
    if '50th percentile' in stat_list:
        median_stat = stats.median(records_list)
        stat_dict.update({'50th percentile ' + column_name: median_stat})
    if '25th percentile' in stat_list:
        percentile_25th_stat = np.percentile(records_list, 25)
        stat_dict.update({'25th percentile ' + column_name: percentile_25th_stat})
    if '75th percentile' in stat_list:
        percentile_75th_stat = np.percentile(records_list, 75)
        stat_dict.update({'75th percentile ' + column_name: percentile_75th_stat})
    if '95th percentile' in stat_list:
        percentile_95th_stat = np.percentile(records_list, 95)
        stat_dict.update({'95th percentile ' + column_name: percentile_95th_stat})
    return stat_dict
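# For illustration (made-up values; assumes stats is Python's statistics module),
# calculate_stats(('run', [10, 20, 30]), 'interval', ['min', 'max', 'mean'])
# would return {'sport': 'run', 'min interval': 10, 'max interval': 30, 'mean interval': 20}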
def to_list(a):
    return a

def extend(a, b):
    a.extend(b)
    return a
def retrieve_array_column_stat_df(df, column_name, stat_list):
    # Convert sport & "column_name" to an RDD to easily calculate the statistics of intervals by sport
    sport_record_rdd = df.select('sport', column_name).rdd \
        .map(tuple).combineByKey(to_list, extend, extend).persist()
    # Calculate statistics of the input column by calling the calculate_stats function defined above
    record_statistic_df = pd.DataFrame(sport_record_rdd.map(
        lambda x: calculate_stats(x, column_name, stat_list)).collect()
    )
    # Set the proper dataframe column order
    columns_order = ['sport'] + [stat + ' ' + column_name for stat in stat_list]
    # Reorder columns
    return record_statistic_df[columns_order]
stat_list = ['min', '25th percentile', 'mean', '50th percentile',
'75th percentile', '95th percentile', 'max', 'stdev']
interval_statistic_df = retrieve_array_column_stat_df(df, column_name='interval', stat_list=stat_list)
print('\nLet\'s look at statistic for interval, in seconds (by sport):' )
interval_statistic_df
I am getting the error below
Py4JJavaError Traceback (most recent call last)
c:\Users\shiv\Desktop\test\pyspark_analysis_bigdata.ipynb Cell 45 in <cell line: 55>()
51 return record_statistic_df[columns_order]
53 stat_list = ['min', '25th percentile', 'mean', '50th percentile',
54 '75th percentile', '95th percentile', 'max', 'stdev']
---> 55 interval_statistic_df = retrieve_array_column_stat_df(df, column_name='interval', stat_list=stat_list)
56 print('\nLet\'s look at statistic for interval, in seconds (by sport):' )
57 interval_statistic_df
c:\Users\shiv\Desktop\test\pyspark_analysis_bigdata.ipynb Cell 45 in retrieve_array_column_stat_df(df, column_name, stat_list)
41 sport_record_rdd = df.select('sport', column_name).rdd \
42 .map(tuple).combineByKey(to_list, extend, extend).persist()
44 # Calculate statistics of the input column by calling calculate_stats function defined above
---> 45 record_statistic_df = pd.DataFrame(sport_record_rdd.map(
46 lambda x: calculate_stats(x, column_name,stat_list)).collect()
47 )
48 # Set proper dataframe column orders
49 columns_order = ['sport'] + [stat + ' ' + column_name for stat in stat_list]
File c:\Users\shiv\miniconda3\lib\site-packages\pyspark\rdd.py:1197, in RDD.collect(self)
1195 with SCCallSiteSync(self.context):
1196 assert self.ctx._jvm is not None
-> 1197 sock_info = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
1198 return list(_load_from_socket(sock_info, self._jrdd_deserializer))
File c:\Users\shiv\miniconda3\lib\site-packages\py4j\java_gateway.py:1321, in JavaMember.__call__(self, *args)
1315 command = proto.CALL_COMMAND_NAME +\
1316 self.command_header +\
1317 args_command +\
1318 proto.END_COMMAND_PART
1320 answer = self.gateway_client.send_command(command)
-> 1321 return_value = get_return_value(
1322 answer, self.gateway_client, self.target_id, self.name)
1324 for temp_arg in temp_args:
1325 temp_arg._detach()
File c:\Users\shiv\miniconda3\lib\site-packages\pyspark\sql\utils.py:190, in capture_sql_exception.<locals>.deco(*a, **kw)
188 def deco(*a: Any, **kw: Any) -> Any:
189 try:
--> 190 return f(*a, **kw)
191 except Py4JJavaError as e:
192 converted = convert_exception(e.java_exception)
File c:\Users\shiv\miniconda3\lib\site-packages\py4j\protocol.py:326, in get_return_value(answer, gateway_client, target_id, name)
324 value = OUTPUT_CONVERTER[type](answer[2:], gateway_client)
325 if answer[1] == REFERENCE_TYPE:
--> 326 raise Py4JJavaError(
327 "An error occurred while calling {0}{1}{2}.\n".
328 format(target_id, ".", name), value)
329 else:
330 raise Py4JError(
331 "An error occurred while calling {0}{1}{2}. Trace:\n{3}\n".
332 format(target_id, ".", name, value))
Py4JJavaError: An error occurred while calling z:org.apache.spark.api.python.PythonRDD.collectAndServe.
: org.apache.spark.SparkException: Job aborted due to stage failure: Task 5 in stage 155.0 failed 1 times, most recent failure: Lost task 5.0 in stage 155.0 (TID 1775) (shiv executor driver): java.net.SocketException: Connection reset by peer
at java.base/sun.nio.ch.NioSocketImpl.implWrite(NioSocketImpl.java:413)
at java.base/sun.nio.ch.NioSocketImpl.write(NioSocketImpl.java:433)
at java.base/sun.nio.ch.NioSocketImpl$2.write(NioSocketImpl.java:812)
at java.base/java.net.Socket$SocketOutputStream.write(Socket.java:1120)
at java.base/java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:125)
at java.base/java.io.BufferedOutputStream.implWrite(BufferedOutputStream.java:215)
at java.base/java.io.BufferedOutputStream.write(BufferedOutputStream.java:199)
at java.base/java.io.DataOutputStream.write(DataOutputStream.java:112)
at java.base/java.io.FilterOutputStream.write(FilterOutputStream.java:108)
at org.apache.spark.api.python.PythonRDD$.write$1(PythonRDD.scala:295)
at org.apache.spark.api.python.PythonRDD$.$anonfun$writeIteratorToStream$1(PythonRDD.scala:307)
at org.apache.spark.api.python.PythonRDD$.$anonfun$writeIteratorToStream$1$adapted(PythonRDD.scala:307)
at scala.collection.Iterator.foreach(Iterator.scala:943)
at scala.collection.Iterator.foreach$(Iterator.scala:943)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
at org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:307)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.writeIteratorToStream(PythonUDFRunner.scala:53)
at org.apache.spark.api.python.BasePythonRunner$WriterThread.$anonfun$run$1(PythonRunner.scala:438)
at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:2066)
at org.apache.spark.api.python.BasePythonRunner$WriterThread.run(PythonRunner.scala:272)
Driver stacktrace:
at org.apache.spark.scheduler.DAGScheduler.failJobAndIndependentStages(DAGScheduler.scala:2672)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2(DAGScheduler.scala:2608)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$abortStage$2$adapted(DAGScheduler.scala:2607)
at scala.collection.mutable.ResizableArray.foreach(ResizableArray.scala:62)
at scala.collection.mutable.ResizableArray.foreach$(ResizableArray.scala:55)
at scala.collection.mutable.ArrayBuffer.foreach(ArrayBuffer.scala:49)
at org.apache.spark.scheduler.DAGScheduler.abortStage(DAGScheduler.scala:2607)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1(DAGScheduler.scala:1182)
at org.apache.spark.scheduler.DAGScheduler.$anonfun$handleTaskSetFailed$1$adapted(DAGScheduler.scala:1182)
at scala.Option.foreach(Option.scala:407)
at org.apache.spark.scheduler.DAGScheduler.handleTaskSetFailed(DAGScheduler.scala:1182)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.doOnReceive(DAGScheduler.scala:2860)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2802)
at org.apache.spark.scheduler.DAGSchedulerEventProcessLoop.onReceive(DAGScheduler.scala:2791)
at org.apache.spark.util.EventLoop$$anon$1.run(EventLoop.scala:49)
at org.apache.spark.scheduler.DAGScheduler.runJob(DAGScheduler.scala:952)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2228)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2249)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2268)
at org.apache.spark.SparkContext.runJob(SparkContext.scala:2293)
at org.apache.spark.rdd.RDD.$anonfun$collect$1(RDD.scala:1021)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:151)
at org.apache.spark.rdd.RDDOperationScope$.withScope(RDDOperationScope.scala:112)
at org.apache.spark.rdd.RDD.withScope(RDD.scala:406)
at org.apache.spark.rdd.RDD.collect(RDD.scala:1020)
at org.apache.spark.api.python.PythonRDD$.collectAndServe(PythonRDD.scala:180)
at org.apache.spark.api.python.PythonRDD.collectAndServe(PythonRDD.scala)
at java.base/jdk.internal.reflect.DirectMethodHandleAccessor.invoke(DirectMethodHandleAccessor.java:104)
at java.base/java.lang.reflect.Method.invoke(Method.java:578)
at py4j.reflection.MethodInvoker.invoke(MethodInvoker.java:244)
at py4j.reflection.ReflectionEngine.invoke(ReflectionEngine.java:357)
at py4j.Gateway.invoke(Gateway.java:282)
at py4j.commands.AbstractCommand.invokeMethod(AbstractCommand.java:132)
at py4j.commands.CallCommand.execute(CallCommand.java:79)
at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
at java.base/java.lang.Thread.run(Thread.java:1589)
Caused by: java.net.SocketException: Connection reset by peer
at java.base/sun.nio.ch.NioSocketImpl.implWrite(NioSocketImpl.java:413)
at java.base/sun.nio.ch.NioSocketImpl.write(NioSocketImpl.java:433)
at java.base/sun.nio.ch.NioSocketImpl$2.write(NioSocketImpl.java:812)
at java.base/java.net.Socket$SocketOutputStream.write(Socket.java:1120)
at java.base/java.io.BufferedOutputStream.flushBuffer(BufferedOutputStream.java:125)
at java.base/java.io.BufferedOutputStream.implWrite(BufferedOutputStream.java:215)
at java.base/java.io.BufferedOutputStream.write(BufferedOutputStream.java:199)
at java.base/java.io.DataOutputStream.write(DataOutputStream.java:112)
at java.base/java.io.FilterOutputStream.write(FilterOutputStream.java:108)
at org.apache.spark.api.python.PythonRDD$.write$1(PythonRDD.scala:295)
at org.apache.spark.api.python.PythonRDD$.$anonfun$writeIteratorToStream$1(PythonRDD.scala:307)
at org.apache.spark.api.python.PythonRDD$.$anonfun$writeIteratorToStream$1$adapted(PythonRDD.scala:307)
at scala.collection.Iterator.foreach(Iterator.scala:943)
at scala.collection.Iterator.foreach$(Iterator.scala:943)
at scala.collection.AbstractIterator.foreach(Iterator.scala:1431)
at org.apache.spark.api.python.PythonRDD$.writeIteratorToStream(PythonRDD.scala:307)
at org.apache.spark.sql.execution.python.PythonUDFRunner$$anon$1.writeIteratorToStream(PythonUDFRunner.scala:53)
at org.apache.spark.api.python.BasePythonRunner$WriterThread.$anonfun$run$1(PythonRunner.scala:438)
at org.apache.spark.util.Utils$.logUncaughtExceptions(Utils.scala:2066)
at org.apache.spark.api.python.BasePythonRunner$WriterThread.run(PythonRunner.scala:272)
I am not able to understand the error; can anybody help here?
Important info -
pyspark and spark version - 3.3.1
python version - 3.9.13
Windows 11, 16GB, Intel Core i5-11400H, NVIDIA-GeForce GTX 1650 with 4GB GDDR6 dedicated VRAM
Java version - 19.0.2
I tried allocating maximum memory to the driver and executors, increased the heartbeat interval and network timeout, and then later used findspark to initialize Spark. The setup above is the latest, and I am still getting the same error.
Update
I tried running the code on a small chunk of the data with the changes below, and the code ran successfully:
df_min = df.limit(1000)
stat_list = ['min', '25th percentile', 'mean', '50th percentile',
'75th percentile', '95th percentile', 'max', 'stdev']
interval_statistic_df = retrieve_array_column_stat_df(df_min, column_name='interval', stat_list=stat_list)
print('\nLet\'s look at statistic for interval, in seconds (by sport):' )
interval_statistic_df
Therefore, the size of the data is the issue. Can anyone help me with optimized code that avoids the RDD and the map()/collect() pattern?
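For reference, a minimal sketch of the same per-sport statistics using only the DataFrame API (assuming 'interval' is an array-of-numbers column, as the RDD version implies; percentile_approx gives approximate percentiles, and data only reaches the driver after aggregation):
from pyspark.sql import functions as F

# One row per individual interval value, keyed by sport
exploded = df.select('sport', F.explode('interval').alias('interval'))

interval_statistic_df = (
    exploded.groupBy('sport')
    .agg(
        F.min('interval').alias('min interval'),
        F.percentile_approx('interval', 0.25).alias('25th percentile interval'),
        F.mean('interval').alias('mean interval'),
        F.percentile_approx('interval', 0.5).alias('50th percentile interval'),
        F.percentile_approx('interval', 0.75).alias('75th percentile interval'),
        F.percentile_approx('interval', 0.95).alias('95th percentile interval'),
        F.max('interval').alias('max interval'),
        F.stddev('interval').alias('stdev interval'),
    )
    .toPandas()
)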
I am trying to calculate the cosine distance between the title and headline columns using a pre-trained BERT model, as shown below. The columns and one example row:
title: Dance Gavin Dance bass player Tim Feerick dead at 34
headline: Prince Harry and Meghan Markle make secret visit to see Queen ahead of Invictus Games
title_array: ["Dance Gavin Dance bass player Tim Feerick dead at 34"]
headline_array: ["Prince Harry and Meghan Markle make secret visit to see Queen ahead of Invictus Games"]
arrayed: ["Dance Gavin Dance bass player Tim Feerick dead at 34", "Prince Harry and Meghan Markle make secret visit to see Queen ahead of Invictus Games"]
from sentence_transformers import SentenceTransformer
import numpy as np
from pyspark.sql.types import FloatType
import pyspark.sql.functions as f

# downloading bert
model = SentenceTransformer('bert-base-nli-mean-tokens')

# decorator registers the function as a Spark UDF
@f.udf(FloatType())
def cosine_similarity(sentence_embeddings, ind_a, ind_b):
    s = sentence_embeddings
    return np.dot(s[ind_a], s[ind_b]) / (np.linalg.norm(s[ind_a]) * np.linalg.norm(s[ind_b]))

#udf_bert = udf(cosine_similarity, FloatType())
'''
s0 = "our president is a good leader he will not fail"
s1 = "our president is not a good leader he will fail"
s2 = "our president is a good leader"
s3 = "our president will succeed"
sentences = [s0, s1, s2, s3]
sentence_embeddings = model.encode(sentences)
s = sentence_embeddings
print(f"{s0} <--> {s1}: {udf_bert(sentence_embeddings, 0, 1)}")
print(f"{s0} <--> {s2}: {cosine_similarity(sentence_embeddings, 0, 2)}")
print(f"{s0} <--> {s3}: {cosine_similarity(sentence_embeddings, 0, 3)}")
'''
test_df = test_df.withColumn("Similarities", cosine_similarity(model.encode(test_df.arrayed), 0, 1))
As the example shows, the function takes the concatenation of the two string arrays and calculates the cosine distance between them.
When I run only the function on the sample texts (the block commented out above), it works. But when I try to apply it to my dataframe by registering it as a UDF and calling it on the dataframe, I get the error below:
TypeError Traceback (most recent call last)
<command-757165186581086> in <module>
26 '''''
27
---> 28 test_df = test_df.withColumn("Similarities", f.lit(cosine_similarity(model.encode(test_df.arrayed), 0, 1)))
/databricks/spark/python/pyspark/sql/udf.py in wrapper(*args)
197 #functools.wraps(self.func, assigned=assignments)
198 def wrapper(*args):
--> 199 return self(*args)
200
201 wrapper.__name__ = self._name
/databricks/spark/python/pyspark/sql/udf.py in __call__(self, *cols)
177 judf = self._judf
178 sc = SparkContext._active_spark_context
--> 179 return Column(judf.apply(_to_seq(sc, cols, _to_java_column)))
180
181 # This function is for improving the online help system in the interactive interpreter.
/databricks/spark/python/pyspark/sql/column.py in _to_seq(sc, cols, converter)
60 """
61 if converter:
---> 62 cols = [converter(c) for c in cols]
63 return sc._jvm.PythonUtils.toSeq(cols)
64
/databricks/spark/python/pyspark/sql/column.py in <listcomp>(.0)
60 """
61 if converter:
---> 62 cols = [converter(c) for c in cols]
63 return sc._jvm.PythonUtils.toSeq(cols)
64
/databricks/spark/python/pyspark/sql/column.py in _to_java_column(col)
44 jcol = _create_column_from_name(col)
45 else:
---> 46 raise TypeError(
47 "Invalid argument, not a string or column: "
48 "{0} of type {1}. "
TypeError: Invalid argument, not a string or column: [-0.29246375 0.02216947 0.610355 -0.02230968 0.61386955 0.15291359]
The input of a UDF must be a Column or a column name; that's why Spark complains Invalid argument, not a string or column: [-0.29246375 0.02216947 0.610355 -0.02230968 0.61386955 0.15291359]. You'll need to pass arrayed only and load the model inside your UDF. Something like this:
@f.udf(FloatType())
def cosine_similarity(sentences, ind_a, ind_b):
    # Load the model inside the UDF so it is available on the executors
    from sentence_transformers import SentenceTransformer
    import numpy as np
    model = SentenceTransformer('bert-base-nli-mean-tokens')
    s = model.encode(sentences)
    # Return a plain Python float so it matches the declared FloatType
    return float(np.dot(s[ind_a], s[ind_b]) / (np.linalg.norm(s[ind_a]) * np.linalg.norm(s[ind_b])))

# The index arguments must also be Columns, hence lit()
test_df = test_df.withColumn("Similarities", cosine_similarity(test_df.arrayed, f.lit(0), f.lit(1)))
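One caveat with this approach: a plain Python UDF runs the function once per row, so the model is reloaded for every row. A hedged alternative sketch, assuming Spark 3.x with Arrow available and using a function name of my own (title_headline_similarity), is a pandas UDF that loads the model once per batch:
import pandas as pd
import numpy as np
from pyspark.sql.functions import pandas_udf, col
from pyspark.sql.types import FloatType

@pandas_udf(FloatType())
def title_headline_similarity(arrayed: pd.Series) -> pd.Series:
    # Load the model once per batch rather than once per row
    from sentence_transformers import SentenceTransformer
    model = SentenceTransformer('bert-base-nli-mean-tokens')

    def sim(pair):
        # pair is [title, headline]; encode both and take their cosine similarity
        s = model.encode(list(pair))
        return float(np.dot(s[0], s[1]) / (np.linalg.norm(s[0]) * np.linalg.norm(s[1])))

    return arrayed.apply(sim)

test_df = test_df.withColumn("Similarities", title_headline_similarity(col("arrayed")))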
I am trying to tune my model on Databricks using PySpark.
I receive the following error:
TypeError: object of type 'ParamGridBuilder' has no len()
My code is listed below.
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator
als = ALS(userCol = "userId",itemCol="movieId", ratingCol="rating", coldStartStrategy="drop", nonnegative = True, implicitPrefs = False)
# Imports ParamGridBuilder package
from pyspark.ml.tuning import ParamGridBuilder
# Creates a ParamGridBuilder, and adds hyperparameters
param_grid = ParamGridBuilder().addGrid(als.rank, [5,10,20,40]).addGrid(als.maxIter, [5,10,15,20]).addGrid(als.regParam,[0.01,0.001,0.0001,0.02])
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",predictionCol="prediction")
# Imports CrossValidator package
from pyspark.ml.tuning import CrossValidator
# Creates the cross validator and tells Spark what to use for training and evaluation
cv = CrossValidator(estimator=als,
                    estimatorParamMaps=param_grid,
                    evaluator=evaluator,
                    numFolds=5)
model = cv.fit(training)
TypeError: object of type 'ParamGridBuilder' has no len()
Full Error Log:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<command-1952169986445972> in <module>()
----> 1 model = cv.fit(training)
2
3 # Extract best combination of values from cross validation
4
5 best_model = model.bestModel
/databricks/spark/python/pyspark/ml/base.py in fit(self, dataset, params)
130 return self.copy(params)._fit(dataset)
131 else:
--> 132 return self._fit(dataset)
133 else:
134 raise ValueError("Params must be either a param map or a list/tuple of param maps, "
/databricks/spark/python/pyspark/ml/tuning.py in _fit(self, dataset)
279 est = self.getOrDefault(self.estimator)
280 epm = self.getOrDefault(self.estimatorParamMaps)
--> 281 numModels = len(epm)
It simply means that your object does not have a length (unlike lists). Thus, in your line
param_grid = ParamGridBuilder() \
    .addGrid(als.rank, [5, 10, 20, 40]) \
    .addGrid(als.maxIter, [5, 10, 15, 20]) \
    .addGrid(als.regParam, [0.01, 0.001, 0.0001, 0.02])
You should add .build() at the end to actually construct the grid.
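For reference, the corrected grid with the same hyperparameter values; build() returns the list of param maps that CrossValidator expects:
param_grid = (ParamGridBuilder()
              .addGrid(als.rank, [5, 10, 20, 40])
              .addGrid(als.maxIter, [5, 10, 15, 20])
              .addGrid(als.regParam, [0.01, 0.001, 0.0001, 0.02])
              .build())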
I'm working on code that previously executed correctly with this dataframe, but this time I get an error. (The only difference is that I used persist() on the dataframe this time.)
simMat = IndexedRMat.columnSimilarities()
executes correctly, but then this part:
columns = ['product1', 'product2', 'sim']
vals = simMat.entries.map(lambda e: (e.i, e.j, e.value)).collect()
dfsim = spark.createDataFrame(vals, columns)
generates this error:
AttributeErrorTraceback (most recent call last)
<ipython-input-100-11502084c71b> in <module>()
1 columns = ['product1', 'product2', 'sim']
----> 2 vals = simMat.entries.map(lambda e: (e.i, e.j, e.value)).collect()
3 dfsim = spark.createDataFrame(vals, columns)
/opt/spark-2.3.0-SNAPSHOT-bin-spark-master/python/pyspark/rdd.pyc in collect(self)
806 to be small, as all the data is loaded into the driver's memory.
807 """
--> 808 with SCCallSiteSync(self.context) as css:
809 port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
810 return list(_load_from_socket(port, self._jrdd_deserializer))
/opt/spark-2.3.0-SNAPSHOT-bin-spark-master/python/pyspark/traceback_utils.pyc in __enter__(self)
70 def __enter__(self):
71 if SCCallSiteSync._spark_stack_depth == 0:
---> 72 self._context._jsc.setCallSite(self._call_site)
73 SCCallSiteSync._spark_stack_depth += 1
74
AttributeError: 'NoneType' object has no attribute 'setCallSite'
What does it mean? I'm new to Spark and couldn't find an explanation for this type of error.
I tried to convert a nested list to a DataFrame by following the answers in this link:
List to DataFrame in pyspark
my_data =[['apple','ball','ballon'],['cat','camel','james'],['none','focus','cake']]
from pyspark.sql import Row
R = Row('ID', 'words')
spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()
But I get this error:
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-147-780a8d7196df> in <module>()
----> 5 spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()
F:\spark\spark\python\pyspark\sql\session.py in createDataFrame(self, data, schema, samplingRatio, verifySchema)
--> 689 rdd, schema = self._createFromLocal(map(prepare, data), schema)
F:\spark\spark\python\pyspark\sql\session.py in _createFromLocal(self, data, schema)
--> 424 return self._sc.parallelize(data), schema
F:\spark\spark\python\pyspark\context.py in parallelize(self, c, numSlices)
--> 484 jrdd = self._serialize_to_jvm(c, numSlices, serializer)
F:\spark\spark\python\pyspark\context.py in _serialize_to_jvm(self, data, parallelism, serializer)
--> 493 tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
~\Anaconda3\lib\tempfile.py in NamedTemporaryFile(mode, buffering, encoding, newline, suffix, prefix, dir, delete)
547 flags |= _os.O_TEMPORARY
548
--> 549 (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
550 try:
551 file = _io.open(fd, mode, buffering=buffering,
~\Anaconda3\lib\tempfile.py in _mkstemp_inner(dir, pre, suf, flags, output_type)
258 file = _os.path.join(dir, pre + name + suf)
259 try:
--> 260 fd = _os.open(file, flags, 0o600)
261 except FileExistsError:
262 continue # try again
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\*****\\AppData\\Local\\Temp\\spark-e340269d-a29e-4b95-90d3-c424a04fcb0a\\pyspark-f7fce557-e11b-47c9-b7a5-81e72a360b36\\tmp7n0s97t2'
I was getting the same error from a Jupyter notebook with PySpark.
It worked after restarting the notebook kernel.