Multiclass Classification Evaluator in PySpark

from pyspark.ml.classification import MultilayerPerceptronClassifier
inputneurons = len(pipe_df.columns)
nn = MultilayerPerceptronClassifier(layers = [inputneurons,20,2])
nn_model = nn.fit(train_data)
results = nn_model.transform(test_data)
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
evaluator = MulticlassClassificationEvaluator()
mlp_accuracy = evaluator.evaluate(results)
When I run it, I get the following error:
---------------------------------------------------------------------------
Py4JJavaError Traceback (most recent call last)
in ()
23 evaluator = MulticlassClassificationEvaluator()
24
---> 25 mlp_accuracy = evaluator.evaluate(results)
26
27
I also tried BinaryClassificationEvaluator, but it doesn't work either.
Does anyone know what's wrong here? I am new to PySpark...
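As a hedged side note on the setup (the root Java exception is not shown above, so this is not a confirmed diagnosis): the first entry of layers must equal the size of the features vector and the last entry the number of classes, and MulticlassClassificationEvaluator defaults to labelCol="label", predictionCol="prediction" and the "f1" metric. A minimal sketch with everything configured explicitly, assuming the prepared data uses the default "features" and "label" column names:
from pyspark.ml.classification import MultilayerPerceptronClassifier
from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# Assumption: train_data/test_data already contain "features" and "label" columns.
# The input layer size comes from the features vector, not the raw column count.
input_size = len(train_data.first()["features"])

nn = MultilayerPerceptronClassifier(
    featuresCol="features", labelCol="label",
    layers=[input_size, 20, 2])   # one hidden layer of 20, two output classes
nn_model = nn.fit(train_data)
results = nn_model.transform(test_data)

# Configure the evaluator explicitly instead of relying on the defaults
evaluator = MulticlassClassificationEvaluator(
    labelCol="label", predictionCol="prediction", metricName="accuracy")
mlp_accuracy = evaluator.evaluate(results)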


org.jpmml.sparkml.PMMLBuilder does not exist in the JVM

My goal is to save a trained model in XML (PMML) format, and I'm really struggling with the following errors and warnings. Thanks a lot for any help.
---------------------------------------------------------------------------
Exception in thread "Thread-4" java.lang.ExceptionInInitializerError
at java.base/java.lang.Class.forName0(Native Method)
at java.base/java.lang.Class.forName(Class.java:398)
at py4j.reflection.CurrentThreadClassLoadingStrategy.classForName(CurrentThreadClassLoadingStrategy.java:40)
at py4j.reflection.ReflectionUtil.classForName(ReflectionUtil.java:51)
at py4j.reflection.TypeUtil.forName(TypeUtil.java:243)
at py4j.commands.ReflectionCommand.getUnknownMember(ReflectionCommand.java:175)
at py4j.commands.ReflectionCommand.execute(ReflectionCommand.java:87)
at py4j.ClientServerConnection.waitForCommands(ClientServerConnection.java:182)
at py4j.ClientServerConnection.run(ClientServerConnection.java:106)
at java.base/java.lang.Thread.run(Thread.java:829)
Caused by: java.lang.IllegalArgumentException: Expected Apache Spark ML version 3.1, got version 3.2 (3.2.0)
at org.jpmml.sparkml.ConverterFactory.checkVersion(ConverterFactory.java:114)
at org.jpmml.sparkml.PMMLBuilder.init(PMMLBuilder.java:481)
at org.jpmml.sparkml.PMMLBuilder.<clinit>(PMMLBuilder.java:545)
... 10 more
ERROR:root:Exception while sending command.
Traceback (most recent call last):
File "/home/mbg/.local/lib/python3.8/site-packages/pyspark/python/lib/py4j-0.10.9.2-src.zip/py4j/clientserver.py", line 480, in send_command
raise Py4JNetworkError("Answer from Java side is empty")
py4j.protocol.Py4JNetworkError: Answer from Java side is empty
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
File "/home/mbg/.local/lib/python3.8/site-packages/pyspark/python/lib/py4j-0.10.9.2-src.zip/py4j/java_gateway.py", line 1038, in send_command
response = connection.send_command(command)
File "/home/mbg/.local/lib/python3.8/site-packages/pyspark/python/lib/py4j-0.10.9.2-src.zip/py4j/clientserver.py", line 503, in send_command
raise Py4JNetworkError(
py4j.protocol.Py4JNetworkError: Error while sending or receiving
---------------------------------------------------------------------------
Py4JError Traceback (most recent call last)
/tmp/ipykernel_20251/3496938591.py in <module>
----> 1 pmmlBuilder = PMMLBuilder(sc, df_train, rfModel)
~/.local/lib/python3.8/site-packages/pyspark2pmml/__init__.py in __init__(self, sc, df, pipelineModel)
10 javaSchema = javaDf.schema.__call__()
11 javaPipelineModel = pipelineModel._to_java()
---> 12 javaPmmlBuilderClass = sc._jvm.org.jpmml.sparkml.PMMLBuilder
13 if(not isinstance(javaPmmlBuilderClass, JavaClass)):
14 raise RuntimeError("JPMML-SparkML not found on classpath")
~/.local/lib/python3.8/site-packages/pyspark/python/lib/py4j-0.10.9.2-src.zip/py4j/java_gateway.py in __getattr__(self, name)
1647 answer[proto.CLASS_FQN_START:], self._gateway_client)
1648 else:
-> 1649 raise Py4JError("{0} does not exist in the JVM".format(new_fqn))
1650
1651
Py4JError: org.jpmml.sparkml.PMMLBuilder does not exist in the JVM
My code is the following:
from pyspark import SparkConf
from pyspark import SparkContext
from pyspark.sql import SparkSession
conf = SparkConf().setAppName("SparkApp_ETL_ML").setMaster("local[*]")
sc = SparkContext.getOrCreate(conf)
spark = SparkSession.builder.getOrCreate()
import pandas as pd
df=pd.read_parquet("https://s3.eu-de.cloud-object-storage.appdomain.cloud/cloud-object-storage-yy-cos-standard-js4/data.parquet")
sdf = spark.createDataFrame(df)
from pyspark.sql.types import DoubleType
sdf = sdf.withColumn("x", sdf.x.cast(DoubleType()))
sdf = sdf.withColumn("y", sdf.y.cast(DoubleType()))
sdf = sdf.withColumn("z", sdf.z.cast(DoubleType()))
from pyspark.ml.feature import StringIndexer
from pyspark.ml.feature import VectorAssembler
from pyspark.ml.feature import MinMaxScaler
from pyspark.ml import Pipeline
from pyspark.ml.evaluation import MulticlassClassificationEvaluator
input_columns = ["x", "y", "z"] # input columns to consider
train, test = sdf.randomSplit([0.8, 0.2], seed=1)
indexer = StringIndexer(inputCol="class", outputCol="label")
vectorAssembler = VectorAssembler(inputCols=input_columns, outputCol="features")
normalizer = MinMaxScaler(inputCol="features", outputCol="features_norm")
pipeline = Pipeline(stages=[indexer, vectorAssembler, normalizer])
binEval = MulticlassClassificationEvaluator() \
    .setMetricName("accuracy") \
    .setPredictionCol("prediction") \
    .setLabelCol("label")
df_train = pipeline.fit(train).transform(train)
df_test = pipeline.fit(test).transform(test)
from pyspark.ml.classification import RandomForestClassifier
rf = RandomForestClassifier(featuresCol='features_norm', labelCol='label', maxDepth=20, numTrees=7, seed=1)
rfModel = rf.fit(df_train)
from pyspark2pmml import PMMLBuilder
model_target = "HMP_frModel.xml"
pmmlBuilder = PMMLBuilder(sc, df_train, rfModel)
Everything works fine until the last line of the code.
I tried all the solutions I found on the internet, but unfortunately without success.
I am working in a Jupyter notebook (not Anaconda), installed PySpark with pip, and added these variables to my .bashrc:
export PATH=$PATH:~/.local/bin
export SPARK_HOME=~/.local/lib/python3.8/site-packages/pyspark
export PYTHONPATH=$SPARK_HOME/python/lib/py4j-0.10.9.2-src.zip
export PATH=$SPARK_HOME/bin:$SPARK_HOME/python:$PATH
export PYSPARK_DRIVER_PYTHON=jupyter
export PYSPARK_DRIVER_PYTHON_OPTS='notebook'
I also downloaded the JAR files jpmml-sparkml-executable-1.7.2.jar and jpmml-sparkml-executable-1.8.0.jar and put them in the directory ~/.local/lib/python3.8/site-packages/pyspark/jars.
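A hedged observation based on the stack trace above: the root cause reported by JPMML-SparkML is "Expected Apache Spark ML version 3.1, got version 3.2 (3.2.0)", i.e. the JPMML-SparkML JAR has to match the installed Spark ML version, and keeping two different executable JARs in pyspark/jars lets a mismatched one trigger the ExceptionInInitializerError, after which PMMLBuilder never loads. A minimal sketch of one way to pin a single JAR via the spark.jars setting (the path and version placeholder below are assumptions, not a verified pairing):
from pyspark import SparkConf, SparkContext
from pyspark.sql import SparkSession

# Assumption: this single JAR matches the installed Spark ML version;
# keep only one jpmml-sparkml-executable JAR on the classpath.
jpmml_jar = "/home/mbg/jars/jpmml-sparkml-executable-<matching-version>.jar"  # hypothetical path

conf = (SparkConf()
        .setAppName("SparkApp_ETL_ML")
        .setMaster("local[*]")
        .set("spark.jars", jpmml_jar))   # put the JPMML-SparkML JAR on the classpath

sc = SparkContext.getOrCreate(conf)
spark = SparkSession.builder.getOrCreate()
After restarting the kernel so a fresh JVM picks up the classpath change, the PMMLBuilder(sc, df_train, rfModel) call can be retried.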

Sympy .all_coeffs() returned list is not readable by scipy

I have a question about the data type of the result returned by Sympy's Poly.all_coeffs(). I have only started using Sympy recently.
My Sympy transfer function is the following:
Then I run this code:
from sympy import fraction, Poly, symbols

# s is the Laplace variable and Gs the transfer function defined above
s = symbols('s')
n, d = fraction(Gs)           # split Gs into numerator and denominator
num = Poly(n, s)
den = Poly(d, s)
num_c = num.all_coeffs()      # coefficients, highest power first
den_c = den.all_coeffs()
I get:
Then I run this code:
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal

# nu = [5000000.0]             # plain Python lists: these work
# de = [4.99, 509000.0]
nu = num_c                      # Sympy coefficients from above
de = den_c
sys = signal.lti(nu, de)
w, mag, phase = signal.bode(sys)
plt.plot(w / (2 * np.pi), mag)
and the result is:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-131-fb960684259c> in <module>
4 nu = num_c
5 de = den_c
----> 6 sys = signal.lti(nu, de)
But if I use the commented-out plain Python lists 'nu' and 'de' instead, the program works. So what is wrong here?
Why did you show only a bit of the error? Why not the full message, maybe even the full traceback?
In [60]: sys = signal.lti(num_c, den_c)
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-60-21f71ecd8884> in <module>
----> 1 sys = signal.lti(num_c, den_c)
/usr/local/lib/python3.6/dist-packages/scipy/signal/ltisys.py in __init__(self, *system, **kwargs)
590 self._den = None
591
--> 592 self.num, self.den = normalize(*system)
593
594 def __repr__(self):
/usr/local/lib/python3.6/dist-packages/scipy/signal/filter_design.py in normalize(b, a)
1609 leading_zeros = 0
1610 for col in num.T:
-> 1611 if np.allclose(col, 0, atol=1e-14):
1612 leading_zeros += 1
1613 else:
<__array_function__ internals> in allclose(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/numpy/core/numeric.py in allclose(a, b, rtol, atol, equal_nan)
2169
2170 """
-> 2171 res = all(isclose(a, b, rtol=rtol, atol=atol, equal_nan=equal_nan))
2172 return bool(res)
2173
<__array_function__ internals> in isclose(*args, **kwargs)
/usr/local/lib/python3.6/dist-packages/numpy/core/numeric.py in isclose(a, b, rtol, atol, equal_nan)
2267 y = array(y, dtype=dt, copy=False, subok=True)
2268
-> 2269 xfin = isfinite(x)
2270 yfin = isfinite(y)
2271 if all(xfin) and all(yfin):
TypeError: ufunc 'isfinite' not supported for the input types, and the inputs could not be safely coerced to any supported types according to the casting rule ''safe''
Now look at the elements of the num_c list (same for den_c):
In [55]: num_c[0]
Out[55]: 500000.000000000
In [56]: type(_)
Out[56]: sympy.core.numbers.Float
The scipy code does numpy testing on the inputs, so it has first turned the lists into arrays:
In [61]: np.array(num_c)
Out[61]: array([500000.000000000], dtype=object)
This array contains sympy objects. numpy can't cast that to float under the 'safe' casting rule, but an explicit astype uses 'unsafe' casting by default:
In [63]: np.array(num_c).astype(float)
Out[63]: array([500000.])
So let's convert both lists into valid numpy float arrays:
In [64]: sys = signal.lti(np.array(num_c).astype(float), np.array(den_c).astype(float))
In [65]: sys
Out[65]:
TransferFunctionContinuous(
array([100200.4008016]),
array([1.00000000e+00, 1.02004008e+05]),
dt: None
)
Conversion in a list comprehension also works:
sys = signal.lti([float(i) for i in num_c],[float(i) for i in den_c])
You likely need to convert the sympy objects to floats / lists of floats.
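Putting the pieces together, here is a short self-contained sketch of the whole round trip; the transfer function is reconstructed from the coefficient values mentioned above and stands in for the original one:
import numpy as np
import matplotlib.pyplot as plt
from scipy import signal
from sympy import symbols, fraction, Poly

s = symbols('s')
Gs = 500000.0 / (4.99 * s + 509000.0)   # assumed example transfer function

n, d = fraction(Gs)                     # numerator and denominator expressions
num_c = Poly(n, s).all_coeffs()         # sympy Floats, highest power first
den_c = Poly(d, s).all_coeffs()

# Convert the sympy Floats to plain Python floats before handing them to scipy
sys = signal.lti([float(c) for c in num_c], [float(c) for c in den_c])
w, mag, phase = signal.bode(sys)

plt.semilogx(w / (2 * np.pi), mag)      # magnitude in dB versus frequency in Hz
plt.xlabel("Frequency [Hz]")
plt.ylabel("Magnitude [dB]")
plt.show()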

How to limit FPGrowth itemsets to just 2 or 3

I am running the FPGrowth algorithm with PySpark on Python 3.6 in a Jupyter notebook. When I try to save the association rules, the output of generated rules is huge, so I want to limit the number of consequents. Here is the code I have tried; I also changed the Spark context parameters.
Related: Maximum Pattern Length fpGrowth (Apache) PySpark
from pyspark.sql.functions import col, size
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import Row
from pyspark.context import SparkContext
from pyspark.sql.session import SparkSession
from pyspark import SparkConf
conf = SparkConf().setAppName("App")
conf = (conf.setMaster('local[*]')
.set('spark.executor.memory', '100G')
.set('spark.driver.memory', '400G')
.set('spark.driver.maxResultSize', '200G'))
sc = SparkContext.getOrCreate(conf=conf)
spark = SparkSession(sc)
R = Row('ID', 'items')
df=spark.createDataFrame([R(i, x) for i, x in enumerate(lol)])
fpGrowth = FPGrowth(itemsCol="items", minSupport=0.7, minConfidence=0.9)
model = fpGrowth.fit(df)
ar = model.associationRules.where(size(col('antecedent')) == 2).where(size(col('consequent')) == 1)
ar.cache()
ar.toPandas().to_csv('output.csv')
It gives this error:
TypeError Traceback (most recent call last)
<ipython-input-1-f90c7a9f11ae> in <module>
---> 73 ar=model.associationRules.where(size(col('antecedent')) ==
2).where(size(col('consequent')) == 1)
TypeError: 'str' object is not callable
Can someone help me solve this issue?
Here lol is a list of lists of transactions: [['a','b'], ['c','a','e'], ...]
Python: 3.6.5
PySpark
Windows 10
The discussion above and the following link helped me resolve the problem:
'str' object is not callable TypeError
import pyspark.sql.functions as func
model.associationRules.where(func.size(func.col('antecedent')) == 1).where(func.size(func.col('consequent')) == 1).show()
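For context, a hedged note: a TypeError: 'str' object is not callable on col(...) usually means the name col (or size) has been rebound to a string somewhere earlier in the notebook session; going through the pyspark.sql.functions namespace sidesteps that clash. A small self-contained sketch of the same filtering, with a made-up transaction list standing in for lol:
import pyspark.sql.functions as func
from pyspark.ml.fpm import FPGrowth
from pyspark.sql import Row, SparkSession

spark = SparkSession.builder.master("local[*]").appName("App").getOrCreate()

# Made-up stand-in for the question's `lol` list of transactions
lol = [['a', 'b', 'c'], ['a', 'b', 'c'], ['a', 'b', 'e'], ['a', 'b', 'c', 'e']]
R = Row('ID', 'items')
df = spark.createDataFrame([R(i, x) for i, x in enumerate(lol)])

fpGrowth = FPGrowth(itemsCol="items", minSupport=0.5, minConfidence=0.6)
model = fpGrowth.fit(df)

# Keep only rules with exactly 2 items in the antecedent and 1 in the consequent
ar = (model.associationRules
      .where(func.size(func.col('antecedent')) == 2)
      .where(func.size(func.col('consequent')) == 1))
ar.show()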

NameError: name 're' is not defined... already imported re in the code and built in function

I keep getting "NameError: name 're' is not defined", even though I have already imported re in my code AND re is imported inside the function pat_count() defined in library_s19_week2.py. I tried importing re in all the possible places, but none of them worked. Please help!
My code:
import re
hash_pat = re.compile(r'#\w+')
hash_counter = pat_count(hash_pat)
tweet_table['hash_count'] = tweet_table.apply(lambda row: hash_counter(row['tweet']), axis=1)
Traceback for the error:
---------------------------------------------------------------------------
NameError Traceback (most recent call last)
<ipython-input-93-1880eb903ae9> in <module>()
10
11 hash_pat = re.compile(r'#\w+')
---> 12 hash_counter = pat_count(hash_pat)
13 tweet_table['hash_count'] = tweet_table.apply(lambda row: hash_counter(row['tweet']), axis=1)
14
/content/library_s19_week2.py in pat_count(pattern)
95 def pat_count(pattern):
96 import re
---> 97
98 pat = re.compile(pattern)
99
NameError: name 're' is not defined
I found my bug:
hash_pat = re.compile(r'#\w+') should be hash_pat = r'#\w+'.
As the traceback shows, pat_count() already calls re.compile() on its argument, so hash_pat should be passed as a raw pattern string rather than an already-compiled pattern.
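A small sketch of the corrected call; the pat_count() below is a hypothetical stand-in reconstructed from the traceback (it imports re and compiles the pattern itself), while the real implementation lives in library_s19_week2.py:
import pandas as pd

def pat_count(pattern):
    # Hypothetical stand-in for library_s19_week2.pat_count, based on the traceback
    import re
    pat = re.compile(pattern)
    return lambda text: len(pat.findall(text))

tweet_table = pd.DataFrame({'tweet': ['hello #world', 'no tags here', '#a and #b']})

hash_pat = r'#\w+'                      # pass the raw pattern string, not re.compile(...)
hash_counter = pat_count(hash_pat)
tweet_table['hash_count'] = tweet_table.apply(lambda row: hash_counter(row['tweet']), axis=1)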

PySpark TypeError: object of type 'ParamGridBuilder' has no len()

I am trying to tune my model on Databricks using PySpark.
I receive the following error:
TypeError: object of type 'ParamGridBuilder' has no len()
My code is listed below.
from pyspark.ml.recommendation import ALS
from pyspark.ml.evaluation import RegressionEvaluator
als = ALS(userCol = "userId",itemCol="movieId", ratingCol="rating", coldStartStrategy="drop", nonnegative = True, implicitPrefs = False)
# Imports ParamGridBuilder package
from pyspark.ml.tuning import ParamGridBuilder
# Creates a ParamGridBuilder, and adds hyperparameters
param_grid = ParamGridBuilder().addGrid(als.rank, [5,10,20,40]).addGrid(als.maxIter, [5,10,15,20]).addGrid(als.regParam,[0.01,0.001,0.0001,0.02])
evaluator = RegressionEvaluator(metricName="rmse", labelCol="rating",predictionCol="prediction")
# Imports CrossValidator package
from pyspark.ml.tuning import CrossValidator
# Creates cross validator and tells Spark what to use when training and evaluates
cv = CrossValidator(estimator = als,
estimatorParamMaps = param_grid,
evaluator = evaluator,
numFolds = 5)
model = cv.fit(training)
TypeError: object of type 'ParamGridBuilder' has no len()
Full Error Log:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<command-1952169986445972> in <module>()
----> 1 model = cv.fit(training)
2
3 # Extract best combination of values from cross validation
4
5 best_model = model.bestModel
/databricks/spark/python/pyspark/ml/base.py in fit(self, dataset, params)
130 return self.copy(params)._fit(dataset)
131 else:
--> 132 return self._fit(dataset)
133 else:
134 raise ValueError("Params must be either a param map or a list/tuple of param maps, "
/databricks/spark/python/pyspark/ml/tuning.py in _fit(self, dataset)
279 est = self.getOrDefault(self.estimator)
280 epm = self.getOrDefault(self.estimatorParamMaps)
--> 281 numModels = len(epm)
It simply means that your object does not have a length (unlike a list). Thus, in your lines
param_grid = ParamGridBuilder()
.addGrid(als.rank, [5,10,20,40])
.addGrid(als.maxIter, [5,10,15,20])
.addGrid(als.regParam, [0.01,0.001,0.0001,0.02])
You should add .build() at the end to actually construct the grid.
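A corrected sketch of the grid construction, reusing the als, evaluator and training objects defined above; .build() returns a plain list of param maps, which is what CrossValidator can take the length of:
from pyspark.ml.tuning import ParamGridBuilder, CrossValidator

param_grid = (ParamGridBuilder()
              .addGrid(als.rank, [5, 10, 20, 40])
              .addGrid(als.maxIter, [5, 10, 15, 20])
              .addGrid(als.regParam, [0.01, 0.001, 0.0001, 0.02])
              .build())                 # build() turns the builder into a list of param maps

cv = CrossValidator(estimator=als,
                    estimatorParamMaps=param_grid,
                    evaluator=evaluator,
                    numFolds=5)
model = cv.fit(training)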