My goal is to clean the data in a column of a PySpark DataFrame. I have written a function for cleaning:
def preprocess(text):
    text = text.lower()
    text = text.strip()
    text = re.compile('<.*?>').sub('', text)
    text = re.compile('[%s]' % re.escape(string.punctuation)).sub(' ', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'\[[0-9]*\]', ' ', text)
    text = re.sub(r'[^\w\s]', '', text.lower().strip())
    text = re.sub(r'\d', ' ', text)
    text = re.sub(r'\s+', ' ', text)
    return text
# LEMMATIZATION
# Initialize the lemmatizer
wl = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
def remove_stopwords(text):
    text = [i for i in text.split() if i not in stop_words]
    return text
# This is a helper function to map NLTK part-of-speech tags
def get_wordnet_pos(tag):
    if tag.startswith('J'):
        return wordnet.ADJ
    elif tag.startswith('V'):
        return wordnet.VERB
    elif tag.startswith('N'):
        return wordnet.NOUN
    elif tag.startswith('R'):
        return wordnet.ADV
    else:
        return wordnet.NOUN
# Tokenize the sentence
def lemmatizer(string):
    word_pos_tags = nltk.pos_tag(word_tokenize(string))  # Get part-of-speech tags
    a = [wl.lemmatize(word, get_wordnet_pos(tag)) for word, tag in word_pos_tags]  # Map the POS tag and lemmatize the word/token
    return " ".join(a)
# Final function
def finalpreprocess(string):
    return lemmatizer(' '.join(remove_stopwords(preprocess(string))))
The function seems to work fine when I test it. When I do
text = 'Ram and Bheem are buddies. They (both) like <b>running</b>. They got better at it over the weekend'
print(finalpreprocess(text))
I see the exact result I want.
ram bheem buddy like run get well weekend
However, when I try to apply finalpreprocess() to a column in a PySpark DataFrame, I get errors.
Here is what I did:
udf_txt_clean = udf(lambda x: finalpreprocess(x), StringType())
df.withColumn("cleaned_text", udf_txt_clean(col("reason"))).select("reason", "cleaned_text").show(10, False)
Then I get this error:
Traceback (most recent call last):
File "/databricks/spark/python/pyspark/serializers.py", line 473, in dumps
return cloudpickle.dumps(obj, pickle_protocol)
File "/databricks/spark/python/pyspark/cloudpickle/cloudpickle_fast.py", line 73, in dumps
cp.dump(obj)
File "/databricks/spark/python/pyspark/cloudpickle/cloudpickle_fast.py", line 563, in dump
return Pickler.dump(self, obj)
TypeError: cannot pickle '_thread.RLock' object
PicklingError: Could not serialize object: TypeError: cannot pickle '_thread.RLock' object
So far, here is what I have tried. In finalpreprocess() I use three different functions: preprocess(), remove_stopwords(), and lemmatizer(). I changed udf_txt_clean accordingly, like:
udf_txt_clean = udf(lambda x: preprocess(x),StringType())
udf_txt_clean = udf(lambda x: remove_stopwords(x),StringType())
These two run fine, but
udf_txt_clean = udf(lambda x: lemmatizer(x), StringType())
is the one that gives me the error. I am not able to understand why this function causes the error but the other two do not. From my limited understanding, I see that Spark is having trouble trying to pickle this function, but I don't understand why it needs to pickle it in the first place, or whether there is a workaround.
It would help if the example were more reproducible next time; it took a bit to re-create this. No worries, though, I have a solution here.
First, cloudpickle is the mechanism Spark uses to move a function from the driver to the workers: functions are pickled and then sent to the workers for execution. So something you are using can't be pickled. For a quick test, you can just use:
import cloudpickle
cloudpickle.dumps(x)
where x is whatever you want to check for cloudpickle-ability. In this case I tried a few things and found that wordnet is not serializable. You can confirm with:
cloudpickle.dumps(wordnet)
and it will reproduce the issue. You can get around this by importing anything that can't be pickled inside your function. Here is an end-to-end example.
import re
import string
import nltk
import pandas as pd
from nltk.stem import WordNetLemmatizer
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
from pyspark.sql import SparkSession
from pyspark.sql.functions import col
from pyspark.sql.functions import udf
from pyspark.sql.types import ArrayType, IntegerType, StringType
def preprocess(text):
    text = text.lower()
    text = text.strip()
    text = re.compile('<.*?>').sub('', text)
    text = re.compile('[%s]' % re.escape(string.punctuation)).sub(' ', text)
    text = re.sub(r'\s+', ' ', text)
    text = re.sub(r'\[[0-9]*\]', ' ', text)
    text = re.sub(r'[^\w\s]', '', text.lower().strip())
    text = re.sub(r'\d', ' ', text)
    text = re.sub(r'\s+', ' ', text)
    return text
# LEMMATIZATION
# Initialize the lemmatizer
wl = WordNetLemmatizer()
stop_words = set(stopwords.words('english'))
def remove_stopwords(text):
    text = [i for i in text.split() if i not in stop_words]
    return text
def lemmatizer(string):
    from nltk.corpus import wordnet  # imported here so the pickled UDF closure doesn't capture it

    def get_wordnet_pos(tag):
        if tag.startswith('J'):
            return wordnet.ADJ
        elif tag.startswith('V'):
            return wordnet.VERB
        elif tag.startswith('N'):
            return wordnet.NOUN
        elif tag.startswith('R'):
            return wordnet.ADV
        else:
            return wordnet.NOUN

    word_pos_tags = nltk.pos_tag(word_tokenize(string))  # Get part-of-speech tags
    a = [wl.lemmatize(word, get_wordnet_pos(tag)) for word, tag in word_pos_tags]  # Map the POS tag and lemmatize the word/token
    return " ".join(a)
# Final function
def finalpreprocess(string):
    return lemmatizer(' '.join(remove_stopwords(preprocess(string))))
spark = SparkSession.builder.getOrCreate()
text = 'Ram and Bheem are buddies. They (both) like <b>running</b>. They got better at it over the weekend'
test = pd.DataFrame({"test": [text]})
sdf = spark.createDataFrame(test)
udf_txt_clean = udf(lambda x: finalpreprocess(x), StringType())
sdf.withColumn("cleaned_text", udf_txt_clean(col("test"))).select("test", "cleaned_text").show(10, False)
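If everything is wired up correctly, the cleaned_text column for the sample sentence should match the result of the local test above: ram bheem buddy like run get well weekend.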
Apologies for the lengthy post. I have been trying to wrap my head around mock and MagicMock and keep getting confused, hence this post. I know several questions and pages have been written on this, but I am still not able to wrap my head around it.
My Setup:
All the test code and the two module files live under one folder, mymodule.
my_module_1.py contains:
class MyOuterClass(object):
    class MyInnerClass(object):
        attribute1: str
        attribute2: str
        attribute3: str

    def get(self) -> MyInnerClass:
        '''
        pseudocode:
        1. a call to an AWS service is made
        2. the output from the call in step 1 is used to set the attributes of MyInnerClass
        3. return the MyInnerClass instance
        '''
I use MyOuterClass in another file (my_module_2.py) to set some values and return a string, as follows:
class MyModule2():
    def get_foo(self, some_boolean_predicate):
        if some_boolean_predicate:
            temp = my_module_1.MyOuterClass().get()
            statement = f'''
            WITH (
                BAR (
                    FIELD_1 = '{temp.attribute1}',
                    FIELD_2 = '{temp.attribute2}',
                    FIELD_3 = '{temp.attribute3}'
                )
            )
            '''
        else:
            statement = ''
        return statement
I want to write the unit tests for my_module_2.py and test the function get_foo.
How I am writing the tests (or planning to):
a test file named test_my_module2.py
I started by creating a pytest fixture for MyOuterClass's get function as follows, since I will be reusing it in my other tests:
@pytest.fixture
def mock_get(mocker: MockerFixture) -> MagicMock:
    return mocker.patch.object(MyOuterClass, 'get')
Finally, I proceeded to use this fixture in my test as follows:
from unittest import mock
from unittest.mock import MagicMock, Mock, patch, PropertyMock
import pytest
from pytest_mock import MockerFixture
from my_module.my_module_1 import MyOuterClass

def test_should_get_from_inner_class(mock_get):
    # mocked call to get is made
    output = mock_get.get
    # update the values for the InnerClass's attributes here
    output.attribute1.side_effect = 'attr1'
    output.attribute2.side_effect = 'attr2'
    output.attribute3.side_effect = 'attr3'
    mock_output_str = '''
    WITH (
        BAR (
            FIELD_1 = 'attr1',
            FIELD_2 = 'attr2',
            FIELD_3 = 'attr3'
        )
    )
    '''
    module2Obj = MyModule2()
    response = module2Obj.get_foo(some_boolean_predicate=True)
    # the following assertion passes
    assert mock_get.get.called_once()
    # I would like to match `response` to the mock_output_str above
    assert response == mock_output_str
But, as you might have guessed, the assertion failed, and I know I am comparing completely different types, since I see errors such as:
FAILED [100%]
WITH (
    BAR (
        FIELD_1 = '<MagicMock name='get().attribute1' id='4937943120'>',
        FIELD_2 = '<MagicMock name='get().attribute2' id='4937962976'>',
        FIELD_3 = '<MagicMock name='get().attribute3' id='4937982928'>'
    )
)
Thank you for being patient with me till here. I know it's a really lengthy post, but I've been stuck on this for a few days, so I ended up creating a post here.
How do I validate the mock's value against mock_output_str?
Yes! The hint was in @gold_cy's answer: I was only calling my mock and never setting its values. This is what my test case ended up looking like:
mock_obj = MyOuterClass.MyInnerClass()
mock_obj.attribute1 = 'some-1'
mock_obj.attribute2 = 'some-2'
mock_obj.attribute3 = 'some-3'
mock_get.return_value = mock_obj
Once my mock was set up properly, the validation became easy. Thank you!
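For completeness, here is a rough sketch of the full test with that fix applied (assuming the mock_get fixture and imports from the question; the exact string comparison may need to tolerate whitespace differences, so the sketch checks content instead):
# A sketch, not the exact final test: mock_get patches MyOuterClass.get,
# so get_foo() receives our pre-populated MyInnerClass instance.
def test_should_get_from_inner_class(mock_get):
    mock_obj = MyOuterClass.MyInnerClass()
    mock_obj.attribute1 = 'attr1'
    mock_obj.attribute2 = 'attr2'
    mock_obj.attribute3 = 'attr3'
    mock_get.return_value = mock_obj

    response = MyModule2().get_foo(some_boolean_predicate=True)

    mock_get.assert_called_once()
    # compare content rather than exact whitespace
    assert "FIELD_1 = 'attr1'" in response
    assert "FIELD_2 = 'attr2'" in response
    assert "FIELD_3 = 'attr3'" in response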
Trying to receive input from WCT_Control into WCT_DataPull
Can't figure out how to get the data into WCT_DataPull to perform an action with it. I think I am going about this backwards, but I also think I have been staring at it too long.
Essentially, the user enters the information necessary into a GUI to connect to a specific SQL table (predetermined), and then the app saves the data in the table and outputs it as a csv file backup.
I want the user to click the submit button and have that create the backup. At this point, when I click the button, it stores all the data in the variables (if I put a print statement in, I see the correct values), but I can't seem to figure out how to get the variables to WCT_DataPull, where the backup creation action is performed.
WCT_Control
from PyQt5.QtWidgets import *
from WCT_View import Ui_MainWindow

class Controller(QMainWindow, Ui_MainWindow):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        self.run.clicked.connect(lambda: self.submit())

    def submit(self):
        server = self.server_entry.text()
        database = self.data_entry.text()
        station = self.station_entry.text()
        app = self.app_entry.text()
        backup_name = self.filename_entry.text()
        self.server_entry.setText('')
        self.data_entry.setText('')
        self.station_entry.setText('')
        self.app_entry.setText('')
        self.filename_entry.setText('')
        return server, database, station, app, backup_name
WCT_DataPull
from WCT_Control import *
import pyodbc
import csv

pull_data = Controller()

def write_bak():
    driver = 'ODBC Driver 17 for SQL Server'
    serv, data, stat, app, bak_name = pull_data.submit()
    conn = pyodbc.connect('DRIVER={0};SERVER={1};DATABASE={2};Trusted_Connection=yes'.format(driver, serv, data))
    cursor = conn.cursor()
    rows = cursor.execute("""
        select DnsName, PackageName, Code, Value from WorkstationApplicationSettings
        where DnsName=? and PackageName=?
        """, stat, app).fetchall()
    for row in rows:
        print(row.PackageName, ':', row.Code, ':', row.Value)
    with open(bak_name, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(rows)
You just have to do things the opposite way: instead of importing the PyQt5 script in the WCT_DataPull script, use the WCT_DataPull function in the PyQt5 script.
WCT_Control
from PyQt5.QtWidgets import *
from WCT_View import Ui_MainWindow
from WCT_DataPull import write_bak

class Controller(QMainWindow, Ui_MainWindow):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.setupUi(self)
        self.run.clicked.connect(lambda: self.submit())

    def submit(self):
        server = self.server_entry.text()
        database = self.data_entry.text()
        station = self.station_entry.text()
        app = self.app_entry.text()
        backup_name = self.filename_entry.text()
        self.server_entry.setText('')
        self.data_entry.setText('')
        self.station_entry.setText('')
        self.app_entry.setText('')
        self.filename_entry.setText('')
        write_bak(server, database, station, app, backup_name)
WCT_DataPull
import pyodbc
import csv

def write_bak(serv, data, stat, app, bak_name):
    driver = 'ODBC Driver 17 for SQL Server'
    conn = pyodbc.connect('DRIVER={0};SERVER={1};DATABASE={2};Trusted_Connection=yes'.format(driver, serv, data))
    cursor = conn.cursor()
    rows = cursor.execute("""
        select DnsName, PackageName, Code, Value from WorkstationApplicationSettings
        where DnsName=? and PackageName=?
        """, stat, app).fetchall()
    for row in rows:
        print(row.PackageName, ':', row.Code, ':', row.Value)
    with open(bak_name, 'w', newline='') as f:
        writer = csv.writer(f)
        writer.writerows(rows)
Also, make sure to write the code that actually runs WCT_Control through a QApplication; a sketch follows.
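For reference, a minimal entry point might look like this (a sketch, assuming Controller lives in WCT_Control.py):
import sys
from PyQt5.QtWidgets import QApplication
from WCT_Control import Controller

if __name__ == '__main__':
    app = QApplication(sys.argv)  # every PyQt5 app needs exactly one QApplication
    window = Controller()
    window.show()
    sys.exit(app.exec_())  # start the event loop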
I used an AWS Glue custom code transform and wrote the following code.
def MyTransform(glueContext, dfc) -> DynamicFrameCollection:
    from pyspark.sql.functions import col, lit
    selected = dfc.select(list(dfc.keys())[0]).toDF()
    selected = selected.withColumn('item2', lit(col('item1')))
    results = DynamicFrame.fromDF(selected, glueContext, "results")
    return DynamicFrameCollection({"results": results}, glueContext)
I got the following error.
AnalysisException: cannot resolve '`item1`' given input columns: [];
You don't need to wrap col('item1') in lit(). lit is used to create a literal (static) value, while col refers to an existing column. It depends on what exactly you're trying to achieve, but this change will at least fix your error:
selected = selected.withColumn('item2', col('item1'))
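To illustrate the difference, here is a minimal standalone sketch (hypothetical DataFrame and column names, not your Glue job):
from pyspark.sql import SparkSession
from pyspark.sql.functions import col, lit

spark = SparkSession.builder.getOrCreate()
df = spark.createDataFrame([('a',), ('b',)], ['item1'])
df = df.withColumn('item2', col('item1'))  # col: reference/copy an existing column
df = df.withColumn('source', lit('glue'))  # lit: add a constant value to every row
df.show()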
So I had a much bigger problem, but I sorted that out. Now I have this error:
pygame 2.0.1 (SDL 2.0.14, Python 3.7.9)
Hello from the pygame community. https://www.pygame.org/contribute.html
Traceback (most recent call last):
File "c:/Users/danku/jarvis.py", line 16, in
engine = pyttsx3.init('sapi5')
AttributeError: module 'pyttsx3' has no attribute 'init'
I tried kind of everything (installed pygame, pypiwin32, pywintypes) but I can't figure it out. Here is my beloved code (don't laugh, it's Jarvis code):
# alap
import pyttsx3
import datetime
import speech_recognition as sr
import wikipedia
import webbrowser
import os
import pywhatkit
import pyjokes
import subprocess
import pywintypes
import win32com.client
import pygame

engine = pyttsx3.init('sapi5')

def speak(audio):
    engine.say(audio)
    engine.runAndWait()

def time():
    Time = datetime.datetime.now().strftime("%H:%M:%S")
    speak(Time)

def date():
    year = int(datetime.datetime.now().year)
    month = int(datetime.datetime.now().month)
    date = int(datetime.datetime.now().day)
    speak(date)
    speak(month)
    speak(year)

def wishme():
    speak("Welcome back sir! All system are ready for work!")
    speak("the current time is")
    time()
    speak("The current date is")
    date()
    hour = datetime.datetime.now().hour
    if hour >= 6 and hour < 12:
        speak("Good morning sir!")
    elif hour >= 12 and hour < 18:
        speak("Good afternoon sir!")
    elif hour >= 18 and hour < 24:
        speak("Good evening sir!")
    else:
        speak("Good night sir!")
    speak("Jarvis at your service. Please tell me how can i help you?")

def takeCommand():
    r = sr.Recognizer()
    with sr.Microphone() as source:
        print("listening...")
        r.pause_threshold = 1
        audio = r.listen(source)
    try:
        print("Recognizing...")
        query = r.recognize_google(audio, language='en-US')
        print(query)
    except Exception as e:
        print(e)
        speak("Say that again")
        return "none"
    return query

if __name__ == "__main__":
    wishme()
    while True:
        query = takeCommand().lower()
        if 'wikipedia' in query:  # if wikipedia is found in the query, this block will be executed
            speak('Searching Wikipedia...')
            query = query.replace("wikipedia", "")
            results = wikipedia.summary(query, sentences=2)
            speak("According to Wikipedia")
            print(results)
            speak(results)
Also, I'm using Python 2.71 and the latest version of pip.
This normally happens because you have named your Python file the same as a module you are importing, causing a circular reference. Try changing the name of your file; that should resolve the issue. A quick way to check is sketched below.
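As a quick diagnostic (a sketch; the file name jarvis.py comes from your traceback, everything else is generic):
import pyttsx3
# If this prints a path to one of your own files (e.g. a pyttsx3.py sitting
# next to jarvis.py) instead of site-packages, your script is shadowing the
# real library, which is why pyttsx3.init is missing.
print(pyttsx3.__file__)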
from locust import HttpLocust, TaskSet, task

class ExampleTask(TaskSet):
    csvfile = open('failed.csv', 'r')
    data = csvfile.readlines()
    bakdata = list(data)

    @task
    def fun(self):
        try:
            value = self.data.pop().split(',')
            print('------This is the value {}'.format(value[0]))
        except IndexError:
            self.data = list(self.bakdata)

class ExampleUser(HttpLocust):
    host = 'https://www.google.com'
    task_set = ExampleTask
Here is my csv file:
516,True,success
517,True,success
518,True,success
519,True,success
520,True,success
521,True,success
522,True,success
523,True,success
524,True,success
525,True,success
526,True,success
527,True,success
528,True,success
529,True,success
530,True,success
531,True,success
532,True,success
533,True,success
534,True,success
535,True,success
536,True,success
537,True,success
538,True,success
539,True,success
540,True,success
541,True,success
542,True,success
543,True,success
544,True,success
545,True,success
546,True,success
547,True,success
548,True,success
549,True,success
550,True,success
551,True,success
552,True,success
553,True,success
554,True,success
555,True,success
556,True,success
557,True,success
558,True,success
559,True,success
After the csv file ends, Locust does not take a unique value; it takes the same value for all the simulated users.
I'm not 100% sure, but I think your problem is this line:
self.data = list(self.bakdata)
This will give each User instance a different copy of the list.
It should work if you change it to assign to the class attribute instead, so the reset is shared by all users:
ExampleTask.data = list(self.bakdata)
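In context, the fixed TaskSet would look roughly like this (a sketch based on your code):
class ExampleTask(TaskSet):
    # class-level state shared by every simulated user
    csvfile = open('failed.csv', 'r')
    data = csvfile.readlines()
    bakdata = list(data)

    @task
    def fun(self):
        try:
            value = self.data.pop().split(',')
            print('------This is the value {}'.format(value[0]))
        except IndexError:
            # reset on the class, not the instance, so the list stays shared
            ExampleTask.data = list(self.bakdata)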
Or you can use locust-plugins's CSVReader, see the example here:
https://github.com/SvenskaSpel/locust-plugins/blob/master/examples/csvreader_ex.py