AttributeError: 'NoneType' object has no attribute 'setCallSite' in PySpark after IndexedRowMatrix columnSimilarities()

I'm working on code that executed correctly with this dataframe before, but this time it fails with an error. (The only difference is that this time I called persist() on the dataframe.)
simMat = IndexedRMat.columnSimilarities()
executes correctly, but then this part:
columns = ['product1', 'product2', 'sim']
vals = simMat.entries.map(lambda e: (e.i, e.j, e.value)).collect()
dfsim = spark.createDataFrame(vals, columns)
generates this error:
AttributeError Traceback (most recent call last)
<ipython-input-100-11502084c71b> in <module>()
1 columns = ['product1', 'product2', 'sim']
----> 2 vals = simMat.entries.map(lambda e: (e.i, e.j, e.value)).collect()
3 dfsim = spark.createDataFrame(vals, columns)
/opt/spark-2.3.0-SNAPSHOT-bin-spark-master/python/pyspark/rdd.pyc in collect(self)
806 to be small, as all the data is loaded into the driver's memory.
807 """
--> 808 with SCCallSiteSync(self.context) as css:
809 port = self.ctx._jvm.PythonRDD.collectAndServe(self._jrdd.rdd())
810 return list(_load_from_socket(port, self._jrdd_deserializer))
/opt/spark-2.3.0-SNAPSHOT-bin-spark-master/python/pyspark/traceback_utils.pyc in __enter__(self)
70 def __enter__(self):
71 if SCCallSiteSync._spark_stack_depth == 0:
---> 72 self._context._jsc.setCallSite(self._call_site)
73 SCCallSiteSync._spark_stack_depth += 1
74
AttributeError: 'NoneType' object has no attribute 'setCallSite'
What does it mean? I'm new to Spark and couldn't find an explanation for this type of error.
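Going by the last frame of the traceback, self._context._jsc is None, which means the JVM-side SparkContext behind the RDD has been stopped (for example after sc.stop(), an exception that killed the JVM, or another notebook stopping a shared context). A hedged diagnostic sketch, reusing the names from the question:
from pyspark.sql import SparkSession
# If this prints None, the context backing simMat's RDD is dead,
# which is exactly what makes setCallSite fail on a NoneType.
print(spark.sparkContext._jsc)
# Recreate the session and recompute IndexedRMat before collecting again;
# RDDs built against the stopped context cannot be reused.
spark = SparkSession.builder.getOrCreate()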

Related

AttributeError: 'Index' object has no attribute 'index'

I have saved and loaded a simple XGBClassifier(random_state=100) model, trained on a heart-disease prediction dataset (target variable mapped to 0s and 1s). I am trying to create a dtreeviz plot for it:
from dtreeviz.trees import *
viz = dtreeviz(loaded_model, X_train, y_train, tree_index=10, feature_names=X_train.columns,
               class_names=['Absence', 'Presence'], target_name='Heart Disease')
viz.view()
However, I am getting the following error:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-18-2a3024516ed1> in <module>
1 from dtreeviz.trees import *
----> 2 viz = dtreeviz(loaded_model, X_train, y_train, tree_index = 10, feature_names = X_train.columns,
3 class_names = ['Absence', 'Presence'], target_name = 'Heart Disease')
4 viz.view()
~\anaconda3\lib\site-packages\dtreeviz\trees.py in dtreeviz(tree_model, x_data, y_data, feature_names, target_name, class_names, tree_index, precision, orientation, instance_orientation, show_root_edge_labels, show_node_labels, show_just_path, fancy, histtype, highlight_path, X, max_X_features_LR, max_X_features_TD, depth_range_to_display, label_fontsize, ticks_fontsize, fontname, title, title_fontsize, colors, cmap, scale)
816 if shadow_tree.is_classifier():
817 nbins = get_num_bins(histtype, n_classes)
--> 818 node_heights = shadow_tree.get_split_node_heights(X_data, y_data, nbins=nbins)
819
820 internal = []
~\anaconda3\lib\site-packages\dtreeviz\models\shadow_decision_tree.py in get_split_node_heights(self, X_train, y_train, nbins)
273 for node in self.internal:
274 # print(node.feature_name(), node.id)
--> 275 X_feature = X_train[:, node.feature()]
276 overall_feature_range = (np.min(X_feature), np.max(X_feature))
277 # print(f"range {overall_feature_range}")
~\anaconda3\lib\site-packages\dtreeviz\models\shadow_decision_tree.py in feature(self)
506 """Returns feature index used at this node"""
507
--> 508 return self.shadow_tree.get_node_feature(self.id)
509
510 def feature_name(self) -> (str, None):
~\anaconda3\lib\site-packages\dtreeviz\models\xgb_decision_tree.py in get_node_feature(self, id)
76 feature_name = self._get_nodes_values("Feature")[id]
77 try:
---> 78 return self.feature_names.index(feature_name)
79 except ValueError as error:
80 return self.__class__.NO_FEATURE
AttributeError: 'Index' object has no attribute 'index'
I have been trying to resolve this since yesterday; however, I cannot find any solution.
Kindly help!
Thanks,
Neel
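A hedged fix, based on the last traceback frame: self.feature_names.index(feature_name) is a list-style lookup, but X_train.columns is a pandas Index, which has no .index method. Passing a plain list should satisfy the call:
viz = dtreeviz(loaded_model, X_train, y_train, tree_index=10,
               feature_names=list(X_train.columns),  # a list, not a pandas Index
               class_names=['Absence', 'Presence'], target_name='Heart Disease')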

Error: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _th_index_select

I have the following code, taken directly from here with some minor modifications:
import pandas as pd
import torch
import json
from transformers import T5Tokenizer, T5ForConditionalGeneration, T5Config
from torch import cuda
df = pd.read_pickle('df_final.pkl')
model = T5ForConditionalGeneration.from_pretrained('t5-base')
tokenizer = T5Tokenizer.from_pretrained('t5-base')
device = 'cuda' if cuda.is_available() else 'cpu'
text = ''.join(df[(df['col1'] == 'type') & (df['col2'] == 2)].col3.to_list())
preprocess_text = text.strip().replace("\n","")
t5_prepared_Text = "summarize: "+preprocess_text
#print ("original text preprocessed: \n", preprocess_text)
tokenized_text = tokenizer.encode(t5_prepared_Text, return_tensors="pt", max_length = 500000).to(device)
# summarize
summary_ids = model.generate(tokenized_text,
                             num_beams=4,
                             no_repeat_ngram_size=2,
                             min_length=30,
                             max_length=100,
                             early_stopping=True)
output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
print ("\n\nSummarized text: \n",output)
When executing the model.generate() part I get an error like this:
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-12-e8e9819a85dc> in <module>
12 min_length=30,
13 max_length=100,
---> 14 early_stopping=True).to(device)
15
16 output = tokenizer.decode(summary_ids[0], skip_special_tokens=True)
~\Anaconda3\lib\site-packages\torch\autograd\grad_mode.py in decorate_no_grad(*args, **kwargs)
47 def decorate_no_grad(*args, **kwargs):
48 with self:
---> 49 return func(*args, **kwargs)
50 return decorate_no_grad
51
~\Anaconda3\lib\site-packages\transformers\generation_utils.py in generate(self, input_ids, max_length, min_length, do_sample, early_stopping, num_beams, temperature, top_k, top_p, repetition_penalty, bad_words_ids, bos_token_id, pad_token_id, eos_token_id, length_penalty, no_repeat_ngram_size, num_return_sequences, attention_mask, decoder_start_token_id, use_cache, **model_specific_kwargs)
383 encoder = self.get_encoder()
384
--> 385 encoder_outputs: tuple = encoder(input_ids, attention_mask=attention_mask)
386
387 # Expand input ids if num_beams > 1 or num_return_sequences > 1
~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
~\Anaconda3\lib\site-packages\transformers\modeling_t5.py in forward(self, input_ids, attention_mask, encoder_hidden_states, encoder_attention_mask, inputs_embeds, head_mask, past_key_value_states, use_cache, output_attentions, output_hidden_states, return_dict)
701 if inputs_embeds is None:
702 assert self.embed_tokens is not None, "You have to intialize the model with valid token embeddings"
--> 703 inputs_embeds = self.embed_tokens(input_ids)
704
705 batch_size, seq_length = input_shape
~\Anaconda3\lib\site-packages\torch\nn\modules\module.py in __call__(self, *input, **kwargs)
539 result = self._slow_forward(*input, **kwargs)
540 else:
--> 541 result = self.forward(*input, **kwargs)
542 for hook in self._forward_hooks.values():
543 hook_result = hook(self, input, result)
~\Anaconda3\lib\site-packages\torch\nn\modules\sparse.py in forward(self, input)
112 return F.embedding(
113 input, self.weight, self.padding_idx, self.max_norm,
--> 114 self.norm_type, self.scale_grad_by_freq, self.sparse)
115
116 def extra_repr(self):
~\Anaconda3\lib\site-packages\torch\nn\functional.py in embedding(input, weight, padding_idx, max_norm, norm_type, scale_grad_by_freq, sparse)
1482 # remove once script supports set_grad_enabled
1483 _no_grad_embedding_renorm_(weight, input, max_norm, norm_type)
-> 1484 return torch.embedding(weight, input, padding_idx, scale_grad_by_freq, sparse)
1485
1486
RuntimeError: Expected object of device type cuda but got device type cpu for argument #1 'self' in call to _th_index_select
I've searched this error and found some other threads like this one and this one, but they didn't help me much since their cases seem to be completely different. In my case there are no custom instances or classes created, so I don't know how to fix this or where the error comes from.
Could you please tell me where the error is coming from and how I could fix it?
Thank you very much in advance.
Try explicitly moving your model to the GPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
model = T5ForConditionalGeneration.from_pretrained('t5-base').to(device)
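That keeps the embedding weights on the same device as tokenized_text, which was already moved with .to(device), so the embedding lookup in the last traceback frame sees matching devices. As a side note, the .to(device) appended to the generate() call in the traceback isn't needed: generate() returns token ids you can pass straight to tokenizer.decode().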

Autoencoder in fastai

I'm trying to build an autoencoder with fast.ai version 1.0.52 and I'm struggling with how to set the labels to be equal to the original images. I was
following this blog post: https://alanbertl.com/autoencoder-with-fast-ai/
I replaced ImageItemList in the original code with ImageList since it was changed in the latest fastai versions.
%reload_ext autoreload
%autoreload 2
%matplotlib inline
from fastai.imports import *
from fastai.vision import *
from fastai.data_block import *
from fastai.basic_train import *
import pandas as pd
x = np.random.randint(256, size=(1000, 16384))
x = x/255
x = x.reshape(-1,128,128)
x = np.stack([x,x,x],1)
x.shape
class ArraysImageList(ImageList, FloatList):
    def __init__(self, items:Iterator, log:bool=False, **kwargs):
        if isinstance(items, ItemList):
            items = items.items
        super(FloatList, self).__init__(items, **kwargs)
    def get(self, i):
        return Tensor(super(FloatList, self).get(i).astype('float32'))
x_il = ArraysImageList(x)
x_ils = x_il.split_by_rand_pct()
lls = x_ils.label_from_lists(x_ils.train, x_ils.valid)
Here's the error message I get.
---------------------------------------------------------------------------
RuntimeError Traceback (most recent call last)
<ipython-input-33-cbada9e18af9> in <module>
----> 1 lls = x_ils.label_from_lists(x_ils.train, x_ils.valid)
~/.local/lib/python3.6/site-packages/fastai/data_block.py in label_from_lists(self, train_labels, valid_labels, label_cls, **kwargs)
484 self.valid = self.valid._label_list(x=self.valid, y=self.train.y.new(valid_labels, **kwargs))
485 self.__class__ = LabelLists
--> 486 self.process()
487 return self
488
~/.local/lib/python3.6/site-packages/fastai/data_block.py in process(self)
520 "Process the inner datasets."
521 xp,yp = self.get_processors()
--> 522 for ds,n in zip(self.lists, ['train','valid','test']): ds.process(xp, yp, name=n)
523 #progress_bar clear the outputs so in some case warnings issued during processing disappear.
524 for ds in self.lists:
~/.local/lib/python3.6/site-packages/fastai/data_block.py in process(self, xp, yp, name)
683 def process(self, xp:PreProcessor=None, yp:PreProcessor=None, name:str=None):
684 "Launch the processing on `self.x` and `self.y` with `xp` and `yp`."
--> 685 self.y.process(yp)
686 if getattr(self.y, 'filter_missing_y', False):
687 filt = array([o is None for o in self.y.items])
~/.local/lib/python3.6/site-packages/fastai/data_block.py in process(self, processor)
73 if processor is not None: self.processor = processor
74 self.processor = listify(self.processor)
---> 75 for p in self.processor: p.process(self)
76 return self
77
~/.local/lib/python3.6/site-packages/fastai/data_block.py in process(self, ds)
334
335 def process(self, ds):
--> 336 if self.classes is None: self.create_classes(self.generate_classes(ds.items))
337 ds.classes = self.classes
338 ds.c2i = self.c2i
~/.local/lib/python3.6/site-packages/fastai/data_block.py in generate_classes(self, items)
391 for c in items: classes = classes.union(set(c))
392 classes = list(classes)
--> 393 classes.sort()
394 return classes
395
RuntimeError: bool value of Tensor with more than one value is ambiguous
Ultimately, I want to read images using a dataframe with image paths. So I also tried the following:
from sklearn.model_selection import GroupKFold
cv = GroupKFold(n_splits=5)
train_inds, valid_inds = next(cv.split(iso_image_df.group, groups=iso_image_df.group))
img_lists = (ImageList.from_df(iso_image_df, resized_img_path, cols=0).split_by_idxs(train_inds, valid_inds))
src = img_lists.label_from_lists(img_lists.train, img_lists.valid)
data = (src.databunch(bs = 32).normalize(imagenet_stats))
data.show_batch(rows=3, figsize=(10, 10))
Here I get the following error message:
---------------------------------------------------------------------------
AttributeError Traceback (most recent call last)
<ipython-input-146-2514de511e64> in <module>
----> 1 data.show_batch(rows=3, figsize=(10, 10))
~/.local/lib/python3.6/site-packages/fastai/basic_data.py in show_batch(self, rows, ds_type, reverse, **kwargs)
190 #TODO: get rid of has_arg if possible
191 if has_arg(self.train_ds.y.reconstruct, 'x'):
--> 192 ys = [self.train_ds.y.reconstruct(grab_idx(y, i), x=x) for i,x in enumerate(xs)]
193 else : ys = [self.train_ds.y.reconstruct(grab_idx(y, i)) for i in range(n_items)]
194 self.train_ds.x.show_xys(xs, ys, **kwargs)
~/.local/lib/python3.6/site-packages/fastai/basic_data.py in <listcomp>(.0)
190 #TODO: get rid of has_arg if possible
191 if has_arg(self.train_ds.y.reconstruct, 'x'):
--> 192 ys = [self.train_ds.y.reconstruct(grab_idx(y, i), x=x) for i,x in enumerate(xs)]
193 else : ys = [self.train_ds.y.reconstruct(grab_idx(y, i)) for i in range(n_items)]
194 self.train_ds.x.show_xys(xs, ys, **kwargs)
~/.local/lib/python3.6/site-packages/fastai/data_block.py in reconstruct(self, t, x)
89 def reconstruct(self, t:Tensor, x:Tensor=None):
90 "Reconstruct one of the underlying item for its data `t`."
---> 91 return self[0].reconstruct(t,x) if has_arg(self[0].reconstruct, 'x') else self[0].reconstruct(t)
92
93 def new(self, items:Iterator, processor:PreProcessors=None, **kwargs)->'ItemList':
AttributeError: 'Image' object has no attribute 'reconstruct'
Any help is highly appreciated!
The lls are being used to create the databunch.
I've looked at it, and given the API changes in the fastai library, I created the databunch without using the lls that were causing the error:
bs = 64
db = (ImageImageList.from_folder(mnist)
        .split_by_folder()
        .label_from_func(get_y_fn)
        .databunch(bs=bs, num_workers=4))
EDIT: you'll need the get_y_fn; it is very simply defined
def get_y_fn(x): return x
The lls aren't used for anything else anyway.
This should fix your problem; let me know if it works for you.
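For completeness, a minimal setup sketch for the snippet above, assuming the small MNIST sample that ships with fastai v1 (the mnist path itself is not defined in the answer):
from fastai.vision import *
# Any image folder with train/valid subfolders works; MNIST_SAMPLE is just convenient.
mnist = untar_data(URLs.MNIST_SAMPLE)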

Torchtext AttributeError: 'Example' object has no attribute 'text_content'

I'm working with an RNN, using PyTorch & Torchtext. I've got a problem with building the vocab for my RNN. My code is as follows:
TEXT = Field(tokenize=tokenizer, lower=True)
LABEL = LabelField(dtype=torch.float)
trainds = TabularDataset(
    path='drive/{}'.format(TRAIN_PATH), format='tsv',
    fields=[
        ('label_start', LABEL),
        ('label_end', None),
        ('title', None),
        ('symbol', None),
        ('text_content', TEXT),
    ])
testds = TabularDataset(
    path='drive/{}'.format(TEST_PATH), format='tsv',
    fields=[
        ('text_content', TEXT),
    ])
TEXT.build_vocab(trainds, testds)
When I want to build vocab, I'm getting this annoying error:
AttributeError: 'Example' object has no attribute 'text_content'
I'm sure that there is no missing text_content attribute. I added a try/except in order to inspect this specific case:
try:
    print(len(trainds[i]))
except:
    print(trainds[i].text_content)
Surprisingly, I don't get any error and this specific print command shows:
['znana', 'okresie', 'masarni', 'walc', 'y', 'myśl', 'programie', 'sprawy', ...]
So it indicates that the text_content attribute is there. When I run this on a smaller dataset, it works like a charm; the problem occurs when I work with the proper data. I've run out of ideas. Maybe someone has had a similar case and can explain it.
My full traceback:
AttributeError Traceback (most recent call last)
<ipython-input-16-cf31866a07e7> in <module>()
155
156 if __name__ == "__main__":
--> 157 main()
158
<ipython-input-16-cf31866a07e7> in main()
117 break
118
--> 119 TEXT.build_vocab(trainds, testds)
120 print('zbudowano dla text')
121 LABEL.build_vocab(trainds)
/usr/local/lib/python3.6/dist-packages/torchtext/data/field.py in build_vocab(self, *args, **kwargs)
260 sources.append(arg)
261 for data in sources:
--> 262 for x in data:
263 if not self.sequential:
264 x = [x]
/usr/local/lib/python3.6/dist-packages/torchtext/data/dataset.py in __getattr__(self, attr)
152 if attr in self.fields:
153 for x in self.examples:
--> 154 yield getattr(x, attr)
155
156 @classmethod
AttributeError: 'Example' object has no attribute 'text_content'
This problem arises when the fields are not passed in the same order as they appear in the csv/tsv file. The order must be the same. Also check that no extra or missing fields are listed compared to the columns in the csv/tsv file.
I had the same problem.
The reason was that some rows in my input csv dataset were empty.
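For that case, a hedged pre-cleaning sketch (the file names are assumptions): drop fully empty rows before loading, so every Example gets all the declared fields.
import csv
with open('train.tsv', newline='', encoding='utf-8') as src, \
     open('train_clean.tsv', 'w', newline='', encoding='utf-8') as dst:
    writer = csv.writer(dst, delimiter='\t')
    for row in csv.reader(src, delimiter='\t'):
        if any(cell.strip() for cell in row):  # keep rows with any content
            writer.writerow(row)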

Convert nested list to DataFrame: PySpark

I tried to convert a nested list to a DataFrame by following the answers in this link:
List to DataFrame in pyspark
my_data =[['apple','ball','ballon'],['cat','camel','james'],['none','focus','cake']]
from pyspark.sql import Row
R = Row('ID', 'words')
spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()
But I obtain this error:
---------------------------------------------------------------------------
FileNotFoundError Traceback (most recent call last)
<ipython-input-147-780a8d7196df> in <module>()
----> 5 spark.createDataFrame([R(i, x) for i, x in enumerate(my_data)]).show()
F:\spark\spark\python\pyspark\sql\session.py in createDataFrame(self, data, schema, samplingRatio, verifySchema)
--> 689 rdd, schema = self._createFromLocal(map(prepare, data), schema)
F:\spark\spark\python\pyspark\sql\session.py in _createFromLocal(self, data, schema)
--> 424 return self._sc.parallelize(data), schema
F:\spark\spark\python\pyspark\context.py in parallelize(self, c, numSlices)
--> 484 jrdd = self._serialize_to_jvm(c, numSlices, serializer)
F:\spark\spark\python\pyspark\context.py in _serialize_to_jvm(self, data, parallelism, serializer)
--> 493 tempFile = NamedTemporaryFile(delete=False, dir=self._temp_dir)
~\Anaconda3\lib\tempfile.py in NamedTemporaryFile(mode, buffering, encoding, newline, suffix, prefix, dir, delete)
547 flags |= _os.O_TEMPORARY
548
--> 549 (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags, output_type)
550 try:
551 file = _io.open(fd, mode, buffering=buffering,
~\Anaconda3\lib\tempfile.py in _mkstemp_inner(dir, pre, suf, flags, output_type)
258 file = _os.path.join(dir, pre + name + suf)
259 try:
--> 260 fd = _os.open(file, flags, 0o600)
261 except FileExistsError:
262 continue # try again
FileNotFoundError: [Errno 2] No such file or directory: 'C:\\Users\\*****\\AppData\\Local\\Temp\\spark-e340269d-a29e-4b95-90d3-c424a04fcb0a\\pyspark-f7fce557-e11b-47c9-b7a5-81e72a360b36\\tmp7n0s97t2'
I was getting the same error from a Jupyter notebook with PySpark.
It worked after restarting the notebook kernel.
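If restarting doesn't help, a hedged workaround is pointing Spark's scratch space at a directory that is known to exist and be writable (the path below is an assumption):
from pyspark.sql import SparkSession
# spark.local.dir only takes effect for a fresh context,
# so stop any running session before doing this.
spark = (SparkSession.builder
         .config("spark.local.dir", "C:/spark-temp")
         .getOrCreate())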