PySpark image dimension reduction with PCA

I am using PySpark on AWS to extract image features:
ImageSchema.imageFields
img2vec = F.udf(lambda x: DenseVector(ImageSchema.toNDArray(x).flatten()),
                VectorUDT())
df_vec = df_cat.withColumn('original_vectors', img2vec("image"))
df_vec.show()
After standardizing the data:
standardizer = MinMaxScaler(inputCol="original_vectors",
                            outputCol="scaledFeatures",
                            min=-1.0,
                            max=1.0)
                            # withStd=True, withMean=True)
model_std = standardizer.fit(df_vec)
df_std = model_std.transform(df_vec)
df_std.show()
... when I apply PCA for dimension reduction, I receive an error that I have not been able to debug for a couple of weeks :(
(error screenshots: Error_1, Error_2)
Could you please help me to solve that?
I use PySpark spark-3.0.3-bin-hadoop2.7.

img2vec = F.udf(lambda x : Vectors.dense(x), VectorUDT())
df = df.withColumn("data_as_vector", img2vec("data_as_resized_array"))
standardizer = StandardScaler(withMean=True, withStd=True, inputCol="data_as_vector", outputCol="scaledFeatures")
For images, you first need to resize the image data with the code below, and you must use the resized image data:
from typing import Iterator
import numpy as np
import pandas as pd
from PIL import Image

def resize_img(img_data, resize=True):
    mode = 'RGBA' if (img_data.nChannels == 4) else 'RGB'
    img = Image.frombytes(mode=mode, data=img_data.data, size=[img_data.width, img_data.height])
    img = img.convert('RGB') if (mode == 'RGBA') else img
    img = img.resize([224, 224], resample=Image.Resampling.BICUBIC) if (resize) else img
    arr = convert_bgr_array_to_rgb_array(np.asarray(img))
    arr = arr.reshape([224*224*3]) if (resize) else arr.reshape([img_data.width*img_data.height*3])
    return arr
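The helper convert_bgr_array_to_rgb_array is not defined in the answer. Since Spark's ImageSchema stores pixel data in BGR (OpenCV) channel order, it presumably just reverses the channel axis; a minimal sketch of what it could look like (my assumption, not the original author's code):

def convert_bgr_array_to_rgb_array(img_array):
    # Reverse the last (channel) axis: BGR -> RGB
    return img_array[:, :, ::-1]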
def resize_image_udf(dataframe_batch_iterator: Iterator[pd.DataFrame]) -> Iterator[pd.DataFrame]:
    for dataframe_batch in dataframe_batch_iterator:
        dataframe_batch["data_as_resized_array"] = dataframe_batch.apply(resize_img, args=(True,), axis=1)
        dataframe_batch["data_as_array"] = dataframe_batch.apply(resize_img, args=(False,), axis=1)
        yield dataframe_batch
resized_df = df_image.select("image.*").mapInPandas(resize_image_udf, schema)
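The schema variable passed to mapInPandas is also not shown; it has to describe the original image columns plus the two new array columns. A rough sketch under that assumption (field names follow Spark's image schema, types are my guess):

from pyspark.sql.types import (StructType, StructField, StringType, IntegerType,
                               BinaryType, ArrayType)

schema = StructType([
    StructField("origin", StringType(), True),
    StructField("height", IntegerType(), True),
    StructField("width", IntegerType(), True),
    StructField("nChannels", IntegerType(), True),
    StructField("mode", IntegerType(), True),
    StructField("data", BinaryType(), True),
    StructField("data_as_resized_array", ArrayType(IntegerType()), True),
    StructField("data_as_array", ArrayType(IntegerType()), True),
])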
Then you can apply the StandardScaler and PCA:
model_std = standardizer.fit(df)
df = model_std.transform(df)
# algorithm
pca = PCA(k=n_components, inputCol='data_as_vector', outputCol='pcaFeatures')
model_pca = pca.fit(df)
# Transform the images
df = model_pca.transform(df)
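Once the PCA model is fitted, it can be worth checking how much variance the chosen k retains; a short sketch using PCAModel's explainedVariance attribute (n_components is whatever value was used above):

# Fraction of the total variance captured by each of the k components
print(model_pca.explainedVariance)
print("variance retained:", float(model_pca.explainedVariance.toArray().sum()))
df.select('pcaFeatures').show(5, truncate=False)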
I think I am too late to answer your question, sorry.

Related

Test Custom CrossValidator with origin CrossValidator of SparkML

I created a custom CrossValidator with SplitType as Random / Time-based / Stratified, based on this reference. When SplitType == Random, I use the original _kFold function of CrossValidator in pyspark.ml to test the result of my custom CrossValidator, with a _fit function as below.
I expected that when SplitType == Random the results would be close to what the original SparkML CrossValidator produces, but even after multiple runs the original SparkML CrossValidator always gives better results. Can anyone help me understand the reasons?
def _fit(self, dataset: DataFrame):
    est = self.getOrDefault(self.estimator)
    epm = self.getOrDefault(self.estimatorParamMaps)
    numModels = len(epm)
    eva = self.getOrDefault(self.evaluator)
    nFolds = self.getOrDefault(self.numFolds)
    metrics_all = [[0.0] * numModels for i in range(nFolds)]
    SplitType = self.getOrDefault(self.SplitType)
    pool = ThreadPool(processes=min(self.getParallelism(), numModels))
    subModels = None
    collectSubModelsParam = self.getCollectSubModels()
    if collectSubModelsParam:
        subModels = [[None for j in range(numModels)] for i in range(nFolds)]
    if SplitType == 'Random':
        datasets = self._kFold(dataset)
        print(SplitType)
    elif SplitType == 'Stratified':
        datasets = self.stratify_data(dataset)
        print(SplitType)
    for i in range(nFolds):
        validation = datasets[i][1].cache()
        train = datasets[i][0].cache()
        tasks = map(
            inheritable_thread_target,
            _parallelFitTasks(est, train, eva, validation, epm, collectSubModelsParam),
        )
        for j, metric, subModel in pool.imap_unordered(lambda f: f(), tasks):
            metrics_all[i][j] = metric
            if collectSubModelsParam:
                assert subModels is not None
                subModels[i][j] = subModel
        validation.unpersist()
        train.unpersist()
    metrics, std_metrics = CrossValidator._gen_avg_and_std_metrics(metrics_all)
    if eva.isLargerBetter():
        bestIndex = np.argmax(metrics)
    else:
        bestIndex = np.argmin(metrics)
    bestModel = est.fit(dataset, epm[bestIndex])
    return self._copyValues(
        CrossValidatorModel(bestModel, metrics, cast(List[List[Model]], subModels), std_metrics)
    )
With the original CrossValidator, the result is always more stable and better, even after multiple runs:
lr = LogisticRegression(featuresCol = 'features', labelCol = 'label')
grid = ParamGridBuilder().addGrid(lr.maxIter, [0, 1, 5]).build()
evaluator = BinaryClassificationEvaluator()
scv = CustomCrossValidator(
    estimator=lr,
    estimatorParamMaps=grid,
    evaluator=evaluator,
    numFolds=4,
    SplitType='Random'
)
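For reference, the baseline run with the stock SparkML CrossValidator is not shown in the post; it presumably looks something like this (the seed value and the name train_df are my assumptions):

from pyspark.ml.tuning import CrossValidator

cv = CrossValidator(
    estimator=lr,
    estimatorParamMaps=grid,
    evaluator=evaluator,
    numFolds=4,
    seed=42  # assumed; fixing the seed makes the random fold assignment reproducible
)
cv_model = cv.fit(train_df)  # train_df: the same training DataFrame passed to scv
print(cv_model.avgMetrics)
print(cv_model.stdMetrics)  # available in recent Spark versions (3.3+)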
Origin CrossValidator:

avgMetrics   stdMetrics
0.5          0
0.8457       0.0425
0.8457       0.0425
My Custom CrossValidator with Random SplitType:

avgMetrics   stdMetrics
0.5          0
0.8278       0.0937
0.8278       0.0937
Can anyone help explain the reasons?
Many thanks.

How to use nn.MultiheadAttention together with nn.LSTM?

I'm trying to build a PyTorch network for image captioning.
Currently I have a working network with an Encoder and a Decoder, and I want to add an nn.MultiheadAttention layer to it (to be used as self-attention).
Currently my decoder looks like this:
class Decoder(nn.Module):
    def __init__(self, hidden_size, embed_dim, vocab_size, layers=1):
        super(Decoder, self).__init__()
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.layers = layers
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.lstm = nn.LSTM(input_size=embed_dim, hidden_size=hidden_size, batch_first=True, num_layers=layers)
        # self.attention = nn.MultiheadAttention(hidden_size, num_heads=1, batch_first=True)
        self.fc = nn.Linear(hidden_size, self.vocab_size)

    def init_hidden(self, batch_size):
        h = torch.zeros(self.layers, batch_size, self.hidden_size).to(device)
        c = torch.zeros(self.layers, batch_size, self.hidden_size).to(device)
        return h, c

    def forward(self, features, caption):
        batch_size = caption.size(0)
        caption_size = caption.size(1)
        h, c = self.init_hidden(batch_size)
        embeddings = self.embedding(caption)
        lstm_input = torch.cat((features.unsqueeze(1), embeddings[:, :-1, :]), dim=1)
        output, (h, c) = self.lstm(lstm_input, (h, c))
        # output, _ = self.attention(output, output, output)
        output = self.fc(output)
        return output

    def generate_caption(self, features, max_caption_size=MAX_LEN):
        h, c = self.init_hidden(1)
        caption = ""
        embeddings = features.unsqueeze(1)
        for i in range(max_caption_size):
            output, (h, c) = self.lstm(embeddings, (h, c))
            # output, _ = self.attention(output, output, output)
            output = self.fc(output)
            _, word_index = torch.max(output, dim=2)  # take the word with highest probability
            if word_index == vocab.get_index(END_WORD):
                break
            caption += vocab.get_word(word_index) + " "
            embeddings = self.embedding(torch.LongTensor([word_index]).view(1, -1).to(device))
        return caption
and it gives relatively good results for image captioning.
I want to add the commented-out lines so the model will use attention. But when I do that, the model breaks: although the loss becomes extremely low (decreasing from 2.7 to 0.2 during training instead of from 2.7 to 1 without attention), caption generation does not really work (it predicts the same word over and over again).
My questions are:
Am I using nn.MultiheadAttention correctly? It seems very weird to me that it should be used after the LSTM, but I saw this online, and it works from a dimension-size perspective.
Any idea why my model breaks when I use attention?
EDIT: I also tried putting the attention before the LSTM, and it didn't work either (the network predicted the same caption for every picture).
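One detail that may matter here: when nn.MultiheadAttention is used as self-attention over the whole caption during teacher forcing, every position can attend to future tokens unless a causal attn_mask is supplied, which would explain a very low training loss together with degenerate generation. A standalone sketch of the masked call, assuming batch_first=True as in the commented-out layer (an illustration of the mechanism, not a verified fix for this model):

import torch
import torch.nn as nn

batch, seq_len, hidden_size = 4, 10, 512              # illustrative shapes
attention = nn.MultiheadAttention(hidden_size, num_heads=1, batch_first=True)
lstm_out = torch.randn(batch, seq_len, hidden_size)   # stands in for the LSTM output

# Upper-triangular float mask: -inf above the diagonal, so position i
# can only attend to positions <= i (no peeking at future words)
causal_mask = torch.triu(torch.full((seq_len, seq_len), float('-inf')), diagonal=1)

attn_out, _ = attention(lstm_out, lstm_out, lstm_out, attn_mask=causal_mask)
print(attn_out.shape)  # torch.Size([4, 10, 512])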

ValueError: Target size (torch.Size([128])) must be the same as input size (torch.Size([112]))

I have a training function, inside which there are two vectors:
d_labels_a = torch.zeros(128)
d_labels_b = torch.ones(128)
Then I have these features:
# Compute output
features_a = nets[0](input_a)
features_b = nets[1](input_b)
features_c = nets[2](inputs)
And then a domain classifier (nets[4]) makes predictions:
d_pred_a = torch.squeeze(nets[4](features_a))
d_pred_b = torch.squeeze(nets[4](features_b))
d_pred_a = d_pred_a.float()
d_pred_b = d_pred_b.float()
print(d_pred_a.shape)
The error is raised in the loss function:
pred_a = torch.squeeze(nets[3](features_a))
pred_b = torch.squeeze(nets[3](features_b))
pred_c = torch.squeeze(nets[3](features_c))
loss = criterion(pred_a, labels_a) + criterion(pred_b, labels_b) + criterion(pred_c, labels) + d_criterion(d_pred_a, d_labels_a) + d_criterion(d_pred_b, d_labels_b)
The problem is that the shape of d_pred_a/b differs from that of d_labels_a/b, but only after a certain point. Indeed, when I print the shape of d_pred_a/b it is torch.Size([128]), but then it changes to torch.Size([112]).
It comes from here:
# Compute output
features_a = nets[0](input_a)
features_b = nets[1](input_b)
features_c = nets[2](inputs)
because if I print the shape of features_a, it is torch.Size([128, 2048]) but then it changes to torch.Size([112, 2048]).
nets[0] is a VGG, like this:
class VGG16(nn.Module):
    def __init__(self, input_size, batch_norm=False):
        super(VGG16, self).__init__()
        self.in_channels, self.in_width, self.in_height = input_size
        self.block_1 = VGGBlock(self.in_channels, 64, batch_norm=batch_norm)
        self.block_2 = VGGBlock(64, 128, batch_norm=batch_norm)
        self.block_3 = VGGBlock(128, 256, batch_norm=batch_norm)
        self.block_4 = VGGBlock(256, 512, batch_norm=batch_norm)

    #property
    def input_size(self):
        return self.in_channels, self.in_width, self.in_height

    def forward(self, x):
        x = self.block_1(x)
        x = self.block_2(x)
        x = self.block_3(x)
        x = self.block_4(x)
        # x = self.avgpool(x)
        x = torch.flatten(x, 1)
        return x
I solved it. The problem was the last batch: I used drop_last=True in the DataLoader and it worked.
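For completeness, a minimal sketch of that fix (the dataset name and batch size are assumptions; only drop_last=True comes from the solution above):

from torch.utils.data import DataLoader

# drop_last=True discards the final incomplete batch (e.g. 112 samples instead of 128),
# so every batch matches the hard-coded size of d_labels_a / d_labels_b
train_loader = DataLoader(train_dataset, batch_size=128, shuffle=True, drop_last=True)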

How can I measure Precision and Recall on Logistic Regression with PySpark?

I am using a Logistic Regression model in PySpark through Databricks, but I am not able to get my precision and recall. Everything works fine and I am able to get my ROC, but there is no attribute or library for precision and recall.
lrModel = LogisticRegression()
predictions = bestModel.transform(testData)
# Instantiate metrics object
results = predictions.select(['probability', 'label'])
results_collect = results.collect()
results_list = [(float(i[0][0]), 1.0-float(i[1])) for i in results_collect]
scoreAndLabels = sc.parallelize(results_list)
metrics = MulticlassMetrics(scoreAndLabels)
# Overall statistics
precision = metrics.precision()
recall = metrics.recall()
f1Score = metrics.fMeasure()
print("Summary Stats")
print("Precision = %s" % precision)
print("Recall = %s" % recall)
print("F1 Score = %s" % f1Score)
>>>Summary Stats
>>>Precision = 0.0
>>>Recall = 0.0
>>>F1 Score = 0.0
I was able to create my own function to do so. It returns everything and more. I am using MulticlassMetrics() from the mllib package. Since it is multiclass, it calculates metrics for each label, so you have to specify which label you want to retrieve.
### Model Evaluator User Defined Functions
import pyspark.sql.functions as F
from pyspark.sql.types import DoubleType
from pyspark.mllib.evaluation import MulticlassMetrics

def udfModelEvaluator(dfPredictions, labelColumn='label'):
    colSelect = dfPredictions.select(
        [F.col('prediction').cast(DoubleType()),
         F.col(labelColumn).cast(DoubleType()).alias('label')])
    metrics = MulticlassMetrics(colSelect.rdd)
    mAccuracy = metrics.accuracy
    mPrecision = metrics.precision(1)
    mRecall = metrics.recall(1)
    mF1 = metrics.fMeasure(1.0, 1.0)
    mMatrix = metrics.confusionMatrix().toArray().astype(int)
    mTP = metrics.confusionMatrix().toArray()[1][1]
    mTN = metrics.confusionMatrix().toArray()[0][0]
    mFP = metrics.confusionMatrix().toArray()[0][1]
    mFN = metrics.confusionMatrix().toArray()[1][0]
    mResults = [mAccuracy, mPrecision, mRecall, mF1, mMatrix, mTP, mTN, mFP, mFN,
                "Return [[0]=Accuracy, [1]=Precision, [2]=Recall, [3]=F1, [4]=ConfusionMatrix, [5]=TP, [6]=TN, [7]=FP, [8]=FN]"]
    return mResults
To call the function:
metricsList = udfModelEvaluator(predictionsData, "label")
metricsList
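As an alternative approach (my addition, not part of the original answer), the DataFrame-based API also exposes these metrics directly through pyspark.ml.evaluation.MulticlassClassificationEvaluator, which avoids dropping down to the RDD-based mllib package:

from pyspark.ml.evaluation import MulticlassClassificationEvaluator

# "predictions" is assumed to contain the usual "prediction" and "label" columns
evaluator = MulticlassClassificationEvaluator(labelCol="label", predictionCol="prediction")
precision = evaluator.evaluate(predictions, {evaluator.metricName: "weightedPrecision"})
recall = evaluator.evaluate(predictions, {evaluator.metricName: "weightedRecall"})
f1 = evaluator.evaluate(predictions, {evaluator.metricName: "f1"})
print(precision, recall, f1)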

Is this way of using Naive Bayes in matlab correct

I'm using Naive Bayes in MATLAB for classification like this:
dataFull = csvread('haberman.data.data')
dataTaining = dataFull(250, :)
dataTaining = dataFull(1:250,:)
dataTest = dataFull(251:end, :)
dataTainingClass = dataTaining(:,4)
dataTraining = dataTraining(:,1:3)
dataTraining = dataTaining(:,1:3)
dataTestClass = dataTest(:,4)
dataTest = dataTest(:,1:3)
nb = NaiveBayes.fit(dataTraining, dataTainingClass)
predict(nb, dataTest)
percentange = (dataTestClass == ans)
sum(percentange) / length(percentange)
This is with Haberman's dataset from the UCI repository.
Am I doing this right?