I was trying to implement the exercise about Sequence Models and Long Short-Term Memory Networks with PyTorch. The idea is to add character-level features to an LSTM part-of-speech tagger, but I can't seem to work it out. They give as a hint that there should be two LSTMs involved: one that outputs a character-level representation of each word, and another that is in charge of predicting the part-of-speech tag. I just can't figure out how to loop over the words (in a sentence) and over the characters (in each word of the sentence) and implement it in the forward function. Does anyone know how to do it, or has anyone encountered a similar situation?
Here is my code:
class LSTMTaggerAug(nn.Module):
    def __init__(self, embedding_dim_words, embedding_dim_chars, hidden_dim_words, hidden_dim_chars, vocab_size, tagset_size, charset_size):
        super(LSTMTaggerAug, self).__init__()
        self.hidden_dim_words = hidden_dim_words
        self.hidden_dim_chars = hidden_dim_chars
        self.word_embeddings = nn.Embedding(vocab_size, embedding_dim_words)
        self.char_embeddings = nn.Embedding(charset_size, embedding_dim_chars)
        self.lstm_char = nn.LSTM(embedding_dim_chars, hidden_dim_chars)
        self.lstm_words = nn.LSTM(embedding_dim_words + hidden_dim_chars, hidden_dim_words)
        self.hidden2tag = nn.Linear(hidden_dim_words, tagset_size)
        self.hidden_char = self.init_hidden(c=False)
        self.hidden_words = self.init_hidden(c=True)

    def init_hidden(self, c=True):
        if c:
            return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim_words)),
                    autograd.Variable(torch.zeros(1, 1, self.hidden_dim_words)))
        else:
            return (autograd.Variable(torch.zeros(1, 1, self.hidden_dim_chars)),
                    autograd.Variable(torch.zeros(1, 1, self.hidden_dim_chars)))

    def forward(self, sentence, words):
        # embeds = self.word_embeddings(sentence)
        for ix, word in enumerate(sentence):
            chars = words[ix]
            char_embeds = self.char_embeddings(chars)
            lstm_char_out, self.hidden_char = self.lstm_char(
                char_embeds.view(len(chars), 1, -1), self.hidden_char)
            char_rep = lstm_char_out[-1]
            embeds = self.word_embeddings(word)
            embeds_cat = torch.cat((embeds, char_rep), dim=1)
            lstm_out, self.hidden_words = self.lstm_words(embeds_cat, self.hidden_words)
            tag_space = self.hidden2tag(lstm_out.view(1, -1))
            tag_score = F.log_softmax(tag_space, dim=1)
            if ix == 0:
                tag_scores = tag_score
            else:
                tag_scores = torch.cat((tag_scores, tag_score), 0)
        return tag_scores
The most naive way to do it, according to your description, would be to take a sentence s stripped of punctuation and split it into words:
words = s.split()
Take your first, character-level LSTM, LSTMc, and apply it to every word individually to encode the words (use the last output state of the LSTM as the word's encoding):
encoded_words = []
for word in words:
    state = state_0
    for char in word:
        h, state = LSTMc(one_hot_encoding(char), state)
    encoded_words.append(h)
After you've encoded the words, you run the word-level part-of-speech tagger LSTM, LSTMw, over the encoded words:
state = statew_0
parts_of_speech = []
for enc_word in encoded_words:
    pos, state = LSTMw(enc_word, state)
    parts_of_speech.append(pos)
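For concreteness, here is a minimal PyTorch sketch of that two-loop structure (a sketch only, not the tutorial's reference solution; the shapes assume unbatched inputs like the tutorial's, and the class and argument names are mine):

import torch
import torch.nn as nn
import torch.nn.functional as F

class CharWordTagger(nn.Module):
    """Sketch: one LSTM builds a character-level word representation,
    a second LSTM tags the sequence of [word embedding ; char representation]."""
    def __init__(self, word_emb_dim, char_emb_dim, char_hidden, word_hidden,
                 vocab_size, charset_size, tagset_size):
        super().__init__()
        self.word_embeddings = nn.Embedding(vocab_size, word_emb_dim)
        self.char_embeddings = nn.Embedding(charset_size, char_emb_dim)
        self.char_lstm = nn.LSTM(char_emb_dim, char_hidden)
        self.word_lstm = nn.LSTM(word_emb_dim + char_hidden, word_hidden)
        self.hidden2tag = nn.Linear(word_hidden, tagset_size)

    def forward(self, sentence_ixs, word_char_ixs):
        # sentence_ixs: LongTensor of word indices, shape (seq_len,)
        # word_char_ixs: list of LongTensors, one per word, each of shape (word_len,)
        word_reps = []
        for word_ix, char_ixs in zip(sentence_ixs, word_char_ixs):
            char_embeds = self.char_embeddings(char_ixs).unsqueeze(1)  # (word_len, 1, char_emb_dim)
            _, (h_n, _) = self.char_lstm(char_embeds)                  # h_n: (1, 1, char_hidden), last state
            word_embed = self.word_embeddings(word_ix).view(1, 1, -1)  # (1, 1, word_emb_dim)
            word_reps.append(torch.cat((word_embed, h_n), dim=2))      # concatenate word and char features
        seq = torch.cat(word_reps, dim=0)                              # (seq_len, 1, word_emb_dim + char_hidden)
        lstm_out, _ = self.word_lstm(seq)                              # (seq_len, 1, word_hidden)
        tag_space = self.hidden2tag(lstm_out.view(len(sentence_ixs), -1))
        return F.log_softmax(tag_space, dim=1)

Trained with NLLLoss, this slots into the same training loop as the tutorial's word-only tagger; the only change is that each sentence tensor is accompanied by one tensor of character indices per word.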
Related
I'm trying to build a PyTorch network for image captioning.
Currently I have a working network of an Encoder and a Decoder, and I want to add an nn.MultiheadAttention layer to it (to be used as self-attention).
Currently my decoder looks like this:
class Decoder(nn.Module):
    def __init__(self, hidden_size, embed_dim, vocab_size, layers = 1):
        super(Decoder, self).__init__()
        self.embed_dim = embed_dim
        self.vocab_size = vocab_size
        self.layers = layers
        self.hidden_size = hidden_size
        self.embedding = nn.Embedding(vocab_size, embed_dim, padding_idx=0)
        self.lstm = nn.LSTM(input_size = embed_dim, hidden_size = hidden_size, batch_first = True, num_layers = layers)
        #self.attention = nn.MultiheadAttention(hidden_size, num_heads=1, batch_first= True)
        self.fc = nn.Linear(hidden_size, self.vocab_size)

    def init_hidden(self, batch_size):
        h = torch.zeros(self.layers, batch_size, self.hidden_size).to(device)
        c = torch.zeros(self.layers, batch_size, self.hidden_size).to(device)
        return h, c

    def forward(self, features, caption):
        batch_size = caption.size(0)
        caption_size = caption.size(1)
        h, c = self.init_hidden(batch_size)
        embeddings = self.embedding(caption)
        lstm_input = torch.cat((features.unsqueeze(1), embeddings[:,:-1,:]), dim=1)
        output, (h, c) = self.lstm(lstm_input, (h, c))
        #output, _ = self.attention(output, output, output)
        output = self.fc(output)
        return output

    def generate_caption(self, features, max_caption_size = MAX_LEN):
        h, c = self.init_hidden(1)
        caption = ""
        embeddings = features.unsqueeze(1)
        for i in range(max_caption_size):
            output, (h, c) = self.lstm(embeddings, (h, c))
            #output, _ = self.attention(output, output, output)
            output = self.fc(output)
            _, word_index = torch.max(output, dim=2)  # take the word with highest probability
            if word_index == vocab.get_index(END_WORD):
                break
            caption += vocab.get_word(word_index) + " "
            embeddings = self.embedding(torch.LongTensor([word_index]).view(1, -1).to(device))
        return caption
and it gives relatively good results for image captioning.
I want to add the commented-out lines so the model will use attention. But when I do that, the model breaks: although the loss becomes extremely low (decreasing from 2.7 to 0.2 during training instead of from 2.7 to 1 without the attention), caption generation doesn't really work (it predicts the same word over and over again).
My questions are:
Am I using nn.MultiheadAttention correctly? It is very weird to me that it should be used after the LSTM, but I saw this online, and it works from a dimension-sizes perspective.
Any idea why my model breaks when I use Attention?
EDIT: I also tried to put the attention before the LSTM, and it didn't work either (the network predicted the same caption for every picture).
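For what it's worth, here is a hedged sketch of how the attention call could be given a causal mask, on the assumption (mine, not verified against this model) that the very low training loss comes from each position attending to future tokens of the teacher-forced caption, which are not available at generation time:

import torch
import torch.nn as nn

# Sketch only: self-attention over the LSTM output with a causal mask so that
# position t can only attend to positions <= t. Uses the same hidden_size and
# batch_first=True layout as the Decoder above; attend_causal is a made-up helper name.
attention = nn.MultiheadAttention(hidden_size, num_heads=1, batch_first=True)

def attend_causal(lstm_output):
    # lstm_output: (batch, seq_len, hidden_size)
    seq_len = lstm_output.size(1)
    causal_mask = torch.triu(
        torch.full((seq_len, seq_len), float('-inf'), device=lstm_output.device),
        diagonal=1)  # -inf strictly above the diagonal blocks attention to future steps
    attended, _ = attention(lstm_output, lstm_output, lstm_output, attn_mask=causal_mask)
    return attended

Note that generate_caption feeds the LSTM one step at a time, so attention there would only ever see a single position; making the training and generation paths consistent is a separate issue from the mask itself.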
I have the body of the code, which should work fine, but I think it doesn't because of something I'm messing up here, probably having to do with the embedding.
import torch.nn as nn

class MultilayerPerceptron(nn.Module):
    def __init__(self, input_size, hidden_size): # I removed output size
        # Call initializer function of the super class
        super(MultilayerPerceptron, self).__init__()
        self.embedding = nn.Embedding(INPUT_DIM, EMBEDDING_DIM) #added this myself, maybe wrong
        #self.mlp = nn.MultilayerPerceptron(EMBEDDING_DIM, HIDDEN_DIM) #also added
        self.INPUT_DIM = INPUT_DIM
        self.HIDDEN_DIM = HIDDEN_DIM
        self.OUTPUT_DIM = OUTPUT_DIM
        self.EMBEDDING_DIM = EMBEDDING_DIM
        #whenever this model is called, those layers in the sequential block
        #will be processed in the order given to the block.
        self.model = nn.Sequential(
            #nn.Flatten(), # adding this hopefully it works (it didn't)
            #embeds = embedded.mean(dim=1) #god help
            nn.Linear(self.INPUT_DIM, self.HIDDEN_DIM), #later on, multiply by embedding dimensionality #I removed
            nn.ReLU(),
            nn.Linear(self.HIDDEN_DIM, self.OUTPUT_DIM), #one layer neural network
            nn.ReLU(), # do I need this?
            nn.Sigmoid(),
        )

    def forward(self, x):
        embedded = self.embedding(x)
        #embedded = [sent len, batch size, emb dim]
        output, hidden = self.model(embedded)
        output = self.model(x) #call the model defined above for forward propagation.
        return output
INPUT_DIM = len(TEXT.vocab)
EMBEDDING_DIM = 100 #how do I fit this into the model??
HIDDEN_DIM = 256
OUTPUT_DIM = 1
model = MultilayerPerceptron(INPUT_DIM, HIDDEN_DIM) #MLP instead
The error I get is "mat1 and mat2 shapes cannot be multiplied (50176x100 and 25002x256)".
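That error says the first nn.Linear expects 25002 (vocabulary-size) input features but is being fed 100-dimensional embedding vectors. As an illustration only, here is a sketch in which the token embeddings are averaged into one vector per example and the first layer is sized to EMBEDDING_DIM; the mean-pooling step is my assumption about the intent, not something stated in the question:

import torch.nn as nn

class MLPSketch(nn.Module):
    # Sketch: embed tokens, average over the sequence, then a two-layer MLP.
    def __init__(self, vocab_size, embedding_dim, hidden_dim, output_dim):
        super(MLPSketch, self).__init__()
        self.embedding = nn.Embedding(vocab_size, embedding_dim)
        self.model = nn.Sequential(
            nn.Linear(embedding_dim, hidden_dim),  # sized to the embedding, not the vocabulary
            nn.ReLU(),
            nn.Linear(hidden_dim, output_dim),
            nn.Sigmoid(),
        )

    def forward(self, x):
        embedded = self.embedding(x)   # (batch, seq_len, embedding_dim) if the batch dimension comes first
        pooled = embedded.mean(dim=1)  # average the token embeddings; use dim=0 if seq_len comes first
        return self.model(pooled)      # (batch, output_dim)

model = MLPSketch(INPUT_DIM, EMBEDDING_DIM, HIDDEN_DIM, OUTPUT_DIM)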
Good Day
I was testing the functionality of a text summarization code published at: https://towardsdatascience.com/understand-text-summarization-and-create-your-own-summarizer-in-python-b26a9f09fc70.
The problem is that, when I call the function on a text file, the error 'cp949' codec can't decode byte 0xe2 in position 205: illegal multibyte sequence appears. I know from other posts that this error is related to the encoding of the file. Therefore, I changed the encoding of the test2.txt file to UTF-8 (saving the file in plain text format, then choosing UTF-8 under Text Encoding > Other Encoding), but I still get this error message.
Here is the code that I wrote:
# Import libraries
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx

test_text_word = "test2.txt"

def read_article(test_text_word):
    file = open(test_text_word, "r")
    filedata = file.readlines()
    article = filedata[0].split(". ")
    sentences = []
    for sentence in article:
        print(sentence)
        sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    sentences.pop()
    return sentences
def sentence_similarity(sent1, sent2, stopwords=None):
    if stopwords is None:
        stopwords = []
    sent1 = [w.lower() for w in sent1]
    sent2 = [w.lower() for w in sent2]
    all_words = list(set(sent1 + sent2))
    vector1 = [0] * len(all_words)
    vector2 = [0] * len(all_words)
    # build the vector for the first sentence
    for w in sent1:
        if w in stopwords:
            continue
        vector1[all_words.index(w)] += 1
    # build the vector for the second sentence
    for w in sent2:
        if w in stopwords:
            continue
        vector2[all_words.index(w)] += 1
    return 1 - cosine_distance(vector1, vector2)
def build_similarity_matrix(sentences, stop_words):
    # Create an empty similarity matrix
    similarity_matrix = np.zeros((len(sentences), len(sentences)))
    for idx1 in range(len(sentences)):
        for idx2 in range(len(sentences)):
            if idx1 == idx2:  # ignore if both are the same sentence
                continue
            similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1], sentences[idx2], stop_words)
    return similarity_matrix
def generate_summary(test_text_word, top_n=5):
    stop_words = stopwords.words('english')
    summarize_text = []
    # Step 1 - Read the text and split it
    sentences = read_article(test_text_word)
    # Step 2 - Generate the similarity matrix across sentences
    sentence_similarity_martix = build_similarity_matrix(sentences, stop_words)
    # Step 3 - Rank sentences in the similarity matrix
    sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_martix)
    scores = nx.pagerank(sentence_similarity_graph)
    # Step 4 - Sort the ranks and pick the top sentences
    ranked_sentence = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    print("Indexes of top ranked_sentence order are ", ranked_sentence)
    for i in range(top_n):
        summarize_text.append(" ".join(ranked_sentence[i][1]))
    # Step 5 - Output the summarized text
    print("Summarize Text: \n", ". ".join(summarize_text))
The problem is that, when I run the code with the following command:
generate_summary("test2.txt", 2)
I receive this error message: 'cp949' codec can't decode byte 0xe2 in position 205: illegal multibyte sequence
Should I change something in the code?
Thanks for your support.
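Purely as an illustration (it assumes test2.txt really was saved as UTF-8, which cannot be verified from the question), the open() call inside read_article is where an explicit encoding would go, instead of relying on the platform default that shows up as cp949 here:

def read_article(test_text_word):
    # Sketch: pass the encoding explicitly; errors="replace" avoids hard failures
    # on stray bytes at the cost of substituting them.
    with open(test_text_word, "r", encoding="utf-8", errors="replace") as file:
        filedata = file.readlines()
    article = filedata[0].split(". ")
    sentences = []
    for sentence in article:
        sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    sentences.pop()
    return sentences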
I am trying to return the category of input data. The training data is 'data.csv', which contains 3,500,000 rows of sentences and their classes.
import nltk
from nltk.stem.lancaster import LancasterStemmer
import os
import csv
import json
import datetime

stemmer = LancasterStemmer()

training_data = []
with open('data.csv') as f:
    training_data = [{k: str(v) for k, v in row.items()}
                     for row in csv.DictReader(f, skipinitialspace=True)]

words = []
classes = []
documents = []
ignore_words = ['?', '.', '_', '-']  # words to be ignored in input data file
for pattern in training_data:
    w = nltk.word_tokenize(pattern['sentence'])
    words.extend(w)
    documents.append((w, pattern['class']))
    if pattern['class'] not in classes:
        classes.append(pattern['class'])

words = [stemmer.stem(a.lower()) for a in words if a not in ignore_words]
words = list(set(words))  # remove duplicates
classes = list(set(classes))
# create our training data
training = []
output = []
output_empty = [0] * len(classes)
for doc in documents:
    # initialize our bag of words
    bag = []
    # list of tokenized words for the pattern
    pattern_words = doc[0]
    # stem each word
    pattern_words = [stemmer.stem(word.lower()) for word in pattern_words]
    for w in words:
        bag.append(1) if w in pattern_words else bag.append(0)
    training.append(bag)
    output_row = list(output_empty)
    output_row[classes.index(doc[1])] = 1
    output.append(output_row)
import numpy as np
import time

def sigmoid(x):
    output = 1/(1+np.exp(-x))
    return output

def sigmoid_output_to_derivative(output):
    return output*(1-output)

def clean_up_sentence(sentence):
    sentence_words = nltk.word_tokenize(sentence)
    sentence_words = [stemmer.stem(word.lower()) for word in sentence_words]
    return sentence_words

def bow(sentence, words, show_details=False):
    # tokenize the pattern
    sentence_words = clean_up_sentence(sentence)
    # bag of words
    bag = [0]*len(words)
    for s in sentence_words:
        for i, w in enumerate(words):
            if w == s:
                bag[i] = 1
    return(np.array(bag))
# returns the calculated value of the output after multiplying with the sigmoids
def think(sentence, show_details=False):
    x = bow(sentence.lower(), words, show_details)
    # input layer is our bag of words
    l0 = x
    # matrix multiplication of input and hidden layer
    l1 = sigmoid(np.dot(l0, synapse_0))
    # output layer
    l2 = sigmoid(np.dot(l1, synapse_1))
    return l2
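Note that think() relies on two weight matrices, synapse_0 and synapse_1, which are not defined in the excerpt. As a sketch of the shapes they would need (the hidden-layer size and the random initialization are placeholders; in practice the values would come from a training loop or a saved file):

import numpy as np

hidden_neurons = 10  # placeholder size, not from the question
np.random.seed(1)
synapse_0 = 2 * np.random.random((len(words), hidden_neurons)) - 1    # input layer -> hidden layer
synapse_1 = 2 * np.random.random((hidden_neurons, len(classes))) - 1  # hidden layer -> output layer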
When using the brat rapid annotation tool, it appears that the created annotation file lists the annotations in the order in which the user performed them. If you start at the beginning of a document and go to the end performing annotations, the annotations will naturally be in the correct offset order. However, if you need to go back earlier in the document and add another annotation, the offset order of the annotations in the output .ann file will be out of order.
How, then, can you rearrange the .ann file so that the annotations are in offset order when you are done? Is there some option within brat that allows you to do this, or is it something for which one has to write their own script?
Hearing nothing, I wrote a Python script to accomplish what I had set out to do. First, I reorder all annotations by begin index. Second, I resequence the label numbers so that they are once again in ascending order.
import optparse, sys

splitchar1 = '\t'
splitchar2 = ' '

# for brat, overlapped is not permitted (or at least a warning is generated)
# we could use this simplification in sorting by simply sorting on begin. it is
# probably a good idea anyway.
class AnnotationRecord:
    label = 'T0'
    type = ''
    begin = -1
    end = -1
    text = ''

    def __repr__(self):
        return (self.label + splitchar1
                + self.type + splitchar2
                + str(self.begin) + splitchar2
                + str(self.end) + splitchar1 + self.text)

def create_record(parts):
    record = AnnotationRecord()
    record.label = parts[0]
    middle_parts = parts[1].split(splitchar2)
    record.type = middle_parts[0]
    record.begin = middle_parts[1]
    record.end = middle_parts[2]
    record.text = parts[2]
    return record

def main(filename, out_filename):
    fo = open(filename, 'r')
    lines = fo.readlines()
    fo.close()
    annotation_records = []
    for line in lines:
        parts = line.split(splitchar1)
        annotation_records.append(create_record(parts))
    # sort based upon begin
    sorted_records = sorted(annotation_records, key=lambda a: int(a.begin))
    # now relabel based upon the sorted order
    label_value = 1
    for sorted_record in sorted_records:
        sorted_record.label = 'T' + str(label_value)
        label_value += 1
    # now write the resulting file to disk
    fo = open(out_filename, 'w')
    for sorted_record in sorted_records:
        fo.write(sorted_record.__repr__())
    fo.close()

# format of .ann file is T# Type Start End Text
# args are input file, output file
if __name__ == '__main__':
    parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
                                   usage=globals()['__doc__'],
                                   version='$Id$')
    parser.add_option('-v', '--verbose', action='store_true',
                      default=False, help='verbose output')
    (options, args) = parser.parse_args()
    if len(args) < 2:
        parser.error('missing argument')
    main(args[0], args[1])
    sys.exit(0)
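Assuming the script is saved as, say, reorder_ann.py (the filename is mine), it is invoked with the input and output annotation files as positional arguments:

python reorder_ann.py original.ann sorted.ann

Sorting on the begin offset alone relies on brat disallowing overlapping annotations, as noted in the comment at the top of the script.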