How can I run my Maximum Drawdown Code without this ValueError Exception? - python-3.7

I'm trying to follow an exercise on calculating the maximum drawdown and maximum drawdown duration of a market-neutral vs. a long-only trading strategy.
I followed the code to the T, and it has worked perfectly up until now, but I seem to be getting a ValueError exception. What code do I need to change for my code to work?
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from MaxDD_Function import calculateMaxDD
# CALCULATING MAXDD AND CREATING THE FUNCTION.
def calculateMaxDD(cumret):
    highwatermark = np.zeros(cumret.shape)
    drawdown = np.zeros(cumret.shape)
    drawdownduration = np.zeros(cumret.shape)
    for t in np.arange(1, cumret.shape[0]):
        highwatermark[t] = (np.maximum(highwatermark[t -1]), cumret[t])
        drawdown[t] = ((1+ cumret[t] )/(1 + highwatermark[t]) - 1)
        if drawdown[t] == 0:
            drawdownduration[t] == 0
        else:
            drawdownduration[t] = drawdownduration[t -1] + 1
    maxDD, i = np.min(drawdown, np.argmin(drawdown)) # drawdown < 0 always
    maxDDD = np.max(drawdownduration)
    return (maxDD, maxDDD, i)
# First part of example. Read the csv data and calculate.
#The first dataframe/set for my strategy
df = pd.read_csv('IGE_daily.csv')
#print (df.head())
df.sort_values(by= 'Date', inplace = True)
dailyret = df.loc[:, 'Adj Close'].pct_change()
excessRet = ((dailyret - 0.04)/252)
sharpeRatio = ((np.sqrt(252)*np.mean(excessRet))/np.std(excessRet))
print (sharpeRatio)
#Second part of example
#This is the second dataframe/set for my strategy.
df2 = pd.read_csv('SPY.csv')
#The new data frame, with both datasets.
df = pd.merge (df, df2, on = 'Date', suffixes = ('_IGE', '_SPY'))
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace = True)
df.sort_index(inplace = True)
dailyret = df[['Adj Close_IGE', 'Adj Close_SPY']].pct_change() # Daily returns
dailyret.rename(columns = {"Adj Close_IGE": "IGE", "Adj Close_SPY": "SPY"}, inplace = True)
netRet = (dailyret['IGE'] - dailyret['SPY'])/2
sharpeRatio = np.sqrt(252) * np.mean(netRet)/np.std(netRet)
print (sharpeRatio)
cumret = np.cumprod(1 + netRet) - 1 # Cumulative return
#print (plt.plot(cumret))
#print (plt.show()) # Remember to always run plt.show to see the plot in terminal.
maxDrawdown, maxDrawdownDuration, startDrawdownDay = calculateMaxDD(cumret.values)
maxDrawdown = calculateMaxDD(cumret.values)
print (maxDrawdown)
Here are the results I got from the above-mentioned code:
Ivies-MacBook-Pro:Quant_Trading Ivieidahosa$ python Ex3_4.py
-46.10531783058014
0.7743286831426566
Traceback (most recent call last):
File "Ex3_4.py", line 76, in <module>
maxDrawdown = calculateMaxDD(cumret.values)
File "Ex3_4.py", line 15, in calculateMaxDD
highwatermark[t] = (np.maximum(highwatermark[t -1]), cumret[t])
ValueError: invalid number of arguments
I expected the output of maxDrawdown to be -0.09529268047208683, maxDrawdownDuration to be 497, and startDrawdownDay to be 1223.

Q: What code do I need to change for my code to work?
Your code calls a numpy function whose minimum call signature is np.maximum( <array_like_A>, <array_like_B> ).
The reported line fails to meet that signature, because only one of the expected pair of values is delivered into the call ( note the placement of the closing parenthesis ):
highwatermark[t] = ( np.maximum( highwatermark[t-1] ), cumret[t] )
Here the right-hand side of the assignment constructs a tuple whose first item is the return value of np.maximum( highwatermark[t-1] ), a call with just one argument, and that single-argument call is what raises the ValueError.
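The fix is simply to move the closing parenthesis so that both values reach np.maximum as its two arguments:
highwatermark[t] = np.maximum( highwatermark[t-1], cumret[t] )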
You may like to start further bug-tracing by cross-checking the state of the objects and the call signature:
try:
    for t in np.arange( 1, cumret.shape[0] ):
        print( "The shape of <highwatermark[t-1]>-object was: ",
               highwatermark[t-1].shape, " for t == ", t
               )
except:
    print( "The <highwatermark[t-1]>-object was not a numpy array",
           " for t == ", t
           )
finally:
    print( np.maximum.__doc__ )
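For completeness, here is a sketch of the whole function with that line repaired. Two further points are my own observations, not part of the diagnosis above: drawdownduration[t] == 0 is a comparison where an assignment was intended, and np.min(drawdown, np.argmin(drawdown)) passes np.argmin(drawdown) into np.min's second positional argument (the axis), so the minimum and its index need two separate calls:
def calculateMaxDD(cumret):
    highwatermark = np.zeros(cumret.shape)
    drawdown = np.zeros(cumret.shape)
    drawdownduration = np.zeros(cumret.shape)
    for t in np.arange(1, cumret.shape[0]):
        # both values now reach np.maximum as its two arguments
        highwatermark[t] = np.maximum(highwatermark[t - 1], cumret[t])
        drawdown[t] = (1 + cumret[t]) / (1 + highwatermark[t]) - 1
        if drawdown[t] == 0:
            drawdownduration[t] = 0      # assignment, not comparison
        else:
            drawdownduration[t] = drawdownduration[t - 1] + 1
    maxDD = np.min(drawdown)             # deepest drawdown (<= 0 always)
    i = np.argmin(drawdown)              # index where it occurs
    maxDDD = np.max(drawdownduration)
    return (maxDD, maxDDD, i)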

Related

Need to create GPA Calculator to run in command line

I need to write a GPA calculator that uses the provided dictionary to output the GPA based on four letter-grade arguments. I can get the code to run in Google Colab and other IDEs, but I get no output on the command line. Can someone point me to what I am missing?
import sys
#print (sys.argv[1])
#print (sys.argv[2])
#print (sys.argv[3])
#print (sys.argv[4])
def gpa_calculator():
    grades = [sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]]
    grades_upper = [each_string.upper() for each_string in grades]
    points = 0
    grade_chart = {'A':4.0, 'A-':3.66, 'B+':3.33, 'B':3.0, 'B-':2.66,
                   'C+':2.33, 'C':2.0, 'C-':1.66, 'D+':1.33, 'D':1.00, 'D-':.66, 'F':0.00}
    for grade in grades_upper:
        points += grade_chart[grade]
    gpa = points / len(grades)
    rounded_gpa = round(gpa,2)
    return rounded_gpa
    print (rounded_gpa)
gpa_calculator()
    return rounded_gpa
    print (rounded_gpa)
You seem to be returning the value from the function before you reach the print statement. I'm guessing the value is returned correctly, but you don't do anything with the return value when calling the function, so nothing is output to the screen.
You should move the print(...) call above the return statement, or print out the result when calling the function:
print(gpa_calculator())
That's because you return before you print.
In a Jupyter notebook environment like Google Colab, each cell displays whatever its last line returns (if anything). That's why you get output in such environments.
Corrected code:
import sys
#print (sys.argv[1])
#print (sys.argv[2])
#print (sys.argv[3])
#print (sys.argv[4])
def gpa_calculator():
    grades = [sys.argv[1], sys.argv[2], sys.argv[3], sys.argv[4]]
    grades_upper = [each_string.upper() for each_string in grades]
    points = 0
    grade_chart = {'A':4.0, 'A-':3.66, 'B+':3.33, 'B':3.0, 'B-':2.66,
                   'C+':2.33, 'C':2.0, 'C-':1.66, 'D+':1.33, 'D':1.00, 'D-':.66, 'F':0.00}
    for grade in grades_upper:
        points += grade_chart[grade]
    gpa = points / len(grades)
    rounded_gpa = round(gpa,2)
    print(rounded_gpa)
    return rounded_gpa
gpa_calculator()
output:
C:\Users\XXXXX\Desktop>python3 a.py A B C D
2.5
C:\Users\XXXXX\Desktop>python3 a.py A A A A
4.0
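One small hardening idea beyond the fix above (my own suggestion, not part of the original answer): validate the arguments before indexing into sys.argv and the grade chart, so a wrong invocation fails with a usage message instead of an IndexError or KeyError:
import sys

GRADE_CHART = {'A': 4.0, 'A-': 3.66, 'B+': 3.33, 'B': 3.0, 'B-': 2.66,
               'C+': 2.33, 'C': 2.0, 'C-': 1.66, 'D+': 1.33, 'D': 1.00,
               'D-': .66, 'F': 0.00}

def gpa_calculator():
    # Expect exactly four letter grades on the command line
    if len(sys.argv) != 5:
        sys.exit("usage: python a.py GRADE1 GRADE2 GRADE3 GRADE4")
    grades = [g.upper() for g in sys.argv[1:5]]
    unknown = [g for g in grades if g not in GRADE_CHART]
    if unknown:
        sys.exit("unknown grade(s): " + ", ".join(unknown))
    return round(sum(GRADE_CHART[g] for g in grades) / len(grades), 2)

if __name__ == '__main__':
    print(gpa_calculator())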

Deep learning chatbot specific Index error list index out of range

I am trying to follow a tutorial on how to make a deep-learning chatbot with PyTorch. However, this code is quite complex for me, and it has stopped with an "IndexError: list index out of range". I looked the error up and get the gist of what it usually means, but seeing as this code is very complex for me, I can't figure out how to solve the error.
This is the source tutorial: https://colab.research.google.com/github/pytorch/tutorials/blob/gh-pages/_downloads/chatbot_tutorial.ipynb#scrollTo=LTzdbPF-OBL9
Line 198 seems to be causing the error
return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH
This is the error log
Start preparing training data ...
Reading lines...
Traceback (most recent call last):
File "D:\Documents\Python\python pycharm files\pythonProject4\3.9 Chatbot.py", line 221, in <module>
voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
File "D:\Documents\Python\python pycharm files\pythonProject4\3.9 Chatbot.py", line 209, in loadPrepareData
pairs = filterPairs(pairs)
File "D:\Documents\Python\python pycharm files\pythonProject4\3.9 Chatbot.py", line 202, in filterPairs
return [pair for pair in pairs if filterPair(pair)]
File "D:\Documents\Python\python pycharm files\pythonProject4\3.9 Chatbot.py", line 202, in <listcomp>
return [pair for pair in pairs if filterPair(pair)]
File "D:\Documents\Python\python pycharm files\pythonProject4\3.9 Chatbot.py", line 198, in filterPair
return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH
IndexError: list index out of range
Read 442563 sentence pairs
Process finished with exit code 1
And this is my code, copied from PyCharm, up to the block with the error. Seeing as it is a huge program, I could not copy the entire thing; the rest of the code can be found in the GitHub source link above.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import torch
from torch.jit import script, trace
import torch.nn as nn
from torch import optim
import torch.nn.functional as F
import csv
import random
import re
import os
import unicodedata
import codecs
from io import open
import itertools
import math
USE_CUDA = torch.cuda.is_available()
device = torch.device("cuda" if USE_CUDA else "cpu")
corpus_name = "cornell movie-dialogs corpus"
corpus = os.path.join("D:\Documents\Python\intents", corpus_name)
def printLines(file, n=10):
    with open(file, 'rb') as datafile:
        lines = datafile.readlines()
    for line in lines[:n]:
        print(line)
printLines(os.path.join(corpus, "movie_lines.txt"))
# Splits each line of the file into a dictionary of fields
def loadLines(fileName, fields):
    lines = {}
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for line in f:
            values = line.split(" +++$+++ ")
            # Extract fields
            lineObj = {}
            for i, field in enumerate(fields):
                lineObj[field] = values[i]
            lines[lineObj['lineID']] = lineObj
    return lines
# Groups fields of lines from `loadLines` into conversations based on *movie_conversations.txt*
def loadConversations(fileName, lines, fields):
    conversations = []
    with open(fileName, 'r', encoding='iso-8859-1') as f:
        for line in f:
            values = line.split(" +++$+++ ")
            # Extract fields
            convObj = {}
            for i, field in enumerate(fields):
                convObj[field] = values[i]
            # Convert string to list (convObj["utteranceIDs"] == "['L598485', 'L598486', ...]")
            lineIds = eval(convObj["utteranceIDs"])
            # Reassemble lines
            convObj["lines"] = []
            for lineId in lineIds:
                convObj["lines"].append(lines[lineId])
            conversations.append(convObj)
    return conversations
# Extracts pairs of sentences from conversations
def extractSentencePairs(conversations):
    qa_pairs = []
    for conversation in conversations:
        # Iterate over all the lines of the conversation
        for i in range(len(conversation["lines"]) - 1):  # We ignore the last line (no answer for it)
            inputLine = conversation["lines"][i]["text"].strip()
            targetLine = conversation["lines"][i+1]["text"].strip()
            # Filter wrong samples (if one of the lists is empty)
            if inputLine and targetLine:
                qa_pairs.append([inputLine, targetLine])
    return qa_pairs
# Define path to new file
datafile = os.path.join(corpus, "formatted_movie_lines.txt")
delimiter = '\t'
# Unescape the delimiter
delimiter = str(codecs.decode(delimiter, "unicode_escape"))
# Initialize lines dict, conversations list, and field ids
lines = {}
conversations = []
MOVIE_LINES_FIELDS = ["lineID", "characterID", "movieID", "character", "text"]
MOVIE_CONVERSATIONS_FIELDS = ["character1ID", "character2ID", "movieID", "utteranceIDs"]
# Load lines and process conversations
print("\nProcessing corpus...")
lines = loadLines(os.path.join(corpus, "movie_lines.txt"), MOVIE_LINES_FIELDS)
print("\nLoading conversations...")
conversations = loadConversations(os.path.join(corpus, "movie_conversations.txt"),
                                  lines, MOVIE_CONVERSATIONS_FIELDS)
# Write new csv file
print("\nWriting newly formatted file...")
with open(datafile, 'w', encoding='utf-8') as outputfile:
    writer = csv.writer(outputfile, delimiter=delimiter)
    for pair in extractSentencePairs(conversations):
        writer.writerow(pair)
# Print a sample of lines
print("\nSample lines from file:")
printLines(datafile)
# Default word tokens
PAD_token = 0 # Used for padding short sentences
SOS_token = 1 # Start-of-sentence token
EOS_token = 2 # End-of-sentence token
class Voc:
    def __init__(self, name):
        self.name = name
        self.trimmed = False
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count SOS, EOS, PAD

    def addSentence(self, sentence):
        for word in sentence.split(' '):
            self.addWord(word)

    def addWord(self, word):
        if word not in self.word2index:
            self.word2index[word] = self.num_words
            self.word2count[word] = 1
            self.index2word[self.num_words] = word
            self.num_words += 1
        else:
            self.word2count[word] += 1

    # Remove words below a certain count threshold
    def trim(self, min_count):
        if self.trimmed:
            return
        self.trimmed = True
        keep_words = []
        for k, v in self.word2count.items():
            if v >= min_count:
                keep_words.append(k)
        print('keep_words {} / {} = {:.4f}'.format(
            len(keep_words), len(self.word2index), len(keep_words) / len(self.word2index)
        ))
        # Reinitialize dictionaries
        self.word2index = {}
        self.word2count = {}
        self.index2word = {PAD_token: "PAD", SOS_token: "SOS", EOS_token: "EOS"}
        self.num_words = 3  # Count default tokens
        for word in keep_words:
            self.addWord(word)
MAX_LENGTH = 10 # Maximum sentence length to consider
# Turn a Unicode string to plain ASCII, thanks to
# http://stackoverflow.com/a/518232/2809427
def unicodeToAscii(s):
    return ''.join(
        c for c in unicodedata.normalize('NFD', s)
        if unicodedata.category(c) != 'Mn'
    )

# Lowercase, trim, and remove non-letter characters
def normalizeString(s):
    s = unicodeToAscii(s.lower().strip())
    s = re.sub(r"([.!?])", r" \1", s)
    s = re.sub(r"[^a-zA-Z.!?]+", r" ", s)
    s = re.sub(r"\s+", r" ", s).strip()
    return s
# Read query/response pairs and return a voc object
def readVocs(datafile, corpus_name):
    print("Reading lines...")
    # Read the file and split into lines
    lines = open(datafile, encoding='utf-8').\
        read().strip().split('\n')
    # Split every line into pairs and normalize
    pairs = [[normalizeString(s) for s in l.split('\t')] for l in lines]
    voc = Voc(corpus_name)
    return voc, pairs

# Returns True iff both sentences in a pair 'p' are under the MAX_LENGTH threshold
def filterPair(p):
    # Input sequences need to preserve the last word for EOS token
    return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH

# Filter pairs using filterPair condition
def filterPairs(pairs):
    return [pair for pair in pairs if filterPair(pair)]
# Using the functions defined above, return a populated voc object and pairs list
def loadPrepareData(corpus, corpus_name, datafile, save_dir):
    print("Start preparing training data ...")
    voc, pairs = readVocs(datafile, corpus_name)
    print("Read {!s} sentence pairs".format(len(pairs)))
    pairs = filterPairs(pairs)
    print("Trimmed to {!s} sentence pairs".format(len(pairs)))
    print("Counting words...")
    for pair in pairs:
        voc.addSentence(pair[0])
        voc.addSentence(pair[1])
    print("Counted words:", voc.num_words)
    return voc, pairs
# Load/Assemble voc and pairs
save_dir = os.path.join("data", "save")
voc, pairs = loadPrepareData(corpus, corpus_name, datafile, save_dir)
# Print some pairs to validate
print("\npairs:")
for pair in pairs[:10]:
    print(pair)
MIN_COUNT = 3 # Minimum word count threshold for trimming
I really hope someone can help me fix this problem and help me understand why it happens.
In the end I changed
return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH
to
try:
    return len(p[0].split(' ')) < MAX_LENGTH and len(p[1].split(' ')) < MAX_LENGTH
except:
    return False
And now the code seems to be working.
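The bare except masks the underlying cause, though. readVocs splits every line of the formatted file on '\t', so any line that lacks a tab separator produces a pair with fewer than two elements, and p[1] then raises the IndexError. An equivalent, more explicit version of the same fix (my phrasing, under that assumption):
# Returns True iff the pair really contains two sentences and both are under MAX_LENGTH
def filterPair(p):
    return (len(p) == 2
            and len(p[0].split(' ')) < MAX_LENGTH
            and len(p[1].split(' ')) < MAX_LENGTH)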

Encoding problem while running text summarization code

Good Day
I was testing the functionality of a text summarization code published on the website: https://towardsdatascience.com/understand-text-summarization-and-create-your-own-summarizer-in-python-b26a9f09fc70.
The problem is that, when I call the function on a text file, the error 'cp949' codec can't decode byte 0xe2 in position 205: illegal multibyte sequence appears. I know from other posts that this error is related to the encoding of the file. Therefore, I changed the encoding of the test2.txt file to UTF-8 (saving the file in plain text format, then choosing UTF-8 under Text Encoding > Other Encoding), but I still get the error message.
Here is the code that I wrote:
Import libraries
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx
test_text_word = "test2.txt"
def read_article(test_text_word):
    file = open(test_text_word, "r")
    filedata = file.readlines()
    article = filedata[0].split(". ")
    sentences = []
    for sentence in article:
        print(sentence)
        sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    sentences.pop()
    return sentences

def sentence_similarity(sent1, sent2, stopwords=None):
    if stopwords is None:
        stopwords = []
    sent1 = [w.lower() for w in sent1]
    sent2 = [w.lower() for w in sent2]
    all_words = list(set(sent1 + sent2))
    vector1 = [0] * len(all_words)
    vector2 = [0] * len(all_words)
    # build the vector for the first sentence
    for w in sent1:
        if w in stopwords:
            continue
        vector1[all_words.index(w)] += 1
    # build the vector for the second sentence
    for w in sent2:
        if w in stopwords:
            continue
        vector2[all_words.index(w)] += 1
    return 1 - cosine_distance(vector1, vector2)

def build_similarity_matrix(sentences, stop_words):
    # Create an empty similarity matrix
    similarity_matrix = np.zeros((len(sentences), len(sentences)))
    for idx1 in range(len(sentences)):
        for idx2 in range(len(sentences)):
            if idx1 == idx2:  # ignore if both are same sentences
                continue
            similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1], sentences[idx2], stop_words)
    return similarity_matrix

def generate_summary(test_text_word, top_n=5):
    stop_words = stopwords.words('english')
    summarize_text = []
    # Step 1 - Read text and split it
    sentences = read_article(test_text_word)
    # Step 2 - Generate similarity matrix across sentences
    sentence_similarity_matrix = build_similarity_matrix(sentences, stop_words)
    # Step 3 - Rank sentences in similarity matrix
    sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_matrix)
    scores = nx.pagerank(sentence_similarity_graph)
    # Step 4 - Sort the rank and pick top sentences
    ranked_sentence = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    print("Indexes of top ranked_sentence order are ", ranked_sentence)
    for i in range(top_n):
        summarize_text.append(" ".join(ranked_sentence[i][1]))
    # Step 5 - Of course, output the summarized text
    print("Summarize Text: \n", ". ".join(summarize_text))
The problem is that, when I run the code, with the following command:
generate_summary("test2.txt", 2)
I receive this error message: 'cp949' codec can't decode byte 0xe2 in position 205: illegal multibyte sequence
Should I change something in the code?
Thanks for your support.
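The question is unanswered here, so the following is my own suggestion rather than an accepted fix: read_article opens the file without specifying an encoding, so Python falls back to the locale default (cp949 on a Korean Windows system) no matter how the file itself was saved. Passing the encoding explicitly should make the decode succeed:
def read_article(test_text_word):
    # an explicit encoding overrides the locale default (cp949 here)
    with open(test_text_word, "r", encoding="utf-8") as file:
        filedata = file.readlines()
    article = filedata[0].split(". ")
    sentences = []
    for sentence in article:
        sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    sentences.pop()
    return sentences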

Cryptic TypeError: 'decimal.Decimal' object cannot be interpreted as an integer

I am struggling to understand why this function apparently fails in the Jupyter Notebook, but not in the IPython shell:
from decimal import Decimal

def present_value( r, n, fv = None, pmt = None ):
    '''
    Function to compute the Present Value based on interest rate and
    a given future value.

    Arguments accepted
    ------------------
    * r = interest rate,
      which should be given in its original percentage, eg.
      5% instead of 0.05

    * n = number of periods for which the cash flow,
      either as annuity or single flow from one present value

    * fv = future value in dollars,
      if problem is annuity based, leave this empty

    * pmt = each annuity payment in dollars,
      if problem is single cash flow based, leave this empty
    '''
    original_args = [r, n, fv, pmt]
    dec_args = [Decimal( arg ) if arg != None
                else arg
                for arg in original_args
                ]
    if dec_args[3] == None:
        return dec_args[2] / ( ( 1 + ( dec_args[0] / 100 ) )**dec_args[1] )
    elif dec_args[2] == None:
        # annuity_length = range( 1, dec_args[1] + 1 )
        # Not allowed to add a Decimal object
        # with an integer and to use it
        # in the range() function,
        # so we dereference the integer from original_args
        annuity_length = range( 1, original_args[1] + 1 )
        # Apply discounting to each annuity payment made
        # according to number of years left till end
        all_compounded_pmt = [ dec_args[3] * ( 1 / ( ( 1 + dec_args[0] / 100 ) ** time_left ) )
                               for time_left in annuity_length
                               ]
        return sum( all_compounded_pmt )
When I imported the module that this function resides in, named functions.py, using from functions import *, and then executed present_value(r=7, n=35, pmt = 11000), I got the error:
---------------------------------------------------------------------------
TypeError Traceback (most recent call last)
<ipython-input-93-c1cc587f7e27> in <module>()
----> 1 present_value(r=7, n=35, pmt = 11000)
/path_to_file/functions.py in present_value(r, n, fv, pmt)
73 if dec_args[3] == None:
74 return dec_args[2]/((1 + (dec_args[0]/100))**dec_args[1])
---> 75
76 elif dec_args[2] == None:
77 # annuity_length = range(1, dec_args[1]+1)
TypeError: 'decimal.Decimal' object cannot be interpreted as an integer
but in the IPython shell, evaluating this function works perfectly fine:
In [42]: functions.present_value(r=7, n=35, pmt = 11000)
Out[42]: Decimal('142424.39530474029537')
Can anyone please help me with this really confusing and obscure issue?
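One data point that may help (my own observation; this question has no accepted answer here): the error message is exactly what range() raises when handed a Decimal, as in the commented-out annuity_length line, which suggests the notebook may have been running a stale copy of functions.py in which that line was still active. A minimal reproduction:
from decimal import Decimal

# range() requires real integers; a Decimal bound raises the same error
range(1, Decimal(35) + 1)
# TypeError: 'decimal.Decimal' object cannot be interpreted as an integer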

How do you order annotations by offset in brat?

When using the brat rapid annotation tool, the created annotations file presents the annotations in the order in which the user performed them. If you start at the beginning of a document and annotate through to the end, the annotations will naturally be in the correct offset order. However, if you need to go back earlier in the document and add another annotation, the annotations in the output .ann file will be out of offset order.
How then can you rearrange the .ann file such that the annotations are in offset order when you are done? Is there some option within brat that allows you to do this or is it something that one has to write their own script to perform?
Hearing nothing, I wrote a Python script to accomplish what I had set out to do. First, I reorder all annotations by begin index. Second, I resequence the label numbers so that they are once again in ascending order.
import optparse, sys
splitchar1 = '\t'
splitchar2 = ' '
# for brat, overlapped is not permitted (or at least a warning is generated)
# we could use this simplification in sorting by simply sorting on begin. it is
# probably a good idea anyway.
class AnnotationRecord:
    label = 'T0'
    type = ''
    begin = -1
    end = -1
    text = ''

    def __repr__(self):
        return (self.label + splitchar1
                + self.type + splitchar2
                + str(self.begin) + splitchar2
                + str(self.end) + splitchar1 + self.text)

def create_record(parts):
    record = AnnotationRecord()
    record.label = parts[0]
    middle_parts = parts[1].split(splitchar2)
    record.type = middle_parts[0]
    record.begin = middle_parts[1]
    record.end = middle_parts[2]
    record.text = parts[2]
    return record
def main(filename, out_filename):
    fo = open(filename, 'r')
    lines = fo.readlines()
    fo.close()
    annotation_records = []
    for line in lines:
        parts = line.split(splitchar1)
        annotation_records.append(create_record(parts))
    # sort based upon begin
    sorted_records = sorted(annotation_records, key=lambda a: int(a.begin))
    # now relabel based upon the sorted order
    label_value = 1
    for sorted_record in sorted_records:
        sorted_record.label = 'T' + str(label_value)
        label_value += 1
    # now write the resulting file to disk
    fo = open(out_filename, 'w')
    for sorted_record in sorted_records:
        fo.write(sorted_record.__repr__())
    fo.close()
#format of .ann file is T# Type Start End Text
#args are input file, output file
if __name__ == '__main__':
    parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
                                   usage=globals()['__doc__'],
                                   version='$Id$')
    parser.add_option ('-v', '--verbose', action='store_true',
                       default=False, help='verbose output')
    (options, args) = parser.parse_args()
    if len(args) < 2:
        parser.error ('missing argument')
    main(args[0], args[1])
    sys.exit(0)
sys.exit(0)
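Invocation is just the input and output paths (the script name is whatever you saved it under; reorder_ann.py is my placeholder):
python reorder_ann.py input.ann output.ann
One caveat worth noting: brat relation and event annotations reference the T labels, so if your .ann files contain R or E lines as well, resequencing the T numbers would break those references; this script only handles files of plain text-bound annotations.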