Converting time (seconds) to ticks when reading and saving a MIDI file with Mido in Python - midi

The following program uses Mido to read 'g1.mid' and then save it as 'g1_new.mid'. My question is: when reading the file, msg.time is a float (seconds), but when saving, the time argument of Message must be an integer number of ticks. How do I convert msg.time to ticks in this case?
from mido import MidiFile
from mido import Message, MidiTrack

mid = MidiFile()
track = MidiTrack()
mid.tracks.append(track)

for msg in MidiFile('g1.mid'):
    if (not msg.is_meta):
        if (msg.type == 'note_on'):
            # how to convert msg.time to ticks to fill in '?'
            track.append(Message('note_on', note=msg.note, velocity=msg.velocity, time=?))
        elif (msg.type == 'note_off'):
            # how to convert msg.time to ticks to fill in '?'
            track.append(Message('note_off', note=msg.note, velocity=msg.velocity, time=?))
        elif (msg.type == 'program_change'):
            track.append(Message('program_change', program=msg.program, channel=msg.channel))

mid.save('g1_new.mid')
Note: This piece of code is in a project about music generation.

When you iterate over the MidiFile object itself, the timestamps are explicitly converted to seconds:
class MidiFile(object):
    ...
    def __iter__(self):
        ...
        tempo = DEFAULT_TEMPO
        for msg in merge_tracks(self.tracks):
            # Convert message time from absolute time
            # in ticks to relative time in seconds.
            if msg.time > 0:
                delta = tick2second(msg.time, self.ticks_per_beat, tempo)
            else:
                delta = 0

            yield msg.copy(time=delta)

            if msg.type == 'set_tempo':
                tempo = msg.tempo
So just iterate over mid.tracks (or over merge_tracks(mid.tracks)) directly; that way msg.time stays in ticks and no conversion is needed.
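For example, here is a minimal sketch along those lines, copying only the message types from the question; the delta time of any skipped message is carried forward so the remaining messages keep their timing. (If you would rather keep iterating over the MidiFile in seconds, mido.second2tick(seconds, ticks_per_beat, tempo) converts a float delta back to ticks, but then you have to track tempo changes yourself, just as __iter__ does above.)

from mido import MidiFile, MidiTrack, merge_tracks

mid_in = MidiFile('g1.mid')
mid_out = MidiFile(ticks_per_beat=mid_in.ticks_per_beat)  # keep the same resolution
track = MidiTrack()
mid_out.tracks.append(track)

carried = 0  # delta ticks accumulated from skipped messages
for msg in merge_tracks(mid_in.tracks):
    # iterating over the tracks (not the MidiFile) keeps msg.time in ticks
    if not msg.is_meta and msg.type in ('note_on', 'note_off', 'program_change'):
        track.append(msg.copy(time=msg.time + carried))
        carried = 0
    else:
        carried += msg.time

mid_out.save('g1_new.mid')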

Related

DHT11 sensor often returning an unexpected number of pulses

I am trying to make a simple water pump controller using a DHT11, so that the pump turns on more frequently when the temperature is higher. I have it working, but every 4th or 5th time I call measure() on the DHT11 sensor I get an error saying "InvalidPulseCount: Expected 82 but got 0 pulses" or "InvalidPulseCount: Got more than 82 pulses". I have added a try block that stops the program from crashing, but I would really like to figure out why it is happening. I also had to edit the dht.py library to use 82 instead of 84 as the default expected pulse count, because that was what was commonly returned.
Here is my main.py file:
from machine import Pin
from time import sleep_ms
import dht
import I2C_LCD_driver

sensor = dht.DHT11(Pin(28))
lcd = I2C_LCD_driver.lcd()
pump = Pin(7, Pin.OUT)

counter = 0
pumpTime = 30
normalTime = 60
hotTime = 30
lowTemp = 19

# sensor variables only updated every 3 loops
lastMesure = 1
temp = 0
humid = 0
first = True

while True:
    # sensor.measure can only be called every 3 seconds;
    # start at 1 and set to zero in the first loop for our first measurement
    lastMesure += -1
    if lastMesure <= 0:
        try:
            sensor.measure()
            lastMesure = 3
            temp = round((sensor.temperature), 0)
            humid = sensor.humidity
        except:
            print("something went wrong")
    print("Counter: {:.0f} pumping: {:.0f}".format(counter, pump.value()))
    print("Temp: {:.0f}℃ HUMIDITY: {:.0f}% ".format(temp, humid))

    # if the pump is running
    if pump.value() == 1:
        if counter >= pumpTime:  # if it has been running for the set pump run time
            pump.value(0)  # turn off pump
            counter = 0    # reset counter
        else:
            counter += 1
    else:
        # check current temp
        # if warmer than {lowTemp}, check for {hotTime}, else check for {normalTime}
        if (temp > lowTemp and counter >= hotTime) or counter >= normalTime:
            pump.value(1)  # turn on pump
            counter = 0    # reset counter
        else:
            counter += 1

    # print current data to the screen
    lcd.lcd_clear()
    lcd.lcd_display_string("T: {:.0f}C H:{:.0f}%".format(temp, humid), 1)
    if pump.value() == 1:
        status = f'Pumping {pumpTime - counter}s'
        lcd.lcd_display_string(status, 2)
    else:
        lcd.lcd_display_string("Pump off ", 2)
    sleep_ms(1000)
Here is a picture of my breadboard setup. I have run it both with and without a 1k pull-up resistor on the data pin.
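For what it's worth, here is a minimal retry sketch around the same sensor.measure() call. The read_dht11 helper name, the retry count, and the delay are illustrative only; it assumes the same dht library and sensor object as above, and the extra sleep adds to the loop period.

from time import sleep_ms

def read_dht11(sensor, retries=3, delay_ms=2000):
    # sketch only: retry a flaky reading a few times, then give up
    for attempt in range(retries):
        try:
            sensor.measure()
            return round(sensor.temperature, 0), sensor.humidity
        except Exception as e:
            print("measure failed (attempt {}): {}".format(attempt + 1, e))
            sleep_ms(delay_ms)  # the DHT11 needs a pause before it can be read again
    return None, None  # caller keeps the previous temp/humid values on failure

In the main loop, temp and humid would then only be overwritten when the returned values are not None.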

Encoding problem while running text summarization code

Good Day
I was testing the functionality of a text summarization code published on the website: https://towardsdatascience.com/understand-text-summarization-and-create-your-own-summarizer-in-python-b26a9f09fc70.
The problem is that, when I call the function on a text file, I get the error 'cp949' codec can't decode byte 0xe2 in position 205: illegal multibyte sequence. I know, from other posts, that this error is related to the file's encoding. Therefore, I changed the encoding of the test2.txt file to UTF-8 (saving the file in plain-text format, then choosing UTF-8 under Text Encoding > Other Encoding), but I still get the same error message.
Here is the code that I wrote:
# Import libraries
from nltk.corpus import stopwords
from nltk.cluster.util import cosine_distance
import numpy as np
import networkx as nx

test_text_word = "test2.txt"

def read_article(test_text_word):
    file = open(test_text_word, "r")
    filedata = file.readlines()
    article = filedata[0].split(". ")
    sentences = []
    for sentence in article:
        print(sentence)
        sentences.append(sentence.replace("[^a-zA-Z]", " ").split(" "))
    sentences.pop()
    return sentences

def sentence_similarity(sent1, sent2, stopwords=None):
    if stopwords is None:
        stopwords = []
    sent1 = [w.lower() for w in sent1]
    sent2 = [w.lower() for w in sent2]
    all_words = list(set(sent1 + sent2))
    vector1 = [0] * len(all_words)
    vector2 = [0] * len(all_words)
    # build the vector for the first sentence
    for w in sent1:
        if w in stopwords:
            continue
        vector1[all_words.index(w)] += 1
    # build the vector for the second sentence
    for w in sent2:
        if w in stopwords:
            continue
        vector2[all_words.index(w)] += 1
    return 1 - cosine_distance(vector1, vector2)

def build_similarity_matrix(sentences, stop_words):
    # Create an empty similarity matrix
    similarity_matrix = np.zeros((len(sentences), len(sentences)))
    for idx1 in range(len(sentences)):
        for idx2 in range(len(sentences)):
            if idx1 == idx2:  # ignore if both are the same sentence
                continue
            similarity_matrix[idx1][idx2] = sentence_similarity(sentences[idx1], sentences[idx2], stop_words)
    return similarity_matrix

def generate_summary(test_text_word, top_n=5):
    stop_words = stopwords.words('english')
    summarize_text = []
    # Step 1 - Read the text and split it
    sentences = read_article(test_text_word)
    # Step 2 - Generate the similarity matrix across sentences
    sentence_similarity_martix = build_similarity_matrix(sentences, stop_words)
    # Step 3 - Rank sentences in the similarity matrix
    sentence_similarity_graph = nx.from_numpy_array(sentence_similarity_martix)
    scores = nx.pagerank(sentence_similarity_graph)
    # Step 4 - Sort the ranks and pick the top sentences
    ranked_sentence = sorted(((scores[i], s) for i, s in enumerate(sentences)), reverse=True)
    print("Indexes of top ranked_sentence order are ", ranked_sentence)
    for i in range(top_n):
        summarize_text.append(" ".join(ranked_sentence[i][1]))
    # Step 5 - Of course, output the summarized text
    print("Summarize Text: \n", ". ".join(summarize_text))
The problem is that when I run the code with the following command:
generate_summary("test2.txt", 2)
I receive this error message: 'cp949' codec can't decode byte 0xe2 in position 205: illegal multibyte sequence
Should I change something in the code?
Thanks for your support.
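One thing worth checking, offered only as a sketch: open() without an encoding argument uses the platform's default codec (cp949 on a Korean Windows locale) no matter how the file itself was saved, so the read in read_article can be made explicit:

# sketch: pass the file's encoding explicitly instead of relying on the platform default
with open(test_text_word, "r", encoding="utf-8") as file:
    filedata = file.readlines()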

How can I run my Maximum Drawdown Code without this ValueError Exception?

I'm trying to follow an exercise on calculating the maximum drawdown and maximum drawdown duration of a market-neutral vs. a long-only trading strategy.
I followed the code to a T and it has worked perfectly up until now, but I am getting a ValueError exception. What do I need to change for my code to work?
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from MaxDD_Function import calculateMaxDD

# CALCULATING MAXDD AND CREATING THE FUNCTION.
def calculateMaxDD(cumret):
    highwatermark = np.zeros(cumret.shape)
    drawdown = np.zeros(cumret.shape)
    drawdownduration = np.zeros(cumret.shape)
    for t in np.arange(1, cumret.shape[0]):
        highwatermark[t] = (np.maximum(highwatermark[t -1]), cumret[t])
        drawdown[t] = ((1 + cumret[t]) / (1 + highwatermark[t]) - 1)
        if drawdown[t] == 0:
            drawdownduration[t] == 0
        else:
            drawdownduration[t] = drawdownduration[t -1] + 1
    maxDD, i = np.min(drawdown, np.argmin(drawdown))  # drawdown < 0 always
    maxDDD = np.max(drawdownduration)
    return (maxDD, maxDDD, i)

# First part of example. Read the csv data and calculate.
# The first dataframe/set for my strategy
df = pd.read_csv('IGE_daily.csv')
#print (df.head())
df.sort_values(by='Date', inplace=True)
dailyret = df.loc[:, 'Adj Close'].pct_change()
excessRet = ((dailyret - 0.04) / 252)
sharpeRatio = ((np.sqrt(252) * np.mean(excessRet)) / np.std(excessRet))
print(sharpeRatio)

# Second part of example
# This is the second dataframe/set for my strategy.
df2 = pd.read_csv('SPY.csv')
# The new data frame, with both datasets.
df = pd.merge(df, df2, on='Date', suffixes=('_IGE', '_SPY'))
df['Date'] = pd.to_datetime(df['Date'])
df.set_index('Date', inplace=True)
df.sort_index(inplace=True)
dailyret = df[['Adj Close_IGE', 'Adj Close_SPY']].pct_change()  # Daily returns
dailyret.rename(columns={"Adj Close_IGE": "IGE", "Adj Close_SPY": "SPY"}, inplace=True)
netRet = (dailyret['IGE'] - dailyret['SPY']) / 2
sharpeRatio = np.sqrt(252) * np.mean(netRet) / np.std(netRet)
print(sharpeRatio)
cumret = np.cumprod(1 + netRet) - 1  # Cumulative return
#print (plt.plot(cumret))
#print (plt.show())  # Remember to always run plt.show to see the plot in the terminal.
maxDrawdown, maxDrawdownDuration, startDrawdownDay = calculateMaxDD(cumret.values)
maxDrawdown = calculateMaxDD(cumret.values)
print(maxDrawdown)
Here are the results I got from the above-mentioned code:
Ivies-MacBook-Pro:Quant_Trading Ivieidahosa$ python Ex3_4.py
-46.10531783058014
0.7743286831426566
Traceback (most recent call last):
  File "Ex3_4.py", line 76, in <module>
    maxDrawdown = calculateMaxDD(cumret.values)
  File "Ex3_4.py", line 15, in calculateMaxDD
    highwatermark[t] = (np.maximum(highwatermark[t -1]), cumret[t])
ValueError: invalid number of arguments
I expected the output of maxDrawdown to be -0.09529268047208683, maxDrawdownDuration to be 497, and startDrawdownDay to be 1223.
Q: What code do I need to change for my code to work?
Your code calls a numpy function whose minimum call signature is np.maximum( <array_like_A>, <array_like_B> ).
The reported line fails to meet that expectation, because only one of the expected pair of values is actually delivered to the call (note where the closing parenthesis sits):
highwatermark[t] = ( np.maximum( highwatermark[t-1] ), cumret[t] )
Here a tuple is being constructed on the right-hand side of the assignment (strictly speaking, an object reference gets assigned in Python, but let's keep it short), and its first item is the return value of a call to np.maximum(...) with only a single argument. That single-argument call is what raises the error. And Hic Sunt Leones ...
You may like to start further bug-tracing by cross-checking the state of the objects and the call signature:
try:
    for t in np.arange( 1, cumret.shape[0] ):
        print( "The shape of <highwatermark[t-1]>-object was: ",
               highwatermark[t-1].shape, " for t == ", t
               )
except:
    print( "The <highwatermark[t-1]>-object was not a numpy array",
           " for t == ", t
           )
finally:
    print( np.maximum.__doc__ )
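For comparison, here is a minimal sketch of the corrected function (variable names kept from the question; note that np.min() and np.argmin() are now two separate calls, since the second positional argument of np.min() is an axis, and that the drawdownduration reset uses assignment rather than the original ==):

import numpy as np

def calculateMaxDD(cumret):
    # sketch of the corrected loop, not a drop-in guarantee of the book's numbers
    highwatermark = np.zeros(cumret.shape)
    drawdown = np.zeros(cumret.shape)
    drawdownduration = np.zeros(cumret.shape)
    for t in np.arange(1, cumret.shape[0]):
        # np.maximum() now receives BOTH arguments inside its parentheses
        highwatermark[t] = np.maximum(highwatermark[t - 1], cumret[t])
        drawdown[t] = (1 + cumret[t]) / (1 + highwatermark[t]) - 1
        if drawdown[t] == 0:
            drawdownduration[t] = 0  # assignment, not comparison
        else:
            drawdownduration[t] = drawdownduration[t - 1] + 1
    maxDD = np.min(drawdown)   # drawdown <= 0 always
    i = np.argmin(drawdown)    # day on which the maximum drawdown occurs
    maxDDD = np.max(drawdownduration)
    return maxDD, maxDDD, i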

Weird behaviour with dialog boxes and variables in Lua script for Lightroom plugin

I'm writing a Lightroom plugin using the Lightroom SDK/API in the Lua language. I'm new to Lua. I've found a situation where my script only works if a Lightroom dialog box (LrDialogs.message("random message")) is present in one function. Without it, the function falls over at a later point, claiming a string variable (Image.dr in the last LrDialogs.message) is nil rather than the value it normally has when the plugin is working properly. Does anyone know what's going wrong? Here's the relevant code segment:
------ read output file for exif and write to LR metadata ------
function parseOutput(outputFilePath)
    LrDialogs.message("random message")
    local tblOutput = {} --to hold the output exif (1 column table, i.e. an array)
    local tblImages = {} --to hold the images and their relevant metadata
    for line in io.lines(outputFilePath) do
        line = removeWhitespaces(line)
        table.insert(tblOutput, line)
    end
    local str = table.remove(tblOutput) --remove last line in table/file (it's log info, not exif)
    tblImages = extractExif(tblOutput) --pick out the exif key/value pairs and add to Image objects
end

function extractExif(tblOutput)
    local Image = {} --pseudo object to hold metadata for each image
    local tblImages = {}
    local blnFlag = false
    local intCount = 0
    for k,v in pairs(tblOutput) do --iterate through each value in the table
        if string.find(v, "^=.+") then
            --test if new image other than the first one
            if blnFlag == true then
                --add Image to tblImages and then clear Image object
                table.insert(tblImages, Image)
                --Image = {} --don't technically need this
                blnFlag = false
                --LrDialogs.message("inside blnFlag test")
            end
            i, j = string.find(v, "/") -- **** MAC ONLY. Back slash for Windows *****
            Image.filePath = string.sub(v, i) --returns the file path
            Image.name = string.match(v, "([^/]+)$") --return the file name
            blnFlag = true
        elseif string.find(v, "ISO") ~= nil then
            Image.iso = string.match(v, "%a+:(.+)") --get text (i.e. value) to right of colon
        elseif string.find(v, "Film") ~= nil then
            Image.filmSim = string.match(v, "%a+:(.+)")
        elseif string.find(v, "Setting") ~= nil then
            Image.drMode = string.match(v, "%a+:(.+)")
        elseif (string.find(v, "Auto") ~= nil) or (string.find(v, "Development") ~= nil) then
            Image.dr = string.match(v, "%a+:(.+)")
        else
        end
    end
    LrDialogs.message(Image.name .. Image.iso .. Image.filmSim .. Image.drMode .. Image.dr)
    return tblImages
end

function removeWhitespaces(str)
    return string.gsub(str, "%s", "")
end

How do you order annotations by offset in brat?

When using the rapid annotation tool brat, it appears that the created annotations file presents the annotations in the order in which they were performed by the user. If you start at the beginning of a document and work through to the end performing annotations, the annotations will naturally be in the correct offset order. However, if you need to go back earlier in the document and add another annotation, the offset order of the annotations in the output .ann file will be out of order.
How then can you rearrange the .ann file such that the annotations are in offset order when you are done? Is there some option within brat that allows you to do this or is it something that one has to write their own script to perform?
Hearing nothing, I wrote a Python script to accomplish what I had set out to do. First, I reorder all annotations by begin index. Second, I resequence the label numbers so that they are once again in ascending order.
import optparse, sys

splitchar1 = '\t'
splitchar2 = ' '

# for brat, overlap is not permitted (or at least a warning is generated)
# we could use this simplification in sorting by simply sorting on begin. it is
# probably a good idea anyway.
class AnnotationRecord:
    label = 'T0'
    type = ''
    begin = -1
    end = -1
    text = ''

    def __repr__(self):
        return (self.label + splitchar1
                + self.type + splitchar2
                + str(self.begin) + splitchar2
                + str(self.end) + splitchar1 + self.text)

def create_record(parts):
    record = AnnotationRecord()
    record.label = parts[0]
    middle_parts = parts[1].split(splitchar2)
    record.type = middle_parts[0]
    record.begin = middle_parts[1]
    record.end = middle_parts[2]
    record.text = parts[2]
    return record

def main(filename, out_filename):
    fo = open(filename, 'r')
    lines = fo.readlines()
    fo.close()

    annotation_records = []
    for line in lines:
        parts = line.split(splitchar1)
        annotation_records.append(create_record(parts))

    # sort based upon begin
    sorted_records = sorted(annotation_records, key=lambda a: int(a.begin))

    # now relabel based upon the sorted order
    label_value = 1
    for sorted_record in sorted_records:
        sorted_record.label = 'T' + str(label_value)
        label_value += 1

    # now write the resulting file to disk
    fo = open(out_filename, 'w')
    for sorted_record in sorted_records:
        fo.write(sorted_record.__repr__())
    fo.close()

# format of .ann file is T# Type Start End Text
# args are input file, output file
if __name__ == '__main__':
    parser = optparse.OptionParser(formatter=optparse.TitledHelpFormatter(),
                                   usage=globals()['__doc__'],
                                   version='$Id$')
    parser.add_option('-v', '--verbose', action='store_true',
                      default=False, help='verbose output')
    (options, args) = parser.parse_args()
    if len(args) < 2:
        parser.error('missing argument')
    main(args[0], args[1])
    sys.exit(0)
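For reference, assuming the script above is saved as, say, reorder_ann.py (the file name is not given here), it would be run with the input .ann file and the output file as two positional arguments, e.g. python reorder_ann.py doc.ann doc_sorted.ann.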