AttributeError: 'NoneType' object has no attribute 'impl' - mozilla-deepspeech

After running my program, I get the expected output, but I also get this error message.
Exception ignored in: <function Model.__del__ at 0x7f02ba33b430>
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/deepspeech/__init__.py", line 43, in __del__
AttributeError: 'NoneType' object has no attribute 'impl'
Here is the code — I am trying to convert a WAV audio file into text using the DeepSpeech library.
from deepspeech import Model
import numpy as np
import os
import wave
import json
from IPython.display import Audio
from IPython.display import clear_output
# Paths to the pre-trained DeepSpeech 0.8.2 acoustic model and external scorer.
model_file_path = 'deepspeech-0.8.2-models.pbmm'
lm_file_path = 'deepspeech-0.8.2-models.scorer'
# Decoder hyper-parameters: beam width plus the scorer's alpha/beta weights.
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18
# Load the model and configure decoding with the external scorer.
model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)
def read_wav_file(filename):
    """Read a WAV file and return its raw PCM frame bytes and sample rate."""
    with wave.open(filename, 'rb') as wav:
        sample_rate = wav.getframerate()
        frame_count = wav.getnframes()
        raw_bytes = wav.readframes(frame_count)
    return raw_bytes, sample_rate
def transcribe(audio_file):
    """Run speech-to-text on a 16-bit mono WAV file and return the transcript."""
    raw_bytes, _rate = read_wav_file(audio_file)
    samples = np.frombuffer(raw_bytes, dtype=np.int16)
    return model.stt(samples)

print(transcribe('speech.wav'))

Importing IPython is causing the issue, try running your code without it and it should work.
from deepspeech import Model
import numpy as np
import os
import wave
import json
# Model and scorer files for DeepSpeech 0.8.2.
model_file_path = 'deepspeech-0.8.2-models.pbmm'
lm_file_path = 'deepspeech-0.8.2-models.scorer'
# Decoding settings: beam width and scorer alpha/beta weights.
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18
# Build the model, attach the scorer, and apply the decoder settings.
model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)
def read_wav_file(filename):
    """Open *filename* as a WAV file; return (frame bytes, sample rate)."""
    wav = wave.open(filename, 'rb')
    try:
        rate = wav.getframerate()
        buffer = wav.readframes(wav.getnframes())
    finally:
        wav.close()
    return buffer, rate
def transcribe(audio_file):
    """Decode the speech in *audio_file* using the globally loaded model."""
    pcm_bytes, sample_rate = read_wav_file(audio_file)
    audio = np.frombuffer(pcm_bytes, dtype=np.int16)
    return model.stt(audio)

print(transcribe('speech.wav'))

Related

Bokeh callback function with image editing not working with pyscript

I am using pyscript to take an image and process it. I am using a bokeh widget to make changes to the image through callback function. The callback function works but stops when I use Pillow to convert the image to greyscale.
Imports:
import numpy as np
from PIL import Image
import js
from js import document, console, Uint8Array, window, File, Bokeh, JSON, localStorage
from pyodide import create_proxy
import asyncio
import io
import json
from bokeh import __version__
from bokeh.document import Document
from bokeh.embed.util import OutputDocumentFor, standalone_docs_json_and_render_items
from bokeh.models import Slider, Div
from bokeh.layouts import Row
from bokeh.protocol.messages.patch_doc import process_document_events
def callback(attr, old, new):
    # Bokeh on_change handler: runs whenever the widget's 'value' changes.
    div.text = f'Symbol is: {new}'
    # NOTE(review): localStorage.getItem returns a JS string, but
    # imageToBinary() immediately calls .convert('L') on its argument,
    # which is a PIL.Image method -- confirm the stored value is
    # deserialized back into an Image before this call; this matches the
    # reported stall right after "enter imageToBinary".
    first_item = localStorage.getItem('key')
    print(first_item)
    my_image = imageToBinary(first_item)
    print(my_image)
    # Reed-Solomon encode/reshape round trip on the binarized image.
    rs_img = convertBintoRS(my_image, 8)
    rs_img = encodeRS(rs_img, 5, 6)
    rs_img = seperate_img(rs_img, 5 + 6, 5)
    rs_img = RStoBinary(rs_img, 8)
    rs_img = binaryToImage(rs_img)
    display_image(rs_img, "reed_s")

p.on_change('value', callback)
row = Row(children=[p, div])
def imageToBinary(img):
    """Convert a PIL image to a binary (0/1) 2-D numpy array.

    The image is converted to greyscale, then each pixel is thresholded
    at the midpoint (255/2): values below become 0, the rest become 1.

    Parameters
    ----------
    img : PIL.Image.Image
        Input image; must support .convert('L').

    Returns
    -------
    numpy.ndarray
        2-D array (same dtype as the greyscale image) of 0s and 1s.
    """
    print('enter imageToBinary')
    img = img.convert('L')  # greyscale: one 8-bit channel per pixel
    print('convert to greyscale')
    print(img)
    img = np.array(img)
    print('convert numpy')
    x, y = np.shape(img)
    print(x, y)
    # Vectorized threshold replaces the original per-pixel Python double
    # loop, which did O(x*y) interpreter-level work and could stall the
    # browser callback on large images.  Same result: <127.5 -> 0, else 1.
    img = (img >= 255 / 2).astype(img.dtype)
    print('success')
    return img
I am also including this which is some code that is relevant to the bokeh widget.
def doc_json(model, target):
    """Serialize a Bokeh model into the JSON payload Bokeh.embed.embed_item expects.

    Returns a tuple of (the Bokeh Document wrapping *model*, a JSON string
    with the target_id/root_id/doc/version fields the JS embedder consumes).
    """
    with OutputDocumentFor([model]) as doc:
        doc.title = ""
        docs_json, _ = standalone_docs_json_and_render_items(
            [model], suppress_callback_warning=False
        )
    # docs_json maps one generated doc id to its serialized document.
    doc_json = list(docs_json.values())[0]
    root_id = doc_json['roots']['root_ids'][0]
    return doc, json.dumps(dict(
        target_id = target,
        root_id = root_id,
        doc = doc_json,
        version = __version__,
    ))
def _link_docs(pydoc, jsdoc):
    """Two-way sync of patch events between the Python and JS Bokeh documents."""
    def jssync(event):
        # Skip patches tagged with a setter_id: those originated on the
        # Python side and echoing them back would loop forever.
        if getattr(event, 'setter_id', None) is not None:
            return
        events = [event]
        json_patch = jsdoc.create_json_patch_string(pyodide.ffi.to_js(events))
        pydoc.apply_json_patch(json.loads(json_patch))
    # NOTE(review): this uses the bare name `pyodide`, but the imports above
    # only do `from pyodide import create_proxy` -- an `import pyodide` (with
    # pyodide.ffi available) is needed for these lookups to resolve.
    jsdoc.on_change(pyodide.ffi.create_proxy(jssync), pyodide.ffi.to_js(False))

    def pysync(event):
        # Serialize the Python-side event (plus any binary buffers) and
        # apply it to the JS document, tagged so jssync ignores the echo.
        json_patch, buffers = process_document_events([event], use_buffers=True)
        buffer_map = {}
        for (ref, buffer) in buffers:
            buffer_map[ref['id']] = buffer
        jsdoc.apply_json_patch(JSON.parse(json_patch), pyodide.ffi.to_js(buffer_map), setter_id='js')
    pydoc.on_change(pysync)
async def show(plot, target):
    # Embed the Bokeh model into the DOM element with id *target*,
    # then wire up two-way patch syncing between Python and JS docs.
    pydoc, model_json = doc_json(plot, target)
    views = await Bokeh.embed.embed_item(JSON.parse(model_json))
    jsdoc = views[0].model.document
    _link_docs(pydoc, jsdoc)

asyncio.ensure_future(show(row, 'myplot'))
The callback function stops within imageToBinary at the print('enter imageToBinary')
I know the function works outside the callback function.
I have printed the item going into localStorage and out of it to check it is the same but type() wouldn't work and gave an error.

Can we give Max & Min values statically in normalization using MinMaxScaler Sklearn?

So, I have this question and have been looking for answers.
Below is the input post request
{
"emotive_Score": [0.89,0.57,0.089,0,0.004,0,0],
"sentiment_Score": [1.521894,-6.4523187],
"mood_score":[40]
}
And I'm using the following code to scale the values.
from flask import Flask, request
from flask_restful import Resource, Api
from json import dumps
from sklearn import preprocessing
import numpy as np
class MoodScore(Resource):
    """REST resource that min-max scales emotive/sentiment scores from a POST body."""

    def post(self):
        """Handle POST /moodScore.

        Expects JSON with 'emotive_Score' (7 floats), 'sentiment_Score'
        (2 floats) and 'mood_score'; returns the scaled values as strings.
        """
        json_data = request.get_json(force=True)
        if not json_data:
            return {'message': 'No input data provided'}, 400
        x = request.json['emotive_Score']
        x1 = request.json['sentiment_Score']
        x2 = request.json['mood_score']  # read but currently unused
        # --- Normalisation for Emotive Score ---
        xEmotive = np.array(x)
        # Indices 4 and 6 are summed as "positive", the rest as "negative".
        PositiveEmotive = xEmotive[4] + xEmotive[6]
        NegativeEmotive = xEmotive[0] + xEmotive[1] + xEmotive[2] + xEmotive[3] + xEmotive[5]
        # `np.float` was removed in NumPy 1.20+; the builtin float works the
        # same here, and the str() round-trips the original did were redundant.
        xM = np.array([PositiveEmotive, NegativeEmotive], dtype=float).reshape(-1, 1)
        minmaxscaler = preprocessing.MinMaxScaler(feature_range=(0, 1))
        Emotive = minmaxscaler.fit_transform(xM)
        # --- Normalisation for Sentiment Score ---
        xSentiment = np.array(x1)
        xM1 = np.array([xSentiment[0], xSentiment[1]], dtype=float).reshape(-1, 1)
        minmaxscaler1 = preprocessing.MinMaxScaler(feature_range=(-1, 1))
        Sentiment = minmaxscaler1.fit_transform(xM1)
        # NOTE: fit_transform on exactly two samples always maps them onto the
        # endpoints of feature_range.  To scale against *fixed* min/max bounds
        # instead, fit the scaler once on np.array([[min], [max]]) and call
        # only transform() here.
        return {'PositiveEmotive': str(Emotive[0]),
                'NegativeEmotive': str(Emotive[1]),
                'PositiveSentiment': str(Sentiment[0]),
                'NegativeSentiment': str(Sentiment[1]),
                'FinalValue': str(Emotive[0] + Emotive[1] + Sentiment[0] + Sentiment[1])}
        # return {'FinalScore': str(Sentiment)}
# Flask application setup: expose the MoodScore resource at POST /moodScore.
app = Flask(__name__)
api = Api(app)
api.add_resource(MoodScore, '/moodScore')
if __name__ == '__main__':
    # Listen on all interfaces, port 5005.
    app.run(port='5005', host="0.0.0.0")
And I'm getting the following as output.
{
"PositiveEmotive": "[0.]",
"NegativeEmotive": "[1.]",
"PositiveSentiment": "[1.]",
"NegativeSentiment": "[-1.]",
"FinalValue": "[1.]"
}
I just want to know whether I can give static values to Min & Max during normalization calculation so that I can get the desired result as below
{
"PositiveEmotive": "[0.546]",
"NegativeEmotive": "[1.]",
"PositiveSentiment": "[0.598]",
"NegativeSentiment": "[-0.6879.]",
"FinalValue": "[1.4561]"
}

performance loop while get data from gridFS

I'm using pymongo to get the data from gridFS, the loop while getting this data is really slow.
Is it possible to avoid that loop, or is there any way to do this faster?
from pymongo import MongoClient
from pprint import pprint
import bson
from gridfs import GridFS
import json
import pandas as pd
# Connect to MongoDB and open the GridFS bucket backed by the "MC" collection.
client = MongoClient()
# `database_names()` / `collection_names()` were deprecated and then removed
# in PyMongo 4; the list_* equivalents below are the supported spelling.
# (Both return values were unused in the original as well.)
client.list_database_names()
db = client['MC']
fs = GridFS(db, collection="MC")
db.list_collection_names()
collectionFiles = db['MC.files']
collectionChunk = db['MC.chunks']
# Cursor over the GridFS file documents matching the metadata filter.
files = db['MC.files'].find({"metadata.Feature0": "00011"})
# Iterate over the matching GridFS files and extract two features from each.
for n in files:
    file_id = n['_id']
    # The per-file chunks query was removed: its cursor was never consumed,
    # and it targeted a different collection ('MotorCalculo.chunks') than the
    # one the data lives in ('MC.chunks').  fs.get() below already reads the
    # chunks for this file, so the extra round trip was pure overhead.
    bsondData = fs.get(file_id).read()
    decData = bsondData.decode()
    jsonData = json.loads(decData)
    # NOTE(review): F1/F2 are overwritten on every pass, so after the loop
    # they hold only the last file's features -- append to lists instead if
    # every file's features are needed.
    F1 = jsonData['Feature1']
    F2 = jsonData['Feature2']
If you have enough RAM, it should be faster to access file groups and not make as many calls to mongo.
You can try something like this:
batch_file_id = ['#1', '#2', '#3', '#4']
# NOTE(review): PyMongo's find() expects a filter *dict*, not a JSON string;
# the string built here only illustrates the {"$in": [{"$oid": ...}]} shape
# and would need json.loads (or a dict literal with ObjectId values) to run.
chunks = db['MotorCalculo.chunks'].find('{\"files_id\" : {\"$in\":[{\"$oid\":\"' + '\"}, {\"$oid\":\"'.join(batch_file_id) + '\"}]}}')
...
batch_file_id
Out[1]: ['#1', '#2', '#3', '#4']
'{\"files_id\" : {\"$in\":[{\"$oid\":\"' + '\"}, {\"$oid\":\"'.join(batch_file_id) + '\"}]}}'
Out[2]: '{"files_id" : {"$in":[{"$oid":"#1"}, {"$oid":"#2"}, {"$oid":"#3"}, {"$oid":"#4"}]}}'
Regards!!

Fiji Jython: Macro.getOptions() returns error "'Macro' is not defined"

I'm trying to pass arguments from a macro to a plugin being run inside the macro, but the plugin returns an error when I try to retrieve the string argument being passed. I believe this means I need to import a class, but everything I've tried so far hasn't worked. Here's the code:
from ij import IJ, ImagePlus, WindowManager, ImageStack
from ij.process import FloatProcessor, ImageProcessor, ByteProcessor
from ij.gui import ProgressBar, PointRoi
from ij.measure import ResultsTable
import ij
import ij.macro.Interpreter
import java.util.ArrayList as ArrayList
#stackOpen = IJ.openImage("/home/srammie/Pictures/Chain_Demo1/chainStack1.tif")
# Grab the frontmost image; its title encodes the amplitude before the first '-'.
imageOpen = WindowManager.getCurrentImage()
imageOpenTitle = imageOpen.getTitle()
imageOpenTitle = imageOpenTitle.split("-")
amplitude = imageOpenTitle[0]
#imageOpen = IJ.openImage("/home/srammie/Pictures/chain_slap_eval1/summaryImage1.tif")
summaryImage = imageOpen.getProcessor()
print amplitude + " image being analyzed!"
# Work on a duplicate so the original image/processor stays untouched.
dupImageOpen = imageOpen.createImagePlus()
dupSummaryImage = imageOpen.getProcessor().duplicate()
dupSummaryImageTitle = amplitude + "_resultsImage"
dupImageOpen.setProcessor(dupSummaryImageTitle, dupSummaryImage)
# Accumulators for per-side bound/delta measurements (filled later).
lowerBoundArray = ArrayList()
lowerBoundArrayRight = ArrayList()
lowerBoundArrayLeft = ArrayList()
upperBoundArray = ArrayList()
upperBoundArrayRight = ArrayList()
upperBoundArrayLeft = ArrayList()
deltaArray = ArrayList()
deltaArrayRight = ArrayList()
deltaArrayLeft = ArrayList()
largestDelta = 0
smallestDelta = 100
#print "Break"
pRoi = ArrayList()
# NOTE(review): `Macro` is never imported above -- this line raises
# "NameError: name 'Macro' is not defined" unless `from ij import Macro`
# is added to the imports (this is the error the question reports).
arguments = Macro.getOptions()
arg = arguments.split(" ")
# NOTE(review): range(0, len(arg)-1) skips the final token of the split --
# confirm the last argument is deliberately ignored (e.g. a trailing
# empty string or newline from the macro options).
for i in range(0, len(arg)-1):
    argString = arg[i].split("=")
    pRoi.add(argString[1])
This script was working for me the first time it was created, but after restarting Fiji, the script started returning the error
NameError: name 'Macro' is not defined.
Any suggestions? Thanks!
Edit:
Restarting Fiji again seems to have made this problem go away.
You did not import the Macro class. I changed the imports to:
from ij import IJ, ImagePlus, WindowManager, ImageStack, Macro
from ij.process import FloatProcessor, ImageProcessor, ByteProcessor
from ij.gui import ProgressBar, PointRoi
from ij.measure import ResultsTable
from ij.macro import Interpreter
from java.util import ArrayList

Copy Feature Classes in a String

I am trying to copy only the feature classes I specify, rather than copying them all, using an if test inside the loop. I tried the code below and it executes, but it does not copy the files.
import arcpy
import os
# Source workspace containing the shapefiles to copy.
arcpy.env.workspace = r'c:\arcgis\ArcTutor\ModelBuilder\GTKModelbuilder\Data\RFDA Shapefiles'
outWorkspace = r'C:\Output'
fcList = arcpy.ListFeatureClasses()
for shapefile in fcList:
    # NOTE(review): this compares the whole *list* to a *tuple*, which is
    # never equal, so the body never executes and nothing is copied.  The
    # intended test is presumably `if shapefile in (...)`.
    if fcList == ('BedfordCalls.shp','ColleyvilleCalls.shp','HurstCalls.shp','KellersCalls.shp'):
        # NOTE(review): str.strip(".shp") removes any of the characters
        # '.', 's', 'h', 'p' from both ends (e.g. 'HurstCalls.shp' ->
        # 'HurstCall'); os.path.splitext would drop just the extension.
        outFeatureClass = os.path.join(outWorkspace,shapefile.strip(".shp"))
        arcpy.CopyFeatures_management(shapefile, outFeatureClass)
import arcpy
import os
arcpy.env.workspace = r'c:\Shapefile'
outWorkspace = r'C:\Output'
fcList = arcpy.ListFeatureClasses()
for shapefile in fcList:
    # NOTE(review): `if 'BedfordCalls.shp' or ...` tests each string literal
    # for truthiness, so the condition is always True and *every* feature
    # class is copied, not just the four named ones.  The intended test is
    # `if shapefile in ('BedfordCalls.shp', ...)`.
    if'BedfordCalls.shp'or'ColleyvilleCalls.shp'or'HurstCalls.shp'or'KellersCalls.shp' in fcList:
        outFeatureClass = os.path.join(outWorkspace,shapefile.strip(".shp"))
        arcpy.CopyFeatures_management(shapefile, outFeatureClass)
The below should do what you want more effectively (noting comments on earlier answer):
import arcpy
import os

# Copy an explicit list of shapefiles from the workspace to the output folder.
arcpy.env.workspace = r'c:\Shapefile'
outWorkspace = r'C:\Output'

fcList = ['BedfordCalls.shp', 'ColleyvilleCalls.shp', 'HurstCalls.shp', 'KellersCalls.shp']
for shapefile in fcList:
    # Keep the original file name (including .shp) in the destination path.
    target_path = os.path.join(outWorkspace, shapefile)
    arcpy.CopyFeatures_management(shapefile, target_path)