Copy Feature Classes in a String - copy

I am trying to copy only feature classes I specify rather than copying them all using a if loop. I tried the below and it executes, but does not copy the files.
import arcpy
import os
arcpy.env.workspace = r'c:\arcgis\ArcTutor\ModelBuilder\GTKModelbuilder\Data\RFDA Shapefiles'
outWorkspace = r'C:\Output'
fcList = arcpy.ListFeatureClasses()
for shapefile in fcList:
if fcList == ('BedfordCalls.shp','ColleyvilleCalls.shp','HurstCalls.shp','KellersCalls.shp'):
outFeatureClass = os.path.join(outWorkspace,shapefile.strip(".shp"))
arcpy.CopyFeatures_management(shapefile, outFeatureClass)

import arcpy
import os
arcpy.env.workspace = r'c:\Shapefile'
outWorkspace = r'C:\Output'
fcList = arcpy.ListFeatureClasses()
for shapefile in fcList:
if'BedfordCalls.shp'or'ColleyvilleCalls.shp'or'HurstCalls.shp'or'KellersCalls.shp' in fcList:
outFeatureClass = os.path.join(outWorkspace,shapefile.strip(".shp"))
arcpy.CopyFeatures_management(shapefile, outFeatureClass)

The below should do what you want more effectively (noting comments on earlier answer):
import arcpy
import os
arcpy.env.workspace = r'c:\Shapefile'
outWorkspace = r'C:\Output'
fcList = ['BedfordCalls.shp', 'ColleyvilleCalls.shp', 'HurstCalls.shp', 'KellersCalls.shp']
for shapefile in fcList:
outFeatureClass = os.path.join(outWorkspace, shapefile)
arcpy.CopyFeatures_management(shapefile, outFeatureClass)

Related

Bokeh callback function with image editing not working with pyscript

I am using pyscript to take an image and process it. I am using a bokeh widget to make changes to the image through callback function. The callback function works but stops when I use Pillow to convert the image to greyscale.
Imports:
import numpy as np
from PIL import Image
import js
from js import document, console, Uint8Array, window, File, Bokeh, JSON, localStorage
from pyodide import create_proxy
import asyncio
import io
import json
from bokeh import __version__
from bokeh.document import Document
from bokeh.embed.util import OutputDocumentFor, standalone_docs_json_and_render_items
from bokeh.models import Slider, Div
from bokeh.layouts import Row
from bokeh.protocol.messages.patch_doc import process_document_events
def callback(attr, old, new):
div.text = f'Symbol is: {new}'
first_item = localStorage.getItem('key')
print(first_item)
my_image = imageToBinary(first_item)
print(my_image)
rs_img = convertBintoRS(my_image,8)
rs_img = encodeRS(rs_img, 5, 6)
rs_img = seperate_img(rs_img,5+6,5)
rs_img = RStoBinary(rs_img,8)
rs_img = binaryToImage(rs_img)
display_image(rs_img,"reed_s")
p.on_change('value', callback)
row = Row(children=[p, div])
def imageToBinary(img):
print('enter imageToBinary')
img = img.convert('L')
print('convert to greyscale')
print(img)
img = np.array(img)
print('convert numpy')
x,y = np.shape(img)
print(x,y)
for i in range(0,x):
for j in range(0,y):
if img[i][j] < 255/2:
img[i][j] = 0
else:
img[i][j] = 1
print('success')
return img
I am also including this which is some code that is relevant to the bokeh widget.
def doc_json(model, target):
with OutputDocumentFor([model]) as doc:
doc.title = ""
docs_json, _ = standalone_docs_json_and_render_items(
[model], suppress_callback_warning=False
)
doc_json = list(docs_json.values())[0]
root_id = doc_json['roots']['root_ids'][0]
return doc, json.dumps(dict(
target_id = target,
root_id = root_id,
doc = doc_json,
version = __version__,
))
def _link_docs(pydoc, jsdoc):
def jssync(event):
if getattr(event, 'setter_id', None) is not None:
return
events = [event]
json_patch = jsdoc.create_json_patch_string(pyodide.ffi.to_js(events))
pydoc.apply_json_patch(json.loads(json_patch))
jsdoc.on_change(pyodide.ffi.create_proxy(jssync), pyodide.ffi.to_js(False))
def pysync(event):
json_patch, buffers = process_document_events([event], use_buffers=True)
buffer_map = {}
for (ref, buffer) in buffers:
buffer_map[ref['id']] = buffer
jsdoc.apply_json_patch(JSON.parse(json_patch), pyodide.ffi.to_js(buffer_map), setter_id='js')
pydoc.on_change(pysync)
async def show(plot, target):
pydoc, model_json = doc_json(plot, target)
views = await Bokeh.embed.embed_item(JSON.parse(model_json))
jsdoc = views[0].model.document
_link_docs(pydoc, jsdoc)
asyncio.ensure_future(show(row, 'myplot'))
The callback function stops within imageToBinary at the print('enter imageToBinary')
I know the function works outside the callback function.
I have printed the item going into localStorage and out of it to check it is the same but type() wouldn't work and gave an error.

AttributeError: 'NoneType' object has no attribute 'impl'

After running my program, I am getting a Output, but I also get this error message.
Exception ignored in: <function Model.__del__ at 0x7f02ba33b430>
Traceback (most recent call last):
File "/usr/local/lib/python3.8/dist-packages/deepspeech/__init__.py", line 43, in __del__
AttributeError: 'NoneType' object has no attribute 'impl'
Here is the code - Here I am trying to convert an wv audio file into text using deepspeech library.
from deepspeech import Model
import numpy as np
import os
import wave
import json
from IPython.display import Audio
from IPython.display import clear_output
model_file_path = 'deepspeech-0.8.2-models.pbmm'
lm_file_path = 'deepspeech-0.8.2-models.scorer'
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18
model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)
def read_wav_file(filename):
with wave.open(filename, 'rb') as w:
rate = w.getframerate()
frames = w.getnframes()
buffer = w.readframes(frames)
return buffer, rate
def transcribe(audio_file):
buffer, rate = read_wav_file(audio_file)
data16 = np.frombuffer(buffer, dtype=np.int16)
return model.stt(data16)
print(transcribe('speech.wav'))
Importing IPython is causing the issue, try running your code without it and it should work.
from deepspeech import Model
import numpy as np
import os
import wave
import json
model_file_path = 'deepspeech-0.8.2-models.pbmm'
lm_file_path = 'deepspeech-0.8.2-models.scorer'
beam_width = 100
lm_alpha = 0.93
lm_beta = 1.18
model = Model(model_file_path)
model.enableExternalScorer(lm_file_path)
model.setScorerAlphaBeta(lm_alpha, lm_beta)
model.setBeamWidth(beam_width)
def read_wav_file(filename):
with wave.open(filename, 'rb') as w:
rate = w.getframerate()
frames = w.getnframes()
buffer = w.readframes(frames)
return buffer, rate
def transcribe(audio_file):
buffer, rate = read_wav_file(audio_file)
data16 = np.frombuffer(buffer, dtype=np.int16)
return model.stt(data16)
print(transcribe('speech.wav'))

insert_many not working when adding one more case

I used the following code to insert tab0011.json into portal_db.acs:
from pymongo import MongoClient
import json
client = MongoClient()
db = client.portal_db
db.acs.drop()
acs = db.acs
data_acs = json.load(open('/vagrant/data/tab0011.json', 'r'))
result_acs = acs.insert_many(data_acs)
The code has stored the tab0011.json data correclty. However, I tried the following code to insert tab0011.json into portal_db.acs and tab0007.json into portal_db.tab0007. Both collections were created but with none inside, i.e., empty:
from pymongo import MongoClient
import json
client = MongoClient()
db = client.portal_db
db.acs.drop()
acs = db.acs
db.tab0007.drop()
tab0007 = db.tab0007
data_acs = json.load(open('/vagrant/data/tab0011.json', 'r'))
data_tab0007 = json.load(open('/vagrant/data/tab0007.json', 'r'))
result_acs = acs.insert_many(data_acs)
result_tab0007 = tab0007.insert_many(data_tab0007)
Not quite sure why.
If the file extension is .json I am able to read the data via the methods used in your code and insert them into collections in the same database. I can see the data that I used in both the respective collections
Maybe you can try doing it this way:
from pymongo import MongoClient
import json
client = MongoClient(host="localhost", port=27017)
db = client["portal_db"]
acs = db.get_collection("acs")
tab0007 = db.get_collection("tab0007")
db.drop_collection("acs")
db.drop_collection("tab0007")
data_acs = json.load(open('/vagrant/data/tab0011.json', 'r'))
data_tab0007 = json.load(open('/vagrant/data/tab0007.json', 'r'))
acs_inserts = acs.insert_many(data_acs)
tab_inserts = tab0007.insert_many(data_tab0007)
print(acs_insert.inserted_ids)
print(tab_inserts.inserted_ids)
The last two lines would print the ObjectIds of the Documents inserted.

performance loop while get data from gridFS

I'm using pymongo to get the data from gridFS, the loop while getting this data is really slow.
Is it possible to avoid that loop, or is any way to do that faster??
from pymongo import MongoClient
from pprint import pprint
import bson
from gridfs import GridFS
import json
import pandas as pd
client = MongoClient()
client.database_names()
db = client['MC']
fs = GridFS(db, collection="MC")
db.collection_names(include_system_collections=False)
collectionFiles = db['MC.files']
collectionChunk = db['MC.chunks']
files = db['MC.files'].find({"metadata.Feature0": "00011"})
for n in files:
file_id = n['_id']
chunks = db['MotorCalculo.chunks'].find({"files_id": file_id})
bsondData = (fs.get(file_id).read())
decData = bsondData.decode()
jsonData = json.loads(decData)
F1 = jsonData['Feature1']
F2 = jsonData['Feature2']
If you have enough RAM, it should be faster to access file groups and not make as many calls to mongo.
You can try something like this:
batch_file_id = ['#1', '#2', '#3', '#4']
chunks = db['MotorCalculo.chunks'].find('{\"files_id\" : {\"$in\":[{\"$oid\":\"' + '\"}, {\"$oid\":\"'.join(batch_file_id) + '\"}]}}')
...
batch_file_id
Out[1]: ['#1', '#2', '#3', '#4']
'{\"files_id\" : {\"$in\":[{\"$oid\":\"' + '\"}, {\"$oid\":\"'.join(batch_file_id) + '\"}]}}'
Out[2]: '{"files_id" : {"$in":[{"$oid":"#1"}, {"$oid":"#2"}, {"$oid":"#3"}, {"$oid":"#4"}]}}'
Regards!!

Fiji Jython: Macro.getOptions() returns error "'Macro' is not defined"

I'm trying to pass arguments from a macro to a plugin being run in the macro, but the plugin is returning an error when I try to retrieve the string argument being passed. I believe this means I need to import a class, but everything I've tried to far hasn't worked. Here's the code:
from ij import IJ, ImagePlus, WindowManager, ImageStack
from ij.process import FloatProcessor, ImageProcessor, ByteProcessor
from ij.gui import ProgressBar, PointRoi
from ij.measure import ResultsTable
import ij
import ij.macro.Interpreter
import java.util.ArrayList as ArrayList
#stackOpen = IJ.openImage("/home/srammie/Pictures/Chain_Demo1/chainStack1.tif")
imageOpen = WindowManager.getCurrentImage()
imageOpenTitle = imageOpen.getTitle()
imageOpenTitle = imageOpenTitle.split("-")
amplitude = imageOpenTitle[0]
#imageOpen = IJ.openImage("/home/srammie/Pictures/chain_slap_eval1/summaryImage1.tif")
summaryImage = imageOpen.getProcessor()
print amplitude + " image being analyzed!"
dupImageOpen = imageOpen.createImagePlus()
dupSummaryImage = imageOpen.getProcessor().duplicate()
dupSummaryImageTitle = amplitude + "_resultsImage"
dupImageOpen.setProcessor(dupSummaryImageTitle, dupSummaryImage)
lowerBoundArray = ArrayList()
lowerBoundArrayRight = ArrayList()
lowerBoundArrayLeft = ArrayList()
upperBoundArray = ArrayList()
upperBoundArrayRight = ArrayList()
upperBoundArrayLeft = ArrayList()
deltaArray = ArrayList()
deltaArrayRight = ArrayList()
deltaArrayLeft = ArrayList()
largestDelta = 0
smallestDelta = 100
#print "Break"
pRoi = ArrayList()
arguments = Macro.getOptions()
arg = arguments.split(" ")
for i in range(0, len(arg)-1):
argString = arg[i].split("=")
pRoi.add(argString[1])
This script was working for me the first time it was created, but after restarting Fiji, the script started returning the error
NameError: name 'Macro' is not defined.
Any suggestions? Thanks!
Edit:
Restarting Fiji again seems to have made this problem go away.
You did not import the Macro class. I changed the imports to:
from ij import IJ, ImagePlus, WindowManager, ImageStack, Macro
from ij.process import FloatProcessor, ImageProcessor, ByteProcessor
from ij.gui import ProgressBar, PointRoi
from ij.measure import ResultsTable
from ij.macro import Interpreter
from java.util import ArrayList