Connecting nodes using a walker - Jaseci

Can we connect nodes like this inside of root?
walker create_graph {
    has first_node = "";
    has nd;
    root {
        nd = spawn node::soc(imprint={"list_imprint": bienc[intent.str], "name": intent.str});
        // first_node = jac:uuid*******
        // nd = jac:uuid******
        first_node +[intent_transition(intent=intent.str)]+> nd;
    }
}
The error occurs at the line: first_node +[intent_transition(intent=intent.str)]+>nd;
"errors": [
"conv_walkers.jac:create_graph - line 71, col 8 - rule atom - Incompatible type for object jac:uuid:79428582-055c-4f06-8867-747c2abf18cf - str, expecting [<class 'jaseci.graph.node.Node'>, <class 'jaseci.jac.jac_set.JacSet'>]",
"conv_walkers.jac:create_graph - line 71, col 0 - rule connect - 'str' object has no attribute 'obj_list'"
]
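The error message itself points at the cause: by the time the connect statement runs, first_node is still a plain str (its default value ""), while the +[...]+> operator expects a node or a JacSet on both sides. A minimal sketch of one way around it, assuming first_node is meant to be a node spawned in the same walker (the imprint payloads and the intent value here are placeholders, not the original data):

walker create_graph {
    has nd, first_node;
    root {
        // spawn both endpoints as nodes before connecting them
        first_node = spawn node::soc(imprint={"name": "first"});
        nd = spawn node::soc(imprint={"name": "second"});
        first_node +[intent_transition(intent="greet")]+> nd;
    }
}

If first_node really holds a jac:uuid string coming from elsewhere, it has to be resolved back to the node object (or taken from a node-typed variable) before it can be used in a connect expression.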

Related

How to do semantic segmentation with Detectron2

I'm using Detectron2 to do instance segmentation as in the tutorial. Below is the code:
import os

from detectron2.config import CfgNode
import detectron2.data.transforms as T
from detectron2.data import build_detection_train_loader, build_detection_test_loader, DatasetMapper
from detectron2.engine import DefaultTrainer

os.makedirs(cfg.OUTPUT_DIR, exist_ok=True)
transform_list = [
    # T.Resize(shape=(200,300)),
    T.RandomRotation(angle=90.0),
    # T.RandomContrast(intensity_min=0.75, intensity_max=1.25),
    # T.RandomBrightness(intensity_min=0.75, intensity_max=1.25),
    # T.RandomSaturation(intensity_min=0.75, intensity_max=1.25),
    # T.RandomLighting(scale=0.1),
    T.RandomFlip(),
    # T.RandomCrop(crop_type="absolute", crop_size=(180, 270))
]
# custom_mapper = get_custom_mapper(transform_list)
custom_mapper = DatasetMapper(
    cfg,
    is_train=True,
    augmentations=transform_list,
    use_instance_mask=True,
    instance_mask_format="bitmask",
)

class CustomTrainer(DefaultTrainer):
    @classmethod
    def build_test_loader(cls, cfg: CfgNode, dataset_name):
        return build_detection_test_loader(cfg, dataset_name, mapper=custom_mapper)

    @classmethod
    def build_train_loader(cls, cfg: CfgNode):
        return build_detection_train_loader(cfg, mapper=custom_mapper)

cfg.INPUT.MASK_FORMAT = 'bitmask'
cfg.DATALOADER.FILTER_EMPTY_ANNOTATIONS = False
trainer = CustomTrainer(cfg)
# trainer = DefaultTrainer(cfg)
# trainer.resume_or_load(resume=False)
# trainer.train()
However, in this case I don't care about instances; what I really want is semantic segmentation, but there is no tutorial or example for that, nor do I see a semantic model I can start with. Misc/semantic_R_50_FPN_1x.yaml throws an error saying there is no pretrained model available.
So instead I'm trying to use SemSegEvaluator rather than the COCO evaluator, to get metrics about the semantic segmentation rather than the instances. Below is the code:
from detectron2.evaluation import COCOEvaluator, inference_on_dataset, SemSegEvaluator
from detectron2.data import build_detection_test_loader
cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4
# evaluator = COCOEvaluator(val_dataset_name, output_dir=os.path.join(cfg.OUTPUT_DIR, 'val'), use_fast_impl=False, tasks=['segm'])
evaluator = SemSegEvaluator(val_dataset_name, output_dir=os.path.join(cfg.OUTPUT_DIR, 'val'))
val_loader = build_detection_test_loader(cfg, val_dataset_name)
eval_result = inference_on_dataset(predictor.model, val_loader, evaluator)
print(eval_result)
However, this is failing with the following error:
[12/20 16:29:02 d2.data.datasets.coco]: Loaded 50 imagesss abdul in COCO format from /content/gdrive/MyDrive/SolarDetection/datasets/train8//val/labels.json
---------------------------------------------------------------------------
KeyError Traceback (most recent call last)
<ipython-input-10-61bd5aaec8ea> in <module>
3 cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = 0.4
4 # evaluator = COCOEvaluator(val_dataset_name, output_dir=os.path.join(cfg.OUTPUT_DIR, 'val'), use_fast_impl=False, tasks=['segm'])
----> 5 evaluator = SemSegEvaluator(val_dataset_name, output_dir=os.path.join(cfg.OUTPUT_DIR, 'val'))
6 val_loader = build_detection_test_loader(cfg, val_dataset_name)
7 # ipdb.set_trace(context=6)
1 frames
/content/gdrive/MyDrive/repos/detectron2/detectron2/evaluation/sem_seg_evaluation.py in <dictcomp>(.0)
69
70 self.input_file_to_gt_file = {
---> 71 dataset_record["file_name"]: dataset_record["sem_seg_file_name"]
72 for dataset_record in DatasetCatalog.get(dataset_name)
73 }
KeyError: 'sem_seg_file_name'
Any idea or hint on how I can set up and use the SemSegEvaluator?
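For what it's worth, the KeyError points at the dataset format rather than the evaluator call: SemSegEvaluator builds a mapping from each record's file_name to its sem_seg_file_name (visible in the traceback's dictcomp), so every record returned by DatasetCatalog.get(dataset_name) needs a per-pixel ground-truth mask file, which a COCO-style instance dataset does not carry. A minimal sketch of registering a dataset in that shape (the paths, image ids and class names below are placeholders, not taken from the question):

import os
from detectron2.data import DatasetCatalog, MetadataCatalog

def load_sem_seg_records():
    # Hypothetical loader: one record per image, each pointing at the image
    # and at a grayscale PNG whose pixel values are class ids.
    records = []
    for name in ["img_001", "img_002"]:  # placeholder image ids
        records.append({
            "file_name": os.path.join("datasets/val/images", name + ".jpg"),
            "sem_seg_file_name": os.path.join("datasets/val/sem_seg", name + ".png"),
            "height": 512,
            "width": 512,
        })
    return records

DatasetCatalog.register("my_val_semseg", load_sem_seg_records)
MetadataCatalog.get("my_val_semseg").set(
    stuff_classes=["background", "solar_panel"],  # placeholder class names
    ignore_label=255,
)
# evaluator = SemSegEvaluator("my_val_semseg", distributed=False, output_dir=...)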

Running ApiSpec with Flask and Swagger UI

I am trying to render my APIs in Swagger. I am using the apispec library to generate the OpenAPI spec, which I am then trying to add into my Swagger UI. I am using the MethodView available in Flask, with the code below.
import json

from flask.views import MethodView
from flask import Blueprint, after_this_request, make_response

test_data = Blueprint('test', __name__, url_prefix='/testdata')

class TestDataApi(MethodView):
    def get(self):
        """Get all TestData.
        ---
        description: Get a random data
        security:
          - ApiKeyAuth: []
        responses:
          200:
            description: Return all the TestData
            content:
              application/json:
                schema: TestDataSchema
            headers:
              - $ref: '#/components/headers/X-Total-Items'
              - $ref: '#/components/headers/X-Total-Pages'
        """
        data = TestData.query.all()
        response_data = test_schema.dump(data)
        resp = make_response(json.dumps(response_data), 200)
        return resp
This is my app.py, where I register Swagger and the corresponding views:
api = Blueprint('api', __name__, url_prefix="/api/v0")
spec = APISpec(
    title='Test Backend',
    version='v1',
    openapi_version='3.0.2',
    plugins=[MarshmallowPlugin(), FlaskPlugin()],
)
api.register_blueprint(test_data)
app.register_blueprint(api)

test_view = TestDataApi.as_view('test_api')
app.add_url_rule('/api/v0/testdata/', view_func=test_view, methods=['GET',])
spec.components.schema("TestData", schema=TestDataSchema)
spec.path(view=test_view, operations=dict(get={}))

SWAGGER_URL = '/api/v0/docs'
API_URL = 'swagger.json'
swaggerui_blueprint = get_swaggerui_blueprint(
    SWAGGER_URL,
    API_URL,
    config={
        'app_name': "Backend"
    })
app.register_blueprint(swaggerui_blueprint)
But I keep hitting the error below and cannot work out how to fix it.
File "/home/hh/Projects/testdata/src/goodbytz_app.py", line 29, in <module>
app = create_app()
File "/home/hh/Projects/testdata/src/app.py", line 100, in create_app
spec.path(view=additives_view, operations=dict(get={}))
File "/home/hh/Projects/testdata/test_poc/lib/python3.10/site-packages/apispec/core.py", line 516, in path
plugin.operation_helper(path=path, operations=operations, **kwargs)
File "/home/hh/Projects/testdata/test_poc/lib/python3.10/site-packages/apispec/ext/marshmallow/__init__.py", line 218, in operation_helper
self.resolver.resolve_operations(operations)
File "/home/hh/Projects/testdata/test_poc/lib/python3.10/site-packages/apispec/ext/marshmallow/schema_resolver.py", line 34, in resolve_operations
self.resolve_response(response)
File "/home/hh/Projects/testdata/test_poc/lib/python3.10/site-packages/apispec/ext/marshmallow/schema_resolver.py", line 183, in resolve_response
if "headers" in response:
TypeError: argument of type 'NoneType' is not iterable
You can try this one; it is simpler and generates the OpenAPI/Swagger spec automatically: https://pypi.org/project/flask-toolkits/
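Two observations on the traceback (hedged, since the failing view there is additives_view, not the TestDataApi shown above): the TypeError means one of the entries under responses: in that view's docstring parsed to None, which can happen when the YAML is indented so that the status-code key ends up with no nested content. Separately, in OpenAPI 3 the headers section of a response is a mapping keyed by header name rather than a list, so the headers block in TestDataApi.get is also worth reshaping. A sketch of the docstring with both of those adjusted (header names reused from the question, body elided):

class TestDataApi(MethodView):
    def get(self):
        """Get all TestData.
        ---
        description: Get a random data
        responses:
          200:
            description: Return all the TestData
            content:
              application/json:
                schema: TestDataSchema
            headers:
              X-Total-Items:
                $ref: '#/components/headers/X-Total-Items'
              X-Total-Pages:
                $ref: '#/components/headers/X-Total-Pages'
        """
        ...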

Python multiprocessing, can't pickle thread.lock (pymongo.Cursor)

First, let me assure you I read all the relevant answers and they didn't work for me.
I am using a multiprocessing Pool to parallelize my data creation, with MongoDB 5.0 and the pymongo client.
As you can see, I am initializing the Mongo client in the worker, as suggested by the available answers, but I still get:
TypeError: cannot pickle '_thread.lock' object
Exception ignored in: <function CommandCursor.__del__ at 0x7f96f6fff160>
Is there a way I can use multiprocessing with pymongo.Cursor?
Any help will be appreciated.
This is the function that calls the Pool:
import time
from math import ceil
from multiprocessing import Manager
from multiprocessing.pool import ThreadPool
from typing import Any, Dict, List

def get_all_valid_events(
    event_criteria: str,
    all_listings: List[str],
    earnings: List[Dict[str, Any]],
    days_around_earnings=0,
    debug=False,
    poolsize=10,
    chunk_size=100,
    lookback=30,
    lookahead=0
):
    start = time.perf_counter()
    listings = Manager().list(all_listings.copy())
    valid_events = []
    if debug:
        for i in range(ceil(len(listings)/chunk_size)):
            valid_events += get_valid_event_dates_by_listing(event_criteria, listings[i*chunk_size:(i+1)*chunk_size], earnings, days_around_earnings, debug)
    else:
        payload = list()
        for i in range(ceil(len(listings)/chunk_size)):
            payload.append(
                [
                    event_criteria,
                    listings[i*chunk_size:(i+1)*chunk_size],
                    earnings,
                    days_around_earnings,
                    debug,
                    lookback,
                    lookahead
                ]
            )
        with ThreadPool(poolsize) as pool:
            valid_events = pool.starmap(get_valid_event_dates_by_listing, payload)
    print(f"getting all valid true events took {time.perf_counter() - start} sec")
    return valid_events
And this is the worker function:
import time
from datetime import datetime
from random import shuffle
from typing import List, Tuple

import pandas as pd
from pymongo import MongoClient

from utils import earning_helpers

def get_valid_event_dates_by_listing(
    event_criteria: str,
    listings: List[str],
    earnings_list,
    days_around_earnings=0,
    debug=False,
    lookback=30,
    lookahead=0
) -> List[Tuple[Tuple[str, datetime], int]]:
    # TODO: generalize event filter
    start = time.perf_counter()
    client = MongoClient()
    db = client['stock_signals']
    cursor_candles_by_listing = db.candles.find(
        {'listing': {'$in': listings}},
        {'_id': 0, 'listing': 1, 'date': 1, 'position': 1, 'PD_BBANDS_6_lower': 1, 'close': 1, 'PD_BBANDS_6_upper': 1}
    )
    candles = list(cursor_candles_by_listing)
    df = pd.DataFrame(candles).dropna()
    minimum_position_dict = dict(df.groupby('listing').min()['position'])  # We need the minimum position by listing to filter only events that have lookback
    # Filter only the dates that satisfy the criteria
    lte_previous_bb_6_lower = df['close'] <= df[f"{event_criteria}_lower"].shift()
    gte_previous_bb_6_upper = df['close'] >= df[f"{event_criteria}_upper"].shift()
    potential_true_events_df = df[lte_previous_bb_6_lower | gte_previous_bb_6_upper]
    potential_false_events_df = df.drop(potential_true_events_df.index)
    potential_true_event_dates = potential_true_events_df[['listing', 'date', 'position']].values
    actual_true_event_dates = earning_helpers.filter_event_dates_by_earnings_and_position(potential_true_event_dates, earnings_list, minimum_position_dict, days_around_earning=days_around_earnings, lookback=lookback)
    true_event_dates = [((event_date[0], event_date[1], event_date[2]), 1) for event_date in actual_true_event_dates]
    potential_false_event_dates = potential_false_events_df[['listing', 'date', 'position']].values
    actual_false_event_dates = _random_false_events_from_listing_df(potential_false_event_dates, len(actual_true_event_dates), earnings_list, minimum_position_dict, days_around_earnings, lookback)
    false_events_dates = [((event_date[0], event_date[1], event_date[2]), 0) for event_date in actual_false_event_dates]
    all_event_dates = true_event_dates + false_events_dates
    shuffle(all_event_dates)
    print(f"getting a true sequence for listing took {time.perf_counter() - start} sec")
    return all_event_dates
And this is my main:
from utils import event_helpers, earning_helpers
from utils.queries import get_candle_listing

if __name__ == "__main__":
    all_listings = get_candle_listing.get_listings()
    earnigns = earning_helpers.get_all_earnings_dates()
    res = event_helpers.get_all_valid_events('PD_BBANDS_6', all_listings, earnigns, 2, chunk_size=100)
Full stack trace:
File "test_multiprocess.py", line 8, in <module>
res = event_helpers.get_all_valid_events('PD_BBANDS_6', all_listings, earnigns, 2, chunk_size=100)
File "/media/data/projects/ml/signal_platform/utils/event_helpers.py", line 53, in get_all_valid_events
valid_events = pool.starmap(get_valid_event_dates_by_listing, payload)
File "/home/froy001/.asdf/installs/python/3.8.12/lib/python3.8/multiprocessing/pool.py", line 372, in starmap
return self._map_async(func, iterable, starmapstar, chunksize).get()
File "/home/froy001/.asdf/installs/python/3.8.12/lib/python3.8/multiprocessing/pool.py", line 771, in get
raise self._value
File "/home/froy001/.asdf/installs/python/3.8.12/lib/python3.8/multiprocessing/pool.py", line 537, in _handle_tasks
put(task)
File "/home/froy001/.asdf/installs/python/3.8.12/lib/python3.8/multiprocessing/connection.py", line 206, in send
self._send_bytes(_ForkingPickler.dumps(obj))
File "/home/froy001/.asdf/installs/python/3.8.12/lib/python3.8/multiprocessing/reduction.py", line 51, in dumps
cls(buf, protocol).dump(obj)
TypeError: cannot pickle '_thread.lock' object
Exception ignored in: <function CommandCursor.__del__ at 0x7f46e91e21f0>
Traceback (most recent call last):
File "/home/froy001/.cache/pypoetry/virtualenvs/signal-platform-31MTNyCe-py3.8/lib/python3.8/site-packages/pymongo/command_cursor.py", line 68, in __del__
File "/home/froy001/.cache/pypoetry/virtualenvs/signal-platform-31MTNyCe-py3.8/lib/python3.8/site-packages/pymongo/command_cursor.py", line 83, in __die
File "/home/froy001/.cache/pypoetry/virtualenvs/signal-platform-31MTNyCe-py3.8/lib/python3.8/site-packages/pymongo/mongo_client.py", line 1696, in _cleanup_cursor
File "/home/froy001/.cache/pypoetry/virtualenvs/signal-platform-31MTNyCe-py3.8/lib/python3.8/site-packages/pymongo/client_session.py", line 466, in _end_session
File "/home/froy001/.cache/pypoetry/virtualenvs/signal-platform-31MTNyCe-py3.8/lib/python3.8/site-packages/pymongo/client_session.py", line 871, in in_transaction
File "/home/froy001/.cache/pypoetry/virtualenvs/signal-platform-31MTNyCe-py3.8/lib/python3.8/site-packages/pymongo/client_session.py", line 362, in active
AttributeError: 'NoneType' object has no attribute 'STARTING'
Update (01-23): I tried the multiprocess library (which uses dill instead of pickle) but it didn't help.
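For context, pymongo clients and cursors hold locks and open sockets, so they cannot be pickled across a process boundary; the usual pattern is to pass only plain data (lists of strings, dicts) between processes and to create one MongoClient per worker process, for example through a Pool initializer. A minimal sketch of that pattern (the chunking below is a placeholder; the database and collection names are reused from the question):

from multiprocessing import Pool
from pymongo import MongoClient

_client = None  # one client per worker process, created after the fork

def _init_worker():
    global _client
    _client = MongoClient()

def fetch_candles(listings):
    db = _client['stock_signals']
    cursor = db.candles.find({'listing': {'$in': listings}}, {'_id': 0})
    return list(cursor)  # materialize: only plain dicts travel back to the parent

if __name__ == "__main__":
    chunks = [['AAA', 'BBB'], ['CCC', 'DDD']]  # placeholder listing chunks
    with Pool(processes=4, initializer=_init_worker) as pool:
        results = pool.map(fetch_candles, chunks)

If anything in the payload (for example the earnings list) contains pymongo objects rather than plain dicts, converting it with list(...) before building the payload avoids the same pickling error.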

TypeError: unsupported operand type(s) for +: 'int' and 'RowProxy'

The purpose of this view is to read data from goodreads.com and from my database, do some calculation, and return the result in JSON format.
I tried many ways to render the API but still had no luck.
I tried replacing the user input oneIsbn with the literal string 'oneIsbn' in each of the listed SQL queries. What I get in my browser is just this:
{
"Error": "Invalid ISBN 0380795272"
}
My code snippet:
@app.route("/api/<oneIsbn>", methods=["GET", "POST"])
@login_required
def api(oneIsbn):
    """Returns in JSON format for a single Book"""
    if request.method == "GET":
        check = db.execute("SELECT * FROM books WHERE isbn= :isbn",
                           {"isbn": oneIsbn}).fetchone()
        if check is None:
            return jsonify({"Error": f"Invalid ISBN {oneIsbn}"}), 405
        else:
            res = requests.get(
                "https://www.goodreads.com/book/review_counts.json",
                params={
                    "key": "x9fJg",
                    "isbns": oneIsbn})
            if res.status_code != 200:
                raise Exception("ERROR: API request unsuccessful.")
            else:
                data = res.json()
                y = data["books"][0]["work_reviews_count"]
                r = data["books"][0]["average_rating"]
                isbn = db.execute("SELECT isbn FROM books WHERE isbn = :isbn",
                                  {"isbn": oneIsbn}).fetchone()
                title = db.execute("SELECT title FROM books WHERE isbn = :isbn",
                                   {"isbn": oneIsbn}).fetchone()
                author = db.execute("SELECT author FROM books WHERE isbn = :isbn",
                                    {"isbn": oneIsbn}).fetchone()
                year = db.execute("SELECT year FROM books WHERE isbn = :isbn",
                                  {"isbn": oneIsbn}).fetchone()
                x = db.execute("SELECT COUNT(reviews) FROM reviews WHERE isbn = :isbn",
                               {"isbn": 'oneIsbn'}).fetchone()
                z = db.execute("SELECT rating FROM reviews WHERE isbn = :isbn",
                               {"isbn": oneIsbn}).fetchone()
                rev = int(y)
                revCount = int(x.count)
                bothReviewValue = sum((revCount, rev))
                # listRate = float(z)
                rat = float(r)
                bothRatingValue = sum([z, rat]) / 2
                return jsonify(
                    ISBN=isbn,
                    TITLE=title,
                    AUTHOR=author,
                    YEAR=year,
                    REVIEWS=bothReviewValue,
                    RATING=bothRatingValue
                ), 422
TRACEBACK
TypeError
TypeError: unsupported operand type(s) for +: 'int' and 'RowProxy'
Traceback (most recent call last)
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 2464, in __call__
return self.wsgi_app(environ, start_response)
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 2450, in wsgi_app
response = self.handle_exception(e)
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 1867, in handle_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\_compat.py", line 39, in reraise
raise value
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 2447, in wsgi_app
response = self.full_dispatch_request()
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 1952, in full_dispatch_request
rv = self.handle_user_exception(e)
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 1821, in handle_user_exception
reraise(exc_type, exc_value, tb)
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\_compat.py", line 39, in reraise
raise value
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 1950, in full_dispatch_request
rv = self.dispatch_request()
File "C:\Users\Beacon\AppData\Local\Programs\Python\Python38\Lib\site-packages\flask\app.py", line 1936, in dispatch_request
return self.view_functions[rule.endpoint](**req.view_args)
File "C:\Users\Beacon\Desktop\THINKFUL DOCS\PYTHON\PYTHON WORKSPACE\project1\application.py", line 39, in wrapped_view
return view(**kwargs)
File "C:\Users\Beacon\Desktop\THINKFUL DOCS\PYTHON\PYTHON WORKSPACE\project1\application.py", line 233, in api
bothRatingValue = sum([z,rat]) / 2
TypeError: unsupported operand type(s) for +: 'int' and 'RowProxy'
This method really works great. I had just missed something: the sum arguments should be passed as a tuple instead of a list, and the rating value pulled from the database should be cast to float.
rev = int(y)
revCount = int(x.count)
bothReviewValue = sum((revCount,rev))
listRate = float(z)
rat = float(r)
bothRatingValue = sum((listRate,rat)) / 2
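To spell out why the cast matters (a sketch, not part of the original answer): z comes back from fetchone() as a RowProxy, and sum() starts from the integer 0, so the first addition is 0 + z, which is exactly the reported TypeError. Pulling the scalar out of the row before doing arithmetic avoids it:

z = db.execute("SELECT rating FROM reviews WHERE isbn = :isbn",
               {"isbn": oneIsbn}).fetchone()
listRate = float(z[0])  # take the single rating column out of the RowProxy, then cast
bothRatingValue = sum((listRate, rat)) / 2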

Running file diff on two files on Mac

I'm comparing two files, and my understanding is that + signifies an addition and - signifies a deletion. The new file has a typo:
if (KEY_STATUS.spacr) {
Then why is it represented by -? It should be +, right? This is what I get when I run diff -u game_new.js game_old.js:
--- game_new.js 2018-06-12 02:03:32.000000000 -0700
+++ game_old.js 2018-06-12 02:03:22.000000000 -0700
@@ -4,9 +4,9 @@
//
KEY_CODES = {
- 13: 'enter',
32: 'space',
37: 'left',
+ 38: 'up',
39: 'right',
40: 'down',
70: 'f',
@@ -392,7 +392,7 @@
this.vel.rot = 0;
}
- if (KEY_STATUS.spacr) {
+ if (KEY_STATUS.up) {
var rad = ((this.rot-90) * Math.PI)/180;
this.acc.x = 0.5 * Math.cos(rad);
this.acc.y = 0.5 * Math.sin(rad);
@@ -406,7 +406,7 @@
if (this.delayBeforeBullet > 0) {
this.delayBeforeBullet -= delta;
}
- if (KEY_STATUS.enter) {
+ if (KEY_STATUS.space) {
if (this.delayBeforeBullet <= 0) {
this.delayBeforeBullet = 10;
for (var i = 0; i < this.bullets.length; i++) {
@@ -919,7 +919,7 @@
waiting: function () {
Text.renderText(ipad ? 'Touch Sreen to Start' : 'Press Space to Start', 36, Game.canvasWidth/2 - 270, Game.canvasHeight/2);
if (KEY_STATUS.space || window.gameStart) {
- KEY_STATUS.space = false; // hack so we don't move right away
+ KEY_STATUS.space = false; // hack so we don't shoot right away
window.gameStart = false;
this.state = 'start';
}
I believe that when you run:
diff -u game_new.js game_old.js
the file on the left is treated as the original (source) and the file on the right as the new version (destination), so lines that exist only in the left file are marked with a minus, while lines that exist only in the right file are marked with a plus.
If you want the - and + labels to match your expectation (old file as the source, new file as the destination), run diff with the files in the reverse order:
diff -u game_old.js game_new.js
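For illustration (derived from the hunks shown above, not from re-running diff), with the arguments reversed the typo line now shows up as an addition:

diff -u game_old.js game_new.js
--- game_old.js 2018-06-12 02:03:22.000000000 -0700
+++ game_new.js 2018-06-12 02:03:32.000000000 -0700
@@ -392,7 +392,7 @@
 this.vel.rot = 0;
 }
- if (KEY_STATUS.up) {
+ if (KEY_STATUS.spacr) {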