SQLAlchemy prevent “nesting” pattern - postgresql

I am now using SQLAlchemy(async/core), PostgreSQL and FastAPI.
I made a @contextlib.contextmanager:
@contextlib.contextmanager
def transaction_begin(self) -> typing.Generator:
    if not self.engine_connection.in_transaction():
        with self.engine_connection.begin():
            yield self.engine_connection
    else:
        yield self.engine_connection
It worked fine, but today I encountered an AttributeError: __enter__. It works in some FastAPI router functions but not in others:
Traceback (most recent call last):
File "/var/www/myproj/app/auth/routers.py", line 161, in get_refresh_token
await CRUDModel(conn).insert(stmt)
File "/var/www/myproj/app/core/crud.py", line 61, in insert
with self.transaction_begin():
File "/home/me/.pyenv/versions/3.10.5/lib/python3.10/contextlib.py", line 135, in __enter__
return next(self.gen)
File "/var/www/myproj/app/core/crud.py", line 26, in transaction_begin
with self.engine_connection.begin():
AttributeError: __enter__
To solve the issue, I might rewrite the context manager like this, because PostgreSQL always begins implicitly:
@contextlib.contextmanager
def transaction_begin(self) -> typing.Generator:
    # BEGIN (implicit) by PostgreSQL
    yield self.engine_connection
Actually, my context manager code is from here:
https://docs.sqlalchemy.org/en/14/core/connections.html#migrating-from-the-nesting-pattern
I would like to know whether it is okay to get the engine_connection without a BEGIN if I am using PostgreSQL. Otherwise, do I have to explicitly begin a transaction when I am not already in one?
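For reference, here is a sketch of the explicit variant I have in mind, assuming engine_connection is actually an AsyncConnection (which would also explain the AttributeError, since AsyncConnection.begin() has to be entered with "async with" rather than "with"):
import contextlib
import typing

@contextlib.asynccontextmanager
async def transaction_begin(self) -> typing.AsyncGenerator:
    # Sketch only: begin explicitly unless a transaction is already active
    if not self.engine_connection.in_transaction():
        async with self.engine_connection.begin():
            yield self.engine_connection
    else:
        yield self.engine_connection
Callers would then use "async with self.transaction_begin() as conn:" instead of a plain "with".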
Thank you.

Related

Odoo 16: Playing with translations and Boom ! No longer can access my custom model data in my server

After successfully adding a pot file to my new i18n folder on my local machine, as well as setting "translate=True" on a couple of fields in my carddecks module, and verifying that on localhost I could access my model data, I decided to update my server.
But when I try to access my model data I get the following error:
LINE 1: ..."write_date", COALESCE("carddecks_card"."cardText"->>'pt_PT'...
^
HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
Does anyone know what might be causing this?
The source code for the module can be found at https://github.com/diogocsc/carddecks
Full error:
Traceback (most recent call last):
File "/usr/lib/python3/dist-packages/odoo/http.py", line 1579, in _serve_db
return service_model.retrying(self._serve_ir_http, self.env)
File "/usr/lib/python3/dist-packages/odoo/service/model.py", line 134, in retrying
result = func()
File "/usr/lib/python3/dist-packages/odoo/http.py", line 1608, in _serve_ir_http
response = self.dispatcher.dispatch(rule.endpoint, args)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 1805, in dispatch
result = self.request.registry['ir.http']._dispatch(endpoint)
File "/usr/lib/python3/dist-packages/odoo/addons/website/models/ir_http.py", line 235, in _dispatch
response = super()._dispatch(endpoint)
File "/usr/lib/python3/dist-packages/odoo/addons/base/models/ir_http.py", line 144, in _dispatch
result = endpoint(**request.params)
File "/usr/lib/python3/dist-packages/odoo/http.py", line 698, in route_wrapper
result = endpoint(self, *args, **params_ok)
File "/usr/lib/python3/dist-packages/odoo/addons/web/controllers/dataset.py", line 42, in call_kw
return self._call_kw(model, method, args, kwargs)
File "/usr/lib/python3/dist-packages/odoo/addons/web/controllers/dataset.py", line 33, in _call_kw
return call_kw(request.env[model], method, args, kwargs)
File "/usr/lib/python3/dist-packages/odoo/api.py", line 457, in call_kw
result = _call_kw_model(method, model, args, kwargs)
File "/usr/lib/python3/dist-packages/odoo/api.py", line 430, in _call_kw_model
result = method(recs, *args, **kwargs)
File "/usr/lib/python3/dist-packages/odoo/addons/web/models/models.py", line 62, in web_search_read
records = self.search_read(domain, fields, offset=offset, limit=limit, order=order)
File "/usr/lib/python3/dist-packages/odoo/models.py", line 4968, in search_read
result = records.read(fields, **read_kwargs)
File "/usr/lib/python3/dist-packages/odoo/models.py", line 2992, in read
self._read(stored_fields)
File "/usr/lib/python3/dist-packages/odoo/models.py", line 3235, in _read
cr.execute(query_str, params + [sub_ids])
File "/usr/lib/python3/dist-packages/odoo/sql_db.py", line 315, in execute
res = self._obj.execute(query, params)
psycopg2.errors.UndefinedFunction: operator does not exist: character varying ->> unknown
LINE 1: ..."write_date", COALESCE("carddecks_card"."cardText"->>'pt_PT'...
^
HINT: No operator matches the given name and argument types. You might need to add explicit type casts.
The above server error caused the following client error:
OwlError: The following error occurred in onWillStart: "Odoo Server Error"
at wrapError (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1445:77)
at onWillStart (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1451:117)
at useModel (https://www.relationalgames.com/web/assets/505-11285c6/web.assets_backend.min.js:4709:1)
at ListController.setup (https://www.relationalgames.com/web/assets/505-11285c6/web.assets_backend.min.js:4430:645)
at new ComponentNode (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1407:136)
at https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1929:6
at View.slot1 (eval at compile (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1892:370), <anonymous>:15:36)
at callSlot (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1508:25)
at WithSearch.template (eval at compile (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1892:370), <anonymous>:8:12)
at Fiber._render (https://www.relationalgames.com/web/assets/504-41b52e3/web.assets_common.min.js:1336:96)
Caused by: RPC_ERROR: Odoo Server Error
at makeErrorFromResponse (https://www.relationalgames.com/web/assets/505-11285c6/web.assets_backend.min.js:967:163)
at XMLHttpRequest.<anonymous> (https://www.relationalgames.com/web/assets/505-11285c6/web.assets_backend.min.js:974:13)
Found the answer for this. On localhost I was updating my module, but on my server I was just running docker-compose up, without updating the module in the database.
The solution was to go to Settings -> Apps, search for carddecks, and upgrade it.

Chaining an iterable task result to a group, then chaining the results of this group to a chord

Related: How to chain a Celery task that returns a list into a group? and Chain a celery task's results into a distributed group, but they lack an MWE, do not fully cover my needs as far as I understand, and may be outdated.
I need to dynamically pass the output of a task to a parallel group of tasks, then pass the results of this group to a final task. I would like to make this a single "meta task".
Here's my attempt so far:
# chaintasks.py
import random
from typing import List, Iterable, Callable
from celery import Celery, signature, group, chord

app = Celery(
    "chaintasks",
    backend="redis://localhost:6379",
    broker="pyamqp://guest@localhost//",
)
app.conf.update(task_track_started=True, result_persistent=True)

@app.task
def select_items(items: List[str]):
    print("In select_items, received", items)
    selection = tuple(set(random.choices(items, k=random.randint(1, len(items)))))
    print("In select_items, sending", selection)
    return selection

@app.task
def process_item(item: str):
    print("In process_item, received", item)
    return item + "_processed"

@app.task
def group_items(items: List[str]):
    print("In group_items, received", items)
    return ":".join(items)

@app.task
def dynamic_map_group(iterable: Iterable, callback: Callable):
    # Map a callback over an iterator and return as a group
    # Credit to https://stackoverflow.com/a/13569873/5902284
    callback = signature(callback)
    return group(callback.clone((arg,)) for arg in iterable).delay()

@app.task
def dynamic_map_chord(iterable: Iterable, callback: Callable):
    callback = signature(callback)
    return chord(callback.clone((arg,)) for arg in iterable).delay()
Now, to try this out, let's spin up a Celery worker and use Docker for RabbitMQ and Redis:
$ docker run -p 5672:5672 rabbitmq
$ docker run -p 6379:6379 redis
$ celery -A chaintasks:app worker -E
Now, let's try to illustrate what I need:
items = ["item1", "item2", "item3", "item4"]
print(select_items.s(items).apply_async().get())
This outputs: ['item3'], great.
meta_task1 = select_items.s(items) | dynamic_map_group.s(process_item.s())
meta_task1_result = meta_task1.apply_async().get()
print(meta_task1_result)
It gets trickier here: the output is [['b916f5d3-4367-46c5-896f-f2d2e2e59a00', None], [[['3f78d31b-e374-484e-b05b-40c7a2038081', None], None], [['bce50d49-466a-43e9-b6ad-1b78b973b50f', None], None]]].
I am not sure what the Nones mean, but I guess the UUIDs represent the subtasks of my meta task. But how do I get their results from here?
I tried:
subtasks_ids = [i[0][0] for i in meta_task1_result[1]]
processed_items = [app.AsyncResult(i).get() for i in subtasks_ids]
but this just hangs. What am I doing wrong here? I'm expecting an output looking like ['processed_item1', 'processed_item3']
What about trying to chord the output of select_items to group_items?
meta_task2 = select_items.s(items) | dynamic_map_chord.s(group_items.s())
print(meta_task2.apply_async().get())
Ouch, this raises an AttributeError that I don't really understand.
Traceback (most recent call last):
File "chaintasks.py", line 70, in <module>
print(meta_task2.apply_async().get())
File "/site-packages/celery/result.py", line 223, in get
return self.backend.wait_for_pending(
File "/site-packages/celery/backends/asynchronous.py", line 201, in wait_for_pending
return result.maybe_throw(callback=callback, propagate=propagate)
File "/site-packages/celery/result.py", line 335, in maybe_throw
self.throw(value, self._to_remote_traceback(tb))
File "/site-packages/celery/result.py", line 328, in throw
self.on_ready.throw(*args, **kwargs)
File "/site-packages/vine/promises.py", line 234, in throw
reraise(type(exc), exc, tb)
File "/site-packages/vine/utils.py", line 30, in reraise
raise value
AttributeError: 'NoneType' object has no attribute 'clone'
Process finished with exit code 1
And finally, what I really am trying to do is something like:
ultimate_meta_task = (
    select_items.s(items)
    | dynamic_map_group.s(process_item.s())
    | dynamic_map_chord.s(group_items.s())
)
print(ultimate_meta_task.apply_async().get())
but this leads to:
Traceback (most recent call last):
File "chaintasks.py", line 77, in <module>
print(ultimate_meta_task.apply_async().get())
File "/site-packages/celery/result.py", line 223, in get
return self.backend.wait_for_pending(
File "/site-packages/celery/backends/asynchronous.py", line 199, in wait_for_pending
for _ in self._wait_for_pending(result, **kwargs):
File "/site-packages/celery/backends/asynchronous.py", line 265, in _wait_for_pending
for _ in self.drain_events_until(
File "/site-packages/celery/backends/asynchronous.py", line 58, in drain_events_until
on_interval()
File "/site-packages/vine/promises.py", line 160, in __call__
return self.throw()
File "/site-packages/vine/promises.py", line 157, in __call__
retval = fun(*final_args, **final_kwargs)
File "/site-packages/celery/result.py", line 236, in _maybe_reraise_parent_error
node.maybe_throw()
File "/site-packages/celery/result.py", line 335, in maybe_throw
self.throw(value, self._to_remote_traceback(tb))
File "/site-packages/celery/result.py", line 328, in throw
self.on_ready.throw(*args, **kwargs)
File "/site-packages/vine/promises.py", line 234, in throw
reraise(type(exc), exc, tb)
File "/site-packages/vine/utils.py", line 30, in reraise
raise value
kombu.exceptions.EncodeError: TypeError('Object of type GroupResult is not JSON serializable')
Process finished with exit code 1
What I am trying to achieve here is an output looking like 'processed_item1:processed_item3'. Is this even doable using Celery? Any help here is appreciated.
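One direction that might be worth exploring (a sketch only, not verified against the exact Celery version used here) is to have the mapping task replace itself with a group via Task.replace, so the surrounding chain waits on the group's results directly instead of receiving a GroupResult it cannot serialize:
@app.task(bind=True)
def dynamic_map(self, iterable: Iterable, callback: Callable):
    # Sketch: build a group from the callback signature and substitute it
    # for this task, so downstream tasks in the chain receive the group's
    # results rather than a GroupResult object.
    callback = signature(callback)
    return self.replace(group(callback.clone((arg,)) for arg in iterable))
With such a task, a chain like select_items.s(items) | dynamic_map.s(process_item.s()) | group_items.s() might express the intended fan-out and fan-in, though I have not run it against this setup.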

download file from mongo gridfs with python

I have uploaded files to Mongo, but when I try to download them from Mongo via an HTTP response in the web browser, it does not work.
Here is the views.py:
if filename is not None:
    file_ = db.fs.files.find_one({
        'filename': filename
    })
    file_id = file_['_id']
    wrapper = fs.get(file_id).read()
    response = StreamingHttpResponse(FileWrapper(wrapper), content_type=file_['contentType'])
    response['Content-Disposition'] = 'attachment; filename=%s' % str(filename)
    response['Content-Length'] = file_['length']
    return response
I got this error:
Traceback (most recent call last):
File "/usr/lib/python2.7/wsgiref/handlers.py", line 86, in run
self.finish_response()
File "/usr/lib/python2.7/wsgiref/handlers.py", line 126, in finish_response
for data in self.result:
File "/usr/local/lib/python2.7/dist-packages/django/utils/six.py", line 473, in next
return type(self).__next__(self)
File "/usr/local/lib/python2.7/dist-packages/django/http/response.py", line 292, in __next__
return self.make_bytes(next(self._iterator))
File "/usr/lib/python2.7/wsgiref/util.py", line 30, in next
data = self.filelike.read(self.blksize)
AttributeError: 'str' object has no attribute 'read'
But when I change the StreamingHttpResponse to HttpResponse, the error is as follows:
[30/Jul/2014 17:29:43] "GET /download/cs101/ HTTP/1.1" 200 664
/usr/lib/python2.7/wsgiref/handlers.py:126: DeprecationWarning:
Creating streaming responses with `HttpResponse` is deprecated.
Use `StreamingHttpResponse`instead if you need the streaming behavior.
for data in self.result:
Traceback (most recent call last):
File "/usr/lib/python2.7/wsgiref/handlers.py", line 86, in run
self.finish_response()
File "/usr/lib/python2.7/wsgiref/handlers.py", line 126, in finish_response
for data in self.result:
File "/usr/local/lib/python2.7/dist-packages/django/utils/six.py", line 473, in next
return type(self).__next__(self)
File "/usr/local/lib/python2.7/dist-packages/django/http/response.py", line 292, in __next__
return self.make_bytes(next(self._iterator))
File "/usr/lib/python2.7/wsgiref/util.py", line 30, in next
data = self.filelike.read(self.blksize)
AttributeError: 'str' object has no attribute 'read'
Thanks in advance!
You're calling the read method in:
wrapper = fs.get(file_id).read()
so you're getting a str (assuming Python 2; on Python 3 you'd get bytes). FileWrapper needs a file-like object, which a str of course is not.
Try to use:
wrapper = fs.get(file_id)
This will return a file-like object.
OTOH, pymongo's .get() returns a GridOut instance, which already supports iteration, so why not try something like:
wrapper = fs.get(file_id)
response = StreamingHttpResponse(wrapper, content_type=file_['contentType'])
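Putting it together, a minimal sketch of the whole view (assuming db and fs are already set up as in the question, with Django and pymongo's GridFS):
from django.http import StreamingHttpResponse

def download(request, filename):
    # Sketch: look up the GridFS file and stream the GridOut directly;
    # GridOut is iterable, so no .read() call and no FileWrapper is needed
    file_ = db.fs.files.find_one({'filename': filename})
    grid_out = fs.get(file_['_id'])
    response = StreamingHttpResponse(grid_out, content_type=file_['contentType'])
    response['Content-Disposition'] = 'attachment; filename=%s' % str(filename)
    response['Content-Length'] = file_['length']
    return response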

Issue with multiline TokenInputTransformer in IPython extension

I am trying to use a TokenInputTransformer within an IPython extension module, but it seems that there is something wrong with the standard implementation of token transformers with multiline input. Consider the following minimal extension:
from IPython.core.inputtransformer import TokenInputTransformer

@TokenInputTransformer.wrap
def test_transformer(tokens):
    return tokens

def load_ipython_extension(ip):
    for s in (ip.input_splitter, ip.input_transformer_manager):
        s.python_line_transforms.extend([test_transformer()])
    print "Test activated"
When I load the extension in IPython 1.1.0, I get an unhandled exception with multiline input:
In [1]: %load_ext test
Test activated
In [2]: abs(
...: 2
...: )
Traceback (most recent call last):
File "/Applications/anaconda/bin/ipython", line 6, in <module>
sys.exit(start_ipython())
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/__init__.py", line 118, in start_ipython
return launch_new_instance(argv=argv, **kwargs)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/config/application.py", line 545, in launch_instance
app.start()
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/terminal/ipapp.py", line 362, in start
self.shell.mainloop()
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/terminal/interactiveshell.py", line 436, in mainloop
self.interact(display_banner=display_banner)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/terminal/interactiveshell.py", line 548, in interact
self.input_splitter.push(line)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/core/inputsplitter.py", line 620, in push
out = self.push_line(line)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/core/inputsplitter.py", line 655, in push_line
line = transformer.push(line)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/core/inputtransformer.py", line 152, in push
return self.output(tokens)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/core/inputtransformer.py", line 157, in output
return untokenize(self.func(tokens)).rstrip('\n')
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/utils/_tokenize_py2.py", line 276, in untokenize
return ut.untokenize(iterable)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/utils/_tokenize_py2.py", line 214, in untokenize
self.add_whitespace(start)
File "/Applications/anaconda/lib/python2.7/site-packages/IPython/utils/_tokenize_py2.py", line 199, in add_whitespace
assert row >= self.prev_row
AssertionError
If you suspect this is an IPython bug, please report it at:
https://github.com/ipython/ipython/issues
or send an email to the mailing list at ipython-dev@scipy.org
You can print a more detailed traceback right now with "%tb", or use "%debug"
to interactively debug it.
Extra-detailed tracebacks for bug-reporting purposes can be enabled via:
%config Application.verbose_crash=True
Am I doing something wrong or is it really an IPython bug?
It is really an IPython bug, I think. Specifically, the way we handle tokenize fails when an expression involving brackets (()[]{}) is spread over more than one line. I'm trying to work out what we can do about this.
A kinda late answer, but I was trying to use this in my own extension and ran into the same problem. I solved it by simply removing NL tokens from the list (NL is not the same as the NEWLINE token, which ends a statement); NL tokens only appear inside [], (), {}, so they should be safely removable.
from tokenize import NL
from IPython.core.inputtransformer import TokenInputTransformer

@TokenInputTransformer.wrap
def mat_transformer(tokens):
    tokens = list(filter(lambda t: t.type != NL, tokens))
    return tokens
If you are looking for full example, I've posted my goofy code there: https://github.com/Quinzel/pymat
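For completeness, here is a minimal sketch of a full extension combining the NL filter above with the load_ipython_extension hook from the question (illustrative only, not tested against IPython 1.1.0):
from tokenize import NL
from IPython.core.inputtransformer import TokenInputTransformer

@TokenInputTransformer.wrap
def mat_transformer(tokens):
    # Drop NL tokens, which only occur inside (), [] and {}, to avoid the
    # untokenize() assertion on bracketed expressions spread over lines
    return [t for t in tokens if t.type != NL]

def load_ipython_extension(ip):
    for s in (ip.input_splitter, ip.input_transformer_manager):
        s.python_line_transforms.extend([mat_transformer()])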

How to plug txyam on top of tornado IOLoop

I've tried to use tornado.platform.twisted to integrate the txyam memcached client, but when I check whether it is working, the following error is thrown:
Traceback (most recent call last):
File "swcomet/tx_memcache_helper.py", line 32, in <module>
mem_helper = MemcacheHelper()
File "swcomet/tx_memcache_helper.py", line 19, in __init__
self.add(4)
File "/home/rustem/work/sw.services.swcomet.python/venv/local/lib/python2.7/site-packages/tornado/gen.py", line 117, in wrapper
gen = func(*args, **kwargs)
File "swcomet/tx_memcache_helper.py", line 25, in add
self.mem.getPickled(user_id, decompress=True)
File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 133, in getPickled
return self.get(key, **kwargs).addCallback(handleResult, uncompress)
File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 27, in wrapper
func = getattr(self.getClient(key), cmd)
File "/home/rustem/work/sw.services.swcomet.python/venv/lib/python2.7/site-packages/txyam/client.py", line 48, in getClient
raise NoServerError, "No connected servers remaining."
txyam.client.NoServerError: No connected servers remaining.
The source code which dumps that error:
import tornado.ioloop
import tornado.gen
from txyam.client import YamClient
from swtools.date import _ts
import tornado.platform.twisted

MEMHOSTS = ['127.0.0.1111']
USER_EXPIRATION_TIME = 61

class MemcacheHelper(object):
    def __init__(self, *a, **kw):
        try:
            self.mem = YamClient(["127.0.0.1"])
        except Exception, e:
            print "ERror", e
        self.clients = set()
        self.add(4)

    @tornado.gen.engine
    def add(self, user_id, expire=None):
        self.clients.add(user_id)
        expire = expire or USER_EXPIRATION_TIME
        self.mem.getPickled(user_id, decompress=True)
        print "hmmm"

if __name__ == '__main__':
    print "trying to start on top of IOLOOP"
    ioloop = tornado.ioloop.IOLoop.instance()
    #reactor = TornadoReactor(ioloop)
    mem_helper = MemcacheHelper()
    #mem_helper.add(4)
    ioloop.start()
Please, help me to solve this problem!
txyam appears not to let you perform any memcache operations until after at least one connection has been established:
def getActiveConnections(self):
    return [factory.client for factory in self.factories if not factory.client is None]

def getClient(self, key):
    hosts = self.getActiveConnections()
    log.msg("Using %i active hosts" % len(hosts))
    if len(hosts) == 0:
        raise NoServerError, "No connected servers remaining."
    return hosts[ketama(key) % len(hosts)]
It attempts to set up these connections right away:
def __init__(self, hosts):
    """
    @param hosts: A C{list} of C{tuple}s containing hosts and ports.
    """
    self.connect(hosts)
But connection setup is asynchronous, and it doesn't expose an event to indicate when at least one connection has been established.
So your code fails because you call add right away, before any connections exist. A good long-term fix would be to file a bug report against txyam, because this isn't a very nice interface. YamClient could have a whenReady method that returns a Deferred that fires when you are actually allowed to use the YamClient instance. Or there could be an alternate constructor that returns a Deferred that fires with the YamClient instance, but only after it can be used.
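In the meantime, a workaround sketch (when_ready is a hypothetical helper, not part of txyam) is to poll getActiveConnections until at least one connection exists before issuing any memcache calls:
from twisted.internet import defer, reactor

def when_ready(client, poll_interval=0.1):
    # Hypothetical helper: fires the Deferred with the client once at least
    # one memcache connection is active, polling until then.
    d = defer.Deferred()

    def check():
        if client.getActiveConnections():
            d.callback(client)
        else:
            reactor.callLater(poll_interval, check)

    check()
    return d
You would then attach the first memcache operation as a callback on the Deferred returned by when_ready(YamClient(["127.0.0.1"])), instead of calling add directly from __init__.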