I have a model (see code below) on which I want to execute a function after an object is inserted; the function updates one of the object's fields. I'm using the after_insert mapper event to do this.
I've confirmed that after_insert properly calls the event_extract_audio_text() handler, and that target is updated with the correct audio_text value. However, once the event handler finishes executing, the audio_text value is not set for the object in the database.
Code
# Event handler
def event_extract_audio_text(mapper, connection, target):
    # Extract text from the audio file
    audio_text = compute_text_from_audio_file(target.filename)
    # Update the 'audio_text' field with the extracted text
    target.audio_text = audio_text
# Model
class SoundsRaw(db.Model):
__tablename__ = 'soundsraw'
id = db.Column(db.BigInteger(), primary_key=True, autoincrement=True)
filename = db.Column(db.String(255))
audio_text = db.Column(db.Text())
# Event listener
event.listen(SoundsRaw, 'after_insert', event_extract_audio_text)
I've also tried calling db.session.commit() inside the handler to persist the audio_text value, but then I get the following stack trace:
File "/Users/alexmarse/.virtualenvs/techmuseum/lib/python2.7/site-packages/sqlalchemy/orm/session.py", line 219, in _assert_active
raise sa_exc.ResourceClosedError(closed_msg)
ResourceClosedError: This transaction is closed
Any ideas?
Software versions
SQLAlchemy 0.9.4
Flask 0.10.1
Flask-SQLAlchemy 1.0
The thing with 'after_insert'-style handlers is that you have to use the connection the event hands you directly; by the time the handler runs, the INSERT for the target has already been emitted, so attribute changes on the target won't be flushed again. Here's how I did it:
class Link(db.Model):
"News link data."
__tablename__ = 'news_links'
id = db.Column(db.BigInteger, primary_key=True)
slug = db.Column(db.String, unique=True) #, nullable=False
url = db.Column(db.String, nullable=False, unique=True)
title = db.Column(db.String)
image_url = db.Column(db.String)
description = db.Column(db.String)
@db.event.listens_for(Link, "after_insert")
def after_insert(mapper, connection, target):
link_table = Link.__table__
if target.slug is None:
connection.execute(
link_table.update().
where(link_table.c.id==target.id).
values(slug=slugify(target.id))
)
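Applied to the question's model, the same pattern might look like this (a sketch, reusing compute_text_from_audio_file from the question):
from sqlalchemy import event

@event.listens_for(SoundsRaw, 'after_insert')
def event_extract_audio_text(mapper, connection, target):
    # The INSERT for `target` has already been emitted at this point, so
    # write through the connection instead of mutating the target object
    table = SoundsRaw.__table__
    audio_text = compute_text_from_audio_file(target.filename)
    connection.execute(
        table.update().
        where(table.c.id == target.id).
        values(audio_text=audio_text)
    )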
I ended up solving this by ditching the mapper event approach and using Flask-SQLAlchemy's signalling support instead.
Basically, you can register "signals" for your models: callback functions that are called whenever a specific kind of event happens. In my case, the event is an "update" on my model.
To configure the signals, I added this method to my app.py file:
def on_models_committed(sender, changes):
"""Handler for model change signals"""
for model, change in changes:
if change == 'insert' and hasattr(model, '__commit_insert__'):
model.__commit_insert__()
if change == 'update' and hasattr(model, '__commit_update__'):
model.__commit_update__()
if change == 'delete' and hasattr(model, '__commit_delete__'):
model.__commit_delete__()
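For this handler to actually fire, it needs to be connected to Flask-SQLAlchemy's models_committed signal; a minimal sketch, assuming app is the Flask application object:
from flask.ext.sqlalchemy import models_committed  # flask_sqlalchemy on newer versions

# Call on_models_committed after each commit of this app's session
models_committed.connect(on_models_committed, sender=app)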
Then, on my model, I added this function to handle the update event:
# Event methods
def __commit_update__(self):
# create a new db session, which avoids the ResourceClosedError
session = create_db_session()
from techmuseum.modules.sensors.models import SoundsRaw
# Get the SoundsRaw record by uuid (self contains the object being updated,
# but we can't just update/commit self -- we'd get a ResourceClosedError)
sound = session.query(SoundsRaw).filter_by(uuid=self.uuid).first()
    # Extract text from the audio file
    audio_text = compute_text_from_audio_file(sound.filename)
    # Update the 'audio_text' field of the sound (the model's column name)
    sound.audio_text = audio_text
    # Commit the update to the sound
    session.add(sound)
    session.commit()
def create_db_session():
    # Create a new session bound to its own engine
    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    psql_url = app.config['SQLALCHEMY_DATABASE_URI']
    some_engine = create_engine(psql_url)
    # sessionmaker() returns a configured "Session" class; instantiate it,
    # otherwise callers get the factory instead of a usable session
    Session = sessionmaker(bind=some_engine)
    return Session()
# models.py
from django.db import models
from django.forms import ModelForm

question_choices = (
    (1, 'yes/no'),
    (2, 'text'),
    (3, 'numeric'),
)

class Question(models.Model):
    title = models.CharField(max_length=500, blank=False)
    responseType = models.SmallIntegerField(choices=question_choices, blank=False, default=2)
    mandatory = models.BooleanField(default=True)

    def __str__(self):
        return self.title

answer_choices = (
    (1, 'YES'),
    (2, 'NO'),
)

class Response(models.Model):
    questionId = models.ForeignKey(Question, on_delete=models.CASCADE)
    answerType1 = models.CharField(max_length=4, choices=answer_choices, blank=True)
    answerType2 = models.TextField(max_length=500, blank=True)
    answerType3 = models.IntegerField(blank=True, null=True)  # null=True so an empty numeric answer can be saved

    def __str__(self):
        return str(self.id)
Assume two buttons in the UI: 1. Create questions, 2. Fill questions.
When the user clicks the first button (Create questions), they can save questions to the DB according to the Question model.
When the user clicks the second button (Fill questions), and there is data in the DB, a form should be built for each row of the Question model and sent to the user, so that the user's answers are saved in the Response model.
For reference, I have added a screenshot of the Question model from the admin panel.
I want to create a form for every row; a sketch of one possible approach follows below.
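A minimal sketch of one possible approach: a ModelForm per Question row, with a prefix so each row's fields stay distinct (view and template names are illustrative; a real form would probably show only the answer field matching responseType):
# forms.py -- sketch: one ModelForm reused per Question row
from django import forms
from .models import Response

class ResponseForm(forms.ModelForm):
    class Meta:
        model = Response
        fields = ['answerType1', 'answerType2', 'answerType3']

# views.py -- sketch: build one form per question
from django.shortcuts import render
from .forms import ResponseForm
from .models import Question

def fill_questions(request):
    questions = Question.objects.all()
    if request.method == 'POST':
        for question in questions:
            form = ResponseForm(request.POST, prefix=str(question.pk))
            if form.is_valid():
                response = form.save(commit=False)
                response.questionId = question  # link the answer to its question
                response.save()
    question_forms = [(q, ResponseForm(prefix=str(q.pk))) for q in questions]
    return render(request, 'fill_questions.html', {'question_forms': question_forms})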
I am trying to filter a queryset on a many-to-many relationship, but it's not working.
I am trying to get TestTag documents filtered by keyword name.
models :
class TestKeyword(Document):
name = StringField(required=True)
class TestTag(Document):
tag = StringField(max_length=100, null=True)
keywords = ListField(ReferenceField(TestKeyword), null=True)
Filter:
import django_mongoengine_filter as filters
from rest_framework_mongoengine.viewsets import ModelViewSet
from app.models import TestTag
from app.serializers import TestTagSerializer  # assuming this is where the serializer lives
class TestTagFilter(filters.FilterSet):
class Meta:
model = TestTag
fields = ['tag', 'keywords__name']
class TestTagViewSet(ModelViewSet):  # renamed so it does not shadow the TestTag model
queryset = TestTag.objects.all()
serializer_class = TestTagSerializer
# override filter_queryset function
def filter_queryset(self, queryset):
filter = TestTagFilter(self.request.query_params, queryset=queryset)
return filter.qs
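With this, I expect a request like the following to return only tags that reference a keyword with that name (the URL prefix depends on my router setup):
GET /testtag/?keywords__name=somekeyword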
A general solution that worked for me is described below:
First make sure you have the following packages in your environment:
Django
djangorestframework
django-rest-framework-mongoengine
mongoengine
django-filter
# patched version of django-mongoengine-filter to support Django 4.0
# https://github.com/oussjarrousse/django-mongoengine-filter
# Pull request https://github.com/barseghyanartur/django-mongoengine-filter/pull/16 or download the original if you are using Django 3.x
django-mongoengine-filter
The idea in this answer is to add filtering support to django-rest-framework-mongoengine using django-mongoengine-filter, which is a replacement for (or an extension of) django-filter and should work the same way.
First let's edit the project/settings.py file. Find the INSTALLED_APPS variable and make sure the following "Django apps" are added:
# in settings.py:
INSTALLED_APPS = [
# ...,
"rest_framework",
"rest_framework_mongoengine",
"django_filters",
# ...,
]
The django_filters app is required because it provides the filtering infrastructure, including the HTML templates DRF uses to render filter forms.
Then, in the REST_FRAMEWORK variable, edit the list associated with the DEFAULT_FILTER_BACKENDS key:
# in settings.py:
REST_FRAMEWORK = {
# ...
"DEFAULT_FILTER_BACKENDS": [
"filters.DjangoMongoEngineFilterBackend",
# ...
],
# ...
}
DjangoMongoEngineFilterBackend is a custom-built filter backend; add it in a filters.py file (where exactly depends on how you structure your project):
# in filters.py:
from django_filters.rest_framework.backends import DjangoFilterBackend
class DjangoMongoEngineFilterBackend(DjangoFilterBackend):
# filterset_base = django_mongoengine_filter.FilterSet
"""
Patching the DjangoFilterBackend to allow for MongoEngine support
"""
def get_filterset_class(self, view, queryset=None):
"""
Return the `FilterSet` class used to filter the queryset.
"""
filterset_class = getattr(view, "filterset_class", None)
filterset_fields = getattr(view, "filterset_fields", None)
if filterset_class:
filterset_model = filterset_class._meta.model
# FilterSets do not need to specify a Meta class
if filterset_model and queryset is not None:
element = queryset.first()
if element:
queryset_model = element.__class__
assert issubclass(
queryset_model, filterset_model
), "FilterSet model %s does not match queryset model %s" % (
filterset_model,
str(queryset_model),
)
return filterset_class
if filterset_fields and queryset is not None:
MetaBase = getattr(self.filterset_base, "Meta", object)
element = queryset.first()
if element:
queryset_model = element.__class__
class AutoFilterSet(self.filterset_base):
class Meta(MetaBase):
model = queryset_model
fields = filterset_fields
return AutoFilterSet
return None
This custom filter backend will not raise the exceptions that the original django-filter backend would raise. django-filter's DjangoFilterBackend accesses the model attribute of the queryset (queryset.model); however, that attribute does not exist on MongoEngine querysets.
Maybe making it available in MongoEngine should be considered:
https://github.com/MongoEngine/mongoengine/issues/2707
https://github.com/umutbozkurt/django-rest-framework-mongoengine/issues/294
Now we can add a custom filter to the ViewSet:
# in views.py
from rest_framework_mongoengine.viewsets import ModelViewSet
class MyModelViewSet(ModelViewSet):
serializer_class = MyModelSerializer
    filterset_fields = ["a_string_field", "a_boolean_field"]  # the patched backend reads filterset_fields, not filter_fields
    filterset_class = MyModelFilter
def get_queryset(self):
queryset = MyModel.objects.all()
return queryset
Finally, let's get back to filters.py and add MyModelFilter:
# in filters.py
from django_mongoengine_filter import FilterSet, StringFilter, BooleanFilter
class MyModelFilter(FilterSet):
"""
    MyModelFilter is a FilterSet designed to work with django-filter.
    However, the original django-mongoengine-filter is outdated and causes
    some trouble with Django>=4.0.
"""
class Meta:
model = MyModel
fields = [
"a_string_field",
"a_boolean_field",
]
a_string_field = StringFilter()
a_boolean_field = BooleanFilter()
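For a quick sanity check, a filtered request should then look something like this (parameter names match the filter fields; the exact URL prefix depends on your router setup):
GET /mymodel/?a_string_field=foo&a_boolean_field=true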
That should do the trick.
I'm trying to get APScheduler to update the PostgreSQL database in my Flask app every 5 minutes, but the database is only updated the first time; on every subsequent run the changes are not saved. APScheduler itself works correctly: if I replace the database-update function with one that just prints text, it runs fine every time.
In my app I'm using Flask-SQLAlchemy:
SQLALCHEMY_DATABASE_URI = 'postgresql+psycopg2://postgres:name@localhost/name'
The APScheduler code looks like this:
from apscheduler.schedulers.blocking import BlockingScheduler
sched = BlockingScheduler(daemon=True)
sched.add_job(func=update, trigger='interval', minutes=5)
sched.start()
The database update function looks like this:
def update():
for i in data:
for row in Names.query:
if row.id == i['id']:
row.name = i['name']
row.gender = i['gender']
row.age = i['age']
db.session.commit()
In the logs, APScheduler always reports success. I also looked at the PostgreSQL logs, where I found this phrase: 'An existing connection was forcibly closed by the remote host.'
I suspect the problem is with the database engine and sessions, but I haven't found instructions for handling this within the Flask-SQLAlchemy package.
Versions of packages:
Flask-SQLAlchemy==2.4.1
SQLAlchemy==1.3.17
APScheduler==3.6.3
db Model:
class Names(db.Model):
__searchable__ = ['name', 'age']
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(40))
gender = db.Column(db.String(40))
age = db.Column(db.Numeric)
def __repr__(self):
return '<Names %r>' % self.id
I think I figured out what the problem is. The data variable was only evaluated once, when the module was imported, so every scheduled run reused that same snapshot instead of fetching fresh data.
Before the function, I have the following code:
request = requests.get('https://privateapi')
data = request.json()
The function then reads from data:
def update():
for i in data:
for row in Names.query:
if row.id == i['id']:
row.name = i['name']
row.gender = i['gender']
row.age = i['age']
db.session.commit()
According to the Flask-SQLAlchemy logs, the data is written to the database successfully. I tried adding print(data) to the function so that it would show me the contents of the data variable every 5 minutes, and I saw that its contents were not updated.
It turns out the data is written to the database, but with the same values, so I don't see any update.
Then I tried inlining the request instead of saving its result to a variable first:
def update():
for i in requests.get('https://privateapi').json():
for row in Names.query:
if row.id == i['id']:
row.name = i['name']
row.gender = i['gender']
row.age = i['age']
db.session.commit()
But again, nothing changed.
UPDATE:
I solved this problem by removing the data variable at the end of the function:
def update():
name = requests.get('https://privateapi').json()
for i in name:
for row in Names.query:
if row.id == i['id']:
row.name = i['name']
row.gender = i['gender']
row.age = i['age']
del name
db.session.commit()
I want to be able to create and edit the attributes of the secondary (association) table of a many-to-many relationship while creating or editing either of the primary tables. So, when I edit one of the primary tables and add a relation to another model (implicitly through the secondary table), I want to be able to access and edit the attributes of that secondary relationship.
More specifically:
Models
# "Primary" table
class Paper(db.Model):
__tablename__ = 'papers'
...
    chapters = db.relationship('Chapter', secondary="chapter_paper")  # string reference, since Chapter is defined below
...
# "Primary" table
class Chapter(db.Model):
...
papers = db.relationship('Paper', secondary="chapter_paper")
...
# "Secondary" table
class ChapterPaper(db.Model):
__tablename__ = 'chapter_paper'
paper_id = db.Column(db.Integer,
db.ForeignKey('papers.id'),
primary_key=True)
chapter_id = db.Column(db.Integer,
db.ForeignKey('chapters.id'),
primary_key=True)
### WANT TO EDIT
printed = db.Column(db.Boolean, default=False)
note = db.Column(db.Text, nullable=True)
### WANT TO EDIT
paper = db.relationship('Paper',
backref=db.backref("chapter_paper_assoc",
lazy='joined'),
lazy='joined')
chapter = db.relationship(Chapter,
backref=db.backref("chapter_paper_assoc",
lazy='joined'),
lazy='joined')
So, for this example, I want to be able to edit the "printed" and "note" attributes of ChapterPaper from the create/edit forms of Paper and Chapter in Flask-Admin.
ModelViews
# MainModelView subclasses flask_admin.contrib.sqla.ModelView
class PaperModelView(MainModelView):
...
form_columns = (
'title',
'abstract',
'doi',
'pubmed_id',
'link',
'journals',
'keywords',
'authors',
'chapters',
)
# Using form_columns allows CRUD for the many to many
# relation itself, but does not allow access to secondary attributes
...
So, I honestly have very little idea of how to do this. If I added the form fields as extras and then manually validated them...? (I don't know how to do that.)
Even then, adding extra fields to the form doesn't really cover multiple models. Can anyone show me how to do this, point me to a tutorial, or even a relevant example from some random project's code?
Thanks!
Alrighty, this was a lot of work and required a lot of RTFM, but it was pretty straightforward once I got going.
The way to do this without a neat API is to extend the model view and replace the create / edit form with a form of your own.
Here is my form class:
class ExtendedPaperForm(FlaskForm):
title = StringField()
abstract = TextAreaField()
doi = StringField()
pubmed_id = StringField()
link = StringField()
journals = QuerySelectMultipleField(
query_factory=_get_model(Journal),
allow_blank=False,
)
issue = StringField()
volume = StringField()
pages = StringField()
authors = QuerySelectMultipleField(
query_factory=_get_model(Author),
allow_blank=False,
)
keywords = QuerySelectMultipleField(
query_factory=_get_model(Keyword),
allow_blank=True,
)
chapters_printed = QuerySelectMultipleField(
query_factory=_get_model(Chapter),
allow_blank=True,
label="Chapters (Printed)",
)
chapters = QuerySelectMultipleField(
query_factory=_get_model(Chapter),
allow_blank=True,
label="Chapters (All)",
)
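Two pieces aren't shown above: the _get_model query-factory helper the form uses, and how the view picks up this form. A sketch of both follows; the helper is my guess at what it does, while the wiring uses flask-admin's ModelView.form attribute:
def _get_model(model):
    # Hypothetical helper: return a callable yielding all rows of `model`,
    # as QuerySelectMultipleField's query_factory expects
    def query_factory():
        return model.query
    return query_factory

class PaperModelView(MainModelView):
    # Replace the auto-generated create/edit form with the custom one
    form = ExtendedPaperForm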
The important part for making this functionality happen is the on_model_change method, which performs an action before a model is saved.
...
def on_model_change(self, form, model, is_created):
"""
Perform some actions before a model is created or updated.
Called from create_model and update_model in the same transaction (if it has any meaning for a store backend).
By default does nothing.
Parameters:
form – Form used to create/update model
model – Model that will be created/updated
is_created – Will be set to True if model was created and to False if edited
"""
all_chapters = list(set(form.chapters.data + form.chapters_printed.data))
for chapter in all_chapters:
if chapter in form.chapters_printed.data: # if chapter in both, printed takes priority
chapter_paper = ChapterPaper.query.filter_by(chapter_id=chapter.id, paper_id=model.id).first()
if not chapter_paper:
chapter_paper = ChapterPaper(chapter_id=chapter.id, paper_id=model.id)
chapter_paper.printed = True
db.session.add(chapter_paper)
journal = None
if form.journals.data:
journal = form.journals.data[0]
if journal: # Assumes only 1 journal if there are any journals in this field
issue = form.issue.data
volume = form.volume.data
pages = form.pages.data
journal_paper = JournalPaper.query.filter_by(journal_id=journal.id, paper_id=model.id).first()
if not journal_paper:
journal_paper = JournalPaper(journal_id=journal.id, paper_id=model.id)
journal_paper.issue = issue
journal_paper.volume = volume
journal_paper.pages = pages
db.session.add(journal_paper)
...
I'm trying to use the PostgreSQL CREATE TEMPORARY TABLE foo AS SELECT... query in SQLAlchemy Core. I've looked through the docs but don't see a way to do this.
I have a SQLA statement object. How do I create a temporary table from its results?
This is what I came up with. Please tell me if this is the wrong way to do it.
from sqlalchemy.sql import Select
from sqlalchemy.ext.compiler import compiles
class CreateTableAs(Select):
"""Create a CREATE TABLE AS SELECT ... statement."""
def __init__(self, columns, new_table_name, is_temporary=False,
on_commit_delete_rows=False, on_commit_drop=False, *arg, **kw):
"""By default the table sticks around after the transaction. You can
change this behavior using the `on_commit_delete_rows` or
`on_commit_drop` arguments.
:param on_commit_delete_rows: All rows in the temporary table will be
deleted at the end of each transaction block.
:param on_commit_drop: The temporary table will be dropped at the end
of the transaction block.
"""
super(CreateTableAs, self).__init__(columns, *arg, **kw)
self.is_temporary = is_temporary
self.new_table_name = new_table_name
self.on_commit_delete_rows = on_commit_delete_rows
self.on_commit_drop = on_commit_drop
@compiles(CreateTableAs)
def s_create_table_as(element, compiler, **kw):
"""Compile the statement."""
text = compiler.visit_select(element)
spec = ['CREATE', 'TABLE', element.new_table_name, 'AS SELECT']
if element.is_temporary:
spec.insert(1, 'TEMPORARY')
on_commit = None
if element.on_commit_delete_rows:
on_commit = 'ON COMMIT DELETE ROWS'
elif element.on_commit_drop:
on_commit = 'ON COMMIT DROP'
if on_commit:
spec.insert(len(spec)-1, on_commit)
    # Replace only the leading SELECT keyword (a subquery could contain another SELECT)
    text = text.replace('SELECT', ' '.join(spec), 1)
    return text
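For reference, a minimal usage sketch, assuming foo is an existing Table object and engine is a working Engine (both names are illustrative):
stmt = CreateTableAs(
    [foo.c.id, foo.c.name],  # columns that form the SELECT list
    'foo_tmp',               # name of the table to create
    is_temporary=True,
    on_commit_drop=True,     # drop the temp table when the transaction ends
)
with engine.begin() as conn:
    # Emits: CREATE TEMPORARY TABLE foo_tmp ON COMMIT DROP AS SELECT id, name FROM foo
    conn.execute(stmt)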