Writing to multiple tables using sqlalchemy, fastapi, pydantic postgres - postgresql

First API I've built so bear with me, I currently have a FastAPI that is supposed to save a record of an event and when it happened, as well as a list of people who assisted with each event. Currently, my crud.py "post" command currently only posts to 'test', but I also need it to post names of those who helped to 'whohelped'. I've tried to make 'whohelped.event_token' the foreign key of 'Save_Info.token'. A check on whether my models and schema are correctly made would be greatly appreciated. The main issue is I'm totally lost how to make "post" make changes to both tables at once.
models.py
from .database import Base
from sqlalchemy import Column, String, Integer, Date, ForeignKey
from sqlalchemy.orm import relationship
class Save_Info(Base):
__tablename__ = 'test'
token = Column(Integer, primary_key = True, autoincrement = True)
how = Column(String)
date = Column(Date)
children = relationship("Who_Helped",back_populates="test")
class Who_Helped(Base):
__tablename__ = 'whohelped'
id = Column(Integer, primary_key = True, autoincrement = True)
event_token = Column(Integer, ForeignKey('test.token'))
who_helped = Column(String)
schema.py
from pydantic import BaseModel
from typing import Optional, List
from sqlalchemy.orm import relationship
from sqlalchemy import DateTime
class Who_Helped(BaseModel):
id: int
event_token: int
who_helped: Optional[str]
class Save_Info(BaseModel):
token: int
how: str
date: str
class Config:
orm_mode = True
crud.py
from sqlalchemy.orm import Session
from . import schema, models
def post_info(db: Session, info: schema.Save_Info):
device_info_model = models.Save_Info(**info.dict())
db.add(device_info_model)
db.commit()
db.refresh(device_info_model)
return device_info_model
def get_info(db: Session, token: int = None):
if token is None:
return db.query(models.Save_Info).all()
else:
return db.query(models.Save_Info).filter(models.Save_Info.token == token).first()
def error_message(message):
return {
'error': message
}
main.py
from fastapi import FastAPI, Depends, HTTPException
from .database import SessionLocal, engine
from sqlalchemy.orm import Session
from .schema import Save_Info
from . import crud, models
models.Base.metadata.create_all(bind=engine)
app = FastAPI()
def db():
try:
db = SessionLocal()
yield db
finally:
db.close()
#app.post('/device/info')
def post_info(info: Save_Info, db=Depends(db)):
object_in_db = crud.get_info(db, info.token)
if object_in_db:
raise HTTPException(400, detail= crud.error_message('This account of saving the world already exists'))
return crud.post_info(db,info)
#app.get('/device/info/{token}')
def get_info(token: int, db=Depends(db)):
info = crud.get_info(db,token)
if info:
return info
else:
raise HTTPException(404, crud.error_message('No info found for this account of saving the world {}'.format(token)))
#app.get('/device/info')
def get_all_info(db=Depends(db)):
return crud.get_info(db)

Related

Implementation of count(*) in Graphene / Mongo

Good afternoon,
How may I implement a count of items back of MongoDB request and make it available through GraphQl request ? I am currently using MongoDB <=> MongoEngine <=> graphene <=> Flask .
Any help will be welcomed .
Thanks
B.
After a lot of time, reading forums and Internet pages, I am now able to retrieve the total count of entity send back from MongoDB.
Here is the Models.py
from mongoengine import Document
from mongoengine.fields import StringField
class User(Document):
meta = {'collection': 'user'}
first_name = StringField(required=True)
last_name = StringField(required=True)
Here is the Schema.py
from graphene_mongo import MongoengineObjectType,MongoengineConnectionField
import graphene
from graphene.relay import Node
from Models import User as UserModel
from mongoengine import connect
from flask import Flask
from flask_graphql import GraphQLView
connect(db="graphene-mongo-example",host="127.0.0.1:27017",alias="default")
class Connection(graphene.Connection):
class Meta:
abstract = True
total_count = graphene.Int()
def resolve_total_count(self, info):
return len(self.edges)
class User(MongoengineObjectType):
class Meta:
model = UserModel
interfaces=(Node,)
filter_fields = {'first_name': {'startswith', 'contains'}, 'last_name': [""]}
connection_class = Connection
class Query(graphene.ObjectType):
Node=Node.Field()
all_users = MongoengineConnectionField(User)
schema = graphene.Schema(query=Query)
app = Flask(__name__)
app.debug = True
app.add_url_rule(
"/graphql", view_func=GraphQLView.as_view("graphql", schema=schema, graphiql=True,types=[User])
)
if __name__ == "__main__":
app.run()
To run this example :
python Schema.py

Cannot access data after full text search using sqlalchemy, postgres and flask

I would like to search my postgres data base using postgres build-in full text search capability. In my app I have a set of posts stored according to title, content and date.
I think I can search the database using tsvector, but cannot retrieve the data from the results; i.e. the title, the content and the date. Could anyone help me, please?
import json, sys
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql.expression import cast, func
from sqlalchemy import Index
def create_tsvector(*args):
exp = args[0]
for e in args[1:]:
exp += ' ' + e
return func.to_tsvector('english', exp)
app = Flask(__name__)
app.config['SECRET_KEY'] = 'some_key'
app.config["SQLALCHEMY_DATABASE_URI"] = 'postgresql:somedb'
db = SQLAlchemy(app)
class Post(db.Model):
id = db.Column(db.Integer, primary_key=True)
title = db.Column(db.Text, nullable=False)
content = db.Column(db.Text, nullable=False)
date = db.Column(db.Text,unique=False)
__ts_vector__ = create_tsvector(
cast(func.coalesce(content, ''), postgresql.TEXT)
)
__table_args__ = (Index('idx_post_fts', __ts_vector__, postgresql_using='gin'), )
def __repr__(self):
return f"Post('{self.title}', '{self.date}')"
if len(sys.argv) > 1:
filename1 = sys.argv[1]
infile=open(filename1,'r')
posts=json.load(infile)
infile.close()
List=list(posts)
art = 0
for j in range(0,len(List)):
if j % 10 == 0:
print(j)
title= posts[List[art]]['Title']
date = posts[List[art]]['Posted']
content=posts[List[art]]['Text']
post = Post(title=title, date=date, content=content)
db.session.add(post)
db.session.commit()
art+=1
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy import select, cast
posts = Post.__ts_vector__.match("bicycle", postgresql_regconfig='english')
print(posts)

mongoengine connection and multiple databases

I have 2 databases I want to query from, but I only get results from one. I'm using mongoengine with python and graphene (it's my first time). I've exhausted my search and I don't understand how I can resolve this issue. Here is my code:
import graphene
from mongoengine import Document, connect
from mongoengine.context_managers import switch_collection
from mongoengine.fields import (
StringField,
UUIDField,
IntField,
FloatField,
BooleanField,
)
from graphene_mongo import MongoengineObjectType
from mongoengine.connection import disconnect
class UserModel(Document):
meta = {"collection": "users"}
userID = UUIDField()
first_name = StringField()
last_name = StringField()
class Users(MongoengineObjectType):
class Meta:
model = UserModel
class UsersQuery(graphene.ObjectType):
users = graphene.List(Users)
user = graphene.Field(Users, userID=graphene.UUID())
def resolve_users(self, info):
db = connect("users")
users = list(UserModel.objects.all())
db.close()
return users
def resolve_user(self, info, userID):
return UserModel.objects(userID=userID).first()
users_schema = graphene.Schema(query=UsersQuery)
import graphene
from mongoengine import Document, connect
from mongoengine.fields import StringField, UUIDField
from graphene_mongo import MongoengineObjectType
from mongoengine.connection import disconnect
class Workout(Document):
meta = {"collection": "workouts"}
workoutID = UUIDField()
workout_label = StringField()
class Workouts(MongoengineObjectType):
class Meta:
model = Workout
class Query(graphene.ObjectType):
workouts = graphene.List(Workouts)
workout = graphene.Field(Workouts, workoutID=graphene.UUID())
def resolve_workouts(self, info):
db = connect("workouts")
wks = list(Workout.objects.all())
db.close()
return wks
def resolve_workout(self, info, workoutID):
return Workout.objects(workoutID=workoutID).first()
workouts_schema = graphene.Schema(query=Query)
Now when I have my python server up, mongod running I can hit the /workouts and it will return the array I need. But /users will not return the results.
I get no errors, nothing is wrong with my graphene query.
I can only get one of the queries to work at once.
I have tried using alias, not closing the connections, declaring the connect at the top level even before class UserModel or Workout.
If each of your model is bound to a different database. You should use something like this (cfr docs):
connect('workouts', alias='dbworkouts') # init a connection to database named "workouts" and register it under alias "dbworkouts"
connect('users', alias='dbusers')
class Workout(Document):
meta = {"db_alias": "dbworkouts"}
workoutID = UUIDField()
...
class UserModel(Document):
meta = {"db_alias": "dbusers"}
userID = UUIDField()
...

Override default Model View

How can I override the Model View so by default all Model Views have exact same settings which I override?
For example:
I have 5 model views linked to some db models which are all custom,but I want all these 5 to have some default settings so I won't have to write code for each view in the ModelView class.
Use inheritance:
class BaseView(ModelView):
# Add common functionality here
pass
class ProductView(BaseView):
# Add specific functionality here
pass
class CategoryView(BaseView):
# Add specific functionality here
pass
A simple one file example below.
Class BaseView turns on can_view_details and formats the description column in upper case.
Notice the difference between ProductView, which inherits from BaseView, and ProductNotInheritedView which inherits directly from ModelView.
Note the code uses the Faker library to generate random data.
from flask import Flask
from flask_admin.contrib.sqla import ModelView
from flask_sqlalchemy import SQLAlchemy
from faker import Faker
from flask_admin import Admin
app = Flask(__name__)
# Create in-memory database
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite://'
app.config['SQLALCHEMY_ECHO'] = True
db = SQLAlchemy(app)
# Flask views
#app.route('/')
def index():
return 'Click me to get to Admin!'
class Supplier(db.Model):
__tablename__ = 'supplier'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(100), nullable=False)
description = db.Column(db.UnicodeText(), nullable=True)
products = db.relationship("Product", back_populates="supplier")
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.name
class Product(db.Model):
__tablename__ = 'product'
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.Unicode(100), nullable=False)
code = db.Column(db.Unicode(32), nullable=False)
description = db.Column(db.UnicodeText(), nullable=True)
supplier_id = db.Column(db.Integer, db.ForeignKey('supplier.id'), index=True, nullable=False)
supplier = db.relationship(Supplier, back_populates='products')
def __str__(self):
return unicode(self).encode('utf-8')
def __unicode__(self):
return self.name
class BaseView(ModelView):
can_view_details = True
column_formatters = {
'description': lambda v, c, m, p: m.description.upper(),
}
class SupplierView(BaseView):
column_list = ('name', 'description', 'products')
class ProductView(BaseView):
pass
class ProductNotInheritedView(ModelView):
pass
admin = Admin(app, template_mode="bootstrap3")
admin.add_view(SupplierView(Supplier, db.session))
admin.add_view(ProductView(Product, db.session))
admin.add_view(
ProductNotInheritedView(Product, db.session, name='Product Not Inherited', endpoint='product-not-inherited'))
#app.before_first_request
def build_sample_db():
db.drop_all()
db.create_all()
fake = Faker()
_suppliers = []
for _ in range(20):
_supplier = Supplier(
name=fake.company(),
description=fake.paragraph(nb_sentences=fake.random.randint(1, 10))
)
_suppliers.append(_supplier)
for _ in range(fake.random.randint(1, 10)):
_supplier.products.append(
Product(
name=' '.join(fake.words(nb=fake.random.randint(1, 5))),
description=fake.paragraph(nb_sentences=fake.random.randint(1, 10)),
code=fake.isbn10(separator="-")
)
)
db.session.add_all(_suppliers)
db.session.commit()
if __name__ == '__main__':
app.run(port=5000, debug=True)

DynamoDB / Scanamo : The provided key element does not match the schema

I've been trying to use DynamoDB through the Scanamo library. My scala code looks like this:
package my.package
import com.amazonaws.ClientConfiguration
import com.amazonaws.regions.{Region, Regions}
import com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient
import com.gu.scanamo._
import com.gu.scanamo.syntax._
import com.amazonaws.auth.{AWSStaticCredentialsProvider, BasicAWSCredentials}
import com.amazonaws.services.dynamodbv2.datamodeling._
object MusicService {
def main(args: Array[String]): Unit = {
val musicService = new MusicService
musicService.getAlbums()
}
}
class MusicService {
def getAlbums() {
val awsCreds = new BasicAWSCredentials("my","creds")
val client = AmazonDynamoDBClient
.builder()
.withRegion(Regions.EU_WEST_2)
.withCredentials(new AWSStaticCredentialsProvider(awsCreds))
.build();
case class Music(#DynamoDBIndexRangeKey(attributeName = "Artist")
artist: String, #DynamoDBIndexHashKey(attributeName = "SongTitle") songTitle: String);
val table = Table[Music]("Music")
val putOp = table.putAll(Set(
Music("The Killers", "Sam's Town"),
Music("The Killers", "Spaceman")
))
Scanamo.exec(client)(putOp)
}
I am getting this error on execing the putOp:
Exception in thread "main" com.amazonaws.services.dynamodbv2.model.AmazonDynamoDBException: The provided key element does not match the schema (Service: AmazonDynamoDBv2; Status Code: 400; Error Code: ValidationException; Request ID: 0KAFH90JO39COO143LC5H6RPPNVV4KQNSO5AEMVJF66Q9ASUAAJG)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.handleErrorResponse(AmazonHttpClient.java:1638)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeOneRequest(AmazonHttpClient.java:1303)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeHelper(AmazonHttpClient.java:1055)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.doExecute(AmazonHttpClient.java:743)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.executeWithTimer(AmazonHttpClient.java:717)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.execute(AmazonHttpClient.java:699)
at com.amazonaws.http.AmazonHttpClient$RequestExecutor.access$500(AmazonHttpClient.java:667)
at com.amazonaws.http.AmazonHttpClient$RequestExecutionBuilderImpl.execute(AmazonHttpClient.java:649)
at com.amazonaws.http.AmazonHttpClient.execute(AmazonHttpClient.java:513)
at com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient.doInvoke(AmazonDynamoDBClient.java:2186)
at com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient.invoke(AmazonDynamoDBClient.java:2162)
at com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient.executeBatchWriteItem(AmazonDynamoDBClient.java:575)
at com.amazonaws.services.dynamodbv2.AmazonDynamoDBClient.batchWriteItem(AmazonDynamoDBClient.java:551)
at com.gu.scanamo.ops.ScanamoInterpreters$$anon$1.apply(ScanamoInterpreters.scala:51)
at com.gu.scanamo.ops.ScanamoInterpreters$$anon$1.apply(ScanamoInterpreters.scala:30)
at cats.free.Free.$anonfun$foldMap$1(Free.scala:126)
at cats.package$$anon$1.tailRecM(package.scala:41)
at cats.free.Free.foldMap(Free.scala:124)
at cats.free.Free.$anonfun$foldMap$1(Free.scala:127)
at cats.package$$anon$1.tailRecM(package.scala:41)
at cats.free.Free.foldMap(Free.scala:124)
at com.gu.scanamo.Scanamo$.exec(Scanamo.scala:17)
at my.package.MusicService.getAlbums(MusicService.scala:39)
at my.package.MusicService$.main(MusicService.scala:14)
at my.package.MusicService.main(MusicService.scala)
My table structure on DynamoDB is incredibly simple and looks like this:
Table name: Music
Partition key: Artist
Sort key: SongTitle
That's all there is.
Please can you give me some guidance why this is failing and what I can do to fix it?
First of all you need to swap #DynamoDBIndexHashKey and #DynamoDBIndexRangeKey (as #DynamoDBIndexHashKey should be for hash key - artist and #DynamoDBIndexRangeKey for sort key - songTitle).
Also you mentioned that Artist is a Partition key and SongTitle is a Sort key. So why you use #DynamoDBIndexHashKey and #DynamoDBIndexRangeKey? I guess you need #DynamoDBHashKey and #DynamoDBRangeKey instead (in case artist and songTitle are not index).