AutoRollback doesn't roll back - Scala

After I run the following spec, the table exists. I expected it never to be present, since it should only exist within the transaction that is eventually rolled back.
import java.util.UUID

import org.specs2.mutable.Specification
import scalikejdbc.{DB, NamedDB}
import scalikejdbc.config.DBs
import scalikejdbc.specs2.mutable.AutoRollback

class MyQuerySpec extends Specification with ArbitraryInput {
  sequential

  DBs.setup('myDB)

  "creating the table" in new AutoRollback {
    override def db(): DB = NamedDB('myDB).toDB()

    private val tableName = s"test_${UUID.randomUUID().toString.replaceAll("-", "_")}"
    private val query = new MyQuery(tableName)

    query.createTable
    ok
  }
}
The line DBs.setup('myDB) is not part of the examples, but if I remove it I get this exception: java.lang.IllegalStateException: Connection pool is not yet initialized.(name:'myDB)
The source of MyQuery.createTable:
SQL(s"DROP TABLE IF EXISTS $tableName").execute().apply()
SQL(s"""
|CREATE TABLE $tableName (
| id bigint PRIMARY KEY
|)""".stripMargin).execute().apply()
Config:
db {
  myDB {
    driver = "org.postgresql.Driver"
    url = "****"
    user = "****"
    password = "****"
    poolInitialSize = 1
    poolMaxSize = 300
    poolConnectionTimeoutMillis = 120000
    poolValidationQuery = "select 1 as one"
    poolFactoryName = "commons-dbcp2"
  }
}
ScalikeJDBC v2.2.9

MyQuery#createTable must accept an implicit DBSession, like this:
def createTable(implicit session: DBSession)
Without that parameter the method most likely runs on its own (auto-commit) session, outside the transaction that AutoRollback opens and rolls back after the example, which is why the table survives the test. With the implicit session in scope, the DDL executes inside the test's transaction and is rolled back with it.

Related

How to mock sqlalchemy.engine.cursor.LegacyCursorResult?

I have the following class that makes a connection to an MSSQL Server instance in my project:
"""MSSql Connection class."""
import logging
import logging.config
import sqlalchemy
class MSSQLConnection:
_connection = None
_engine = None
def __init__(self, host, database, username, password):
connection_string = self.build_connection_string(
host, database, username, password)
self._engine = sqlalchemy.create_engine(
connection_string, fast_executemany=True,
isolation_level="READ COMMITTED")
def connect(self):
self._connection = self._engine.connect()
def get_result_tuple(self, table):
logger = logging.getLogger()
metadata = sqlalchemy.MetaData()
db_table = sqlalchemy.Table(
table, metadata, autoload=True, autoload_with=self._engine)
query = sqlalchemy.select([db_table])
transaction_id = self.get_transaction_id()
result_proxy = self._connection.execute(query)
return transaction_id, result_proxy
def get_transaction_id(self):
"""Get the transaction id."""
connection = self._connection
sql_query = sqlalchemy.text("SELECT CURRENT_TRANSACTION_ID()")
result = connection.execute(sql_query)
row = result.fetchone()
return row[0]
def build_connection_string(self, host, database, username, password):
connection_string = ('mssql+pyodbc://' + username + ':'
+ password + '#' + host + '/' + database
+ '?driver=ODBC+Driver+17+for+SQL+Server')
return connection_string
I would like to mock the method get_result_tuple, which returns transaction_id and an instance of sqlalchemy.engine.cursor.LegacyCursorResult. How do I mock sqlalchemy.engine.cursor.LegacyCursorResult and return some dummy data on the ResultProxy object?
The caller has the following code:
mssql_connection = MSSQLConnection(
    host, database, username, password)
mssql_connection.connect()
result_tuple = mssql_connection.get_result_tuple(table)
transaction_id, result_proxy = result_tuple
logger.info(f'Transaction id = {transaction_id}')
current_date = date.today().strftime("%Y_%m_%d")
while True:
    partial_results = result_proxy.fetchmany(rows_fetch_limit)
    results_count = len(partial_results)
    if (partial_results == [] or results_count == 0):
        return
    else:
        # other logic
Please advise.
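Since the caller only ever uses the transaction id and the result's fetchmany method, one option (a sketch, not tested against your project layout; my_module is a placeholder for wherever MSSQLConnection actually lives) is to patch get_result_tuple itself and hand back a plain Mock standing in for LegacyCursorResult:
from unittest import mock

from my_module import MSSQLConnection  # hypothetical import path


def test_caller_logic():
    fake_result = mock.Mock(name="LegacyCursorResult")
    # One batch of dummy rows, then an empty list so the caller's loop stops.
    fake_result.fetchmany.side_effect = [[(1, "row one"), (2, "row two")], []]

    with mock.patch("sqlalchemy.create_engine"), \
         mock.patch.object(MSSQLConnection, "connect"), \
         mock.patch.object(MSSQLConnection, "get_result_tuple",
                           return_value=(42, fake_result)):
        conn = MSSQLConnection("host", "db", "user", "password")
        conn.connect()
        transaction_id, result_proxy = conn.get_result_tuple("my_table")

        assert transaction_id == 42
        assert result_proxy.fetchmany(100) == [(1, "row one"), (2, "row two")]
        assert result_proxy.fetchmany(100) == []
With this setup the test never touches SQLAlchemy at all; if you want to exercise the real get_result_tuple instead, you would patch self._connection.execute and sqlalchemy.Table rather than the method itself.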

Generate SQLite database in Flask REST API code

I am new to REST APIs and am building my first REST API app using Flask, SQLAlchemy & Marshmallow. This is my app.py file:
from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os

# Initialize App
app = Flask(__name__)
basedir = os.path.abspath(os.path.dirname(__file__))

# Database Setup
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + os.path.join(basedir, 'db.sqlite')
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False

# Init db
db = SQLAlchemy(app)
# Init marshmallow
ma = Marshmallow(app)

# Product Class/Model
class Product(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True)
    description = db.Column(db.String(200))
    price = db.Column(db.Float)
    qty = db.Column(db.Integer)

    def __init__(self, name, description, price, qty):
        self.name = name
        self.description = description
        self.price = price
        self.qty = qty

# Product Schema
class ProductSchema(ma.Schema):
    class Meta:
        fields = ('id', 'name', 'description', 'price', 'qty')

# Init Schema
product_schema = ProductSchema()
products_schema = ProductSchema(many=True)

# Create Product
@app.route('/product', methods=['POST'])
def add_product():
    name = request.json['name']
    description = request.json['description']
    price = request.json['price']
    qty = request.json['qty']
    new_product = Product(name, description, price, qty)
    db.session.add(new_product)
    db.session.commit()
    return product_schema.jsonify(new_product)

# Get All Products
@app.route('/receive', methods=['GET'])
def get_products():
    all_products = Product.query.all()
    result = products_schema.dump(all_products)
    return jsonify(result)

# Run the Server
if __name__ == '__main__':
    app.run(debug=True)
To generate the SQLite database, I have to open a Python interactive shell and run:
from app import db
db.create_all()
But I need to generate the database from app.py itself, so I put the same commands inside app.py, but it gives me this error:
OperationalError: (sqlite3.OperationalError) no such table: product
How do I generate a database from app.py?
Where are you placing your db.create_all()? The error may simply be a result of placement. When I copy and paste your code into PyCharm (running Python 3.7) it creates the DB fine when I place
db.create_all()
immediately before
# Run the Server
if __name__ == '__main__':
    app.run(debug=True)
If you try to run db.create_all() before you instantiate the db object it will throw an error because db does not exist yet.
You should not need to use "from app import db" at all because the db object is declared up top.
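For reference, here is a minimal, self-contained sketch of that placement (a trimmed-down app, not your full file; with Flask-SQLAlchemy 3.x you would additionally need an application context around the call):
from flask import Flask
from flask_sqlalchemy import SQLAlchemy

app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
db = SQLAlchemy(app)

class Product(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(db.String(100), unique=True)

# All models are defined above, so create_all() knows which tables to build.
db.create_all()  # Flask-SQLAlchemy 3.x: with app.app_context(): db.create_all()

# Run the Server
if __name__ == '__main__':
    app.run(debug=True)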

Session Configuration/Timeout with Slick 3

Is there a way to handle sessions explicitly in Slick 3? I currently have some code that looks like this:
def findUserByEmail(email: String): Option[User] = {
  val users = TableQuery[Users]
  val action = users.filter(_.email === email).result.headOption
  val result = db.run(action.transactionally)
  Await.result(result, Duration.Inf)
}
It works fine the first few times I run it, but then I start running into issues where it looks like connections/sessions are being left open (see below). This code runs inside AWS Lambda functions, and I'm thinking I need to handle sessions more explicitly. How would I do this in Slick 3?
"errorMessage": "Timeout after 5000ms of waiting for a connection.",
"errorType": "java.sql.SQLTimeoutException",
"stackTrace": [
"com.zaxxer.hikari.pool.BaseHikariPool.getConnection(BaseHikariPool.java:233)",
"com.zaxxer.hikari.pool.BaseHikariPool.getConnection(BaseHikariPool.java:183)",
"com.zaxxer.hikari.HikariDataSource.getConnection(HikariDataSource.java:93)",
"slick.jdbc.hikaricp.HikariCPJdbcDataSource.createConnection(HikariCPJdbcDataSource.scala:18)",
"slick.jdbc.JdbcBackend$BaseSession.<init>(JdbcBackend.scala:424)",
"slick.jdbc.JdbcBackend$DatabaseDef.createSession(JdbcBackend.scala:47)",
"slick.jdbc.JdbcBackend$DatabaseDef.createSession(JdbcBackend.scala:38)",
"slick.basic.BasicBackend$DatabaseDef.acquireSession(BasicBackend.scala:218)",
"slick.basic.BasicBackend$DatabaseDef.acquireSession$(BasicBackend.scala:217)",
"slick.jdbc.JdbcBackend$DatabaseDef.acquireSession(JdbcBackend.scala:38)",
"slick.basic.BasicBackend$DatabaseDef$$anon$2.run(BasicBackend.scala:239)",
"java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1142)",
"java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:617)",
"java.lang.Thread.run(Thread.java:745)"
],
"cause": {
"errorMessage": "FATAL: remaining connection slots are reserved for non-replication superuser connections",
"errorType": "org.postgresql.util.PSQLException",
"stackTrace": [
"org.postgresql.core.v3.QueryExecutorImpl.receiveErrorResponse(QueryExecutorImpl.java:2455)",
"org.postgresql.core.v3.QueryExecutorImpl.readStartupMessages(QueryExecutorImpl.java:2586)",
"org.postgresql.core.v3.QueryExecutorImpl.<init>(QueryExecutorImpl.java:113)",
"org.postgresql.core.v3.ConnectionFactoryImpl.openConnectionImpl(ConnectionFactoryImpl.java:222)",
"org.postgresql.core.ConnectionFactory.openConnection(ConnectionFactory.java:52)",
"org.postgresql.jdbc.PgConnection.<init>(PgConnection.java:216)",
"org.postgresql.Driver.makeConnection(Driver.java:404)",
"org.postgresql.Driver.connect(Driver.java:272)",
You could try setting a query timeout, like this:
db.run(action.transactionally.withStatementParameters(statementInit = st => st.setQueryTimeout(100)))
You can also set various properties on the HikariCP connection pool, as below:
slick {
  // https://github.com/slick/slick/blob/master/slick-hikaricp/src/main/scala/slick/jdbc/hikaricp/HikariCPJdbcDataSource.scala
  dataSourceClass = "slick.jdbc.DriverDataSource"
  user = ${database.user}
  password = ${database.password}
  url = ${database.url}
  connectionPool = HikariCP
  maxConnections = 50
  numThreads = 10
  queueSize = 5000
  connectionInitSql = "SELECT 1;"
  connectionTestQuery = "SELECT 1;"
  registerMbeans = true
  properties = {
    driver = ${database.driver}
    url = ${database.url}
  }
}

Convert PostgreSQL to sqlite database with SQLAlchemy in Python3

Is it possible to convert a PostgreSQL database (including data) to an SQLite database with SQLAlchemy?
I tried the code below, and it looks like it works.
What do you think about it? Could this be an answer?
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import sqlalchemy as sa
import sqlalchemy.ext.declarative as sad
import sqlalchemy.orm as sao
import sqlalchemy.orm.session as sas
from sqlalchemy_utils import create_database

_Base = sad.declarative_base()


class Child(_Base):
    __tablename__ = 'Child'
    _oid = sa.Column('oid', sa.Integer, primary_key=True)
    _name = sa.Column('name', sa.String)

    def __init__(self, name):
        self._name = name


class Parent(_Base):
    __tablename__ = 'Parent'
    _oid = sa.Column('oid', sa.Integer, primary_key=True)
    _name = sa.Column('name', sa.String)
    _child_fk = sa.Column('child', sa.Integer, sa.ForeignKey('Child.oid'))
    _child = sao.relationship('Child')

    def __init__(self, name):
        super(Parent, self).__init__()
        self._name = name


pstr = 'postgres://postgres@localhost/Family'
sstr = 'sqlite:///family.db'
pengine = sa.create_engine(pstr, echo = True)
sengine = sa.create_engine(sstr, echo = True)


def createPostgreSQL_Family():
    """Create for PostgreSQL the scheme and the data for testing."""
    # create schema
    create_database(pengine.url)
    _Base.metadata.create_all(pengine)
    psession = sao.sessionmaker(bind = pengine)()
    # child
    c = Child('Jim Bob')
    psession.add(c)
    psession.commit()
    # parent
    p = Parent('Mr. Doe')
    p._child = c
    psession.add(p)
    psession.commit()
    psession.close()


def convert():
    # get one object from the PostgreSQL database
    psession = sao.sessionmaker(bind = pengine)()
    p = psession.query(Parent).first()
    sas.make_transient(p)
    #p._oid = None
    c = psession.query(Child).first()
    sas.make_transient(c)
    #c._oid = None
    psession.close()
    # create and open the SQLite database
    create_database(sengine.url)
    _Base.metadata.create_all(sengine)
    # add/convert the one object to the new database
    ssession = sao.sessionmaker(bind = sengine)()
    ssession.add(c)
    ssession.add(p)
    ssession.commit()


if __name__ == '__main__':
    #createPostgreSQL_Family()
    convert()
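The script works for the rows it touches, but it only ever copies the first Parent and the first Child. A more general approach (a sketch, not from the original script; it assumes SQLAlchemy 1.4+ with a postgresql:// URL and reuses two engines like pengine and sengine above) is to reflect the source schema and copy every table row by row:
import sqlalchemy as sa

def copy_all_tables(source_engine, target_engine):
    # Discover the schema on the source and recreate it on the target.
    metadata = sa.MetaData()
    metadata.reflect(bind=source_engine)
    metadata.create_all(target_engine)
    with source_engine.connect() as src, target_engine.begin() as dst:
        # sorted_tables yields referenced tables before the tables that
        # point at them, so foreign keys stay valid during the copy.
        for table in metadata.sorted_tables:
            rows = [dict(row) for row in src.execute(table.select()).mappings()]
            if rows:
                dst.execute(table.insert(), rows)

copy_all_tables(pengine, sengine)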

Alembic autogenerate does not generate upgrade script

I am using SQLAlchemy and PostgreSQL in a Flask application. The migration tool that I am using is alembic==0.6.3.
If I type alembic current it shows me:
Current revision for postgres://localhost/myDb: None
which is the correct database connection. But when I run alembic revision --autogenerate -m 'Add user table' it generates the default Alembic template without any SQL commands in it, like:
"""Add user table
Revision ID: 59b6d3503442
Revises: None
Create Date: 2015-04-06 13:42:24.540005
"""
# revision identifiers, used by Alembic.
revision = '59b6d3503442'
down_revision = None
from alembic import op
import sqlalchemy as sa
def upgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
def downgrade():
### commands auto generated by Alembic - please adjust! ###
pass
### end Alembic commands ###
I couldn't find a proper solution for this on SO.
Here is my env.py:
from __future__ import with_statement
from logging.config import fileConfig
import os
import sys
import warnings

from alembic import context
from sqlalchemy import create_engine, pool
from sqlalchemy.exc import SAWarning

ROOT = os.path.abspath(
    os.path.join(os.path.dirname(__file__), os.pardir, os.pardir)
)
sys.path.append(ROOT)

from myApp import Application
from myApp.extensions import db

# Don't raise exception on `SAWarning`s. For example, if Alembic does
# not recognize some column types when autogenerating migrations,
# Alembic would otherwise crash with SAWarning.
warnings.simplefilter('ignore', SAWarning)

app = Application()

# this is the Alembic Config object, which provides
# access to the values within the .ini file in use.
config = context.config

# Interpret the config file for Python logging.
# This line sets up loggers basically.
fileConfig(config.config_file_name)

target_metadata = db.metadata


def run_migrations_offline():
    """Run migrations in 'offline' mode.

    This configures the context with just a URL
    and not an Engine, though an Engine is acceptable
    here as well. By skipping the Engine creation
    we don't even need a DBAPI to be available.

    Calls to context.execute() here emit the given string to the
    script output.
    """
    url = app.config['SQLALCHEMY_DATABASE_URI']
    context.configure(url=url)
    with context.begin_transaction():
        context.run_migrations()


def run_migrations_online():
    """Run migrations in 'online' mode.

    In this scenario we need to create an Engine
    and associate a connection with the context.
    """
    url = app.config['SQLALCHEMY_DATABASE_URI']
    engine = create_engine(url, poolclass=pool.NullPool)
    connection = engine.connect()
    context.configure(
        connection=connection,
        target_metadata=target_metadata
    )
    try:
        with context.begin_transaction():
            context.run_migrations()
    finally:
        connection.close()


if context.is_offline_mode():
    run_migrations_offline()
else:
    run_migrations_online()
and this is my alembic.ini:
# A generic, single database configuration.

[alembic]
# path to migration scripts
script_location = myApp/migrations

# template used to generate migration files
# file_template = %%(rev)s_%%(slug)s

# Logging configuration
[loggers]
keys = root,sqlalchemy,alembic

[handlers]
keys = console

[formatters]
keys = generic

[logger_root]
level = WARN
handlers = console
qualname =

[logger_sqlalchemy]
level = WARN
handlers =
qualname = sqlalchemy.engine

[logger_alembic]
level = INFO
handlers =
qualname = alembic

[handler_console]
class = StreamHandler
args = (sys.stderr,)
level = NOTSET
formatter = generic

[formatter_generic]
format = %(levelname)-5.5s [%(name)s] %(message)s
datefmt = %H:%M:%S
Additional files for more information:
This is the extensions.py file from which I import db for the metadata:
from flask.ext.sqlalchemy import SQLAlchemy
from raven.contrib.flask import Sentry
from sqlalchemy_utils import force_instant_defaults
db = SQLAlchemy()
sentry = Sentry()
force_instant_defaults()
and this is the model user.py file:
# -*- coding: utf-8 -*-
from datetime import datetime

from flask.ext.login import UserMixin
from sqlalchemy_utils.types.password import PasswordType

from ..extensions import db


class User(db.Model, UserMixin):
    id = db.Column(db.Integer, primary_key=True)
    name = db.Column(
        db.Unicode(255),
        nullable=False
    )
    surname = db.Column(
        db.Unicode(255),
        nullable=False
    )
    email = db.Column(
        db.Unicode(255),
        nullable=False,
        unique=True
    )
    password = db.Column(
        PasswordType(128, schemes=['sha512_crypt']),
        nullable=False
    )
    created_at = db.Column(
        db.DateTime,
        nullable=False,
        default=datetime.utcnow
    )

    def is_active(self):
        return True

    def __repr__(self):
        return '<{cls} email={email!r}>'.format(
            cls=self.__class__.__name__,
            email=self.email,
        )

    def __str__(self):
        return unicode(self).encode('utf8')

    def __unicode__(self):
        return self.email
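One thing worth checking (an assumption on my part, since it is not visible in the snippets above): autogenerate only compares the database against the tables registered on target_metadata, so if no model module has been imported by the time env.py runs, db.metadata is empty and every revision comes out blank. A minimal guard in env.py, assuming a hypothetical myApp.models.user module path for the User model shown above, would be:
from myApp.extensions import db
# Importing the model modules registers their tables on db.metadata.
# The path below is a guess; adjust it to wherever the models actually live.
import myApp.models.user  # noqa: F401

target_metadata = db.metadata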