SQL query with variables (real or integer) - SELECT

I'm learning Python. I tried to execute SQL queries with parameters, without success.
I tried:
from tkinter import*
import tkinter as tk
from tkinter import ttk
import sqlite3
realNumber = 2.0
database = sqlite3.connect('NumDB.db')
cursor = database.cursor()
cursor.execute("SELECT numColumn from numTable WHERE realNumber=?", ( realNumber ))
results = cursor.fetchall()
print(results)
cursor.close()
database.close()
It works when I define the variable as text:
realNumber = "2.0"
or
cursor.execute("SELECT numColumn from numTable WHERE realNumber=?", ( str(realNumber)))
The type of realNumber is set to REAL in the database.
Is it possible to use real or integer variables without converting them to strings?
Thanks

Depending on the version of Python (f-strings need 3.6+), the sample code below doesn't require a type conversion. (Note: only option 2, which keeps the ? placeholder and passes the value separately, avoids SQL injection; options 3 and 4 interpolate the value directly into the SQL string.)
def func1(ag):
    # Plain f-string interpolation, just to show the value.
    return f" variable {ag}"

def func2(ag):
    # Keeps the ? placeholder and returns the value alongside the SQL.
    return "SELECT numColumn from numTable WHERE realNumber=?", (ag)

def func3(ag):
    # Interpolates the value directly into the SQL string (f-string).
    return f"SELECT numColumn from numTable WHERE realNumber={ag}"

def func4(ag):
    # Interpolates the value directly into the SQL string (str.format).
    sql = "SELECT numColumn from numTable WHERE realNumber={}"
    return sql.format(ag)
ag = 40.5
print(func1(ag))
print(func2(ag))
print(func3(ag))
print(func4(ag))
Output:
variable 40.5
('SELECT numColumn from numTable WHERE realNumber=?', 40.5)
SELECT numColumn from numTable WHERE realNumber=40.5
SELECT numColumn from numTable WHERE realNumber=40.5
#---------------Original code in question-----------------
from tkinter import*
import tkinter as tk
from tkinter import ttk
import sqlite3
realNumber = 2.0
database = sqlite3.connect('NumDB.db')
cursor = database.cursor()
cursor.execute("SELECT numColumn from numTable WHERE realNumber={}".format(realNumber) )
results = cursor.fetchall()
print(results)
cursor.close()
database.close()
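For reference, the original parameterised call fails only because ( realNumber ) is a parenthesised float rather than a one-element tuple; sqlite3 expects a sequence (or dict) of parameters. A minimal, self-contained sketch (using an in-memory database, so the table and values here are illustrative only) showing that a float can be bound without any string conversion:

import sqlite3

realNumber = 2.0

# In-memory database so the sketch runs standalone; table/column names are illustrative.
database = sqlite3.connect(':memory:')
cursor = database.cursor()
cursor.execute("CREATE TABLE numTable (numColumn REAL, realNumber REAL)")
cursor.execute("INSERT INTO numTable VALUES (?, ?)", (10.5, 2.0))

# The trailing comma makes (realNumber,) a one-element tuple, so the float is bound directly.
cursor.execute("SELECT numColumn FROM numTable WHERE realNumber=?", (realNumber,))
print(cursor.fetchall())  # [(10.5,)]

cursor.close()
database.close()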

Related

Connecting Scala with Hive Database using sbt for dependencies using IntelliJ

I am having a very difficult time connecting to a Hive database using IntelliJ or the basic command line with Scala (I would be happy with Java too). In the past I have been able to connect to a MySQL database by adding the mysql-connector library, but somehow I am unable to add a jar file to the project structure in a way that works.
And to make things a bit more difficult, I have installed Ubuntu with Hive, Spark and Hadoop, and I am connecting to it over the network.
Is there some way I can add a dependency in the sbt file?
Lastly, I know there are similar questions, but they do not show in detail how to connect to a Hive database from Scala.
import java.sql.SQLException;
import java.sql.Connection;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.DriverManager;

object HiveJdbcClient extends App {
  val driverName = "org.apache.hadoop.hive.jdbc.HiveDriver";
  Class.forName(driverName);
  val con = DriverManager.getConnection("jdbc:hive://http://192.168.43.64:10000/default", "", "");
  val stmt = con.createStatement();
  val tableName = "testHiveDriverTable";
  stmt.executeQuery("drop table " + "wti");
  var res = stmt.executeQuery("create table " + tableName + " (key int, value string)");

  // select * query
  var sql = "select * from " + tableName;
  res = stmt.executeQuery(sql);
  while (res.next()) {
    System.out.println(String.valueOf(res.getInt(1)) + "\t" + res.getString(2));
  }

  // regular hive query
  sql = "select count(1) from " + tableName;
  res = stmt.executeQuery(sql);
  while (res.next()) {
    System.out.println(res.getString(1));
  }
}
The driver name is not correct for Hive 3.1.2; it should be
org.apache.hive.jdbc.HiveDriver
See https://hive.apache.org/javadocs/r3.1.2/api/org/apache/hive/jdbc/HiveDriver.html
(The JDBC URL also looks off: HiveServer2 connections normally use the form jdbc:hive2://192.168.43.64:10000/default, without the embedded http://.)

Odoo - Creating a sequence based on a PostgreSQL sequence

I am working with Odoo 14 and I want to customize the sale.order number generation. So I want to create a new sequence (ir.sequence) based on a PostgreSQL database sequence object.
Do you have any idea?
Thank you for your help.
SAAD
from odoo import api, fields, models
import psycopg2

class ventes(models.Model):
    _inherit = ['sale.order']

    company = fields.Char()
    name = fields.Char(string='Order Reference')

    # Connect to the database
    def open_conn(self):
        try:
            connection = psycopg2.connect(user="user",
                                          password="xxxxxxxxxxxxxx",
                                          host="192.168.1.1",
                                          port="5432",
                                          database="ventes")
            print("Using Python variable in PostgreSQL select Query")
            cursor = connection.cursor()
            postgreSQL_select_Query = "select nextval('myOdoo')"
            cursor.execute(postgreSQL_select_Query)
            row = cursor.fetchone()
            return row[0]
        except (Exception, psycopg2.Error) as error:
            print("Error fetching data from PostgreSQL table", error)
        finally:
            # closing database connection
            if connection:
                cursor.close()
                connection.close()
                print("PostgreSQL connection is closed \n")

    @api.model
    def create(self, vals):
        num = self.open_conn()
        vals['name'] = num
        result = super(ventes, self).create(vals)
        return result
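As a side note, if the PostgreSQL sequence lives in the same database that Odoo itself uses, the extra psycopg2 connection can probably be avoided by reusing Odoo's own cursor. A minimal sketch of that variant (the sequence name my_odoo_seq is hypothetical):

from odoo import api, models

class SaleOrder(models.Model):
    _inherit = 'sale.order'

    @api.model
    def create(self, vals):
        # Reuse Odoo's cursor instead of opening a second connection.
        # 'my_odoo_seq' is a hypothetical sequence created directly in PostgreSQL.
        self.env.cr.execute("SELECT nextval('my_odoo_seq')")
        vals['name'] = str(self.env.cr.fetchone()[0])
        return super(SaleOrder, self).create(vals)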

Limit decimal digits on a field of a view created with SQLAlchemy

Having created this view:
class OpenPositionMetric(Base):
    stmt = (
        select(
            [
                OpenPosition.belongs_to.label("belongs_to"),
                OpenPosition.account_number.label("account_number"),
                OpenPosition.exchange.label("exchange"),
                OpenPosition.symbol.label("symbol"),
                round(OpenPosition.actual_shares * OpenPosition.avg_cost_per_share, 3).label(
                    "cost_value"
                ),
            ]
        )
        .select_from(OpenPosition)
        .order_by("belongs_to", "account_number", "exchange", "symbol")
    )
    view = create_materialized_view(
        name="vw_open_positions_metrics",
        selectable=stmt,
        metadata=Base.metadata,
        indexes=None,
    )
    __table__ = view
I get this example result for the field cost_value: 1067.2500060000000000.
Is there a way to limit the number of decimal digits for that view field?
The round() function doesn't work, maybe because round is a Python function and SQLAlchemy is expecting a SQL expression language function like func.sum?
Update:
I've found a solution but it isn't pretty. I'm sure there is a better one...
(text("ROUND (operations.tb_open_positions.actual_shares * operations.tb_open_positions.avg_cost_per_share,3) AS cost_value"))),
The value above is now displayed in the view as 1067.250
Solution (thanks Gord Thompson):
from sqlalchemy import cast, Numeric

class OpenPositionMetric(Base):
    stmt = (
        select(
            [
                OpenPosition.belongs_to.label("belongs_to"),
                OpenPosition.account_number.label("account_number"),
                OpenPosition.exchange.label("exchange"),
                OpenPosition.symbol.label("symbol"),
                (
                    cast(
                        OpenPosition.actual_shares * OpenPosition.avg_cost_per_share,
                        Numeric(10, 3),
                    )
                ).label("cost_value"),
            ]
        )
        .select_from(OpenPosition)
        .order_by("belongs_to", "account_number", "exchange", "symbol")
    )
    view = create_materialized_view(
        name="vw_open_positions_metrics",
        selectable=stmt,
        metadata=Base.metadata,
        indexes=None,
    )
    __table__ = view
One way to limit the number of decimal places would be to cast the result to Numeric:
import sqlalchemy as sa

# …

class OpenPosition(Base):
    __tablename__ = "open_position"
    id = sa.Column(sa.Integer, primary_key=True, autoincrement=False)
    actual_shares = sa.Column(sa.Float)
    avg_cost_per_share = sa.Column(sa.Float)

Base.metadata.drop_all(engine, checkfirst=True)
Base.metadata.create_all(engine)

with sa.orm.Session(engine, future=True) as session:
    session.add(
        OpenPosition(id=1, actual_shares=1, avg_cost_per_share=1067.250606)
    )
    session.commit()

    result = session.query(
        (OpenPosition.actual_shares * OpenPosition.avg_cost_per_share).label(
            "cost_value"
        )
    ).all()
    print(result)  # [(1067.250606,)]

    result = session.query(
        sa.cast(
            (
                OpenPosition.actual_shares * OpenPosition.avg_cost_per_share
            ).label("cost_value"),
            sa.Numeric(10, 3),
        )
    ).all()
    print(result)  # [(Decimal('1067.251'),)]
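A related option, in the spirit of the question's func.sum remark: SQLAlchemy's generic sa.func.round emits a SQL-level ROUND instead of calling Python's round(). Assuming a PostgreSQL backend (where ROUND with a digit count is only defined for numeric), it still needs a cast; a sketch against the same model and session as above:

    result = session.query(
        sa.func.round(
            sa.cast(
                OpenPosition.actual_shares * OpenPosition.avg_cost_per_share,
                sa.Numeric,
            ),
            3,
        ).label("cost_value")
    ).all()
    print(result)  # e.g. [(Decimal('1067.251'),)]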

Beam sql udf to split one column into multiple columns

How can I implement a Beam SQL UDF function to split one column into multiple columns?
I have already implemented this in bigquery udf function:
CREATE TEMP FUNCTION parseDescription(description STRING)
RETURNS STRUCT<msg STRING, ip STRING, source_region STRING, user_name STRING>
LANGUAGE js AS """
  var arr = description.substring(0, description.length - 1).split(",");
  var firstIndex = arr[0].indexOf(".");
  this.msg = arr[0].substring(0, firstIndex);
  this.ip = arr[0].substring(firstIndex + 2).split(": ")[1];
  this.source_region = arr[1].split(": ")[1];
  this.user_name = arr[2].split(": ")[1];
  return this;
""";
INSERT INTO `table1` SELECT parseDescription(event_description).* FROM `table2`;
Does the Beam SQL UDF also support this kind of operation?
I tried to return an object from a Beam UDF, but it seems that Beam SQL doesn't support the object.* syntax. I also tried to return a map or an array, but still got an error.
Is there any way to implement the same UDF in Beam?
I tried to use the MapElements method but got an error; it seems that the output row is expected to have the same schema as the input row. Example:
import org.apache.beam.runners.direct.DirectOptions;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.extensions.sql.SqlTransform;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.schemas.Schema;
import org.apache.beam.sdk.transforms.*;
import org.apache.beam.sdk.values.PBegin;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.Row;

public class BeamMain2 {
  public static void main(String[] args) {
    DirectOptions options = PipelineOptionsFactory.fromArgs(args).withValidation()
        .as(DirectOptions.class);
    Pipeline p = Pipeline.create(options);

    // Define the schema for the records.
    Schema appSchema = Schema.builder().addStringField("string1").addInt32Field("int1").build();
    Row row1 = Row.withSchema(appSchema).addValues("aaa,bbb", 1).build();
    Row row2 = Row.withSchema(appSchema).addValues("ccc,ddd", 2).build();
    Row row3 = Row.withSchema(appSchema).addValues("ddd,eee", 3).build();

    PCollection<Row> inputTable =
        PBegin.in(p).apply(Create.of(row1, row2, row3).withRowSchema(appSchema));

    Schema newSchema =
        Schema.builder()
            .addNullableField("string2", Schema.FieldType.STRING)
            .addInt32Field("int1")
            .addNullableField("string3", Schema.FieldType.STRING)
            .build();

    PCollection<Row> outputStream = inputTable.apply(
        SqlTransform.query(
            "SELECT * "
                + "FROM PCOLLECTION where int1 > 1"))
        .apply(MapElements.via(
            new SimpleFunction<Row, Row>() {
              @Override
              public Row apply(Row line) {
                return Row.withSchema(newSchema).addValues("a", 1, "b").build();
              }
            }));

    p.run().waitUntilFinish();
  }
}
Reference: https://beam.apache.org/documentation/dsls/sql/overview/
You can emit 'Row' elements from a transform, which can later be used as a table.
The pipeline would look something like this:
Schema
Schema schema =
Schema.of(Schema.Field.of("f0", FieldType.INT64), Schema.Field.of("f1", FieldType.INT64));
Transform
private static MapElements<Row, Row> rowsToStrings() {
  return MapElements.into(TypeDescriptor.of(Row.class))
      .via(
          row -> Row.withSchema(schema).addValue(1L).addValue(2L).build());
}
Pipeline:
pipeline
.apply(
"SQL Query 1",
SqlTransform.query(<Query string 1>))
.apply("Transform column", rowsToStrings())
.apply(
"SQL Query 2",
SqlTransform.query(<Query string 2>))

Cannot access data after full text search using sqlalchemy, postgres and flask

I would like to search my postgres data base using postgres build-in full text search capability. In my app I have a set of posts stored according to title, content and date.
I think I can search the database using tsvector, but cannot retrieve the data from the results; i.e. the title, the content and the date. Could anyone help me, please?
import json, sys
from flask import Flask, render_template
from flask_sqlalchemy import SQLAlchemy
from sqlalchemy.dialects import postgresql
from sqlalchemy.sql.expression import cast, func
from sqlalchemy import Index
def create_tsvector(*args):
    exp = args[0]
    for e in args[1:]:
        exp += ' ' + e
    return func.to_tsvector('english', exp)

app = Flask(__name__)
app.config['SECRET_KEY'] = 'some_key'
app.config["SQLALCHEMY_DATABASE_URI"] = 'postgresql:somedb'
db = SQLAlchemy(app)

class Post(db.Model):
    id = db.Column(db.Integer, primary_key=True)
    title = db.Column(db.Text, nullable=False)
    content = db.Column(db.Text, nullable=False)
    date = db.Column(db.Text, unique=False)

    __ts_vector__ = create_tsvector(
        cast(func.coalesce(content, ''), postgresql.TEXT)
    )

    __table_args__ = (Index('idx_post_fts', __ts_vector__, postgresql_using='gin'),)

    def __repr__(self):
        return f"Post('{self.title}', '{self.date}')"

if len(sys.argv) > 1:
    filename1 = sys.argv[1]
    infile = open(filename1, 'r')
    posts = json.load(infile)
    infile.close()

    List = list(posts)
    art = 0
    for j in range(0, len(List)):
        if j % 10 == 0:
            print(j)
        title = posts[List[art]]['Title']
        date = posts[List[art]]['Posted']
        content = posts[List[art]]['Text']
        post = Post(title=title, date=date, content=content)
        db.session.add(post)
        db.session.commit()
        art += 1
from sqlalchemy.dialects.postgresql import TSVECTOR
from sqlalchemy import select, cast
posts = Post.__ts_vector__.match("bicycle", postgresql_regconfig='english')
print(posts)
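Post.__ts_vector__.match(...) only builds a SQL boolean expression; printing it just shows the generated SQL. To actually retrieve the matching rows, the expression has to be used as a filter in a query. A minimal sketch, assuming the Post model and db session defined above:

matching_posts = (
    db.session.query(Post)
    .filter(Post.__ts_vector__.match("bicycle", postgresql_regconfig='english'))
    .all()
)

# Each result is a Post instance, so title, content and date are available directly.
for post in matching_posts:
    print(post.title, post.date)
    print(post.content)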