I am using Diesel (diesel = { version = "1.4.8", features = ["postgres", "64-column-tables", "chrono"] }) with Rust 1.59.0 to implement pagination. This is the key part that builds the pagination query in Diesel:
use diesel::pg::Pg;
use diesel::query_builder::{AstPass, QueryFragment};
use diesel::QueryResult;
use diesel::sql_types::BigInt;
use crate::common::query::pagination::Paginated;
pub fn handle_table_query<T: QueryFragment<Pg>>(this: &Paginated<T>, mut out: AstPass<Pg>) -> QueryResult<()> {
    out.push_sql("SELECT *, COUNT(*) OVER () FROM ");
    if this.is_sub_query {
        out.push_sql("(");
    }
    this.query.walk_ast(out.reborrow())?;
    if this.is_sub_query {
        out.push_sql(")");
    }
    out.push_sql(" t LIMIT ");
    out.push_bind_param::<BigInt, _>(&this.per_page)?;
    out.push_sql(" OFFSET ");
    let offset = (this.page - 1) * this.per_page;
    out.push_bind_param::<BigInt, _>(&offset)?;
    Ok(())
}
This code generates SQL like the following:
select
    *,
    COUNT(*) over ()
from
    (
        select
            "article"."id",
            "article"."user_id",
            "article"."title",
            "article"."author",
            "article"."guid",
            "article"."created_time",
            "article"."updated_time",
            "article"."link",
            "article"."pub_time",
            "article"."sub_source_id",
            "article"."cover_image",
            "article"."channel_reputation",
            "article"."editor_pick"
        from
            "article"
        where
            "article"."id" > $1) t
limit $2 offset $3
As you can see, this SQL has a big problem: as the article table grows, the COUNT(*) OVER () window function forces a sequential scan over the whole filtered set. The article table now contains 2,000,000 rows, and each query takes more than 20 seconds. What I am trying to do is remove the window function and move the LIMIT clause into the subquery, so that the final SQL looks like this:
select
    *,
    count_estimate('select * from article')
from
    (
        select
            "article"."id",
            "article"."user_id",
            "article"."title",
            "article"."author",
            "article"."guid",
            "article"."created_time",
            "article"."updated_time",
            "article"."link",
            "article"."pub_time",
            "article"."sub_source_id",
            "article"."cover_image",
            "article"."channel_reputation",
            "article"."editor_pick"
        from
            "article"
        where
            "article"."id" > $1 limit $2 offset $3 ) t
This SQL takes less than 100 ms (count_estimate here is presumably the well-known PL/pgSQL helper from the PostgreSQL wiki that parses the planner's row estimate out of EXPLAIN output instead of counting exact rows). This is the Rust code I am tweaking:
pub fn handle_big_table_query<T: QueryFragment<Pg>>(this: &Paginated<T>, mut out: AstPass<Pg>) -> QueryResult<()> {
    out.push_sql("SELECT *, count_estimate('select * from article') FROM ");
    if this.is_sub_query {
        out.push_sql("(");
    }
    this.query.walk_ast(out.reborrow())?;
    if this.is_sub_query {
        out.push_sql(" t LIMIT ");
        out.push_bind_param::<BigInt, _>(&this.per_page)?;
        out.push_sql(" OFFSET ");
        let offset = (this.page - 1) * this.per_page;
        out.push_bind_param::<BigInt, _>(&offset)?;
        out.push_sql(")");
    }
    Ok(())
}
To my surprise, the SQL generated by this new code did not return any content. Is it possible to see the generated SQL? I checked my Rust source code but could not figure out where it was going wrong. This is the full pagination code:
use diesel::prelude::*;
use diesel::query_dsl::methods::LoadQuery;
use diesel::query_builder::{QueryFragment, Query, AstPass};
use diesel::pg::Pg;
use diesel::sql_types::BigInt;
use diesel::QueryId;
use serde::{Serialize, Deserialize};
use crate::common::query::page_query_handler::{handle_big_table_query, handle_table_query};

pub trait PaginateForQueryFragment: Sized {
    fn paginate(self, page: i64, is_big_table: bool) -> Paginated<Self>;
}

impl<T> PaginateForQueryFragment for T
where T: QueryFragment<Pg> {
    fn paginate(self, page: i64, is_big_table: bool) -> Paginated<Self> {
        Paginated {
            query: self,
            per_page: 10,
            page,
            is_sub_query: true,
            is_big_table
        }
    }
}

#[derive(Debug, Clone, Copy, QueryId, Serialize, Deserialize, Default)]
pub struct Paginated<T> {
    pub query: T,
    pub page: i64,
    pub per_page: i64,
    pub is_sub_query: bool,
    pub is_big_table: bool
}

impl<T> Paginated<T> {
    pub fn per_page(self, per_page: i64) -> Self {
        Paginated { per_page, ..self }
    }

    pub fn load_and_count_pages<U>(self, conn: &PgConnection) -> QueryResult<(Vec<U>, i64)>
    where
        Self: LoadQuery<PgConnection, (U, i64)>,
    {
        let per_page = self.per_page;
        let results = self.load::<(U, i64)>(conn)?;
        let total = results.get(0).map(|x| x.1).unwrap_or(0);
        let records = results.into_iter().map(|x| x.0).collect();
        let total_pages = (total as f64 / per_page as f64).ceil() as i64;
        Ok((records, total_pages))
    }

    pub fn load_and_count_pages_total<U>(self, conn: &PgConnection) -> QueryResult<(Vec<U>, i64, i64)>
    where
        Self: LoadQuery<PgConnection, (U, i64)>,
    {
        let per_page = self.per_page;
        let results = self.load::<(U, i64)>(conn)?;
        let total = results.get(0).map(|x| x.1).unwrap_or(0);
        let records = results.into_iter().map(|x| x.0).collect();
        let total_pages = (total as f64 / per_page as f64).ceil() as i64;
        Ok((records, total_pages, total))
    }
}

impl<T: Query> Query for Paginated<T> {
    type SqlType = (T::SqlType, BigInt);
}

impl<T> RunQueryDsl<PgConnection> for Paginated<T> {}

impl<T> QueryFragment<Pg> for Paginated<T>
where
    T: QueryFragment<Pg>,
{
    fn walk_ast(&self, out: AstPass<Pg>) -> QueryResult<()> {
        // Return the handler's QueryResult instead of silently discarding it.
        if self.is_big_table {
            handle_big_table_query(self, out)
        } else {
            handle_table_query(self, out)
        }
    }
}

#[derive(Debug, Clone, Copy, QueryId)]
pub struct QuerySourceToQueryFragment<T> {
    query_source: T,
}

impl<FC, T> QueryFragment<Pg> for QuerySourceToQueryFragment<T>
where
    FC: QueryFragment<Pg>,
    T: QuerySource<FromClause=FC>,
{
    fn walk_ast(&self, mut out: AstPass<Pg>) -> QueryResult<()> {
        self.query_source.from_clause().walk_ast(out.reborrow())?;
        Ok(())
    }
}

pub trait PaginateForQuerySource: Sized {
    fn paginate(self, page: i64, is_big_table: bool) -> Paginated<QuerySourceToQueryFragment<Self>>;
}

impl<T> PaginateForQuerySource for T
where T: QuerySource {
    fn paginate(self, page: i64, is_big_table: bool) -> Paginated<QuerySourceToQueryFragment<Self>> {
        Paginated {
            query: QuerySourceToQueryFragment { query_source: self },
            per_page: 10,
            page,
            is_sub_query: false,
            is_big_table
        }
    }
}
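For reference, a hypothetical call site for the code above (article::table, last_id, page, and the Article model are stand-ins, not part of the original code):

let (articles, total_pages, total) = article::table
    .filter(article::id.gt(last_id))
    .paginate(page, true) // is_big_table = true
    .per_page(10)
    .load_and_count_pages_total::<Article>(&conn)?;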
I finally found that this code block generates the SQL in the wrong format: the broken version emits the alias and the closing parenthesis in the wrong order, producing ... where "article"."id" > $1 t limit $2 offset $3), so the t alias ends up inside the parentheses and the derived table is left without an alias. Tweaking the code like this fixes it:
pub fn handle_big_table_query<T: QueryFragment<Pg>>(this: &Paginated<T>, mut out: AstPass<Pg>) -> QueryResult<()> {
    out.push_sql("SELECT *, count_estimate('select * from article') FROM ");
    if this.is_sub_query {
        out.push_sql("(");
    }
    this.query.walk_ast(out.reborrow())?;
    if this.is_sub_query {
        out.push_sql(" LIMIT ");
        out.push_bind_param::<BigInt, _>(&this.per_page)?;
        out.push_sql(" OFFSET ");
        let offset = (this.page - 1) * this.per_page;
        out.push_bind_param::<BigInt, _>(&offset)?;
        out.push_sql(") t");
    }
    Ok(())
}
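As for seeing the generated SQL: Diesel can render any QueryFragment together with its bind parameters via diesel::debug_query, without executing it. A sketch (the query value is hypothetical):

use diesel::pg::Pg;

let query = article::table
    .filter(article::id.gt(last_id))
    .paginate(1, true)
    .per_page(10);
// Prints the SQL string plus the bind values.
println!("{}", diesel::debug_query::<Pg, _>(&query));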
I'm trying to use a database connection from a Rocket on_ignite fairing:
use sqlx::{self, FromRow};
use rocket::fairing::{self, Fairing, Info, Kind};
use rocket::{Build, Rocket};
use crate::database::PostgresDb;

#[derive(FromRow)]
struct TableRow {
    column_a: String,
    column_b: String
}

#[rocket::async_trait]
impl Fairing for TableRow {
    fn info(&self) -> Info {
        Info {
            name: "Cache table row",
            kind: Kind::Ignite,
        }
    }

    async fn on_ignite(&self, rocket: Rocket<Build>) -> fairing::Result {
        let mut db = rocket
            .state::<Connection<PostgresDb>>()
            .expect("Unable to find db connection.");
        let row = sqlx::query_as::<_, TableRow>("SELECT * FROM table WHERE id = 1;")
            .fetch_one(&mut db)
            .await
            .unwrap();
        fairing::Result::Ok(rocket.manage(row))
    }
}
The problem is that I get the following Rust error on .fetch_one(&mut db):
the trait bound `&mut rocket_db_pools::Connection<PostgresDb>: Executor<'_>` is not satisfied
the following other types implement trait `Executor<'c>`:
<&'c mut PgConnection as Executor<'c>>
<&'c mut PgListener as Executor<'c>>
<&'c mut PoolConnection<Postgres> as Executor<'c>>
<&'t mut Transaction<'c, Postgres> as Executor<'t>>
<&sqlx::Pool<DB> as Executor<'p>>
cache_rbac_on_ignite.rs(56, 14): required by a bound introduced by this call
query_as.rs(132, 17): required by a bound in `QueryAs::<'q, DB, O, A>::fetch_all`
I tried the solution suggested here: "How to get the database Connection in rocket.rs Fairing", but it did not work out.
Here is the code:
use sqlx::{self, FromRow, Database};
use rocket::fairing::{self, Fairing, Info, Kind};
use rocket::{Build, Rocket};
use crate::database::PostgresDb;

#[derive(FromRow)]
struct TableRow {
    column_a: String,
    column_b: String
}

#[rocket::async_trait]
impl Fairing for TableRow {
    fn info(&self) -> Info {
        Info {
            name: "Cache table row",
            kind: Kind::Ignite,
        }
    }

    async fn on_ignite(&self, rocket: Rocket<Build>) -> fairing::Result {
        let mut db = PostgresDb::get_one(rocket).await.unwrap();
        let row = sqlx::query_as::<_, TableRow>("SELECT * FROM table WHERE id = 1;")
            .fetch_one(&mut db)
            .await
            .unwrap();
        fairing::Result::Ok(rocket.manage(row))
    }
}
I get the following Rust error on the line let mut db = PostgresDb::get_one(rocket).await.unwrap();:
no function or associated item named `get_one` found for struct `PostgresDb` in the current scope
function or associated item not found in `PostgresDb`
mod.rs(8, 1): function or associated item `get_one` not found for this struct
What is the right way to use a database connection inside a fairing? Thank you!
I finally found an answer. Here is what worked for me:
use rocket::fairing::{self, Fairing, Info, Kind};
use rocket::{Build, Rocket};
use rocket_db_pools::{sqlx::{self, FromRow}, Database};
use crate::database::PostgresDb;

#[derive(FromRow)]
struct TableRow {
    column_a: String,
    column_b: String
}

#[rocket::async_trait]
impl Fairing for TableRow {
    fn info(&self) -> Info {
        Info {
            name: "Cache table row",
            kind: Kind::Ignite,
        }
    }

    async fn on_ignite(&self, rocket: Rocket<Build>) -> fairing::Result {
        // Fetch the pool from the attached Database fairing, then acquire a connection.
        let db = PostgresDb::fetch(&rocket).unwrap();
        let mut conn = db.acquire().await.unwrap();
        let row = sqlx::query_as::<_, TableRow>("SELECT * FROM table WHERE id = 1;")
            .fetch_one(&mut conn)
            .await
            .unwrap();
        fairing::Result::Ok(rocket.manage(row))
    }
}
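And a sketch of wiring it up (this assumes PostgresDb derives rocket_db_pools::Database; the pool fairing must be attached before this one so that PostgresDb::fetch can find it at ignite time):

#[rocket::launch]
fn rocket() -> _ {
    rocket::build()
        .attach(PostgresDb::init())
        .attach(TableRow {
            column_a: String::new(),
            column_b: String::new(),
        })
}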
I want to create a REST API with Rust and can't make it work.
My relevant code so far:
In main.rs:
// Imports and the DbPool alias are filled in here for completeness.
use actix_web::{middleware, web, App, HttpServer};
use diesel::prelude::*;
use diesel::r2d2::{self, ConnectionManager};

type DbPool = r2d2::Pool<ConnectionManager<PgConnection>>;

#[actix_web::main]
async fn main() -> std::io::Result<()> {
    // Load .env into environment variables.
    dotenv::dotenv().ok();
    env_logger::init_from_env(env_logger::Env::new().default_filter_or("info"));

    // Set up the database connection pool.
    let database_url = std::env::var("DATABASE_URL").expect("DATABASE_URL");
    let manager = ConnectionManager::<PgConnection>::new(database_url);
    let pool: DbPool = r2d2::Pool::builder()
        .test_on_check_out(true)
        .build(manager)
        .expect("Could not build connection pool");

    let port = std::env::var("PORT").expect("$PORT is not set.");
    HttpServer::new(move || {
        App::new()
            .app_data(web::Data::new(pool.clone()))
            .wrap(middleware::Logger::default())
            .route("/", web::get().to(|| async { "Actix REST API" }))
            .service(handlers::common::houses::index)
    })
    .bind(("0.0.0.0", port.parse().unwrap()))?
    .run()
    .await
}
The schema:
diesel::table! {
    houses (id) {
        id -> Int4,
        user_id -> Varchar,
        street -> Varchar,
        figure -> Varchar,
        floor -> Varchar,
        create_date -> Timestamp,
        update_date -> Timestamp,
        is_deleted -> Bool,
    }
}
The model:
#[derive(Debug, Serialize, Deserialize, Queryable)]
pub struct House {
    pub id: i32,
    pub user_id: String,
    pub street: String,
    pub figure: String,
    pub floor: String,
    pub create_date: chrono::NaiveDateTime,
    pub update_date: chrono::NaiveDateTime,
    pub is_deleted: bool,
}
The handler:
#[get("/houses")]
async fn index(pool: web::Data<DbPool>) -> Result<HttpResponse, Error> {
let houses = web::block(move || {
let conn = &pool.get()?;
find_all(&conn)
})
.await?
.map_err(actix_web::error::ErrorInternalServerError)?;
Ok(HttpResponse::Ok().json(houses))
}
fn find_all(conn: &PgConnection) -> Result<Vec<House>, DbError> {
use crate::schemas::common::houses::houses::dsl::*;
let items =houses.load::<House>(&mut conn)?;
Ok(items)
}
The dependencies are:
[dependencies]
actix-web = "4"
chrono = { version = "0.4.19", features = ["serde"] }
diesel = { version = "2.0.3", features = ["postgres", "r2d2", "chrono"] }
dotenv = "0.15.0"
env_logger = "0.10.0"
serde = { version = "1.0.136", features = ["derive"] }
serde_json = "1.0"`
It keeps giving an error, and I don't understand why.
The error is:
error[E0277]: the trait bound `&diesel::PgConnection: LoadConnection` is not satisfied
  --> src\handlers\common\houses.rs:25:37
   |
25 |     let items = houses.load::<House>(&mut conn)?;
   |                        ----          ^^^^^^^^^ the trait `LoadConnection` is not implemented for `&diesel::PgConnection`
   |                        |
   |                        required by a bound introduced by this call
   |
   = help: consider removing the leading `&`-reference
   = note: required for `table` to implement `LoadQuery<'_, &diesel::PgConnection, House>`
   = note: required by a bound in `diesel::RunQueryDsl::load`
I've seen a similar error with diesel version 1.4, but I think this version is different.
Plus, I'm starting with Rust and I'm a little lost in general at the moment.
I was hoping someone knows what the problem is and how to fix it.
PgConnection implements LoadConnection but &PgConnection does not (note the extra &).
Make conn mutable and pass it as a mutable reference (pool.get() yields an r2d2 PooledConnection, and &mut conn deref-coerces to the &mut PgConnection that find_all expects):
#[get("/houses")]
async fn index(pool: web::Data<DbPool>) -> Result<HttpResponse, Error> {
let houses = web::block(move || {
let mut conn = pool.get()?; // <------------
find_all(&mut conn) // <------------
})
.await?
.map_err(actix_web::error::ErrorInternalServerError)?;
Ok(HttpResponse::Ok().json(houses))
}
fn find_all(conn: &mut PgConnection) -> Result<Vec<House>, DbError> {
// ^^^ <------------
use crate::schemas::common::houses::houses::dsl::*;
let items = houses.load::<House>(conn)?; // <------------
Ok(items)
}
I took the code from the documentation, but it doesn't work.
pub fn get_countries(&self) {
    let cursor = self.countries.find(None, None);
    for doc in cursor {
        println!("{}", doc?)
    }
}
mongodb::sync::Cursor<bson::Document> doesn't implement std::fmt::Display
mongodb::sync::Cursor<bson::Document> cannot be formatted with the default formatter
the ? operator can only be applied to values that implement std::ops::Try
the ? operator cannot be applied to type mongodb::sync::Cursor<bson::Document>
Also, cursor.collect() does not work correctly:
the method collect exists for enum std::result::Result<mongodb::sync::Cursor<bson::Document>, mongodb::error::Error>, but its trait bounds were not satisfied
method cannot be called on std::result::Result<mongodb::sync::Cursor<bson::Document>, mongodb::error::Error> due to unsatisfied trait bounds
I tried using cursor.iter() and cursor.into_iter(); the result was the same.
Full code of the module:
use bson::Document;
use mongodb::{
    error::Error,
    sync::{Collection, Database},
};

pub struct Core {
    db: Database,
    countries: Collection<Document>,
}

impl Core {
    pub fn new(db: &Database) -> Core {
        Core {
            db: db.clone(),
            countries: db.collection("countries"),
        }
    }

    pub fn get_country(&self, name: &String) -> Result<Option<Document>, Error> {
        self.countries.find_one(bson::doc! { "idc": name }, None)
    }

    pub fn get_countries(&self) {
        let cursor = self.countries.find(None, None);
        for doc in cursor {
            println!("{}", doc?)
        }
    }
}
It seems that doc is itself a Cursor here (iterating over a Result yields its Ok value), so cursor must actually be the Result<Cursor<T>> type returned by the Collection::find method: https://docs.rs/mongodb/latest/mongodb/sync/struct.Collection.html#method.find
Shouldn't you unwrap (or handle with a proper match) your self.countries.find(None, None) result?
pub fn get_countries(&self) {
    let cursor = self.countries.find(None, None).unwrap();
    for doc in cursor {
        // `doc` is a Result<Document, Error>; in a function returning `()`
        // it must be unwrapped (or matched) rather than propagated with `?`.
        println!("{}", doc.unwrap())
    }
}
My solution
pub fn get_countries(&self) -> Vec<Document> {
    let cursor = self.countries.find(None, None).unwrap();
    let mut total: Vec<Document> = Vec::new();
    for doc in cursor {
        total.push(doc.unwrap());
    }
    total
}
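An equivalent, arguably more idiomatic sketch: the sync Cursor is an Iterator over Result<Document, Error>, so it can be collected straight into a Result and propagated to the caller instead of unwrapping:

pub fn get_countries(&self) -> Result<Vec<Document>, Error> {
    // Collecting Iterator<Item = Result<T, E>> into Result<Vec<T>, E>
    // stops at the first error.
    self.countries.find(None, None)?.collect()
}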
I'm querying an instance of PostgreSQL and selecting a sum of a decimal value:
db=# SELECT SUM(distance) AS total_distance FROM table_name WHERE deleted_at IS NULL;
total_distance
-----------------------
3808.0666666666666578
(1 row)
When I try to execute this query in Rust:
extern crate postgres;

use postgres::{Connection, TlsMode};

fn main() {
    let conn = Connection::connect("postgresql://u:p@localhost:5432/db", TlsMode::None).unwrap();
    let query = "SELECT SUM(distance) AS total_distance FROM table_name WHERE deleted_at IS NULL;";
    for row in &conn.query(query, &[]).unwrap() {
        let total_distance: f64 = row.get("total_distance");
        println!("{}", total_distance);
    }
}
Results in:
thread 'main' panicked at 'error retrieving column "total_distance": Error(Conversion(WrongType(Type(Numeric))))'
I've seen in various threads that the Numeric type isn't supported by the Postgres crate, so I've tried creating my own numeric type:
// NOTE: imports added for completeness; the exact paths depend on the
// postgres crate version, so treat these as assumptions.
use std::convert::TryInto;
use std::error::Error;
use std::fmt::{Display, Formatter};

use postgres::types::{FromSql, Type, NUMERIC};

#[derive(Debug)]
struct Float64(f64);

impl FromSql for Float64 {
    fn from_sql(ty: &Type, raw: &[u8]) -> Result<Float64, Box<Error + Sync + Send>> {
        let bytes = raw.try_into().expect("failed!");
        Ok(Float64(f64::from_be_bytes(bytes)))
    }

    fn from_sql_null(ty: &Type) -> Result<Float64, Box<Error + Sync + Send>> {
        Ok(Float64(0.0))
    }

    fn from_sql_nullable(
        ty: &Type,
        raw: Option<&[u8]>,
    ) -> Result<Float64, Box<Error + Sync + Send>> {
        match raw {
            None => Ok(Float64(0.0)),
            Some(value) => Float64::from_sql(ty, value),
        }
    }

    fn accepts(ty: &Type) -> bool {
        NUMERIC.eq(ty)
    }
}

impl Display for Float64 {
    fn fmt(&self, f: &mut Formatter<'_>) -> std::fmt::Result {
        // Print the inner f64 directly; calling self.to_string() here would
        // recurse back into this Display impl.
        write!(f, "{}", self.0)
    }
}
But this still doesn't work, as the raw bytes fail to convert:
thread 'main' panicked at 'failed!: TryFromSliceError(())', src/libcore/result.rs:1165:5
raw: &[u8] has a length of 18, which is why the try_into conversion to [u8; 8] fails. What would be the best way to convert an 18-byte slice to f64?
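Two hedged options (the names and approach below are illustrative, not from the original thread). The simplest is to cast in SQL, e.g. SELECT SUM(distance)::FLOAT8 AS total_distance ..., since float8 maps directly to f64 and no custom FromSql is needed. If the bytes must be decoded in Rust, the 18 bytes are Postgres' binary NUMERIC encoding: a header of four big-endian i16 values (ndigits, weight, sign, dscale) followed by ndigits base-10000 digits, so 8 + 5 * 2 = 18 bytes here. A minimal, lossy sketch with no NaN or bounds handling:

/// Decode Postgres' binary NUMERIC wire format into an f64 (lossy).
fn numeric_to_f64(raw: &[u8]) -> f64 {
    let read_i16 = |i: usize| i16::from_be_bytes([raw[2 * i], raw[2 * i + 1]]);
    let ndigits = read_i16(0) as usize;
    let weight = read_i16(1) as i32; // base-10000 exponent of the first digit
    let sign = read_i16(2); // 0x0000 = positive, 0x4000 = negative
    let mut value = 0.0_f64;
    for d in 0..ndigits {
        value += f64::from(read_i16(4 + d)) * 10000_f64.powi(weight - d as i32);
    }
    if sign == 0x4000 { -value } else { value }
}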
I'd like to create a setter/getter pair of functions where the names are automatically generated based on a shared component, but I couldn't find any example of macro rules generating a new name.
Is there a way to generate code like fn get_$iden() and SomeEnum::XX_GET_$enum_iden?
If you are using Rust >= 1.31.0, I would recommend using my paste crate, which provides a stable way to create concatenated identifiers in a macro.
macro_rules! make_a_struct_and_getters {
    ($name:ident { $($field:ident),* }) => {
        // Define the struct. This expands to:
        //
        //     pub struct S {
        //         a: String,
        //         b: String,
        //         c: String,
        //     }
        pub struct $name {
            $(
                $field: String,
            )*
        }

        paste::item! {
            // An impl block with getters. Stuff in [<...>] is concatenated
            // together as one identifier. This expands to:
            //
            //     impl S {
            //         pub fn get_a(&self) -> &str { &self.a }
            //         pub fn get_b(&self) -> &str { &self.b }
            //         pub fn get_c(&self) -> &str { &self.c }
            //     }
            impl $name {
                $(
                    pub fn [<get_ $field>](&self) -> &str {
                        &self.$field
                    }
                )*
            }
        }
    };
}

make_a_struct_and_getters!(S { a, b, c });
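A quick usage sketch (field values are arbitrary; this assumes the macro invocation above lives in the same module, since the generated fields are private):

fn main() {
    let s = S {
        a: "one".to_owned(),
        b: "two".to_owned(),
        c: "three".to_owned(),
    };
    // Calls the getters generated by paste::item! above.
    assert_eq!(s.get_a(), "one");
    assert_eq!(s.get_b(), "two");
}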
My mashup crate provides a stable way to create new identifiers that works with any Rust version >= 1.15.0.
#[macro_use]
extern crate mashup;

macro_rules! make_a_struct_and_getters {
    ($name:ident { $($field:ident),* }) => {
        // Define the struct. This expands to:
        //
        //     pub struct S {
        //         a: String,
        //         b: String,
        //         c: String,
        //     }
        pub struct $name {
            $(
                $field: String,
            )*
        }

        // Use mashup to define a substitution macro `m!` that replaces every
        // occurrence of the tokens `"get" $field` in its input with the
        // concatenated identifier `get_ $field`.
        mashup! {
            $(
                m["get" $field] = get_ $field;
            )*
        }

        // Invoke the substitution macro to build an impl block with getters.
        // This expands to:
        //
        //     impl S {
        //         pub fn get_a(&self) -> &str { &self.a }
        //         pub fn get_b(&self) -> &str { &self.b }
        //         pub fn get_c(&self) -> &str { &self.c }
        //     }
        m! {
            impl $name {
                $(
                    pub fn "get" $field(&self) -> &str {
                        &self.$field
                    }
                )*
            }
        }
    }
}

make_a_struct_and_getters!(S { a, b, c });
No, not as of Rust 1.22.
If you can use nightly builds...
Yes: concat_idents!(get_, $iden) and such will allow you to create a new identifier.
But no: the parser doesn't allow macro calls everywhere, so many of the places you might have sought to do this won't work. In such cases, you are sadly on your own. fn concat_idents!(get_, $iden)(…) { … }, for example, won't work.
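For illustration, a nightly-only sketch of where concat_idents! does work: referring to an existing item from expression position.

#![feature(concat_idents)]

fn get_foo() -> u32 {
    42
}

fn main() {
    // Allowed: the macro expands to the identifier `get_foo` in expression
    // position. Using it to *define* `fn get_foo` would not parse.
    let value = concat_idents!(get_, foo)();
    assert_eq!(value, 42);
}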
There's a little-known crate, gensym, that can generate unique UUID-based names and pass them as the first argument to a macro, followed by a comma:
macro_rules! gen_fn {
    ($a:ty, $b:ty) => {
        gensym::gensym! { _gen_fn!{ $a, $b } }
    };
}

macro_rules! _gen_fn {
    ($gensym:ident, $a:ty, $b:ty) => {
        fn $gensym(a: $a, b: $b) {
            unimplemented!()
        }
    };
}

mod test {
    gen_fn!{ u64, u64 }
    gen_fn!{ u64, u64 }
}
If all you need is a unique name, and you don't care what it is, that can be useful. I used it to solve a problem where each invocation of a macro needed to create a unique static to hold a singleton struct. I couldn't use paste, since I didn't have unique identifiers I could paste together in the first place.