I'm trying to write a macro for destructuring BSON data which looks like this:
let bson: Document = ...;
let (id, hash, name, path, modification_time, size, metadata, commit_data) = bson_destructure! {
    get id = from (bson), optional, name ("_id"), as ObjectId;
    get hash = from (bson), as String, through (|s| ContentHash::from_str(&s));
    get name = from (bson), as String;
    get path = from (bson), as Bson, through (PathBuf::from_bson);
    get modification_time = from (bson), as UtcDatetime, through (FileTime);
    get size = from (bson), as I64, through (|n| n as u64);
    get metadata = from (bson), as Document, through (Metadata::from_bson);
    get commit_data = from (bson), optional, as Document, through (CommitData::from_bson);
    ret (id, hash, name, path, modification_time, size, metadata, commit_data)
};
I've written the following macro (pretty large) for it:
macro_rules! bson_destructure {
    // required field
    (
        #collect req,
        [$target:ident, $source:expr, $field:expr, Bson, $f:expr],
        [];
        $($rest:tt)*
    ) => {{
        let $target = try!(match $source.remove($field) {
            Some(v) => $f(v),
            None => Err(BsonDestructureError::MissingField {
                field_name: $field,
                expected: "Bson"
            }),
        });
        bson_destructure!($($rest)*)
    }};
    (
        #collect req,
        [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
        [];
        $($rest:tt)*
    ) => {{
        let $target = try!(match $source.remove($field) {
            Some(v) => match v {
                ::ejdb::bson::Bson::$variant(v) => $f(v),
                v => Err(BsonDestructureError::InvalidType {
                    field_name: $field,
                    expected: stringify!($variant),
                    actual: v
                })
            },
            None => Err(BsonDestructureError::MissingField {
                field_name: $field,
                expected: stringify!($variant)
            }),
        });
        bson_destructure!($($rest)*)
    }};
    // optional field
    (
        #collect opt,
        [$target:ident, $source:expr, $field:expr, Bson, $f:expr],
        [];
        $($rest:tt)*
    ) => {{
        let $target = try!(match $source.remove($field) {
            Some(v) => $f(v).map(Some),
            None => Ok(None),
        });
        bson_destructure!($($rest)*)
    }};
    (
        #collect opt,
        [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
        [];
        $($rest:tt)*
    ) => {{
        let $target = try!(match $source.remove($field) {
            Some(v) => match v {
                ::ejdb::bson::Bson::$variant(v) => $f(v).map(Some),
                v => Err(BsonDestructureError::InvalidType {
                    field_name: $field,
                    expected: stringify!($variant),
                    actual: v
                })
            },
            None => Ok(None),
        });
        bson_destructure!($($rest)*)
    }};
    // change variant name
    (
        #collect $k:tt,
        [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
        [as $nv:ident, $($word:ident $arg:tt),*];
        $($rest:tt)*
    ) => {
        bson_destructure!(
            #collect $k,
            [$target, $source, $field, $nv, $f],
            [$($word $arg),*];
            $($rest)*
        )
    };
    // change final mapping function
    (
        #collect $k:tt,
        [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
        [through ($nf:expr), $($word:ident $arg:tt),*];
        $($rest:tt)*
    ) => {
        bson_destructure!(
            #collect $k,
            [$target, $source, $field, $variant, $nf],
            [$($word $arg),*];
            $($rest)*
        )
    };
    // change field name
    (
        #collect $k:tt,
        [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
        [name ($nn:expr), $($word:ident $arg:tt),*];
        $($rest:tt)*
    ) => {
        bson_destructure!(
            #collect $k,
            [$target, $source, $nn, $variant, $f],
            [$($word $arg),*];
            $($rest)*
        )
    };
    // main forms
    (get $target:ident = from ($source:expr), $($word:ident $arg:tt),*; $($rest:tt)*) => {
        bson_destructure!(
            #collect req,
            [$target, $source, stringify!($target), Bson, Ok],
            [$($word $arg),*];
            $($rest)*
        )
    };
    (get $target:ident = from ($source:expr), optional, $($word:ident $arg:tt),*; $($rest:tt)*) => {
        bson_destructure!(
            #collect opt,
            [$target, $source, stringify!($target), Bson, Ok],
            [$($word $arg),*];
            $($rest)*
        )
    };
    // final form
    (ret $e:expr) => { $e }
}
However, the first example above results in the following compilation error:
src/db/data.rs:345:22: 345:25 error: no rules expected the token `opt`
src/db/data.rs:345 #collect opt,
^~~
I'm somewhat surprised that it doesn't show the error location as usual (that is, there is no indication of where the expansion happens); moreover, the error vanishes when I comment out the piece of code which uses the macro.
I can't see why it says that no rules expected this token, because there is such a rule, but maybe I don't understand something.
I'm pretty sure that this is possible, because it's roughly what the quick_error crate does, but it seems that my macro-writing skills are still lacking.
How should I fix the macro so it would work as I expect?
For completeness, the following is the definition of BsonDestructureError:
#[derive(Debug, Clone)]
pub enum BsonDestructureError {
    InvalidType {
        field_name: &'static str,
        expected: &'static str,
        actual: Bson
    },
    InvalidArrayItemType {
        index: usize,
        expected: &'static str,
        actual: Bson
    },
    MissingField {
        field_name: &'static str,
        expected: &'static str
    }
}
I'm also using the bson crate re-exported from the ejdb crate. Here is a minimal example, runnable with cargo script on stable Rust.
cargo script, a recursive muncher, and my favourite internal-rule syntax, all in one question; how could I not answer?
First, the exact problem can be identified by running cargo rustc -- -Z trace-macros. This will output each rule as it gets expanded, giving us a "backtrace" which, after some manual reformatting, comes out looking like so:
bson_destructure! {
    get id = from ( bson ) , optional , name ( "_id" ) , as ObjectId ;
    get hash = from ( bson ) , as String ;
    get name = from ( bson ) , as String ;
    get path = from ( bson ) , as Bson ;
    get modification_time = from ( bson ) , as UtcDatetime ;
    get size = from ( bson ) , as I64 , through ( | n | n as u64 ) ;
    get metadata = from ( bson ) , as Document ;
    get commit_data = from ( bson ) , optional , as Document ;
    ret ( id , hash , name , path , modification_time , size , metadata , commit_data )
}
bson_destructure! {
    # collect opt ,
    [ id , bson , stringify ! ( id ) , Bson , Ok ] ,
    [ name ( "_id" ) , as ObjectId ] ;
    get hash = from ( bson ) , as String ;
    get name = from ( bson ) , as String ;
    get path = from ( bson ) , as Bson ;
    get modification_time = from ( bson ) , as UtcDatetime ;
    get size = from ( bson ) , as I64 , through ( | n | n as u64 ) ;
    get metadata = from ( bson ) , as Document ;
    get commit_data = from ( bson ) , optional , as Document ;
    ret ( id , hash , name , path , modification_time , size , metadata , commit_data )
}
bson_destructure! {
    # collect opt ,
    [ id , bson , "_id" , Bson , Ok ] , [ as ObjectId ] ;
    get hash = from ( bson ) , as String ;
    get name = from ( bson ) , as String ;
    get path = from ( bson ) , as Bson ;
    get modification_time = from ( bson ) , as UtcDatetime ;
    get size = from ( bson ) , as I64 , through ( | n | n as u64 ) ;
    get metadata = from ( bson ) , as Document ;
    get commit_data = from ( bson ) , optional , as Document ;
    ret ( id , hash , name , path , modification_time , size , metadata , commit_data )
}
A careful perusal of the rules in bson_destructure! shows the issue: there is no rule which matches the third expansion. macro_rules! is, frankly, rubbish at reporting sane error locations when it comes to recursive rules; that it's pointing to the opt token is irrelevant. The real problem is that it couldn't find a matching rule.
In particular, the offending rule is this one:
// change variant name
(
    #collect $k:tt,
    [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
    [as $nv:ident, $($word:ident $arg:tt),*];
    $($rest:tt)*
) => {
    ...
};
Note the presence of a comma immediately after $nv:ident. Also note that there is no such comma in the input. This can be solved by moving the comma inside the repetition, like so:
// change variant name
(
    #collect $k:tt,
    [$target:ident, $source:expr, $field:expr, $variant:ident, $f:expr],
    [as $nv:ident $(, $word:ident $arg:tt)*];
    $($rest:tt)*
) => {
    ...
};
Another alternative (and the one I usually go with) is to simply mutate the input when it is first encountered, to make sure there is always a trailing comma in place.
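For instance, here is a minimal, self-contained sketch of that normalization trick (the demo! macro and its #normalize marker are invented for this illustration, echoing the #collect convention above): the public rules re-emit the word list with a guaranteed trailing comma, so the internal rules only ever need to match comma-terminated lists.
macro_rules! demo {
    // public forms: with or without a trailing comma in the input, re-emit
    // the list so a trailing comma is always present, then hand off to the
    // internal form
    ($($word:ident ($arg:expr)),*) => { demo!(#normalize $($word ($arg),)*) };
    ($($word:ident ($arg:expr),)*) => { demo!(#normalize $($word ($arg),)*) };
    // internal forms: every item is now reliably followed by a comma
    (#normalize) => { () };
    (#normalize $word:ident ($arg:expr), $($rest:tt)*) => {{
        println!("{} = {}", stringify!($word), $arg);
        demo!(#normalize $($rest)*)
    }};
}

fn main() {
    // both invocations expand identically, trailing comma or not
    demo!(alpha (1), beta (2));
    demo!(alpha (1), beta (2),);
}
With the input normalized up front, none of the later rules need the comma-inside-the-repetition dance at all.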
The code won't actually compile on my machine, due to a native dependency, but I did verify that making this change (both here and in the other rules with the same issue) allows macro expansion to complete. You can check that the output looks correct using cargo rustc -- -Z unstable-options --pretty=expanded.
I would like to do the following, but macros in that position don't seem to work (I get error: expected `:`, found `!`). How can I pattern-match individual struct members and attach attributes to them based on the match?
use serde_derive::Serialize;
macro_rules! optional_param {
    ($name:ident : Option<$type:ty>) => { #[serde(skip_serializing_if = "Option::is_none")] pub $name: Option<$type> };
    ($name:ident : Vec<$type:ty>)    => { #[serde(skip_serializing_if = "Vec::is_empty")]   pub $name: Vec<$type> };
    ($name:ident : bool)             => { #[serde(skip_serializing_if = "bool::not")]       pub $name: bool };
}
macro_rules! impl_extra {
    ( $name:ident { $( $param:ident : $type:ty ),* $(,)* } ) => (
        #[derive(Default, Debug, Serialize)]
        pub struct $name {
            $( optional_param!($param : $type), )*
        }
    );
}
impl_extra!(MyStruct { member: Option<String> });
Indeed, macro invocations are not valid in the middle of a struct definition. However, we can use metavariables there. The trick is to parse the parameters incrementally, building the tokens for the field definitions along the way, and when there's no more input to process, emit a struct definition with the field definitions coming from a metavariable.
As a first step, let's see what a macro that doesn't handle field types specifically looks like:
macro_rules! impl_extra {
    ( # $name:ident { } -> ($($result:tt)*) ) => (
        #[derive(Default, Debug, Serialize)]
        pub struct $name {
            $($result)*
        }
    );
    ( # $name:ident { $param:ident : $type:ty, $($rest:tt)* } -> ($($result:tt)*) ) => (
        impl_extra!(# $name { $($rest)* } -> (
            $($result)*
            pub $param : $type,
        ));
    );
    ( $name:ident { $( $param:ident : $type:ty ),* $(,)* } ) => (
        impl_extra!(# $name { $($param : $type,)* } -> ());
    );
}
The only thing this macro does is add pub on each field and define a pub struct with a #[derive] attribute. The first rule handles the terminal case, i.e. when there are no more fields to process. The second rule handles the recursive case, and the third rule handles the macro's "public" syntax and transforms it into the "processing" syntax.
Note that I'm using an # as the initial token for internal rules to distinguish them from "public" rules. If this macro is not meant to be exported to other crates, then you could also move the internal rules to a different macro. If the macro is exported though, then the separate macro for the internal rules might have to be exported too.
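For illustration, here is roughly what that split might look like, as a cut-down sketch (the serde attributes and Serialize derive are omitted so it stands alone, and the impl_extra_internal name is invented for this example):
// The internal rules live in their own macro. Within one crate this works
// as-is; if impl_extra! is #[macro_export]ed for use from other crates, the
// helper must be exported too, conventionally with #[doc(hidden)] so it
// stays out of the public documentation.
#[doc(hidden)]
#[macro_export]
macro_rules! impl_extra_internal {
    // terminal case: no fields left, emit the struct
    ( $name:ident { } -> ($($result:tt)*) ) => (
        #[derive(Default, Debug)]
        pub struct $name {
            $($result)*
        }
    );
    // recursive case: move one field into the accumulated output
    ( $name:ident { $param:ident : $type:ty, $($rest:tt)* } -> ($($result:tt)*) ) => (
        impl_extra_internal!($name { $($rest)* } -> (
            $($result)*
            pub $param : $type,
        ));
    );
}

#[macro_export]
macro_rules! impl_extra {
    // the "public" syntax only normalizes the input and delegates
    ( $name:ident { $( $param:ident : $type:ty ),* $(,)* } ) => (
        impl_extra_internal!($name { $($param : $type,)* } -> ());
    );
}

impl_extra!(MyStruct { member: Option<String> });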
Now, let's handle the various field types:
macro_rules! impl_extra {
    ( # $name:ident { } -> ($($result:tt)*) ) => (
        #[derive(Default, Debug, Serialize)]
        pub struct $name {
            $($result)*
        }
    );
    ( # $name:ident { $param:ident : Option<$type:ty>, $($rest:tt)* } -> ($($result:tt)*) ) => (
        impl_extra!(# $name { $($rest)* } -> (
            $($result)*
            #[serde(skip_serializing_if = "Option::is_none")]
            pub $param : Option<$type>,
        ));
    );
    ( # $name:ident { $param:ident : Vec<$type:ty>, $($rest:tt)* } -> ($($result:tt)*) ) => (
        impl_extra!(# $name { $($rest)* } -> (
            $($result)*
            #[serde(skip_serializing_if = "Vec::is_empty")]
            pub $param : Vec<$type>,
        ));
    );
    ( # $name:ident { $param:ident : bool, $($rest:tt)* } -> ($($result:tt)*) ) => (
        impl_extra!(# $name { $($rest)* } -> (
            $($result)*
            #[serde(skip_serializing_if = "bool::not")]
            pub $param : bool,
        ));
    );
    ( $name:ident { $( $param:ident : $($type:tt)* ),* $(,)* } ) => (
        impl_extra!(# $name { $($param : $($type)*,)* } -> ());
    );
}
Note that there's a difference in the last rule: instead of matching on a ty, we now match on a sequence of tt. That's because once the macro has parsed a ty, it can't be broken down, so when we make a recursive macro call, a ty cannot possibly match something like Option<$type:ty>.
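Here is a minimal, hypothetical demonstration of that opacity (the inner!/outer! names are invented for this example):
// inner! can only recognise Option<...> when it receives raw tokens; once
// another macro has captured the input as a `ty`, the fragment becomes
// opaque, and only the generic rule can match it.
macro_rules! inner {
    (Option<$t:ty>) => { "matched Option<_>" };
    ($t:ty) => { "matched generic ty" };
}

macro_rules! outer {
    ($t:ty) => { inner!($t) };
}

fn main() {
    // called directly, the raw tokens match the Option rule:
    assert_eq!(inner!(Option<String>), "matched Option<_>");
    // routed through outer!, the already-parsed `ty` no longer does:
    assert_eq!(outer!(Option<String>), "matched generic ty");
}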
Below is my CoffeeScript; it's giving an "Unexpected Indentation" error.
Can anyone guess what the error is?
evaluateContainerRules: ( rules, containerType ) ->
deferred = Ext.create( 'Deft.promise.Deferred' )
searchCriteria = []
for rule in rules
if rule.searchTerms? and rule.searchTerms.length > 0
searchCriteria.push( rule )
console.log 'rule'
console.log 'searchCriteria'
if searchCriteria.length <= 0
emptyRules =
[
{
searchOption: 'EMPTY_RULE'
searchTerms: true
}
]
console.log 'emptyRules'
searchCriteria.push( emptyRules )
searchCriteria.push["Hello"]
console.log 'searchCriteria'
store = Ext.create( 'Traverse.core.store.admin.container.ContainerMemberSummarySearchStore',
params:
searchCriterias: searchCriteria
traverseTypeEnums: if containerType is 'device' then [ Traverse.core.enumeration.TraverseType.DEVICE ] else [ Traverse.core.enumeration.TraverseType.TEST ]
)
complete = false
store.on(
'load'
( records, operation, success ) ->
# NOTE: callback is fired multiple times for a paging store, only execute logic during the first call
if complete
return
complete = true
if success
deferred.resolve( store )
else
store.destroyStore()
#showError( operation.getError() )
deferred.reject( operation.getError() )
return
scope: @
)
store.load()
return deferred.promise
This seems like a reasonable re-indentation of your code that compiles:
evaluateContainerRules: ( rules, containerType ) ->
  deferred = Ext.create( 'Deft.promise.Deferred' )
  searchCriteria = []
  for rule in rules
    if rule.searchTerms? and rule.searchTerms.length > 0
      searchCriteria.push( rule )
      console.log 'rule'
  console.log 'searchCriteria'
  if searchCriteria.length <= 0
    emptyRules =
      [{
        searchOption: 'EMPTY_RULE'
        searchTerms: true
      }]
    console.log 'emptyRules'
    searchCriteria.push( emptyRules )
  searchCriteria.push["Hello"]
  console.log 'searchCriteria'
  store = Ext.create( 'Traverse.core.store.admin.container.ContainerMemberSummarySearchStore',
    params:
      searchCriterias: searchCriteria
      traverseTypeEnums: if containerType is 'device' then [ Traverse.core.enumeration.TraverseType.DEVICE ] else [ Traverse.core.enumeration.TraverseType.TEST ]
  )
  complete = false
  store.on(
    'load'
    ( records, operation, success ) ->
      # NOTE: callback is fired multiple times for a paging store, only execute logic during the first call
      if complete
        return
      complete = true
      if success
        deferred.resolve( store )
      else
        store.destroyStore()
        #showError( operation.getError() )
        deferred.reject( operation.getError() )
      return
    scope: @
  )
  store.load()
Hope this helps
In a shortcode I can limit wp_query results by custom field values.
Example:
[my-shortcode meta_key=my-custom-field meta_value=100,200 meta_compare='IN']
And obviously it's possible to use multiple custom fields in a WP_Query, as described in WP_Query#Custom_Field_Parameters.
But how can I use multiple custom fields in my shortcode? At the moment I pass all the shortcode parameters in via $atts.
One of a few different solutions would be to use a JSON-encoded format for the meta values. Note that this isn't exactly user-friendly, but it would certainly accomplish what you want.
You would of course need to generate the JSON-encoded values first and ensure they are in the proper format. One way of doing that is to use PHP's built-in functions:
// Set up your array of values, then json_encode them with PHP
$values = array(
    array(
        'key'      => 'my_key',
        'value'    => 'my_value',
        'operator' => 'IN'
    ),
    array(
        'key'   => 'other_key',
        'value' => 'other_value',
    )
);
echo json_encode($values);
// outputs: [{"key":"my_key","value":"my_value","operator":"IN"},{"key":"other_key","value":"other_value"}]
Example usage in the shortcode:
[my-shortcode meta='[{"key":"my_key","value":"my_value","operator":"IN"},{"key":"other_key","value":"other_value"}]']
Which then you would parse out in your shortcode function, something like so:
function my_shortcode( $atts ) {
    $meta = $atts['meta'];
    // decode it "quietly" so no errors are thrown
    $meta = @json_decode( $meta );
    // check $meta in case it wasn't set or wasn't properly JSON-encoded
    if ( $meta && is_array( $meta ) ) {
        foreach ( $meta as $m ) {
            $key   = $m->key;
            $value = $m->value;
            $op    = ( ! empty( $m->operator ) ) ? $m->operator : '';
            // put $key, $value, $op into your meta query here...
        }
    }
}
Alternate Method
Another method would be to cause your shortcode to accept an arbitrary number of them, with matching numerical indexes, like so:
[my-shortcode meta-key1="my_key" meta-value1="my_value" meta-op1="IN"
              meta-key2="other_key" meta-value2="other_value"]
Then, in your shortcode function, "watch" for these values and glue them up yourself:
function my_shortcode( $atts ) {
    foreach ( $atts as $name => $value ) {
        if ( stripos( $name, 'meta-key' ) === 0 ) {
            $id    = str_ireplace( 'meta-key', '', $name );
            $key   = $value;
            $value = ( isset( $atts['meta-value' . $id] ) ) ? $atts['meta-value' . $id] : '';
            $op    = ( isset( $atts['meta-op' . $id] ) ) ? $atts['meta-op' . $id] : '';
            // use $key, $value, and $op as needed in your meta query
        }
    }
}
I tried to emulate this SQL in DBIx::Class, using the update_or_new function.
UPDATE user SET lastseen = GREATEST( lastseen, ?::timestamp ) WHERE userid = ?
It gives an error from the column-inflation code saying it is unable to invoke is_infinity on undef.
$schema->resultset('user')->update_or_new( {
    userid   => 'peter',
    lastseen => \[ 'GREATEST( lastseen, ?::timestamp )', DateTime->from_epoch(epoch => 1234) ]
} );
I guess this is because InflateColumn::DateTime does not expect a function there. Is there any clean workaround for this issue?
This is a bug in DBIx::Class (addressed here: https://github.com/dbsrgits/dbix-class/pull/44) and the fix has been merged. It should be fine in the next release.
That said, if you're using DBIx::Class <= 0.08270...
You're using update_or_new, but that function only makes sense if the row already exists:
in GREATEST( lastseen, ?::timestamp ), lastseen is undefined when the row doesn't exist yet.
I read through the source+docs a bunch and cannot find a way to sidestep the InflateColumn code and still have bind values. You can pass in literal SQL with a scalar ref ( \'NOW()' ) but not an array ref.
Your best bet would be to use the ResultSet's update method instead, which does not 'process/deflate any of the values passed in. This is unlike the corresponding "update" in DBIx::Class::Row.'
my $dtf = $schema->storage->datetime_parser; # https://metacpan.org/pod/DBIx::Class::Storage::DBI#datetime_parser
my $user_rs = $schema->resultset('User')->search({ userid => 'peter' });
my $dt = DateTime->from_epoch(epoch => 1234);

# SELECT COUNT(*) FROM user WHERE userid = 'peter'
if( $user_rs->count ) {
    $user_rs->update({
        lastseen => \[ 'GREATEST( lastseen, ? )', $dtf->format_datetime($dt) ]
    });
} else {
    $user_rs->create({ lastseen => $dt });
}
Has anyone successfully used something like DBIx::Class::WebForm or CatalystX-CRUD to automagically build a self-validating webform from a database table?
I'm imagining a module that reads a database table schema, reads the constraints for each column, and generates some abstract representation of a webform, with fields for error messages, etc. I'm using Catalyst and Plack with a big existing codebase.
I don't want to code up an HTML webform, nor any validation logic. I'm aiming to write as little code as possible, in the style of Ruby on Rails. Which Perl module is best for this?
UPDATE: I've solved the webform side with HTML::FormFu, but it's still clunky mapping the form inputs onto the database, e.g. date_start and date_end both relate to the 'created' column, and comment should match using 'LIKE %foo%', etc. Where's the 'DBICFu'?
UPDATE: This is for a web application, the webform should not look like a database table. I'm not looking for a database management tool.
You can use HTML::FormHandler::Moose and HTML::FormHandler::Model::DBIC and get some nice forms.
As a simple example:
The form definition:
package MyStats::Form::Datetime ;
use HTML::FormHandler::Moose ;
extends 'HTML::FormHandler::Model::DBIC' ;

use Date::Calc qw(Today_and_Now) ;

has_field 'datetimeid' => ( label => 'ID' ) ;
has_field 'datetime' => ( type => 'Text',
                          apply => [ { transform => \&transform_dt } ] ,
                          deflation => \&deflation_dt ,
                          required => 1 ) ;
has_field 'submit' => ( type => 'Submit' ,
                        value => 'Speichern' ) ;
# These are the fields of the table datetime

sub transform_dt {
    my ( $dt ) = @_ ;
    my @d = ( $dt =~ m/(\d{1,2})\.(\d{1,2})\.(\d{4})\s+(\d{1,2}):(\d{1,2})/ ) ;
    return sprintf( '%04d-%02d-%02d %02d:%02d:00' , @d[2,1,0,3,4] ) ;
}

sub deflation_dt {
    my ( $dt ) = @_ ;
    my @d = ( $dt =~ m/(\d{4})-(\d{2})-(\d{2})\s+(\d{1,2}):(\d{1,2})/ ) ;
    if( ! @d ) {
        @d = Today_and_Now() ;
    }
    return sprintf( '%02d.%02d.%04d %02d:%02d:00' , @d[2,1,0,3,4] ) ;
}

1 ;
And the usage in a controller:
package MyStats::Controller::Datetime ;
use Moose ;
use namespace::autoclean ;
BEGIN { extends 'Catalyst::Controller' ; }

use MyStats::Form::Datetime ;

has 'form' => ( isa => 'MyStats::Form::Datetime' ,
                is => 'rw' ,
                lazy => 1 ,
                default => \&new_datetime_form ) ;

sub new_datetime_form {
    MyStats::Form::Datetime->new( css_class => 'datetimeform' ,
                                  name => 'datetimeform' ) ;
}

...

sub add :Local :Args(0) {
    my ( $self , $ctx ) = @_ ;
    my $data = $ctx->model( 'MyStatsDB::Datetime' )->new_result( {} ) ;
    $ctx->stash( template => 'datetime/add.tt2' ,
                 form => $self->form ) ;
    $ctx->bread_crumb( { name => 'Datum/Zeit eingeben' ,
                         location => '/datetime/add' } ) ;
    $ctx->req->param( 'datetimeid' , undef ) if $ctx->req->param( 'datetimeid' ) ;
    return unless $self->form->process( item => $data ,
                                        params => $ctx->req->params ) ;
    $ctx->flash( message => 'Neuer Datensatz ' . $data->datetimeid . ' angelegt.' ,
                 id_add => $data->datetimeid ) ;
    $ctx->res->redirect( $ctx->uri_for( '/datetime' ) ) ;
}

...

__PACKAGE__->meta->make_immutable ;
1 ;
Works well.
I've used HTML::FormHandler to generate forms for me in this fashion. It needs some tweaking, but it does 90% of the work for you. Separately, DBIx::Class offers a similar tool.
There are a number of CRUD options on the Catalyst wiki.
It sounds like AutoCrud would fit your needs.