Elasticsearch now function problems - date

I'm having problems trying to get a query working with the "now" function. My current query looks something like this:
{
"query": {
"bool" : {
"must" : [
{ "match": { "originCountry" : "GB" }},
{ "match": { "destinationCity" : "MIL" }}
]
}
},
"filter" : {
"and": {
"filters": [
{
"exists": {"field": "dateBack"}
} ,
{
"script" : {"script" : "doc['originRegion'].value == doc['destinationRegion'].value"}
},
{
"range": {
"dateOut": {
"gte": "now"
}
}
}
]
}
}
}
That's not returning any results. However, if I change the range section to a literal date string like:
"range": {
"dateOut": {
"gte": "20150101"
}
}
It works perfectly. In my index mapping, all date fields use the "basic_date" format (YYYYMMDD).
Could this be causing problems for the now function? Does anyone know how the now function works? Is it converting the "now" date to whatever date format the field being compared against uses? I've been unable to find any useful documentation about this.
Thanks

Check your date mapping - it should be YYYYMMdd instead of YYYYMMDD
When I set up the mapping:
curl -XPOST http://localhost:9200/index/testnow/_mapping -d '
{"testnow": {
"properties": {
"dateOut": {"type": "date","format" : "YYYYMMdd"},
"dateBack": {"type": "date","format" : "YYYYMMdd"}
}}}'
and index a couple of documents:
curl -XPOST http://localhost:9200/index/testnow/ -d '
{
"originCountry": "GB",
"destinationCity": "MIL",
"dateBack" : "20140212",
"originRegion" : "X",
"destinationRegion" : "X",
"dateOut" : "20140201"
}'
curl -XPOST http://localhost:9200/index/testnow/ -d '
{
"originCountry": "GB",
"destinationCity": "MIL",
"dateBack" : "20150212",
"originRegion" : "X",
"destinationRegion" : "X",
"dateOut" : "20150201"
}'
and run the query:
curl -XGET http://localhost:9200/index/testnow/_search -d '
{
"query" : {
"filtered" : {
"query": {
"bool" : {
"must" : [
{ "match": { "originCountry" : "GB" }},
{ "match": { "destinationCity" : "MIL" }}
]
}
},
"filter" : {
"and" : [
{"exists": {"field": "dateBack"}},
{"script" : {"script" : "doc[\"originRegion\"].value == doc[\"destinationRegion\"].value"}},
{"range": {"dateOut": {"gte": "now"}}}
]} }}}'
I get back a single document as expected:
{
"took" : 11,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 1,
"max_score" : 1.4142135,
"hits" : [ {
"_index" : "index",
"_type" : "testnow",
"_id" : "AUqgq8u4aqAGLvfmRnfz",
"_score" : 1.4142135,
"_source":
{
"originCountry": "GB",
"destinationCity": "MIL",
"dateBack" : "20150212",
"originRegion" : "X",
"destinationRegion" : "X",
"dateOut" : "20150201"
}
} ]
}
}
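For reference, a sketch of the same mapping using the calendar-based Joda-Time tokens (lowercase yyyy is the calendar year and dd the day-of-month, whereas uppercase YYYY is the week-based year and DD the day-of-year), together with a date-math variant of the range filter that rounds "now" down to the start of the day. Once the field is a proper date, "now" is resolved to the current time in milliseconds and compared against the stored millisecond values, so the field's format string only matters at index time:
curl -XPOST http://localhost:9200/index/testnow/_mapping -d '
{"testnow": {
    "properties": {
        "dateOut": {"type": "date", "format": "yyyyMMdd"},
        "dateBack": {"type": "date", "format": "yyyyMMdd"}
    }}}'
and in the filter:
{"range": {"dateOut": {"gte": "now/d"}}}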

Related

Group and Merge array of objects

I am struggling with MongoDB's aggregation pipeline feature.
So far the output for one result looks like this:
{
"type": "inbound",
"sender": "postAG",
"receiver": "maxMusterMan",
"datetime": "20191125",
"info": [
{
"q": "A",
"value": "5",
"name": null,
"plz": 1234
},
{
"q": "B",
"value": "AS",
"name": "ABS",
"plz": null
},
{
"q": "A",
"value": "5",
"name": "aa",
"plz": null
},
... more objects
]
}
The final result should look like:
{
"type": "inbound",
"sender": "postAG",
"receiver": "maxMusterMan",
"datetime": "20191125",
"info": [
{
"q": "A",
"value": "0",
"name": "aa",
"plz": 1234
},
{
"q": "B",
"value": "AS",
"name": "ABS"
}
]
}
So, in a nutshell, I want to group the objects in the info array by the "q" field and merge them (a newer object overwrites the older values).
Furthermore, I would like to remove all fields whose value is "" or null.
There are more fields in the real payload, so I would like to avoid adding a $cond for each field of the object.
Some approaches I have considered so far:
For the cleanup, use a UDF, but this is not possible in the pipeline.
Use map-reduce for the group and merge, but that is not available in the pipeline either.
Please consider that the input is itself the output of several earlier pipeline stages, so I cannot just use map-reduce alone; I need the pipeline first.
My idea was to create two views, where the first does the pipeline work and the second the map-reduce. Is this a good solution?
Thx
Andreas
I didn't really understand from your explanation whether you can or cannot use map-reduce.
However, assuming you can't and you have to 'concat' the pipelines, there is no 'generic' workaround for multiple fields - you have to create a condition for each one in the pipeline.
With that said, here is a working pipeline:
db.collection.aggregate(
[
{
"$unwind" : "$info"
},
{
"$group" : {
"_id" : "$info.q",
"type" : {
"$first" : "$type"
},
"sender" : {
"$first" : "$sender"
},
"receiver" : {
"$first" : "$receiver"
},
"datetime" : {
"$first" : "$datetime"
},
"values" : {
"$push" : "$info.value"
},
"names" : {
"$push" : "$info.name"
},
"plz" : {
"$push" : "$info.plz"
}
}
},
{
"$project" : {
"_id" : 1.0,
"type" : 1.0,
"sender" : 1.0,
"receiver" : 1.0,
"datetime" : 1.0,
"values" : {
"$filter" : {
"input" : "$values",
"as" : "curr",
"cond" : {
"$or" : [
{
"$ne" : [
"$$curr",
null
]
},
{
"$ne" : [
"$$curr",
""
]
}
]
}
}
},
"names" : {
"$filter" : {
"input" : "$names",
"as" : "curr",
"cond" : {
"$or" : [
{
"$ne" : [
"$$curr",
null
]
},
{
"$ne" : [
"$$curr",
""
]
}
]
}
}
},
"plz" : {
"$filter" : {
"input" : "$plz",
"as" : "curr",
"cond" : {
"$or" : [
{
"$ne" : [
"$$curr",
null
]
},
{
"$ne" : [
"$$curr",
""
]
}
]
}
}
}
}
},
{
"$project" : {
"sender" : 1.0,
"receiver" : 1.0,
"datetime" : 1.0,
"type" : 1.0,
"_id" : 1.0,
"value" : {
"$cond" : {
"if" : {
"$gt" : [
{
"$size" : "$values"
},
0.0
]
},
"then" : {
"$arrayElemAt" : [
"$values",
-1.0
]
},
"else" : null
}
},
"name" : {
"$cond" : {
"if" : {
"$gt" : [
{
"$size" : "$names"
},
0.0
]
},
"then" : {
"$arrayElemAt" : [
"$names",
-1.0
]
},
"else" : null
}
},
"plz" : {
"$cond" : {
"if" : {
"$gt" : [
{
"$size" : "$plz"
},
0.0
]
},
"then" : {
"$arrayElemAt" : [
"$plz",
-1.0
]
},
"else" : null
}
}
}
},
{
"$addFields" : {
"infoArray" : [
{
"k" : "type",
"v" : "$_id"
},
{
"k" : "value",
"v" : "$value"
},
{
"k" : "name",
"v" : "$name"
},
{
"k" : "plz",
"v" : "$plz"
}
]
}
},
{
"$addFields" : {
"info" : {
"$arrayToObject" : {
"$filter" : {
"input" : "$infoArray",
"as" : "curr",
"cond" : {
"$ne" : [
"$$curr.v",
null
]
}
}
}
}
}
},
{
"$group" : {
"_id" : null,
"type" : {
"$first" : "$type"
},
"sender" : {
"$first" : "$sender"
},
"receiver" : {
"$first" : "$receiver"
},
"datetime" : {
"$first" : "$datetime"
},
"info" : {
"$push" : "$info"
}
}
}
]
)
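To try the pipeline end to end, a minimal sketch in the mongo shell (the collection name testcol is hypothetical; the sample document is the one from the question, trimmed to the three info entries shown):
// insert the sample document from the question
db.testcol.insertOne({
    "type": "inbound",
    "sender": "postAG",
    "receiver": "maxMusterMan",
    "datetime": "20191125",
    "info": [
        { "q": "A", "value": "5", "name": null, "plz": 1234 },
        { "q": "B", "value": "AS", "name": "ABS", "plz": null },
        { "q": "A", "value": "5", "name": "aa", "plz": null }
    ]
})
// run the aggregation with the stages listed above
var pipeline = [ /* the stages shown above */ ];
db.testcol.aggregate(pipeline)
Each resulting info object then corresponds to one distinct info.q value, with the last non-empty value winning for value, name and plz.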

Elasticsearch - Range query doesn't work

I have tried this with Elasticsearch 2.x and 5.x, but it doesn't work in either of them.
I have lots of logs saved in my Elasticsearch instance. They have a field called timestamp whose format is "YYYY-MM-dd HH-mm-ss.SSS" (for example, "2017-11-02 00:00:00.000"). When I send this query via Postman:
{
"query": {
"range": {
"timestamp": {
"gte": "2017-10-21 00:00:00.000",
"lte": "2017-10-27 00:00:00.000"
}
}
}
}
I receive nothing, and there are more than 500 logs in that range. What am I doing wrong?
EDIT:
My index (loganalyzer):
{
"loganalyzer" : {
"aliases" : { },
"mappings" : {
"logs" : {
"properties" : {
"entireLog" : {
"type" : "string"
},
"formattedMessage" : {
"type" : "string"
},
"id" : {
"type" : "string"
},
"level" : {
"type" : "string"
},
"loggerName" : {
"type" : "string"
},
"testNo" : {
"type" : "string"
},
"threadName" : {
"type" : "string"
},
"timestamp" : {
"type" : "string"
}
}
}
},
"settings" : {
"index" : {
"refresh_interval" : "1s",
"number_of_shards" : "5",
"creation_date" : "1507415366223",
"store" : {
"type" : "fs"
},
"number_of_replicas" : "1",
"uuid" : "9w3QQQc0S0K0NcKtOERtTw",
"version" : {
"created" : "2040699"
}
}
},
"warmers" : { }
}
}
What I receive when sending the request:
{
"took": 429,
"timed_out": false,
"_shards": {
"total": 5,
"successful": 5,
"failed": 0
},
"hits": {
"total": 0,
"max_score": null,
"hits": []
}
}
And status 200 (OK).
Your edit with the mappings indicates the problem. The reason you aren't getting any results is that the range query is comparing the strings you provide against the field's values, which are also indexed as strings, not dates.
"timestamp" : {
"type" : "string"
}
Here's the elastic documentation on that mapping type
You need to apply a date mapping to that field before indexing, or reindex to a new index that has that mapping applied prior to ingestion.
Here is what the mapping request could look like, conforming to your timestamp format:
PUT loganalyzer
{
"mappings": {
"logs": {
"properties": {
"timestamp": {
"type": "date",
"format": "YYYY-MM-dd HH-mm-ss.SSS"
}
}
}
}
}
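One caveat (an assumption based on the sample values in the question): the example timestamps use colons between hours, minutes and seconds ("2017-11-02 00:00:00.000"), so the format string would need colons too, and lowercase yyyy (calendar year) is generally safer than YYYY (week-based year). A sketch of a corrected mapping on a new index plus a reindex of the existing data - the index name loganalyzer_v2 is hypothetical, and _reindex is available on 5.x and 2.3+:
PUT loganalyzer_v2
{
  "mappings": {
    "logs": {
      "properties": {
        "timestamp": {
          "type": "date",
          "format": "yyyy-MM-dd HH:mm:ss.SSS"
        }
      }
    }
  }
}

POST _reindex
{
  "source": { "index": "loganalyzer" },
  "dest": { "index": "loganalyzer_v2" }
}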

Elasticsearch's keyword tokenizer and searching for emails does not really work

I have set up an index like this:
POST /testindex/ -d '
{
"settings": {
"analysis": {
"analyzer": {
"analyzer_keyword": {
"tokenizer": "keyword"
}
}
}
},
"mappings": {
"users": {
"properties": {
"email": {
"analyzer": "analyzer_keyword",
"type": "string"
}
}
}
}
}'
Now I have added some user documents to testindex, where each user contains an email address. If I search for a user document by specifying the email address like the following, it does not really work as expected:
GET /testindex/users/_search
{
"query" : {
"term" : { "email" : "hello#host.com" }
}
}
This query returns 0 results. But if I use "email": "hello" or "email": "host.com", it returns the exact document. So what is wrong with the #? How can I search by the complete email address?
The elasticsearch documentation says:
A tokenizer of type keyword that emits the entire input as a single input. The entire input is hello#host.com.
I also tried uax_url_email tokenizer. Does not work either.
Seems to work fine for me:
curl -XDELETE "localhost:9200/testindex?pretty"
curl -XPOST "localhost:9200/testindex?pretty" -d '
{
"settings": {
"analysis": {
"analyzer": {
"analyzer_keyword": {
"tokenizer": "keyword"
}
}
}
},
"mappings": {
"users": {
"properties": {
"email": {
"analyzer": "analyzer_keyword",
"type": "string"
}
}
}
}
}'
curl -XPOST "localhost:9200/testindex/users?pretty&refresh" -d '{"email": "hello#host.com"}'
curl -XGET "localhost:9200/testindex/users/_search?pretty" -d '{
"query" : {
"term" : { "email" : "hello#host.com" }
}
}'
it returns:
{
"error" : "IndexMissingException[[testindex] missing]",
"status" : 404
}
{
"ok" : true,
"acknowledged" : true
}
{
"ok" : true,
"_index" : "testindex",
"_type" : "users",
"_id" : "GkPG9l83RGyeMyGM9x6ecQ",
"_version" : 1
}
{
"took" : 62,
"timed_out" : false,
"_shards" : {
"total" : 5,
"successful" : 5,
"failed" : 0
},
"hits" : {
"total" : 1,
"max_score" : 0.30685282,
"hits" : [ {
"_index" : "testindex",
"_type" : "users",
"_id" : "GkPG9l83RGyeMyGM9x6ecQ",
"_score" : 0.30685282, "_source" : {"email": "hello#host.com"}
} ]
}
}
on both 0.90.7 and current master. Did you try to delete the index before changing the mapping?
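If the analyzer was added to the mapping after documents were already indexed, the old (standard) analysis may still be in effect for that field. A quick check, reusing the index and analyzer names from above: confirm the mapping, then run the address through the _analyze API - with analyzer_keyword it should come back as a single token, hello#host.com, whereas the standard analyzer splits it into hello and host.com:
curl -XGET "localhost:9200/testindex/_mapping?pretty"
curl -XGET "localhost:9200/testindex/_analyze?pretty&analyzer=analyzer_keyword" -d 'hello#host.com'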

Unexpected results from Elasticsearch

I have some documents stored in ES (by logstash), and the results, when querying ES, do not look right:
The first query (see the queries and results below) is meant to return only documents that do not contain the region field.
Furthermore, based on the result of the first query, there obviously is a document that contains the region field; however, the second query, which should (at least) return a document with region=IN, returns no documents.
Is something wrong with my queries?
How can I investigate where the problem is? (The ES logs do not contain anything related to these queries.)
Here is the query:
curl -X GET 'http://localhost:9200/logstash*/_search?pretty' -d '{
"query" : {
"match_all" : {}
},
filter : {
"and" : [
{ "term" : { "type" : "xsys" } },
{ "missing" : { "field" : "region" } }
]
}, size: 2
}'
And the result:
{
"took" : 40,
"timed_out" : false,
"_shards" : {
"total" : 90,
"successful" : 90,
"failed" : 0
},
"hits" : {
"total" : 5747,
"max_score" : 1.0,
"hits" : [ {
"_index" : "logstash-2013.09.28",
"_type" : "logs",
"_id" : "UMrz9bwKQgCq__TwBT0WmQ",
"_score" : 1.0,
"_source" : {
.....
"type":"xsys",
....
"region":"IN",
}
}, { ....
} ]
}
}
Furthermore, the result for the following query:
curl -X GET 'http://localhost:9200/logstash*/_search?pretty' -d '{
"query" : { "match_all" : {} },
filter : { "term" : { "region" : "IN" } },
size: 1
}'
is:
{
"took" : 55,
"timed_out" : false,
"_shards" : {
"total" : 90,
"successful" : 90,
"failed" : 0
},
"hits" : {
"total" : 0,
"max_score" : null,
"hits" : [ ]
}
The following mapping template is used:
curl -XPUT http://localhost:9200/_template/logstash_per_index -d '
{
"template": "logstash*",
"settings": {
"index.query.default_field": "message",
"index.cache.field.type": "soft",
"index.store.compress.stored": true
},
"mappings": {
"_default_": {
"_all": { "enabled": false },
"properties": {
"message": { "type": "string", "index": "analyzed" },
"#version": { "type": "string", "index": "not_analyzed" },
"#timestamp": { "type": "date", "index": "not_analyzed" },
"type": { "type": "string", "index": "not_analyzed" },
....
"region": { "type": "string", "index": "not_analyzed" },
...
}
}
}
}'
Mapping (what ES has returned for curl -XGET 'http://localhost:9200/logstash-2013.09.28/_mapping'):
{
"logstash-2013.09.28":{
"logs":{
"_all":{
"enabled":false
},
"properties":{
"#timestamp":{
"type":"date",
"format":"dateOptionalTime"
},
"#version":{
"type":"string",
"index":"not_analyzed",
"omit_norms":true,
"index_options":"docs"
},
"message":{
"type":"string"
},
"region":{
"type":"string"
},
"type":{
"type":"string",
"index":"not_analyzed",
"omit_norms":true,
"index_options":"docs"
}
}
},
"_default_":{
"_all":{
"enabled":false
},
"properties":{
"#timestamp":{
"type":"date",
"format":"dateOptionalTime"
},
"#version":{
"type":"string",
"index":"not_analyzed",
"omit_norms":true,
"index_options":"docs"
},
"message":{
"type":"string"
},
"type":{
"type":"string",
"index":"not_analyzed",
"omit_norms":true,
"index_options":"docs"
}
}
}
}
}
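One detail worth checking, based on the mapping ES returned: in the actual logstash-2013.09.28 mapping the region field is a plain analyzed string, i.e. the template's "index": "not_analyzed" setting did not take effect for it, so "IN" is indexed as the lowercased token "in" and a term filter for the literal "IN" matches nothing. A diagnostic sketch, reusing the filtered-query form from the first question above with the lowercased term:
curl -X GET 'http://localhost:9200/logstash*/_search?pretty' -d '{
  "query": {
    "filtered": {
      "query": { "match_all": {} },
      "filter": { "term": { "region": "in" } }
    }
  },
  "size": 1
}'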

mapping in create index in elasticsearch through mongodb river is not taking effect

I am trying to index MongoDB data in Elasticsearch using mongodb-river with the following command, but the document mapping is not taking effect. It is still using the default (standard) analyzer for the text field.
Mongodb-river
The documentation describes how the index is created, but there is nothing on how to provide a custom mapping. This is what I tried. Is there any other documentation where I can find how to specify custom analyzers etc. when using mongodb-river?
curl -XPUT "localhost:9200/_river/autocompleteindex/_meta" -d '
{
"type": "mongodb",
"mongodb": {
"host": "rahulg-dc",
"port": "27017",
"db": "qna",
"collection": "autocomplete_questions"
},
"index": {
"name": "autocompleteindex",
"type": "autocomplete_questions",
"analysis" : {
"analyzer" : {
"str_search_analyzer" : {
"tokenizer" : "keyword",
"filter" : ["lowercase"]
},
"str_index_analyzer" : {
"tokenizer" : "keyword",
"filter" : ["lowercase", "ngram"]
}
},
"filter" : {
"ngram" : {
"type" : "ngram",
"min_gram" : 2,
"max_gram" : 20
}
}
}
},
"autocompleteindex": {
"_boost" : {
"name" : "po",
"null_value" : 1.0
},
"properties": {
"po": {
"type": "double"
},
"text": {
"type": "string",
"boost": 3.0,
"search_analyzer" : "str_search_analyzer",
"index_analyzer" : "str_index_analyzer"
}
}
}
}'
The query returns proper results if I search by full words, but it does not match any substrings. Also, the boost factor is not showing its effect.
What am I doing wrong?
You have to first create your index with your index settings (analyzer):
"analysis" : {
"analyzer" : {
"str_search_analyzer" : {
"tokenizer" : "keyword",
"filter" : ["lowercase"]
},
"str_index_analyzer" : {
"tokenizer" : "keyword",
"filter" : ["lowercase", "ngram"]
}
},
"filter" : {
"ngram" : {
"type" : "ngram",
"min_gram" : 2,
"max_gram" : 20
}
}
}
Then you can define a mapping for your type:
"autocomplete_questions": {
"_boost" : {
"name" : "po",
"null_value" : 1.0
},
"properties": {
"po": {
"type": "double"
},
"text": {
"type": "string",
"boost": 3.0,
"search_analyzer" : "str_search_analyzer",
"index_analyzer" : "str_index_analyzer"
}
}
}
And only then can you create the river:
curl -XPUT "localhost:9200/_river/autocompleteindex/_meta" -d '
{
"type": "mongodb",
"mongodb": {
"host": "rahulg-dc",
"port": "27017",
"db": "qna",
"collection": "autocomplete_questions"
},
"index": {
"name": "autocompleteindex",
"type": "autocomplete_questions"} }
Does it help?
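Putting the two fragments together, creating the index up front could look roughly like this (a sketch; the settings and mapping are exactly the ones quoted above, and the _river document is then PUT afterwards as in the question):
curl -XPUT "localhost:9200/autocompleteindex" -d '
{
  "settings": {
    "analysis": {
      "analyzer": {
        "str_search_analyzer": { "tokenizer": "keyword", "filter": ["lowercase"] },
        "str_index_analyzer": { "tokenizer": "keyword", "filter": ["lowercase", "ngram"] }
      },
      "filter": {
        "ngram": { "type": "ngram", "min_gram": 2, "max_gram": 20 }
      }
    }
  },
  "mappings": {
    "autocomplete_questions": {
      "_boost": { "name": "po", "null_value": 1.0 },
      "properties": {
        "po": { "type": "double" },
        "text": {
          "type": "string",
          "boost": 3.0,
          "search_analyzer": "str_search_analyzer",
          "index_analyzer": "str_index_analyzer"
        }
      }
    }
  }
}'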