OpenSearch index mapping field value not searchable

I am new to OpenSearch. I am running this code in Dev Tools to make the media_image_thumbnail_url field not searchable, but I get an error saying the index already exists.
PUT cars
{
  "mappings" : {
    "properties" : {
      "fields" : {
        "properties" : {
          "media_image_thumbnail_url" : {
            "type" : "text",
            "fields" : {
              "keyword" : {
                "type" : "keyword",
                "ignore_above" : 256
              },
              "enable" : false
            }
          }
        }
      }
    }
  }
}

This solved my issue. PUT cars fails because it tries to create an index that already exists; updating the mapping of the existing index through the _mappings endpoint works. (Note also that "enable" is not a valid mapping parameter; "index": false is what makes a field not searchable.)
PUT cars/_mappings
{
  "properties" : {
    "fields" : {
      "properties" : {
        "media_image_thumbnail_url" : {
          "type" : "text",
          "fields" : {
            "raw" : {
              "type" : "text",
              "index" : false
            }
          }
        }
      }
    }
  }
}
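An existing field's type cannot be changed in place; the request above works because it only adds a new sub-field (raw), which will apply to documents indexed after the change. To verify the result, you can fetch the mapping back:

GET cars/_mapping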

Related

Not able to attach custom authorizer to aws api gateway through openapi specifications

We have an API Gateway with many routes managed by Terraform API Gateway resources. As the number of routes grew, they became difficult to manage as individual resources, so we are moving to the body argument (with merge) so that new routes can be added by importing an OpenAPI spec with Amazon extensions. However, the custom Lambda authorizer is not getting attached to the method request.
OpenAPI Spec:
openapi_config = {
  openapi = "3.0.1"
  info = {
    title   = "im-dev-api-gateway"
    version = "1.0"
  }
  paths = {
    "/v1/api/go/data" : {
      "post" : {
        "parameters" : [{
          "name" : "proxy",
          "in" : "path",
          "required" : true,
          "schema" : {
            "type" : "string"
          }
        }],
        "responses" : {
          "200" : {
            "description" : "200 response",
            "headers" : {
              "Access-Control-Allow-Origin" : {
                "schema" : {
                  "type" : "string"
                }
              }
            },
            "content" : {
              "application/json" : {
                "schema" : {
                  "$ref" : "#/components/schemas/Empty"
                }
              }
            }
          }
        },
        "security" : [{
          "im-dev-lambda-authorizer" : []
        }],
        "x-amazon-apigateway-integration" : {
          "httpMethod" : "POST",
          "uri" : "https://$${stageVariables.LoadBalancerURL}/v1/api/go/data",
          "requestParameters" : {
            "integration.request.header.X-Auth-Client-ID" : "context.authorizer.x-auth-client-id",
            "integration.request.path.proxy" : "method.request.path.proxy",
            "integration.request.header.X-Request-ID" : "context.authorizer.x-request-id"
          },
          "passthroughBehavior" : "when_no_match",
          "timeoutInMillis" : 29000,
          "type" : "http_proxy"
        }
      },
      "options" : {
        "responses" : {
          "200" : {
            "description" : "200 response",
            "headers" : {
              "Access-Control-Allow-Origin" : {
                "schema" : {
                  "type" : "string"
                }
              },
              "Access-Control-Allow-Methods" : {
                "schema" : {
                  "type" : "string"
                }
              },
              "Access-Control-Allow-Headers" : {
                "schema" : {
                  "type" : "string"
                }
              }
            },
            "content" : {
              "application/json" : {
                "schema" : {
                  "$ref" : "#/components/schemas/Empty"
                }
              }
            }
          }
        },
        "x-amazon-apigateway-integration" : {
          "responses" : {
            "default" : {
              "statusCode" : "200",
              "responseParameters" : {
                "method.response.header.Access-Control-Allow-Methods" : "'GET,OPTIONS,POST,PUT'",
                "method.response.header.Access-Control-Allow-Headers" : "'Content-Type,X-Amz-Date,Authorization,X-Api-Key,X-Amz-Security-Token,X-Auth-Client-ID,X-Request-ID'",
                "method.response.header.Access-Control-Allow-Origin" : "'*'"
              }
            }
          },
          "requestTemplates" : {
            "application/json" : "{\"statusCode\": 200}"
          },
          "passthroughBehavior" : "never",
          "timeoutInMillis" : 29000,
          "type" : "mock"
        }
      }
    }
  }
  components = {
    "schemas" : {
      "Empty" : {
        "title" : "Empty Schema",
        "type" : "object"
      }
    },
    "securitySchemes" : {
      "im-dev-lambda-authorizer" : {
        "type" : "apiKey",
        "name" : "Unused",
        "in" : "header",
        "x-amazon-apigateway-authtype" : "custom",
        "x-amazon-apigateway-authorizer" : {
          "authorizerUri" : "arn:aws:apigateway:ap-south-1:lambda:path/2015-03-31/functions/arn:aws:lambda:ap-south-1:999999999:function:im-dev-authorizer/invocations",
          "authorizerCredentials" : "arn:aws:iam::999999999:role/im-dev-api-gateway-auth-invocation",
          "authorizerResultTtlInSeconds" : 0,
          "identitySource" : "context.$context.requestId",
          "type" : "request"
        }
      }
    }
  }
}
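For comparison, here is a minimal sketch of a REQUEST-type authorizer scheme based on AWS's documented x-amazon-apigateway-authorizer extension, with identitySource pointing at a concrete request parameter rather than a context variable; treat it as an illustration, not a confirmed fix for this setup:

"securitySchemes" : {
  "im-dev-lambda-authorizer" : {
    "type" : "apiKey",
    "name" : "Unused",
    "in" : "header",
    "x-amazon-apigateway-authtype" : "custom",
    "x-amazon-apigateway-authorizer" : {
      "type" : "request",
      "identitySource" : "method.request.header.Authorization",
      "authorizerUri" : "arn:aws:apigateway:ap-south-1:lambda:path/2015-03-31/functions/arn:aws:lambda:ap-south-1:999999999:function:im-dev-authorizer/invocations",
      "authorizerResultTtlInSeconds" : 0
    }
  }
}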

Elasticsearch - Range query doesn't work

To rule out a version issue I have tried Elasticsearch 2.x and 5.x, and it doesn't work on either.
I have lots of logs saved in my Elasticsearch instance. They have a field called timestamp whose format is "yyyy-MM-dd HH:mm:ss.SSS" (for example, "2017-11-02 00:00:00.000"). When I send this query via Postman:
{
  "query": {
    "range": {
      "timestamp": {
        "gte": "2017-10-21 00:00:00.000",
        "lte": "2017-10-27 00:00:00.000"
      }
    }
  }
}
I receive nothing, yet there are more than 500 logs in that range. What am I doing wrong?
EDIT:
My index (loganalyzer):
{
  "loganalyzer" : {
    "aliases" : { },
    "mappings" : {
      "logs" : {
        "properties" : {
          "entireLog" : {
            "type" : "string"
          },
          "formattedMessage" : {
            "type" : "string"
          },
          "id" : {
            "type" : "string"
          },
          "level" : {
            "type" : "string"
          },
          "loggerName" : {
            "type" : "string"
          },
          "testNo" : {
            "type" : "string"
          },
          "threadName" : {
            "type" : "string"
          },
          "timestamp" : {
            "type" : "string"
          }
        }
      }
    },
    "settings" : {
      "index" : {
        "refresh_interval" : "1s",
        "number_of_shards" : "5",
        "creation_date" : "1507415366223",
        "store" : {
          "type" : "fs"
        },
        "number_of_replicas" : "1",
        "uuid" : "9w3QQQc0S0K0NcKtOERtTw",
        "version" : {
          "created" : "2040699"
        }
      }
    },
    "warmers" : { }
  }
}
What I receive when sending the request:
{
  "took": 429,
  "timed_out": false,
  "_shards": {
    "total": 5,
    "successful": 5,
    "failed": 0
  },
  "hits": {
    "total": 0,
    "max_score": null,
    "hits": []
  }
}
And status 200 (OK).
Your edit with the mappings reveals the problem. You aren't getting any results because the range is evaluated against the field as a string: the values you provide are compared lexicographically against field values that are also treated as strings.
"timestamp" : {
"type" : "string"
}
Here's the elastic documentation on that mapping type
You need to apply a date mapping to that field before indexing, or reindex into a new index that has the mapping applied prior to ingestion.
Here is what the mapping request could look like, conforming to your timestamp format (note the colons in the time portion, matching your sample values, and the conventional lowercase yyyy for the year):
PUT loganalyzer
{
  "mappings": {
    "logs": {
      "properties": {
        "timestamp": {
          "type": "date",
          "format": "yyyy-MM-dd HH:mm:ss.SSS"
        }
      }
    }
  }
}
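If the index already holds data, one option is to create a correctly mapped index and copy the documents into it. A sketch using the _reindex API (available in 5.x; the destination name loganalyzer_v2 is made up for illustration):

POST _reindex
{
  "source": { "index": "loganalyzer" },
  "dest": { "index": "loganalyzer_v2" }
}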

MongoDB sorting issue with ascending order

I have a collection named "formTest123" with the following records:
/* 0 */
{
  "_id" : ObjectId("5784f5aeef31a98294231459"),
  "data" : [
    {
      "name" : "Amir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "pncjj"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "xyz"
            }
          }
        }
      ]
    }
  ]
}
/* 1 */
{
  "_id" : ObjectId("5784f62cef31a9829423145a"),
  "data" : [
    {
      "name" : "Zssmir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "bncd"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "gyk"
            }
          }
        }
      ]
    }
  ]
}
/* 2 */
{
  "_id" : ObjectId("5784f636ef31a9829423145b"),
  "data" : [
    {
      "name" : "Cmir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "tuhn"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "knm"
            }
          }
        }
      ]
    }
  ]
}
When I query this collection in descending order with:
db.formTest123.find().sort({ "data.Other.data.city.address": -1 })
it gives the correct output:
/* 0 */
{
  "_id" : ObjectId("5784f636ef31a9829423145b"),
  "data" : [
    {
      "name" : "Cmir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "tuhn"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "knm"
            }
          }
        }
      ]
    }
  ]
}
/* 1 */
{
  "_id" : ObjectId("5784f5aeef31a98294231459"),
  "data" : [
    {
      "name" : "Amir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "pncjj"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "xyz"
            }
          }
        }
      ]
    }
  ]
}
/* 2 */
{
  "_id" : ObjectId("5784f62cef31a9829423145a"),
  "data" : [
    {
      "name" : "Zssmir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "bncd"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "gyk"
            }
          }
        }
      ]
    }
  ]
}
But when I query with:
db.formTest123.find().sort({ "data.Other.data.city.address": 1 })
to get ascending order by "city.address", it gives:
/* 0 */
{
  "_id" : ObjectId("5784f5aeef31a98294231459"),
  "data" : [
    {
      "name" : "Amir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "pncjj"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "xyz"
            }
          }
        }
      ]
    }
  ]
}
/* 1 */
{
  "_id" : ObjectId("5784f62cef31a9829423145a"),
  "data" : [
    {
      "name" : "Zssmir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "bncd"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "gyk"
            }
          }
        }
      ]
    }
  ]
}
/* 2 */
{
  "_id" : ObjectId("5784f636ef31a9829423145b"),
  "data" : [
    {
      "name" : "Cmir",
      "Other" : [
        {
          "data" : {
            "city" : {
              "address" : "tuhn"
            }
          }
        },
        {
          "data" : {
            "state" : {
              "address" : "knm"
            }
          }
        }
      ]
    }
  ]
}
That is clearly wrong: the records are not sorted in ascending order by "city.address".
Can anyone guess what the problem is with ascending order?
It is because it orders from the "lowest" value to the "highest", and undefined and/or null sort "lower" than any existing value.
When you sort ascending by an array field, MongoDB looks at all elements of the array and takes the minimum value, which is "undefined" for the element that contains only state.
If, for example, you add a "city.address" with a high enough value to every element, ascending order will work. Like this:
"Other" : [
{
"data" : {
"city" : {
"address" : "tuhn"
}
}
},
{
"data" : {
"state" : {
"address" : "knm"
}
"city" : {
"address" : "zzzzzzzzzzzzzzzzz"
}
}
}
]
Note: for descending order it takes the maximum value, so it "works" because any city.address overrides the nulls and undefineds.
If you need ascending order and don't want to change the data structure, I would suggest sorting descending and then reversing the order programmatically, if that is possible.
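A minimal sketch of that workaround in the mongo shell (it materializes the whole result set client-side, so it assumes the results fit in memory):

db.formTest123.find().sort({ "data.Other.data.city.address": -1 }).toArray().reverse()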
Since the field "Other" is an array of (sub)documents, we can also specify the index of the sub-document in the sort key. Based on the schema in the question, the following works:
db.formTest123.find().sort({ "data.Other.0.data.city.address": 1 });
and
db.formTest123.find().sort({ "data.Other.0.data.city.address": -1 });

Elasticsearch index operation fails on complex object

I am indexing a data stream into Elasticsearch and I cannot figure out how to normalize the incoming data so that it indexes without error. I have a mapping type "getdatavalues", which is a meta-data query. This meta-data query can return very different-looking responses, but I'm not seeing the difference. The error I get:
{"index":{"_index":"ens_event-2016.03.11","_type":"getdatavalues","_id":"865800029798177_2016_03_11_03_18_12_100037","status":400,"error":"MapperParsingException[object mapping for [getdatavalues] tried to parse field [output] as object, but got EOF, has a concrete value been provided to it?]"}}
when performing:
curl -XPUT 'http://192.168.99.100:80/es/ens_event-2016.03.11/getdatavalues/865800029798177_2016_03_11_03_18_12_100037' -d '{
  "type": "getDataValues",
  "input": {
    "deviceID": {
      "IMEI": "865800029798177",
      "serial-number": "64180258"
    },
    "handle": 644,
    "exprCode": "200000010300140000080001005f00a700000000000000",
    "noRollHandle": "478669308-578452",
    "transactionID": 290
  },
  "timestamp": "2016-03-11T03:18:12.000Z",
  "handle": 644,
  "output": {
    "noRollPubSessHandle": "478669308-578740",
    "publishSessHandle": 1195,
    "status": true,
    "matchFilter": {
      "prefix": "publicExpr.operatorDefined.commercialIdentifier.FoodSvcs.Restaurant.\"A&C Kabul Curry\".\"Rooster Street\"",
      "argValues": {
        "event": "InternationalEvent",
        "hasEvent": "anyEvent"
      }
    },
    "transactionID": 290,
    "validFor": 50
  }
}'
Here's what Elasticsearch has for the mapping:
"getdatavalues" : {
"dynamic_templates" : [ {
"strings" : {
"mapping" : {
"index" : "not_analyzed",
"type" : "string"
},
"match_mapping_type" : "string"
}
} ],
"properties" : {
"handle" : {
"type" : "long"
},
"input" : {
"properties" : {
"deviceID" : {
"properties" : {
"IMEI" : {
"type" : "string",
"index" : "not_analyzed"
},
"serial-number" : {
"type" : "string",
"index" : "not_analyzed"
}
}
},
"exprCode" : {
"type" : "string",
"index" : "not_analyzed"
},
"handle" : {
"type" : "long"
},
"noRollHandle" : {
"type" : "string",
"index" : "not_analyzed"
},
"serviceVersion" : {
"type" : "string",
"index" : "not_analyzed"
},
"transactionID" : {
"type" : "long"
}
}
},
"output" : {
"properties" : {
"matchFilter" : {
"properties" : {
"argValues" : {
"properties" : {
"Interests" : {
"type" : "object"
},
"MerchantId" : {
"type" : "string",
"index" : "not_analyzed"
},
"Queue" : {
"type" : "string",
"index" : "not_analyzed"
},
"Vibe" : {
"type" : "string",
"index" : "not_analyzed"
},
"event" : {
"properties" : {
"event" : {
"type" : "string",
"index" : "not_analyzed"
},
"hasEvent" : {
"type" : "string",
"index" : "not_analyzed"
}
}
},
"hasEvent" : {
"type" : "string",
"index" : "not_analyzed"
},
"interests" : {
"type" : "string",
"index" : "not_analyzed"
}
}
},
"prefix" : {
"type" : "string",
"index" : "not_analyzed"
},
"transactionID" : {
"type" : "long"
},
"validFor" : {
"type" : "long"
}
}
},
"noRollPubSessHandle" : {
"type" : "string",
"index" : "not_analyzed"
},
"publishSessHandle" : {
"type" : "long"
},
"status" : {
"type" : "boolean"
},
"transactionID" : {
"type" : "long"
},
"validFor" : {
"type" : "long"
}
}
},
"timestamp" : {
"type" : "date",
"format" : "dateOptionalTime"
},
"type" : {
"type" : "string",
"index" : "not_analyzed"
}
}
},
Looks like the argValues object doesn't quite agree with your mapping:
"argValues": {
"event": "InternationalEvent",
"hasEvent": "anyEvent"
}
Either this:
"argValues": {
  "event": {
    "event": "InternationalEvent"
  },
  "hasEvent": "anyEvent"
}
Or this:
"argValues": {
  "event": {
    "event": "InternationalEvent",
    "hasEvent": "anyEvent"
  }
}
would both be valid, since the mapping declares argValues.event as an object with event and hasEvent sub-fields, alongside a sibling hasEvent string field.
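To double-check which shape the index expects, you can pull the live mapping for the type (using the same host and index as in the question):

curl -XGET 'http://192.168.99.100:80/es/ens_event-2016.03.11/_mapping/getdatavalues'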

Update nested array objects in a different collection and position in MongoDB

I have documents like the following. How do I update a skillcluster name? Suppose the other document has name: "c" in the 4th position.
{
  Job: {
    post: { name: "x" },
    skill: {
      skillcluster: [
        { name: "c++", id: "23" },
        { name: "c", id: "898" }
      ]
    }
  }
}
{
  Job: {
    post: { name: "x" },
    skill: {
      skillcluster: [
        { name: "c++", id: "23" },
        { name: "java" },
        { name: "python" },
        { name: "c", id: "898" }
      ]
    }
  }
}
You need to query to match the "name" field at the embedded level of the document using "dot notation", and then pass that match with the positional $ operator within the update:
db.collection.update(
  { "Job.skill.skillcluster.name": "c" },
  { "$set": { "Job.skill.skillcluster.$.name": "Simple C" } },
  { "multi": true }
)
Also use the "multi" flag to match and update more than one document. Note that the positional $ operator updates only the first matching array element in each document; see the arrayFilters sketch after the results below for updating every match.
The result will be:
{
  "_id" : ObjectId("55dbfd0ed96d655eb0ed2b4f"),
  "Job" : {
    "post" : {
      "name" : "x"
    },
    "skill" : {
      "skillcluster" : [
        {
          "name" : "c++",
          "id" : "23"
        },
        {
          "name" : "Simple C",
          "id" : "898"
        }
      ]
    }
  }
}
{
  "_id" : ObjectId("55dbfd0ed96d655eb0ed2b50"),
  "Job" : {
    "post" : {
      "name" : "x"
    },
    "skill" : {
      "skillcluster" : [
        {
          "name" : "c++",
          "id" : "23"
        },
        {
          "name" : "java"
        },
        {
          "name" : "python"
        },
        {
          "name" : "Simple C",
          "id" : "898"
        }
      ]
    }
  }
}
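On MongoDB 3.6 and later, a sketch using arrayFilters (not part of the original answer) would update every matching array element rather than only the first one per document:

db.collection.updateMany(
  { "Job.skill.skillcluster.name": "c" },
  { "$set": { "Job.skill.skillcluster.$[elem].name": "Simple C" } },
  { "arrayFilters": [ { "elem.name": "c" } ] }
)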