Delete data in a Druid datasource

I have an existing datasource in Druid. I am trying to delete some records by reindexing the data with a filter and overwriting the existing data.
If the dataSource within ioConfig is my_datasource and the dataSource within the dataSchema is other_datasource, it works just fine and other_datasource shows the expected result.
But when both dataSources (ioConfig and dataSchema) are the same, the existing data does not change according to the filters applied.
Here is the configuration sample:
{
"type": "index_parallel",
"spec": {
"dataSchema": {
"dataSource": "my_datasource",
"timestampSpec": {
"column": "RecordDate",
"format": "YYYY-MM-DD"
},
"dimensionsSpec": {
"dimensions":["RecordDate", "Column1", "Column2"]
},
"metricsSpec": [
],
"granularitySpec": {
"type": "uniform",
"queryGranularity": "none",
"segmentGranularity": "day",
"rollup": "false"
},
"transformSpec" : {
"filter" :{"type":"not", "field":{"type":"expression", "expression":"RecordDate >='1997-02-01' && RecordDate<='1997-02-28'"}},
"transforms" : [ ]
}
},
"ioConfig": {
"type": "index_parallel",
"inputSource": {
"type": "druid",
"dataSource": "my_datasource",
"interval": "1970-01-01/2021-12-26"
},
"appendToExisting":"false"
},
"tuningConfig": {
"type": "index_parallel",
"partitionsSpec": {
"type": "dynamic"
},
"maxNumConcurrentSubTasks": 4
}
}
}
What am I missing here? Is there a better way of achieving what I am trying to do?
Appreciate your help. Thank you.

I got it to work by adding "dropExisting": true to the ioConfig.
I also moved the filter into the druid inputSource within the ioConfig block.
Here is the complete config:
{
"type": "index_parallel",
"spec": {
"dataSchema": {
"dataSource": "my_datasource",
"timestampSpec": {
"column": "RecordDate",
"format": "YYYY-MM-DD"
},
"dimensionsSpec": {
"dimensions":["RecordDate", "Column1", "Column2"]
},
"metricsSpec": [
],
"granularitySpec": {
"type": "uniform",
"queryGranularity": "none",
"segmentGranularity": "day",
"rollup": "false",
"intervals":["1970-01-01/2021-12-27"]
},
"transformSpec" : {
"transforms" : [ ]
}
},
"ioConfig": {
"type": "index_parallel",
"inputSource": {
"type": "druid",
"dataSource": "my_datasource",
"interval": "1970-01-01/2021-12-26",
"filter" :{"type":"not", "field":{"type":"expression", "expression":"RecordDate >='1997-02-01' && RecordDate<='1997-02-28'"}},
},
"appendToExisting":false,
"dropExisting":true
},
"tuningConfig": {
"type": "index_parallel",
"partitionsSpec": {
"type": "dynamic"
},
"maxNumConcurrentSubTasks": 4
}
}
}
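For reference, once saved to a file, a reindexing task spec like this is submitted to the Overlord's task endpoint. A minimal example, assuming a quickstart-style setup where the Overlord listens on localhost:8090 and the spec is saved as reindex_filter.json (both are assumptions; adjust for your cluster):
curl -X POST -H 'Content-Type: application/json' -d @reindex_filter.json http://localhost:8090/druid/indexer/v1/task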

Related

How to group by a single field and return more values together

I'm starting to use Apache Druid but am having some difficulty running native queries (and some SQL too).
1- Is it possible to group by a single column while also returning more fields?
2- How could I group by a single column while returning different grouped items in the same query/row?
Here is the query I'm trying to use:
{
"queryType": "groupBy",
"dataSource": "my-data-source",
"granularity": "all",
"intervals": ["2022-06-27T03:00:00.000Z/2022-06-28T03:00:00.000Z"],
"context:": { "timeout: 30000 },
"dimensions": ["userId"],
"filter": {
"type": "and",
"fields": [
{
"type": "or",
"fields": [{...}]
}
]
},
"aggregations": [
{
"type": "count",
"name": "count"
}
]
}
I tried adding a filtered aggregator inside aggregations: [], but nothing changed.
"aggregations": [
{
"type: "count",
"name": "count"
},
{
"type": "filtered",
"filter": {
"type": "selector",
"dimension": "block_id",
"value": "block1"
},
"aggregator": {
"type": "count",
"name": "block1",
"fieldName": "block_id"
}
}
]
Grouping Aggregator also didn't work.
"aggregations": [
{
"type": "count",
"name": "count"
},
{
"type": "grouping",
"name": "groupedData",
"groupings": ["block_id"]
}
],
Below is the image illustrating the results I'm trying to achieve.
Not sure yet how to get the results in the format you want, but as a start, something like this might be a step:
{
"queryType": "groupBy",
"dataSource": {
"type": "table",
"name": "dataTest"
},
"intervals": {
"type": "intervals",
"intervals": [
"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z"
]
},
"filter": null,
"granularity": {
"type": "all"
},
"dimensions": [
{
"type": "default",
"dimension": "d2_ts2",
"outputType": "STRING"
},
{
"type": "default",
"dimension": "d3_email",
"outputType": "STRING"
}
],
"aggregations": [
{
"type": "count",
"name": "myCount",
}
],
"descending": false
}
I'm curious, what is the use case?
Using a SQL query you can do it this way:
SELECT UserID,
sum(1) FILTER (WHERE BlockId = 'block1') as Block1,
sum(1) FILTER (WHERE BlockId = 'block2') as Block2,
sum(1) FILTER (WHERE BlockId = 'block3') as Block3
FROM inline_data
GROUP BY 1
The Native Query for this (from the explain) is:
{
"queryType": "topN",
"dataSource": {
"type": "table",
"name": "inline_data"
},
"virtualColumns": [
{
"type": "expression",
"name": "v0",
"expression": "1",
"outputType": "LONG"
}
],
"dimension": {
"type": "default",
"dimension": "UserID",
"outputName": "d0",
"outputType": "STRING"
},
"metric": {
"type": "dimension",
"previousStop": null,
"ordering": {
"type": "lexicographic"
}
},
"threshold": 101,
"intervals": {
"type": "intervals",
"intervals": [
"-146136543-09-08T08:23:32.096Z/146140482-04-24T15:36:27.903Z"
]
},
"filter": null,
"granularity": {
"type": "all"
},
"aggregations": [
{
"type": "filtered",
"aggregator": {
"type": "longSum",
"name": "a0",
"fieldName": "v0",
"expression": null
},
"filter": {
"type": "selector",
"dimension": "BlockId",
"value": "block1",
"extractionFn": null
},
"name": "a0"
},
{
"type": "filtered",
"aggregator": {
"type": "longSum",
"name": "a1",
"fieldName": "v0",
"expression": null
},
"filter": {
"type": "selector",
"dimension": "BlockId",
"value": "block2",
"extractionFn": null
},
"name": "a1"
},
{
"type": "filtered",
"aggregator": {
"type": "longSum",
"name": "a2",
"fieldName": "v0",
"expression": null
},
"filter": {
"type": "selector",
"dimension": "BlockId",
"value": "block3",
"extractionFn": null
},
"name": "a2"
}
],
"postAggregations": [],
"context": {
"populateCache": false,
"sqlOuterLimit": 101,
"sqlQueryId": "bb92e899-c127-49b0-be1b-d4b38909d166",
"useApproximateCountDistinct": false,
"useApproximateTopN": false,
"useCache": false,
"useNativeQueryExplain": true
},
"descending": false
}
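As a usage note, the SQL form above can also be posted directly to Druid's SQL endpoint instead of being translated to a native query. A minimal sketch, assuming a Broker on localhost:8082 (adjust host/port for your cluster) and keeping just one FILTER clause for brevity:
curl -X POST -H 'Content-Type: application/json' \
  http://localhost:8082/druid/v2/sql/ \
  -d '{"query": "SELECT UserID, SUM(1) FILTER (WHERE BlockId = '\''block1'\'') AS Block1 FROM inline_data GROUP BY 1"}'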

Getting an error on null and empty strings while copying a CSV file from a blob container to Azure SQL DB

I tried every combination of data types for my data, but each time my Data Factory pipeline gives me this error:
{
"errorCode": "2200",
"message": "ErrorCode=UserErrorColumnNameNotAllowNull,'Type=Microsoft.DataTransfer.Common.Shared.HybridDeliveryException,Message=Empty or Null string found in Column Name 2. Please make sure column name not null and try again.,Source=Microsoft.DataTransfer.Common,'",
"failureType": "UserError",
"target": "xxx",
"details": []
}
My Copy activity source code is something like this:
{
"name": "xxx",
"description": "uuu",
"type": "Copy",
"dependsOn": [],
"policy": {
"timeout": "7.00:00:00",
"retry": 0,
"retryIntervalInSeconds": 30,
"secureOutput": false,
"secureInput": false
},
"userProperties": [],
"typeProperties": {
"source": {
"type": "DelimitedTextSource",
"storeSettings": {
"type": "AzureBlobStorageReadSettings",
"recursive": true,
"wildcardFileName": "*"
},
"formatSettings": {
"type": "DelimitedTextReadSettings"
}
},
"sink": {
"type": "AzureSqlSink"
},
"enableStaging": false,
"translator": {
"type": "TabularTranslator",
"mappings": [
{
"source": {
"name": "populationId",
"type": "Guid"
},
"sink": {
"name": "PopulationID",
"type": "String"
}
},
{
"source": {
"name": "inputTime",
"type": "DateTime"
},
"sink": {
"name": "inputTime",
"type": "DateTime"
}
},
{
"source": {
"name": "inputCount",
"type": "Decimal"
},
"sink": {
"name": "inputCount",
"type": "Decimal"
}
},
{
"source": {
"name": "inputBiomass",
"type": "Decimal"
},
"sink": {
"name": "inputBiomass",
"type": "Decimal"
}
},
{
"source": {
"name": "inputNumber",
"type": "Decimal"
},
"sink": {
"name": "inputNumber",
"type": "Decimal"
}
},
{
"source": {
"name": "utcOffset",
"type": "String"
},
"sink": {
"name": "utcOffset",
"type": "Int32"
}
},
{
"source": {
"name": "fishGroupName",
"type": "String"
},
"sink": {
"name": "fishgroupname",
"type": "String"
}
},
{
"source": {
"name": "yearClass",
"type": "String"
},
"sink": {
"name": "yearclass",
"type": "String"
}
}
]
}
},
"inputs": [
{
"referenceName": "DelimitedTextFTDimensions",
"type": "DatasetReference"
}
],
"outputs": [
{
"referenceName": "AzureSqlTable1",
"type": "DatasetReference"
}
]
}
Can anyone please help me understand the issue? I see in some blogs they suggest using treatnullasempty, but I am not allowed to modify the JSON. Is there a way to do that?
I suggest using a Data Flow Derived Column transformation; a Derived Column can help you build an expression to replace the null column.
For example:
Derived Column: if Column_2 is null, return 'dd':
iifNull(Column_2,'dd')
Then map the column.
Reference: Data transformation expressions in mapping data flow
Hope this helps.
Fixed it. It was an easy fix: one of my columns in the destination was marked as NOT NULL; I changed it to allow NULL and it worked.
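For anyone hitting the same thing, the fix boils down to relaxing the NOT NULL constraint on the destination column. A T-SQL sketch with a hypothetical table, column, and data type (substitute your actual destination schema):
-- Hypothetical table, column, and type: match them to your actual destination schema
ALTER TABLE dbo.MyDestinationTable
ALTER COLUMN yearclass NVARCHAR(50) NULL;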

Configuration for Druid Kafka ingestion

I want to set up Druid Kafka ingestion, but even after configuring common.runtime.properties and adding the druid-kafka-indexing-service extension, it is still giving me an error. Please help me out with this. My data is in CSV format.
{
"type": "kafka",
"spec": {
"dataSchema": {
"dataSource": "london_crime_by_lsoa",
"parser": {
"type": "string",
"parseSpec": {
"format": "csv",
"dimensionsSpec": {
"dimensions": [
"lsoa_code",
"borough",
"major_category",
"minor_category",
{
"name": "value",
"type": "long"
},
{
"name": "year",
"type": "long"
},
{
"name": "month",
"type": "long"
}
]
},
"timestampSpec": {
"column": "year",
"format": "auto"
},
"columns": [
"lsoa_code",
"borough",
"major_category",
"minor_category",
"value",
"year",
"month"
]
}
},
"metricsSpec": [],
"granularitySpec": {
"type": "uniform",
"segmentGranularity": "year",
"queryGranularity": "NONE",
"rollup": false
}
},
"ioConfig": {
"topic": "london_crime_by_lsoa",
"taskDuration": "PT10M",
"useEarliestOffset": "true",
"consumerProperties": {
"bootstrap.servers": "localhost:9092"
}
},
"tuningConfig": {
"type": "kafka",
"maxRowsPerSegment": 500000
}
}
}
After executing this command:
curl -X POST -H 'Content-Type: application/json' -d @quickstart/tutorial/crime_supervisor.json http://localhost:8090/druid/indexer/v1/supervisor
I am getting this error:
{"error":"Instantiation of [simple type, class org.apache.druid.indexing.kafka.supervisor.KafkaSupervisorSpec] value failed: dataSchema"}
I think it's a problem with the way you specified spec in your JSON.
You have to specify dataSchema directly in your JSON instead of as a child attribute of spec.
Here's the format you should follow:
{
"type": "kafka",
"dataSchema": {},
"tuningConfig": {},
"ioConfig": {}
}
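As a concrete sketch, here is the spec from the question reshaped into that layout. The values are copied from the question and the structure follows the legacy Kafka supervisor format; it has not been verified against a running cluster:
{
  "type": "kafka",
  "dataSchema": {
    "dataSource": "london_crime_by_lsoa",
    "parser": {
      "type": "string",
      "parseSpec": {
        "format": "csv",
        "timestampSpec": { "column": "year", "format": "auto" },
        "dimensionsSpec": {
          "dimensions": [
            "lsoa_code",
            "borough",
            "major_category",
            "minor_category",
            { "name": "value", "type": "long" },
            { "name": "year", "type": "long" },
            { "name": "month", "type": "long" }
          ]
        },
        "columns": ["lsoa_code", "borough", "major_category", "minor_category", "value", "year", "month"]
      }
    },
    "metricsSpec": [],
    "granularitySpec": {
      "type": "uniform",
      "segmentGranularity": "year",
      "queryGranularity": "NONE",
      "rollup": false
    }
  },
  "ioConfig": {
    "topic": "london_crime_by_lsoa",
    "taskDuration": "PT10M",
    "useEarliestOffset": true,
    "consumerProperties": { "bootstrap.servers": "localhost:9092" }
  },
  "tuningConfig": {
    "type": "kafka",
    "maxRowsPerSegment": 500000
  }
}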

Azure Data Factory V2 Dataset Dynamic Folder

In Azure Data Factory (V1) I was able to create a slice and store the output in a specific folder (i.e. {Year}/{Month}/{Day}). See the code below.
How do you create the same type of slice in Azure Data Factory V2? I did find that you have to create a parameter, but I was unable to figure out how to pass the parameter.
"folderPath": "#{dataset().path}",
"parameters": {
"path": {
"type": "String"
Here is original ADF V1 code.
{
"name": "EMS_EMSActivations_L1_Snapshot",
"properties": {
"published": false,
"type": "AzureDataLakeStore",
"linkedServiceName": "SalesIntelligence_ADLS_LS",
"typeProperties": {
"fileName": "EMS.FACT_EMSActivations_WTA.tsv",
"folderPath": "/Snapshots/EMS/FACT_EMSActivations_WTA/{Year}/{Month}/{Day}",
"format": {
"type": "TextFormat",
"rowDelimiter": "␀",
"columnDelimiter": "\t",
"nullValue": "#NULL#",
"quoteChar": "\""
},
"partitionedBy": [
{
"name": "Year",
"value": {
"type": "DateTime",
"date": "SliceStart",
"format": "yyyy"
}
},
{
"name": "Month",
"value": {
"type": "DateTime",
"date": "SliceStart",
"format": "MM"
}
},
{
"name": "Day",
"value": {
"type": "DateTime",
"date": "SliceStart",
"format": "dd"
}
},
{
"name": "Hour",
"value": {
"type": "DateTime",
"date": "SliceStart",
"format": "HH"
}
},
{
"name": "Minute",
"value": {
"type": "DateTime",
"date": "SliceStart",
"format": "mm"
}
}
]
},
"availability": {
"frequency": "Day",
"interval": 1
}
}
}
Here is how you create a dynamic folder path when importing data from SQL into ADL. Look at the folderPath line.
{
"name": "EBC_BriefingActivitySummary_L1_Snapshot",
"properties": {
"linkedServiceName": {
"referenceName": "SIAzureDataLakeStore",
"type": "LinkedServiceReference"
},
"type": "AzureDataLakeStoreFile",
"typeProperties": {
"format": {
"type": "TextFormat",
"columnDelimiter": ",",
"rowDelimiter": "",
"nullValue": "\\N",
"treatEmptyAsNull": false,
"firstRowAsHeader": false
},
"fileName": {
"value": "EBC.rpt_BriefingActivitySummary.tsv",
"type": "Expression"
},
"folderPath": {
"value": "#concat('/Snapshots/EBC/rpt_BriefingActivitySummary/', formatDateTime(pipeline().parameters.scheduledRunTime, 'yyyy'), '/', formatDateTime(pipeline().parameters.scheduledRunTime, 'MM'), '/', formatDateTime(pipeline().parameters.scheduledRunTime, 'dd'), '/')",
"type": "Expression"
}
}
}
}
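Note that the folderPath expression above assumes the pipeline exposes a scheduledRunTime parameter and that the trigger passes a value into it. A minimal sketch of those two pieces; the parameter name matches the expression above, while the trigger expression is an assumption (a schedule trigger exposes @trigger().scheduledTime):
In the pipeline definition:
"parameters": {
    "scheduledRunTime": {
        "type": "String"
    }
}
In the trigger's pipeline reference:
"parameters": {
    "scheduledRunTime": "@trigger().scheduledTime"
}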
Step 1: Use WindowStartTime / WindowEndTime in the folderPath
"folderPath": {
"value": "<<path>>/#{formatDateTime(pipeline().parameters.windowStart,'yyyy')}-#{formatDateTime(pipeline().parameters.windowStart,'MM')}-#{formatDateTime(pipeline().parameters.windowStart,'dd')}/#{formatDateTime(pipeline().parameters.windowStart,'HH')}/",
"type": "Expression"
}
Step 2: Add the parameters to the pipeline JSON
"parameters": {
"windowStart": {
"type": "String"
},
"windowEnd": {
"type": "String"
}
}
Step 3: Add the run parameters in the tumbling window trigger
(these populate the parameters defined in Step 2)
"parameters": {
"windowStart": {
"type": "Expression",
"value": "#trigger().outputs.windowStartTime"
},
"windowEnd": {
"type": "Expression",
"value": "#trigger().outputs.windowEndTime"
}
}
For more details, refer to this link:
https://github.com/MicrosoftDocs/azure-docs/blob/master/articles/data-factory/how-to-create-tumbling-window-trigger.md
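Putting those steps together, a tumbling window trigger definition along the lines of that article might look like the sketch below. The trigger name, pipeline name, frequency, interval, and start time are placeholders; the windowStart/windowEnd parameters match Steps 2 and 3:
{
    "name": "MyTumblingWindowTrigger",
    "properties": {
        "type": "TumblingWindowTrigger",
        "runtimeState": "Started",
        "typeProperties": {
            "frequency": "Hour",
            "interval": 24,
            "startTime": "2021-01-01T00:00:00Z",
            "maxConcurrency": 1
        },
        "pipeline": {
            "pipelineReference": {
                "type": "PipelineReference",
                "referenceName": "MyPipeline"
            },
            "parameters": {
                "windowStart": {
                    "type": "Expression",
                    "value": "@trigger().outputs.windowStartTime"
                },
                "windowEnd": {
                    "type": "Expression",
                    "value": "@trigger().outputs.windowEndTime"
                }
            }
        }
    }
}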

Azure Data Factory: pipeline start date different from the scheduled start

I am going crazy over this issue. I am running Azure Data Factory V1 and need to schedule a copy job every week from 01/03/2009 through 01/31/2009, so I defined this schedule on the pipeline:
"start": "2009-01-03T00:00:00Z",
"end": "2009-01-31T00:00:00Z",
"isPaused": false,
Monitoring the pipeline, Data Factory scheduled these dates:
12/29/2008
01/05/2009
01/12/2009
01/19/2009
01/26/2009
instead of the schedule I wanted:
01/03/2009
01/10/2009
01/17/2009
01/24/2009
01/31/2009
Why doesn't the start date defined on the pipeline correspond to the schedule dates shown in the monitor?
Many thanks!
Here is the JSON Pipeline:
{
"name": "CopyPipeline-blob2datalake",
"properties": {
"description": "copy from blob storage to datalake directory structure",
"activities": [
{
"type": "DataLakeAnalyticsU-SQL",
"typeProperties": {
"scriptPath": "script/dat230.usql",
"scriptLinkedService": "AzureStorageLinkedService",
"degreeOfParallelism": 5,
"priority": 100,
"parameters": {
"salesfile": "$$Text.Format('/DAT230/{0:yyyy}/{0:MM}/{0:dd}.txt', Date.StartOfDay (SliceStart))",
"lineitemsfile": "$$Text.Format('/dat230/dataloads/{0:yyyy}/{0:MM}/{0:dd}/factinventory/fact.csv', Date.StartOfDay (SliceStart))"
}
},
"inputs": [
{
"name": "InputDataset-dat230"
}
],
"outputs": [
{
"name": "OutputDataset-dat230"
}
],
"policy": {
"timeout": "01:00:00",
"concurrency": 1,
"retry": 1
},
"scheduler": {
"frequency": "Day",
"interval": 7
},
"name": "DataLakeAnalyticsUSqlActivityTemplate",
"linkedServiceName": "AzureDataLakeAnalyticsLinkedService"
}
],
"start": "2009-01-03T00:00:00Z",
"end": "2009-01-11T00:00:00Z",
"isPaused": false,
"hubName": "edxlearningdf_hub",
"pipelineMode": "Scheduled"
}
}
and here are the datasets:
{
"name": "InputDataset-dat230",
"properties": {
"structure": [
{
"name": "Date",
"type": "Datetime"
},
{
"name": "StoreID",
"type": "Int64"
},
{
"name": "StoreName",
"type": "String"
},
{
"name": "ProductID",
"type": "Int64"
},
{
"name": "ProductName",
"type": "String"
},
{
"name": "Color",
"type": "String"
},
{
"name": "Size",
"type": "String"
},
{
"name": "Manufacturer",
"type": "String"
},
{
"name": "OnHandQuantity",
"type": "Int64"
},
{
"name": "OnOrderQuantity",
"type": "Int64"
},
{
"name": "SafetyStockQuantity",
"type": "Int64"
},
{
"name": "UnitCost",
"type": "Double"
},
{
"name": "DaysInStock",
"type": "Int64"
},
{
"name": "MinDayInStock",
"type": "Int64"
},
{
"name": "MaxDayInStock",
"type": "Int64"
}
],
"published": false,
"type": "AzureBlob",
"linkedServiceName": "Source-BlobStorage-dat230",
"typeProperties": {
"fileName": "*.txt.gz",
"folderPath": "dat230/{year}/{month}/{day}/",
"format": {
"type": "TextFormat",
"columnDelimiter": "\t",
"firstRowAsHeader": true
},
"partitionedBy": [
{
"name": "year",
"value": {
"type": "DateTime",
"date": "WindowStart",
"format": "yyyy"
}
},
{
"name": "month",
"value": {
"type": "DateTime",
"date": "WindowStart",
"format": "MM"
}
},
{
"name": "day",
"value": {
"type": "DateTime",
"date": "WindowStart",
"format": "dd"
}
}
],
"compression": {
"type": "GZip"
}
},
"availability": {
"frequency": "Day",
"interval": 7
},
"external": true,
"policy": {}
}
}
{
"name": "OutputDataset-dat230",
"properties": {
"structure": [
{
"name": "Date",
"type": "Datetime"
},
{
"name": "StoreID",
"type": "Int64"
},
{
"name": "StoreName",
"type": "String"
},
{
"name": "ProductID",
"type": "Int64"
},
{
"name": "ProductName",
"type": "String"
},
{
"name": "Color",
"type": "String"
},
{
"name": "Size",
"type": "String"
},
{
"name": "Manufacturer",
"type": "String"
},
{
"name": "OnHandQuantity",
"type": "Int64"
},
{
"name": "OnOrderQuantity",
"type": "Int64"
},
{
"name": "SafetyStockQuantity",
"type": "Int64"
},
{
"name": "UnitCost",
"type": "Double"
},
{
"name": "DaysInStock",
"type": "Int64"
},
{
"name": "MinDayInStock",
"type": "Int64"
},
{
"name": "MaxDayInStock",
"type": "Int64"
}
],
"published": false,
"type": "AzureDataLakeStore",
"linkedServiceName": "Destination-DataLakeStore-dat230",
"typeProperties": {
"fileName": "txt.gz",
"folderPath": "dat230/dataloads/{year}/{month}/{day}/factinventory/",
"format": {
"type": "TextFormat",
"columnDelimiter": "\t"
},
"partitionedBy": [
{
"name": "year",
"value": {
"type": "DateTime",
"date": "WindowStart",
"format": "yyyy"
}
},
{
"name": "month",
"value": {
"type": "DateTime",
"date": "WindowStart",
"format": "MM"
}
},
{
"name": "day",
"value": {
"type": "DateTime",
"date": "WindowStart",
"format": "dd"
}
}
]
},
"availability": {
"frequency": "Day",
"interval": 7
},
"external": false,
"policy": {}
}
}
You need to look at the time slices for the datasets and their activity.
The pipeline schedule (badly named) only defines the start and end of the period within which activities can provision and run their time slices.
ADF V1 doesn't use a recursive schedule like the SQL Server Agent. Each execution has to be provisioned at an interval on the timeline (the schedule) you create.
For example, if your pipeline start and end span 1 year but your dataset and activity have a frequency of 'Month' and an interval of 1, you will only get 12 executions of whatever is happening.
Apologies, but the concept of time slices is a little difficult to explain if you aren't already familiar with it. Maybe read this post: https://blogs.msdn.microsoft.com/ukdataplatform/2016/05/03/demystifying-activity-scheduling-with-azure-data-factory/
Hope this helps.
Would you share the JSON for the datasets and the pipeline with us? It would be easier to help you with that in hand.
In the meantime, check whether you are using "style": "StartOfInterval" in the scheduler property of the activity, and also check whether you are using an offset (see the sketch below).
Cheers!
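To illustrate the suggestion above, here is a sketch of the scheduler block on the activity using style and anchorDateTime to pin the weekly slices to 3 January. The property names are standard ADF V1 scheduler/availability settings, while the specific values are assumptions for this case; the dataset availability blocks take the same properties:
"scheduler": {
    "frequency": "Day",
    "interval": 7,
    "style": "StartOfInterval",
    "anchorDateTime": "2009-01-03T00:00:00Z"
}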