Spring cron expression - how to get last execution time - scheduled-tasks

In Spring I can get the next execution time using:
final CronSequenceGenerator generator = new CronSequenceGenerator(cronExpression);
final Date nextExecutionDate = generator.next(new Date());
But how do I get the last execution time from the cron expression?

The following solution uses the newer CronExpression API (CronSequenceGenerator is deprecated as of Spring 5.3) and binary-searches between a lower bound and the next execution time to find the last execution time.
import static org.junit.jupiter.api.Assertions.assertEquals;
import static org.junit.jupiter.api.Assertions.assertNotNull;

import java.time.Duration;
import java.time.ZonedDateTime;
import java.util.Objects;

import org.junit.jupiter.api.Test;
import org.springframework.scheduling.support.CronExpression;

public class CronTest {

    @Test
    void cronTest() {
        CronExpression expression = CronExpression.parse("1 0/10 * * * ?");
        ZonedDateTime nextTime = expression.next(ZonedDateTime.now());
        assertNotNull(nextTime);
        ZonedDateTime prevTime = getPrevTime(expression, nextTime, nextTime.minusDays(1));
        assertEquals(nextTime.minusMinutes(10), prevTime);
    }

    // Binary-searches (at second granularity) for the earliest instant whose
    // next execution is still nextTime; for a cron that fires on whole
    // seconds, that instant is the previous execution time.
    private ZonedDateTime getPrevTime(CronExpression expression, ZonedDateTime nextTime,
            ZonedDateTime mayPrevTime) {
        ZonedDateTime start = mayPrevTime;
        ZonedDateTime end = nextTime;
        while (start.isBefore(end)) {
            ZonedDateTime middle = start.plusSeconds(Duration.between(start, end).getSeconds() / 2);
            if (Objects.equals(expression.next(middle), nextTime)) {
                // No execution strictly between middle and nextTime: the
                // previous execution is at or before middle.
                end = middle;
            } else {
                // There is still an execution between middle and nextTime:
                // the previous execution is after middle.
                start = middle.plusSeconds(1);
            }
        }
        return start;
    }
}
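To get the last execution time relative to the current moment (rather than relative to a known future fire time), the same helper can be reused; a small sketch, assuming the schedule has fired at least once within the past day:

CronExpression expression = CronExpression.parse("1 0/10 * * * ?");
ZonedDateTime now = ZonedDateTime.now();
ZonedDateTime next = expression.next(now);
// next is the first execution after now, so the execution preceding next is
// the most recent one at or before now. Widen the lower bound if the
// expression can go longer than a day between firings.
ZonedDateTime lastExecution = getPrevTime(expression, next, now.minusDays(1));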

Related

Java-Spark-Mongo: filter(dataset.col(newTime).$greater(oldTime)) not running on full data set

I have written Java Spark code with the Mongo connector. It is supposed to fetch all rows from MongoDB where the column createdDate is greater than the previous run's createdDate (a high-water-mark value for each run, which I am storing in Oracle; initially the high-water-mark value in Oracle is 1900-01-01 00:00:00.000).
This createdDate column is of ISODate type in MongoDB.
In my MongoDB data, the max value stored for this createdDate column is 2018-04-11 01:43:20.165.
But the filter in the code is not working as desired: on the first run it sometimes fetches only up to 2018-03-30 21:48:59.519, and then on the second or third run it fetches up to the max value (2018-04-11 01:43:20.165).
Ideally this should happen on the first run itself, when the initial high-water-mark value is 1900-01.....
Here is the code:
package mongo;

import java.net.URI;
import java.sql.Connection;
import java.sql.Date;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import java.sql.Timestamp;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.util.Iterator;
import java.util.List;

import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.DataFrameWriter;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;
import org.apache.spark.sql.SparkSession;
import org.bson.Document;

import com.mongodb.spark.MongoSpark;
import com.mongodb.spark.rdd.api.java.JavaMongoRDD;

import static org.apache.spark.sql.functions.*;

public final class MongoRead {

    private static Connection con = null;

    private static String readHighWaterMark(String table, String oraConn, String oraUser, String oraPswd)
            throws Exception {
        String highWaterMarkValue = "";
        try {
            con = DriverManager.getConnection(oraConn, oraUser, oraPswd);
            Statement stmt = con.createStatement();
            ResultSet rs = stmt.executeQuery(
                    "select * from difa.HIGH_WATER_MARK_TABLE where table_nm='" + table + "'");
            while (rs.next()) {
                highWaterMarkValue = rs.getString(3);
            }
        } catch (Exception e) {
            e.printStackTrace();
            con.close();
        }
        return highWaterMarkValue;
    }

    private static void setHighWaterMark(String key, String value) throws Exception {
        PreparedStatement pStmt = con.prepareStatement(
                "UPDATE high_water_mark_table SET high_water_mark_VALUE='" + value + "' where table_nm='" + key + "'");
        int i = pStmt.executeUpdate();
        System.out.println(i + " records updated");
    }

    public static void main(final String[] args) throws Exception {
        if (args.length < 8) {
            System.out.println("Please provide correct inputs");
            System.exit(1);
        }
        String mongoAddress = args[0];
        String clusterAddress = args[1];
        String oraConn = args[2];
        String oraUser = args[3];
        String oraPswd = args[4];
        String tableNm = args[5];
        String highWaterCol = args[6];
        String loadType = args[7];

        SparkSession spark = SparkSession.builder()
                .master("local")
                .appName("MongoSparkRecordReader")
                .config("spark.mongodb.input.uri", mongoAddress)
                .config("spark.mongodb.output.uri", mongoAddress)
                .getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        try {
            FileSystem fs = FileSystem.get(new URI(clusterAddress), jsc.hadoopConfiguration());
            fs.delete(new Path(clusterAddress), true);
        } catch (Exception e) {
            e.printStackTrace();
        }

        /* ******** Read data from MongoDB ******** */
        Dataset<Row> dataset = MongoSpark.load(jsc).toDF();

        if (loadType.equalsIgnoreCase("I")) {
            String highWaterMark = readHighWaterMark(tableNm, oraConn, oraUser, oraPswd);
            System.out.println("============HIGH_WATER_MARK_VALUE: " + highWaterMark);
            Timestamp oldTime = Timestamp.valueOf(highWaterMark.replace("T", " ").replace("Z", ""));

            // Fetch records where createdDate is greater than the previous high-water mark.
            Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(oldTime)).persist();
            filtered.toJSON().write().text(clusterAddress);

            // Calculate the MAX(createdDate) in the fetched dataset.
            Dataset<Row> maxHighWaterRow = filtered.agg(max(filtered.col(highWaterCol)).alias("newHighWater")).persist();
            List<Timestamp> newHighWaterValue =
                    maxHighWaterRow.select("newHighWater").as(Encoders.TIMESTAMP()).collectAsList();
            Timestamp maxHighWaterMarkValue = newHighWaterValue.iterator().next();
            SimpleDateFormat dtFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
            Timestamp oldDate = Timestamp.valueOf(highWaterMark.replace('T', ' ').replace("Z", ""));

            // Update HIGH_WATER_MARK_VALUE if a greater value is detected.
            if (maxHighWaterMarkValue != null && maxHighWaterMarkValue.after(oldDate)) {
                setHighWaterMark(tableNm, dtFormat.format(maxHighWaterMarkValue).replace(" ", "T").concat("Z"));
            }
        } else {
            dataset.toJSON().write().text(clusterAddress);
        }
        con.close();
        jsc.close();
    }
}
Any idea why the filter with $greater is not fetching the records correctly?
I fixed this by adding .persist() to the Dataset:
/* ********Read data from MongoDB******* */
Dataset<Row> dataset = MongoSpark.load(jsc).toDF().persist();
....
..
...
Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(oldTime)).persist();
I don't know why the filter was not running on the whole dataset without persist().
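One plausible explanation (my assumption, not something the connector documentation confirms): Spark evaluates datasets lazily and recomputes them for every action, so without persist() each action may re-read MongoDB and see a different snapshot of a collection that is still being written to. A minimal sketch of pinning the data once, reusing the jsc from the code above and an explicit storage level:

import org.apache.spark.storage.StorageLevel;

// Cache the Mongo-backed dataset so the filtered write and the max()
// aggregation both operate on the same materialized data instead of
// re-querying MongoDB per action.
Dataset<Row> dataset = MongoSpark.load(jsc).toDF()
        .persist(StorageLevel.MEMORY_AND_DISK());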

How to solve the following error in Eclipse?

I have the following code, which creates a graph from a Wikipedia index. The code tries to import the Wikipedia graph into a graph.db directory.
// Copyright (c) 2012 Mirko Nasato
//
package org.graphipedia.dataimport.neo4j;

import java.util.HashMap;
import java.util.Map;

import org.neo4j.unsafe.batchinsert.BatchInserter;
import org.neo4j.unsafe.batchinsert.BatchInserters;

public class ImportGraph {

    private final BatchInserter inserter;
    private final Map<String, Long> inMemoryIndex;

    public ImportGraph(String dataDir) {
        inserter = BatchInserters.inserter(dataDir);
        inserter.createDeferredSchemaIndex(WikiLabel.Page).on("title").create();
        inMemoryIndex = new HashMap<String, Long>();
    }

    public static void main(String[] args) throws Exception {
        if (args.length < 2) {
            System.out.println("USAGE: ImportGraph <input-file> <data-dir>");
            System.exit(255);
        }
        String inputFile = args[0];
        String dataDir = args[1];
        ImportGraph importer = new ImportGraph(dataDir);
        importer.createNodes(inputFile);
        importer.createRelationships(inputFile);
        importer.finish();
    }

    public void createNodes(String fileName) throws Exception {
        System.out.println("Importing pages...");
        NodeCreator nodeCreator = new NodeCreator(inserter, inMemoryIndex);
        long startTime = System.currentTimeMillis();
        nodeCreator.parse(fileName);
        long elapsedSeconds = (System.currentTimeMillis() - startTime) / 1000;
        System.out.printf("\n%d pages imported in %d seconds.\n", nodeCreator.getPageCount(), elapsedSeconds);
    }

    public void createRelationships(String fileName) throws Exception {
        System.out.println("Importing links...");
        RelationshipCreator relationshipCreator = new RelationshipCreator(inserter, inMemoryIndex);
        long startTime = System.currentTimeMillis();
        relationshipCreator.parse(fileName);
        long elapsedSeconds = (System.currentTimeMillis() - startTime) / 1000;
        System.out.printf("\n%d links imported in %d seconds; %d broken links ignored\n",
                relationshipCreator.getLinkCount(), elapsedSeconds, relationshipCreator.getBadLinkCount());
    }

    public void finish() {
        inserter.shutdown();
    }
}
However, every time I run this code, I run into the following error.
Exception in thread "main" java.lang.Error: Unresolved compilation problem:
The method inserter(File) in the type BatchInserters is not applicable for the arguments (String)
at org.graphipedia.dataimport.neo4j.ImportGraph.<init>(ImportGraph.java:36)
at org.graphipedia.dataimport.neo4j.ImportGraph.main(ImportGraph.java:48)
Based on this javadoc:
https://neo4j.com/docs/java-reference/current/javadocs/org/neo4j/unsafe/batchinsert/BatchInserters.html
BatchInserters.inserter needs a File, not a "path/to/dir" String. You will need to create a File object and pass it in.
Code:
Add an import at the top:
import java.io.File;
Then replace the following line
inserter = BatchInserters.inserter(dataDir);
with this:
inserter = BatchInserters.inserter(new File(dataDir));
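Putting it together, the adjusted constructor would look like the sketch below. One caveat to check against your Neo4j version: some 3.x releases declare IOException on BatchInserters.inserter(File), in which case the constructor has to declare or handle it, as here.

import java.io.File;
import java.io.IOException;

public ImportGraph(String dataDir) throws IOException {
    // Hand the batch inserter a File for the store directory instead of the raw path string.
    inserter = BatchInserters.inserter(new File(dataDir));
    inserter.createDeferredSchemaIndex(WikiLabel.Page).on("title").create();
    inMemoryIndex = new HashMap<String, Long>();
}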

Need to convert my server timestamp string to another timezone in 'yyyy-MM-dd' format in java

I have an input string like:
billDate="2016-03-16T10:48:59+05:30" (please note the T in between).
Now I want to convert this to another time zone (America/New_York).
My final result should be like 16 March 2016 or 15 March 2016, depending on the hour value.
I saw many examples but got no hint on how to convert the above datetime string to another string for America/New_York.
Can somebody help me with this?
I tried the code below, but it always gives 16 March for any hour value.
import java.text.ParseException;
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class Test {

    public static void main(String[] args) {
        String output = formatDate("2016-03-16T11:27:58+05:30");
        System.out.println(output);
    }

    public static String formatDate(String inputDate) {
        try {
            SimpleDateFormat sdf = new SimpleDateFormat("yyyy-MM-dd");
            sdf.setTimeZone(TimeZone.getTimeZone("America/New_York"));
            Date parsedDate = sdf.parse(inputDate);
            return sdf.format(parsedDate);
        } catch (ParseException e) {
            // handle exception
        }
        return null;
    }
}
After more attempts I finally got code that solves the issue.
The code below works:
import java.text.SimpleDateFormat;
import java.util.Date;
import java.util.TimeZone;

public class Test {

    // Parses the input including its UTC offset (the XXX pattern requires Java 7+).
    public static final SimpleDateFormat fDateTime =
            new SimpleDateFormat("yyyy-MM-dd'T'HH:mm:ssXXX");

    // Formats only the date part, rendered in the target time zone.
    public static final SimpleDateFormat fDateOnly =
            new SimpleDateFormat("yyyy-MM-dd");

    public static void main(String[] args) {
        String output = getFormattedDate("2016-03-16T23:27:58+05:30");
        System.out.println(output); // prints 2016-03-16 (13:57:58 in New York)
    }

    public static String getFormattedDate(String inputDate) {
        try {
            Date dateAfterParsing = fDateTime.parse(inputDate);
            fDateOnly.setTimeZone(TimeZone.getTimeZone("America/New_York"));
            return fDateOnly.format(dateAfterParsing);
        } catch (Exception e) {
            e.printStackTrace();
        }
        return null;
    }
}
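For reference, on Java 8 and later the same conversion is shorter and less error-prone with java.time; a minimal sketch (class and variable names are my own):

import java.time.OffsetDateTime;
import java.time.ZoneId;
import java.time.format.DateTimeFormatter;

public class BillDateConverter {

    public static void main(String[] args) {
        // Parse the ISO-8601 input, offset included.
        OffsetDateTime billDate = OffsetDateTime.parse("2016-03-16T10:48:59+05:30");

        // Shift to New York time, then keep only the local date there.
        String result = billDate.atZoneSameInstant(ZoneId.of("America/New_York"))
                .format(DateTimeFormatter.ofPattern("yyyy-MM-dd"));

        System.out.println(result); // 2016-03-16; early-morning inputs yield 2016-03-15
    }
}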

Get number of attempts in Quartz

Can anyone please tell me how I can get the current attempt count in Quartz?
Example: if the Quartz scheduler is started with a repeat count of 5, I want to get the current repeat count.
Here is the example I am trying:
import java.text.ParseException;

import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.Scheduler;
import org.quartz.SchedulerException;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.impl.StdSchedulerFactory;

public class SimpleTriggerExample implements Job {

    int count = 0;
    JobDetail job = null;
    JobDataMap data = null;

    public static void main(String[] args) throws Exception {
        new SimpleTriggerExample().schedule();
    }

    public void schedule() throws ParseException, SchedulerException {
        job = JobBuilder.newJob(SimpleTriggerExample.class)
                .withIdentity("dummyJobName", "group1").build();
        Trigger trigger = TriggerBuilder
                .newTrigger()
                .withIdentity("dummyTriggerName", "group1")
                .withSchedule(SimpleScheduleBuilder.simpleSchedule()
                        .withIntervalInSeconds(10).withRepeatCount(3))
                .build();
        System.out.println("before in main jobdatamap");
        Scheduler scheduler = new StdSchedulerFactory().getScheduler();
        scheduler.start();
        scheduler.scheduleJob(job, trigger);
    }

    public void execute(JobExecutionContext context) throws JobExecutionException {
        // count
        data = context.getJobDetail().getJobDataMap();
        System.out.println("after jobdatamap");
        int count1 = data.getInt("EXECUTION_COUNT");
        System.out.println("count1-->before" + count1);
        count1++;
        System.out.println("count1-->after" + count1);
        job.getJobDataMap().put("EXECUTION_COUNT", count1);
        count = count1;
        System.out.println("count" + count);
    }
}
Use the JobDataMap along with the @PersistJobDataAfterExecution annotation.
Make sure that when you modify data in the JobDataMap, you write it back under the same key.
If you do this, the attempt count is persisted across executions as required.
Example Code Snippet:
package com.mss.quartz.demo;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.quartz.InterruptableJob;
import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.JobKey;
import org.quartz.PersistJobDataAfterExecution;
import org.quartz.Scheduler;
import org.quartz.SchedulerContext;
import org.quartz.SchedulerException;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.TriggerBuilder;
import org.quartz.UnableToInterruptJobException;
import org.quartz.impl.StdSchedulerFactory;
#PersistJobDataAfterExecution
public class HelloJob implements InterruptableJob
{
SchedulerContext schedulerContext = null;
testQuartz test = new testQuartz();
boolean result;
private boolean _interrupted = false;
private JobKey _jobKey = null;
Thread t = null;
//public static int count = 0;
public void interrupt() throws UnableToInterruptJobException {
System.out.println("---" + this._jobKey + " -- INTERRUPTING --");
this._interrupted = true;
}
public void execute(JobExecutionContext context)
throws JobExecutionException {
Scheduler scd = context.getScheduler();
JobDataMap dataMap = context.getJobDetail().getJobDataMap();
String jobSays = dataMap.getString("test1");
int myFloatValue = dataMap.getIntValue("id");
System.out.println("In Job Class"+jobSays+ " "+myFloatValue+" Current time in
Job class "+new Date().toString());
JobKey jobKey = context.getJobDetail().getKey();
int attemps = dataMap.getInt("attempts");
attemps++;
dataMap.put("attempts", attemps);
System.out.println("After putting count in job data map:"+dataMap.get("attempts"));
}
}
Try adding the @PersistJobDataAfterExecution annotation to the SimpleTriggerExample class:
@PersistJobDataAfterExecution
public class SimpleTriggerExample implements Job
{ ...}
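Combining the two answers into the asker's example gives a minimal sketch like the one below; seeding EXECUTION_COUNT with 0 via usingJobData is my addition, so that getInt never encounters a missing key on the first run:

import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.PersistJobDataAfterExecution;

@PersistJobDataAfterExecution
public class SimpleTriggerExample implements Job {

    public void execute(JobExecutionContext context) throws JobExecutionException {
        JobDataMap data = context.getJobDetail().getJobDataMap();
        int count = data.getInt("EXECUTION_COUNT") + 1; // current attempt number
        data.put("EXECUTION_COUNT", count);             // kept because of the annotation
        System.out.println("attempt " + count);
    }
}

// When scheduling, seed the counter so the first getInt succeeds:
JobDetail job = JobBuilder.newJob(SimpleTriggerExample.class)
        .withIdentity("dummyJobName", "group1")
        .usingJobData("EXECUTION_COUNT", 0)
        .build();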

Get current date in dd-MM-yyyy format

How can I get the current date in dd-MM-yyyy format in BlackBerry?
I have already tried the following, but it gives me output like 1318502493:
long currentTime = System.currentTimeMillis() / 1000;
System.out.println("Current time in :" + currentTime);
private String pattern = "dd-MM-yyyy";
String dateInString =new SimpleDateFormat(pattern).format(new Date());
DateFormat formatter = new SimpleDateFormat("dd-MM-yyyy");
return formatter.format(new Date());
Check whether you can use SimpleDateFormat. If you can, create an object of this class and use it to format the value returned by System.currentTimeMillis() (or simply a new Date()). Some code below:
import java.util.*;
import java.text.*;

public class DateTest {

    public static String getCurrentTimeStamp() {
        SimpleDateFormat formDate = new SimpleDateFormat("dd-MM-yyyy");
        // String strDate = formDate.format(System.currentTimeMillis()); // option 1
        String strDate = formDate.format(new Date()); // option 2
        return strDate;
    }

    public static void main(String[] args) {
        System.out.println(getCurrentTimeStamp());
    }
}