Overriding the MyBatis type handler

I'm using the latest Spring Boot with mybatis-spring, which uses MyBatis 3.5.9.
I have a POJO with a LocalDateTime field and the database column is a timestamp, so I would expect MyBatis to convert it. But since 3.4.1 there has been a change to the built-in LocalDateTimeTypeHandler that causes an "unable to convert to TIMESTAMPZ" error.
So I grabbed the previous MyBatis LocalDateTimeTypeHandler implementation, which works with LocalDateTime, and added it to my project. But no matter what I do, I cannot get MyBatis to use my type handler.
This is what I currently have for the type handler:
package my.mapper.typehandlers;

import java.sql.CallableStatement;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.SQLException;
import java.sql.Timestamp;
import java.time.LocalDateTime;

import org.apache.ibatis.type.BaseTypeHandler;
import org.apache.ibatis.type.JdbcType;
import org.apache.ibatis.type.MappedJdbcTypes;
import org.apache.ibatis.type.MappedTypes;

@MappedJdbcTypes(value = JdbcType.TIMESTAMP)
@MappedTypes(value = LocalDateTime.class)
public class LocalDateTimeTypeHandler extends BaseTypeHandler<LocalDateTime> {

    @Override
    public void setNonNullParameter(PreparedStatement ps, int i, LocalDateTime parameter, JdbcType jdbcType)
            throws SQLException {
        ps.setTimestamp(i, Timestamp.valueOf(parameter));
    }

    @Override
    public LocalDateTime getNullableResult(ResultSet rs, String columnName) throws SQLException {
        Timestamp timestamp = rs.getTimestamp(columnName);
        return getLocalDateTime(timestamp);
    }

    @Override
    public LocalDateTime getNullableResult(ResultSet rs, int columnIndex) throws SQLException {
        Timestamp timestamp = rs.getTimestamp(columnIndex);
        return getLocalDateTime(timestamp);
    }

    @Override
    public LocalDateTime getNullableResult(CallableStatement cs, int columnIndex) throws SQLException {
        Timestamp timestamp = cs.getTimestamp(columnIndex);
        return getLocalDateTime(timestamp);
    }

    private static LocalDateTime getLocalDateTime(Timestamp timestamp) {
        if (timestamp != null) {
            return timestamp.toLocalDateTime();
        }
        return null;
    }
}
and the @Results mapping:
@Results(id = "statusResult", value = {
    @Result(property = "id", column = "id", id = true),
    @Result(property = "patientId", column = "patient_id"),
    @Result(property = "enqueueDone", column = "enqueue_done"),
    @Result(property = "enqueuedTime", column = "enqueued_time")
})
I've tried explicitly setting the typeHandler in the @Result for enqueued_time, but the framework still ignores it.
Update: I added
mybatis.type-handlers-package=my.mapper.typehandlers to application.properties. I can hit my breakpoint in the custom handler and everything looks good, but then MyBatis appears to call the built-in LocalDateTimeTypeHandler anyway and I get the same error.
How can I get MyBatis to ignore its built-in LocalDateTimeTypeHandler?
Thanks

OK, so it's been a long hot day, but I got this to work.
All I needed to do was set typeHandler = my.mapper.typehandlers.LocalDateTimeTypeHandler.class in the @Result for the enqueued_time column:
@Results(id = "statusResult", value = {
    @Result(property = "id", column = "id", id = true),
    @Result(property = "patientId", column = "patient_id"),
    @Result(property = "enqueueDone", column = "enqueue_done"),
    @Result(property = "enqueuedTime", column = "enqueued_time",
            typeHandler = my.mapper.typehandlers.LocalDateTimeTypeHandler.class)
})
and include mybatis.type-handlers-package=my.mapper.typehandlers in the properties file.
I removed
@MappedJdbcTypes(value = JdbcType.TIMESTAMP)
@MappedTypes(value = LocalDateTime.class)
from my LocalDateTimeTypeHandler class since they were not needed.
All works now.
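For completeness, here is a minimal sketch of how that corrected mapping might sit in a mapper interface. The interface name, the query, and the Status result class are assumptions for illustration, not part of the original post.

package my.mapper;

import java.util.List;

import org.apache.ibatis.annotations.Mapper;
import org.apache.ibatis.annotations.Result;
import org.apache.ibatis.annotations.Results;
import org.apache.ibatis.annotations.Select;

@Mapper
public interface StatusMapper {

    @Select("SELECT id, patient_id, enqueue_done, enqueued_time FROM status")
    @Results(id = "statusResult", value = {
        @Result(property = "id", column = "id", id = true),
        @Result(property = "patientId", column = "patient_id"),
        @Result(property = "enqueueDone", column = "enqueue_done"),
        // The explicit typeHandler is what makes MyBatis use the custom handler
        // instead of its built-in LocalDateTimeTypeHandler for this column.
        @Result(property = "enqueuedTime", column = "enqueued_time",
                typeHandler = my.mapper.typehandlers.LocalDateTimeTypeHandler.class)
    })
    List<Status> findAll();
}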

Related

Java-Spark-Mongo: filter(dataset.col(newTime).$greater(oldTime)) not running on full data set

I have written Java Spark code with the Mongo connector. It is supposed to fetch all rows from MongoDB whose createdDate column is greater than the previous run's createdDate (a high-water-mark value, one per run, that I store in Oracle; initially the high-water-mark value in Oracle is 1900-01-01 00:00:00.000).
The createdDate column is of ISODate type in MongoDB.
In my MongoDB data, the max value stored for this createdDate column is 2018-04-11 01:43:20.165.
But the filter in the code is not working as desired: on the first run it sometimes fetches only up to 2018-03-30 21:48:59.519, and then on the second or third run it fetches up to the max value (2018-04-11 01:43:20.165).
Ideally this should happen on the first run itself, when the initial high-water-mark value is 1900-01.....
Here is the code:
package mongo;

import java.net.URI;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.PreparedStatement;
import java.sql.ResultSet;
import java.sql.Statement;
import java.text.ParsePosition;
import java.text.SimpleDateFormat;
import java.sql.Date;
import java.util.Iterator;
import java.util.List;

import org.apache.spark.api.java.JavaRDD;
import org.apache.spark.api.java.JavaSparkContext;
import org.apache.spark.sql.SparkSession;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.fs.Path;
import org.bson.Document;

import static org.apache.spark.sql.functions.*;

import org.apache.spark.sql.DataFrameWriter;
import org.apache.spark.sql.Dataset;
import org.apache.spark.sql.Encoders;
import org.apache.spark.sql.Row;

import com.mongodb.spark.MongoSpark;
import com.mongodb.spark.rdd.api.java.JavaMongoRDD;

import java.sql.Timestamp;

public final class MongoRead
{
    private static Connection con = null;

    private static String readHighWaterMark(String table, String oraConn, String oraUser, String oraPswd) throws Exception
    {
        String highWaterMarkValue = "";
        try
        {
            con = DriverManager.getConnection(oraConn, oraUser, oraPswd);
            Statement stmt = con.createStatement();
            ResultSet rs = stmt.executeQuery("select * from difa.HIGH_WATER_MARK_TABLE where table_nm='" + table + "'");
            while (rs.next()) {
                highWaterMarkValue = rs.getString(3);
            }
        }
        catch (Exception e) {
            e.printStackTrace();
            con.close();
        }
        return highWaterMarkValue;
    }

    private static void setHighWaterMark(String key, String value) throws Exception
    {
        PreparedStatement pStmt = con.prepareStatement("UPDATE high_water_mark_table SET high_water_mark_VALUE='" + value + "' where table_nm='" + key + "'");
        int i = pStmt.executeUpdate();
        System.out.println(i + " records updated");
    }

    public static void main(final String[] args) throws Exception {
        if (args.length < 8) {
            System.out.println("Please provide correct inputs");
            System.exit(1);
        }
        String mongoAddress = args[0];
        String clusterAddress = args[1];
        String oraConn = args[2];
        String oraUser = args[3];
        String oraPswd = args[4];
        String tableNm = args[5];
        String highWaterCol = args[6];
        String loadType = args[7];

        SparkSession spark = SparkSession.builder()
                .master("local")
                .appName("MongoSparkRecordReader")
                .config("spark.mongodb.input.uri", mongoAddress)
                .config("spark.mongodb.output.uri", mongoAddress)
                .getOrCreate();
        JavaSparkContext jsc = new JavaSparkContext(spark.sparkContext());

        try {
            FileSystem fs = FileSystem.get(new URI(clusterAddress), jsc.hadoopConfiguration());
            fs.delete(new Path(clusterAddress), true);
        }
        catch (Exception e) {
            e.printStackTrace();
        }

        /* ********Read data from MongoDB******* */
        Dataset<Row> dataset = MongoSpark.load(jsc).toDF();

        if (loadType.equalsIgnoreCase("I")) {
            String highWaterMark = readHighWaterMark(tableNm, oraConn, oraUser, oraPswd);
            System.out.println("============HIGH_WATER_MARK_VALUE: " + highWaterMark);

            Timestamp oldTime = Timestamp.valueOf(highWaterMark.replace("T", " ").replace("Z", ""));

            // Fetches records where createdDate is greater than the previous high-water mark.
            Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(oldTime)).persist();
            filtered.toJSON().write().text(clusterAddress);

            // Calculating the MAX(createdDate) in the fetched dataset.
            Dataset<Row> maxHighWaterRow = filtered.agg(max(filtered.col(highWaterCol)).alias("newHighWater")).persist();
            List<Timestamp> newHighWaterValue = maxHighWaterRow.select("newHighWater").as(Encoders.TIMESTAMP()).collectAsList();
            Timestamp maxHighWaterMarkValue = newHighWaterValue.iterator().next();

            SimpleDateFormat dtFormat = new SimpleDateFormat("yyyy-MM-dd HH:mm:ss.SSS");
            Timestamp oldDate = Timestamp.valueOf(highWaterMark.replace('T', ' ').replace("Z", ""));

            // Setting HIGH_WATER_MARK_VALUE if a greater value is detected.
            if (maxHighWaterMarkValue != null && maxHighWaterMarkValue.after(oldDate)) {
                setHighWaterMark(tableNm, dtFormat.format(maxHighWaterMarkValue).replace(" ", "T").concat("Z"));
            }
        }
        else {
            dataset.toJSON().write().text(clusterAddress);
        }
        con.close();
        jsc.close();
    }
}
Any idea why the filter with $greater is not fetching the records correctly?
I fixed this by adding .persist() to the Dataset:

/* ********Read data from MongoDB******* */
Dataset<Row> dataset = MongoSpark.load(jsc).toDF().persist();
....
..
...
Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(oldTime)).persist();

I don't know why the filter was not running on the whole dataset without persist().
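One plausible explanation (an assumption on my part, not something confirmed in the post): Spark evaluates datasets lazily, so without persist() every action re-runs the whole lineage, including the read from MongoDB. The filtered write and the max() aggregation are separate actions, and if the source read does not behave identically each time, the written output and the stored high-water mark can disagree. Persisting pins the data so both actions see the same rows. A minimal sketch, reusing the variable names above and an explicit (assumed) storage level:

import org.apache.spark.storage.StorageLevel;

// Materialize the Mongo read once so every later action reuses the same rows.
Dataset<Row> dataset = MongoSpark.load(jsc).toDF()
        .persist(StorageLevel.MEMORY_AND_DISK());

// Both the filtered write and the max() aggregation now run against the
// cached data instead of re-reading from MongoDB per action.
Dataset<Row> filtered = dataset.filter(dataset.col(highWaterCol).$greater(oldTime));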

net.sqlcipher.database.SQLiteException: file is not a database: , while compiling: select count(*) from sqlite_master

The error is on this line:
mDataBase = SQLiteDatabase.openDatabase(dbPath, "123", null, SQLiteDatabase.NO_LOCALIZED_COLLATORS);
It happens when opening the database. What's wrong? How do I open a database with a password? Can anyone help me?
I set the password in DB Browser for SQLite via File > Set encryption and am opening the database with that password on the Android side.
When I open it, I get this error:
net.sqlcipher.database.SQLiteException: file is not a database: , while compiling: select count(*) from sqlite_master
Can anyone help me solve it? Thanks in advance.
import android.content.Context;
import android.database.SQLException;
//import android.database.sqlite.SQLiteOpenHelper;
import android.util.Log;
import android.app.Activity;
import android.os.Bundle;

import net.sqlcipher.database.SQLiteDatabase;
import net.sqlcipher.database.SQLiteOpenHelper;
import net.sqlcipher.database.SQLiteDatabase.CursorFactory;

import java.io.File;
import java.io.FileOutputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;

public class DatabaseHelper extends SQLiteOpenHelper {

    private static String TAG = DatabaseHelper.class.getName();
    private static String DB_PATH = "";
    private static String DB_NAME = "ec.db"; // Database name
    private SQLiteDatabase mDataBase;
    private final Context mContext;
    File databaseFile = null;

    public DatabaseHelper(Context context) {
        super(context, DB_NAME, null, 1);
        DB_PATH = context.getApplicationInfo().dataDir + "/databases/";
        this.mContext = context;
        SQLiteDatabase.loadLibs(context);
        File databaseFile = context.getDatabasePath(DB_NAME);
        databaseFile.mkdirs();
    }

    public void createDataBase() throws IOException {
        // If the database does not exist, copy it from the assets
        boolean mDataBaseExist = checkDataBase();
        if (!mDataBaseExist) {
            this.getWritableDatabase("123");
            this.close();
            try {
                // Copy the database from assets
                copyDataBase();
                Log.e(TAG, "createDatabase database created");
            } catch (IOException mIOException) {
                throw new Error(mIOException.toString() + " : " + DB_PATH
                        + DB_NAME); // "ErrorCopyingDataBase"
            }
        }
    }

    private boolean checkDataBase() {
        File dbFile = new File(DB_PATH + DB_NAME);
        return dbFile.exists();
    }

    // Copy the database from assets
    private void copyDataBase() throws IOException {
        InputStream mInput = mContext.getAssets().open(DB_NAME);
        String outFileName = DB_PATH + DB_NAME;
        OutputStream mOutput = new FileOutputStream(outFileName);
        byte[] mBuffer = new byte[4096];
        int mLength;
        while ((mLength = mInput.read(mBuffer)) > 0) {
            mOutput.write(mBuffer, 0, mLength);
        }
        mOutput.flush();
        mOutput.close();
        mInput.close();
    }

    // Open the database, so we can query it
    public boolean openDataBase() throws SQLException {
        String mPath = DB_PATH + DB_NAME;
        //File dbFile = new File(DB_PATH + DB_NAME);
        //File databaseFile = mContext.getDatabasePath(DB_NAME);
        //databaseFile.mkdirs();
        //databaseFile.delete();
        SQLiteDatabase.loadLibs(mContext);
        String dbPath = mContext.getDatabasePath("ec.db").getPath();
        //databaseFile.delete();
        SQLiteDatabase.loadLibs(mContext);
        //mDataBase = SQLiteDatabase.openOrCreateDatabase(databaseFile, "123", null);
        //mDataBase = SQLiteDatabase.openDatabase(mPath, "123", null, SQLiteDatabase.NO_LOCALIZED_COLLATORS);
        mDataBase = SQLiteDatabase.openDatabase(dbPath, "123", null, SQLiteDatabase.NO_LOCALIZED_COLLATORS);
        return mDataBase != null;
    }

    @Override
    public synchronized void close() {
        if (mDataBase != null)
            mDataBase.close();
        super.close();
    }

    @Override
    public void onCreate(SQLiteDatabase db) {
    }

    @Override
    public void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) {
    }
}
I created a database with SQLCipher v3.5.7 and then changed the SQLCipher version to v4.1.3 and had this problem.
In build.gradle I changed
implementation "net.zetetic:android-database-sqlcipher:4.1.3@aar"
to
implementation 'net.zetetic:android-database-sqlcipher:3.5.7@aar'
and the problem was solved.
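An alternative worth noting (an assumption based on the standard SQLCipher 4 API, not taken from the original post): instead of pinning the dependency back to 3.5.7, SQLCipher 4 can usually upgrade a 3.x database file in place by running PRAGMA cipher_migrate after the key is applied, via a SQLiteDatabaseHook. A minimal sketch of how the openDatabase call inside openDataBase() might be wired:

// Requires: import net.sqlcipher.database.SQLiteDatabaseHook;
SQLiteDatabaseHook migrationHook = new SQLiteDatabaseHook() {
    @Override
    public void preKey(SQLiteDatabase database) {
        // nothing to do before the key is set
    }

    @Override
    public void postKey(SQLiteDatabase database) {
        // Upgrades a SQLCipher 3.x file to the 4.x format in place.
        database.rawExecSQL("PRAGMA cipher_migrate;");
    }
};

mDataBase = SQLiteDatabase.openDatabase(dbPath, "123", null,
        SQLiteDatabase.NO_LOCALIZED_COLLATORS, migrationHook);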
You are referencing the password string value of 123456Sa; however, your call within createDataBase uses the value 123 as the password to getWritableDatabase.
I was getting this exception only in the obfuscated build.
I added default constructors for both the ByteArraySerializer and ByteArrayDeserializer classes.
Note: don't add the above classes as inner classes. Declare them independently and keep them in your proguard-rules.
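For illustration, keep rules along these lines could go in proguard-rules.pro; the package name here is hypothetical and would need to match the real serializer classes:

# Hypothetical proguard-rules.pro entries (package name is an assumption).
# Keep the serializer classes and all their members, including the no-arg
# constructors, so they can still be instantiated reflectively after obfuscation.
-keep class com.example.serialization.ByteArraySerializer { *; }
-keep class com.example.serialization.ByteArrayDeserializer { *; }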

Intersystems Cache using XEP

I am trying to extract data from the Samples namespace that comes with the InterSystems Caché install. Specifically, I am trying to retrieve Sample.Company global data using XEP. In order to achieve this, I created a Sample.Company class like this:
package Sample;

public class Company {
    public Long id;
    public String mission;
    public String name;
    public Long revenue;
    public String taxId;

    public Company(Long id, String mission, String name, Long revenue,
            String taxId) {
        this.id = id;
        this.mission = mission;
        this.name = name;
        this.revenue = revenue;
        this.taxId = taxId;
    }

    public Company() {
    }
}
The XEP-related code looks like this:
import java.util.ArrayList;
import java.util.List;

import org.springframework.stereotype.Service;

import Sample.Company;

import com.intersys.xep.Event;
import com.intersys.xep.EventPersister;
import com.intersys.xep.EventQuery;
import com.intersys.xep.EventQueryIterator;
import com.intersys.xep.PersisterFactory;
import com.intersys.xep.XEPException;

@Service
public class CompanyService {

    public List<Company> fetch() {
        EventPersister myPersister = PersisterFactory.createPersister();
        myPersister.connect("SAMPLES", "user", "pwd");
        try {
            // delete any existing SingleStringSample events, then import new ones
            Event.isEvent("Sample.Company");
            myPersister.deleteExtent("Sample.Company");
            String[] generatedClasses = myPersister.importSchema("Sample.Company");
            for (int i = 0; i < generatedClasses.length; i++) {
                System.out.println("Event class " + generatedClasses[i]
                        + " successfully imported.");
            }
        } catch (XEPException e) {
            System.out.println("import failed:\n" + e);
            throw new RuntimeException(e);
        }

        EventQuery<Company> myQuery = null;
        List<Company> list = new ArrayList<Company>();
        try {
            Event newEvent = myPersister.getEvent("Sample.Company");
            String sql = "Select * from Sample.Company";
            myQuery = newEvent.createQuery(sql);
            newEvent.close();
            myQuery.execute();

            EventQueryIterator<Company> iterator = myQuery.getIterator();
            while (iterator.hasNext()) {
                Company c = iterator.next();
                System.out.println(c);
                list.add(c);
            }
            myQuery.close();
            myPersister.close();
            return list;
        } catch (XEPException e) {
            System.out.println("createQuery failed:\n" + e);
            throw new RuntimeException(e);
        }
    }
}
When I try executing the fetch() method of the above class, I see the following exception:
com.intersys.xep.XEPException: Cannot import - extent for Sample.Company not empty.
at com.intersys.xep.internal.Generator.generate(Generator.java:52)
at com.intersys.xep.EventPersister.importSchema(EventPersister.java:954)
at com.intersys.xep.EventPersister.importSchema(EventPersister.java:363)
I got the simple string example working. Does this mean we cannot read existing data using XEP? If we can, could someone please help me resolve the above issue? Thanks in advance.
You are trying to create a new class named Sample.Company in your instance:
String[] generatedClasses = myPersister.importSchema("Sample.Company");
But you still have data and an existing class there.
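In other words, importSchema() refuses to regenerate the class while the extent still contains data, which is exactly what the exception says. A minimal sketch of a read-only path, reusing only the XEP calls already shown in the question and assuming the Sample.Company projection already exists on the server, would skip deleteExtent()/importSchema() entirely:

EventPersister myPersister = PersisterFactory.createPersister();
myPersister.connect("SAMPLES", "user", "pwd");

List<Company> list = new ArrayList<Company>();
Event newEvent = myPersister.getEvent("Sample.Company");
EventQuery<Company> myQuery = newEvent.createQuery("Select * from Sample.Company");
myQuery.execute();

EventQueryIterator<Company> iterator = myQuery.getIterator();
while (iterator.hasNext()) {
    list.add(iterator.next());
}

myQuery.close();
newEvent.close();
myPersister.close();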

How can I avoid converting an empty HashMap to null in morphia?

We are using org.mongodb.morphia to convert objects to BasicDBObjects before persistence. One issue encountered is that in some cases the object to convert contains an empty HashMap (size 0), and after conversion that HashMap has become null, so a NullPointerException is thrown when it is accessed later. Is there any way to avoid this, so that after conversion it is still a HashMap with size 0?
Part of the class to be converted:
public class ProjectServiceAdapterConfig {

    @NotNull
    private String serviceAdapterId;

    @NotNull
    private String projectId;

    @Embedded
    @Flatten
    private Map<String, Map<String, String>> connections = new HashMap<>();

    //...... setter and getter skipped here
}
code for conversion:
// create a mapper with default MapperOptions
private Mapper createMapper() {
    return new Mapper();
}

ReplaceableItem objectToItem(final ProjectServiceAdapterConfig obj) {
    final Mapper mapper = createMapper();
    final MappedClass mc = mapper.getMappedClass(obj.getClass());
    final Map<String, Object> map = mapper.toDBObject(obj).toMap();
}
The obj is created elsewhere. After some debugging, I found that the obj contains an empty Map (the following data was copied from the IntelliJ IDEA debugger):
connections = {java.util.LinkedHashMap#8890} size = 1
[0] = {java.util.LinkedHashMap$Entry#8894}"accounts" -> size = 0
key: java.lang.String = {java.lang.String#8895}"accounts"
value: java.util.LinkedHashMap = {java.util.LinkedHashMap#8896} size = 0
and this is what it looks like after conversion:
[2] = {java.util.LinkedHashMap$Entry#8910}"connections" -> size = 1
key: java.lang.String = {java.lang.String#8911}"connections"
value: com.mongodb.BasicDBObject = {com.mongodb.BasicDBObject#8912} size = 1
[0] = {java.util.LinkedHashMap$Entry#8923}"accounts" -> null
key: java.lang.String = {java.lang.String#8895}"accounts"
value: = null
As you can see, it is converted to null, which is what we are trying to avoid.
Thanks
Before you call morphia.mapPackage(), do this:
morphia.getMapper().getOptions().storeEmpties = true;
That should then map back to an empty map for you.
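For context, a minimal sketch of where that option sits in a typical Morphia bootstrap; the model package name passed to mapPackage() is an assumption:

// Assumes org.mongodb.morphia.Morphia on the classpath.
Morphia morphia = new Morphia();
// Keep empty maps/collections instead of dropping them to null.
// (Newer Morphia versions expose this as setStoreEmpties(true).)
morphia.getMapper().getOptions().storeEmpties = true;
morphia.mapPackage("com.example.model"); // hypothetical model package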
I assume I cannot avoid it without customizing the MapOfValuesConverter. You can see from the source code that an empty map will always be converted to null:
@Override
public Object encode(Object value, MappedField mf) {
    if (value == null)
        return null;

    Map<Object, Object> map = (Map<Object, Object>) value;
    if ((map != null) && (map.size() > 0)) {
        Map mapForDb = new HashMap();
        for (Map.Entry<Object, Object> entry : map.entrySet()) {
            String strKey = converters.encode(entry.getKey()).toString();
            mapForDb.put(strKey, converters.encode(entry.getValue()));
        }
        return mapForDb;
    }
    return null;
}
In case morphia.getMapper().getOptions().setStoreEmpties(true); doesn't work for you, another solution would be to use the @PostLoad annotation to check whether you have a null collection and create an empty one if necessary.
import java.util.*;

import org.mongodb.morphia.annotations.*;
import org.bson.types.ObjectId;

@Entity
public class Model {

    @Id
    private ObjectId id;

    private Map<String, String> map;

    protected Model() {}

    public Model(HashMap<String, String> map) {
        super();
        setMap(map);
    }

    public void setMap(HashMap<String, String> map) {
        this.map = map;
        checkForNullMap();
    }

    @PostLoad
    private void checkForNullMap() {
        if (map == null) {
            map = new HashMap<String, String>();
        }
    }
}
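Hypothetical usage of the pattern above, assuming a getMap() accessor is added to Model and a configured Datastore is available: an entity loaded with a missing or null map now comes back with an empty map, so callers need no null check.

// Sketch only: datastore, someId and getMap() are assumptions.
Model loaded = datastore.get(Model.class, someId);
loaded.getMap().put("lastAccess", "now"); // safe: never null after @PostLoad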

Mongo DB grouping datatype changes

I came across an odd occurrence while using MongoDB and their Java driver.
When I do a grouping query, the datatype of the key changes from an int to a double.
(i.e. I am grouping on a key for 'hours', which is stored as an int within all the objects, but the key changes to a double type in the results I get back).
It isn't a huge issue, but it is weird that it would just arbitrarily change the datatype of a key-value pair like that. Has anyone else had this come up? Is this normal behaviour?
Thanks,
P.S. Doing a regular .find() query returns the correct datatype, FYI.
Edit:
Some example code:
import com.mongodb.BasicDBObject;
import com.mongodb.DBCollection;
import com.mongodb.DBCursor;
import com.mongodb.DBObject;
import com.mongodb.QueryOperators;

public class MongoTestQueries {

    private static final String TESTDBNAME = "badgerbadgerbadger";
    private static final String TESTCOLNAME = "mushroom";
    private static final Long TESTMAX = 50L;
    private static final String KEY1 = "a";
    private static final String KEY2 = "snake";
    private static final String KEY3 = "plane";

    /**
     * This starts running it.
     *
     * @param args
     *            the arguments.
     */
    public static void main(final String[] args) {
        // You'll need to write your own code here for connecting to db as you see fit.
        MongoConnection mc = new MongoConnection("someserver.com", TESTDBNAME);
        mc.setCurCol(TESTCOLNAME);
        mc.getCurCol().drop();
        mc.setCurCol(TESTCOLNAME);
        DBCollection col = mc.getCurCol();
        populateCollection(col);
        System.out.println(col.count() + " inserted into db.");
        regGroupSearch(col);
    }

    private static void populateCollection(DBCollection col) {
        for (Long l = 0L; l < TESTMAX; l++) {
            col.insert(new BasicDBObject(KEY1, new Integer(l.intValue())).append(KEY2,
                    Math.random()).append(KEY3, (TESTMAX - l) + "a string"));
        }
    }

    private static void regGroupSearch(final DBCollection col) {
        System.out.println("Group Search:");
        DBObject key = new BasicDBObject(KEY1, true).append(KEY3, true);
        DBObject cond = new BasicDBObject().append(KEY1, new BasicDBObject(QueryOperators.GT, 4.0));
        DBObject initial = new BasicDBObject("count", 0).append("sum", 0);
        String reduce = "function(obj,prev){prev.sum+=obj." + KEY2 + ",prev.count+=1}";
        String finalize = "function(obj){obj.ave = obj.sum/obj.count}";
        DBObject groupResult = col.group(key, cond, initial, reduce, finalize);
        printDBObject(groupResult);
        System.out.println("Done.");
    }

    private static void printDBObject(final DBObject toPrint) {
        for (String k : toPrint.keySet()) {
            System.out.println(k + ": " + toPrint.get(k));
        }
    }
}
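Not part of the original post, but one defensive way to consume the grouped rows regardless of whether a numeric value comes back as an Integer or a Double is to narrow it through Number:

// Sketch: works whether the driver returns the value as Integer, Long or Double.
private static int readIntValue(final DBObject row, final String key) {
    return ((Number) row.get(key)).intValue();
}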