I have code that works most of the time, but not always. Here's my approach:
Creating the backup:
1. Create an EntityManager for the source database.
2. Create an EntityManager for the destination database (an embedded Derby database).
3. Copy the entities: select all entries of a table (the table order is hard-coded right now) and copy them to the destination database; basically a select-all, then detach each entity from the source and persist it in the destination.
4. Zip the embedded Derby database.
Loading from a backup:
1. Unzip the backup.
2. Perform a backup first (as a restore point for the current database).
3. Clean the destination database (delete the contents of all tables).
4. Copy the entities.
At some point I would use the JPA 2 Metamodel to fetch the tables to be copied and select the order in which they need to be copied (due to constraints); a rough sketch of that idea follows.
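For what it's worth, here is a rough, untested sketch of that metamodel idea (splitting entities into association-free and associated ones is a simplification; a real version would topologically sort by the association targets):
import java.util.ArrayList;
import java.util.List;
import javax.persistence.EntityManagerFactory;
import javax.persistence.metamodel.Attribute;
import javax.persistence.metamodel.EntityType;

public class EntityOrderSketch {
    // Approximates the "outside in" copy order: entities without
    // associations first, everything else afterwards.
    public static List<String> copyOrder(EntityManagerFactory emf) {
        List<String> simple = new ArrayList<String>();
        List<String> related = new ArrayList<String>();
        for (EntityType<?> type : emf.getMetamodel().getEntities()) {
            boolean hasAssociation = false;
            for (Attribute<?, ?> attribute : type.getAttributes()) {
                if (attribute.isAssociation()) {
                    hasAssociation = true;
                    break;
                }
            }
            (hasAssociation ? related : simple).add(type.getName());
        }
        simple.addAll(related);
        return simple;
    }
}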
For some reason this approach doesn't always work: I see "lost" entries that are not recovered.
Here's the code:
package com.bluecubs.xinco.core.server;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;
/**
* This is a complex task and is heavily dependent on the architecture
* of the database.
*
* Data needs to be stored in a particular order in the database to comply
* with database constraints. This order can be observed in a dump file or
* create script like the ones generated by MySQL Workbench; using one of those
* should be enough. In case such a tool is not available, the logic is basically
* to populate tables from the outside inwards: from the tables with no relationships
* (or only one) to the more complex ones. In summary, before a table is populated, all
* the related tables should be populated already (if we have identifying relationships).
*
* @author Javier A. Ortiz Bultrón <javier.ortiz.78@gmail.com>
*/
public class XincoBackupManager {
private static XincoBackupManager instance;
private static EntityManagerFactory liveEMF;
private static EntityManagerFactory backupEMF;
private static EntityManager live, backup;
private static final ArrayList<String> tables = new ArrayList<String>();
private static XincoBackupFile last;
private static String backupPath;
public static HashMap<String, Integer> stats = new HashMap<String, Integer>();
static {
//Non-order-critical tables
tables.add("XincoCoreAceT");
tables.add("XincoCoreDataT");
tables.add("XincoCoreDataTypeAttributeT");
tables.add("XincoCoreGroupT");
tables.add("XincoCoreLanguageT");
tables.add("XincoCoreNodeT");
tables.add("XincoCoreUserHasXincoCoreGroupT");
tables.add("XincoCoreUserT");
tables.add("XincoSettingT");
tables.add("XincoDependencyTypeT");
tables.add("XincoCoreDataHasDependencyT");
tables.add("XincoSetting");
tables.add("XincoId");
//Order critical tables
tables.add("XincoCoreLanguage");
tables.add("XincoCoreNode");
tables.add("XincoCoreDataType");
tables.add("XincoCoreData");
tables.add("XincoDependencyType");
tables.add("XincoCoreDataHasDependency");
tables.add("XincoCoreUser");
tables.add("XincoCoreUserModifiedRecord");
tables.add("XincoCoreGroup");
tables.add("XincoCoreAce");
tables.add("XincoCoreUserHasXincoCoreGroup");
tables.add("XincoAddAttribute");
tables.add("XincoCoreDataTypeAttribute");
tables.add("XincoCoreLog");
}
public static XincoBackupManager get() {
if (instance == null) {
instance = new XincoBackupManager();
}
return instance;
}
private static void setDBSystemDir(String systemDir) {
// Set the db system directory.
System.setProperty("derby.system.home", systemDir);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Derby home set at: {0}", systemDir);
try {
//Start the embedded DB
Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (IllegalAccessException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
private static void initConnections() {
try {
liveEMF = XincoDBManager.getEntityManagerFactory();
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
try {
backupEMF = Persistence.createEntityManagerFactory("XincoBackup");
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
protected static boolean backup() throws XincoException {
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Initializing connections...");
initConnections();
stats.clear();
backupPath = XincoSettingServer.getSetting("setting.backup.path").getString_value();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
live = liveEMF.createEntityManager();
//Prepare the backup repository. Create dirs if needed.
File backupDir = new File(backupPath);
backupDir.mkdirs();
//Create folder for this backup
SimpleDateFormat format = new SimpleDateFormat("MM-dd-yyyy");
File backupNewDir = new File(backupPath + System.getProperty("file.separator")
+ format.format(new Date()));
backupNewDir.mkdirs();
/*
* Make sure there's no derby database stuff in the folder.
* Any previous interrupted backup might have left corrupted database files.
*/
File tempDir = new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "xinco");
if (tempDir.exists()) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.WARNING,
"Deleting potentially corrupted database files at: {0}", tempDir);
FileUtils.deleteDirectory(tempDir);
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Done!");
}
/**
* Prepare system to use derby
*/
setDBSystemDir(backupNewDir.getAbsolutePath());
backup = backupEMF.createEntityManager();
for (String s : tables) {
copyEntities(s, live, backup);
}
/**
* At this point we should have a <Backup Database name> folder in
* <Backup Path>/<Date>.
* Let's zip them for storage.
*/
format = new SimpleDateFormat("MM dd yyyy hh-mm-ss");
zipBackupFiles(backupNewDir, backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "Xinco Backup " + format.format(new Date()));
//Stop Derby database in order to delete
try {
DriverManager.getConnection("jdbc:derby:;shutdown=true");
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
}
//Delete backed up files
String dbName = (String) backup.getProperties().get("javax.persistence.jdbc.url");
dbName = dbName.substring(dbName.lastIndexOf(":") + 1, dbName.indexOf(";"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting temp folder: {0}", dbName);
FileUtils.deleteDirectory(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + dbName));
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} finally {
if (live != null && live.isOpen()) {
live.close();
}
if (backup != null && backup.isOpen()) {
backup.close();
}
if (backupEMF != null && backupEMF.isOpen()) {
backupEMF.close();
}
}
XincoDBManager.setLocked(false);
return true;
}
private static void zipBackupFiles(File path, String zipName) throws XincoException {
if (!zipName.endsWith(".zip")) {
zipName += ".zip";
}
// These are the files to include in the ZIP file
IOFileFilter filter = new IOFileFilter() {
@Override
public boolean accept(File file) {
if (file.isDirectory()) {
return true;
}
//Ignore other backup files
if (file.isFile() && !file.getName().endsWith(".zip")) {
return true;
}
return false;
}
@Override
public boolean accept(File file, String string) {
throw new UnsupportedOperationException("Not supported yet.");
}
};
@SuppressWarnings("unchecked")
Collection<File> fileList = FileUtils.listFiles(path, filter, TrueFileFilter.INSTANCE);
Object[] files = fileList.toArray();
// Create a buffer for reading the files
byte[] buf = new byte[1024];
try {
// Create the ZIP file
ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipName));
// Compress the files
for (int i = 0; i < files.length; i++) {
FileInputStream in = new FileInputStream((File) files[i]);
String fileName = ((File) files[i]).getPath();
//Remove not needed folders
fileName = fileName.substring(fileName.indexOf(path.getAbsolutePath()) + path.getAbsolutePath().length() + 1);
// Add ZIP entry to output stream.
out.putNextEntry(new ZipEntry(fileName));
// Transfer bytes from the file to the ZIP file
int len;
while ((len = in.read(buf)) > 0) {
out.write(buf, 0, len);
}
// Complete the entry
out.closeEntry();
in.close();
last = new XincoBackupFile(new File(zipName));
}
// Complete the ZIP file
out.close();
} catch (IOException e) {
throw new XincoException("Error zipping backup: " + e.getLocalizedMessage());
}
}
private static void copyEntities(String table, EntityManager source, EntityManager dest) {
List<Object> result, result2;
result = source.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Copying from table: {0}", table);
int i = 0;
source.clear();
for (Object o : result) {
i++;
Class<?> persistenceClass = null;
try {
persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
dest.getTransaction().begin();
if (dest.contains(persistenceClass.cast(o))) {
//If no exception do a merge because it exists already
dest.merge(persistenceClass.cast(o));
} else {
dest.persist(persistenceClass.cast(o));
}
dest.getTransaction().commit();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
throw new XincoException("No persistence enitiy defined for table: " + table);
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
throw new XincoException("Exception copying: " + o);
}
}
stats.put(table, i);
result2 = dest.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Copying for table: {0} completed! Amount of records: {1}",
new Object[]{table, i});
//Make sure the copy is accurate.
//TODO: For some reason XincoId always returns twice the amount of records during this routine.
if (result2.size() != result.size() && !table.equals("XincoId")) {
throw new XincoException("Error copying records for table " + table + ". Got " + result2.size() + " instead of " + result.size());
}
result2.clear();
}
@SuppressWarnings({"unchecked"})
public static ArrayList<XincoBackupFile> getBackupFiles() throws XincoException {
// These are the files to include in the ZIP file
IOFileFilter filter = new IOFileFilter() {
@Override
public boolean accept(File file) {
//Only zip files
if (file.isFile() && file.getName().endsWith(".zip")
&& file.getName().startsWith("Xinco Backup")) {
return true;
}
return false;
}
@Override
public boolean accept(File file, String string) {
throw new UnsupportedOperationException("Not supported yet.");
}
};
Collection<File> files = FileUtils.listFiles(
new File(backupPath), filter, TrueFileFilter.INSTANCE);
ArrayList<XincoBackupFile> backupFiles = new ArrayList<XincoBackupFile>();
for (File f : files) {
backupFiles.add(new XincoBackupFile(f));
}
//Sort
Collections.sort(backupFiles, new XincoBackupComparator());
//Sorted from oldest to newer so we need to invert the list.
Collections.reverse(backupFiles);
return backupFiles;
}
protected static boolean restoreFromBackup(XincoBackupFile backupFile) throws XincoException {
try {
stats.clear();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Restoring database from: {0}", backupFile.getName());
//First make a backup of current database just in case
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Creating a restore point for your current database...");
backup();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
//Load database from the provided backup
loadDatabaseFromBackup(backupFile);
XincoDBManager.setLocked(false);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Restore complete!");
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting restore point...");
FileUtils.forceDelete(last);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Done!");
} catch (IOException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
return true;
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
//Recover from last backup
loadDatabaseFromBackup(getLast());
XincoDBManager.setLocked(false);
throw new XincoException("Unable to load backup! Database reverted to original state. \n" + ex.getMessage());
}
}
protected static void loadDatabaseFromBackup(XincoBackupFile backupFile) throws XincoException {
EntityManager backupEM = null;
try {
initConnections();
live = liveEMF.createEntityManager();
//Unzip backup
unzipBackup(backupFile);
//Delete current database (inverse order than writing)
Collections.reverse(tables);
for (String s : tables) {
clearTable(s, live);
}
//Get back to original order
Collections.reverse(tables);
//Make derby start where the backup is
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Connecting to backup data...");
setDBSystemDir(backupPath + "Temp"
+ System.getProperty("file.separator"));
//Connect to backup database
backupEM = Persistence.createEntityManagerFactory("XincoBackup").createEntityManager();
//Start copying
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Starting loading entities...");
for (String s : tables) {
//Copy values from backup
copyEntities(s, backupEM, live);
}
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Load complete!");
//Stop Derby database in order to delete
DriverManager.getConnection("jdbc:derby:;shutdown=true");
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Delete temp folder!");
try {
FileUtils.deleteDirectory(new File(System.getProperty("derby.system.home")));
} catch (IOException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
} finally {
if (live != null && live.isOpen()) {
live.close();
}
if (backupEM != null && backupEM.isOpen()) {
backupEM.close();
}
}
}
private static void unzipBackup(XincoBackupFile backup) {
try {
//Make sure that the temp directory is empty before unzipping
FileUtils.deleteDirectory(new File(backupPath
+ System.getProperty("file.separator") + "Temp"));
byte[] buf = new byte[1024];
ZipInputStream zipinputstream = null;
ZipEntry zipentry;
zipinputstream = new ZipInputStream(
new FileInputStream(backup.getBackupFile()));
zipentry = zipinputstream.getNextEntry();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Unzipping backup file: {0}", backup.getName());
while (zipentry != null) {
//for each entry to be extracted
String entryName = zipentry.getName();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Extracting file: {0}", entryName);
int n;
FileOutputStream fileoutputstream;
File newFile = new File(entryName);
String directory = newFile.getParent();
if (directory == null) {
if (newFile.isDirectory()) {
break;
}
}
if (entryName.contains(System.getProperty("file.separator"))) {
//Create any internal folders required
new File(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator") + entryName.substring(
0, entryName.lastIndexOf(
System.getProperty("file.separator")))).mkdirs();
} else {
File tempDir = new File(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator"));
tempDir.mkdirs();
}
fileoutputstream = new FileOutputStream(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator") + entryName);
while ((n = zipinputstream.read(buf, 0, 1024)) > -1) {
fileoutputstream.write(buf, 0, n);
}
fileoutputstream.close();
zipinputstream.closeEntry();
zipentry = zipinputstream.getNextEntry();
}//while
zipinputstream.close();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Unzipping complete!");
} catch (Exception e) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE,
"Error unzipping file!", e);
}
}
private static void clearTable(String table, EntityManager target) throws XincoException {
try {
List<Object> result;
result = target.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Cleaning table: {0}", table);
int i = 0;
Class<?> serverClass = null;
boolean special = false;
try {
serverClass = Class.forName("com.bluecubs.xinco.core.server." + table + "Server");
special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
} catch (ClassNotFoundException ex) {
try {
//Class doesn't exist, try in the add folder
serverClass = Class.forName("com.bluecubs.xinco.add.server." + table + "Server");
special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
} catch (ClassNotFoundException ex1) {
} catch (InstantiationException ex1) {
} catch (NoClassDefFoundError ex1) {
}
} catch (InstantiationException ex) {
} catch (NoClassDefFoundError ex) {
}
if (serverClass != null && special) {
((XincoCRUDSpecialCase) serverClass.newInstance()).clearTable();
special = false;
} else {
for (Object o : result) {
i++;
try {
Class<?> persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
target.getTransaction().begin();
target.remove(persistenceClass.cast(o));
target.getTransaction().commit();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
}
result = target.createNamedQuery(table + ".findAll").getResultList();
if (!result.isEmpty()) {
throw new IllegalStateException("Unable to delete entities: " + result.size());
}
stats.put(table, i);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Cleaning table: {0} completed! Amount of records removed: {1}", new Object[]{table, i});
} catch (IllegalAccessException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
/**
* @return the last
*/
public static XincoBackupFile getLast() {
return last;
}
}
Any flaw in the design?
A better way of doing it?
Any comment is more than welcomed!
Most database engines provide commands or tools to dump the content of a given database (some of them even support incremental backups). JPA will just be less efficient and more complex when you have ready-to-use solutions, so I don't see the point of using JPA for this task.
For Derby, there is actually nothing to do: just zip/tar (or use rsync) the database files and you're done.
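If the goal is a consistent copy of a live Derby database, Derby also ships an online backup procedure, so the engine does the file copying for you; a minimal sketch (the connection URL and target directory are illustrative):
import java.sql.CallableStatement;
import java.sql.Connection;
import java.sql.DriverManager;
import java.sql.SQLException;

public class DerbyOnlineBackupSketch {
    // Asks Derby to copy the live database files into backupDir/<dbName>.
    public static void backup(String backupDir) throws SQLException {
        // The connection URL is illustrative; use your own database name.
        Connection conn = DriverManager.getConnection("jdbc:derby:xinco");
        try {
            CallableStatement cs =
                    conn.prepareCall("CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE(?)");
            cs.setString(1, backupDir);
            cs.execute();
            cs.close();
        } finally {
            conn.close();
        }
    }
}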
And if you want to copy the content of one database engine to another engine, use an ETL.
See also
How i can Dump a derby database into an sql file?
This is always better done in the datastore. Some JPA providers provide ways to do it; the one we provide is
http://www.datanucleus.org/products/accessplatform/jpa/replication.html
An interesting option I've found is Scriptella, which can be called from Java code. Usage examples. I'll give it a try and post the results.
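For reference, the Scriptella entry point I expect to try looks roughly like this (unverified; the ETL file name is illustrative):
import java.io.File;
import scriptella.execution.EtlExecutor;

public class ScriptellaSketch {
    public static void main(String[] args) throws Exception {
        // Runs the ETL job described in etl.xml (file name is illustrative).
        EtlExecutor.newExecutor(new File("etl.xml").toURI().toURL()).execute();
    }
}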
Related
We tried all the solutions provided in this post (FTP client hangs), but none of them works. We are using version 3.6 of Commons Net. Sometimes it hangs while uploading a file, sometimes while checking the existence of a directory. The maximum file size is around 400 MB, but sometimes it hangs even for a small file of less than 1 KB. Below is the fragment of code:
public boolean uploadData(String inputFilePath, String destinationFolderName) {
if (StringUtil.isNullOrBlank(inputFilePath) || StringUtil.isNullOrBlank(destinationFolderName)) {
LOGGER.error("Invalid parameters to uploadData. Aborting...");
return false;
}
boolean result = false;
FTPSClient ftpClient = getFTPSClient();
if (ftpClient == null) {
logFTPConnectionError();
return false;
}
try {
loginToFTPServer(ftpClient);
result = uploadFileToFTPServer(ftpClient, inputFilePath, destinationFolderName);
} catch (Exception e) {
logErrorUploadingFile(inputFilePath, e);
return false;
} finally {
try {
logoutFromFTPServer(ftpClient);
} catch (Exception e) {
logErrorUploadingFile(inputFilePath, e);
result = false;
}
}
return result;
}
private FTPSClient getFTPSClient() {
FTPSClient ftpClient = null;
try {
ftpClient = new FTPSClient();
LOGGER.debug("Connecting to FTP server...");
ftpClient.setConnectTimeout(connectTimeOut);
ftpClient.connect(server);
int reply = ftpClient.getReplyCode();
if (!FTPReply.isPositiveCompletion(reply)) {
ftpClient.disconnect();
LOGGER.error("Could not connect to FTP server. Aborting.");
return null;
}
} catch (Exception e) {
LOGGER.error("Could not connect to FTP server.", e);
return null;
}
return ftpClient;
}
private void loginToFTPServer(FTPSClient ftpClient) throws Exception {
ftpClient.setDataTimeout(DATA_TIMEOUT);
ftpClient.login(ftpUserName, ftpPassword);
ftpClient.enterLocalPassiveMode();
ftpClient.setFileType(FTP.BINARY_FILE_TYPE);
LOGGER.debug("FTP Client Buffer Size Before:" + ftpClient.getBufferSize());
ftpClient.setBufferSize(BUFFER_SIZE);
LOGGER.debug("FTP Client Buffer Size After:" + ftpClient.getBufferSize());
ftpClient.execPBSZ(0);
ftpClient.execPROT("P");
ftpClient.setControlKeepAliveTimeout(300);
LOGGER.debug("Logged into FTP server.");
}
private void logoutFromFTPServer(FTPSClient ftpClient) throws Exception {
LOGGER.debug("Logging out from FTP server.");
ftpClient.logout();
ftpClient.disconnect();
LOGGER.debug("FTP server connection closed.");
}
private boolean uploadFileToFTPServer(FTPSClient ftpClient, String inputFilePath, String destinationFolderName) {
boolean result = false;
String remoteLocationFile;
File ftpFile = new File(inputFilePath);
try (InputStream inputStream = new FileInputStream(ftpFile)) {
String fileName = ftpFile.getName();
remoteLocationFile = (destinationFolderName == null || destinationFolderName.isEmpty())
? ftpFile.getName()
: destinationFolderName + File.separator + fileName;
LOGGER.info("Storing file " + ftpFile.getName() + " of size "
+ ftpFile.length() + " in folder " + remoteLocationFile);
result = ftpClient.storeFile(remoteLocationFile, inputStream);
if(result) {
LOGGER.info("Successfully stored file " + ftpFile.getName() + " in folder " + remoteLocationFile);
} else {
LOGGER.error("Unable to store file " + ftpFile.getName() + " in folder " + remoteLocationFile);
}
return result;
} catch (Exception e) {
logErrorUploadingFile(inputFilePath, e);
}
return result;
}
The application is hosted in Apache Tomcat 8. What other causes of this issue could there be, and how should we fix them? This is crucial functionality of our application, and we may even consider using an alternate API if that is more stable. Please suggest.
Adding ftpClient.setSoTimeout(20000); has fixed the issue.
Adding an enterLocalPassiveMode call right before the retrieveFile call should solve this issue.
You also need to add
ftpClient.setControlKeepAliveTimeout(300);
or check this code, which will resolve the hanging issue.
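Putting the suggestions above together, a hedged sketch of where those calls would go in the client setup (the numeric values are illustrative, not recommendations):
import java.io.IOException;
import org.apache.commons.net.ftp.FTPSClient;

public class FtpTimeoutSketch {
    // Combines the timeout suggestions from the answers above.
    public static FTPSClient connect(String server, String user, String password)
            throws IOException {
        FTPSClient ftpClient = new FTPSClient();
        ftpClient.setConnectTimeout(30000);        // fail connect() after 30 s
        ftpClient.connect(server);
        // setSoTimeout needs an established socket, so it goes after connect().
        ftpClient.setSoTimeout(20000);             // read timeout on the control connection
        ftpClient.login(user, password);
        ftpClient.enterLocalPassiveMode();         // call again before each transfer
        ftpClient.setControlKeepAliveTimeout(300); // keep-alive during long transfers (seconds)
        return ftpClient;
    }
}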
I used Heroku with MongoDB to run Parse. I deployed parse-server-example and followed all of the migration guide.
I want to save a PNG file and then delete it.
There are some problems T_T
1. When I save an image file and look at it in MongoDB, the file name is different from the one I wanted to save.
2. When I delete it using Parse code on Android, fs.chunks and fs.files are left behind.
For the 1st problem, the code is below.
Java: Saving an image file
Drawable drawable = getResources().getDrawable(R.drawable.ic_loading);
Bitmap bitmap = ((BitmapDrawable) drawable).getBitmap();
ByteArrayOutputStream stream = new ByteArrayOutputStream();
bitmap.compress(Bitmap.CompressFormat.PNG, 100, stream);
byte[] img_write = stream.toByteArray();
ParseFile parseFile = new ParseFile("test.png", img_write);
testObject = new ParseObject("TestObject");
testObject.put("test", parseFile);
testObject.saveInBackground(new SaveCallback() {
@Override
public void done(ParseException e) {
if (e == null) {
Log.e("Write", "SUCCESS");
} else {
Log.e("Write", e.getMessage());
}
}
});
but when I retrieve the image, the value of the 'test' key is '32de5cf8c1b205a8e26367dae3b07ca3_test.png', although I named the ParseFile "test.png" and saved it under the 'test' key.
For the 2nd problem, the code is below.
query = ParseQuery.getQuery("TestObject");
query.whereContains("test", "test.png");
query.findInBackground(new FindCallback<ParseObject>() {
@Override
public void done(List<ParseObject> objects, ParseException e) {
if (e == null) {
objectId = objects.get(0).getObjectId();
query.getInBackground(objectId, new GetCallback<ParseObject>() {
@Override
public void done(ParseObject object, ParseException e) {
if (e == null) {
object.deleteInBackground(new DeleteCallback() {
@Override
public void done(ParseException e) {
if (e == null) {
Log.e("Delete", "SUCCESS");
} else {
Log.e("Delete", e.getMessage());
}
}
});
} else {
Log.e("getInBackground", e.getMessage());
}
}
});
} else {
Log.e("findInBackground", e.getMessage());
}
}
});
I used 'whereContains' because its name is '32de5cf8c1b205a8e26367dae3b07ca3_test.png'. And when I delete it, the 'TestObject' becomes null, but 'fs.files' and 'fs.chunks' are left behind. I know these are GridFS files. But is it correct that when I delete the object, the GridFS files are left behind?
Please help me with my problems... Thank you for reading my questions.
This question already has answers here:
Closed 10 years ago.
Possible Duplicate:
Running a .sql script using MySQL with JDBC
I have an SQL script file which contains 40-50 SQL statements. Is it possible to run this script file using JDBC?
This link might help you out: http://pastebin.com/f10584951.
Pasted below for posterity:
/*
* Slightly modified version of the com.ibatis.common.jdbc.ScriptRunner class
* from the iBATIS Apache project. Only removed dependency on Resource class
* and a constructor
*/
/*
* Copyright 2004 Clinton Begin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.sql.*;
/**
* Tool to run database scripts
*/
public class ScriptRunner {
private static final String DEFAULT_DELIMITER = ";";
private Connection connection;
private boolean stopOnError;
private boolean autoCommit;
private PrintWriter logWriter = new PrintWriter(System.out);
private PrintWriter errorLogWriter = new PrintWriter(System.err);
private String delimiter = DEFAULT_DELIMITER;
private boolean fullLineDelimiter = false;
/**
* Default constructor
*/
public ScriptRunner(Connection connection, boolean autoCommit,
boolean stopOnError) {
this.connection = connection;
this.autoCommit = autoCommit;
this.stopOnError = stopOnError;
}
public void setDelimiter(String delimiter, boolean fullLineDelimiter) {
this.delimiter = delimiter;
this.fullLineDelimiter = fullLineDelimiter;
}
/**
* Setter for logWriter property
*
* @param logWriter
* - the new value of the logWriter property
*/
public void setLogWriter(PrintWriter logWriter) {
this.logWriter = logWriter;
}
/**
* Setter for errorLogWriter property
*
* @param errorLogWriter
* - the new value of the errorLogWriter property
*/
public void setErrorLogWriter(PrintWriter errorLogWriter) {
this.errorLogWriter = errorLogWriter;
}
/**
* Runs an SQL script (read in using the Reader parameter)
*
* @param reader
* - the source of the script
*/
public void runScript(Reader reader) throws IOException, SQLException {
try {
boolean originalAutoCommit = connection.getAutoCommit();
try {
if (originalAutoCommit != this.autoCommit) {
connection.setAutoCommit(this.autoCommit);
}
runScript(connection, reader);
} finally {
connection.setAutoCommit(originalAutoCommit);
}
} catch (IOException e) {
throw e;
} catch (SQLException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error running script. Cause: " + e, e);
}
}
/**
* Runs an SQL script (read in using the Reader parameter) using the
* connection passed in
*
* @param conn
* - the connection to use for the script
* @param reader
* - the source of the script
* @throws SQLException
* if any SQL errors occur
* @throws IOException
* if there is an error reading from the Reader
*/
private void runScript(Connection conn, Reader reader) throws IOException,
SQLException {
StringBuffer command = null;
try {
LineNumberReader lineReader = new LineNumberReader(reader);
String line = null;
while ((line = lineReader.readLine()) != null) {
if (command == null) {
command = new StringBuffer();
}
String trimmedLine = line.trim();
if (trimmedLine.startsWith("--")) {
println(trimmedLine);
} else if (trimmedLine.length() < 1
|| trimmedLine.startsWith("//")) {
// Do nothing
} else if (trimmedLine.length() < 1
|| trimmedLine.startsWith("--")) {
// Do nothing
} else if (!fullLineDelimiter
&& trimmedLine.endsWith(getDelimiter())
|| fullLineDelimiter
&& trimmedLine.equals(getDelimiter())) {
command.append(line.substring(0, line
.lastIndexOf(getDelimiter())));
command.append(" ");
Statement statement = conn.createStatement();
println(command);
boolean hasResults = false;
if (stopOnError) {
hasResults = statement.execute(command.toString());
} else {
try {
statement.execute(command.toString());
} catch (SQLException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
}
}
if (autoCommit && !conn.getAutoCommit()) {
conn.commit();
}
ResultSet rs = statement.getResultSet();
if (hasResults && rs != null) {
ResultSetMetaData md = rs.getMetaData();
int cols = md.getColumnCount();
for (int i = 0; i < cols; i++) {
String name = md.getColumnLabel(i);
print(name + "\t");
}
println("");
while (rs.next()) {
for (int i = 0; i < cols; i++) {
String value = rs.getString(i);
print(value + "\t");
}
println("");
}
}
command = null;
try {
statement.close();
} catch (Exception e) {
// Ignore to workaround a bug in Jakarta DBCP
}
Thread.yield();
} else {
command.append(line);
command.append(" ");
}
}
if (!autoCommit) {
conn.commit();
}
} catch (SQLException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
throw e;
} catch (IOException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
throw e;
} finally {
conn.rollback();
flush();
}
}
private String getDelimiter() {
return delimiter;
}
private void print(Object o) {
if (logWriter != null) {
logWriter.print(o);
}
}
private void println(Object o) {
if (logWriter != null) {
logWriter.println(o);
}
}
private void printlnError(Object o) {
if (errorLogWriter != null) {
errorLogWriter.println(o);
}
}
private void flush() {
if (logWriter != null) {
logWriter.flush();
}
if (errorLogWriter != null) {
errorLogWriter.flush();
}
}
}
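Usage is then along these lines (the connection URL, credentials, and script path are illustrative):
import java.io.BufferedReader;
import java.io.FileReader;
import java.sql.Connection;
import java.sql.DriverManager;

public class ScriptRunnerDemo {
    public static void main(String[] args) throws Exception {
        // URL, credentials and script path are illustrative.
        Connection conn = DriverManager.getConnection(
                "jdbc:mysql://localhost/test", "user", "password");
        // autoCommit = false, stopOnError = true
        ScriptRunner runner = new ScriptRunner(conn, false, true);
        runner.runScript(new BufferedReader(new FileReader("schema.sql")));
        conn.close();
    }
}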
I use this bit of code to import sql statements created by mysqldump:
public static void importSQL(Connection conn, InputStream in) throws SQLException
{
Scanner s = new Scanner(in);
s.useDelimiter("(;(\r)?\n)|(--\n)");
Statement st = null;
try
{
st = conn.createStatement();
while (s.hasNext())
{
String line = s.next();
if (line.startsWith("/*!") && line.endsWith("*/"))
{
int i = line.indexOf(' ');
line = line.substring(i + 1, line.length() - " */".length());
}
if (line.trim().length() > 0)
{
st.execute(line);
}
}
}
finally
{
if (st != null) st.close();
}
}
Another option; this DOESN'T support comments, but it's very useful with the AmaterasERD DDL export for Apache Derby:
public void executeSqlScript(Connection conn, File inputFile) {
// Delimiter
String delimiter = ";";
// Create scanner
Scanner scanner;
try {
scanner = new Scanner(inputFile).useDelimiter(delimiter);
} catch (FileNotFoundException e1) {
e1.printStackTrace();
return;
}
// Loop through the SQL file statements
Statement currentStatement = null;
while(scanner.hasNext()) {
// Get statement
String rawStatement = scanner.next() + delimiter;
try {
// Execute statement
currentStatement = conn.createStatement();
currentStatement.execute(rawStatement);
} catch (SQLException e) {
e.printStackTrace();
} finally {
// Release resources
if (currentStatement != null) {
try {
currentStatement.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
currentStatement = null;
}
}
scanner.close();
}
Just read the file in and then pass the full contents of the SQL file to a PreparedStatement (if I remember correctly).
Addendum: you can also read the file, split it on ";", and then execute the pieces one by one in a loop. Don't forget to handle the comments and to re-append the ";".
You should be able to parse the SQL file into statements and run a single statement at a time. If you know that your file consists of simple insert/update/delete statements, you can use a semicolon as the statement delimiter. In the general case you face the task of writing a parser for your specific SQL dialect.
I had the same problem trying to execute an SQL script that creates an SQL database. Googling here and there, I found a Java class initially written by Clinton Begin which supports comments (see http://pastebin.com/P14HsYAG). I modified the file slightly to cater for triggers, where one has to change the default DELIMITER to something different. I've used that version of ScriptRunner (see http://pastebin.com/sb4bMbVv). Since an (open-source and free) SQLScriptRunner class is an absolutely necessary utility, it would be good to have some more input from developers, and hopefully we'll soon have a more stable version of it.
You can read the script line per line with a BufferedReader and append every line to a StringBuilder so that the script becomes one large string.
Then you can create a Statement object using JDBC and call statement.execute(stringBuilder.toString()).
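A minimal sketch of that approach (the file name is illustrative; note that many JDBC drivers accept only one statement per execute() call, so this suits single-statement scripts or drivers configured to allow multi-statement execution):
import java.io.BufferedReader;
import java.io.FileReader;
import java.io.IOException;
import java.sql.Connection;
import java.sql.SQLException;
import java.sql.Statement;

public class WholeScriptSketch {
    // Reads the whole script into one string and executes it in one call.
    public static void runScript(Connection connection, String path)
            throws IOException, SQLException {
        StringBuilder sb = new StringBuilder();
        BufferedReader reader = new BufferedReader(new FileReader(path));
        try {
            String line;
            while ((line = reader.readLine()) != null) {
                sb.append(line).append('\n');
            }
        } finally {
            reader.close();
        }
        Statement statement = connection.createStatement();
        try {
            statement.execute(sb.toString());
        } finally {
            statement.close();
        }
    }
}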
My Drools rule works fine when I don't have an agenda group, but if I set the focus I get the following error:
package drools;
import droolsexec.Message;
import droolsexec.Customer;
rule "Good Bye"
agenda-group "group1"
dialect "java"
when
message: Message( status =="GOODBYE" )
customer: Customer(name == "NEHA")
then
System.out.println( message.getStatus());
end
This is my rule, and I'm executing it with:
public class ExecuteDrools {
private static PackageBuilder pbuilder = new PackageBuilder();
private static StatefulSession sessionObject;
private static RuleBase rbase = RuleBaseFactory.newRuleBase();
public void runDrools(ArrayList list){
initialiseDrools();
initiliseMessageObject(list);
runRules();
}
private void initialiseDrools() {
//1. Read the DRL File and add to package builder
try {
Reader reader = new InputStreamReader(ExecuteDrools.class.getResourceAsStream("/HelloWorld.drl"));
pbuilder.addPackageFromDrl(reader);
} catch (DroolsParserException ex) {
Logger.getLogger(ExecuteDrools.class.getName()).log(Level.SEVERE, null, ex);
} catch (IOException ex) {
Logger.getLogger(ExecuteDrools.class.getName()).log(Level.SEVERE, null, ex);
}
//2. Check for any errors
PackageBuilderErrors errors = pbuilder.getErrors();
if (errors.getErrors().length > 0) {
System.out.println("Some errors exists in packageBuilder");
for (int i = 0; i < errors.getErrors().length; i++) {
System.out.println(errors.getErrors()[i]);
}
throw new IllegalArgumentException("Could not parse knowledge.");
}
//3. Add package to rule base
try {
rbase.addPackage(pbuilder.getPackage());
} catch (Exception e) {
System.out.println("Error: "+ e);
}
}
private void initiliseMessageObject(ArrayList list) {
sessionObject = rbase.newStatefulSession();
Iterator itr = list.iterator();
while(itr.hasNext()){
sessionObject.insert(itr.next());
}
}
private void runRules() {
sessionObject.getAgenda().getAgendaGroup("group2").setFocus();
sessionObject.fireAllRules();
}
}
I'm getting the following error:
Exception in thread "main" java.lang.UnsupportedOperationException
at org.drools.common.BinaryHeapQueueAgendaGroup.setFocus(BinaryHeapQueueAgendaGroup.java:156)
at droolsexec.ExecuteDrools.runRules(ExecuteDrools.java:83)
at droolsexec.ExecuteDrools.runDrools(ExecuteDrools.java:36)
at droolsexec.MainClass.executeRules(MainClass.java:23)
at droolsexec.MainClass.main(MainClass.java:9)
You do not have an agenda group "group2" in your DRL file; you named it "group1".
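So either rename the group in the DRL to "group2", or set the focus to the group the rule actually declares; for the rule as posted, runRules() would become:
private void runRules() {
    // The rule declares agenda-group "group1", so set focus to that group:
    sessionObject.getAgenda().getAgendaGroup("group1").setFocus();
    sessionObject.fireAllRules();
}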
I am creating a webpage with a CellTable. I need to feed this table with data from an HBase table.
I have written a method to retrieve data from the HBase table and tested it.
But when I call that method as a GWT asynchronous RPC method, the RPC call succeeds but returns nothing; in my case it returns an empty list, and the alert box shows the list's size as 0.
Following is the related code.
Please help.
greetingService.getDeviceIDData(new AsyncCallback<List<DeviceDriverBean>>(){
public void onFailure(Throwable caught) {
// Show the RPC error message to the user
System.out.println("RPC Call failed");
Window.alert("Data : RPC call failed");
}
public void onSuccess(List<DeviceDriverBean> result) {
//on success do something
Window.alert("Data : RPC call successful");
//deviceDataList.addAll(result);
Window.alert("Result size: " +result.size());
// Add a text column to show the driver name.
TextColumn<DeviceDriverBean> nameColumn = new TextColumn<DeviceDriverBean>() {
@Override
public String getValue(DeviceDriverBean object) {
Window.alert(object.getName());
return object.getName();
}
};
table.addColumn(nameColumn, "Name");
// Add a text column to show the device id
TextColumn<DeviceDriverBean> deviceidColumn = new TextColumn<DeviceDriverBean>() {
@Override
public String getValue(DeviceDriverBean object) {
return object.getDeviceId();
}
};
table.addColumn(deviceidColumn, "Device ID");
table.setRowCount(result.size(), true);
// more code here to add columns in celltable
// Push the data into the widget.
table.setRowData(0, result);
SimplePager pager = new SimplePager();
pager.setDisplay(table);
VerticalPanel vp = new VerticalPanel();
vp.add(table);
vp.add(pager);
// Add it to the root panel.
RootPanel.get("datagridContainer").add(vp);
}
});
Code to retrieve data from hbase (server side code)
public List<DeviceDriverBean> getDeviceIDData()
throws IllegalArgumentException {
List<DeviceDriverBean> deviceidList = new ArrayList<DeviceDriverBean>();
// Escape data from the client to avoid cross-site script
// vulnerabilities.
/*
* input = escapeHtml(input); userAgent = escapeHtml(userAgent);
*
* return "Hello, " + input + "!<br><br>I am running " + serverInfo +
* ".<br><br>It looks like you are using:<br>" + userAgent;
*/
try {
Configuration config = HbaseConnectionSingleton.getInstance()
.HbaseConnect();
HTable testTable = new HTable(config, "driver_details");
byte[] family = Bytes.toBytes("details");
Scan scan = new Scan();
int cnt = 0;
ResultScanner rs = testTable.getScanner(scan);
for (Result r = rs.next(); r != null; r = rs.next()) {
DeviceDriverBean deviceDriverBean = new DeviceDriverBean();
byte[] rowid = r.getRow(); // Category, Date, Sentiment
NavigableMap<byte[], byte[]> map = r.getFamilyMap(family);
Iterator<Entry<byte[], byte[]>> itrt = map.entrySet()
.iterator();
deviceDriverBean.setDeviceId(Bytes.toString(rowid));
while (itrt.hasNext()) {
Entry<byte[], byte[]> entry = itrt.next();
//cnt++;
//System.out.println("Count : " + cnt);
byte[] qual = entry.getKey();
byte[] val = entry.getValue();
if (Bytes.toString(qual).equalsIgnoreCase("account_number")) {
deviceDriverBean.setAccountNo(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("make")) {
deviceDriverBean.setMake(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("model")) {
deviceDriverBean.setModel(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("driver_name")) {
deviceDriverBean.setName(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("premium")) {
deviceDriverBean.setPremium(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("year")) {
deviceDriverBean.setYear(Bytes.toString(val));
} else {
System.out.println("No match found");
}
/*
* System.out.println(Bytes.toString(rowid) + " " +
* Bytes.toString(qual) + " " + Bytes.toString(val));
*/
}
deviceidList.add(deviceDriverBean);
}
}
catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
catch (Exception e) {
// System.out.println("Message: "+e.getMessage());
e.printStackTrace();
}
return deviceidList;
}
Could this be lazy fetching on the server side by HBase? That would mean that if you just return the list, HBase won't get a trigger to actually read it and you will simply get an empty list. I don't know the correct solution; in the past I've seen a similar problem on GAE, which could be solved by simply asking for the size of the list just before returning it to the client.
I don't have the exact answer, but I have some advice: in a similar situation I put in my own traces to check every step of my program.
On the server side, before the return, put: System.out.println("size of table=" + deviceidList.size());
You can also put this trace inside the loop that fills deviceidList.