PostgreSQL JDBC query: Include .sql file using \i [duplicate]

Possible Duplicate:
Running a .sql script using MySQL with JDBC
I have an SQL script file which contains 40-50 SQL statements. Is it possible to run this script file using JDBC?

This link might help you out: http://pastebin.com/f10584951.
Pasted below for posterity:
/*
* Slightly modified version of the com.ibatis.common.jdbc.ScriptRunner class
* from the iBATIS Apache project. Only removed dependency on Resource class
* and a constructor
*/
/*
* Copyright 2004 Clinton Begin
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
import java.io.IOException;
import java.io.LineNumberReader;
import java.io.PrintWriter;
import java.io.Reader;
import java.sql.*;
/**
* Tool to run database scripts
*/
public class ScriptRunner {
private static final String DEFAULT_DELIMITER = ";";
private Connection connection;
private boolean stopOnError;
private boolean autoCommit;
private PrintWriter logWriter = new PrintWriter(System.out);
private PrintWriter errorLogWriter = new PrintWriter(System.err);
private String delimiter = DEFAULT_DELIMITER;
private boolean fullLineDelimiter = false;
/**
* Default constructor
*/
public ScriptRunner(Connection connection, boolean autoCommit,
boolean stopOnError) {
this.connection = connection;
this.autoCommit = autoCommit;
this.stopOnError = stopOnError;
}
public void setDelimiter(String delimiter, boolean fullLineDelimiter) {
this.delimiter = delimiter;
this.fullLineDelimiter = fullLineDelimiter;
}
/**
* Setter for logWriter property
*
* @param logWriter
* - the new value of the logWriter property
*/
public void setLogWriter(PrintWriter logWriter) {
this.logWriter = logWriter;
}
/**
* Setter for errorLogWriter property
*
* @param errorLogWriter
* - the new value of the errorLogWriter property
*/
public void setErrorLogWriter(PrintWriter errorLogWriter) {
this.errorLogWriter = errorLogWriter;
}
/**
* Runs an SQL script (read in using the Reader parameter)
*
* @param reader
* - the source of the script
*/
public void runScript(Reader reader) throws IOException, SQLException {
try {
boolean originalAutoCommit = connection.getAutoCommit();
try {
if (originalAutoCommit != this.autoCommit) {
connection.setAutoCommit(this.autoCommit);
}
runScript(connection, reader);
} finally {
connection.setAutoCommit(originalAutoCommit);
}
} catch (IOException e) {
throw e;
} catch (SQLException e) {
throw e;
} catch (Exception e) {
throw new RuntimeException("Error running script. Cause: " + e, e);
}
}
/**
* Runs an SQL script (read in using the Reader parameter) using the
* connection passed in
*
* @param conn
* - the connection to use for the script
* @param reader
* - the source of the script
* @throws SQLException
* if any SQL errors occur
* @throws IOException
* if there is an error reading from the Reader
*/
private void runScript(Connection conn, Reader reader) throws IOException,
SQLException {
StringBuffer command = null;
try {
LineNumberReader lineReader = new LineNumberReader(reader);
String line = null;
while ((line = lineReader.readLine()) != null) {
if (command == null) {
command = new StringBuffer();
}
String trimmedLine = line.trim();
if (trimmedLine.startsWith("--")) {
println(trimmedLine);
} else if (trimmedLine.length() < 1
|| trimmedLine.startsWith("//")) {
// Skip blank lines and // comments (-- comments were printed above)
} else if (!fullLineDelimiter
&& trimmedLine.endsWith(getDelimiter())
|| fullLineDelimiter
&& trimmedLine.equals(getDelimiter())) {
command.append(line.substring(0, line
.lastIndexOf(getDelimiter())));
command.append(" ");
Statement statement = conn.createStatement();
println(command);
boolean hasResults = false;
if (stopOnError) {
hasResults = statement.execute(command.toString());
} else {
try {
statement.execute(command.toString());
} catch (SQLException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
}
}
if (autoCommit && !conn.getAutoCommit()) {
conn.commit();
}
ResultSet rs = statement.getResultSet();
if (hasResults && rs != null) {
ResultSetMetaData md = rs.getMetaData();
int cols = md.getColumnCount();
// JDBC column and result-set indices are 1-based
for (int i = 1; i <= cols; i++) {
String name = md.getColumnLabel(i);
print(name + "\t");
}
println("");
while (rs.next()) {
for (int i = 1; i <= cols; i++) {
String value = rs.getString(i);
print(value + "\t");
}
println("");
}
}
command = null;
try {
statement.close();
} catch (Exception e) {
// Ignore to workaround a bug in Jakarta DBCP
}
Thread.yield();
} else {
command.append(line);
command.append(" ");
}
}
if (!autoCommit) {
conn.commit();
}
} catch (SQLException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
throw e;
} catch (IOException e) {
e.fillInStackTrace();
printlnError("Error executing: " + command);
printlnError(e);
throw e;
} finally {
conn.rollback();
flush();
}
}
private String getDelimiter() {
return delimiter;
}
private void print(Object o) {
if (logWriter != null) {
logWriter.print(o);
}
}
private void println(Object o) {
if (logWriter != null) {
logWriter.println(o);
}
}
private void printlnError(Object o) {
if (errorLogWriter != null) {
errorLogWriter.println(o);
}
}
private void flush() {
if (logWriter != null) {
logWriter.flush();
}
if (errorLogWriter != null) {
errorLogWriter.flush();
}
}
}
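
To answer the original question directly with this class, here is a minimal usage sketch (the JDBC URL, credentials, and script path are placeholders to adjust for your environment):

import java.io.FileReader;
import java.sql.Connection;
import java.sql.DriverManager;

public class RunScriptExample {
    public static void main(String[] args) throws Exception {
        // Placeholder connection details
        Connection conn = DriverManager.getConnection(
                "jdbc:postgresql://localhost:5432/mydb", "user", "password");
        try {
            // autoCommit = false, stopOnError = true
            ScriptRunner runner = new ScriptRunner(conn, false, true);
            runner.runScript(new FileReader("/path/to/script.sql"));
        } finally {
            conn.close();
        }
    }
}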

I use this bit of code to import sql statements created by mysqldump:
public static void importSQL(Connection conn, InputStream in) throws SQLException
{
Scanner s = new Scanner(in);
s.useDelimiter("(;(\r)?\n)|(--\n)");
Statement st = null;
try
{
st = conn.createStatement();
while (s.hasNext())
{
String line = s.next();
if (line.startsWith("/*!") && line.endsWith("*/"))
{
int i = line.indexOf(' ');
line = line.substring(i + 1, line.length() - " */".length());
}
if (line.trim().length() > 0)
{
st.execute(line);
}
}
}
finally
{
if (st != null) st.close();
}
}
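
For reference, a hypothetical invocation of importSQL (URL, credentials, and dump path are placeholders):

import java.io.FileInputStream;
import java.io.InputStream;
import java.sql.Connection;
import java.sql.DriverManager;

public static void main(String[] args) throws Exception {
    try (Connection conn = DriverManager.getConnection(
            "jdbc:postgresql://localhost:5432/mydb", "user", "password");
         InputStream in = new FileInputStream("/path/to/dump.sql")) {
        importSQL(conn, in);
    }
}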

Another option; this one DOESN'T support comments, but it is very useful with AmaterasERD DDL export for Apache Derby:
public void executeSqlScript(Connection conn, File inputFile) {
// Delimiter
String delimiter = ";";
// Create scanner
Scanner scanner;
try {
scanner = new Scanner(inputFile).useDelimiter(delimiter);
} catch (FileNotFoundException e1) {
e1.printStackTrace();
return;
}
// Loop through the SQL file statements
Statement currentStatement = null;
while(scanner.hasNext()) {
// Get statement
String rawStatement = scanner.next() + delimiter;
try {
// Execute statement
currentStatement = conn.createStatement();
currentStatement.execute(rawStatement);
} catch (SQLException e) {
e.printStackTrace();
} finally {
// Release resources
if (currentStatement != null) {
try {
currentStatement.close();
} catch (SQLException e) {
e.printStackTrace();
}
}
currentStatement = null;
}
}
scanner.close();
}

Just read the file in and then use a PreparedStatement with the full SQL file contents in it (if I remember correctly).
Addendum: you can also read the file, split it on ";", and then execute each statement in a loop.
Don't forget about comments, and add the ";" back again.
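
A rough sketch of that split-and-execute idea (deliberately naive: it will break on semicolons inside string literals, and comments must be stripped first, as noted above):

import java.nio.file.Files;
import java.nio.file.Paths;
import java.sql.Connection;
import java.sql.Statement;

public static void runStatements(Connection conn, String path) throws Exception {
    // Read the whole script, then split on ";" and execute each piece
    String script = new String(Files.readAllBytes(Paths.get(path)), "UTF-8");
    try (Statement st = conn.createStatement()) {
        for (String sql : script.split(";")) {
            if (!sql.trim().isEmpty()) {
                st.execute(sql.trim());
            }
        }
    }
}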

You should be able to parse the SQL file into statements and run a single statement at a time. If you know that your file consists of simple insert/update/delete statements, you can use a semicolon as the statement delimiter. In the general case, you are facing the task of writing a parser for your specific SQL dialect.

I had the same problem trying to execute an SQL script that creates an SQL database. Googling here and there, I found a Java class initially written by Clinton Begin which supports comments (see http://pastebin.com/P14HsYAG). I modified the file slightly to cater for triggers, where one has to change the default DELIMITER to something different. I've used that version of ScriptRunner (see http://pastebin.com/sb4bMbVv). Since an (open source and free) SQLScriptRunner class is an absolutely necessary utility, it would be good to have some more input from developers, and hopefully we'll soon have a more stable version of it.

You can read the script line by line with a BufferedReader and append every line to a StringBuilder so that the script becomes one large string.
Then you can create a Statement object using JDBC and call statement.execute(stringBuilder.toString()).
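
A sketch of that approach; note that executing the whole file as a single statement only works if the driver accepts multiple statements per execute (the PostgreSQL JDBC driver generally does, but verify for your driver):

import java.io.BufferedReader;
import java.io.FileReader;
import java.sql.Connection;
import java.sql.Statement;

public static void executeWholeScript(Connection conn, String path) throws Exception {
    StringBuilder stringBuilder = new StringBuilder();
    try (BufferedReader reader = new BufferedReader(new FileReader(path))) {
        String line;
        while ((line = reader.readLine()) != null) {
            stringBuilder.append(line).append('\n');
        }
    }
    try (Statement statement = conn.createStatement()) {
        statement.execute(stringBuilder.toString());
    }
}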

Related

Review of Connection handling and Data access layer using C#, sql server compact 3.5

I am developing a standalone application, using SQL Server Compact 3.5 SP2, which runs in process. No database writes are involved; it's purely a reporting application. I have read many articles about reusing open DB connections in the case of SQL Compact (connection pooling) due to its different behavior from SQL Server.
Quoting the comments from a quiz opened by Erik Ejlskov Jensen (Link), where an open-early, close-late strategy for SQL Server Compact databases is discussed: based on this, and with my limited experience, I have implemented a not-so-complex connection handling + data access layer. Basically I am unsure whether I am writing it in a recommended way. Could anyone please point me in the right direction, noting room for improvement in this connection handling approach I have written?
The DbConnection class
public class FkDbConnection
{
private static SqlCeConnection conn;
private static DataTable table;
private static SqlCeCommand cmd;
~FkDbConnection() { conn = null; }
//This will be called when the main winform loads and connection will be open as long as the main form is open
public static string ConnectToDatabase()
{
try {
conn = new SqlCeConnection(ConfigurationManager.ConnectionStrings["Connstr"].ConnectionString);
if (conn.State == ConnectionState.Closed || conn.State == ConnectionState.Broken)
{
conn.Open();
}
return "Connected";
}
catch(SqlCeException e) { return e.Message; }
}
public static void Disconnect()
{
if (conn.State == ConnectionState.Open || conn.State == ConnectionState.Connecting || conn.State == ConnectionState.Fetching)
{
conn.Close();
conn.Dispose();
//conn = null; //does conn have to be set to null?
}
//else the connection might be already closed due to failure in opening it
else if (conn.State == ConnectionState.Closed) {
conn.Dispose();
//conn = null; //does conn have to be set to null?
}
}
/// <summary>
/// Generic Select DataAccess
/// </summary>
/// <param name="sql"> the sql query which needs to be executed by command object </param>
public static DataTable ExecuteSelectCommand(SqlCeCommand comm)
{
if (conn != null && conn.State == ConnectionState.Open)
{
#region block using datareader
using (table = new DataTable())
{
//using statement needed for reader? It's closed below
using (SqlCeDataReader reader = comm.ExecuteReader())
{
table.Load(reader);
reader.Close(); //is it needed?
}
}
#endregion
# region block using dataadpater
//I read DataReader is faster?
//using (SqlCeDataAdapter sda = new SqlCeDataAdapter(cmd))
//{
// using (table = new DataTable())
// {
// sda.Fill(table);
// }
//}
#endregion
//}
}
return table;
}
/// <summary>
/// Get Data
/// </summary>
/// <param name="selectedMPs"> string csv, generated from a list of selected posts(checkboxes) from the UI, which forms the field names used in SELECT </param>
public static DataTable GetDataPostsCars(string selectedMPs)
{
DataTable dt;
//I know this is not secure SQL; passing column names to SELECT as parameters will be a separate question
string sql = string.Format(
"SELECT " + selectedMPs + " "+
"FROM GdRateFixedPosts");
using (cmd = new SqlCeCommand(sql,conn))
{
cmd.CommandType = CommandType.Text;
//cmd.Parameters.Add("#fromDateTime",DbType.DateTime);
//cmd.Parameters.Add("#toDateTime",DbType.DateTime);
dt = ExecuteSelectCommand(cmd);
}
return dt;
}
}
The main UI (Form), in which the connection is opened so that it stays open throughout. Two other reporting forms are opened from here. Closing the main form closes them all, at which point the connection is closed and disposed.
private void FrmMain_Load(object sender, EventArgs e)
{
string str = FkDbConnection.ConnectToDatabase();
statStDbConnection.Items[0].Text = str;
}
private void FrmMain_FormClosing(object sender, FormClosingEventArgs e)
{
FkDbConnection.Disconnect();
}
Comments and improvements on this connection class are much appreciated. See also my questions inline in the code.
Thank you.
Updated classes as per Erik's suggestion, with a correction to ExecuteSelectCommand() and an additional class which will instantiate command objects in "using" blocks and pass data to the UI. I intend to add separate GetDataForFormX() methods, since the dynamic SQL for each form may differ. Hope this is OK?
Correction to Erik's code:
public static DataTable ExecuteSelectCommand(SqlCeCommand comm)
{
var table = new DataTable();
if (conn != null && conn.State == ConnectionState.Open)
{
comm.Connection = conn;
using (SqlCeDataReader reader = comm.ExecuteReader())
{
table.Load(reader);
}
}
return table;
}
New FkDataAccess class for passing Data to UI
public class FkDataAccess
{
public static DataTable GetDataPostsCars(string selectedMPs)
{
var table = new DataTable();
string sql = string.Format(
"SELECT " + selectedMPs + " " +
"FROM GdRateFixedPosts");
if (FkDbConnection.conn != null && FkDbConnection.conn.State == ConnectionState.Open)
{
using (SqlCeCommand cmd = new SqlCeCommand(sql, FkDbConnection.conn))
{
cmd.CommandType = CommandType.Text;
//cmd.Parameters.Add("@fromDateTime",DbType.DateTime);
table = FkDbConnection.ExecuteSelectCommand(cmd);
}
}
return table;
}
//public static DataTable GetDataXY(string selectedvals)
// and so on
}
Too much code in your data access class makes it unreadable and hard to maintain.
The SqlCeConnection object will be disposed when you close it (and when the app closes).
You cannot dispose the DataTable if you want to use it elsewhere, and it is a completely managed object anyway.
It is a good pattern to limit your classes to a single responsibility.
public class FkDbConnection
{
private static SqlCeConnection conn;
~FkDbConnection() { conn = null; }
//This will be called when the main winform loads and connection will be open as long as the main form is open
public static void ConnectToDatabase()
{
// Handle failure to open in the caller
conn = new SqlCeConnection(ConfigurationManager.ConnectionStrings["Connstr"].ConnectionString);
conn.Open();
}
public static void Disconnect()
{
if (conn != null)
{
conn.Close();
}
}
public static DataTable ExecuteSelectCommand(SqlCeCommand comm)
{
var table = new DataTable();
if (conn != null && conn.State == ConnectionState.Open)
{
comm.Connection = conn;
using (SqlCeDataReader reader = comm.ExecuteReader())
{
table.Load(reader);
}
}
return table;
}
private void FrmMain_Load(object sender, EventArgs e)
{
try
{
FkDbConnection.ConnectToDatabase();
statStDbConnection.Items[0].Text = "Connected";
}
catch (Exception ex)
{
//Inform the user that we cannot proceed, what she can do to remedy it, and exit
}
}
private void FrmMain_FormClosing(object sender, FormClosingEventArgs e)
{
FkDbConnection.Disconnect();
}

Testing that an Object exists in a Bucket?

I am trying to figure out the most efficient way to test for the existence of an Object in a Bucket in Google Cloud Storage.
This is what I am doing now:
try
{
final GcsFileMetadata md = GCS_SERVICE.getMetadata(bm.getFilename());
if (md == null)
{
// do what I need to do here!
}
}
catch (IOException e)
{
L.error(e.getMessage());
}
Because, according to the documentation, it returns null if the GcsFilename does not exist.
/**
* @param filename The name of the file that you wish to read the metadata of.
* @return The metadata associated with the file, or null if the file does not exist.
* @throws IOException If for any reason the file can't be read.
*/
GcsFileMetadata getMetadata(GcsFilename filename) throws IOException;
Using .list() on a Bucket and checking for .contains() sounds expensive, but it is explicit in its intention.
Personally I think testing for null to check if something exists is inelegant and not as direct as GCS_SERVICE.objectExists(fileName); but I guess I don't get to design the GCS client API. I will just create a method to do this test in my API.
Is there a more efficient (as in time) or more self-documenting way to do this test?
Solution
Here is the working solution I ended up with:
@Nonnull
protected Class<T> getEntityType() { return (Class<T>) new TypeToken<T>(getClass()) {}.getRawType(); }
/**
* purge ObjectMetadata records that don't have matching Objects in the GCS anymore.
*/
public void purgeOrphans()
{
ofy().transact(new Work<VoidWork>()
{
@Override
public VoidWork run()
{
try
{
for (final T bm : ofy().load().type(ObjectMetadataEntityService.this.getEntityType()).iterable())
{
final GcsFileMetadata md = GCS_SERVICE.getMetadata(bm.getFilename());
if (md == null)
{
ofy().delete().entity(bm);
}
}
}
catch (IOException e)
{
L.error(e.getMessage());
}
return null;
}
});
}
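
The wrapper I said I'd create can be as thin as this (a sketch built on the documented null return; objectExists is my own name, not part of the GCS client API):

public static boolean objectExists(GcsFilename filename) throws IOException {
    // getMetadata() is documented to return null when the object does not exist
    return GCS_SERVICE.getMetadata(filename) != null;
}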
They added the file.exists() method:
const fileExists = () => file.exists().then((data) => {
console.log(data[0]);
});
fileExists();
// Logs a boolean to the console:
// true if the file exists, false if the file doesn't exist.

Displaying an uploaded image: 404 error

I'm trying to upload an image from the signup form, using InscriptionForm.java to check the respective fields. The upload itself works fine (thanks to BalusC's tutorials), but when I try to display the uploaded image, I can't; looking at the HTTP network monitor, I get a 404 error as the response. Also, when I enter the link http://localhost:8080/projetForum/images/bd/Id_21082013184506.png directly into the browser's address bar, I get a 404 error.
InscriptionForm.java
public final class InscriptionForm {
private static final String CHAMP_EMAIL = "email";
private static final String CHAMP_PASS = "motdepasse";
private static final String CHAMP_CONF = "confirmation";
private static final String CHAMP_NOM = "nom";
private static final String CHAMP_DESC = "description";
private static final String CHAMP_LOC = "localisation";
private static final String CHAMP_SW = "siteweb";
public static final String CHAMP_IMAGE = "avatar";
public static final String CHAMP_JOURDENAISS = "jourdenaissance";
public static final String CHAMP_MOISDENAISS = "moisdenaissance";
public static final String CHAMP_ANNEEDENAISS = "anneedenaissance";
public static final String CHAMP_DATEDENAISS = "datedenaissance";
public static final int TAILLE_TAMPON = 10240; // 10 ko
public static final String CHEMIN = "E:\\Bibliothèque logicielle\\workspace\\projetForum\\WebContent\\images\\bd\\";
private String resultat;
private static Map<String, String> erreurs = new HashMap<String, String>();
public String getResultat() {
return resultat;
}
public Map<String, String> getErreurs() {
return erreurs;
}
public Utilisateur inscrireUtilisateur(HttpServletRequest request) {
String email = getValeurChamp(request, CHAMP_EMAIL);
String motDePasse = getValeurChamp(request, CHAMP_PASS);
String confirmation = getValeurChamp(request, CHAMP_CONF);
String nom = getValeurChamp(request, CHAMP_NOM);
String description = getValeurChamp(request, CHAMP_DESC);
String localisation = getValeurChamp(request, CHAMP_LOC);
String siteweb = getValeurChamp(request, CHAMP_SW);
String image = getValeurChamp(request, CHAMP_IMAGE);
String jourdenaissance = getValeurChamp(request, CHAMP_JOURDENAISS);
String moisdenaissance = getValeurChamp(request, CHAMP_MOISDENAISS);
String anneedenaissance = getValeurChamp(request, CHAMP_ANNEEDENAISS);
Integer datedenaissance = null;
try {
validationEmail(email);
} catch (Exception e) {
setErreur(CHAMP_EMAIL, e.getMessage());
}
try {
validationMotsDePasse(motDePasse, confirmation);
} catch (Exception e) {
setErreur(CHAMP_PASS, e.getMessage());
}
try {
validationNom(nom);
} catch (Exception e) {
setErreur(CHAMP_NOM, e.getMessage());
}
try {
image = validationImage(request, CHEMIN);
} catch (Exception e) {
setErreur(CHAMP_IMAGE, e.getMessage());
}
if (!jourdenaissance.equals("defaut")
&& !moisdenaissance.equals("defaut")
&& !anneedenaissance.equals("defaut")) {
try {
validationDateDeNaiss(Integer.parseInt(jourdenaissance),
Integer.parseInt(moisdenaissance),
Integer.parseInt(anneedenaissance));
} catch (Exception e) {
setErreur(CHAMP_DATEDENAISS, e.getMessage());
}
datedenaissance = Integer.parseInt((jourdenaissance + ""
+ moisdenaissance + "" + anneedenaissance));
}
// Flag an error only when the date of birth is partially filled in
else if (!(jourdenaissance.equals("defaut")
&& moisdenaissance.equals("defaut")
&& anneedenaissance.equals("defaut"))) {
setErreur(CHAMP_DATEDENAISS,
"Merci de vérifier votre date de naissance.");
}
Utilisateur utilisateur = new Utilisateur(email, motDePasse, nom,
localisation, siteweb, description, datedenaissance, image);
if (erreurs.isEmpty()) {
resultat = "Succès de l'inscription.";
createORupdate(utilisateur, request);
} else {
resultat = "Échec de l'inscription.";
}
return utilisateur;
}
private String validationImage(HttpServletRequest request, String chemin)
throws Exception {
File uploadFilePath = new File(chemin);
// Validate file.
Object fileObject = request.getAttribute("avatar");
if (fileObject == null) {
// No file uploaded.
throw new Exception("Please select file to upload.");
} else if (fileObject instanceof FileUploadException) {
// File upload is failed.
FileUploadException fileUploadException = (FileUploadException) fileObject;
throw new Exception(fileUploadException.getMessage());
}
// If there are no errors, proceed with writing file.
FileItem fileItem = (FileItem) fileObject;
// Get file name from uploaded file and trim path from it.
// Some browsers (e.g. IE, Opera) also send the path, which is
// completely irrelevant.
String fileName = FilenameUtils.getName(fileItem.getName());
// Prepare filename prefix and suffix for an unique filename in upload
// folder.
String prefix = FilenameUtils.getBaseName(fileName) + "_";
String suffix = "." + FilenameUtils.getExtension(fileName);
File file = null;
try {
// Prepare unique local file based on file name of uploaded file.
file = File.createTempFile(prefix, suffix, uploadFilePath);
// Write uploaded file to local file.
fileItem.write(file);
} catch (Exception e) {
// Can be thrown by uniqueFile() and FileItem#write().
throw new Exception(e.getMessage());
}
return file.getName();
}
private void setErreur(String champ, String message) {
erreurs.put(champ, message);
}
private static String getValeurChamp(HttpServletRequest request,
String nomChamp) {
String valeur = request.getParameter(nomChamp);
if (valeur == null || valeur.trim().length() == 0) {
return null;
} else {
return valeur;
}
}
private static void createORupdate(Utilisateur u, HttpServletRequest request) {
Session s = HibernateUtils.getSession();
Transaction tx = s.beginTransaction();
Query q = s
.createQuery("from Utilisateur where Utilisateur_email = :email");
q.setString("email", u.getEmail());
Utilisateur userUpdt = (Utilisateur) q.uniqueResult();
if (userUpdt != null) {
userUpdt.setNom(u.getNom());
userUpdt.setEmail(u.getEmail());
userUpdt.setSiteweb(u.getSiteweb());
userUpdt.setLocalisation(u.getLocalisation());
userUpdt.setDescription(u.getDescription());
s.update(userUpdt);
} else {
SimpleDateFormat formater = new SimpleDateFormat(
"dd-MM-yyyy hh:mm:ss");
Date aujourdhui = new Date();
u.setDateInscrit(formater.format(aujourdhui));
s.persist(u);
}
tx.commit();
}
private void validationEmail(String email) throws Exception {
UtilisateurDAO<Utilisateur, String> ud = new UtilisateurDAO<Utilisateur, String>();
if (ud.findByID(email) != null)
throw new Exception("Adresse mail déjà utilisée.");
else if (email == null || ud.findByID(email) != null
|| !email.matches("([^.@]+)(\\.[^.@]+)*@([^.@]+\\.)+([^.@]+)")) {
throw new Exception("Merci de saisir une adresse mail valide.");
}
}
private void validationDateDeNaiss(Integer jj, Integer mm, Integer aaaa)
throws Exception {
switch (mm) {
case 2:
// Reject day 30+ in February, and Feb 29 outside leap years
if (jj > 29 || (jj == 29
&& !((aaaa % 4 == 0 && aaaa % 100 != 0) || aaaa % 400 == 0)))
throw new Exception(
"Merci de vérifier votre date de naissance.");
break;
case 4:
if (jj == 31)
throw new Exception(
"Merci de vérifier votre date de naissance.");
break;
case 6:
if (jj == 31)
throw new Exception(
"Merci de vérifier votre date de naissance.");
break;
case 9:
if (jj == 31)
throw new Exception(
"Merci de vérifier votre date de naissance.");
break;
case 11:
if (jj == 31)
throw new Exception(
"Merci de vérifier votre date de naissance.");
break;
}
}
private void validationMotsDePasse(String motDePasse, String confirmation)
throws Exception {
if (motDePasse != null && confirmation != null) {
if (!motDePasse.equals(confirmation)) {
throw new Exception(
"Les mots de passe entrés sont différents, merci de les saisir à nouveau.");
} else if (motDePasse.length() < 6) {
throw new Exception(
"Les mots de passe doivent contenir au moins 6 caractères.");
}
} else {
throw new Exception(
"Merci de saisir et confirmer votre mot de passe.");
}
}
private static void validationNom(String nom) throws Exception {
ConfigFDAO<ConfigF, Integer> cfd = new ConfigFDAO<ConfigF, Integer>();
UtilisateurDAO<Utilisateur, String> ud = new UtilisateurDAO<Utilisateur, String>();
if (ud.findByNom(nom) != null)
throw new Exception("Nom d'utilisateur déjà utilisée.");
else if (nom == null
|| nom.length() < cfd.findByID(0).getPseudominsize()
|| nom.length() > cfd.findByID(0).getPseudomaxsize()) {
throw new Exception("Le nom d'utilisateur doit contenir au moins "
+ cfd.findByID(0).getPseudominsize() + " et au maximum "
+ cfd.findByID(0).getPseudomaxsize() + " caractères.");
}
}
private static String getNomFichier(Part part) {
for (String cd : part.getHeader("content-disposition").split(";")) {
if (cd.trim().startsWith("filename")) {
String filename = cd.substring(cd.indexOf('=') + 1).trim()
.replace("\"", "");
return filename.substring(filename.lastIndexOf('/') + 1)
.substring(filename.lastIndexOf('\\') + 1);
}
}
return null;
}
}
MultipartFilter.java
public class MultipartFilter implements Filter {
// Init
// ---------------------------------------------------------------------------------------
private long maxFileSize;
// Actions
// ------------------------------------------------------------------------------------
/**
* Configure the 'maxFileSize' parameter.
*
* @throws ServletException
* If 'maxFileSize' parameter value is not numeric.
* @see javax.servlet.Filter#init(javax.servlet.FilterConfig)
*/
public void init(FilterConfig filterConfig) throws ServletException {
// Configure maxFileSize.
String maxFileSize = filterConfig.getInitParameter("maxFileSize");
if (maxFileSize != null) {
if (!maxFileSize.matches("^\\d+$")) {
throw new ServletException(
"MultipartFilter 'maxFileSize' is not numeric.");
}
this.maxFileSize = Long.parseLong(maxFileSize);
}
}
/**
* Check the request type, and if it is an HttpServletRequest, then parse the
* request.
*
* @throws ServletException
* If parsing of the given HttpServletRequest fails.
* @see javax.servlet.Filter#doFilter(javax.servlet.ServletRequest,
* javax.servlet.ServletResponse, javax.servlet.FilterChain)
*/
public void doFilter(ServletRequest request, ServletResponse response,
FilterChain chain) throws ServletException, IOException {
// Check type request.
if (request instanceof HttpServletRequest) {
// Cast back to HttpServletRequest.
HttpServletRequest httpRequest = (HttpServletRequest) request;
// Parse HttpServletRequest.
HttpServletRequest parsedRequest = parseRequest(httpRequest);
// Continue with filter chain.
chain.doFilter(parsedRequest, response);
} else {
// Not a HttpServletRequest.
chain.doFilter(request, response);
}
}
/**
* @see javax.servlet.Filter#destroy()
*/
public void destroy() {
// I am a boring method.
}
// Helpers
// ------------------------------------------------------------------------------------
/**
* Parse the given HttpServletRequest. If the request is a multipart
* request, then all multipart request items will be processed, else the
* request will be returned unchanged. During the processing of all
* multipart request items, the name and value of each regular form field
* will be added to the parameterMap of the HttpServletRequest. The name and
* File object of each form file field will be added as attribute of the
* given HttpServletRequest. If a FileUploadException has occurred when the
* file size has exceeded the maximum file size, then the
* FileUploadException will be added as attribute value instead of the
* FileItem object.
*
* @param request
* The HttpServletRequest to be checked and parsed as multipart
* request.
* @return The parsed HttpServletRequest.
* @throws ServletException
* If parsing of the given HttpServletRequest fails.
*/
@SuppressWarnings("unchecked")
// ServletFileUpload#parseRequest() does not return generic type.
private HttpServletRequest parseRequest(HttpServletRequest request)
throws ServletException {
// Check if the request is actually a multipart/form-data request.
if (!ServletFileUpload.isMultipartContent(request)) {
// If not, then return the request unchanged.
return request;
}
// Prepare the multipart request items.
// I'd rather call the "FileItem" class "MultipartItem" instead or so.
// What a stupid name ;)
List<FileItem> multipartItems = null;
try {
// Parse the multipart request items.
multipartItems = new ServletFileUpload(new DiskFileItemFactory())
.parseRequest(request);
// Note: we could use ServletFileUpload#setFileSizeMax() here, but
// that would throw a
// FileUploadException immediately without processing the other
// fields. So we're
// checking the file size only if the items are already parsed. See
// processFileField().
} catch (FileUploadException e) {
throw new ServletException("Cannot parse multipart request: "
+ e.getMessage());
}
// Prepare the request parameter map.
Map<String, String[]> parameterMap = new HashMap<String, String[]>();
// Loop through multipart request items.
for (FileItem multipartItem : multipartItems) {
if (multipartItem.isFormField()) {
// Process regular form field (input
// type="text|radio|checkbox|etc", select, etc).
processFormField(multipartItem, parameterMap);
} else {
// Process form file field (input type="file").
processFileField(multipartItem, request);
}
}
// Wrap the request with the parameter map which we just created and
// return it.
return wrapRequest(request, parameterMap);
}
/**
* Process multipart request item as regular form field. The name and value
* of each regular form field will be added to the given parameterMap.
*
* @param formField
* The form field to be processed.
* @param parameterMap
* The parameterMap to be used for the HttpServletRequest.
*/
private void processFormField(FileItem formField,
Map<String, String[]> parameterMap) {
String name = formField.getFieldName();
String value = formField.getString();
String[] values = parameterMap.get(name);
if (values == null) {
// Not in parameter map yet, so add as new value.
parameterMap.put(name, new String[] { value });
} else {
// Multiple field values, so add new value to existing array.
int length = values.length;
String[] newValues = new String[length + 1];
System.arraycopy(values, 0, newValues, 0, length);
newValues[length] = value;
parameterMap.put(name, newValues);
}
}
/**
* Process multipart request item as file field. The name and FileItem
* object of each file field will be added as attribute of the given
* HttpServletRequest. If a FileUploadException has occurred when the file
* size has exceeded the maximum file size, then the FileUploadException
* will be added as attribute value instead of the FileItem object.
*
* @param fileField
* The file field to be processed.
* @param request
* The involved HttpServletRequest.
*/
private void processFileField(FileItem fileField, HttpServletRequest request) {
if (fileField.getName().length() <= 0) {
// No file uploaded.
request.setAttribute(fileField.getFieldName(), null);
} else if (maxFileSize > 0 && fileField.getSize() > maxFileSize) {
// File size exceeds maximum file size.
request.setAttribute(fileField.getFieldName(),
new FileUploadException(
"File size exceeds maximum file size of "
+ maxFileSize + " bytes."));
// Immediately delete temporary file to free up memory and/or disk
// space.
fileField.delete();
} else {
// File uploaded with good size.
request.setAttribute(fileField.getFieldName(), fileField);
}
}
// Utility (may be refactored to public utility class)
// ----------------------------------------
/**
* Wrap the given HttpServletRequest with the given parameterMap.
*
* @param request
* The HttpServletRequest in which the given parameterMap has to
* be wrapped.
* @param parameterMap
* The parameterMap to be wrapped in the given
* HttpServletRequest.
* @return The HttpServletRequest with the parameterMap wrapped in.
*/
private static HttpServletRequest wrapRequest(HttpServletRequest request,
final Map<String, String[]> parameterMap) {
return new HttpServletRequestWrapper(request) {
public Map<String, String[]> getParameterMap() {
return parameterMap;
}
public String[] getParameterValues(String name) {
return parameterMap.get(name);
}
public String getParameter(String name) {
String[] params = getParameterValues(name);
return params != null && params.length > 0 ? params[0] : null;
}
public Enumeration<String> getParameterNames() {
return Collections.enumeration(parameterMap.keySet());
}
};
}
}
Thanks in advance.

GWT-RPC method returns empty list on success

I am creating a webpage with a CellTable. I need to feed this table with data from an HBase table.
I have written a method to retrieve data from the HBase table and tested it.
But when I call that method as a GWT asynchronous RPC method, the RPC call succeeds but returns nothing; in my case it returns an empty list, and the alert box shows the list's size as 0.
Following is the related code.
Please help.
greetingService.getDeviceIDData(new AsyncCallback<List<DeviceDriverBean>>(){
public void onFailure(Throwable caught) {
// Show the RPC error message to the user
System.out.println("RPC Call failed");
Window.alert("Data : RPC call failed");
}
public void onSuccess(List<DeviceDriverBean> result) {
//on success do something
Window.alert("Data : RPC call successful");
//deviceDataList.addAll(result);
Window.alert("Result size: " +result.size());
// Add a text column to show the driver name.
TextColumn<DeviceDriverBean> nameColumn = new TextColumn<DeviceDriverBean>() {
@Override
public String getValue(DeviceDriverBean object) {
Window.alert(object.getName());
return object.getName();
}
};
table.addColumn(nameColumn, "Name");
// Add a text column to show the device id
TextColumn<DeviceDriverBean> deviceidColumn = new TextColumn<DeviceDriverBean>() {
@Override
public String getValue(DeviceDriverBean object) {
return object.getDeviceId();
}
};
table.addColumn(deviceidColumn, "Device ID");
table.setRowCount(result.size(), true);
// more code here to add columns in celltable
// Push the data into the widget.
table.setRowData(0, result);
SimplePager pager = new SimplePager();
pager.setDisplay(table);
VerticalPanel vp = new VerticalPanel();
vp.add(table);
vp.add(pager);
// Add it to the root panel.
RootPanel.get("datagridContainer").add(vp);
}
});
Code to retrieve data from hbase (server side code)
public List<DeviceDriverBean> getDeviceIDData()
throws IllegalArgumentException {
List<DeviceDriverBean> deviceidList = new ArrayList<DeviceDriverBean>();
// Escape data from the client to avoid cross-site script
// vulnerabilities.
/*
* input = escapeHtml(input); userAgent = escapeHtml(userAgent);
*
* return "Hello, " + input + "!<br><br>I am running " + serverInfo +
* ".<br><br>It looks like you are using:<br>" + userAgent;
*/
try {
Configuration config = HbaseConnectionSingleton.getInstance()
.HbaseConnect();
HTable testTable = new HTable(config, "driver_details");
byte[] family = Bytes.toBytes("details");
Scan scan = new Scan();
int cnt = 0;
ResultScanner rs = testTable.getScanner(scan);
for (Result r = rs.next(); r != null; r = rs.next()) {
DeviceDriverBean deviceDriverBean = new DeviceDriverBean();
byte[] rowid = r.getRow(); // Category, Date, Sentiment
NavigableMap<byte[], byte[]> map = r.getFamilyMap(family);
Iterator<Entry<byte[], byte[]>> itrt = map.entrySet()
.iterator();
deviceDriverBean.setDeviceId(Bytes.toString(rowid));
while (itrt.hasNext()) {
Entry<byte[], byte[]> entry = itrt.next();
//cnt++;
//System.out.println("Count : " + cnt);
byte[] qual = entry.getKey();
byte[] val = entry.getValue();
if (Bytes.toString(qual).equalsIgnoreCase("account_number")) {
deviceDriverBean.setAccountNo(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("make")) {
deviceDriverBean.setMake(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("model")) {
deviceDriverBean.setModel(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("driver_name")) {
deviceDriverBean.setName(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("premium")) {
deviceDriverBean.setPremium(Bytes.toString(val));
} else if (Bytes.toString(qual).equalsIgnoreCase("year")) {
deviceDriverBean.setYear(Bytes.toString(val));
} else {
System.out.println("No match found");
}
/*
* System.out.println(Bytes.toString(rowid) + " " +
* Bytes.toString(qual) + " " + Bytes.toString(val));
*/
}
deviceidList.add(deviceDriverBean);
}
}
catch (IOException e) {
// TODO Auto-generated catch block
e.printStackTrace();
}
catch (Exception e) {
// System.out.println("Message: "+e.getMessage());
e.printStackTrace();
}
return deviceidList;
}
Could this be lazy fetching on the server side by HBase? That would mean that if you return the list, HBase never gets a trigger to actually read it, and you simply get an empty list. I don't know the correct solution; in the past I've seen a similar problem on GAE, which could be solved by simply asking for the size of the list just before returning it to the client.
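If that is the cause, a guess at a fix is to force the list to materialize on the server just before returning it:

// Touch the list so any lazy loading happens before GWT serialization
System.out.println("Returning " + deviceidList.size() + " rows");
return deviceidList;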
I don't have the exact answer, but I do have some advice. In a similar situation I put my own traces in to check every step of the program.
On the server side, before the return, put: System.out.println("size of table=" + deviceidList.size());
You can also put this trace inside the loop that fills deviceidList.

Backup a database using JPA (Design)

I have working code that doesn't always work. Here's my approach:
Creating the backup
Create Entity Manager for source database
Create Entity Manager for destination database (embedded Derby Database)
Copy entities (select all entries of a table (table order hard-coded right now) and copy them to the destination database; basically a select-all, then detach each entity from the source and persist it on the destination)
Zip the embedded Derby database.
Loading from backup
Unzip backup
Perform a backup
Clean destination database (delete all tables)
Copy entities
At some point I would use the JPA 2 Metadata API to fetch the tables to be copied and select the order they need to be copied in (due to constraints); see the sketch below.
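As a sketch of that idea, the JPA 2 Metamodel can enumerate the mapped entity types (the constraint-based ordering would still have to be layered on top); em is assumed to be an open EntityManager:

import javax.persistence.EntityManager;
import javax.persistence.metamodel.EntityType;

public static void listEntities(EntityManager em) {
    // List every mapped entity known to the persistence unit
    for (EntityType<?> entity : em.getMetamodel().getEntities()) {
        System.out.println(entity.getName() + " -> " + entity.getJavaType().getName());
    }
}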
For some reason this approach doesn't always work: I see "lost" entries that are not recovered.
Here's the code:
package com.bluecubs.xinco.core.server;
import java.io.File;
import java.io.FileInputStream;
import java.io.FileOutputStream;
import java.io.IOException;
import java.sql.DriverManager;
import java.sql.SQLException;
import java.text.SimpleDateFormat;
import java.util.Collection;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.List;
import java.util.ArrayList;
import java.util.logging.Level;
import java.util.logging.Logger;
import java.util.zip.ZipEntry;
import java.util.zip.ZipInputStream;
import java.util.zip.ZipOutputStream;
import javax.persistence.EntityManager;
import javax.persistence.EntityManagerFactory;
import javax.persistence.Persistence;
import org.apache.commons.io.FileUtils;
import org.apache.commons.io.filefilter.IOFileFilter;
import org.apache.commons.io.filefilter.TrueFileFilter;
/**
* This is a complex task and is heavily dependent on the architecture
* of the database.
*
* Data needs to be stored in a particular order into the database to comply
* with database constraints. This order can be observed in a dump file or
* create script like the ones generated from MySQL Workbench. Using that
* should be enough. In case that tool is not available, the basic logic is
* to populate tables from the outside inwards: from the tables with no
* relationships (or only one) to the more complex ones. In summary, before a
* table is populated, all the related tables should be populated already (if
* we have identifying relationships).
*
* @author Javier A. Ortiz Bultrón <javier.ortiz.78@gmail.com>
*/
public class XincoBackupManager {
private static XincoBackupManager instance;
private static EntityManagerFactory liveEMF;
private static EntityManagerFactory backupEMF;
private static EntityManager live, backup;
private static final ArrayList<String> tables = new ArrayList<String>();
private static XincoBackupFile last;
private static String backupPath;
public static HashMap<String, Integer> stats = new HashMap<String, Integer>();
static {
//Non-order-critical tables
tables.add("XincoCoreAceT");
tables.add("XincoCoreDataT");
tables.add("XincoCoreDataTypeAttributeT");
tables.add("XincoCoreGroupT");
tables.add("XincoCoreLanguageT");
tables.add("XincoCoreNodeT");
tables.add("XincoCoreUserHasXincoCoreGroupT");
tables.add("XincoCoreUserT");
tables.add("XincoSettingT");
tables.add("XincoDependencyTypeT");
tables.add("XincoCoreDataHasDependencyT");
tables.add("XincoSetting");
tables.add("XincoId");
//Order critical tables
tables.add("XincoCoreLanguage");
tables.add("XincoCoreNode");
tables.add("XincoCoreDataType");
tables.add("XincoCoreData");
tables.add("XincoDependencyType");
tables.add("XincoCoreDataHasDependency");
tables.add("XincoCoreUser");
tables.add("XincoCoreUserModifiedRecord");
tables.add("XincoCoreGroup");
tables.add("XincoCoreAce");
tables.add("XincoCoreUserHasXincoCoreGroup");
tables.add("XincoAddAttribute");
tables.add("XincoCoreDataTypeAttribute");
tables.add("XincoCoreLog");
}
public static XincoBackupManager get() {
if (instance == null) {
instance = new XincoBackupManager();
}
return instance;
}
private static void setDBSystemDir(String systemDir) {
// Set the db system directory.
System.setProperty("derby.system.home", systemDir);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Derby home set at: {0}", systemDir);
try {
//Start the embeded DB
Class.forName("org.apache.derby.jdbc.EmbeddedDriver").newInstance();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (IllegalAccessException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
private static void initConnections() {
try {
liveEMF = XincoDBManager.getEntityManagerFactory();
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
try {
backupEMF = Persistence.createEntityManagerFactory("XincoBackup");
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
protected static boolean backup() throws XincoException {
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Initializing connections...");
initConnections();
stats.clear();
backupPath = XincoSettingServer.getSetting("setting.backup.path").getString_value();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
live = liveEMF.createEntityManager();
//Prepare the backup repository. Create dirs if needed.
File backupDir = new File(backupPath);
backupDir.mkdirs();
//Create folder for this backup
SimpleDateFormat format = new SimpleDateFormat("MM-dd-yyyy");
File backupNewDir = new File(backupPath + System.getProperty("file.separator")
+ format.format(new Date()));
backupNewDir.mkdirs();
/*
* Make sure there's no derby database stuff in the folder.
* Any previous interrupted backup might have left corrupted database files.
*/
File tempDir = new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "xinco");
if (tempDir.exists()) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.WARNING,
"Deleting potentially corrupted database files at: {0}", tempDir);
FileUtils.deleteDirectory(tempDir);
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Done!");
}
/**
* Prepare system to use derby
*/
setDBSystemDir(backupNewDir.getAbsolutePath());
backup = backupEMF.createEntityManager();
for (String s : tables) {
copyEntities(s, live, backup);
}
/**
* At this point we should have a <Backup Database name> folder in
* <Backup Path>/<Date>.
* Lets zip them for storage.
*/
format = new SimpleDateFormat("MM dd yyyy hh-mm-ss");
zipBackupFiles(backupNewDir, backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "Xinco Backup " + format.format(new Date()));
//Stop Derby database in order to delete
try {
DriverManager.getConnection("jdbc:derby:;shutdown=true");
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
}
//Delete backed up files
String dbName = (String) backup.getProperties().get("javax.persistence.jdbc.url");
dbName = dbName.substring(dbName.lastIndexOf(":") + 1, dbName.indexOf(";"));
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting temp folder: {0}", dbName);
FileUtils.deleteDirectory(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + dbName));
//Delete Derby log file
FileUtils.forceDelete(new File(backupNewDir.getAbsolutePath()
+ System.getProperty("file.separator") + "derby.log"));
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
XincoDBManager.setLocked(false);
return false;
} finally {
if (live != null && live.isOpen()) {
live.close();
}
if (backup != null && backup.isOpen()) {
backup.close();
}
if (backupEMF != null && backupEMF.isOpen()) {
backupEMF.close();
}
}
XincoDBManager.setLocked(false);
return true;
}
private static void zipBackupFiles(File path, String zipName) throws XincoException {
if (!zipName.endsWith(".zip")) {
zipName += ".zip";
}
// These are the files to include in the ZIP file
IOFileFilter filter = new IOFileFilter() {
@Override
public boolean accept(File file) {
if (file.isDirectory()) {
return true;
}
//Ignore other backup files
if (file.isFile() && !file.getName().endsWith(".zip")) {
return true;
}
return false;
}
@Override
public boolean accept(File file, String string) {
throw new UnsupportedOperationException("Not supported yet.");
}
};
@SuppressWarnings("unchecked")
Collection<File> fileList = FileUtils.listFiles(path, filter, TrueFileFilter.INSTANCE);
Object[] files = fileList.toArray();
// Create a buffer for reading the files
byte[] buf = new byte[1024];
try {
// Create the ZIP file
ZipOutputStream out = new ZipOutputStream(new FileOutputStream(zipName));
// Compress the files
for (int i = 0; i < files.length; i++) {
FileInputStream in = new FileInputStream((File) files[i]);
String fileName = ((File) files[i]).getPath();
//Remove not needed folders
fileName = fileName.substring(fileName.indexOf(path.getAbsolutePath()) + path.getAbsolutePath().length() + 1);
// Add ZIP entry to output stream.
out.putNextEntry(new ZipEntry(fileName));
// Transfer bytes from the file to the ZIP file
int len;
while ((len = in.read(buf)) > 0) {
out.write(buf, 0, len);
}
// Complete the entry
out.closeEntry();
in.close();
last = new XincoBackupFile(new File(zipName));
}
// Complete the ZIP file
out.close();
} catch (IOException e) {
throw new XincoException("Error zipping backup: " + e.getLocalizedMessage());
}
}
private static void copyEntities(String table, EntityManager source, EntityManager dest) {
List<Object> result, result2;
result = source.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Copying from table: {0}", table);
int i = 0;
source.clear();
for (Object o : result) {
i++;
Class<?> persistenceClass = null;
try {
persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
dest.getTransaction().begin();
if (dest.contains(persistenceClass.cast(o))) {
//If no exception do a merge because it exists already
dest.merge(persistenceClass.cast(o));
} else {
dest.persist(persistenceClass.cast(o));
}
dest.getTransaction().commit();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
throw new XincoException("No persistence enitiy defined for table: " + table);
}catch (Exception ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
throw new XincoException("Exception copying: " + o);
}
}
stats.put(table, i);
result2 = dest.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.INFO,
"Copying for table: {0} completed! Amount of records: {1}",
new Object[]{table, i});
//Make sure the copy is accurate.
//TODO: For some reason XincoId always returns twice the amount of records during this routine.
if (result2.size() != result.size() && !table.equals("XincoId")) {
throw new XincoException("Error copying records for table " + table + ". Got " + result2.size() + " instead of " + result.size());
}
result2.clear();
}
@SuppressWarnings({"unchecked"})
public static ArrayList<XincoBackupFile> getBackupFiles() throws XincoException {
// These are the backup zip files to list
IOFileFilter filter = new IOFileFilter() {
@Override
public boolean accept(File file) {
//Only zip files
if (file.isFile() && file.getName().endsWith(".zip")
&& file.getName().startsWith("Xinco Backup")) {
return true;
}
return false;
}
@Override
public boolean accept(File file, String string) {
throw new UnsupportedOperationException("Not supported yet.");
}
};
Collection<File> files = FileUtils.listFiles(
new File(backupPath), filter, TrueFileFilter.INSTANCE);
ArrayList<XincoBackupFile> backupFiles = new ArrayList<XincoBackupFile>();
for (File f : files) {
backupFiles.add(new XincoBackupFile(f));
}
//Sort
Collections.sort(backupFiles, new XincoBackupComparator());
//Sorted from oldest to newer so we need to invert the list.
Collections.reverse(backupFiles);
return backupFiles;
}
protected static boolean restoreFromBackup(XincoBackupFile backupFile) throws XincoException {
try {
stats.clear();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Restoring database from: {0}", backupFile.getName());
//First make a backup of current database just in case
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Creating a restore point for your current database...");
backup();
//We need to make sure that there's no one in the database
XincoDBManager.setLocked(true);
//Load database from the provided backup
loadDatabaseFromBackup(backupFile);
XincoDBManager.setLocked(false);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Restore complete!");
try {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Deleting restore point...");
FileUtils.forceDelete(last);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Done!");
} catch (IOException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
return true;
} catch (XincoException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
//Recover from last backup
loadDatabaseFromBackup(getLast());
XincoDBManager.setLocked(false);
throw new XincoException("Unable to load backup! Database reverted to original state. \n" + ex.getMessage());
}
}
protected static void loadDatabaseFromBackup(XincoBackupFile backupFile) throws XincoException {
EntityManager backupEM = null;
try {
initConnections();
live = liveEMF.createEntityManager();
//Unzip backup
unzipBackup(backupFile);
//Delete current database (inverse order than writing)
Collections.reverse(tables);
for (String s : tables) {
clearTable(s, live);
}
//Get back to original order
Collections.reverse(tables);
//Make derby start where the backup is
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Connecting to backup data...");
setDBSystemDir(backupPath + System.getProperty("file.separator")
+ "Temp" + System.getProperty("file.separator"));
//Connect to backup database
backupEM = Persistence.createEntityManagerFactory("XincoBackup").createEntityManager();
//Start copying
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Starting loading entities...");
for (String s : tables) {
//Copy values from backup
copyEntities(s, backupEM, live);
}
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Load complete!");
//Stop Derby database in order to delete
DriverManager.getConnection("jdbc:derby:;shutdown=true");
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Delete temp folder!");
try {
FileUtils.deleteDirectory(new File(System.getProperty("derby.system.home")));
} catch (IOException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
} catch (SQLException e) {
//When the database shuts down it'll throw an exception
} finally {
if (live != null && live.isOpen()) {
live.close();
}
if (backupEM != null && backupEM.isOpen()) {
backupEM.close();
}
}
}
private static void unzipBackup(XincoBackupFile backup) {
try {
//Make sure that the temp directory is empty before unzipping
FileUtils.deleteDirectory(new File(backupPath
+ System.getProperty("file.separator") + "Temp"));
byte[] buf = new byte[1024];
ZipInputStream zipinputstream = null;
ZipEntry zipentry;
zipinputstream = new ZipInputStream(
new FileInputStream(backup.getBackupFile()));
zipentry = zipinputstream.getNextEntry();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Unzipping backup file: {0}", backup.getName());
while (zipentry != null) {
//for each entry to be extracted
String entryName = zipentry.getName();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Extracting file: {0}", entryName);
int n;
FileOutputStream fileoutputstream;
File newFile = new File(entryName);
String directory = newFile.getParent();
if (directory == null) {
if (newFile.isDirectory()) {
break;
}
}
if (entryName.contains(System.getProperty("file.separator"))) {
//Create any internal folders required
new File(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator") + entryName.substring(
0, entryName.lastIndexOf(
System.getProperty("file.separator")))).mkdirs();
} else {
File tempDir = new File(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator"));
tempDir.mkdirs();
}
fileoutputstream = new FileOutputStream(backupPath
+ System.getProperty("file.separator") + "Temp"
+ System.getProperty("file.separator") + entryName);
while ((n = zipinputstream.read(buf, 0, 1024)) > -1) {
fileoutputstream.write(buf, 0, n);
}
fileoutputstream.close();
zipinputstream.closeEntry();
zipentry = zipinputstream.getNextEntry();
}//while
zipinputstream.close();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Unzipping complete!");
} catch (Exception e) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE,
"Error unzipping file!", e);
}
}
private static void clearTable(String table, EntityManager target) throws XincoException {
try {
List<Object> result;
result = target.createNamedQuery(table + ".findAll").getResultList();
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Cleaning table: {0}", table);
int i = 0;
Class<?> serverClass = null;
boolean special = false;
try {
serverClass = Class.forName("com.bluecubs.xinco.core.server." + table + "Server");
special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
} catch (ClassNotFoundException ex) {
try {
//Class doesn't exist, try in the add folder
serverClass = Class.forName("com.bluecubs.xinco.add.server." + table + "Server");
special = serverClass.newInstance() instanceof XincoCRUDSpecialCase;
} catch (ClassNotFoundException ex1) {
} catch (InstantiationException ex1) {
} catch (NoClassDefFoundError ex1) {
}
} catch (InstantiationException ex) {
} catch (NoClassDefFoundError ex) {
}
if (serverClass != null && special) {
((XincoCRUDSpecialCase) serverClass.newInstance()).clearTable();
special = false;
} else {
for (Object o : result) {
i++;
try {
Class<?> persistenceClass = Class.forName("com.bluecubs.xinco.core.server.persistence." + table);
target.getTransaction().begin();
target.remove(persistenceClass.cast(o));
target.getTransaction().commit();
} catch (ClassNotFoundException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
}
result = target.createNamedQuery(table + ".findAll").getResultList();
if (!result.isEmpty()) {
throw new IllegalStateException("Unable to delete entities: " + result.size());
}
stats.put(table, i);
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.FINEST,
"Cleaning table: {0} completed! Amount of records removed: {1}", new Object[]{table, i});
} catch (IllegalAccessException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
} catch (InstantiationException ex) {
Logger.getLogger(XincoBackupManager.class.getSimpleName()).log(Level.SEVERE, null, ex);
}
}
/**
* #return the last
*/
public static XincoBackupFile getLast() {
return last;
}
}
Any flaw in the design?
A better way of doing it?
Any comment is more than welcome!
Most database engines provide commands or tooling for dumping the content of a given database (some of them even support incremental backups). JPA will just be less efficient and more complex when you have ready-to-use solutions, so I don't see the point of using JPA for this task.
For Derby, there is actually nothing to do: just zip/tar (or use rsync) the database files and you're done.
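If you'd rather not copy the files by hand, Derby also ships an online backup system procedure, so the engine doesn't even have to be shut down first; a sketch (conn and the target directory are placeholders):

import java.sql.CallableStatement;
import java.sql.Connection;

public static void backupDerby(Connection conn, String targetDir) throws Exception {
    // Derby's built-in online backup copies the whole database to targetDir
    try (CallableStatement cs = conn.prepareCall(
            "CALL SYSCS_UTIL.SYSCS_BACKUP_DATABASE(?)")) {
        cs.setString(1, targetDir);
        cs.execute();
    }
}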
And if you want to copy the content of one database engine to another engine, use an ETL.
See also
How i can Dump a derby database into an sql file?
Always better done in the datastore. Some JPA providers provide ways. The one we provide is
http://www.datanucleus.org/products/accessplatform/jpa/replication.html
An interesting option I've found is Scriptella, which can be called from Java code. Usage examples. I'll give it a try and post the results.