MDB listener doesn't listen to HornetQ - JBoss

I configured a standalone HornetQ instance and started it, then created one sender class and one MDB receiver class.
When I register my listener with a consumer using
messageConsumer.setMessageListener(listener) in the sender class itself, it works fine.
But when I deploy my MDB receiver (a .war file!) in the JBoss application server, it does not listen to the queue messages.
Sender Class:
package com.mdas.sender;
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.JMSException;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.ObjectMessage;
import javax.jms.Queue;
import javax.jms.Session;
import javax.naming.InitialContext;
import javax.naming.NamingException;
import org.apache.log4j.Logger;
import com.mdas.receiver.TestQueueListnerMDB;
import com.mdas.vo.JmsVO;
public class JMSSender {
static Logger logger = Logger.getLogger(JMSSender.class);
// *************** Connection Factory JNDI name *************************
public String connectionFactory;
// *************** Queue JNDI name *************************
public String queueName;
protected ConnectionFactory tconFactory;
protected Connection tcon;
protected Session session;
protected MessageProducer producer;
protected Queue queue;
protected MessageConsumer messageConsumer;
protected TestQueueListnerMDB listener;
protected InitialContext ic;
public JMSSender(InitialContext ic, String connectionFactory, String queueName){
this.ic = ic;
this.connectionFactory = connectionFactory;
this.queueName = queueName;
}
public void sendJms(JmsVO jmsVO) throws Exception
{
System.out.println("Message put in jms destination");
ObjectMessage objectMessage = session.createObjectMessage();
objectMessage.setObject(jmsVO);
producer.send(objectMessage);
}
public void init() throws NamingException,
JMSException
{
System.out.println("0");
tconFactory = (ConnectionFactory) ic.lookup(connectionFactory);
System.out.println("1");
tcon = tconFactory.createConnection();
System.out.println("2");
session = tcon.createSession(false, Session.CLIENT_ACKNOWLEDGE);
System.out.println("3");
queue = (Queue) ic.lookup(queueName);
System.out.println("4");
producer = session.createProducer(queue);
System.out.println("5");
/*messageConsumer = session.createConsumer(queue);
listener = new TestQueueListnerMDB();
messageConsumer.setMessageListener(listener);*/
tcon.start();
}
public void closeQueueConnections(){
System.out.println("<<<< start closeQueueConnections >>>>>");
try {
producer.close();
//messageConsumer.close();
session.close();
tcon.close();
System.out.println("<<<< end closeQueueConnections successfully >>>>>");
} catch (Exception e) {
logger.error("Error in closeQueueConnections()", e);
}
}
}
Receiver Class:
package com.mdas.receiver;
import javax.ejb.ActivationConfigProperty;
import javax.ejb.EJBException;
import javax.ejb.MessageDriven;
import javax.ejb.MessageDrivenBean;
import javax.ejb.MessageDrivenContext;
import javax.ejb.TransactionAttribute;
import javax.ejb.TransactionAttributeType;
import javax.ejb.TransactionManagement;
import javax.ejb.TransactionManagementType;
import javax.jms.Message;
import javax.jms.MessageListener;
import javax.jms.ObjectMessage;
import org.apache.log4j.Logger;
import org.jboss.ejb3.annotation.ResourceAdapter;
import com.mdas.vo.JmsVO;
@MessageDriven(
activationConfig = { @ActivationConfigProperty(propertyName = "destinationType", propertyValue = "javax.jms.Queue"),
@ActivationConfigProperty(propertyName = "destination", propertyValue = "queue/TestQueue"),
@ActivationConfigProperty(propertyName = "acknowledgeMode", propertyValue = "Auto-acknowledge"),
@ActivationConfigProperty(propertyName = "maxSession", propertyValue = "100"),
@ActivationConfigProperty(propertyName = "hostName", propertyValue = "localhost"),
@ActivationConfigProperty(propertyName = "port", propertyValue = "5455")
})
@TransactionManagement(value = TransactionManagementType.CONTAINER)
@TransactionAttribute(value = TransactionAttributeType.NOT_SUPPORTED)
@ResourceAdapter("hornetq-ra.rar")
public class TestQueueListnerMDB implements MessageListener, MessageDrivenBean{
private static final long serialVersionUID = 1L;
static Logger logger = Logger.getLogger(TestQueueListnerMDB.class);
public TestQueueListnerMDB() {
logger.info("Snmp MDB Created :: " + this);
System.out.println("Snmp MDB Created :: " + this);
}
public void onMessage(Message message) {
try
{
System.out.println("Entered in onMessage::: ");
logger.info("Trap Received In Processor ::: ");
ObjectMessage objectMessage = (ObjectMessage)message;
JmsVO received = (JmsVO)objectMessage.getObject();
System.out.println(received.getText());
message.acknowledge();
}
catch (Exception e)
{
logger.error("Error in receiving alarm from queue", e);
}finally{
}
}
public void ejbRemove() throws EJBException {
logger.info("QueueListnerMDB is being removed");
}
public void setMessageDrivenContext(MessageDrivenContext messageDrivenContext) throws EJBException {
}
}

Your question is not well formulated, but let me try...
To give you a proper answer I would need more information, such as where your server is (is it remote?) and what version you are using.
Usually all you have to do is specify the remote server. There is some information in the HornetQ documentation:
http://docs.jboss.org/hornetq/2.2.2.Final/user-manual/en/html/appserver-integration.html#d0e8389
If you provide more information about what's happening (errors, version), I may be able to give you a better answer.
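As a starting point, the section linked above describes pointing the hornetq-ra adapter at a remote server through activation config properties on the MDB. A minimal, hedged sketch follows (the connector class is the standard Netty one from the HornetQ docs; the host and port values are assumptions that must match the acceptor configured in your standalone broker, where the stock HornetQ config uses 5445 for the plain Netty acceptor and 5455 for the throughput one):
// sketch only; imports and onMessage() as in TestQueueListnerMDB above
@MessageDriven(activationConfig = {
    @ActivationConfigProperty(propertyName = "destinationType", propertyValue = "javax.jms.Queue"),
    @ActivationConfigProperty(propertyName = "destination", propertyValue = "queue/TestQueue"),
    // assumption: the standalone broker exposes its default Netty acceptor on 5445
    @ActivationConfigProperty(propertyName = "connectorClassName",
        propertyValue = "org.hornetq.core.remoting.impl.netty.NettyConnectorFactory"),
    @ActivationConfigProperty(propertyName = "connectionParameters",
        propertyValue = "host=localhost;port=5445")
})
@ResourceAdapter("hornetq-ra.rar")
public class TestQueueListnerMDB implements MessageListener { /* onMessage as above */ }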

Related

Messages are not consumed when a connection uses JNDI or ActiveMQConnectionFactory to connect to EmbeddedActiveMQ

This is a follow-up to this question.
My code can initiate a connection, session, etc.; however, messages are not consumed. I don't see any exceptions in the logs.
This test reproduces the problem:
import javax.jms.Connection;
import javax.jms.ConnectionFactory;
import javax.jms.JMSException;
import javax.jms.MessageConsumer;
import javax.jms.MessageProducer;
import javax.jms.Queue;
import javax.jms.Session;
import javax.naming.Context;
import java.io.File;
import java.util.Hashtable;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.TimeUnit;
import org.apache.activemq.artemis.api.core.QueueConfiguration;
import org.apache.activemq.artemis.api.core.SimpleString;
import org.apache.activemq.artemis.core.config.Configuration;
import org.apache.activemq.artemis.core.config.impl.ConfigurationImpl;
import org.apache.activemq.artemis.core.server.embedded.EmbeddedActiveMQ;
import org.apache.activemq.artemis.core.settings.impl.AddressSettings;
import org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory;
import org.junit.After;
import org.junit.Before;
public class Test {
EmbeddedActiveMQ jmsServer;
final String QUEUE_NAME = "myQueue";
@Before
public void setUp() throws Exception {
final String baseDir = File.separator + "tmp";
final EmbeddedActiveMQ embeddedActiveMQ = new EmbeddedActiveMQ();
final Configuration config = new ConfigurationImpl();
config.setPersistenceEnabled(true);
config.setBindingsDirectory(baseDir + File.separator + "bindings");
config.setJournalDirectory(baseDir + File.separator + "journal");
config.setPagingDirectory(baseDir + File.separator + "paging");
config.setLargeMessagesDirectory(baseDir + File.separator + "largemessages");
config.setSecurityEnabled(false);
AddressSettings adr = new AddressSettings();
adr.setDeadLetterAddress(new SimpleString("DLQ"));
adr.setExpiryAddress(new SimpleString("ExpiryQueue"));
config.addAddressSetting("#", adr);
config.addAcceptorConfiguration("invmConnectionFactory", "vm://0");
embeddedActiveMQ.setConfiguration(config);
this.jmsServer = embeddedActiveMQ;
this.jmsServer.start();
System.out.println("creating queue");
final boolean isSuccess = jmsServer.getActiveMQServer().createQueue(new QueueConfiguration(QUEUE_NAME)) != null;
if(isSuccess) {
System.out.println(QUEUE_NAME + " queue created");
}
}
@After
public void tearDown() {
try {
this.jmsServer.stop();
} catch(Exception e) {
// ignore
}
}
@org.junit.Test
public void simpleTest() throws Exception {
Hashtable d = new Hashtable();
d.put(Context.INITIAL_CONTEXT_FACTORY, "org.apache.activemq.artemis.jndi.ActiveMQInitialContextFactory");
d.put("connectionFactory.invmConnectionFactory", "vm://0");
final ActiveMQInitialContextFactory activeMQInitialContextFactory = new ActiveMQInitialContextFactory();
Context initialContext = activeMQInitialContextFactory.getInitialContext(d);
ConnectionFactory connectionFactory = (ConnectionFactory) initialContext.lookup("invmConnectionFactory");
Connection connection = connectionFactory.createConnection();
Session session = connection.createSession(true, Session.SESSION_TRANSACTED);
Queue queue = session.createQueue(QUEUE_NAME);
MessageProducer producer = session.createProducer(queue);
MessageConsumer consumer = session.createConsumer(queue);
CountDownLatch latch = new CountDownLatch(1);
consumer.setMessageListener(message -> {
System.out.println("=== " + message);
try {
message.acknowledge();
session.commit();
latch.countDown();
} catch(JMSException e) {
e.printStackTrace();
}
});
connection.start();
producer.send(session.createMessage());
session.commit();
if(!latch.await(2, TimeUnit.SECONDS)) {
throw new IllegalStateException();
}
connection.close();
}
}
The problem with this code is subtle but important. When configuring the broker you're creating a queue like so:
...
final String QUEUE_NAME = "myQueue";
...
jmsServer.getActiveMQServer().createQueue(new QueueConfiguration(QUEUE_NAME))
...
This is perfectly valid in and of itself, but for this use-case involving a JMS queue it's important to note that this will result in an address named myQueue and a multicast queue named myQueue since the default routing type is MULTICAST and you didn't specify any routing type on your QueueConfiguration. This is not the kind of configuration you want for a JMS queue. You want an address and an ANYCAST queue of the same name (i.e. myQueue in this case) as noted in the documentation. Therefore, you should use:
...
import org.apache.activemq.artemis.api.core.RoutingType;
...
jmsServer.getActiveMQServer().createQueue(new QueueConfiguration(QUEUE_NAME).setRoutingType(RoutingType.ANYCAST))
When you use the multicast queue the message sent by the JMS client will not actually be routed because it is sent with the anycast routing type.
Another option would be to not create the queue explicitly at all and allow it to be auto-created.
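If you go the auto-creation route instead, here is a minimal sketch of the relevant broker settings (these flags already default to true in recent Artemis versions, so often no configuration change is needed at all):
// with auto-create enabled you can skip createQueue() entirely: the first JMS
// producer or consumer on "myQueue" triggers creation of the address plus an
// ANYCAST queue of the same name, which is the JMS-queue shape described above
AddressSettings adr = new AddressSettings();
adr.setAutoCreateAddresses(true); // default is true
adr.setAutoCreateQueues(true);    // default is true
config.addAddressSetting("#", adr);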

How to verify that an HTTP connection pool improves performance

I want to use an HTTP connection pool with Spring RestTemplate, but before using it, I need to verify whether it improves performance.
I did a little programming here:
@Configuration
public class RestTemplateConfig {
@Bean
public RestTemplate restTemplate() {
return new RestTemplate();
}
}
and the test code here:
@SpringBootTest
class RestnopoolApplicationTests {
String url = "https://www.baidu.com/";
// String url = "http://localhost:8080/actuator/";
@Autowired
RestTemplate restTemplate;
@Test
void contextLoads() {
}
@Test
void verify_health() {
Instant start = Instant.now();
for(int i=0; i < 100; i ++) {
restTemplate.getForObject(url, String.class);
}
Instant end = Instant.now();
Duration d = Duration.between(start,end );
System.out.println("time span " + d.getSeconds());
}
Also, I wrote the HTTP connection pool configuration below:
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.TimeUnit;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.ssl.SSLContextBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
/**
* - Supports both HTTP and HTTPS
* - Uses a connection pool to re-use connections and save overhead of creating connections.
* - Has a custom connection keep-alive strategy (to apply a default keep-alive if one isn't specified)
* - Starts an idle connection monitor to continuously clean up stale connections.
*/
@Configuration
@EnableScheduling
public class HttpClientConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(HttpClientConfig.class);
// Determines the timeout in milliseconds until a connection is established.
private static final int CONNECT_TIMEOUT = 30000;
// The timeout when requesting a connection from the connection manager.
private static final int REQUEST_TIMEOUT = 30000;
// The timeout for waiting for data
private static final int SOCKET_TIMEOUT = 60000;
private static final int MAX_TOTAL_CONNECTIONS = 50;
private static final int DEFAULT_KEEP_ALIVE_TIME_MILLIS = 20 * 1000;
private static final int CLOSE_IDLE_CONNECTION_WAIT_TIME_SECS = 30;
@Bean
public PoolingHttpClientConnectionManager poolingConnectionManager() {
SSLContextBuilder builder = new SSLContextBuilder();
try {
builder.loadTrustMaterial(null, new TrustSelfSignedStrategy());
} catch (NoSuchAlgorithmException | KeyStoreException e) {
LOGGER.error("Pooling Connection Manager Initialisation failure because of " + e.getMessage(), e);
}
SSLConnectionSocketFactory sslsf = null;
try {
sslsf = new SSLConnectionSocketFactory(builder.build());
} catch (KeyManagementException | NoSuchAlgorithmException e) {
LOGGER.error("Pooling Connection Manager Initialisation failure because of " + e.getMessage(), e);
}
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder
.<ConnectionSocketFactory>create().register("https", sslsf)
.register("http", new PlainConnectionSocketFactory())
.build();
PoolingHttpClientConnectionManager poolingConnectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
poolingConnectionManager.setMaxTotal(MAX_TOTAL_CONNECTIONS);
return poolingConnectionManager;
}
@Bean
public ConnectionKeepAliveStrategy connectionKeepAliveStrategy() {
return new ConnectionKeepAliveStrategy() {
@Override
public long getKeepAliveDuration(HttpResponse response, HttpContext context) {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && param.equalsIgnoreCase("timeout")) {
return Long.parseLong(value) * 1000;
}
}
return DEFAULT_KEEP_ALIVE_TIME_MILLIS;
}
};
}
@Bean
public CloseableHttpClient httpClient() {
RequestConfig requestConfig = RequestConfig.custom()
.setConnectionRequestTimeout(REQUEST_TIMEOUT)
.setConnectTimeout(CONNECT_TIMEOUT)
.setSocketTimeout(SOCKET_TIMEOUT).build();
return HttpClients.custom()
.setDefaultRequestConfig(requestConfig)
.setConnectionManager(poolingConnectionManager())
.setKeepAliveStrategy(connectionKeepAliveStrategy())
.build();
}
@Bean
public Runnable idleConnectionMonitor(final PoolingHttpClientConnectionManager connectionManager) {
return new Runnable() {
@Override
@Scheduled(fixedDelay = 10000)
public void run() {
try {
if (connectionManager != null) {
LOGGER.trace("run IdleConnectionMonitor - Closing expired and idle connections...");
connectionManager.closeExpiredConnections();
connectionManager.closeIdleConnections(CLOSE_IDLE_CONNECTION_WAIT_TIME_SECS, TimeUnit.SECONDS);
} else {
LOGGER.trace("run IdleConnectionMonitor - Http Client Connection manager is not initialised");
}
} catch (Exception e) {
LOGGER.error("run IdleConnectionMonitor - Exception occurred. msg={}, e={}", e.getMessage(), e);
}
}
};
}
}
and the updated RestTemplateConfig below:
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.web.client.RestTemplate;
import org.apache.http.impl.client.CloseableHttpClient;
@Configuration
public class RestTemplateConfig {
@Autowired
CloseableHttpClient httpClient;
@Bean
public RestTemplate restTemplate() {
RestTemplate restTemplate = new RestTemplate(clientHttpRequestFactory());
return restTemplate;
}
@Bean
public HttpComponentsClientHttpRequestFactory clientHttpRequestFactory() {
HttpComponentsClientHttpRequestFactory clientHttpRequestFactory = new HttpComponentsClientHttpRequestFactory();
clientHttpRequestFactory.setHttpClient(httpClient);
return clientHttpRequestFactory;
}
@Bean
public TaskScheduler taskScheduler() {
ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
scheduler.setThreadNamePrefix("poolScheduler");
scheduler.setPoolSize(50);
return scheduler;
}
}
The test results cannot prove that the connection pool improves performance.
You have not used your new implementation. You are still using the default Apache client. Use your httpClient() method to get the CloseableHttpClient.
Please also note that your test is synchronous: no matter how many connections you have in the pool, you will use them sequentially. Use threads to execute the GET requests.
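To illustrate that last point, here is a hedged sketch of a concurrent version of the test (the thread count and method name are illustrative; it reuses the url and restTemplate fields and the Instant/Duration imports from the test above). Only under parallel load can more than one pooled connection be in use at a time:
import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

@Test
void verify_health_concurrently() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(10);
    List<Future<String>> results = new ArrayList<>();
    Instant start = Instant.now();
    for (int i = 0; i < 100; i++) {
        // each task borrows a connection from the pool for the duration of the GET
        results.add(executor.submit(() -> restTemplate.getForObject(url, String.class)));
    }
    for (Future<String> f : results) {
        f.get(); // wait for completion; rethrows any request failure
    }
    System.out.println("time span " + Duration.between(start, Instant.now()).getSeconds());
    executor.shutdown();
}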

Run MyBatis Migrations' 'up' command on application startup

I have MyBatis Migrations set up, driven by the migrate command on the command line (in Jenkins). Now I want to integrate this with the application itself (Spring Boot). Currently I have various SQL files with @UNDO sections and the up SQL code.
So when I start the Spring Boot application, I want to run the migrate up command without changing the SQL files that I already have. Is this possible in MyBatis and Spring?
This is about MyBatis-Migrations, right?
Spring Boot does not provide out-of-the-box support; however, it seems possible to write a custom DatabasePopulator.
Here is a simple implementation.
It uses Migrations' Runtime Migration feature.
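At its core, the runtime migration feature amounts to invoking Migrations' UpOperation directly against the database. Both implementations below reduce to a call of this shape (a sketch: scriptsDir is a java.io.File pointing at the script directory, properties a java.util.Properties of placeholder values):
new UpOperation().operate(
        new DataSourceConnectionProvider(dataSource), // runs the up scripts over the given DataSource
        new FileMigrationLoader(scriptsDir, "utf-8", properties),
        new DatabaseOperationOption(),
        System.out);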
import java.io.FileNotFoundException;
import java.io.IOException;
import java.io.InputStream;
import java.io.Reader;
import java.math.BigDecimal;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Collection;
import java.util.List;
import java.util.Properties;
import java.util.TreeSet;
import java.util.stream.Collectors;
import javax.sql.DataSource;
import org.apache.ibatis.migration.Change;
import org.apache.ibatis.migration.DataSourceConnectionProvider;
import org.apache.ibatis.migration.MigrationException;
import org.apache.ibatis.migration.MigrationLoader;
import org.apache.ibatis.migration.MigrationReader;
import org.apache.ibatis.migration.operations.UpOperation;
import org.apache.ibatis.migration.options.DatabaseOperationOption;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.core.io.Resource;
import org.springframework.core.io.support.PathMatchingResourcePatternResolver;
import org.springframework.core.io.support.ResourcePatternResolver;
import org.springframework.jdbc.datasource.init.DataSourceInitializer;
import org.springframework.jdbc.datasource.init.DatabasePopulator;
import org.springframework.jdbc.datasource.init.ScriptException;
import org.springframework.jdbc.datasource.init.UncategorizedScriptException;
@Configuration
public class MyBatisMigrationsConfig {
private static final String scriptsDir = "scripts";
private static final String changelogTable = "changelog";
@Bean
public DataSourceInitializer dataSourceInitializer(DataSource dataSource) {
Properties properties = new Properties();
properties.setProperty("changelog", changelogTable);
DatabaseOperationOption options = new DatabaseOperationOption();
options.setChangelogTable(changelogTable);
MyBatisMigrationsPopulator populator = new MyBatisMigrationsPopulator(dataSource, scriptsDir, properties, options,
new PathMatchingResourcePatternResolver());
DataSourceInitializer dataSourceInitializer = new DataSourceInitializer();
dataSourceInitializer.setDataSource(dataSource);
dataSourceInitializer.setDatabasePopulator(populator);
return dataSourceInitializer;
}
private static class MyBatisMigrationsPopulator implements DatabasePopulator {
private final DataSource dataSource;
private final String scriptsDir;
private final Properties properties;
private final DatabaseOperationOption options;
private final ResourcePatternResolver resourcePatternResolver;
public MyBatisMigrationsPopulator(DataSource dataSource, String scriptsDir,
Properties properties, DatabaseOperationOption options, ResourcePatternResolver resourcePatternResolver) {
super();
this.dataSource = dataSource;
this.scriptsDir = scriptsDir;
this.properties = properties;
this.options = options;
this.resourcePatternResolver = resourcePatternResolver;
}
public void populate(Connection connection) throws SQLException, ScriptException {
try {
new UpOperation().operate(new DataSourceConnectionProvider(dataSource),
createMigrationsLoader(), options, System.out);
} catch (MigrationException e) {
throw new UncategorizedScriptException("Migration failed.", e.getCause());
}
}
protected MigrationLoader createMigrationsLoader() {
return new SpringMigrationLoader(resourcePatternResolver, scriptsDir, "utf-8", properties);
}
}
private static class SpringMigrationLoader implements MigrationLoader {
protected static final String BOOTSTRAP_SQL = "bootstrap.sql";
protected static final String ONABORT_SQL = "onabort.sql";
private ResourcePatternResolver resourcePatternResolver;
private String path;
private String charset;
private Properties properties;
public SpringMigrationLoader(
ResourcePatternResolver resourcePatternResolver,
String path,
String charset,
Properties properties) {
this.resourcePatternResolver = resourcePatternResolver;
this.path = path;
this.charset = charset;
this.properties = properties;
}
@Override
public List<Change> getMigrations() {
Collection<String> filenames = new TreeSet<>();
for (Resource res : getResources("/*.sql")) {
filenames.add(res.getFilename());
}
filenames.remove(BOOTSTRAP_SQL);
filenames.remove(ONABORT_SQL);
return filenames.stream()
.map(this::parseChangeFromFilename)
.collect(Collectors.toList());
}
@Override
public Reader getScriptReader(Change change, boolean undo) {
try {
return getReader(change.getFilename(), undo);
} catch (IOException e) {
throw new MigrationException("Failed to read bootstrap script.", e);
}
}
@Override
public Reader getBootstrapReader() {
try {
return getReader(BOOTSTRAP_SQL, false);
} catch (FileNotFoundException e) {
// ignore
} catch (IOException e) {
throw new MigrationException("Failed to read bootstrap script.", e);
}
return null;
}
@Override
public Reader getOnAbortReader() {
try {
return getReader(ONABORT_SQL, false);
} catch (FileNotFoundException e) {
// ignore
} catch (IOException e) {
throw new MigrationException("Failed to read onabort script.", e);
}
return null;
}
protected Resource getResource(String pattern) {
return this.resourcePatternResolver.getResource(this.path + "/" + pattern);
}
protected Resource[] getResources(String pattern) {
try {
return this.resourcePatternResolver.getResources(this.path + pattern);
} catch (IOException e) {
throw new RuntimeException(e);
}
}
protected Change parseChangeFromFilename(String filename) {
try {
String name = filename.substring(0, filename.lastIndexOf("."));
int separator = name.indexOf("_");
BigDecimal id = new BigDecimal(name.substring(0, separator));
String description = name.substring(separator + 1).replace('_', ' ');
Change change = new Change(id);
change.setFilename(filename);
change.setDescription(description);
return change;
} catch (Exception e) {
throw new MigrationException("Error parsing change from file. Cause: " + e, e);
}
}
protected Reader getReader(String fileName, boolean undo) throws IOException {
InputStream inputStream = getResource(fileName).getURL().openStream();
return new MigrationReader(inputStream, charset, undo, properties);
}
}
}
Here is an executable demo project.
You may need to modify the datasource settings in application.properties.
Hope this helps!
For Spring:
import java.io.File;
import java.net.URL;
import java.sql.Connection;
import java.sql.SQLException;
import java.util.Properties;
import javax.sql.DataSource;
import org.apache.ibatis.migration.ConnectionProvider;
import org.apache.ibatis.migration.FileMigrationLoader;
import org.apache.ibatis.migration.operations.UpOperation;
import org.apache.ibatis.migration.options.DatabaseOperationOption;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.jdbc.datasource.init.DataSourceInitializer;
import org.springframework.jdbc.datasource.init.DatabasePopulator;
import org.springframework.jdbc.datasource.init.ScriptException;
@Configuration
public class MyBatisMigrationRuntimeConfiguration {
private static final String CHANGELOG_TABLE = "changelog";
private static final String MIGRATION_SCRIPTS = "migration/scripts";
@Bean
public DataSourceInitializer dataSourceInitializer(DataSource dataSource) {
DataSourceInitializer dataSourceInitializer = new DataSourceInitializer();
dataSourceInitializer.setDataSource(dataSource);
dataSourceInitializer.setDatabasePopulator(new Populator());
return dataSourceInitializer;
}
private DatabaseOperationOption getOption() {
DatabaseOperationOption options = new DatabaseOperationOption();
options.setChangelogTable(CHANGELOG_TABLE);
return options;
}
private Properties getProperties() {
Properties properties = new Properties();
properties.setProperty("changelog", CHANGELOG_TABLE);
return properties;
}
private File getScriptDir() {
URL url = getClass().getClassLoader().getResource(MIGRATION_SCRIPTS);
if (url == null) {
throw new IllegalArgumentException("file is not found!");
} else {
return new File(url.getFile());
}
}
private class Populator implements DatabasePopulator {
@Override
public void populate(Connection connection) throws SQLException, ScriptException {
new UpOperation().operate(
new SimplyConnectionProvider(connection),
new FileMigrationLoader(getScriptDir(), "utf-8", getProperties()),
getOption(),
System.out
);
}
}
private static class SimplyConnectionProvider implements ConnectionProvider {
private final Connection connection;
public SimplyConnectionProvider(Connection connection) {
this.connection = connection;
}
public Connection getConnection() {
return connection;
}
}
}
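Both answers assume a MyBatis Migrations script directory on the classpath ("scripts" in the first, "migration/scripts" in the second), laid out the way the command-line tool creates it. A purely hypothetical example of that layout:
migration/scripts/
    bootstrap.sql (optional)
    20160421091500_create_changelog.sql
    20160421091501_create_user_table.sql
Each script contains the up SQL first, then the undo SQL after a line reading -- //@UNDO; the numeric filename prefix becomes the change id and the remainder the description (compare parseChangeFromFilename in the first answer).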

Storm Kafka Topology terminates without output

This is the StBolt.java class.
package com.storm.cassandra;
import java.util.Map;
import net.sf.json.JSONObject;
import net.sf.json.JSONSerializer;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.IBasicBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
public class StBolt implements IBasicBolt {
private static final long serialVersionUID = 1L;
private static final Logger logger = Logger
.getLogger(StBolt.class);
private static Session session = null;
private Cluster cluster = null;
String cassandraURL;
JSONObject eventJson = null;
String topicname = null;
String ip = null;
String menu = null;
String product = null;
Row row = null;
com.datastax.driver.core.ResultSet viewcount = null;
com.datastax.driver.core.ResultSet segmentlistResult = null;
com.datastax.driver.core.ResultSet newCountUpdatedResult = null;
public StBolt(String topicname) {
this.topicname = topicname;
}
public void prepare(Map stormConf, TopologyContext topologyContext) {
cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
System.out.println("load cassandra ip");
session = cluster.connect();
System.out.println("CassandraCounterBolt prepare method ended");
}
public void execute(Tuple input, BasicOutputCollector collector) {
System.out.println("Execute");
Fields fields = input.getFields();
try {
eventJson = (JSONObject) JSONSerializer.toJSON((String) input
.getValueByField(fields.get(0)));
topicname = (String) eventJson.get("topicName");
ip = (String) eventJson.get("ip");
menu = (String) eventJson.get("menu");
product = (String) eventJson.get("product");
String ievent = "ievent";
String install = "install";
viewcount = session
.execute("update webapp.viewcount set count=count+1 where topicname='"+topicname+
"'and ip= '"+ip+"'and menu='"+menu+"'and product='"+product+"'" );
} catch (Exception e) {
e.printStackTrace();
}
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
}
public Map<String, Object> getComponentConfiguration() {
return null;
}
public void cleanup() {
}
}
Here is the StTopology.java class
package com.storm.cassandra;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
public class StTopology {
public static void main(String[] args) throws Exception {
if (args.length == 4) {
BrokerHosts hosts = new ZkHosts("localhost:2181");
//System.out
//.println("Insufficent Arguements - topologyName kafkaTopic ZKRoot ID");
SpoutConfig kafkaConf1 = new SpoutConfig(hosts, args[1], args[2],
args[3]);
//System.out
//.println("Insufficent Arguements - topologyName kafkaTopic ZKRoot ID");
//kafkaConf1.forceFromStart = false;
kafkaConf1.zkRoot = args[2];
kafkaConf1.scheme = new SchemeAsMultiScheme(new StringScheme());
KafkaSpout kafkaSpout1 = new KafkaSpout(kafkaConf1);
StBolt countbolt = new StBolt(args[1]);
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafkaspout", kafkaSpout1, 1);
builder.setBolt("counterbolt", countbolt, 1).shuffleGrouping(
"kafkaspout");
Config config = new Config();
config.setDebug(true);
config.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 1);
config.setNumWorkers(1);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(args[0], config, builder.createTopology());
// StormSubmitter.submitTopology(args[0], config,
// builder.createTopology());
} else {
System.out
.println("Insufficent Arguements - topologyName kafkaTopic ZKRoot ID");
}
}
}
I am trying to get JSON data from the Kafka console producer, process it in Storm and store it into Cassandra.
For some reason, there is no response from the bolt when I run the code with parameters viewcount usercount /kafkastorm webapp1.
I have Kafka getting data from the console producer as topic usercount, and the correct table in Cassandra.
The code compiles and runs without any error, but the console shows terminated.
There is no activity anywhere, despite providing the right JSON input to the Kafka console producer multiple times: {"topicname":"usercount","ip":"127.0.0.1","menu":"dress","product":"tshirt"}.
There is no topology shown as being created in the Storm UI's Topology Summary either.
I believe I have all the Kafka, Storm and Cassandra dependencies in place.
Please point me in the right direction with this issue. Thanks.

How to create a workflow model programmatically in AEM

I would like to create an AEM workflow model programmatically due to a business requirement. I used the code below to implement it, but it throws an exception. This problem has tortured me for a week. Could you please give me some hints? Thanks in advance.
The code is below!
package com.sample.mms.workflow;
import java.util.Iterator;
import java.util.List;
import javax.jcr.RepositoryException;
import org.apache.commons.lang.StringUtils;
import org.apache.felix.scr.annotations.Component;
import org.apache.felix.scr.annotations.Properties;
import org.apache.felix.scr.annotations.Property;
import org.apache.felix.scr.annotations.Reference;
import org.apache.felix.scr.annotations.Service;
import org.apache.jackrabbit.api.security.user.User;
import org.apache.jackrabbit.api.security.user.UserManager;
import org.apache.sling.api.resource.LoginException;
import org.apache.sling.api.resource.Resource;
import org.apache.sling.api.resource.ResourceResolver;
import org.apache.sling.api.resource.ResourceResolverFactory;
import org.osgi.framework.Constants;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import com.sample.mms.service.ConfigurationService;
import com.sample.mms.service.TopicOwnerBizService;
import com.sample.mms.util.WorkflowUtil;
import com.day.cq.workflow.WorkflowException;
import com.day.cq.workflow.WorkflowService;
import com.day.cq.workflow.WorkflowSession;
import com.day.cq.workflow.exec.WorkItem;
import com.day.cq.workflow.exec.WorkflowData;
import com.day.cq.workflow.exec.WorkflowProcess;
import com.day.cq.workflow.metadata.MetaDataMap;
import com.day.cq.workflow.model.WorkflowModel;
import com.day.cq.workflow.model.WorkflowNode;
import com.day.cq.workflow.model.WorkflowTransition;
@Component
@Service
@Properties({
@Property(name = Constants.SERVICE_DESCRIPTION, value = "general topic owner mark and approval each topic page step"),
@Property(name = Constants.SERVICE_VENDOR, value = "Someone"),
@Property(name = "process.label", value = "SAMPLE MMS NL - General Topic Owner Approval Process Step") })
public class TopicOwnerHandleProcessStep implements WorkflowProcess {
protected final Logger logger = LoggerFactory.getLogger(this.getClass());
@Reference
ResourceResolverFactory resourceResolverFactory;
@Reference
private ConfigurationService configurationService;
@Reference
private TopicOwnerBizService topicOwnerBizService;
@Reference
private WorkflowService workflowService;
@Override
public void execute(WorkItem workItem, WorkflowSession workflowSession, MetaDataMap metaDataMap)
throws WorkflowException {
final WorkflowData workflowData = workItem.getWorkflowData();
final String payLoadType = workflowData.getPayloadType();
if(!StringUtils.equals(payLoadType, "JCR_PATH")){
return;
}
final String payLoad = workflowData.getPayload().toString();
String topicOwnerGroup = configurationService.getTopic_owner_participant_group();
ResourceResolver resourceResolver = null;
UserManager userManager = null;
try {
resourceResolver = WorkflowUtil.getResourceResolver(resourceResolverFactory);
userManager = resourceResolver.adaptTo(UserManager.class);
List<User> list = WorkflowUtil.getUsersByGroup(userManager, topicOwnerGroup);
User user = null;
//create a model for each topic owner approval
WorkflowModel wm = workflowSession.createNewModel("sample mms topic owner review each topic page_" + System.currentTimeMillis());
WorkflowData wd = workflowSession.newWorkflowData("JCR_PATH", payLoad);
//get start node
WorkflowNode startNode = wm.getRootNode();
//wm.createNode("start",WorkflowNode.TYPE_START,"");
//get end node
WorkflowNode endNode = wm.getEndNode();
//wm.createNode("end",WorkflowNode.TYPE_END,"");
//create and split node
WorkflowNode andSplitNode = wm.createNode("and split",WorkflowNode.TYPE_AND_SPLIT,null);
//create and join node
WorkflowNode andJoinNode = wm.createNode("and join",WorkflowNode.TYPE_AND_JOIN,"");
wm.validate();
//create transition between start node and split node
wm.createTransition();
//wm.createTransition(startNode,andSplitNode,null);
//create transition between split node and end node
wm.createTransition(andJoinNode,endNode,null);
for(int i=0;i<list.size();i++){
user = list.get(i);
Iterator<Resource> resources = topicOwnerBizService.getResourceByTopicOwner(resourceResolver, payLoad, user.getID());
if(resources.hasNext()){
// ResourceResolver resourceResolver1 = WorkflowUtil.getResourceResolver(resourceResolverFactory);
// Session session = resourceResolver1.adaptTo(Session.class);
// WorkflowSession workflowSession1 = workflowService.getWorkflowSession(session);
// WorkflowModel wm = workflowSession1.getModel(configurationService.getTopicOwnerHandleSubsequentWorkflow());
// WorkflowData wd = workflowSession1.newWorkflowData("JCR_PATH", payLoad);
// wd.getMetaDataMap().put("topicOwnerParticipant", user.getID());
// workflowSession1.startWorkflow(wm, wd);
//create branch node for and split node
WorkflowNode topicOwnerParticipantNode = wm.createNode("topic owner participant",WorkflowNode.TYPE_PARTICIPANT,"");
//{timeoutMillis=0, timeoutHandler=com.sample.mms.workflow.TopicOwnerTimeoutHandler, PARTICIPANT=hunter.liang}
topicOwnerParticipantNode.getMetaDataMap().put("timeoutMillis", 0L);
topicOwnerParticipantNode.getMetaDataMap().put("timeoutHandler", "com.sample.mms.workflow.TopicOwnerTimeoutHandler");
topicOwnerParticipantNode.getMetaDataMap().put("PARTICIPANT", user.getID());
WorkflowNode orSplitNode = wm.createNode("or split",WorkflowNode.TYPE_OR_SPLIT,"");
WorkflowNode orJoinNode = wm.createNode("or join",WorkflowNode.TYPE_OR_JOIN,"");
//{PROCESS_AUTO_ADVANCE=true, PROCESS=com.sample.mms.workflow.TopicOwnerApprovalProcessStep, PROCESS_ARGS=approval}
WorkflowNode topicOwnerApprovalNode = wm.createNode("topic owner approval",WorkflowNode.TYPE_PROCESS,"");
topicOwnerApprovalNode.getMetaDataMap().put("PROCESS_AUTO_ADVANCE", true);
topicOwnerApprovalNode.getMetaDataMap().put("PROCESS", "com.sample.mms.workflow.TopicOwnerApprovalProcessStep");
topicOwnerApprovalNode.getMetaDataMap().put("PROCESS_ARGS", "approval");
//{PROCESS_AUTO_ADVANCE=true, PROCESS=com.sample.mms.workflow.TopicOwnerApprovalProcessStep, PROCESS_ARGS=reject}
WorkflowNode topicOwnerRejectNode = wm.createNode("topic owner reject",WorkflowNode.TYPE_PROCESS,"");
topicOwnerRejectNode.getMetaDataMap().put("PROCESS_AUTO_ADVANCE", true);
topicOwnerRejectNode.getMetaDataMap().put("PROCESS", "com.sample.mms.workflow.TopicOwnerApprovalProcessStep");
topicOwnerRejectNode.getMetaDataMap().put("PROCESS_ARGS", "reject");
WorkflowNode timeoutNode = wm.createNode("time out join",WorkflowNode.TYPE_PROCESS,"");
//wm.createTransition(andSplitNode,orSplitNode,"");
wm.createTransition(orSplitNode,topicOwnerApprovalNode,null);
wm.createTransition(orSplitNode,topicOwnerRejectNode,null);
WorkflowTransition orSplitAndTimeOutTransition = wm.createTransition(orSplitNode,timeoutNode,null);
orSplitAndTimeOutTransition.setRule("function check(){return false;}");
wm.createTransition(topicOwnerApprovalNode,orJoinNode,null);
wm.createTransition(topicOwnerRejectNode,orJoinNode,null);
wm.createTransition(timeoutNode,orJoinNode,null);
wm.createTransition(andSplitNode,orSplitNode,null);
wm.createTransition(orJoinNode,andJoinNode,null);
}
}
workflowSession.startWorkflow(wm, wd);
} catch (LoginException e) {
e.printStackTrace();
} catch (RepositoryException e) {
e.printStackTrace();
}
}
}
The error log is below!
20.04.2016 17:35:24.054 *INFO* [JobHandler: /etc/workflow/instances/2016-04-20/model_27918689599044:/content/samplemms/2016/02/index] com.adobe.granite.workflow.core.WorkflowSessionImpl Workflow model deployed: /etc/workflow/models/sample_mms_topic_owner_175(Version: 1.0)
20.04.2016 17:35:36.015 *ERROR* [JobHandler: /etc/workflow/instances/2016-04-20/model_27918689599044:/content/samplemms/2016/02/index] com.day.cq.workflow.compatibility.CQWorkflowProcessRunner Process execution resulted in an error: null
java.lang.NullPointerException: null
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:155)
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:149)
at com.day.cq.workflow.impl.model.CQWorkflowModelWrapper.createTransition(CQWorkflowModelWrapper.java:145)
at com.sample.mms.workflow.TopicOwnerHandleProcessStep.execute(TopicOwnerHandleProcessStep.java:105)
at com.day.cq.workflow.compatibility.CQWorkflowProcessRunner.execute(CQWorkflowProcessRunner.java:93)
at com.adobe.granite.workflow.core.job.HandlerBase.executeProcess(HandlerBase.java:215)
at com.adobe.granite.workflow.core.job.JobHandler.process(JobHandler.java:140)
at org.apache.sling.event.jobs.JobUtil$1.run(JobUtil.java:365)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
20.04.2016 17:35:36.015 *ERROR* [JobHandler: /etc/workflow/instances/2016-04-20/model_27918689599044:/content/samplemms/2016/02/index] com.adobe.granite.workflow.core.job.JobHandler Process execution resulted in an error
com.adobe.granite.workflow.WorkflowException: Process execution resulted in an error
at com.adobe.granite.workflow.core.job.HandlerBase.executeProcess(HandlerBase.java:225)
at com.adobe.granite.workflow.core.job.JobHandler.process(JobHandler.java:140)
at org.apache.sling.event.jobs.JobUtil$1.run(JobUtil.java:365)
at java.util.concurrent.ThreadPoolExecutor.runWorker(ThreadPoolExecutor.java:1145)
at java.util.concurrent.ThreadPoolExecutor$Worker.run(ThreadPoolExecutor.java:615)
at java.lang.Thread.run(Thread.java:745)
Caused by: com.adobe.granite.workflow.WorkflowException: Failed to execute process
at com.day.cq.workflow.compatibility.CQWorkflowProcessRunner.execute(CQWorkflowProcessRunner.java:108)
at com.adobe.granite.workflow.core.job.HandlerBase.executeProcess(HandlerBase.java:215)
... 5 common frames omitted
Caused by: java.lang.NullPointerException: null
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:155)
at com.adobe.granite.workflow.core.model.WorkflowModelImpl.createTransition(WorkflowModelImpl.java:149)
at com.day.cq.workflow.impl.model.CQWorkflowModelWrapper.createTransition(CQWorkflowModelWrapper.java:145)
at com.sample.mms.workflow.TopicOwnerHandleProcessStep.execute(TopicOwnerHandleProcessStep.java:105)
at com.day.cq.workflow.compatibility.CQWorkflowProcessRunner.execute(CQWorkflowProcessRunner.java:93)
... 6 common frames omitted
Instead of the com.day.cq.workflow APIs, can you try using the com.adobe.granite.workflow APIs?
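A minimal sketch of that switch, assuming the step can adapt its ResourceResolver to the Granite WorkflowSession (the model-building calls mirror the com.day.cq code above; the Granite interfaces expose equivalently named methods, but verify them against your AEM version):
import org.apache.sling.api.resource.ResourceResolver;
import com.adobe.granite.workflow.WorkflowSession;
import com.adobe.granite.workflow.exec.WorkflowData;
import com.adobe.granite.workflow.model.WorkflowModel;
import com.adobe.granite.workflow.model.WorkflowNode;

// inside execute(): adapt from the resource resolver rather than reusing the
// com.day.cq WorkflowSession passed to the process step; Granite calls throw
// com.adobe.granite.workflow.WorkflowException, so handle or rethrow it
WorkflowSession graniteSession = resourceResolver.adaptTo(WorkflowSession.class);
WorkflowModel wm = graniteSession.createNewModel("topic owner review_" + System.currentTimeMillis());
WorkflowNode start = wm.getRootNode();
WorkflowNode andSplit = wm.createNode("and split", WorkflowNode.TYPE_AND_SPLIT, null);
// always pass explicit from/to nodes: the no-argument createTransition() used in
// the code above leaves them unset, which matches the NullPointerException in
// WorkflowModelImpl.createTransition shown in the log
wm.createTransition(start, andSplit, null);
wm.validate();
WorkflowData wd = graniteSession.newWorkflowData("JCR_PATH", payLoad);
graniteSession.startWorkflow(wm, wd);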