Get number of attempts in Quartz - quartz-scheduler

Can anyone please tell me how I can get the current attempt count in Quartz?
Example: if the Quartz scheduler is started with a repeat count of 5, I want to get the current repeat count.
Here is the example I am trying with:
public class SimpleTriggerExample implements Job
{
int count = 0;
JobDetail job = null;
JobDataMap data = null;
public static void main( String[] args ) throws Exception
{
new SimpleTriggerExample().schedule();
}
public void schedule() throws ParseException, SchedulerException{
job = JobBuilder.newJob(SimpleTriggerExample.class)
.withIdentity("dummyJobName", "group1").build();
Trigger trigger = TriggerBuilder
.newTrigger()
.withIdentity("dummyTriggerName", "group1")
.withSchedule(SimpleScheduleBuilder.simpleSchedule()
.withIntervalInSeconds(10).withRepeatCount(3))
.build();
System.out.println("before in main jobdatamap");
Scheduler scheduler = new StdSchedulerFactory().getScheduler();
scheduler.start();
scheduler.scheduleJob(job, trigger);
}
public void execute(JobExecutionContext context)
throws JobExecutionException {
//count
data = context.getJobDetail().getJobDataMap();
System.out.println("after jobdatamap");
int count1 = data.getInt("EXECUTION_COUNT");
System.out.println("count1-->before"+count1);
count1++;
System.out.println("count1-->after"+count1);
job.getJobDataMap().put("EXECUTION_COUNT", count1);
count = count1;
System.out.println("count"+count);
}
}

Use a JobDataMap along with the @PersistJobDataAfterExecution annotation.
Make sure that when you modify data in the JobDataMap you use the same key.
If you do this, you can persist the number of attempts as required.
Example code snippet:
package com.mss.quartz.demo;
import java.text.SimpleDateFormat;
import java.util.Date;
import org.quartz.InterruptableJob;
import org.quartz.Job;
import org.quartz.JobBuilder;
import org.quartz.JobDataMap;
import org.quartz.JobDetail;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.JobKey;
import org.quartz.PersistJobDataAfterExecution;
import org.quartz.Scheduler;
import org.quartz.SchedulerContext;
import org.quartz.SchedulerException;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.TriggerBuilder;
import org.quartz.UnableToInterruptJobException;
import org.quartz.impl.StdSchedulerFactory;
@PersistJobDataAfterExecution
public class HelloJob implements InterruptableJob
{
SchedulerContext schedulerContext = null;
testQuartz test = new testQuartz();
boolean result;
private boolean _interrupted = false;
private JobKey _jobKey = null;
Thread t = null;
//public static int count = 0;
public void interrupt() throws UnableToInterruptJobException {
System.out.println("---" + this._jobKey + " -- INTERRUPTING --");
this._interrupted = true;
}
public void execute(JobExecutionContext context)
throws JobExecutionException {
Scheduler scd = context.getScheduler();
JobDataMap dataMap = context.getJobDetail().getJobDataMap();
String jobSays = dataMap.getString("test1");
int myFloatValue = dataMap.getIntValue("id");
System.out.println("In Job Class"+jobSays+ " "+myFloatValue+" Current time in
Job class "+new Date().toString());
JobKey jobKey = context.getJobDetail().getKey();
int attempts = dataMap.getInt("attempts");
attempts++;
dataMap.put("attempts", attempts);
System.out.println("After putting count in job data map:"+dataMap.get("attempts"));
}
}
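The snippet above assumes the "test1", "id" and "attempts" keys already exist in the JobDataMap. As a rough sketch (the job/trigger identities and seed values here are made up for illustration, not taken from the original post), the job could be scheduled like this so the first getInt("attempts") call succeeds:
import org.quartz.JobBuilder;
import org.quartz.JobDetail;
import org.quartz.Scheduler;
import org.quartz.SimpleScheduleBuilder;
import org.quartz.Trigger;
import org.quartz.TriggerBuilder;
import org.quartz.impl.StdSchedulerFactory;
public class HelloJobRunner {
    public static void main(String[] args) throws Exception {
        Scheduler scheduler = new StdSchedulerFactory().getScheduler();
        // Seed every key that execute() reads, including the "attempts" counter.
        JobDetail job = JobBuilder.newJob(HelloJob.class)
                .withIdentity("helloJob", "group1")
                .usingJobData("test1", "hello")
                .usingJobData("id", 1)
                .usingJobData("attempts", 0)
                .build();
        Trigger trigger = TriggerBuilder.newTrigger()
                .withIdentity("helloTrigger", "group1")
                .withSchedule(SimpleScheduleBuilder.simpleSchedule()
                        .withIntervalInSeconds(10)
                        .withRepeatCount(3))
                .build();
        scheduler.scheduleJob(job, trigger);
        scheduler.start();
    }
}
Because HelloJob is annotated with @PersistJobDataAfterExecution, the updated "attempts" value written in execute() is stored back into the JobDetail's JobDataMap after each run.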

Try adding the @PersistJobDataAfterExecution annotation to the SimpleTriggerExample class:
@PersistJobDataAfterExecution
public class SimpleTriggerExample implements Job
{ ... }
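Keep in mind that Quartz creates a new SimpleTriggerExample instance for every execution, so the job field assigned in schedule() is null inside execute(); write the counter back through the JobDataMap obtained from the JobExecutionContext instead. A minimal sketch of the annotated job (reusing the EXECUTION_COUNT key from the question, and assuming it is seeded with usingJobData("EXECUTION_COUNT", 0) when the JobDetail is built):
import org.quartz.Job;
import org.quartz.JobDataMap;
import org.quartz.JobExecutionContext;
import org.quartz.JobExecutionException;
import org.quartz.PersistJobDataAfterExecution;
@PersistJobDataAfterExecution
public class SimpleTriggerExample implements Job {
    public void execute(JobExecutionContext context) throws JobExecutionException {
        // Read and update the counter on the map attached to this execution's JobDetail.
        JobDataMap data = context.getJobDetail().getJobDataMap();
        int count = data.getInt("EXECUTION_COUNT");
        count++;
        data.put("EXECUTION_COUNT", count);
        // With @PersistJobDataAfterExecution the updated map is saved after execute() returns.
        System.out.println("current attempt: " + count);
    }
}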

Related

How to verify that an HTTP connection pool can improve performance

I want to use an HTTP connection pool with Spring RestTemplate, but before using it, I need to verify whether it can improve performance.
I did a little programming here:
@Configuration
public class RestTemplateConfig {
@Bean
public RestTemplate restTemplate() {
return new RestTemplate();
}
}
and test code here
@SpringBootTest
class RestnopoolApplicationTests {
String url = "https://www.baidu.com/";
// String url = "http://localhost:8080/actuator/";
@Autowired
RestTemplate restTemplate;
@Test
void contextLoads() {
}
@Test
void verify_health() {
Instant start = Instant.now();
for(int i=0; i < 100; i ++) {
restTemplate.getForObject(url, String.class);
}
Instant end = Instant.now();
Duration d = Duration.between(start,end );
System.out.println("time span " + d.getSeconds());
}
}
I also wrote the HTTP connection pool, shown below:
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.TimeUnit;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.ssl.SSLContextBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
/**
* - Supports both HTTP and HTTPS
* - Uses a connection pool to re-use connections and save overhead of creating connections.
* - Has a custom connection keep-alive strategy (to apply a default keep-alive if one isn't specified)
* - Starts an idle connection monitor to continuously clean up stale connections.
*/
@Configuration
@EnableScheduling
public class HttpClientConfig {
private static final Logger LOGGER = LoggerFactory.getLogger(HttpClientConfig.class);
// Determines the timeout in milliseconds until a connection is established.
private static final int CONNECT_TIMEOUT = 30000;
// The timeout when requesting a connection from the connection manager.
private static final int REQUEST_TIMEOUT = 30000;
// The timeout for waiting for data
private static final int SOCKET_TIMEOUT = 60000;
private static final int MAX_TOTAL_CONNECTIONS = 50;
private static final int DEFAULT_KEEP_ALIVE_TIME_MILLIS = 20 * 1000;
private static final int CLOSE_IDLE_CONNECTION_WAIT_TIME_SECS = 30;
@Bean
public PoolingHttpClientConnectionManager poolingConnectionManager() {
SSLContextBuilder builder = new SSLContextBuilder();
try {
builder.loadTrustMaterial(null, new TrustSelfSignedStrategy());
} catch (NoSuchAlgorithmException | KeyStoreException e) {
LOGGER.error("Pooling Connection Manager Initialisation failure because of " + e.getMessage(), e);
}
SSLConnectionSocketFactory sslsf = null;
try {
sslsf = new SSLConnectionSocketFactory(builder.build());
} catch (KeyManagementException | NoSuchAlgorithmException e) {
LOGGER.error("Pooling Connection Manager Initialisation failure because of " + e.getMessage(), e);
}
Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder
.<ConnectionSocketFactory>create().register("https", sslsf)
.register("http", new PlainConnectionSocketFactory())
.build();
PoolingHttpClientConnectionManager poolingConnectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
poolingConnectionManager.setMaxTotal(MAX_TOTAL_CONNECTIONS);
return poolingConnectionManager;
}
@Bean
public ConnectionKeepAliveStrategy connectionKeepAliveStrategy() {
return new ConnectionKeepAliveStrategy() {
@Override
public long getKeepAliveDuration(HttpResponse response, HttpContext context) {
HeaderElementIterator it = new BasicHeaderElementIterator
(response.headerIterator(HTTP.CONN_KEEP_ALIVE));
while (it.hasNext()) {
HeaderElement he = it.nextElement();
String param = he.getName();
String value = he.getValue();
if (value != null && param.equalsIgnoreCase("timeout")) {
return Long.parseLong(value) * 1000;
}
}
return DEFAULT_KEEP_ALIVE_TIME_MILLIS;
}
};
}
@Bean
public CloseableHttpClient httpClient() {
RequestConfig requestConfig = RequestConfig.custom()
.setConnectionRequestTimeout(REQUEST_TIMEOUT)
.setConnectTimeout(CONNECT_TIMEOUT)
.setSocketTimeout(SOCKET_TIMEOUT).build();
return HttpClients.custom()
.setDefaultRequestConfig(requestConfig)
.setConnectionManager(poolingConnectionManager())
.setKeepAliveStrategy(connectionKeepAliveStrategy())
.build();
}
@Bean
public Runnable idleConnectionMonitor(final PoolingHttpClientConnectionManager connectionManager) {
return new Runnable() {
@Override
@Scheduled(fixedDelay = 10000)
public void run() {
try {
if (connectionManager != null) {
LOGGER.trace("run IdleConnectionMonitor - Closing expired and idle connections...");
connectionManager.closeExpiredConnections();
connectionManager.closeIdleConnections(CLOSE_IDLE_CONNECTION_WAIT_TIME_SECS, TimeUnit.SECONDS);
} else {
LOGGER.trace("run IdleConnectionMonitor - Http Client Connection manager is not initialised");
}
} catch (Exception e) {
LOGGER.error("run IdleConnectionMonitor - Exception occurred. msg={}, e={}", e.getMessage(), e);
}
}
};
}
}
and RestTemplateConfig below
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.web.client.RestTemplate;
import org.apache.http.impl.client.CloseableHttpClient;
@Configuration
public class RestTemplateConfig {
@Autowired
CloseableHttpClient httpClient;
@Bean
public RestTemplate restTemplate() {
RestTemplate restTemplate = new RestTemplate(clientHttpRequestFactory());
return restTemplate;
}
@Bean
public HttpComponentsClientHttpRequestFactory clientHttpRequestFactory() {
HttpComponentsClientHttpRequestFactory clientHttpRequestFactory = new HttpComponentsClientHttpRequestFactory();
clientHttpRequestFactory.setHttpClient(httpClient);
return clientHttpRequestFactory;
}
@Bean
public TaskScheduler taskScheduler() {
ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
scheduler.setThreadNamePrefix("poolScheduler");
scheduler.setPoolSize(50);
return scheduler;
}
}
The test result cannot prove that the connection pool improves performance.
You have not used your new implementation; you are still using the default client. Use your httpClient() method to get the CloseableHttpClient.
Please also note that your test is synchronous: no matter how many connections you have in the pool, you will use them sequentially. Use threads to execute the GET requests.
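As a rough sketch of the concurrent test suggested above (the thread and request counts are arbitrary, and restTemplate is assumed to be the bean built on HttpComponentsClientHttpRequestFactory), something like this exercises the pool from several threads:
import java.time.Duration;
import java.time.Instant;
import java.util.concurrent.CountDownLatch;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import org.springframework.web.client.RestTemplate;
public class PooledRestTemplateLoadTest {
    // Fires the same GET from several threads so more than one pooled connection is used.
    static long measureMillis(RestTemplate restTemplate, String url,
                              int threads, int requestsPerThread) throws InterruptedException {
        ExecutorService executor = Executors.newFixedThreadPool(threads);
        CountDownLatch done = new CountDownLatch(threads);
        Instant start = Instant.now();
        for (int t = 0; t < threads; t++) {
            executor.submit(() -> {
                try {
                    for (int i = 0; i < requestsPerThread; i++) {
                        restTemplate.getForObject(url, String.class);
                    }
                } finally {
                    done.countDown();
                }
            });
        }
        done.await();
        executor.shutdown();
        return Duration.between(start, Instant.now()).toMillis();
    }
}
Comparing the measured time for a plain new RestTemplate() against the pooled one under the same thread count gives a more meaningful comparison than the sequential loop.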

Apache Storm Problem: "Unable to canonicalize address localhost/<unresolved>:2000 because it's not resolvable"

When I launch a simple Storm program from IntelliJ IDEA or Eclipse, I encounter this problem: "Unable to canonicalize address localhost/<unresolved>:2000 because it's not resolvable".
import org.apache.storm.*;
import org.apache.storm.generated.*;
import org.apache.storm.spout.SpoutOutputCollector;
import org.apache.storm.task.OutputCollector;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.topology.TopologyBuilder;
import org.apache.storm.topology.base.BaseRichBolt;
import org.apache.storm.topology.base.BaseRichSpout;
import org.apache.storm.tuple.*;
import org.apache.storm.utils.Utils;
import java.util.Map;
import java.util.Random;
public class ExclamationExample {
public static class TestWordSpout extends BaseRichSpout {
private SpoutOutputCollector _collector;
@Override
public void open(Map map, TopologyContext topologyContext, SpoutOutputCollector spoutOutputCollector) {
_collector = spoutOutputCollector;
}
@Override
public void nextTuple() {
Utils.sleep(100);
final String[] words = new String[]{"nathan", "mike", "jackson", "golda", "bertels"};
final Random rand = new Random();
final String word = words[rand.nextInt(words.length)];
_collector.emit(new Values(word));
System.out.printf("Word spout: %s\n", word);
}
@Override
public void declareOutputFields(OutputFieldsDeclarer outputFieldsDeclarer) {
outputFieldsDeclarer.declare(new Fields("word"));
}
}
public static class ExclamationBolt extends BaseRichBolt {
private OutputCollector collector;
@Override
public void prepare(Map conf, TopologyContext context, OutputCollector collector) {
this.collector = collector;
}
@Override
public void execute(Tuple tuple) {
String val = tuple.getString(0) + "!!!";
collector.emit(tuple, new Values(val));
System.out.printf("Exclamation bolt: %s\n", val);
collector.ack(tuple);
}
@Override
public void declareOutputFields(OutputFieldsDeclarer declarer) {
declarer.declare(new Fields("word"));
}
}
//TOPOLOGY
public static void main(String[] args) throws Exception {
Config conf = new Config();
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("word", new TestWordSpout(), 2);
builder.setBolt("exclaim1", new ExclamationBolt(), 2).shuffleGrouping("word");
builder.setBolt("exclaim2", new ExclamationBolt(), 2).shuffleGrouping("exclaim1");
conf.setDebug(false);
String topologyName = "test";
conf.setNumWorkers(2);
//If there are arguments, we are running on a cluster
if (args != null && args.length > 0) {
conf.setNumWorkers(3);
topologyName = args[0];
StormSubmitter.submitTopology(topologyName, conf, builder.createTopology());
}
try (LocalCluster cluster = new LocalCluster()) {
cluster.submitTopology(topologyName, conf, builder.createTopology());
Utils.sleep(600000); // wait [param] ms
cluster.killTopology(topologyName);
cluster.shutdown();
} catch (Exception e) {
e.printStackTrace();
}
}
}
What is a possible solution?
I resolved my problem. In case it helps someone, here is the solution: I was running the Java application with JDK 17, which caused the problem. I then created a new project with JDK 1.8 and everything worked!
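A quick way to confirm which runtime the IDE launch configuration actually uses (the class name here is just for illustration) is to print the JVM's version properties; after switching the project or run configuration to JDK 1.8, this should report a 1.8.x version:
public class JavaVersionCheck {
    public static void main(String[] args) {
        // Standard JVM system properties; useful to verify the run configuration's JDK.
        System.out.println("java.version = " + System.getProperty("java.version"));
        System.out.println("java.home    = " + System.getProperty("java.home"));
    }
}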

How to extract error records while inserting into a DB table using JdbcIO (Apache Beam, Java)

I am creating an in-memory PCollection and writing it to PostgreSQL. Now, when I insert data into the table, a few records may throw an exception and will not be inserted. How can I extract such failed insert records when I run the pipeline?
Below is the code I have written for the pipeline:
PipelineOptions options = PipelineOptionsFactory.create();
options.setRunner(FlinkRunner.class);
Pipeline p = Pipeline.create(options);
// Preparing dummy data
Collection<Stock> stockList = Arrays.asList(new Stock("AAP", 2000,"Apple Inc"),
new Stock("MSF", 3000, "Microsoft Corporation"),
new Stock("NVDA", 4000, "NVIDIA Corporation"),
new Stock("INT", 3200, "Intel Corporation"));
// Reading dummy data and save it into PCollection<Stock>
PCollection<Stock> data = p.apply(Create.of(stockList)
.withCoder(SerializableCoder.of(Stock.class)));
//insert
@SuppressWarnings("unused")
PDone insertData = data.apply(JdbcIO.<Stock>write()
.withDataSourceConfiguration(JdbcIO.DataSourceConfiguration
.create("org.postgresql.Driver","jdbc:postgresql://localhost:5432/postgres")
.withUsername("postgres").withPassword("sachin"))
.withStatement("insert into stocks values(?, ?, ?)")
.withPreparedStatementSetter(new JdbcIO.PreparedStatementSetter<Stock>() {
private static final long serialVersionUID = 1L;
public void setParameters(Stock element, PreparedStatement query) throws SQLException {
query.setString(1, element.getSymbol());
query.setLong(2, element.getPrice());
query.setString(3, element.getCompany());
}
}));
p.run().waitUntilFinish();
After going through the Apache Beam programming guide I did not find any clue, so I copied JdbcIO and modified its batch execution, separating successfully inserted records from failed inserts using TupleTags. Now it is working.
Below is the code for the modified JdbcIO:
private static class WriteFn<T> extends DoFn<T, T> {
private static final int DEFAULT_BATCH_SIZE = 1;
private final Write<T> spec;
private DataSource dataSource;
private Connection connection;
private PreparedStatement preparedStatement;
private TupleTag<T> validTupleTag;
private TupleTag<T> inValidTupleTag;
private int batchCount;
public WriteFn(Write<T> spec) {
this.spec = spec;
}
@Setup
public void setup() throws Exception {
dataSource = spec.getDataSourceConfiguration().buildDatasource();
connection = dataSource.getConnection();
connection.setAutoCommit(false);
preparedStatement = connection.prepareStatement(spec.getStatement());
validTupleTag = spec.getValidTupleTag();
inValidTupleTag = spec.getInvalidTupleTag();
}
@StartBundle
public void startBundle() {
batchCount = 0;
}
@ProcessElement
public void processElement(@Element T record, MultiOutputReceiver out)
throws Exception {
preparedStatement.clearParameters();
spec.getPreparedStatementSetter().setParameters(record,
preparedStatement);
preparedStatement.addBatch();
batchCount++;
if (batchCount >= DEFAULT_BATCH_SIZE) {
if (batchCount > 0) {
try {
preparedStatement.executeBatch();
connection.commit();
out.get(validTupleTag).output(record);
} catch (SQLException e1) {
//TODO add logger
out.get(inValidTupleTag).output(record);
}
batchCount = 0;
}
}
}
}
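The snippet above only shows the modified DoFn. One way the custom write transform could expose both outputs to callers is an expand() that routes WriteFn through ParDo.withOutputTags; this is only a sketch, and the getValidTupleTag()/getInvalidTupleTag() getters mirror the ones referenced in setup() above and, like CustomJdbcIOWrite itself, are not part of the stock JdbcIO:
// Inside the custom Write<T> transform (extends PTransform<PCollection<T>, PCollectionTuple>):
@Override
public PCollectionTuple expand(PCollection<T> input) {
    return input.apply(
            ParDo.of(new WriteFn<>(this))
                 // main output: records whose batch committed
                 .withOutputTags(getValidTupleTag(),
                         // additional output: records whose batch failed
                         TupleTagList.of(getInvalidTupleTag())));
}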
And the client code:
import java.sql.PreparedStatement;
import java.sql.SQLException;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collection;
import java.util.List;
import org.apache.beam.runners.flink.FlinkRunner;
import org.apache.beam.sdk.Pipeline;
import org.apache.beam.sdk.PipelineResult.State;
import org.apache.beam.sdk.coders.SerializableCoder;
import org.apache.beam.sdk.options.PipelineOptions;
import org.apache.beam.sdk.options.PipelineOptionsFactory;
import org.apache.beam.sdk.transforms.Create;
import org.apache.beam.sdk.transforms.DoFn;
import org.apache.beam.sdk.transforms.ParDo;
import org.apache.beam.sdk.values.PCollection;
import org.apache.beam.sdk.values.PCollectionTuple;
import org.apache.beam.sdk.values.TupleTag;
/**
* @author sachin
* @date 18-Nov-2021
*/
public class BeamTest {
static List<Stock> stocks = new ArrayList<>();
public static void main(String[] args) {
System.setProperty("java.specification.version", "1.8");
process();
// read();
}
public static void process() {
final TupleTag<Stock> VALID = new TupleTag<Stock>() {
};
final TupleTag<Stock> INVALID = new TupleTag<Stock>() {
};
PipelineOptions options = PipelineOptionsFactory.create();
options.setRunner(FlinkRunner.class);
Pipeline p = Pipeline.create(options);
// Preparing dummy data
Collection<Stock> stockList = Arrays.asList(
new Stock("AAP", 2000, "Apple Inc"),
new Stock("MSF", 3000, "Microsoft Corporation"),
new Stock("NVDA", 4000, "NVIDIA Corporation"),
new Stock("INT", 3200, "Intel Corporation"));
// Reading dummy data and save it into PCollection<Stock>
PCollection<Stock> data =
p.apply(Create.of(stockList).
withCoder(SerializableCoder.of(Stock.class)));
// insert
PCollectionTuple pCollectionTupleResult = data.apply("write",
CustomJdbcIOWrite.<Stock>write()
.withDataSourceConfiguration(CustomJdbcIOWrite.DataSourceConfiguration
.create("org.postgresql.Driver",
"jdbc:postgresql://localhost:5432/postgres")
.withUsername("postgres").withPassword("sachin"))
.withStatement("insert into stocks values(?, ?,
?)").withValidTag(VALID).withInValidTag(INVALID)
.withPreparedStatementSetter(new
CustomJdbcIOWrite.PreparedStatementSetter<Stock>() {
private static final long serialVersionUID = 1L;
public void setParameters(Stock element,
PreparedStatement query) throws SQLException {
query.setString(1, element.getSymbol());
query.setLong(2, element.getPrice());
query.setString(3, element.getCompany());
}
}));
// get failed PCollection using INVALID tupletag
PCollection<Stock> failedPcollection =
pCollectionTupleResult.get(INVALID)
.setCoder(SerializableCoder.of(Stock.class));
failedPcollection.apply(ParDo.of(new DoFn<Stock, Stock>() {
private static final long serialVersionUID = 1L;
@ProcessElement
public void process(ProcessContext pc) {
System.out.println("Failed pCollection element:" +
pc.element().getCompany());
}
}));
//get failed PCollection using INVALID tupletag
PCollection<Stock> insertedPcollection =
pCollectionTupleResult.get(VALID)
.setCoder(SerializableCoder.of(Stock.class));
insertedPcollection.apply(ParDo.of(new DoFn<Stock, Stock>() {
private static final long serialVersionUID = 1L;
@ProcessElement
public void process(ProcessContext pc) {
System.out.println("Inserted pCollection element:" +
pc.element().getCompany());
}
}));
// run pipeline
State state = p.run().waitUntilFinish();
System.out.println("Data inserted successfully with state : " +
state);
}
}
Below is the output. new Stock("NVDA", 4000, "NVIDIA Corporation") is intentionally not inserted, because my DB column accepts only 3 characters ("NVD") and not 4 ("NVDA"):
Inserted pCollection element:Microsoft Corporation
Failed pCollection element:NVIDIA Corporation
Inserted pCollection element:Intel Corporation
Inserted pCollection element:Apple Inc
Data inserted successfully with state : DONE
Full Details and github link

Storm Kafka Topology terminates without output

This is the StBolt.java class.
package com.storm.cassandra;
import java.util.Map;
import net.sf.json.JSONObject;
import net.sf.json.JSONSerializer;
import org.apache.log4j.Logger;
import org.apache.storm.task.TopologyContext;
import org.apache.storm.topology.BasicOutputCollector;
import org.apache.storm.topology.IBasicBolt;
import org.apache.storm.topology.OutputFieldsDeclarer;
import org.apache.storm.tuple.Fields;
import org.apache.storm.tuple.Tuple;
import com.datastax.driver.core.Cluster;
import com.datastax.driver.core.Row;
import com.datastax.driver.core.Session;
public class StBolt implements IBasicBolt {
private static final long serialVersionUID = 1L;
private static final Logger logger = Logger
.getLogger(StBolt.class);
private static Session session = null;
private Cluster cluster = null;
String cassandraURL;
JSONObject eventJson = null;
String topicname = null;
String ip = null;
String menu = null;
String product = null;
Row row = null;
com.datastax.driver.core.ResultSet viewcount = null;
com.datastax.driver.core.ResultSet segmentlistResult = null;
com.datastax.driver.core.ResultSet newCountUpdatedResult = null;
public StBolt(String topicname) {
this.topicname = topicname;
}
public void prepare(Map stormConf, TopologyContext topologyContext) {
cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
System.out.println("load cassandra ip");
session = cluster.connect();
System.out.println("CassandraCounterBolt prepare method ended");
}
public void execute(Tuple input, BasicOutputCollector collector) {
System.out.println("Execute");
Fields fields = input.getFields();
try {
eventJson = (JSONObject) JSONSerializer.toJSON((String) input
.getValueByField(fields.get(0)));
topicname = (String) eventJson.get("topicName");
ip = (String) eventJson.get("ip");
menu = (String) eventJson.get("menu");
product = (String) eventJson.get("product");
String ievent = "ievent";
String install = "install";
viewcount = session
.execute("update webapp.viewcount set count=count+1 where topicname='"+topicname+
"'and ip= '"+ip+"'and menu='"+menu+"'and product='"+product+"'" );
} catch (Exception e) {
e.printStackTrace();
}
}
public void declareOutputFields(OutputFieldsDeclarer declarer) {
}
public Map<String, Object> getComponentConfiguration() {
return null;
}
public void cleanup() {
}
}
Here is the StTopology.java class
package com.storm.cassandra;
import org.apache.storm.kafka.BrokerHosts;
import org.apache.storm.kafka.KafkaSpout;
import org.apache.storm.kafka.SpoutConfig;
import org.apache.storm.kafka.StringScheme;
import org.apache.storm.kafka.ZkHosts;
import org.apache.storm.Config;
import org.apache.storm.LocalCluster;
import org.apache.storm.StormSubmitter;
import org.apache.storm.spout.SchemeAsMultiScheme;
import org.apache.storm.topology.TopologyBuilder;
public class StTopology {
public static void main(String[] args) throws Exception {
if (args.length == 4) {
BrokerHosts hosts = new ZkHosts("localhost:2181");
//System.out
//.println("Insufficent Arguements - topologyName kafkaTopic ZKRoot ID");
SpoutConfig kafkaConf1 = new SpoutConfig(hosts, args[1], args[2],
args[3]);
//System.out
//.println("Insufficent Arguements - topologyName kafkaTopic ZKRoot ID");
//kafkaConf1.forceFromStart = false;
kafkaConf1.zkRoot = args[2];
kafkaConf1.scheme = new SchemeAsMultiScheme(new StringScheme());
KafkaSpout kafkaSpout1 = new KafkaSpout(kafkaConf1);
StBolt countbolt = new StBolt(args[1]);
TopologyBuilder builder = new TopologyBuilder();
builder.setSpout("kafkaspout", kafkaSpout1, 1);
builder.setBolt("counterbolt", countbolt, 1).shuffleGrouping(
"kafkaspout");
Config config = new Config();
config.setDebug(true);
config.put(Config.TOPOLOGY_TRIDENT_BATCH_EMIT_INTERVAL_MILLIS, 1);
config.setNumWorkers(1);
LocalCluster cluster = new LocalCluster();
cluster.submitTopology(args[0], config, builder.createTopology());
// StormSubmitter.submitTopology(args[0], config,
// builder.createTopology());
} else {
System.out
.println("Insufficent Arguements - topologyName kafkaTopic ZKRoot ID");
}
}
}
I am trying to get JSON data from the Kafka console producer, process it in Storm and store it into Cassandra.
For some reason, there is no response from the bolt when I run the code with parameters viewcount usercount /kafkastorm webapp1.
I have Kafka getting data from the console producer as topic usercount, and the correct table in Cassandra.
The code compiles and runs without any error, but the console shows "terminated".
There is no activity anywhere, despite providing the right JSON input to the Kafka console producer multiple times: {"topicname":"usercount","ip":"127.0.0.1","menu":"dress","product":"tshirt"}.
There is no topology shown as being created in the Storm UI's Topology Summary either.
I believe I have all the Kafka, Storm and Cassandra dependencies in place.
Please point me in the right direction with this issue. Thanks.

Intersystems Cache using XEP

I am trying to extract data from the Samples namespace that comes with the InterSystems Cache installation. Specifically, I am trying to retrieve Sample.Company global data using XEP. In order to achieve this, I created the Sample.Company class like this:
package Sample;
public class Company {
public Long id;
public String mission;
public String name;
public Long revenue;
public String taxId;
public Company(Long id, String mission, String name, Long revenue,
String taxId) {
this.id = id;
this.mission = mission;
this.name = name;
this.revenue = revenue;
this.taxId = taxId;
}
public Company() {
}
}
The XEP-related code looks like this:
import java.util.ArrayList;
import java.util.List;
import org.springframework.stereotype.Service;
import Sample.Company;
import com.intersys.xep.Event;
import com.intersys.xep.EventPersister;
import com.intersys.xep.EventQuery;
import com.intersys.xep.EventQueryIterator;
import com.intersys.xep.PersisterFactory;
import com.intersys.xep.XEPException;
@Service
public class CompanyService {
public List<Company> fetch() {
EventPersister myPersister = PersisterFactory.createPersister();
myPersister.connect("SAMPLES", "user", "pwd");
try { // delete any existing SingleStringSample events, then import
// new ones
Event.isEvent("Sample.Company");
myPersister.deleteExtent("Sample.Company");
String[] generatedClasses = myPersister.importSchema("Sample.Company");
for (int i = 0; i < generatedClasses.length; i++) {
System.out.println("Event class " + generatedClasses[i]
+ " successfully imported.");
}
} catch (XEPException e) {
System.out.println("import failed:\n" + e);
throw new RuntimeException(e);
}
EventQuery<Company> myQuery = null;
List<Company> list = new ArrayList<Company>();
try {
Event newEvent = myPersister.getEvent("Sample.Company");
String sql = "Select * from Sample.Company";
myQuery = newEvent.createQuery(sql);
newEvent.close();
myQuery.execute();
EventQueryIterator<Company> iterator = myQuery.getIterator();
while (iterator.hasNext()) {
Company c = iterator.next();
System.out.println(c);
list.add(c);
}
myQuery.close();
myPersister.close();
return list;
} catch (XEPException e) {
System.out.println("createQuery failed:\n" + e);
throw new RuntimeException(e);
}
}
}
When I try executing the fetch() method of the above class, I am seeing the following exception -
com.intersys.xep.XEPException: Cannot import - extent for Sample.Company not empty.
at com.intersys.xep.internal.Generator.generate(Generator.java:52)
at com.intersys.xep.EventPersister.importSchema(EventPersister.java:954)
at com.intersys.xep.EventPersister.importSchema(EventPersister.java:363)
I got the simple string example working. Does this mean we cannot read existing data using XEP? If we can, could someone please help me resolve the above issue? Thanks in advance.
You are trying to create a new class named Sample.Company in your instance:
String[] generatedClasses = myPersister.importSchema("Sample.Company");
But you still have data and an existing class there.
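If the goal is only to read the existing Sample.Company data, one option is to skip the deleteExtent()/importSchema() step entirely and query the class that is already there. A sketch, reusing only the calls from the question (whether the existing Caché class projects cleanly onto the Java Sample.Company POJO is something to verify on your instance):
// Read-only variant of fetch(): no deleteExtent(), no importSchema().
EventPersister myPersister = PersisterFactory.createPersister();
myPersister.connect("SAMPLES", "user", "pwd");
try {
    Event newEvent = myPersister.getEvent("Sample.Company");
    EventQuery<Company> myQuery = newEvent.createQuery("SELECT * FROM Sample.Company");
    newEvent.close();
    myQuery.execute();
    EventQueryIterator<Company> iterator = myQuery.getIterator();
    while (iterator.hasNext()) {
        Company c = iterator.next();
        System.out.println(c.name);
    }
    myQuery.close();
} finally {
    myPersister.close();
}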