get active connection on HikariDataSource - hikaricp

I am trying to figure out how many connections are currently open, and I can't seem to find an obvious way to do that with Hikari.
HikariPool exposes that information (getActiveConnections) but I don't see an easy way to access that from HikariDataSource.

If you are using Spring Boot:
new HikariDataSourcePoolMetadata(dataSource).getActive();
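A slightly fuller sketch, assuming Spring Boot 2.x (where the class lives in org.springframework.boot.jdbc.metadata):
import org.springframework.boot.jdbc.metadata.HikariDataSourcePoolMetadata;

// dataSource is your injected HikariDataSource
HikariDataSourcePoolMetadata poolMetadata = new HikariDataSourcePoolMetadata(dataSource);
Integer active = poolMetadata.getActive(); // connections currently in use
Integer max = poolMetadata.getMax();       // maximum pool size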

You'll have to get it via programmatic JMX access. First, enable MBean registration through the registerMbeans property or by calling setRegisterMbeans(true). Then consult this page for how to perform programmatic access:
https://github.com/brettwooldridge/HikariCP/wiki/JMX-Monitoring
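A minimal sketch of that access, following the wiki page above and assuming the pool was named "foo" via setPoolName (run this inside a method that can throw the JMX checked exceptions):
import java.lang.management.ManagementFactory;
import javax.management.JMX;
import javax.management.MBeanServer;
import javax.management.ObjectName;
import com.zaxxer.hikari.HikariPoolMXBean;

MBeanServer mBeanServer = ManagementFactory.getPlatformMBeanServer();
ObjectName poolName = new ObjectName("com.zaxxer.hikari:type=Pool (foo)");
HikariPoolMXBean poolProxy = JMX.newMXBeanProxy(mBeanServer, poolName, HikariPoolMXBean.class);
int activeConnections = poolProxy.getActiveConnections();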

This can be done very directly. In Kotlin:
dataSource.hikariPoolMXBean.activeConnections
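The Java equivalent calls the getters; HikariDataSource exposes the MXBean directly, so no reflection or JMX registration is needed:
dataSource.getHikariPoolMXBean().getActiveConnections();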

You can use the class below for better monitoring:
import javax.sql.DataSource;
import org.aspectj.lang.JoinPoint;
import org.aspectj.lang.annotation.After;
import org.aspectj.lang.annotation.Aspect;
import org.aspectj.lang.annotation.Before;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.stereotype.Component;
import com.zaxxer.hikari.HikariDataSource;
import com.zaxxer.hikari.pool.HikariPool;
import lombok.extern.slf4j.Slf4j;
@Aspect
@Component
@Slf4j
public class DataSourceAspectLogger {

    private HikariPool pool;

    @Autowired
    private HikariDataSource ds;

    @Before("execution(* com.x.common.sql.repo.*.*(..))")
    public void logBeforeConnection(JoinPoint jp) throws Throwable {
        logDataSourceInfos("Before", jp);
    }

    @After("execution(* com.x.common.sql.repo.*.*(..))")
    public void logAfterConnection(JoinPoint jp) throws Throwable {
        logDataSourceInfos("After", jp);
    }

    @Autowired
    public void getPool() {
        try {
            // HikariDataSource does not expose its HikariPool, so grab it via reflection.
            java.lang.reflect.Field field = ds.getClass().getDeclaredField("pool");
            field.setAccessible(true);
            this.pool = (HikariPool) field.get(ds);
        } catch (Exception e) {
            throw new RuntimeException(e);
        }
    }

    public void logDataSourceInfos(final String time, final JoinPoint jp) {
        final String method = String.format("%s:%s", jp.getTarget().getClass().getName(), jp.getSignature().getName());
        int totalConnections = pool.getTotalConnections();
        int activeConnections = pool.getActiveConnections();
        int freeConnections = totalConnections - activeConnections;
        int connectionWaiting = pool.getThreadsAwaitingConnection();
        log.info(String.format("%s %s: number of connections in use by the application (active): %d.", time, method, activeConnections));
        log.info(String.format("%s %s: number of established but idle connections: %d.", time, method, freeConnections));
        log.info(String.format("%s %s: number of threads waiting for a connection: %d.", time, method, connectionWaiting));
        log.info(String.format("%s %s: max pool size: %d.", time, method, ds.getMaximumPoolSize()));
    }
}

Related

how to verify http connection pool can improve performance

I want to use an HTTP connection pool with Spring RestTemplate, but before using it, I need to verify whether it improves performance.
I did a little programming here:
@Configuration
public class RestTemplateConfig {

    @Bean
    public RestTemplate restTemplate() {
        return new RestTemplate();
    }
}
and the test code here:
// imports added so the snippet compiles; assumes JUnit 5 (the spring-boot-starter-test default)
import java.time.Duration;
import java.time.Instant;
import org.junit.jupiter.api.Test;
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.boot.test.context.SpringBootTest;
import org.springframework.web.client.RestTemplate;

@SpringBootTest
class RestnopoolApplicationTests {

    String url = "https://www.baidu.com/";
    // String url = "http://localhost:8080/actuator/";

    @Autowired
    RestTemplate restTemplate;

    @Test
    void contextLoads() {
    }

    @Test
    void verify_health() {
        Instant start = Instant.now();
        for (int i = 0; i < 100; i++) {
            restTemplate.getForObject(url, String.class);
        }
        Instant end = Instant.now();
        Duration d = Duration.between(start, end);
        System.out.println("time span " + d.getSeconds());
    }
}
I also wrote the HTTP connection pool configuration below:
import java.security.KeyManagementException;
import java.security.KeyStoreException;
import java.security.NoSuchAlgorithmException;
import java.util.concurrent.TimeUnit;
import org.apache.http.HeaderElement;
import org.apache.http.HeaderElementIterator;
import org.apache.http.HttpResponse;
import org.apache.http.client.config.RequestConfig;
import org.apache.http.config.Registry;
import org.apache.http.config.RegistryBuilder;
import org.apache.http.conn.ConnectionKeepAliveStrategy;
import org.apache.http.conn.socket.ConnectionSocketFactory;
import org.apache.http.conn.socket.PlainConnectionSocketFactory;
import org.apache.http.conn.ssl.SSLConnectionSocketFactory;
import org.apache.http.conn.ssl.TrustSelfSignedStrategy;
import org.apache.http.impl.client.CloseableHttpClient;
import org.apache.http.impl.client.HttpClients;
import org.apache.http.impl.conn.PoolingHttpClientConnectionManager;
import org.apache.http.message.BasicHeaderElementIterator;
import org.apache.http.protocol.HTTP;
import org.apache.http.protocol.HttpContext;
import org.apache.http.ssl.SSLContextBuilder;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.scheduling.annotation.EnableScheduling;
import org.springframework.scheduling.annotation.Scheduled;
/**
* - Supports both HTTP and HTTPS
* - Uses a connection pool to re-use connections and save overhead of creating connections.
* - Has a custom connection keep-alive strategy (to apply a default keep-alive if one isn't specified)
* - Starts an idle connection monitor to continuously clean up stale connections.
*/
@Configuration
@EnableScheduling
public class HttpClientConfig {

    private static final Logger LOGGER = LoggerFactory.getLogger(HttpClientConfig.class);

    // Determines the timeout in milliseconds until a connection is established.
    private static final int CONNECT_TIMEOUT = 30000;

    // The timeout when requesting a connection from the connection manager.
    private static final int REQUEST_TIMEOUT = 30000;

    // The timeout for waiting for data.
    private static final int SOCKET_TIMEOUT = 60000;

    private static final int MAX_TOTAL_CONNECTIONS = 50;
    private static final int DEFAULT_KEEP_ALIVE_TIME_MILLIS = 20 * 1000;
    private static final int CLOSE_IDLE_CONNECTION_WAIT_TIME_SECS = 30;

    @Bean
    public PoolingHttpClientConnectionManager poolingConnectionManager() {
        SSLContextBuilder builder = new SSLContextBuilder();
        try {
            builder.loadTrustMaterial(null, new TrustSelfSignedStrategy());
        } catch (NoSuchAlgorithmException | KeyStoreException e) {
            LOGGER.error("Pooling Connection Manager Initialisation failure because of " + e.getMessage(), e);
        }

        SSLConnectionSocketFactory sslsf = null;
        try {
            sslsf = new SSLConnectionSocketFactory(builder.build());
        } catch (KeyManagementException | NoSuchAlgorithmException e) {
            LOGGER.error("Pooling Connection Manager Initialisation failure because of " + e.getMessage(), e);
        }

        Registry<ConnectionSocketFactory> socketFactoryRegistry = RegistryBuilder
                .<ConnectionSocketFactory>create()
                .register("https", sslsf)
                .register("http", new PlainConnectionSocketFactory())
                .build();

        PoolingHttpClientConnectionManager poolingConnectionManager = new PoolingHttpClientConnectionManager(socketFactoryRegistry);
        poolingConnectionManager.setMaxTotal(MAX_TOTAL_CONNECTIONS);
        return poolingConnectionManager;
    }

    @Bean
    public ConnectionKeepAliveStrategy connectionKeepAliveStrategy() {
        return new ConnectionKeepAliveStrategy() {
            @Override
            public long getKeepAliveDuration(HttpResponse response, HttpContext context) {
                HeaderElementIterator it = new BasicHeaderElementIterator(
                        response.headerIterator(HTTP.CONN_KEEP_ALIVE));
                while (it.hasNext()) {
                    HeaderElement he = it.nextElement();
                    String param = he.getName();
                    String value = he.getValue();
                    if (value != null && param.equalsIgnoreCase("timeout")) {
                        return Long.parseLong(value) * 1000;
                    }
                }
                return DEFAULT_KEEP_ALIVE_TIME_MILLIS;
            }
        };
    }

    @Bean
    public CloseableHttpClient httpClient() {
        RequestConfig requestConfig = RequestConfig.custom()
                .setConnectionRequestTimeout(REQUEST_TIMEOUT)
                .setConnectTimeout(CONNECT_TIMEOUT)
                .setSocketTimeout(SOCKET_TIMEOUT)
                .build();

        return HttpClients.custom()
                .setDefaultRequestConfig(requestConfig)
                .setConnectionManager(poolingConnectionManager())
                .setKeepAliveStrategy(connectionKeepAliveStrategy())
                .build();
    }

    @Bean
    public Runnable idleConnectionMonitor(final PoolingHttpClientConnectionManager connectionManager) {
        return new Runnable() {
            @Override
            @Scheduled(fixedDelay = 10000)
            public void run() {
                try {
                    if (connectionManager != null) {
                        LOGGER.trace("run IdleConnectionMonitor - Closing expired and idle connections...");
                        connectionManager.closeExpiredConnections();
                        connectionManager.closeIdleConnections(CLOSE_IDLE_CONNECTION_WAIT_TIME_SECS, TimeUnit.SECONDS);
                    } else {
                        LOGGER.trace("run IdleConnectionMonitor - Http Client Connection manager is not initialised");
                    }
                } catch (Exception e) {
                    LOGGER.error("run IdleConnectionMonitor - Exception occurred. msg={}, e={}", e.getMessage(), e);
                }
            }
        };
    }
}
and the updated RestTemplateConfig below:
import org.springframework.beans.factory.annotation.Autowired;
import org.springframework.context.annotation.Bean;
import org.springframework.context.annotation.Configuration;
import org.springframework.http.client.HttpComponentsClientHttpRequestFactory;
import org.springframework.scheduling.TaskScheduler;
import org.springframework.scheduling.concurrent.ThreadPoolTaskScheduler;
import org.springframework.web.client.RestTemplate;
import org.apache.http.impl.client.CloseableHttpClient;
@Configuration
public class RestTemplateConfig {

    @Autowired
    CloseableHttpClient httpClient;

    @Bean
    public RestTemplate restTemplate() {
        RestTemplate restTemplate = new RestTemplate(clientHttpRequestFactory());
        return restTemplate;
    }

    @Bean
    public HttpComponentsClientHttpRequestFactory clientHttpRequestFactory() {
        HttpComponentsClientHttpRequestFactory clientHttpRequestFactory = new HttpComponentsClientHttpRequestFactory();
        clientHttpRequestFactory.setHttpClient(httpClient);
        return clientHttpRequestFactory;
    }

    @Bean
    public TaskScheduler taskScheduler() {
        ThreadPoolTaskScheduler scheduler = new ThreadPoolTaskScheduler();
        scheduler.setThreadNamePrefix("poolScheduler");
        scheduler.setPoolSize(50);
        return scheduler;
    }
}
The test results cannot prove that the connection pool improves performance.
You have not used your new implementation; you are still using the default RestTemplate, which does not go through your pooled Apache client. Use your httpClient() method to supply the CloseableHttpClient, as your second RestTemplateConfig does.
Please also note that your test is synchronous: no matter how many connections you have in the pool, you will use them sequentially. Use threads to execute the GET requests, as in the sketch below.
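For example, a sketch of a concurrent version of the test (the pool of 10 threads is an arbitrary choice; imports needed: java.util.List, java.util.ArrayList, and java.util.concurrent.*):
@Test
void verify_health_concurrent() throws Exception {
    ExecutorService executor = Executors.newFixedThreadPool(10);
    Instant start = Instant.now();
    List<Future<String>> futures = new ArrayList<>();
    for (int i = 0; i < 100; i++) {
        // each GET runs on a pool thread, so several connections are used at once
        futures.add(executor.submit(() -> restTemplate.getForObject(url, String.class)));
    }
    for (Future<String> f : futures) {
        f.get(); // wait for every request to complete
    }
    executor.shutdown();
    System.out.println("time span " + Duration.between(start, Instant.now()).getSeconds());
}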

can Flink receive http requests as datasource?

Flink can read a socket stream; can it read HTTP requests? How?
// socket example
DataStream<XXX> socketStream = env
.socketTextStream("localhost", 9999)
.map(...);
There's an open JIRA ticket for creating an HTTP sink connector for Flink, but I've seen no discussion about creating a source connector.
Moreover, it's not clear this is a good idea. Flink's approach to fault tolerance requires sources that can be rewound and replayed, so it works best with input sources that behave like message queues. I would suggest buffering the incoming HTTP requests in a distributed log (see the sketch below).
For an example, look at how DriveTribe uses Flink to power their website, described on the data Artisans blog and on YouTube.
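A hedged sketch of that buffering approach, assuming the requests land in a Kafka topic named "http-requests" (the topic name and addresses are illustrative) and the flink-connector-kafka dependency is on the classpath:
import java.util.Properties;
import org.apache.flink.api.common.serialization.SimpleStringSchema;
import org.apache.flink.streaming.connectors.kafka.FlinkKafkaConsumer;

Properties props = new Properties();
props.setProperty("bootstrap.servers", "localhost:9092");
props.setProperty("group.id", "http-request-counter");
// Kafka offsets can be rewound on recovery, which is exactly what
// Flink's fault-tolerance model needs from a source.
DataStream<String> requests = env.addSource(
        new FlinkKafkaConsumer<>("http-requests", new SimpleStringSchema(), props));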
I wrote a custom HTTP source; please see OneHourHttpTextStreamFunction below. You need to create a fat jar that includes the Apache HTTP server classes if you want to run my code.
package org.apache.flink.streaming.examples.http;
import org.apache.flink.api.common.functions.FlatMapFunction;
import org.apache.flink.api.common.functions.ReduceFunction;
import org.apache.flink.api.java.utils.ParameterTool;
import org.apache.flink.streaming.api.datastream.DataStream;
import org.apache.flink.streaming.api.environment.StreamExecutionEnvironment;
import org.apache.flink.streaming.api.functions.source.SourceFunction;
import org.apache.flink.streaming.api.windowing.time.Time;
import org.apache.flink.streaming.examples.socket.SocketWindowWordCount.WordWithCount;
import org.apache.flink.util.Collector;
import org.apache.http.HttpException;
import org.apache.http.HttpRequest;
import org.apache.http.HttpResponse;
import org.apache.http.entity.StringEntity;
import org.apache.http.impl.bootstrap.HttpServer;
import org.apache.http.impl.bootstrap.ServerBootstrap;
import org.apache.http.protocol.HttpContext;
import org.apache.http.protocol.HttpRequestHandler;
import java.io.IOException;
import java.util.concurrent.TimeUnit;
import static org.apache.flink.util.Preconditions.checkArgument;
import static org.apache.flink.util.Preconditions.checkNotNull;
public class HttpRequestCount {

    public static void main(String[] args) throws Exception {

        // the path to register the handler under and the port to listen on
        final String path;
        final int port;
        try {
            final ParameterTool params = ParameterTool.fromArgs(args);
            path = params.has("path") ? params.get("path") : "*";
            port = params.getInt("port");
        } catch (Exception e) {
            System.err.println("No port specified. Please run 'HttpRequestCount "
                    + "--path <path> --port <port>', where path is the URL pattern "
                    + "to register (* by default) and port is the port to listen on");
            return;
        }

        // get the execution environment
        final StreamExecutionEnvironment env = StreamExecutionEnvironment.getExecutionEnvironment();

        // get input data from the embedded HTTP server
        DataStream<String> text = env.addSource(new OneHourHttpTextStreamFunction(path, port));

        // parse the data, group it, window it, and aggregate the counts
        DataStream<WordWithCount> windowCounts = text
                .flatMap(new FlatMapFunction<String, WordWithCount>() {
                    @Override
                    public void flatMap(String value, Collector<WordWithCount> out) {
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                        }
                        for (String word : value.split("\\s")) {
                            out.collect(new WordWithCount(word, 1L));
                        }
                    }
                })
                .keyBy("word").timeWindow(Time.seconds(5))
                .reduce(new ReduceFunction<WordWithCount>() {
                    @Override
                    public WordWithCount reduce(WordWithCount a, WordWithCount b) {
                        try {
                            Thread.sleep(1000);
                        } catch (InterruptedException e) {
                            e.printStackTrace();
                        }
                        return new WordWithCount(a.word, a.count + b.count);
                    }
                });

        // print the results with a single thread, rather than in parallel
        windowCounts.print().setParallelism(1);

        env.execute("Http Request Count");
    }
}
class OneHourHttpTextStreamFunction implements SourceFunction<String> {

    private static final long serialVersionUID = 1L;

    private final String path;
    private final int port;
    private transient HttpServer server;

    public OneHourHttpTextStreamFunction(String path, int port) {
        checkArgument(port > 0 && port < 65536, "port is out of range");
        this.path = checkNotNull(path, "path must not be null");
        this.port = port;
    }

    @Override
    public void run(SourceContext<String> ctx) throws Exception {
        server = ServerBootstrap.bootstrap().setListenerPort(port).registerHandler(path, new HttpRequestHandler() {
            @Override
            public void handle(HttpRequest req, HttpResponse rep, HttpContext context) throws HttpException, IOException {
                // Emit the request URI as a stream element, then acknowledge the client.
                ctx.collect(req.getRequestLine().getUri());
                rep.setStatusCode(200);
                rep.setEntity(new StringEntity("OK"));
            }
        }).create();
        server.start();
        // Keep the source alive for one hour, then shut down.
        server.awaitTermination(1, TimeUnit.HOURS);
    }

    @Override
    public void cancel() {
        server.stop();
    }
}
Leave a comment if you want the demo jar.

JMS Consumer terminates and doesn't receive Message

So I'm following this YouTube tutorial on Java Message Service with JBoss. My code is the same as in the video; however, when I run my TopicConsumer and TopicProducer applications, both terminate and don't stay alive for me to receive my message. I read that setMessageListener would create a new thread, so the message should be received even if the main thread terminated, but I'm still not receiving the message.
I found out that it's not calling onMessage. Is it because TopicConsumer was terminated before it got the chance?
I have my JBoss 5.0 server running. Just like in the video, I run TopicConsumer first (but it terminates after the print statement, unlike in the video), then TopicProducer (which also terminates right after the print statement), and I don't receive my message.
Thanks.
TopicConsumer.java
package jmspubsubtutorial;
import java.util.Properties;
import javax.jms.JMSException;
import javax.jms.Message;
import javax.jms.MessageListener;
import javax.jms.TextMessage;
import javax.jms.Topic;
import javax.jms.TopicConnection;
import javax.jms.TopicConnectionFactory;
import javax.jms.TopicSession;
import javax.naming.Context;
import javax.naming.InitialContext;
import javax.naming.NamingException;
public class TopicConsumer implements MessageListener {

    public static void main(String[] args) throws JMSException, NamingException {
        System.out.println("---Starting TopicConsumer---");
        Context context = TopicConsumer.getInitialContext();
        TopicConnectionFactory topicConnectionFactory = (TopicConnectionFactory) context.lookup("ConnectionFactory");
        Topic topic = (Topic) context.lookup("topic/JMS_tutorial");
        TopicConnection topicConnection = topicConnectionFactory.createTopicConnection();
        TopicSession topicSession = topicConnection.createTopicSession(false, TopicSession.AUTO_ACKNOWLEDGE);
        topicSession.createSubscriber(topic).setMessageListener(new TopicConsumer());
        topicConnection.start();
        System.out.println("---Exiting TopicConsumer---");
    }

    @Override
    public void onMessage(Message message) {
        System.out.println("--- onMessage ---");
        try {
            System.out.println("Incoming message: " + ((TextMessage) message).getText());
        } catch (JMSException e) {
            System.out.println("onMessage failed");
            e.printStackTrace();
        }
    }

    public static Context getInitialContext() throws JMSException, NamingException {
        Properties props = new Properties();
        props.setProperty("java.naming.factory.initial", "org.jnp.interfaces.NamingContextFactory");
        props.setProperty("java.naming.factory.url.pkgs", "org.jboss.naming");
        props.setProperty("java.naming.provider.url", "localhost:1099");
        Context context = new InitialContext(props);
        return context;
    }
}
TopicProducer.java
package jmspubsubtutorial;
import javax.jms.JMSException;
import javax.jms.TextMessage;
import javax.jms.Topic;
import javax.jms.TopicConnection;
import javax.jms.TopicConnectionFactory;
import javax.jms.TopicPublisher;
import javax.jms.TopicSession;
import javax.naming.Context;
import javax.naming.NamingException;
public class TopicProducer {

    public static void main(String[] args) throws JMSException, NamingException {
        System.out.println("---Starting TopicProducer---");
        Context context = TopicConsumer.getInitialContext();
        TopicConnectionFactory topicConnectionFactory = (TopicConnectionFactory) context.lookup("ConnectionFactory");
        Topic topic = (Topic) context.lookup("topic/JMS_tutorial");
        TopicConnection topicConnection = topicConnectionFactory.createTopicConnection();
        TopicSession topicSession = topicConnection.createTopicSession(false, TopicSession.AUTO_ACKNOWLEDGE);
        topicConnection.start();
        TopicProducer topicProducer = new TopicProducer();
        String text = "message 1 from TopicProducer...";
        topicProducer.sendMessage(text, topicSession, topic);
        System.out.println("---Exiting TopicProducer---");
    }

    public void sendMessage(String text, TopicSession topicSession, Topic topic) throws JMSException {
        System.out.println("Send Message: " + text + " " + topicSession + " " + topic);
        TopicPublisher topicPublisher = topicSession.createPublisher(topic);
        TextMessage textMessage = topicSession.createTextMessage(text);
        topicPublisher.publish(textMessage);
        topicPublisher.close();
    }
}
The problem is that you are relying on the JMS library to maintain at least one non-daemon thread in order to keep your application alive after you create the consumer and assign the message listener, but in reality there is no guarantee that it will do any such thing.
It's true that many JMS providers do indeed attempt to always have a single non-daemon thread running internally, but assuming that this will always be the case is not really advisable. You seem to have found that your particular provider does not do this for you, so if you want to ensure your application stays running, you should make that happen yourself, as in the sketch below.
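A minimal sketch of one way to do that in TopicConsumer's main (the latch is never counted down, so the main thread blocks until the process is killed; main would additionally need to declare or handle InterruptedException):
import java.util.concurrent.CountDownLatch;

// ... after setting the listener and starting the connection:
topicSession.createSubscriber(topic).setMessageListener(new TopicConsumer());
topicConnection.start();
System.out.println("Waiting for messages; press Ctrl+C to exit.");
// keeps the main (non-daemon) thread alive so onMessage can fire
new CountDownLatch(1).await();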

Californium Framework CoAP and PUT request

I am trying to make a request to a CoAP server (er-rest-example) using Californium.
I successfully do a POST request, but with PUT I am getting a BAD_REQUEST. I tried these URLs:
coap://[aaaa::c30c:0000:0000:0002]:5683/actuators/leds
coap://[aaaa::c30c:0000:0000:0002]:5683/actuators/leds?
coap://[aaaa::c30c:0000:0000:0002]:5683/actuators/leds?color=r
but none of them succeed.
What am I doing wrong?
This is my simple script:
package coap_client;

import java.net.URI;
import java.net.URISyntaxException;
import java.util.Timer;
import java.util.TimerTask;
import org.eclipse.californium.core.CoapClient;
import org.eclipse.californium.core.CoapResponse;
import org.eclipse.californium.core.coap.MediaTypeRegistry;

public class cliente {

    public static void main(String[] args) throws Exception {
        Timer timer = new Timer();
        TimerTask task = new TimerTask() {
            @Override
            public void run() {
                String url = "coap://[aaaa::c30c:0000:0000:0002]:5683/actuators/leds";
                URI uri = null;
                try {
                    uri = new URI(url);
                } catch (URISyntaxException e) {
                    e.printStackTrace();
                }
                CoapClient client = new CoapClient(uri);
                CoapResponse response = client.put("color=r", MediaTypeRegistry.TEXT_PLAIN);
                // Check for null before using the response: put() returns null on timeout.
                if (response != null) {
                    System.out.println(response.isSuccess());
                    byte[] myreponse = response.getPayload();
                    String respuesta2 = new String(myreponse);
                    System.out.println(respuesta2);
                }
            }
        };
        timer.schedule(task, 10, 10 * 1000);
    }
}
In the Contiki er-rest-example, see the POST/PUT handler for the LED CoAP resource: it expects a mode parameter, without which you get BAD_REQUEST as the response. I assume that has to go in the request body.
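If that reading is right, a hedged sketch of the PUT keeps the color in the query string and sends the mode in the body (the parameter names come from er-rest-example's res-leds.c and are an assumption about your server version):
// color travels in the query string, mode in the payload (assumption, see above)
CoapClient client = new CoapClient("coap://[aaaa::c30c:0000:0000:0002]:5683/actuators/leds?color=r");
CoapResponse response = client.put("mode=on", MediaTypeRegistry.TEXT_PLAIN);
if (response != null) {
    System.out.println(response.getCode()); // expect 2.04 (Changed) on success
}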

Map Reduce Distributed Cache

My driver class does not compile at the job.waitForCompletion(boolean) call; it reports an unhandled ClassNotFoundException. If I catch the exception, the compiler complains that the run method must return an int value. I am using the MapReduce new API. Could anyone suggest what the issue is?
import java.io.File;
import java.io.IOException;
import java.net.URI;
import java.net.URISyntaxException;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.conf.Configured;
import org.apache.hadoop.filecache.DistributedCache;
import org.apache.hadoop.fs.Path;
import org.apache.hadoop.util.GenericOptionsParser;
import org.apache.hadoop.util.Tool;
import org.apache.hadoop.util.ToolRunner;
import org.apache.hadoop.mapreduce.Job;
import org.apache.hadoop.mapreduce.lib.input.FileInputFormat;
import org.apache.hadoop.mapreduce.lib.input.KeyValueTextInputFormat;
import org.apache.hadoop.mapreduce.lib.input.TextInputFormat;
import org.apache.hadoop.mapreduce.lib.output.FileOutputFormat;
import org.apache.hadoop.mapreduce.lib.output.TextOutputFormat;
public class Dist_Driver extends Configured implements Tool {

    public int run(String args[]) throws IOException, InterruptedException {

        // Configuration phase
        // Configuration conf = new Configuration();
        Job job = new Job(new Configuration());
        job.setJarByClass(Dist_Driver.class);

        // Mapper Reducer InputFormat
        job.setInputFormatClass(FileInputFormat.class);

        // Mapper and Reducer Class
        job.setMapperClass(Dist_Mapper.class);
        job.setReducerClass(DistCache_Reducer.class);
        job.setOutputFormatClass(TextOutputFormat.class);
        job.setInputFormatClass(KeyValueTextInputFormat.class);

        // set FileInputOutput
        FileInputFormat.addInputPath(job, new Path(args[0]));
        FileOutputFormat.setOutputPath(job, new Path(args[1]));

        // setting number of reduce tasks and submit it
        job.setNumReduceTasks(2);

        // Let's check if the file exists
        File f1 = new File("/home/hdfs/trials_mapreduce_progams/emp_id");
        if (f1.exists())
            System.out.println("The File Exists");
        else
            System.out.println("The File does not exist");

        URI path1;
        try {
            path1 = new URI("/home/hdfs/trials_mapreduce_progams/emp_lookup.txt");
            DistributedCache.addCacheFile(path1, job.getConfiguration());
        } catch (URISyntaxException e) {
            e.printStackTrace();
        }

        if (job.waitForCompletion(true))
            return 0;
        else
            return 1;
    }

    public static void main(String[] args) throws Exception {
        int exitcode = ToolRunner.run(new Dist_Driver(), args);
        System.exit(exitcode);
    }
}
Just add ClassNotFoundException to the run method signature:
public int run(String args[]) throws IOException,
        InterruptedException,
        ClassNotFoundException {
The reason you get an error when you try to catch it instead is that if a ClassNotFoundException is thrown during execution, there is no return value, and the method has to return something on every path.
If you really want to catch it, just return 1 in the catch clause, which is the error exit code, as in the sketch below.
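A minimal sketch of that catch variant (the job setup is elided; every path now returns an int):
public int run(String args[]) throws IOException, InterruptedException {
    // ... job setup as above ...
    try {
        return job.waitForCompletion(true) ? 0 : 1;
    } catch (ClassNotFoundException e) {
        e.printStackTrace();
        return 1; // error exit code
    }
}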