Spring Cloud Stream consumer poll timeout has expired - Kubernetes

We have a small microservice that reads from a Kafka topic and writes to MQTT, using Spring Cloud Stream. It works fine, but after some time we get the following warning and no further messages are published to MQTT:
"2022-10-18 16:22:29.861 WARN 1 --- [d | tellus-mqtt] o.a.k.c.c.internals.ConsumerCoordinator : [Consumer clientId=consumer-mqtt-2, groupId=mqtt] consumer poll timeout has expired. This means the time between subsequent calls to poll() was longer than the configured max.poll.interval.ms, which typically implies that the poll loop is spending too much time processing messages. You can address this either by increasing max.poll.interval.ms or by reducing the maximum size of batches r
Is there a way to programmatically resubscribe or recover from this timeout?
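For example, would stopping and restarting the binding help? A rough sketch of what we had in mind, assuming Spring Cloud Stream 3.x and its BindingsLifecycleController (how the timeout would be detected is left out; "kafka-in-0" is the consumer binding name from the config below):

// rough sketch, not verified against our setup: assumes Spring Cloud Stream 3.x,
// where the BindingsLifecycleController bean can stop and restart a binding by name
@Component
public class KafkaBindingRestarter {

    private final BindingsLifecycleController controller;

    public KafkaBindingRestarter(BindingsLifecycleController controller) {
        this.controller = controller;
    }

    // hypothetical recovery hook, to be called when the poll timeout is detected
    public void restartKafkaBinding() {
        controller.changeState("kafka-in-0", BindingsLifecycleController.State.STOPPED);
        controller.changeState("kafka-in-0", BindingsLifecycleController.State.STARTED);
    }
}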
Or could we implement a custom health check for the actuator that includes the consumer, so that the pod would get restarted automatically by k8s? Something like:
management:
  endpoint:
    health:
      group:
        liveness:
          include: livenessstate,binders
Where binders is the Kafka component.
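On the Kubernetes side, the liveness group would then be wired to a probe along these lines (a sketch; the port, path, and timings are assumptions, not our actual manifest):

livenessProbe:
  httpGet:
    path: /actuator/health/liveness
    port: 8080
  initialDelaySeconds: 30
  periodSeconds: 10
  failureThreshold: 3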
EDIT: Here is the consumer code (OutputConfig class):
@Configuration
@Log4j2
@Profile("output")
public class OutputConfig {

    private final Mqtt3ReactorClient outboundMqttClient;
    private final Mqtt3ReactorClient outboundRootMqttClient;
    private final MeterUtils meterUtils;

    @Autowired
    public OutputConfig(@Qualifier("outboundMqttClient") Mqtt3ReactorClient outboundMqttClient,
            @Qualifier("outboundRootMqttClient") Mqtt3ReactorClient outboundRootMqttClient,
            MeterUtils meterUtils) {
        this.outboundMqttClient = outboundMqttClient;
        this.outboundRootMqttClient = outboundRootMqttClient;
        this.meterUtils = meterUtils;
        log.info("Starting Output Config!");
    }

    @Bean
    public Consumer<Flux<Output.GatewayNotification>> kafka() {
        return new Output(outboundMqttClient, meterUtils);
    }

    @Bean
    public Consumer<Flux<Output.GatewayNotification>> kafkaRoot() {
        return new Output(outboundRootMqttClient, meterUtils);
    }
}
And the Output class:
@Log4j2
public class Output implements Consumer<Flux<Output.GatewayNotification>> {

    public static final HexFormat FORMAT = HexFormat.of().withDelimiter(" ").withUpperCase();

    private final Mqtt3ReactorClient outboundMqttClient;
    private final MeterUtils meterUtils;

    public Output(Mqtt3ReactorClient outboundMqttClient, MeterUtils meterUtils) {
        this.outboundMqttClient = outboundMqttClient;
        this.meterUtils = meterUtils;
    }

    @Override
    public void accept(Flux<Output.GatewayNotification> gatewayNotifications) {
        Flux<Mqtt3Publish> messagesToPublish = gatewayNotifications
                .map(gatewayNotification -> Mqtt3Publish.builder()
                        .topic(gatewayNotification.getAddress())
                        .qos(MqttQos.AT_LEAST_ONCE)
                        .payload(Base64.getDecoder().decode(gatewayNotification.getPayload()))
                        .build());
        outboundMqttClient.publish(messagesToPublish)
                .doOnNext(publishResult -> {
                    log.debug("Publish acknowledged: "
                            + FORMAT.formatHex(publishResult.getPublish().getPayloadAsBytes()));
                    meterUtils.incrementCounter("output");
                })
                .doOnError(error -> log.error(error.getMessage()))
                .subscribe();
    }

    @Data
    public static class GatewayNotification {
        private String address;
        private String payload;
        private Long buildingId;
    }
}
HiveMqMqttConfig:
@Configuration
@Log4j2
public class HiveMqMqttConfig {

    @Value("${mqtt.endpointUrl}")
    private String endpointUrl;
    @Value("${mqtt.rootEndpointUrl}")
    private String rootEndpointUrl;
    @Value("${mqtt.inboundClientId}")
    private String inboundClientId;
    @Value("${mqtt.outboundClientId}")
    private String outboundClientId;
    @Value("${mqtt.caFilename:#{null}}")
    private String caFilename;
    @Value("${mqtt.inboundPrivateKeyFilename:#{null}}")
    private String inboundPrivateKeyFilename;
    @Value("${mqtt.inboundRootPrivateKeyFilename:#{null}}")
    private String inboundRootPrivateKeyFilename;
    @Value("${mqtt.inboundClientCertFilename:#{null}}")
    private String inboundClientCertFilename;
    @Value("${mqtt.inboundRootClientCertFilename:#{null}}")
    private String inboundRootClientCertFilename;
    @Value("${mqtt.outboundPrivateKeyFilename:#{null}}")
    private String outboundPrivateKeyFilename;
    @Value("${mqtt.outboundRootPrivateKeyFilename:#{null}}")
    private String outboundRootPrivateKeyFilename;
    @Value("${mqtt.outboundClientCertFilename:#{null}}")
    private String outboundClientCertFilename;
    @Value("${mqtt.outboundRootClientCertFilename:#{null}}")
    private String outboundRootClientCertFilename;

    @Bean(name = "inboundMqttClient")
    public Mqtt3ReactorClient inboundMqttClient() {
        var client = Mqtt3ReactorClient.from(buildMqtt3Client(endpointUrl, UUID.randomUUID().toString(),
                caFilename, inboundPrivateKeyFilename, inboundClientCertFilename));
        connectClient(client);
        return client;
    }

    @Bean(name = "inboundRootMqttClient")
    public Mqtt3ReactorClient inboundRootMqttClient() {
        var client = Mqtt3ReactorClient.from(buildMqtt3Client(rootEndpointUrl, UUID.randomUUID().toString(),
                caFilename, inboundRootPrivateKeyFilename, inboundRootClientCertFilename));
        connectClient(client);
        return client;
    }

    @Bean(name = "outboundMqttClient")
    public Mqtt3ReactorClient outboundMqttClient() {
        var client = Mqtt3ReactorClient.from(buildMqtt3Client(endpointUrl, UUID.randomUUID().toString(),
                caFilename, outboundPrivateKeyFilename, outboundClientCertFilename));
        connectClient(client);
        return client;
    }

    @Bean(name = "outboundRootMqttClient")
    public Mqtt3ReactorClient outboundRootMqttClient() {
        var client = Mqtt3ReactorClient.from(buildMqtt3Client(rootEndpointUrl, UUID.randomUUID().toString(),
                caFilename, outboundRootPrivateKeyFilename, outboundRootClientCertFilename));
        connectClient(client);
        return client;
    }

    private Mqtt3Client buildMqtt3Client(String endpointUrl, String clientId, String caFilename,
            String privateKeyFilename, String clientCertFilename) {
        log.info("Creating mqtt3 client with client id: {}", clientId);
        // endpoint is in the form 'protocol://host:port'
        String[] endpointUrlComponents = endpointUrl.split(":");
        String host = endpointUrlComponents[1].substring(2);
        int port = Integer.parseInt(endpointUrlComponents[2]);
        Mqtt3ClientBuilder mqtt3ClientBuilder = Mqtt3Client.builder()
                .identifier(clientId)
                .serverHost(host)
                .serverPort(port)
                .automaticReconnectWithDefaultConfig();
        try {
            if (caFilename != null && !caFilename.isEmpty()) {
                boolean isUsingKeyBasedAuthentication = privateKeyFilename != null && !privateKeyFilename.isEmpty()
                        && clientCertFilename != null && !clientCertFilename.isEmpty();
                PemFileSslContext context = isUsingKeyBasedAuthentication
                        ? new PemFileSslContext(getStreamFromClassPathOrLocal(caFilename),
                                getStreamFromClassPathOrLocal(privateKeyFilename),
                                getStreamFromClassPathOrLocal(clientCertFilename))
                        : new PemFileSslContext(new ClassPathResource(caFilename).getInputStream());
                context.getSocketFactory();
                mqtt3ClientBuilder
                        .sslConfig()
                        .keyManagerFactory(context.getKeyManagerFactory())
                        .trustManagerFactory(context.getTrustManagerFactory())
                        .applySslConfig();
            }
        } catch (IOException | NoSuchAlgorithmException | KeyStoreException | CertificateException
                | InvalidKeySpecException | UnrecoverableKeyException
                | PemFileSslContext.SocketFactoryCreationFailedException e) {
            throw new RuntimeException(e);
        }
        return mqtt3ClientBuilder.build();
    }

    private InputStream getStreamFromClassPathOrLocal(String uri) throws IOException {
        return new ClassPathResource(uri).getInputStream();
    }

    private void connectClient(Mqtt3ReactorClient mqtt3ReactorClient) {
        Mono<Mqtt3ConnAck> connAckSingle = mqtt3ReactorClient.connect();
        connAckSingle
                .doOnSuccess(connAck -> log.info("Connected, " + connAck.getReturnCode()))
                .doOnError(throwable -> log.info("Connection failed, " + throwable.getMessage()))
                .subscribe();
    }
}
config:
management:
  endpoint:
    health:
      group:
        liveness:
          include: livenessstate,kafkaConsumers
spring:
  cloud:
    stream:
      kafka:
        bindings:
          kafka-in-0:
            consumer:
              configuration:
                max.poll.records: 10
          kafkaRoot-in-0:
            consumer:
              configuration:
                max.poll.records: 10
      function:
        definition: kafka;kafkaRoot
      bindings:
        kafka-in-0:
          destination: output
          group: mqtt
          consumer:
            concurrency: 1
        kafkaRoot-in-0:
          destination: output
          group: mqtt-root
          consumer:
            concurrency: 1
# ... (certs/endpoints omitted)

Related

Spring Framework Integration TCP/IP - Client application SSL not working and posting incomplete requests

I am new to the Spring framework. We have a requirement where our application acts as a client and needs to integrate with another application over TCP. We will send them fixed-length requests and receive responses in return. We have been asked to reuse the same TCP connection for every request. Over the same open connection, our application will also receive heartbeat messages from the server application, to which we do not need to respond.
The request messages we need to send consist of header + body, where the header carries the message type and length details.
We will be using SSL. When we test with SSL, getConnection throws no exception, but we never receive any heartbeat messages.
When we test without SSL, we can send requests and receive responses as well as heartbeat messages. But after the first request/response, our application sends partial request text to the server for subsequent messages, which causes problems: connections are reset by peer due to the unexpected messages received at their end.
I have tried many things from the documentation available online but have not been able to implement the requirement successfully.
Please find the code below. Thanks in advance.
public class ClientConfig implements ApplicationEventPublisherAware {

    protected String port;
    protected String host;
    protected String connectionTimeout;
    protected String keyStorePath;
    protected String trustStorePath;
    protected String keyStorePassword;
    protected String trustStorePassword;
    protected String protocol;
    private ApplicationEventPublisher applicationEventPublisher;

    @Override
    public void setApplicationEventPublisher(ApplicationEventPublisher applicationEventPublisher) {
        this.applicationEventPublisher = applicationEventPublisher;
    }

    @Bean
    public DefaultTcpNioSSLConnectionSupport connectionSupport() {
        if ("SSL".equalsIgnoreCase(getProtocol())) {
            DefaultTcpSSLContextSupport sslContextSupport = new DefaultTcpSSLContextSupport(getKeyStorePath(),
                    getTrustStorePath(), getKeyStorePassword(), getTrustStorePassword());
            sslContextSupport.setProtocol(getProtocol());
            return new DefaultTcpNioSSLConnectionSupport(sslContextSupport);
        }
        return null;
    }

    @Bean
    public AbstractClientConnectionFactory clientConnectionFactory() {
        if (StringUtils.isNullOrEmptyTrim(getHost()) || StringUtils.isNullOrEmptyTrim(getPort())) {
            return null;
        }
        TcpNioClientConnectionFactory tcpNioClientConnectionFactory =
                new TcpNioClientConnectionFactory(getHost(), Integer.valueOf(getPort()));
        tcpNioClientConnectionFactory.setApplicationEventPublisher(applicationEventPublisher);
        tcpNioClientConnectionFactory.setSoKeepAlive(true);
        tcpNioClientConnectionFactory.setDeserializer(new CustomSerializerDeserializer());
        tcpNioClientConnectionFactory.setSerializer(new CustomSerializerDeserializer());
        tcpNioClientConnectionFactory.setLeaveOpen(true);
        tcpNioClientConnectionFactory.setSingleUse(false);
        if ("SSL".equalsIgnoreCase(getProtocol())) {
            tcpNioClientConnectionFactory.setSslHandshakeTimeout(60);
            tcpNioClientConnectionFactory.setTcpNioConnectionSupport(connectionSupport());
        }
        return tcpNioClientConnectionFactory;
    }

    @Bean
    public MessageChannel outboundChannel() {
        return new DirectChannel();
    }

    @Bean
    public PollableChannel receiverChannel() {
        return new QueueChannel();
    }

    @Bean
    @ServiceActivator(inputChannel = "outboundChannel")
    public TcpSendingMessageHandler outboundClient(AbstractClientConnectionFactory clientConnectionFactory) {
        TcpSendingMessageHandler outbound = new TcpSendingMessageHandler();
        outbound.setConnectionFactory(clientConnectionFactory);
        if (!StringUtils.isNullOrEmpty(getConnectionTimeout())) {
            long timeout = Long.valueOf(getConnectionTimeout());
            outbound.setRetryInterval(TimeUnit.SECONDS.toMillis(timeout));
        }
        outbound.setClientMode(true);
        return outbound;
    }

    @Bean
    public TcpReceivingChannelAdapter inboundClient(TcpNioClientConnectionFactory connectionFactory) {
        TcpReceivingChannelAdapter inbound = new TcpReceivingChannelAdapter();
        inbound.setConnectionFactory(connectionFactory);
        if (!StringUtils.isNullOrEmpty(getConnectionTimeout())) {
            long timeout = Long.valueOf(getConnectionTimeout());
            inbound.setRetryInterval(TimeUnit.SECONDS.toMillis(timeout));
        }
        inbound.setOutputChannel(receiverChannel());
        inbound.setClientMode(true);
        return inbound;
    }
}
public class CustomSerializerDeserializer implements Serializer<String>, Deserializer<String> {

    @Override
    public String deserialize(InputStream inputStream) throws IOException {
        int i = 0;
        byte[] lenbuf = new byte[8];
        String message = null;
        // NOTE: a single read(...) may return fewer than 8 bytes; the 8-byte
        // header is assumed to arrive complete here
        while ((i = inputStream.read(lenbuf)) != -1) {
            String messageType = new String(lenbuf);
            if (messageType.contains(APP_DATA_LEN)) {
                byte[] byteResp = new byte[RESP_MSG_LEN - 8];
                // likewise, this read(...) is not guaranteed to fill the buffer
                inputStream.read(byteResp, 0, RESP_MSG_LEN - 8);
                String readMsg = new String(byteResp);
                message = messageType + readMsg;
            } else {
                byte[] byteResp = new byte[HANDSHAKE_LEN - 8];
                inputStream.read(byteResp, 0, HANDSHAKE_LEN - 8);
                String readMsg = new String(byteResp);
                message = messageType + readMsg;
            }
        }
        return message;
    }

    @Override
    public void serialize(String object, OutputStream outputStream) throws IOException {
        outputStream.write(object.getBytes());
        outputStream.flush();
    }
}
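(Side note on the deserializer above: InputStream.read(byte[], int, int) may legally return fewer bytes than requested, so a fixed-length protocol generally needs a read-fully loop. A minimal sketch, not part of the original code:)

// keep reading until the fixed-length buffer is full; a single read(...)
// may return early, which would otherwise leave a partial frame behind
private static void readFully(InputStream in, byte[] buf) throws IOException {
    int offset = 0;
    while (offset < buf.length) {
        int n = in.read(buf, offset, buf.length - offset);
        if (n == -1) {
            throw new IOException("stream closed before a full frame was read");
        }
        offset += n;
    }
}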
@Override
public String sendMessage(String message) {
    Message<String> request = MessageBuilder.withPayload(message).build();
    DirectChannel outboundChannel = applicationContext.getBean(DirectChannel.class);
    outboundChannel.send(request);
    return message;
}

// Below code is being used to open the connection
TcpNioClientConnectionFactory cf = (TcpNioClientConnectionFactory) applicationContext
        .getBean(AbstractClientConnectionFactory.class);
if (cf != null) {
    TcpNioConnection conn = (TcpNioConnection) cf.getConnection();
}

How to read the Header values in the Batch listener error handling scenario

I am trying to handle exceptions at the listener:
@KafkaListener(id = PropertiesUtil.ID,
        topics = "#{'${kafka.consumer.topic}'}",
        groupId = "${kafka.consumer.group.id.config}",
        containerFactory = "containerFactory",
        errorHandler = "errorHandler")
public void receiveEvents(@Payload List<ConsumerRecord<String, String>> recordList,
        Acknowledgment acknowledgment) {
    try {
        log.info("Consuming the batch of size {} from kafka topic {}", recordList.size(),
                recordList.get(0).topic());
        processEvent(recordList);
        incrementOffset(acknowledgment);
    } catch (Exception exception) {
        throwOrHandleExceptions(exception, recordList, acknowledgment);
        // .........
    }
}
The Kafka container config:
@Bean
public KafkaListenerContainerFactory<ConcurrentMessageListenerContainer<String, String>> containerFactory() {
    ConcurrentKafkaListenerContainerFactory<String, String> factory =
            new ConcurrentKafkaListenerContainerFactory<>();
    factory.setConcurrency(this.numberOfConsumers);
    factory.getContainerProperties().setAckOnError(false);
    factory.getContainerProperties().setAckMode(ContainerProperties.AckMode.MANUAL);
    factory.setConsumerFactory(getConsumerFactory());
    factory.setBatchListener(true);
    return factory;
}
The listener error handler implementation:
@Bean
public ConsumerAwareListenerErrorHandler errorHandler() {
    return (m, e, c) -> {
        MessageHeaders headers = m.getHeaders();
        List<String> topics = headers.get(KafkaHeaders.RECEIVED_TOPIC, List.class);
        List<Integer> partitions = headers.get(KafkaHeaders.RECEIVED_PARTITION_ID, List.class);
        List<Long> offsets = headers.get(KafkaHeaders.OFFSET, List.class);
        Map<TopicPartition, Long> offsetsToReset = new HashMap<>();
        for (int i = 0; i < topics.size(); i++) {
            int index = i;
            offsetsToReset.compute(new TopicPartition(topics.get(i), partitions.get(i)),
                    (k, v) -> v == null ? offsets.get(index) : Math.min(v, offsets.get(index)));
        }
        // ...
    };
}
When I run the same without batch processing, I am able to fetch the partition, topic, and offset values, but when I enable batch processing and test it, I get only two values inside the headers, i.e. id and timestamp; the other values are not set. Am I missing anything here?
What version are you using? I just tested it with Boot 2.2.4 (Spring Kafka 2.3.5) and it works fine...
@SpringBootApplication
public class So60152179Application {

    public static void main(String[] args) {
        SpringApplication.run(So60152179Application.class, args);
    }

    @KafkaListener(id = "so60152179", topics = "so60152179", errorHandler = "eh")
    public void listen(List<String> in) {
        throw new RuntimeException("foo");
    }

    @Bean
    public ConsumerAwareListenerErrorHandler eh() {
        return (m, e, c) -> {
            System.out.println(m);
            return null;
        };
    }

    @Bean
    public ApplicationRunner runner(KafkaTemplate<String, String> template) {
        return args -> {
            template.send("so60152179", "foo");
        };
    }

    @Bean
    public NewTopic topic() {
        return TopicBuilder.name("so60152179").partitions(1).replicas(1).build();
    }
}
spring.kafka.listener.type=batch
spring.kafka.consumer.auto-offset-reset=earliest
GenericMessage [payload=[foo], headers={kafka_offset=[0], kafka_nativeHeaders=[RecordHeaders(headers = [], isReadOnly = false)], kafka_consumer=org.apache.kafka.clients.consumer.KafkaConsumer#2f2e787f, kafka_timestampType=[CREATE_TIME], kafka_receivedMessageKey=[null], kafka_receivedPartitionId=[0], kafka_receivedTopic=[so60152179], kafka_receivedTimestamp=[1581351585253], kafka_groupId=so60152179}]

How to evaluate consuming time in a Kafka Streams application

I have a 1.0.0 Kafka Streams application with the two classes below, FilterByPolicyStreamsApp and FilterByPolicyTransformerSupplier. In my application, I read events, perform some conditional checks, and forward them to another topic on the same Kafka cluster. I am able to measure the producing time with the eventsForwardTimeInMs variable in the FilterByPolicyTransformerSupplier class, but I am unable to measure the consuming time (with and without (de)serialization). How can I get this time? Please help me. (A timing-deserializer sketch follows the second class below.)
FilterByPolicyStreamsApp.java:
public class FilterByPolicyStreamsApp implements CommandLineRunner {

    String policyKafkaTopicName = "policy";
    String policyFilterDataKafkaTopicName = "policy.filter.data";
    String bootstrapServers = "11.1.1.1:9092";
    String sampleEventsKafkaTopicName = "sample-.*";
    String applicationId = "filter-by-policy-app";
    String policyFilteredEventsKafkaTopicName = "policy.filter.events";

    public static void main(String[] args) {
        SpringApplication.run(FilterByPolicyStreamsApp.class, args);
    }

    @Override
    public void run(String... arg0) {
        String policyGlobalTableName = policyKafkaTopicName + ".table";
        String policyFilterDataGlobalTable = policyFilterDataKafkaTopicName + ".table";
        Properties config = new Properties();
        config.put(StreamsConfig.APPLICATION_ID_CONFIG, applicationId);
        config.put(StreamsConfig.BOOTSTRAP_SERVERS_CONFIG, bootstrapServers);
        config.put(StreamsConfig.DEFAULT_TIMESTAMP_EXTRACTOR_CLASS_CONFIG, WallclockTimestampExtractor.class);
        KStreamBuilder builder = new KStreamBuilder();
        builder.globalTable(Serdes.String(), new JsonSerde<>(List.class), policyKafkaTopicName,
                policyGlobalTableName);
        builder.globalTable(Serdes.String(), new JsonSerde<>(PolicyFilterData.class), policyFilterDataKafkaTopicName,
                policyFilterDataGlobalTable);
        KStream<String, SampleEvent> events = builder.stream(Serdes.String(),
                new JsonSerde<>(SampleEvent.class), Pattern.compile(sampleEventsKafkaTopicName));
        events = events.transform(new FilterByPolicyTransformerSupplier(policyGlobalTableName,
                policyFilterDataGlobalTable));
        events.to(Serdes.String(), new JsonSerde<>(SampleEvent.class), policyFilteredEventsKafkaTopicName);
        KafkaStreams streams = new KafkaStreams(builder, config);
        streams.start();
        streams.setUncaughtExceptionHandler(new Thread.UncaughtExceptionHandler() {
            @Override
            public void uncaughtException(Thread t, Throwable e) {
                logger.error(e.getMessage(), e);
            }
        });
    }
}
FilterByPolicyTransformerSupplier.java:
public class FilterByPolicyTransformerSupplier
        implements TransformerSupplier<String, SampleEvent, KeyValue<String, SampleEvent>> {

    private String policyGlobalTableName;
    private String policyFilterDataGlobalTable;

    public FilterByPolicyTransformerSupplier(String policyGlobalTableName,
            String policyFilterDataGlobalTable) {
        this.policyGlobalTableName = policyGlobalTableName;
        this.policyFilterDataGlobalTable = policyFilterDataGlobalTable;
    }

    @Override
    public Transformer<String, SampleEvent, KeyValue<String, SampleEvent>> get() {
        return new Transformer<String, SampleEvent, KeyValue<String, SampleEvent>>() {

            private KeyValueStore<String, List<String>> policyStore;
            private KeyValueStore<String, PolicyFilterData> policyMetadataStore;
            private ProcessorContext context;

            @Override
            public void close() {
            }

            @Override
            public void init(ProcessorContext context) {
                this.context = context;
                // Call punctuate every 1 second
                this.context.schedule(1000);
                policyStore = (KeyValueStore<String, List<String>>) this.context
                        .getStateStore(policyGlobalTableName);
                policyMetadataStore = (KeyValueStore<String, PolicyFilterData>) this.context
                        .getStateStore(policyFilterDataGlobalTable);
            }

            @Override
            public KeyValue<String, SampleEvent> punctuate(long arg0) {
                return null;
            }

            @Override
            public KeyValue<String, SampleEvent> transform(String key, SampleEvent event) {
                long eventsForwardTimeInMs = 0;
                long forwardedEventCount = 0;
                List<String> policyIds = policyStore.get(event.getCustomerCode().toLowerCase());
                if (policyIds != null) {
                    for (String policyId : policyIds) {
                        /*
                        PolicyFilterData policyFilterMetadata = policyMetadataStore.get(policyId);
                        Do some condition checks on the event. If it satisfies then will forward them.
                        if (policyFilterMetadata == null) {
                            continue;
                        }
                        */
                        // Using context.forward as an event can map to multiple policies
                        long startForwardTime = System.currentTimeMillis();
                        context.forward(policyId, event);
                        forwardedEventCount++;
                        eventsForwardTimeInMs += System.currentTimeMillis() - startForwardTime;
                    }
                }
                return null;
            }
        };
    }
}
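One way to approximate the deserialization part of the consuming time would be to wrap the JsonSerde's deserializer in a timing decorator and register the wrapped Serde with builder.stream(...). A sketch (the class is made up for illustration, targeting the Kafka 1.0 Deserializer interface):

import java.util.Map;
import java.util.concurrent.TimeUnit;
import java.util.concurrent.atomic.AtomicLong;

import org.apache.kafka.common.serialization.Deserializer;

// sketch: decorates an existing Deserializer and accumulates time spent in it
public class TimingDeserializer<T> implements Deserializer<T> {

    private final Deserializer<T> delegate;
    private final AtomicLong totalNanos = new AtomicLong();

    public TimingDeserializer(Deserializer<T> delegate) {
        this.delegate = delegate;
    }

    @Override
    public void configure(Map<String, ?> configs, boolean isKey) {
        delegate.configure(configs, isKey);
    }

    @Override
    public T deserialize(String topic, byte[] data) {
        long start = System.nanoTime();
        try {
            return delegate.deserialize(topic, data);
        } finally {
            totalNanos.addAndGet(System.nanoTime() - start);
        }
    }

    @Override
    public void close() {
        delegate.close();
    }

    public long totalTimeMillis() {
        return TimeUnit.NANOSECONDS.toMillis(totalNanos.get());
    }
}

The same decorator pattern could be applied to the Serializer side to measure serialization time on the producing path.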

How to get the original message after the error handler fires and write it to a file

I've been building a Spring Integration email service using the Java DSL.
This service must have a recovery policy in order to retry sending the emails, but I'm not having any success.
A brief story: the application receives a payload and headers and tries to send them to the email server. It tries 3 times and, in case of failure, creates a new file with the header and body of the message.
How could I get the original message (header and payload) and put the information pair in a JSON file in case of failure to send the email?
Thanks.
These are my beans and the service:
/**
 * #################
 * MESSAGE ENDPOINTS
 * #################
 */
@Bean(name = PollerMetadata.DEFAULT_POLLER)
public PollerMetadata poller() {
    return Pollers
            .fixedRate(NumberUtils.createLong(QUEUE_RATE))
            .maxMessagesPerPoll(NumberUtils.createLong(QUEUE_CAPACITY))
            .errorHandler(e -> LOG.error("Exception : " + e.getMessage()))
            .get();
}

@Bean
public MessageChannel recoveryChannel() {
    return MessageChannels.direct().get();
}

@MessagingGateway
public static interface MailService {

    @Gateway(requestChannel = "mail.input")
    void sendMail(String body, @Headers Map<String, String> headers);
}

@Bean
public RetryPolicy retryPolicy() {
    final Map<Class<? extends Throwable>, Boolean> map =
            new HashMap<Class<? extends Throwable>, Boolean>() {
                {
                    put(MailSendException.class, true);
                    put(RuntimeException.class, true);
                }
                private static final long serialVersionUID = -1L;
            };
    final RetryPolicy ret = new SimpleRetryPolicy(3, map, true);
    return ret;
}

@Bean
public RetryTemplate retryTemplate() {
    final RetryTemplate ret = new RetryTemplate();
    ret.setRetryPolicy(retryPolicy());
    ret.setThrowLastExceptionOnExhausted(false);
    return ret;
}

@Bean
public Advice retryAdvice() {
    final RequestHandlerRetryAdvice advice = new RequestHandlerRetryAdvice();
    advice.setRetryTemplate(retryTemplate());
    RecoveryCallback<Object> recoveryCallBack = new ErrorMessageSendingRecoverer(recoveryChannel());
    advice.setRecoveryCallback(recoveryCallBack);
    return advice;
}

private MailSendingMessageHandlerSpec mailOutboundAdapter() {
    return Mail.outboundAdapter(emailServerHost())
            .port(serverPort())
            .credentials(MAIL_USER_NAME, MAIL_PASSWORD)
            .protocol(emailProtocol())
            .javaMailProperties(p -> p
                    .put("mail.debug", "true")
                    .put("mail.smtp.ssl.enable", enableSSL())
                    .put("mail.smtp.connectiontimeout", 5000)
                    .put("mail.smtp.timeout", 5000));
}

@Bean
public FileWritingMessageHandler fileOutboundAdapter() {
    return Files
            .outboundAdapter(new File("logs/errors/"))
            .autoCreateDirectory(true)
            .get();
}

/**
 * ################
 * FLOWS
 * ################
 */
@Bean
public IntegrationFlow smtp() {
    return IntegrationFlows.from("mail.input")
            .channel(MessageChannels.queue())
            .handle(this.mailOutboundAdapter(),
                    e -> e.id("smtpOut")
                            .advice(retryAdvice()))
            .get();
}

@Bean
public IntegrationFlow errorFlow() {
    return IntegrationFlows.from(recoveryChannel())
            .transform(Transformers.toJson())
            .enrichHeaders(c -> c.header(FileHeaders.FILENAME, "emailErrors"))
            .handle(this.fileOutboundAdapter())
            .get();
}
The error message has a MessagingException payload, which has two properties: cause and failedMessage.
The failed message is the message at the point of failure, with headers and payload.
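As a sketch of the recovery side (untested; it assumes the recoveryChannel receives the ErrorMessage built by ErrorMessageSendingRecoverer), the errorFlow could unwrap the failed message before the JSON transform:

@Bean
public IntegrationFlow errorFlow() {
    return IntegrationFlows.from(recoveryChannel())
            // the ErrorMessage payload is a MessagingException; recover the
            // original message and expose its headers and payload as a map
            .transform(MessagingException.class, ex -> {
                Message<?> failed = ex.getFailedMessage();
                Map<String, Object> contents = new HashMap<>();
                contents.put("headers", failed.getHeaders());
                contents.put("payload", failed.getPayload());
                return contents;
            })
            .transform(Transformers.toJson())
            .enrichHeaders(c -> c.header(FileHeaders.FILENAME, "emailErrors"))
            .handle(fileOutboundAdapter())
            .get();
}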

netty SimpleChannelInboundHandler<String> channelRead0 only occasionally invoked

I know that there are several similar questions that have either been answered or are still outstanding; however, for the life of me...
Later Edit 2016-08-25 10:05 CST - Actually, I asked the wrong question.
The question is the following: given that I have both a netty server (taken from the DiscardServer example) and a netty client (see below), what must I do to force the DiscardServer to immediately send the client a request?
I have added an OutboundHandler to the server and to the client.
After looking at both the DiscardServer and PingPongServer examples, there is an external event occurring to kick off all the action. In the case of the DiscardServer, it originally waits for a telnet connection, then transmits whatever was in the telnet message to the client.
In the case of the PingPongServer, the SERVER waits for the client to initiate the action.
What I want is for the server to start transmitting immediately after connecting with the client. None of the examples from netty seem to do this.
If I have missed something and someone can point it out, much good karma.
My client:
public final class P4Listener {

    static final Logger LOG;
    static final String HOST;
    static final int PORT;
    static final Boolean SSL = Boolean.FALSE;
    public static Dto DTO;

    static {
        LOG = LoggerFactory.getLogger(P4Listener.class);
        HOST = P4ListenerProperties.getP4ServerAddress();
        PORT = Integer.valueOf(P4ListenerProperties.getListenerPort());
        DTO = new Dto();
    }

    public static String getId() {
        return DTO.getId();
    }

    public static void main(String[] args) throws Exception {
        final SslContext sslCtx;
        if (SSL) {
            LOG.info("{} creating SslContext", getId());
            sslCtx = SslContextBuilder.forClient().trustManager(InsecureTrustManagerFactory.INSTANCE).build();
        } else {
            sslCtx = null;
        }
        EventLoopGroup group = new NioEventLoopGroup();
        try {
            Bootstrap b = new Bootstrap();
            b.group(group).channel(NioSocketChannel.class)
                    .handler(new LoggingHandler(LogLevel.INFO))
                    .handler(new P4ListenerInitializer(sslCtx));
            // Start the connection attempt.
            LOG.debug(" {} starting connection attempt...", getId());
            Channel ch = b.connect(HOST, PORT).sync().channel();
            // ChannelFuture localWriteFuture = ch.writeAndFlush("ready\n");
            // localWriteFuture.sync();
        } finally {
            group.shutdownGracefully();
        }
    }
}
public class P4ListenerHandler extends SimpleChannelInboundHandler<String> {

    static final Logger LOG = LoggerFactory.getLogger(P4ListenerHandler.class);
    static final DateTimeFormatter DTFormatter = DateTimeFormatter.ofPattern("yyyyMMdd-HHMMss.SSS");
    static final String EndSOT;
    static final String StartSOT;
    static final String EOL = "\n";
    static final ClassPathXmlApplicationContext AppContext;

    static {
        EndSOT = P4ListenerProperties.getEndSOT();
        StartSOT = P4ListenerProperties.getStartSOT();
        AppContext = new ClassPathXmlApplicationContext(new String[] { "applicationContext.xml" });
    }

    private final RequestValidator rv = new RequestValidator();
    private JAXBContext jaxbContext = null;
    private Unmarshaller jaxbUnmarshaller = null;
    private boolean initialized = false;
    private Dto dto;

    public P4ListenerHandler() {
        dto = new Dto();
    }

    public Dto getDto() {
        return dto;
    }

    public String getId() {
        return getDto().getId();
    }

    Message convertXmlToMessage(String xml) {
        if (xml == null)
            throw new IllegalArgumentException("xml message is null!");
        try {
            jaxbContext = JAXBContext.newInstance(p4.model.xml.request.Message.class, p4.model.xml.request.Header.class,
                    p4.model.xml.request.Claims.class, p4.model.xml.request.Insurance.class,
                    p4.model.xml.request.Body.class, p4.model.xml.request.Prescriber.class,
                    p4.model.xml.request.PriorAuthorization.class,
                    p4.model.xml.request.PriorAuthorizationSupportingDocumentation.class);
            jaxbUnmarshaller = jaxbContext.createUnmarshaller();
            StringReader strReader = new StringReader(xml);
            Message m = (Message) jaxbUnmarshaller.unmarshal(strReader);
            return m;
        } catch (JAXBException jaxbe) {
            String error = StacktraceUtil.getCustomStackTrace(jaxbe);
            LOG.error(error);
            throw new P4XMLUnmarshalException("Problems when attempting to unmarshal transmission string: \n" + xml,
                    jaxbe);
        }
    }

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        LOG.debug("{} let server know we are ready", getId());
        ctx.writeAndFlush("Ready...\n");
    }

    /**
     * Important - this method will be renamed to
     * <code><b>messageReceived(ChannelHandlerContext, I)</b></code> in netty 5.0
     *
     * @param ctx
     * @param msg
     */
    @Override
    protected void channelRead0(ChannelHandlerContext ctx, String msg) throws Exception {
        ChannelFuture lastWriteFuture = null;
        LOG.debug("{} -- received message: {}", getId(), msg);
        Channel channel = ctx.channel();
        Message m = null;
        try {
            if (msg instanceof String && msg.length() > 0) {
                m = convertXmlToMessage(msg);
                m.setMessageStr(msg);
                dto.setRequestMsg(m);
                LOG.info("{}: received TIMESTAMP: {}", dto.getId(), LocalDateTime.now().format(DTFormatter));
                LOG.debug("{}: received from server: {}", dto.getId(), msg);
                /*
                 * theoretically we have a complete P4(XML) request
                 */
                final List<RequestFieldError> errorList = rv.validateMessage(m);
                if (!errorList.isEmpty()) {
                    for (RequestFieldError fe : errorList) {
                        lastWriteFuture = channel.writeAndFlush(fe.toString().concat(EOL));
                    }
                }
                /*
                 * Create DBHandler with message, messageStr, clientIp to get
                 * dbResponse
                 */
                InetSocketAddress socketAddress = (InetSocketAddress) channel.remoteAddress();
                InetAddress inetaddress = socketAddress.getAddress();
                String clientIp = inetaddress.getHostAddress();
                /*
                 * I know - bad form to ask the ApplicationContext for the
                 * bean... BUT ...lack of time turns angels into demons
                 */
                final P4DbRequestHandler dbHandler = (P4DbRequestHandler) AppContext.getBean("dbRequestHandler");
                // must set the requestDTO for the dbHandler!
                dbHandler.setClientIp(clientIp);
                dbHandler.setRequestDTO(dto);
                //
                // build database request and receive response (string)
                String dbResponse = dbHandler.submitDbRequest();
                /*
                 * create ResponseHandler and get back response string
                 */
                P4ResponseHandler responseHandler = new P4ResponseHandler(dto, dbHandler);
                String responseStr = responseHandler.decodeDbServiceResponse(dbResponse);
                /*
                 * write response string to output and repeat exercise
                 */
                LOG.debug("{} -- response to be written back to server:\n {}", dto.getId(), responseStr);
                lastWriteFuture = channel.writeAndFlush(responseStr.concat(EOL));
                //
                LOG.info("{}: response sent TIMESTAMP: {}", dto.getId(), LocalDateTime.now().format(DTFormatter));
            } else {
                throw new P4EventException(dto.getId() + " -- Message received is not a String");
            }
            processWriteFutures(lastWriteFuture);
        } catch (Throwable t) {
            String tError = StacktraceUtil.getCustomStackTrace(t);
            LOG.error(tError);
        } finally {
            if (lastWriteFuture != null) {
                lastWriteFuture.sync();
            }
        }
    }

    private void processWriteFutures(ChannelFuture writeFuture) throws InterruptedException {
        // Wait until all messages are flushed before closing the channel.
        if (writeFuture != null) {
            writeFuture.sync();
        }
    }

    @Override
    public void exceptionCaught(ChannelHandlerContext ctx, Throwable cause) {
        cause.printStackTrace();
        ctx.close();
    }
}
/**
 * Creates a newly configured {@link ChannelPipeline} for a new channel.
 */
public class P4ListenerInitializer extends ChannelInitializer<SocketChannel> {

    private static final StringDecoder DECODER = new StringDecoder();
    private static final StringEncoder ENCODER = new StringEncoder();
    private final SslContext sslCtx;

    public P4ListenerInitializer(SslContext sslCtx) {
        this.sslCtx = sslCtx;
    }

    @Override
    public void initChannel(SocketChannel ch) {
        P4ListenerHandler lh = null;
        ChannelPipeline pipeline = ch.pipeline();
        if (sslCtx != null) {
            P4Listener.LOG.info("{} -- constructing SslContext new handler ", P4Listener.getId());
            pipeline.addLast(sslCtx.newHandler(ch.alloc(), P4Listener.HOST, P4Listener.PORT));
        } else {
            P4Listener.LOG.info(
                    "{} -- SslContext null; bypassing adding sslCtx.newHandler(ch.alloc(), P4Listener.HOST, P4Listener.PORT) ",
                    P4Listener.getId());
        }
        // Add the text line codec combination first,
        pipeline.addLast(new DelimiterBasedFrameDecoder(8192, Delimiters.lineDelimiter()));
        pipeline.addLast(DECODER);
        P4Listener.LOG.debug("{} -- added Decoder ", P4Listener.getId());
        pipeline.addLast(ENCODER);
        P4Listener.LOG.debug("{} -- added Encoder ", P4Listener.getId());
        // and then business logic.
        pipeline.addLast(lh = new P4ListenerHandler());
        P4Listener.LOG.debug("{} -- added P4ListenerHandler: {} ", P4Listener.getId(), lh.getClass().getSimpleName());
    }
}
@Sharable
public class P4ListenerOutboundHandler extends ChannelOutboundHandlerAdapter {

    static final Logger LOG = LoggerFactory.getLogger(P4ListenerOutboundHandler.class);

    private Dto outBoundDTO = new Dto();

    public String getId() {
        return this.outBoundDTO.getId();
    }

    @Override
    public void write(ChannelHandlerContext ctx, Object msg, ChannelPromise promise) {
        try {
            ChannelFuture lastWrite = ctx.write(Unpooled.copiedBuffer((String) msg, CharsetUtil.UTF_8));
            try {
                if (lastWrite != null) {
                    lastWrite.sync();
                    promise.setSuccess();
                }
            } catch (InterruptedException e) {
                promise.setFailure(e);
                e.printStackTrace();
            }
        } finally {
            ReferenceCountUtil.release(msg);
        }
    }
}
(output from client omitted)
Just override channelActive(...) in the server's handler and trigger a write there.
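For example, a minimal sketch of such a server-side handler (the class name and payload are illustrative, not from the DiscardServer example):

public class ImmediateSendServerHandler extends SimpleChannelInboundHandler<String> {

    @Override
    public void channelActive(ChannelHandlerContext ctx) {
        // fires as soon as the connection is established; no external
        // event (telnet, client message) is needed to start writing
        ctx.writeAndFlush("server says hello\n");
    }

    @Override
    protected void channelRead0(ChannelHandlerContext ctx, String msg) {
        // handle the client's reply here
    }
}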