RTI DDS warning crashing program - publish-subscribe

I am running a program that uses RTI DDS and keep encountering the following warning message, along with constant program crashes:
No transport available to reach locator shmem://0000:0202:0402:0000:0000:0000:0000:0000:7411
What could be causing the above issue and what is a possible solution to the issue? I am running RTI DDS 5.2.0 on RHEL 6.8.
EDIT:
Here is also the QOS file
<?xml version="1.0"?>
<dds>
<qos_library name="Keep_History_Library">
<qos_profile name="Keep_Deep_History_profile" is_default_qos="true">
<participant_qos>
<discovery>
<initial_peers>
<element>239.255.0.1</element>
<element>4@builtin.udpv4://127.0.0.1</element>
<element>builtin.shmem://</element>
</initial_peers>
<multicast_receive_addresses>
<element>239.255.0.1</element>
</multicast_receive_addresses>
</discovery>
<property>
<value>
<!--UDP/IP Transport configuration -->
<element>
<name>dds.transport.UDPv4.builtin.parent.message_size_max</name>
<value>65536</value>
</element>
<element>
<name>dds.transport.UDPv4.builtin.send_socket_buffer_size</name>
<value>1000000</value>
</element>
<element>
<name>dds.transport.UDPv4.builtin.recv_socket_buffer_size</name>
<value>2000000</value>
</element>
<!-- Definition of the flow controller. See the Users' Manual, section 6.6 FlowControllers -->
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.scheduling_policy</name>
<value>DDS_RR_FLOW_CONTROLLER_SCHED_POLICY</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.period.sec</name>
<value>0</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.period.nanosec</name>
<value>10000000</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.max_tokens</name>
<value>100</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.tokens_added_per_period</name>
<value>40</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.tokens_leaked_per_period</name>
<value>0</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.bytes_per_token</name>
<value>66000</value>
</element>
</value>
</property>
</participant_qos>
<participant_factory_qos>
<logging>
<verbosity>ALL</verbosity>
<category>ALL</category>
<print_format>TIMESTAMPED</print_format>
<output_file>/root/Desktop/ddslog.txt</output_file>
</logging>
</participant_factory_qos>
<datawriter_qos name="ReliableWriter">
<publish_mode>
<kind>ASYNCHRONOUS_PUBLISH_MODE_QOS</kind>
<flow_controller_name>DEFAULT_FLOW_CONTROLLER_NAME</flow_controller_name>
</publish_mode>
<durability>
<kind>TRANSIENT_LOCAL_DURABILITY_QOS</kind>
</durability>
<history>
<kind>KEEP_LAST_HISTORY_QOS</kind>
<depth>10</depth>
</history>
<reliability>
<kind>RELIABLE_RELIABILITY_QOS</kind>
</reliability>
<publication_name>
<name>DataWriter</name>
</publication_name>
</datawriter_qos>
<datareader_qos name="ReliableReader">
<history>
<kind>KEEP_LAST_HISTORY_QOS</kind>
<depth>10</depth>
</history>
<reliability>
<kind>RELIABLE_RELIABILITY_QOS</kind>
</reliability>
<durability>
<kind>TRANSIENT_LOCAL_DURABILITY_QOS</kind>
</durability>
<subscription_name>
<name>DataReader</name>
</subscription_name>
</datareader_qos>
</qos_profile>
<qos_profile name="Keep_Short_History_profile" base_name="Keep_Deep_History_profile">
<participant_qos>
<discovery>
<initial_peers>
<element>239.255.0.1</element>
<element>4@builtin.udpv4://127.0.0.1</element>
<element>builtin.shmem://</element>
</initial_peers>
<multicast_receive_addresses>
<element>239.255.0.1</element>
</multicast_receive_addresses>
</discovery>
<property>
<value>
<!--UDP/IP Transport configuration -->
<element>
<name>dds.transport.UDPv4.builtin.parent.message_size_max</name>
<value>65536</value>
</element>
<element>
<name>dds.transport.UDPv4.builtin.send_socket_buffer_size</name>
<value>1000000</value>
</element>
<element>
<name>dds.transport.UDPv4.builtin.recv_socket_buffer_size</name>
<value>2000000</value>
</element>
<!-- Definition of the flow controller. See the Users' Manual, section 6.6 FlowControllers -->
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.scheduling_policy</name>
<value>DDS_RR_FLOW_CONTROLLER_SCHED_POLICY</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.period.sec</name>
<value>0</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.period.nanosec</name>
<value>10000000</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.max_tokens</name>
<value>100</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.tokens_added_per_period</name>
<value>40</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.tokens_leaked_per_period</name>
<value>0</value>
</element>
<element>
<name>dds.flow_controller.token_bucket.MyFlowController.token_bucket.bytes_per_token</name>
<value>66000</value>
</element>
</value>
</property>
</participant_qos>
<participant_factory_qos>
<logging>
<verbosity>ALL</verbosity>
<category>ALL</category>
<print_format>TIMESTAMPED</print_format>
<output_file>/root/Desktop/ddslog.txt</output_file>
</logging>
</participant_factory_qos>
<datawriter_qos name="ReliableWriter">
<publish_mode>
<kind>ASYNCHRONOUS_PUBLISH_MODE_QOS</kind>
<flow_controller_name>DEFAULT_FLOW_CONTROLLER_NAME</flow_controller_name>
</publish_mode>
<history>
<kind>KEEP_LAST_HISTORY_QOS</kind>
<depth>1</depth>
</history>
<reliability>
<kind>RELIABLE_RELIABILITY_QOS</kind>
</reliability>
<durability>
<kind>TRANSIENT_LOCAL_DURABILITY_QOS</kind>
</durability>
<publication_name>
<name>HistoryDataWriter</name>
</publication_name>
</datawriter_qos>
<datareader_qos name="ReliableReader">
<history>
<kind>KEEP_LAST_HISTORY_QOS</kind>
<depth>1</depth>
</history>
<reliability>
<kind>RELIABLE_RELIABILITY_QOS</kind>
</reliability>
<durability>
<kind>TRANSIENT_LOCAL_DURABILITY_QOS</kind>
</durability>
<subscription_name>
<name>HistoryDataReader</name>
</subscription_name>
</datareader_qos>
</qos_profile>
</qos_library>
</dds>

@jgr208 The "No transport available to reach locator" message means that your application received, during the first phase of discovery, a locator that it does not have a transport for. This is a warning only and will not cause your application to crash; in fact, it might be normal behavior depending on how your applications are set up with transports. This is described in the following knowledge base article:
https://community.rti.com/kb/what-does-cant-reach-locator-error-message-mean
However, the warning message might look different in 5.2.0.
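If the unreachable locator is coming from one of your own participants (for example, one side announces a shared-memory locator while the other side does not have shared memory enabled), one way to keep the locators consistent is to give every participant the same built-in transport mask. Below is a minimal sketch, assuming you want both UDPv4 and shared memory enabled; note that your posted QoS file does not currently set this policy:
<participant_qos>
    <transport_builtin>
        <!-- announce and listen on the same set of built-in transports in every participant -->
        <mask>UDPv4|SHMEM</mask>
    </transport_builtin>
</participant_qos>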
This warning message will not cause the crash, so we'll need more information to determine why the crash is happening. Can you get a stack trace from a core dump, or run your application under gdb to see the backtrace?

Related

Is there any way to disable the rank order in Rundeck?

I have nodes abcd1,abcd2,abcd3,abcd4.
Input = abcd1,abcd3,abcd4,abcd2
If the rank order is ascending, execution follows the order abcd1,abcd2,abcd3,abcd4.
If the rank order is descending, execution follows the order abcd4,abcd3,abcd2,abcd1.
I want my executions to follow the same order as the input, i.e. abcd1,abcd3,abcd4,abcd2. How can this be achieved?
You can use the "Rank Attribute" option on your job definition, for that, just define (or use) some attribute on your node source and Rundeck use it, for example, using this resources.xml (check myorder custom attribute):
<?xml version="1.0" encoding="UTF-8"?>
<project>
<node name="node00" description="Node 00" tags="" myorder="02" hostname="192.168.33.20" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-1062.4.1.el7.x86_64" username="vagrant" ssh-key-storage-path="keys/rundeck"/>
<node name="node01" description="Node 01" tags="" myorder="03" hostname="192.168.33.21" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-1062.4.1.el7.x86_64" username="vagrant" ssh-key-storage-path="keys/rundeck"/>
<node name="node02" description="Node 02" tags="" myorder="01" hostname="192.168.33.22" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-1062.4.1.el7.x86_64" username="vagrant" ssh-key-storage-path="keys/rundeck"/>
</project>
Now set set "Rank Attribute" textbox with myorder attribute, save the job and run again. You can see the order based on that attribute.
More information here.
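If you export the job definition as XML, that setting shows up in the job's dispatch block, roughly as in the sketch below (the same block appears in the Child job further down):
<dispatch>
    <rankAttribute>myorder</rankAttribute>
    <rankOrder>ascending</rankOrder>
    <threadcount>1</threadcount>
    <keepgoing>false</keepgoing>
</dispatch>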
UPDATE 30/07/2020:
Following the first comment on this answer, there is a way to modify the resources.xml file with a bash script, then update the node definition, and then launch the child job with the new rankAttribute order using a Job Reference Step.
First, the script that changes the Rundeck Model Source (needs xmlstarlet):
#!/bin/bash
# Rewrite the myorder attribute of each node in the resources file,
# following the comma-separated order passed in through the job options.
path=$RD_OPTION_PATH
myorder=$RD_OPTION_NODE_ORDER
counter=1
Field_Separator=$IFS
# set comma as internal field separator for the string list
IFS=,
for value in $myorder
do
    echo "$counter"
    xmlstarlet ed --inplace -u "/project/node[$counter][@myorder]/@myorder" -v "$value" "$path"
    counter=$((counter+1))
done
IFS=$Field_Separator
Now, the Parent Job (which calls the script above, refreshes the Rundeck Model Source, and finally launches the "target" job):
<joblist>
<job>
<context>
<options preserveOrder='true'>
<option name='node_order' value='3,2,1' />
<option name='path' required='true' value='/home/m68k/Rundeck/resources/farm.xml' />
</options>
</context>
<defaultTab>nodes</defaultTab>
<description></description>
<executionEnabled>true</executionEnabled>
<id>860e7189-f399-47c4-ab86-5b47017188dd</id>
<loglevel>INFO</loglevel>
<name>Parent</name>
<nodeFilterEditable>true</nodeFilterEditable>
<plugins />
<scheduleEnabled>true</scheduleEnabled>
<sequence keepgoing='false' strategy='sequential'>
<command>
<description>Just a message</description>
<exec>echo "Starting..."</exec>
</command>
<command>
<description>Call the script</description>
<fileExtension>.sh</fileExtension>
<scriptargs />
<scriptfile>/home/m68k/Downloads/TEST.sh</scriptfile>
<scriptinterpreter>/bin/bash</scriptinterpreter>
</command>
<command>
<description>Refresh Rundeck Model Source</description>
<step-plugin type='source-refresh-plugin'>
<configuration>
<entry key='sleep' value='5' />
</configuration>
</step-plugin>
</command>
<command>
<description>And launch the "target" job against the new node order</description>
<jobref name='Child' nodeStep='true'>
<uuid>0e02890e-c9dd-4a65-ace1-bfe90b9b7a8e</uuid>
</jobref>
</command>
</sequence>
<uuid>860e7189-f399-47c4-ab86-5b47017188dd</uuid>
</job>
</joblist>
And finally, the Child Job, the "target" job:
<joblist>
<job>
<defaultTab>nodes</defaultTab>
<description></description>
<dispatch>
<excludePrecedence>true</excludePrecedence>
<keepgoing>false</keepgoing>
<rankAttribute>myorder</rankAttribute>
<rankOrder>ascending</rankOrder>
<successOnEmptyNodeFilter>false</successOnEmptyNodeFilter>
<threadcount>1</threadcount>
</dispatch>
<executionEnabled>true</executionEnabled>
<id>0e02890e-c9dd-4a65-ace1-bfe90b9b7a8e</id>
<loglevel>INFO</loglevel>
<name>Child</name>
<nodeFilterEditable>false</nodeFilterEditable>
<nodefilters>
<filter>node.*</filter>
</nodefilters>
<nodesSelectedByDefault>true</nodesSelectedByDefault>
<plugins />
<scheduleEnabled>true</scheduleEnabled>
<sequence keepgoing='false' strategy='node-first'>
<command>
<exec>echo "Hello ${node.name}"</exec>
</command>
</sequence>
<uuid>0e02890e-c9dd-4a65-ace1-bfe90b9b7a8e</uuid>
</job>
</joblist>
So, if you set the order 3,1,2, you can see the result in the myorder attribute of the resources.xml file, and the child job executes in that order.
<?xml version="1.0"?>
<project>
<node name="node00" description="Node 00" tags="" hostname="192.168.33.20" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-1062.4.1.el7.x86_64" username="vagrant" ssh-key-storage-path="keys/rundeck" myorder="3"/>
<node name="node01" description="Node 01" tags="" hostname="192.168.33.21" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-1062.4.1.el7.x86_64" username="vagrant" ssh-key-storage-path="keys/rundeck" myorder="1"/>
<node name="node02" description="Node 02" tags="" hostname="192.168.33.22" osArch="amd64" osFamily="unix" osName="Linux" osVersion="3.10.0-1062.4.1.el7.x86_64" username="vagrant" ssh-key-storage-path="keys/rundeck" myorder="2"/>
</project>

Is there an option to specify multiple user names for remote execution on the same node?

Is there any way I can access the same node via 2 different users (appuser, webuser) to execute scripts?
Will it be ambiguous if another entry is made for the same node with a different username?
test1:
  hostname: "10.35.20.76"
  nodename: "test1"
  osArch: "x86"
  osFamily: "unix"
  osName: "Solaris"
  osVersion: "11.4"
  tags: "TEST"
  username: "appuser"
A good way to do that is to use a job option referenced from the username attribute of your resources file, in this way:
Job Definition example:
<joblist>
<job>
<context>
<options preserveOrder='true'>
<option name='opt1' value='vagrant' />
</options>
</context>
<defaultTab>nodes</defaultTab>
<description></description>
<dispatch>
<excludePrecedence>true</excludePrecedence>
<keepgoing>false</keepgoing>
<rankOrder>ascending</rankOrder>
<successOnEmptyNodeFilter>false</successOnEmptyNodeFilter>
<threadcount>1</threadcount>
</dispatch>
<executionEnabled>true</executionEnabled>
<id>eaf3f18e-97c0-4c85-b2fd-9a0b8dd8696e</id>
<loglevel>INFO</loglevel>
<name>HelloWorld</name>
<nodeFilterEditable>false</nodeFilterEditable>
<nodefilters>
<filter>name: node00</filter>
</nodefilters>
<nodesSelectedByDefault>true</nodesSelectedByDefault>
<scheduleEnabled>true</scheduleEnabled>
<sequence keepgoing='false' strategy='node-first'>
<command>
<exec>echo "done"</exec>
</command>
</sequence>
<uuid>eaf3f18e-97c0-4c85-b2fd-9a0b8dd8696e</uuid>
</job>
</joblist>
Resources.xml example (check the "username" attribute):
<?xml version="1.0" encoding="UTF-8"?>
<project>
<node name="mylocalhost" description="Rundeck server node" tags="" hostname="mylocalhost" osArch="amd64" osFamily="unix" osName="Linux" osVersion="5.15.0-66-generic" username="jdoe"/>
<node name="node00" description="Node 00" tags="" hostname="192.168.33.20" osArch="amd64" osFamily="unix" osName="Linux" osVersion="4.10.0-1062.4.1.el7.x86_64" username="${option.opt1}" ssh-key-storage-path="keys/rundeck"/>
</project>
Also, you can do the same using passwords. Take a look at this.
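For reference, a password-based node entry might look like the sketch below; the ssh-authentication and ssh-password-storage-path attributes come from the SSH node executor plugin, and keys/node00.pass is only a placeholder Key Storage path:
<?xml version="1.0" encoding="UTF-8"?>
<project>
    <!-- password-based SSH authentication; the password is read from Rundeck Key Storage -->
    <node name="node00" description="Node 00" hostname="192.168.33.20"
          osFamily="unix" osName="Linux" username="${option.opt1}"
          ssh-authentication="password"
          ssh-password-storage-path="keys/node00.pass"/>
</project>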

Is there a way to reuse the login/password of the login account in a playbook?

I have created a job with 2 options where the user enters his login/password. These credentials are used later in a playbook as extra variables.
But the credentials are readable in the logs!
How can I work around that?
You can use the "Mask Password" Log filter in your inline-playbook or your playbook steps (also, for any Rundeck step), you can check the documentation here. I did a Job Definition example that works:
<joblist>
<job>
<context>
<options preserveOrder='true'>
<option name='opt1' secure='true' value='12345' valueExposed='true' />
</options>
</context>
<defaultTab>nodes</defaultTab>
<description></description>
<dispatch>
<excludePrecedence>true</excludePrecedence>
<keepgoing>false</keepgoing>
<rankOrder>ascending</rankOrder>
<successOnEmptyNodeFilter>false</successOnEmptyNodeFilter>
<threadcount>1</threadcount>
</dispatch>
<executionEnabled>true</executionEnabled>
<id>2c9b3903-a545-4dbd-aeac-578889dbb611</id>
<loglevel>INFO</loglevel>
<name>HelloWorld</name>
<nodeFilterEditable>false</nodeFilterEditable>
<nodefilters>
<filter>name: 192.168.33.20</filter>
</nodefilters>
<nodesSelectedByDefault>true</nodesSelectedByDefault>
<scheduleEnabled>true</scheduleEnabled>
<sequence keepgoing='false' strategy='node-first'>
<command>
<node-step-plugin type='com.batix.rundeck.plugins.AnsiblePlaybookInlineWorkflowNodeStep'>
<configuration>
<entry key='ansible-base-dir-path' value='/home/user/' />
<entry key='ansible-become' value='false' />
<entry key='ansible-playbook-inline' value='---
# This playbook prints a simple debug message
- name: Echo
hosts: all
connection: local
tasks:
- name: Print debug message
debug:
msg: ${option.opt1}' />
<entry key='ansible-ssh-passphrase-option' value='option.password' />
<entry key='ansible-ssh-use-agent' value='false' />
</configuration>
</node-step-plugin>
<plugins>
<LogFilter type='LogFilter:mask-passwords'>
<config>
<color>red</color>
<replacement>[SECURE]</replacement>
</config>
</LogFilter>
</plugins>
</command>
</sequence>
<uuid>2c9b3903-a545-4dbd-aeac-578889dbb611</uuid>
</job>
</joblist>

Joomla 3 cannot disable custom plugin

I just installed a custom plugin in Joomla 3 and now I cannot disable it.
When I click disable (or edit) I receive "Internal Server Error".
XML file:
<?xml version="1.0" encoding="utf-8"?>
<extension version="3.0" type="plugin" group="content">
<name>Content - Availability Calendar</name>
<author>Joomla! Project</author>
<creationDate>2010</creationDate>
<copyright>Copyright (C) 2005 - 2010 Open Source Matters. All rights reserved.</copyright>
<license>http://www.gnu.org/licenses/gpl-2.0.html GNU/GPL</license>
<authorEmail>support@msc.gr</authorEmail>
<authorUrl>www.joomla.org</authorUrl>
<version>1.5</version>
<description></description>
<files>
<filename plugin="calendar">calendar.php</filename>
<filename>index.html</filename>
</files>
<params>
<param name="unit_id" type="text" size="5" default="50" label="Villa ID" description="Villa ID"/>
</params>
</extension>
Your XML code is old and is based on the code used for Joomla 1.5. You need to replace the following:
<params>
<param name="unit_id" type="text" size="5" default="50" label="Villa ID" description="Villa ID"/>
</params>
with this:
<config>
<fields name="params">
<fieldset name="basic">
<field name="unit_id" type="text" size="5" default="50" label="Villa ID" description="Villa ID"/>
</fieldset>
</fields>
</config>
Then, for each new parameter you want to add, simply add a new <field> element.
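For example, adding a hypothetical second parameter next to unit_id (the show_legend name below is only an illustration) would look like this:
<config>
    <fields name="params">
        <fieldset name="basic">
            <field name="unit_id" type="text" size="5" default="50" label="Villa ID" description="Villa ID"/>
            <!-- hypothetical extra parameter, added as its own <field> element -->
            <field name="show_legend" type="radio" default="1" label="Show Legend" description="Show the availability legend">
                <option value="0">No</option>
                <option value="1">Yes</option>
            </field>
        </fieldset>
    </fields>
</config>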

Print/Copy LogCat output with the color highlighting?

LogCat output in its color highlighting is very informative and helpful in isolating problems. Sometimes, however, I need to print it to paper (or copy/paste it to an OpenOffice.org Writer document) with the colors!
Is there a way to print LogCat output with the color highlighting? (or at least copy/paste it without losing the color information)?
I'm assuming the LogCat output can be opened in Vim. I found this link - http://vimdoc.sourceforge.net/htmldoc/usr_06.html#06.5 - which talks about how to print (along with the colors). It also talks about how to save to HTML. Try this out; it may help.
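(For reference, the Vim commands involved are :hardcopy for printing with syntax highlighting and :TOhtml for saving the highlighted buffer as an HTML file.)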
I'm afraid I don't have a solution for highlighting inside Eclipse, but if you're using gedit on Debian, you can use this language highlighter I wrote:
<?xml version="1.0" encoding="UTF-8"?>
<language id="logcat" _name="logcat" version="2.0" _section="Others">
<metadata>
<property name="mimetypes">text/x-logcat</property>
<property name="globs">*.logcat</property>
</metadata>
<styles>
<style id="comment" _name="Comment" map-to="def:comment"/>
<style id="verbose" _name="Verbose" map-to="def:identifier"/>
<style id="debug" _name="Debug" map-to="def:preprocessor"/>
<style id="info" _name="Info" map-to="def:type"/>
<style id="warning" _name="Warning" map-to="def:constant"/>
<style id="error" _name="Error" map-to="def:keyword"/>
<style id="fatal" _name="Fatal" map-to="def:error"/>
<style id="others" _name="Others" map-to="def:comment"/>
</styles>
<definitions>
<context id="logcat">
<include>
<context id="comment" style-ref="comment">
<start>--</start>
<end>$</end>
</context>
<context id="comment" style-ref="comment">
<start>#</start>
<end>$</end>
</context>
<context id="datetime" style-ref="comment">
<start>^[0-9]</start>
<end>: </end>
</context>
<context id="verbose" style-ref="verbose">
<start>V/</start>
<end>$</end>
</context>
<context id="debug" style-ref="debug">
<start>D/</start>
<end>$</end>
</context>
<context id="info" style-ref="info">
<start>I/</start>
<end>$</end>
</context>
<context id="warning" style-ref="warning">
<start>W/</start>
<end>$</end>
</context>
<context id="error" style-ref="error">
<start>E/</start>
<end>$</end>
</context>
<context id="fatal" style-ref="fatal">
<start>F/</start>
<end>$</end>
</context>
</include>
</context>
</definitions>
</language>
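As a usage note (paths vary by gedit version): save the definition as logcat.lang in a gtksourceview language-specs directory, for example ~/.local/share/gtksourceview-2.0/language-specs/ or the gtksourceview-3.0 equivalent, and give the saved log a .logcat extension so the *.logcat glob above matches it.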