I'm trying to use JBoss AMQ 6.3 to act as a bridge between a WebSphere MQ hosted on a mainframe and applications running in distributed environments. In order to make it work I've successfully installed the OSGi libraries for WebSphere MQ and deployed a camel component file:
<?xml version="1.0" encoding="UTF-8"?>
<blueprint xmlns="http://www.osgi.org/xmlns/blueprint/v1.0.0" default-activation="eager">

  <!-- Camel routes bridging WebSphere MQ (wmq) and the local ActiveMQ broker (activemq) -->
  <camelContext xmlns="http://camel.apache.org/schema/blueprint">
    <!-- WMQ -> AMQ: forward inbound mainframe messages to GreenQueue -->
    <route>
      <from uri="wmq:queue:DLC1.PBX.LS000004"/>
      <!-- FIX: a raw '&' is illegal inside an XML attribute value and makes the
           blueprint file not well-formed; query-string separators in Camel URIs
           must be written as '&amp;' -->
      <to uri="activemq:queue:GreenQueue?username=jbamq&amp;password=ThisIsABig10-4"/>
    </route>
    <!-- AMQ -> WMQ: forward messages from BlueQueue out to the mainframe -->
    <route>
      <from uri="activemq:queue:BlueQueue?username=jbamq&amp;password=ThisIsABig10-4"/>
      <to uri="wmq:queue:DLC1.PBX.LE000002"/>
    </route>
  </camelContext>

  <!-- Plain connection factory for the local ActiveMQ broker -->
  <bean id="amqConnectionFactory"
        class="org.apache.activemq.ActiveMQConnectionFactory">
    <property name="brokerURL" value="tcp://localhost:61616"/>
  </bean>

  <!-- Pooled wrapper so Camel consumers/producers reuse broker connections;
       start/stop tied to the blueprint container lifecycle -->
  <bean id="pooledConnectionFactory"
        class="org.apache.activemq.pool.PooledConnectionFactory"
        init-method="start" destroy-method="stop">
    <property name="maxConnections" value="8"/>
    <property name="connectionFactory" ref="amqConnectionFactory"/>
  </bean>

  <!-- JMS configuration shared by the 'activemq' component -->
  <bean id="amqConfig"
        class="org.apache.camel.component.jms.JmsConfiguration">
    <property name="connectionFactory" ref="pooledConnectionFactory"/>
    <property name="concurrentConsumers" value="10"/>
  </bean>

  <!-- Registers the 'activemq:' URI scheme used by the routes above -->
  <bean id="activemq"
        class="org.apache.activemq.camel.component.ActiveMQComponent">
    <property name="configuration" ref="amqConfig"/>
  </bean>

  <!-- WebSphere MQ client connection; transportType 1 = TCP/IP client mode -->
  <bean id="wmqConnectionFactory" class="com.ibm.mq.jms.MQConnectionFactory">
    <property name="hostName" value="DLC1.thisorganization.org"/>
    <property name="port" value="1414"/>
    <property name="queueManager" value="DLC1"/>
    <property name="channel" value="DLC1.PBX010.SVRCONN"/>
    <property name="transportType" value="1"/>
    <property name="shareConvAllowed" value="0"/>
  </bean>

  <!-- Registers the 'wmq:' URI scheme used by the routes above -->
  <bean id="wmq" class="org.apache.camel.component.jms.JmsComponent">
    <property name="connectionFactory" ref="wmqConnectionFactory"/>
    <property name="maxConcurrentConsumers" value="5"/>
    <property name="cacheLevelName" value="CACHE_NONE"/>
  </bean>
</blueprint>
At the beginning the component deployed correctly when starting JBoss AMQ, but one week later it is no longer able to start, throwing the following error:
2016-11-16 16:24:43,028 | ERROR | FelixStartLevel | BlueprintContainerImpl | container.BlueprintContainerImpl 409 | 21 - org.apache.aries.blueprint.core - 1.4.5 | Unable to start blueprint container for bundle DeployedQueues.xml/0.0.0
org.osgi.service.blueprint.container.ComponentDefinitionException: Error setting property: PropertyDescriptor <name: connectionFactory, getter: null, setter: [class org.apache.camel.component.jms.JmsComponent.setConnectionFactory(interface javax.jms.ConnectionFactory)]
at org.apache.aries.blueprint.container.BeanRecipe.setProperty(BeanRecipe.java:963)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BeanRecipe.setProperties(BeanRecipe.java:929)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BeanRecipe.setProperties(BeanRecipe.java:910)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BeanRecipe.internalCreate2(BeanRecipe.java:844)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BeanRecipe.internalCreate(BeanRecipe.java:811)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.di.AbstractRecipe$1.call(AbstractRecipe.java:79)[21:org.apache.aries.blueprint.core:1.4.5]
at java.util.concurrent.FutureTask.run(FutureTask.java:262)[:1.7.0_80]
at org.apache.aries.blueprint.di.AbstractRecipe.create(AbstractRecipe.java:88)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintRepository.createInstances(BlueprintRepository.java:247)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintRepository.createAll(BlueprintRepository.java:183)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintContainerImpl.instantiateEagerComponents(BlueprintContainerImpl.java:688)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintContainerImpl.doRun(BlueprintContainerImpl.java:383)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintContainerImpl.run(BlueprintContainerImpl.java:270)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintExtender.createContainer(BlueprintExtender.java:294)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintExtender.createContainer(BlueprintExtender.java:263)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintExtender.modifiedBundle(BlueprintExtender.java:253)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.util.tracker.hook.BundleHookBundleTracker$Tracked.customizerModified(BundleHookBundleTracker.java:500)[15:org.apache.aries.util:1.1.0]
at org.apache.aries.util.tracker.hook.BundleHookBundleTracker$Tracked.customizerModified(BundleHookBundleTracker.java:433)[15:org.apache.aries.util:1.1.0]
at org.apache.aries.util.tracker.hook.BundleHookBundleTracker$AbstractTracked.track(BundleHookBundleTracker.java:725)[15:org.apache.aries.util:1.1.0]
at org.apache.aries.util.tracker.hook.BundleHookBundleTracker$Tracked.bundleChanged(BundleHookBundleTracker.java:463)[15:org.apache.aries.util:1.1.0]
at org.apache.aries.util.tracker.hook.BundleHookBundleTracker$BundleEventHook.event(BundleHookBundleTracker.java:422)[15:org.apache.aries.util:1.1.0]
at org.apache.felix.framework.util.SecureAction.invokeBundleEventHook(SecureAction.java:1127)[org.apache.felix.framework-4.4.1.jar:]
at org.apache.felix.framework.util.EventDispatcher.createWhitelistFromHooks(EventDispatcher.java:696)[org.apache.felix.framework-4.4.1.jar:]
at org.apache.felix.framework.util.EventDispatcher.fireBundleEvent(EventDispatcher.java:484)[org.apache.felix.framework-4.4.1.jar:]
at org.apache.felix.framework.Felix.fireBundleEvent(Felix.java:4429)[org.apache.felix.framework-4.4.1.jar:]
at org.apache.felix.framework.Felix.startBundle(Felix.java:2100)[org.apache.felix.framework-4.4.1.jar:]
at org.apache.felix.framework.Felix.setActiveStartLevel(Felix.java:1299)[org.apache.felix.framework-4.4.1.jar:]
at org.apache.felix.framework.FrameworkStartLevelImpl.run(FrameworkStartLevelImpl.java:304)[org.apache.felix.framework-4.4.1.jar:]
at java.lang.Thread.run(Thread.java:745)[:1.7.0_80]
Caused by: java.lang.Exception: Unable to convert value | com.ibm.mq.jms.MQConnectionFactory#c79b6829 :-
| | XMSC_ADMIN_OBJECT_TYPE :- 20
| | XMSC_ASYNC_EXCEPTIONS :- 1
| | XMSC_CLIENT_ID :- <null>
| | XMSC_CONNECTION_TYPE :- 1
| | XMSC_CONNECTION_TYPE_NAME :- com.ibm.msg.client.wmq
| | XMSC_RTT_DIRECT_AUTH :- 0
| | XMSC_RTT_PROXY_HOSTNAME :- <null>
| | XMSC_RTT_PROXY_PORT :- 443
| | XMSC_WMQ_BROKER_CC_SUBQ :- SYSTEM.JMS.ND.CC.SUBSCRIBER.QUEUE
| | XMSC_WMQ_BROKER_CONTROLQ :- SYSTEM.BROKER.CONTROL.QUEUE
| | XMSC_WMQ_BROKER_PUBQ :- SYSTEM.BROKER.DEFAULT.STREAM
| | XMSC_WMQ_BROKER_QMGR :-
| | XMSC_WMQ_BROKER_SUBQ :- SYSTEM.JMS.ND.SUBSCRIBER.QUEUE
| | XMSC_WMQ_CCDTURL :- <null>
| | XMSC_WMQ_CF_DESCRIPTION :- <null>
| | XMSC_WMQ_CHANNEL :- SQD1.AXB010.SVRCONN
| | XMSC_WMQ_CLEANUP_INTERVAL :- 3600000
| | XMSC_WMQ_CLEANUP_LEVEL :- 1
| | XMSC_WMQ_CLIENT_RECONNECT_OPTIONS :- 0
| | XMSC_WMQ_CLIENT_RECONNECT_TIMEOUT :- 1800
| | XMSC_WMQ_CLONE_SUPPORT :- 0
| | XMSC_WMQ_CONNECTION_MODE :- 1
| | XMSC_WMQ_CONNECTION_NAME_LIST_INT :-
| | | 0 :- SQD1.axa-seguros-es.intraxa(1414)
| | XMSC_WMQ_CONNECTION_TAG :- [B#32525605
| | XMSC_WMQ_CONNECT_OPTIONS :- 0
| | XMSC_WMQ_HEADER_COMP :-
| | | 0 :- 0
| | XMSC_WMQ_LOCAL_ADDRESS :-
| | XMSC_WMQ_MAP_NAME_STYLE :- true
| | XMSC_WMQ_MAX_BUFFER_SIZE :- 1000
| | XMSC_WMQ_MESSAGE_RETENTION :- 1
| | XMSC_WMQ_MESSAGE_SELECTION :- 0
| | XMSC_WMQ_MSG_BATCH_SIZE :- 10
| | XMSC_WMQ_MSG_COMP :-
| | | 0 :- 0
| | XMSC_WMQ_OPT_PUB :- false
| | XMSC_WMQ_OUTCOME_NOTIFICATION :- true
| | XMSC_WMQ_POLLING_INTERVAL :- 5000
| | XMSC_WMQ_PROCESS_DURATION :- 0
| | XMSC_WMQ_PROVIDER_VERSION :- unspecified
| | XMSC_WMQ_PUB_ACK_INTERVAL :- 25
| | XMSC_WMQ_QMGR_CCSID :- 819
| | XMSC_WMQ_QUEUE_MANAGER :- SQD1
| | XMSC_WMQ_RECEIVE_EXIT :- <null>
| | XMSC_WMQ_RECEIVE_EXIT_INIT :- <null>
| | XMSC_WMQ_RECEIVE_ISOLATION :- 0
| | XMSC_WMQ_RESCAN_INTERVAL :- 5000
| | XMSC_WMQ_SECURITY_EXIT :- <null>
| | XMSC_WMQ_SECURITY_EXIT_INIT :- <null>
| | XMSC_WMQ_SEND_CHECK_COUNT :- 0
| | XMSC_WMQ_SEND_EXIT :- <null>
| | XMSC_WMQ_SEND_EXIT_INIT :- <null>
| | XMSC_WMQ_SHARE_CONV_ALLOWED :- 0
| | XMSC_WMQ_SPARSE_SUBSCRIPTIONS :- false
| | XMSC_WMQ_SSL_CERT_STORES_COL :- <null>
| | XMSC_WMQ_SSL_CERT_STORES_STR :- <null>
| | XMSC_WMQ_SSL_CIPHER_SUITE :- <null>
| | XMSC_WMQ_SSL_FIPS_REQUIRED :- false
| | XMSC_WMQ_SSL_KEY_RESETCOUNT :- 0
| | XMSC_WMQ_SSL_PEER_NAME :- <null>
| | XMSC_WMQ_SSL_SOCKET_FACTORY :- <null>
| | XMSC_WMQ_STATUS_REFRESH_INTERVAL :- 60000
| | XMSC_WMQ_SUBSCRIPTION_STORE :- 1
| | XMSC_WMQ_SYNCPOINT_ALL_GETS :- false
| | XMSC_WMQ_TARGET_CLIENT_MATCHING :- true
| | XMSC_WMQ_TEMPORARY_MODEL :- SYSTEM.DEFAULT.MODEL.QUEUE
| | XMSC_WMQ_TEMP_Q_PREFIX :-
| | XMSC_WMQ_TEMP_TOPIC_PREFIX :-
| | XMSC_WMQ_USE_CONNECTION_POOLING :- true
| | brokerVersion :- -1
| | failIfQuiesce :- 1
| | multicast :- 0
| | version :- 7
| | wildcardFormat :- 0 to type javax.jms.ConnectionFactory
at org.apache.aries.blueprint.container.AggregateConverter.convert(AggregateConverter.java:184)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BlueprintRepository.convert(BlueprintRepository.java:402)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.utils.ReflectionUtils$PropertyDescriptor.convert(ReflectionUtils.java:396)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.utils.ReflectionUtils$MethodPropertyDescriptor.internalSet(ReflectionUtils.java:630)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.utils.ReflectionUtils$PropertyDescriptor.set(ReflectionUtils.java:380)[21:org.apache.aries.blueprint.core:1.4.5]
at org.apache.aries.blueprint.container.BeanRecipe.setProperty(BeanRecipe.java:961)[21:org.apache.aries.blueprint.core:1.4.5]
... 28 more
I suspect it stopped working once it received the first message from WebSphereMQ
Finally I managed to deploy the camel component by using a workaround. Once the JBoss A-MQ server is started, I log in to the web console and go to the OSGi tab, then choose to show all bundles and filter by jms.
For each bundle I click on it and on the next screen click the reload button:
Well, perhaps not all of the bundles need to be reloaded, but this way the Camel component is finally deployed.
Related
I'm trying to aggregate data based on timestamp. Basically I'd like to create an array for each day.
So let's say I have a query like this:
SELECT date(task_start) AS started, task_start
FROM tt_records
GROUP BY started, task_start
ORDER BY started DESC;
The output is:
+------------+------------------------+
| started | task_start |
|------------+------------------------|
| 2021-08-30 | 2021-08-30 16:45:55+00 |
| 2021-08-29 | 2021-08-29 06:47:55+00 |
| 2021-08-29 | 2021-08-29 15:41:50+00 |
| 2021-08-28 | 2021-08-28 12:59:20+00 |
| 2021-08-28 | 2021-08-28 14:50:55+00 |
| 2021-08-26 | 2021-08-26 20:46:44+00 |
| 2021-08-24 | 2021-08-24 16:28:05+00 |
| 2021-08-23 | 2021-08-23 16:22:41+00 |
| 2021-08-22 | 2021-08-22 14:01:10+00 |
| 2021-08-21 | 2021-08-21 19:45:18+00 |
| 2021-08-11 | 2021-08-11 16:08:58+00 |
| 2021-07-28 | 2021-07-28 17:39:14+00 |
| 2021-07-19 | 2021-07-19 17:26:24+00 |
| 2021-07-18 | 2021-07-18 15:04:47+00 |
| 2021-06-24 | 2021-06-24 19:53:33+00 |
| 2021-06-22 | 2021-06-22 19:04:24+00 |
+------------+------------------------+
As you can see the started column has repeating dates.
What I'd like to have is:
+------------+--------------------------------------------------+
| started | task_start |
|------------+--------------------------------------------------|
| 2021-08-30 | [2021-08-30 16:45:55+00] |
| 2021-08-29 | [2021-08-29 06:47:55+00, 2021-08-29 15:41:50+00] |
| 2021-08-28 | [2021-08-28 12:59:20+00, 2021-08-28 14:50:55+00] |
| 2021-08-26 | [2021-08-26 20:46:44+00] |
| 2021-08-24 | [2021-08-24 16:28:05+00] |
| 2021-08-23 | [2021-08-23 16:22:41+00] |
| 2021-08-22 | [2021-08-22 14:01:10+00] |
| 2021-08-21 | [2021-08-21 19:45:18+00] |
| 2021-08-11 | [2021-08-11 16:08:58+00] |
| 2021-07-28 | [2021-07-28 17:39:14+00] |
| 2021-07-19 | [2021-07-19 17:26:24+00] |
| 2021-07-18 | [2021-07-18 15:04:47+00] |
| 2021-06-24 | [2021-06-24 19:53:33+00] |
| 2021-06-22 | [2021-06-22 19:04:24+00] |
+------------+--------------------------------------------------+
I need a query to achieve that. Thank you.
You can use array_agg()
SELECT date(task_start) AS started, array_agg(task_start)
FROM tt_records
GROUP BY started
ORDER BY started DESC;
If you want a JSON array, rather than a native Postgres array, use jsonb_agg() instead
I'm using osquery to monitor servers on my network. The following osquery.conf captures snapshots, every minute, of the processes communicating over the network ports and publishes that data to Kafka:
{
"options": {
"logger_kafka_brokers": "cp01.woolford.io:9092,cp02.woolford.io:9092,cp03.woolford.io:9092",
"logger_kafka_topic": "base_topic",
"logger_kafka_acks": "1"
},
"packs": {
"system-snapshot": {
"queries": {
"processes_by_port": {
"query": "select u.username, p.pid, p.name, pos.local_address, pos.local_port, pos.remote_address, pos.remote_port from processes p join users u on u.uid = p.uid join process_open_sockets pos on pos.pid=p.pid where pos.remote_port != '0'",
"interval": 60,
"snapshot": true
}
}
}
},
"kafka_topics": {
"process-port": [
"pack_system-snapshot_processes_by_port"
]
}
}
Here's an example of the output from the query:
osquery> select u.username, p.pid, p.name, pos.local_address, pos.local_port, pos.remote_address, pos.remote_port from processes p join users u on u.uid = p.uid join process_open_sockets pos on pos.pid=p.pid where pos.remote_port != '0';
+--------------------+-------+---------------+------------------+------------+------------------+-------------+
| username | pid | name | local_address | local_port | remote_address | remote_port |
+--------------------+-------+---------------+------------------+------------+------------------+-------------+
| cp-kafka-connect | 13646 | java | 10.0.1.41 | 49018 | 10.0.1.41 | 9092 |
| cp-kafka-connect | 13646 | java | 10.0.1.41 | 49028 | 10.0.1.41 | 9092 |
| cp-kafka-connect | 13646 | java | 10.0.1.41 | 49026 | 10.0.1.41 | 9092 |
| cp-kafka-connect | 13646 | java | 10.0.1.41 | 50558 | 10.0.1.43 | 9092 |
| cp-kafka-connect | 13646 | java | 10.0.1.41 | 50554 | 10.0.1.43 | 9092 |
| cp-kafka-connect | 13646 | java | 10.0.1.41 | 49014 | 10.0.1.41 | 9092 |
| root | 1505 | sssd_be | 10.0.1.41 | 46436 | 10.0.1.89 | 389 |
...
| cp-ksql | 1757 | java | 10.0.1.41 | 56180 | 10.0.1.41 | 9092 |
| cp-ksql | 1757 | java | 10.0.1.41 | 53878 | 10.0.1.43 | 9092 |
| root | 19684 | sshd | 10.0.1.41 | 22 | 10.0.1.53 | 50238 |
| root | 24082 | sshd | 10.0.1.41 | 22 | 10.0.1.53 | 51233 |
| root | 24107 | java | 10.0.1.41 | 56052 | 10.0.1.41 | 9092 |
| root | 24107 | java | 10.0.1.41 | 56054 | 10.0.1.41 | 9092 |
| cp-schema-registry | 24694 | java | 10.0.1.41 | 50742 | 10.0.1.31 | 2181 |
| cp-schema-registry | 24694 | java | 10.0.1.41 | 47150 | 10.0.1.42 | 9093 |
| cp-schema-registry | 24694 | java | 10.0.1.41 | 58068 | 10.0.1.41 | 9093 |
| cp-schema-registry | 24694 | java | 10.0.1.41 | 47152 | 10.0.1.42 | 9093 |
| root | 25782 | osqueryd | 10.0.1.41 | 57700 | 10.0.1.43 | 9092 |
| root | 25782 | osqueryd | 10.0.1.41 | 56188 | 10.0.1.41 | 9092 |
+--------------------+-------+---------------+------------------+------------+------------------+-------------+
Instead of snapshots, I'd like osquery to capture differentials, i.e. to only publish the changes to Kafka.
I tried toggling the snapshot property from true to false. My expectation was that osquery would send the changes. For some reason, when I set "snapshot": false, no data is published to the process-port topic. Instead, all the data is routed to the catchall base_topic.
Can you see what I'm doing wrong?
Update:
I think I'm running into this bug: https://github.com/osquery/osquery/issues/5559
Here's a video walk-through: https://youtu.be/sPdlBBKgJmY
I filed a bug report, with steps to reproduce, in case it's not the same issue: https://github.com/osquery/osquery/issues/5890
Given the context, I can't immediately tell what is causing the issue you are experiencing.
In order to debug this, I would first try using the filesystem logger plugin instead of (or in addition to) the Kafka logger.
Do you get results to the Kafka topic when the query is configured as a snapshot? If so, are you able to verify that the results are actually changing such that a diff should be generated when the query runs in differential mode?
Can you see results logged locally when you use --logger_plugin=filesystem,kafka?
I can see that it is possible to add metadata to a Rackspace virtual machine instance.
I want to get a list of running instances, filtered by a particular metatag value.
I can't see how to do so in the documentation however.
is it possible?
You should be able to do so using the openstack client... but it depends on which metatag you're interested in.
You can get a list of all servers:
openstack server list
Will spit something like
+--------------------------------------+------------------+--------+-----------------------------------------------------------------------------------------------------------+
| ID | Name | Status | Networks |
+--------------------------------------+------------------+--------+-----------------------------------------------------------------------------------------------------------+
| 97606ae9-7f18-4a3c-903a-1583d446119b | trysmallwin | ERROR | |
| cb78b8d5-2f03-4a3f-ab26-f389acbd0b76 | Win-try again | ERROR | public=2607:f298:5:101d:f816:3eff:fe9e:5cd4, 208.113.133.90, 2607:f298:5:101d:f816:3eff:fe36:da45, |
| | | | 208.113.133.93, 2607:f298:5:101d:f816:3eff:fe40:57d5, 208.113.133.95 |
| 040751d1-c4c5-47aa-8dec-1d69a468be1c | hnxhdkwskrvwvdwr | ACTIVE | public=2607:f298:5:101d:f816:3eff:fe60:324, 208.113.130.52 |
+--------------------------------------+------------------+--------+-----------------------------------------------------------------------------------------------------------+
note the ID of the server and investigate deeper:
openstack server show 040751d1-c4c5-47aa-8dec-1d69a468be1c
+--------------------------------------+------------------------------------------------------------+
| Field | Value |
+--------------------------------------+------------------------------------------------------------+
| OS-DCF:diskConfig | MANUAL |
| OS-EXT-AZ:availability_zone | iad-2 |
| OS-EXT-STS:power_state | Running |
| OS-EXT-STS:task_state | None |
| OS-EXT-STS:vm_state | active |
| OS-SRV-USG:launched_at | 2016-07-26T17:32:01.000000 |
| OS-SRV-USG:terminated_at | None |
| accessIPv4 | |
| accessIPv6 | |
| addresses | public=2607:f298:5:101d:f816:3eff:fe60:324, 208.113.130.52 |
| config_drive | True |
| created | 2016-07-26T17:31:51Z |
| flavor | gp1.semisonic (50) |
| hostId | e1efd75d1e8f6a7f5bb228a35db13647281996087d39c65af8ce83d9 |
| id | 040751d1-c4c5-47aa-8dec-1d69a468be1c |
| image | Ubuntu-14.04 (03f89ff2-d66e-49f5-ae61-656a006bbbe9) |
| key_name | stef |
| name | hnxhdkwskrvwvdwr |
| os-extended-volumes:volumes_attached | [] |
| progress | 0 |
| project_id | d2fb6996496044158cf977c2129c8660 |
| properties | |
| security_groups | [{u'name': u'default'}] |
| status | ACTIVE |
| updated | 2016-07-26T17:32:01Z |
| user_id | 5b2ca246f39a425f9a833460bf322603 |
+--------------------------------------+------------------------------------------------------------+
openstack --f json will output the same stuff but in json format that you can more easily manipulate programmatically.
HTH
For some reason, I cannot get HandlerSocket to start listening when I start mariadb (version
10.0.14). I am using CentOS 6.5.
my.cnf has the following settings:
handlersocket_port = 9998
handlersocket_port_wr = 9999
handlersocket_address = 127.0.0.1
Calling "SHOW GLOBAL VARIABLES LIKE 'handlersocket%'" from the mariaDb prompt shows:
+-------------------------------+-----------+
| Variable_name | Value |
+-------------------------------+-----------+
| handlersocket_accept_balance | 0 |
| handlersocket_address | 127.0.0.1 |
| handlersocket_backlog | 32768 |
| handlersocket_epoll | 1 |
| handlersocket_plain_secret | |
| handlersocket_plain_secret_wr | |
| handlersocket_port | 9998 |
| handlersocket_port_wr | 9999 |
| handlersocket_rcvbuf | 0 |
| handlersocket_readsize | 0 |
| handlersocket_sndbuf | 0 |
| handlersocket_threads | 16 |
| handlersocket_threads_wr | 1 |
| handlersocket_timeout | 300 |
| handlersocket_verbose | 10 |
| handlersocket_wrlock_timeout | 12 |
+-------------------------------+-----------+
I can start mariadb successfully, but when I check to see which ports are actively listening,
neither 9998 nor 9999 show up. I've checked the mysqld.log file, but no errors seem to be occurring.
Answering my own question here -
SELINUX needed to be set to permissive mode to get HandlerSocket started.
We are using Jasper Reports to build our reports. There's a report which looks somewhat like this.
Rcpt No | Rcpt Date | Dealer Name | Items | Qty Ordered | Qty Received | Qty Accepted | Qty Rejected
====================================================================================================
1 | 12-08-14 | ABC Corp. |
| Item1 | 30 | 30 | 30 | 0
| Item2 | 30 | 30 | 30 | 0
| Item3 | 30 | 30 | 30 | 0
| Item4 | 30 | 30 | 30 | 0
----------------------------------------------------------------------------------------------------
1 | 12-08-14 | ABC Corp. |
| Item1 | 30 | 30 | 30 | 0
| Item2 | 30 | 30 | 30 | 0
| Item3 | 30 | 30 | 30 | 0
| Item4 | 30 | 30 | 30 | 0
----------------------------------------------------------------------------------------------------
1 | 12-08-14 | ABC Corp. |
| Item1 | 30 | 30 | 30 | 0
| Item2 | 30 | 30 | 30 | 0
| Item3 | 30 | 30 | 30 | 0
| Item4 | 30 | 30 | 30 | 0
----------------------------------------------------------------------------------------------------
We are using xml as the data source for the report. This is the XML:
<?xml version="1.0" encoding="UTF-8"?>
<data>
<period>
<fromDate>someFromDate</fromDate>
<toDate>someToDate greater than fromDate</toDate>
</period>
<receiptList>
<someStoresInwardReceiptNumber>
<_id>someMongoGivenId</_id>
<challanNo>someChallanNumber</challanNo>
<itemList>
<item>
<_id>someItemId</_id>
<description>someDescription</description>
<productCatlgNo>someProductCatalogNo</productCatlgNo>
<quantityInNumbers>
<acceptedQuantity>20</acceptedQuantity>
<declaredQuantity>20</declaredQuantity>
<receivedQuantity>20</receivedQuantity>
<rejectedQuantity>0</rejectedQuantity>
<units>pcs</units>
</quantityInNumbers>
<quantityinWeight>
<acceptedWeight>0</acceptedWeight>
<declaredWeight>0</declaredWeight>
<receivedWeight>0</receivedWeight>
<rejectedWeight>0</rejectedWeight>
<units null="true" />
</quantityinWeight>
</item>
<item>
<_id>someItemId1</_id>
<description>someDescription1</description>
<productCatlgNo>someProductCatalogNo1</productCatlgNo>
<quantityInNumbers>
<acceptedQuantity>0</acceptedQuantity>
<declaredQuantity>0</declaredQuantity>
<receivedQuantity>0</receivedQuantity>
<rejectedQuantity>0</rejectedQuantity>
<units null="true" />
</quantityInNumbers>
<quantityinWeight>
<acceptedWeight>300</acceptedWeight>
<declaredWeight>300</declaredWeight>
<receivedWeight>300</receivedWeight>
<rejectedWeight>300</rejectedWeight>
<units>kgs</units>
</quantityinWeight>
</item>
</itemList>
<partyName>somePartyName</partyName>
<receiptDate>someDate</receiptDate>
<receiptNumber>someStoresInwardReceiptNumber</receiptNumber>
</someStoresInwardReceiptNumber>
<someOtherStoresInwardReceiptNumber>
<_id>someMongoGivenId</_id>
<challanNo>someChallanNumber</challanNo>
<itemList>
<item>
<_id>someItemId</_id>
<description>someDescription</description>
<productCatlgNo>someProductCatalogNo</productCatlgNo>
<quantityInNumbers>
<acceptedQuantity>20</acceptedQuantity>
<declaredQuantity>20</declaredQuantity>
<receivedQuantity>20</receivedQuantity>
<rejectedQuantity>0</rejectedQuantity>
<units>pcs</units>
</quantityInNumbers>
<quantityinWeight>
<acceptedWeight>0</acceptedWeight>
<declaredWeight>0</declaredWeight>
<receivedWeight>0</receivedWeight>
<rejectedWeight>0</rejectedWeight>
<units null="true" />
</quantityinWeight>
</item>
<item>
<_id>someItemId1</_id>
<description>someDescription1</description>
<productCatlgNo>someProductCatalogNo1</productCatlgNo>
<quantityInNumbers>
<acceptedQuantity>0</acceptedQuantity>
<declaredQuantity>0</declaredQuantity>
<receivedQuantity>0</receivedQuantity>
<rejectedQuantity>0</rejectedQuantity>
<units null="true" />
</quantityInNumbers>
<quantityinWeight>
<acceptedWeight>300</acceptedWeight>
<declaredWeight>300</declaredWeight>
<receivedWeight>300</receivedWeight>
<rejectedWeight>300</rejectedWeight>
<units>kgs</units>
</quantityinWeight>
</item>
</itemList>
<partyName>somePartyName</partyName>
<receiptDate>someDate</receiptDate>
<receiptNumber>someOtherStoresInwardReceiptNumber</receiptNumber>
</someOtherStoresInwardReceiptNumber>
</receiptList>
</data>
Is using subreport the only way to create this report? Or is there a way which we are not aware of?
Thanks in Advance.
We can "group by" these results either by Rcpt Date or by Dealer Name.
Thanks