NiFi - Disable Client Certificate Request

I'm using NiFi 1.11.4. I have HTTPS and simple LDAP authentication set up on NiFi; however, it still asks for a client certificate when navigating to the page. If I select a certificate it fails, which is understandable, since I have not set up client certificates. If I cancel, it goes to the login screen.
Is there any way to make it not check for client certificates, since I am using LDAP to log in?
I saw some properties for turning this off, but those properties seem to be gone. The documentation mentions that NiFi will ask for a client certificate unless another authentication method is set up. However, with LDAP set up, it is still asking for a certificate.
login-identity-providers.xml:
<?xml version="1.0" encoding="UTF-8" standalone="yes"?>
<loginIdentityProviders>
    <provider>
        <identifier>ldap-provider</identifier>
        <class>org.apache.nifi.ldap.LdapProvider</class>
        <property name="Authentication Strategy">SIMPLE</property>
        <property name="Manager DN">CN=blah,OU=USERS,OU=LAW,DC=na,DC=ad,DC=test,DC=com</property>
        <property name="Manager Password">secret</property>
        <property name="TLS - Keystore"></property>
        <property name="TLS - Keystore Password"></property>
        <property name="TLS - Keystore Type"></property>
        <property name="TLS - Truststore"></property>
        <property name="TLS - Truststore Password"></property>
        <property name="TLS - Truststore Type"></property>
        <property name="TLS - Client Auth"></property>
        <property name="TLS - Protocol"></property>
        <property name="TLS - Shutdown Gracefully"></property>
        <property name="Referral Strategy">IGNORE</property>
        <property name="Connect Timeout">10 secs</property>
        <property name="Read Timeout">10 secs</property>
        <property name="Url">ldaps://ldapserver.na.ad.test.com</property>
        <property name="User Search Base">OU=USERS,OU=LAW,DC=na,DC=ad,DC=test,DC=com</property>
        <property name="User Search Filter">sAMAccountName={0}</property>
        <property name="Identity Strategy">USE_USERNAME</property>
        <property name="Authentication Expiration">12 hours</property>
    </provider>
</loginIdentityProviders>
nifi.properties file:
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Core Properties #
nifi.flow.configuration.file=./conf/flow.xml.gz
nifi.flow.configuration.archive.enabled=true
nifi.flow.configuration.archive.dir=./conf/archive/
nifi.flow.configuration.archive.max.time=30 days
nifi.flow.configuration.archive.max.storage=500 MB
nifi.flow.configuration.archive.max.count=
nifi.flowcontroller.autoResumeState=true
nifi.flowcontroller.graceful.shutdown.period=10 sec
nifi.flowservice.writedelay.interval=500 ms
nifi.administrative.yield.duration=30 sec
# If a component has no work to do (is "bored"), how long should we wait before checking again for work?
nifi.bored.yield.duration=10 millis
nifi.queue.backpressure.count=10000
nifi.queue.backpressure.size=1 GB
nifi.authorizer.configuration.file=./conf/authorizers.xml
nifi.login.identity.provider.configuration.file=./conf/login-identity-providers.xml
nifi.templates.directory=./conf/templates
nifi.ui.banner.text=
nifi.ui.autorefresh.interval=30 sec
nifi.nar.library.directory=./lib
nifi.nar.library.autoload.directory=./extensions
nifi.nar.working.directory=./work/nar/
nifi.documentation.working.directory=./work/docs/components
####################
# State Management #
####################
nifi.state.management.configuration.file=./conf/state-management.xml
# The ID of the local state provider
nifi.state.management.provider.local=local-provider
# The ID of the cluster-wide state provider. This will be ignored if NiFi is not clustered but must be populated if running in a cluster.
nifi.state.management.provider.cluster=zk-provider
# Specifies whether or not this instance of NiFi should run an embedded ZooKeeper server
nifi.state.management.embedded.zookeeper.start=false
# Properties file that provides the ZooKeeper properties to use if <nifi.state.management.embedded.zookeeper.start> is set to true
nifi.state.management.embedded.zookeeper.properties=./conf/zookeeper.properties
# H2 Settings
nifi.database.directory=./database_repository
nifi.h2.url.append=;LOCK_TIMEOUT=25000;WRITE_DELAY=0;AUTO_SERVER=FALSE
# FlowFile Repository
nifi.flowfile.repository.implementation=org.apache.nifi.controller.repository.WriteAheadFlowFileRepository
nifi.flowfile.repository.wal.implementation=org.apache.nifi.wali.SequentialAccessWriteAheadLog
nifi.flowfile.repository.directory=./flowfile_repository
nifi.flowfile.repository.partitions=256
nifi.flowfile.repository.checkpoint.interval=2 mins
nifi.flowfile.repository.always.sync=false
nifi.flowfile.repository.encryption.key.provider.implementation=
nifi.flowfile.repository.encryption.key.provider.location=
nifi.flowfile.repository.encryption.key.id=
nifi.flowfile.repository.encryption.key=
nifi.swap.manager.implementation=org.apache.nifi.controller.FileSystemSwapManager
nifi.queue.swap.threshold=20000
nifi.swap.in.period=5 sec
nifi.swap.in.threads=1
nifi.swap.out.period=5 sec
nifi.swap.out.threads=4
# Content Repository
nifi.content.repository.implementation=org.apache.nifi.controller.repository.FileSystemRepository
nifi.content.claim.max.appendable.size=1 MB
nifi.content.claim.max.flow.files=100
nifi.content.repository.directory.default=./content_repository
nifi.content.repository.archive.max.retention.period=12 hours
nifi.content.repository.archive.max.usage.percentage=50%
nifi.content.repository.archive.enabled=true
nifi.content.repository.always.sync=false
nifi.content.viewer.url=../nifi-content-viewer/
nifi.content.repository.encryption.key.provider.implementation=
nifi.content.repository.encryption.key.provider.location=
nifi.content.repository.encryption.key.id=
nifi.content.repository.encryption.key=
# Provenance Repository Properties
nifi.provenance.repository.implementation=org.apache.nifi.provenance.WriteAheadProvenanceRepository
nifi.provenance.repository.debug.frequency=1_000_000
nifi.provenance.repository.encryption.key.provider.implementation=
nifi.provenance.repository.encryption.key.provider.location=
nifi.provenance.repository.encryption.key.id=
nifi.provenance.repository.encryption.key=
# Persistent Provenance Repository Properties
nifi.provenance.repository.directory.default=./provenance_repository
nifi.provenance.repository.max.storage.time=24 hours
nifi.provenance.repository.max.storage.size=1 GB
nifi.provenance.repository.rollover.time=30 secs
nifi.provenance.repository.rollover.size=100 MB
nifi.provenance.repository.query.threads=2
nifi.provenance.repository.index.threads=2
nifi.provenance.repository.compress.on.rollover=true
nifi.provenance.repository.always.sync=false
# Comma-separated list of fields. Fields that are not indexed will not be searchable. Valid fields are:
# EventType, FlowFileUUID, Filename, TransitURI, ProcessorID, AlternateIdentifierURI, Relationship, Details
nifi.provenance.repository.indexed.fields=EventType, FlowFileUUID, Filename, ProcessorID, Relationship
# FlowFile Attributes that should be indexed and made searchable. Some examples to consider are filename, uuid, mime.type
nifi.provenance.repository.indexed.attributes=
# Large values for the shard size will result in more Java heap usage when searching the Provenance Repository
# but should provide better performance
nifi.provenance.repository.index.shard.size=500 MB
# Indicates the maximum length that a FlowFile attribute can be when retrieving a Provenance Event from
# the repository. If the length of any attribute exceeds this value, it will be truncated when the event is retrieved.
nifi.provenance.repository.max.attribute.length=65536
nifi.provenance.repository.concurrent.merge.threads=2
# Volatile Provenance Repository Properties
nifi.provenance.repository.buffer.size=100000
# Component Status Repository
nifi.components.status.repository.implementation=org.apache.nifi.controller.status.history.VolatileComponentStatusRepository
nifi.components.status.repository.buffer.size=1440
nifi.components.status.snapshot.frequency=1 min
# Site to Site properties
nifi.remote.input.host=lawdev1
nifi.remote.input.secure=true
nifi.remote.input.socket.port=10443
nifi.remote.input.http.enabled=true
nifi.remote.input.http.transaction.ttl=30 sec
nifi.remote.contents.cache.expiration=30 secs
# web properties #
nifi.web.war.directory=./lib
nifi.web.http.host=
nifi.web.http.port=
nifi.web.http.network.interface.default=
nifi.web.https.host=lawdev1
nifi.web.https.port=9443
nifi.web.https.network.interface.default=
nifi.web.jetty.working.directory=./work/jetty
nifi.web.jetty.threads=200
nifi.web.max.header.size=16 KB
nifi.web.proxy.context.path=
nifi.web.proxy.host=
# security properties #
nifi.sensitive.props.key=
nifi.sensitive.props.key.protected=
nifi.sensitive.props.algorithm=PBEWITHMD5AND256BITAES-CBC-OPENSSL
nifi.sensitive.props.provider=BC
nifi.sensitive.props.additional.keys=
nifi.security.keystore=./conf/keystore.jks
nifi.security.keystoreType=jks
nifi.security.keystorePasswd=secret
nifi.security.keyPasswd=secret
nifi.security.truststore=./conf/truststore.jks
nifi.security.truststoreType=jks
nifi.security.truststorePasswd=secret
nifi.security.user.authorizer=managed-authorizer
nifi.security.user.login.identity.provider=ldap-provider
nifi.security.ocsp.responder.url=
nifi.security.ocsp.responder.certificate=
nifi.security.needClientAuth=false
# OpenId Connect SSO Properties #
nifi.security.user.oidc.discovery.url=
nifi.security.user.oidc.connect.timeout=5 secs
nifi.security.user.oidc.read.timeout=5 secs
nifi.security.user.oidc.client.id=
nifi.security.user.oidc.client.secret=
nifi.security.user.oidc.preferred.jwsalgorithm=
nifi.security.user.oidc.additional.scopes=
nifi.security.user.oidc.claim.identifying.user=
# Apache Knox SSO Properties #
nifi.security.user.knox.url=
nifi.security.user.knox.publicKey=
nifi.security.user.knox.cookieName=hadoop-jwt
nifi.security.user.knox.audiences=
# Identity Mapping Properties #
# These properties allow normalizing user identities such that identities coming from different identity providers
# (certificates, LDAP, Kerberos) can be treated the same internally in NiFi. The following example demonstrates normalizing
# DNs from certificates and principals from Kerberos into a common identity string:
#
# nifi.security.identity.mapping.pattern.dn=^CN=(.*?), OU=(.*?), O=(.*?), L=(.*?), ST=(.*?), C=(.*?)$
# nifi.security.identity.mapping.value.dn=$1@$2
# nifi.security.identity.mapping.transform.dn=NONE
# nifi.security.identity.mapping.pattern.kerb=^(.*?)/instance@(.*?)$
# nifi.security.identity.mapping.value.kerb=$1@$2
# nifi.security.identity.mapping.transform.kerb=UPPER
# Group Mapping Properties #
# These properties allow normalizing group names coming from external sources like LDAP. The following example
# lowercases any group name.
#
# nifi.security.group.mapping.pattern.anygroup=^(.*)$
# nifi.security.group.mapping.value.anygroup=$1
# nifi.security.group.mapping.transform.anygroup=LOWER
# cluster common properties (all nodes must have same values) #
nifi.cluster.protocol.heartbeat.interval=5 sec
nifi.cluster.protocol.is.secure=true
# cluster node properties (only configure for cluster nodes) #
nifi.cluster.is.node=false
nifi.cluster.node.address=lawdev1
nifi.cluster.node.protocol.port=11443
nifi.cluster.node.protocol.threads=10
nifi.cluster.node.protocol.max.threads=50
nifi.cluster.node.event.history.size=25
nifi.cluster.node.connection.timeout=5 sec
nifi.cluster.node.read.timeout=5 sec
nifi.cluster.node.max.concurrent.requests=100
nifi.cluster.firewall.file=
nifi.cluster.flow.election.max.wait.time=5 mins
nifi.cluster.flow.election.max.candidates=
# cluster load balancing properties #
nifi.cluster.load.balance.host=
nifi.cluster.load.balance.port=6342
nifi.cluster.load.balance.connections.per.node=4
nifi.cluster.load.balance.max.thread.count=8
nifi.cluster.load.balance.comms.timeout=30 sec
# zookeeper properties, used for cluster management #
nifi.zookeeper.connect.string=
nifi.zookeeper.connect.timeout=3 secs
nifi.zookeeper.session.timeout=3 secs
nifi.zookeeper.root.node=/nifi
# Zookeeper properties for the authentication scheme used when creating acls on znodes used for cluster management
# Values supported for nifi.zookeeper.auth.type are "default", which will apply world/anyone rights on znodes
# and "sasl" which will give rights to the sasl/kerberos identity used to authenticate the nifi node
# The identity is determined using the value in nifi.kerberos.service.principal and the removeHostFromPrincipal
# and removeRealmFromPrincipal values (which should align with the kerberos.removeHostFromPrincipal and kerberos.removeRealmFromPrincipal
# values configured on the zookeeper server).
nifi.zookeeper.auth.type=
nifi.zookeeper.kerberos.removeHostFromPrincipal=
nifi.zookeeper.kerberos.removeRealmFromPrincipal=
# kerberos #
nifi.kerberos.krb5.file=
# kerberos service principal #
nifi.kerberos.service.principal=
nifi.kerberos.service.keytab.location=
# kerberos spnego principal #
nifi.kerberos.spnego.principal=
nifi.kerberos.spnego.keytab.location=
nifi.kerberos.spnego.authentication.expiration=12 hours
# external properties files for variable registry
# supports a comma delimited list of file locations
nifi.variable.registry.properties=
# analytics properties #
nifi.analytics.predict.enabled=false
nifi.analytics.predict.interval=3 mins
nifi.analytics.query.interval=5 mins
nifi.analytics.connection.model.implementation=org.apache.nifi.controller.status.analytics.models.OrdinaryLeastSquares
nifi.analytics.connection.model.score.name=rSquared
nifi.analytics.connection.model.score.threshold=.90
Thanks,
Dusty Ryba

nifi.security.needClientAuth=false
works for old versions of NiFi.
In newer versions:
"NiFi’s web server will REQUIRE certificate based client authentication for users accessing the User Interface when not configured with an alternative authentication mechanism which would require one way SSL (for instance LDAP, OpenId Connect, etc). Enabling an alternative authentication mechanism will configure the web server to WANT certificate base client authentication." https://nifi.apache.org/docs/nifi-docs/html/administration-guide.html#security_configuration
So I think that disabling it is impossible. Since your LDAP provider is configured, the server only WANTs a certificate: the browser may still prompt for one, but cancelling the prompt falls through to the LDAP login screen.
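In practice, WANT means the TLS handshake still succeeds when no client certificate is presented; only the browser prompt remains. A quick way to confirm from the command line (hostname and port taken from the nifi.properties above; -k skips verification of the self-signed server certificate):

curl -k https://lawdev1:9443/nifi

With no client certificate supplied, the request should still reach NiFi instead of failing the handshake.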

Related

I cannot log in to the Chainlink GUI

I am using this helm chart
https://artifacthub.io/packages/helm/vulcanlink/chainlink
I managed to launch and connect the Chainlink node to Postgres with these values:
config:
  # Login Info
  ROOT: /chainlink
  API_LOGIN: |
    API_EMAIL=admin@admin.com
    API_LOGIN=admin
  WALLET_PASSWORD: "9xMR9PN7CTk6Axs" # a random test password based on chainlink's demands
  # HTTP Security
  ALLOW_ORIGINS: "*"
  SECURE_COOKIES: "false"
  CHAINLINK_PORT: "6688"
  CHAINLINK_TLS_PORT: "0"
  # Database
  DATABASE_TIMEOUT: "0"
  DATABASE_URL: postgresql://chainlink:chainlink@pgdb-postgresql:5432/chainlink?sslmode=disable
  # Ethereum
  ETH_URL: wss://rinkeby.infura.io/ws/v3/somerandomnumber # ws://geth:8546
  ETH_CHAIN_ID: "4"
  LINK_CONTRACT_ADDRESS: 0x514910771af9ca656af840dff83e8264ecf986ca # this was here ...
I port-forward the k8s service and I see the Chainlink UI.
But which combination of the above should I use to log in?
I have tried them all.
EDIT
In order to change the env vars, I ended up destroying the whole minikube env. Insane, and I have no idea why...
Now I get this in the logs
There are no accounts, creating a new account with the specified password
There are no P2P keys; creating a new key encrypted with given password
There are no OCR keys; creating a new key encrypted with given password
2022-09-02T10:22:50Z [INFO] API exposed for user API_EMAIL=admin@admin.com cmd/local_client.go:122
2022-09-02T10:23:32Z [INFO] POST /sessions web/router.go:433 body={"email":"admin@admin.com","password":"*REDACTED*"} clientIP=127.0.0.1 errors=Error #01: Invalid email
latency=4.918708ms method=POST path=/sessions servedAt=2022-09-02 10:23:32 status=401
... so I still cannot log in to the GUI. It is frustrating.
EDIT
This is what happens when the instructions are not clear...
The chart apparently writes the API_LOGIN block verbatim into the node's API credentials file (first line = email, second line = password), so the username was literally API_EMAIL=admin@admin.com and the password was literally API_LOGIN=admin.
Now I can log in... but I will surely change them...
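For reference, a hedged values sketch, assuming (based on the "API exposed for user" log line above) that the chart writes the API_LOGIN block verbatim into the node's two-line API credentials file, where the first line is the login email and the second the password; the password below is a made-up placeholder:

config:
  API_LOGIN: |
    admin@admin.com
    someStrongPassword

With plain values like these, the GUI credentials would be exactly admin@admin.com / someStrongPassword, without the KEY= prefixes leaking into them.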

Not able to send Filebeat output to MongoDB

I have added output.mongodb in my filebeat.yml file, but it fails with the error "Exiting: error initializing publisher: output type mongodb undefined".
Does anyone here have a different, fail-safe approach to this requirement, where I want to redirect Filebeat output directly to a Mongo database?
filebeat.yml file:
###################### Filebeat Configuration Example #########################
# This file is an example configuration file highlighting only the most common
# options. The filebeat.reference.yml file from the same directory contains all the
# supported options with more comments. You can use it as a reference.
#
# You can find the full configuration reference here:
# https://www.elastic.co/guide/en/beats/filebeat/index.html
# For more available modules and options, please see the filebeat.reference.yml sample
# configuration file.
#=========================== Filebeat inputs =============================
filebeat.inputs:
# Each - is an input. Most options can be set at the input level, so
# you can use different inputs for various configurations.
# Below are the input specific configurations.
- type: log
  # Change to true to enable this input configuration.
  enabled: true
  # Paths that should be crawled and fetched. Glob based paths.
  paths:
    - /var/log/test.log
    #- c:\programdata\elasticsearch\logs\*
  # Exclude lines. A list of regular expressions to match. It drops the lines that are
  # matching any regular expression from the list.
  #exclude_lines: ['^DBG']
  # Include lines. A list of regular expressions to match. It exports the lines that are
  # matching any regular expression from the list.
  #include_lines: ['^ERR', '^WARN']
  # Exclude files. A list of regular expressions to match. Filebeat drops the files that
  # are matching any regular expression from the list. By default, no files are dropped.
  #exclude_files: ['.gz$']
  # Optional additional fields. These fields can be freely picked
  # to add additional information to the crawled log files for filtering
  #fields:
  #  level: debug
  #  review: 1
  ### Multiline options
  # Multiline can be used for log messages spanning multiple lines. This is common
  # for Java Stack Traces or C-Line Continuation
  # The regexp Pattern that has to be matched. The example pattern matches all lines starting with [
  #multiline.pattern: ^\[
  # Defines if the pattern set under pattern should be negated or not. Default is false.
  #multiline.negate: false
  # Match can be set to "after" or "before". It is used to define if lines should be append to a pattern
  # that was (not) matched before or after or as long as a pattern is not matched based on negate.
  # Note: After is the equivalent to previous and before is the equivalent to to next in Logstash
  #multiline.match: after
#============================= Filebeat modules ===============================
filebeat.config.modules:
  # Glob pattern for configuration loading
  path: ${path.config}/modules.d/*.yml
  # Set to true to enable config reloading
  reload.enabled: false
  # Period on which files under path should be checked for changes
  reload.period: 5s
#==================== Elasticsearch template setting ==========================
setup.template.settings:
  index.number_of_shards: 2
  #index.codec: best_compression
  #_source.enabled: false
#================================ General =====================================
# The name of the shipper that publishes the network data. It can be used to group
# all the transactions sent by a single shipper in the web interface.
#name:
# The tags of the shipper are included in their own field with each
# transaction published.
#tags: ["service-X", "web-tier"]
# Optional fields that you can specify to add additional information to the
# output.
#fields:
#  env: staging
#============================== Dashboards =====================================
# These settings control loading the sample dashboards to the Kibana index. Loading
# the dashboards is disabled by default and can be enabled either by setting the
# options here or by using the `setup` command.
#setup.dashboards.enabled: false
# The URL from where to download the dashboards archive. By default this URL
# has a value which is computed based on the Beat name and version. For released
# versions, this URL points to the dashboard archive on the artifacts.elastic.co
# website.
#setup.dashboards.url:
#============================== Kibana =====================================
# Starting with Beats version 6.0.0, the dashboards are loaded via the Kibana API.
# This requires a Kibana endpoint configuration.
setup.kibana:
  # Kibana Host
  # Scheme and port can be left out and will be set to the default (http and 5601)
  # In case you specify and additional path, the scheme is required: http://localhost:5601/path
  # IPv6 addresses should always be defined as: https://[2001:db8::1]:5601
  host: "10.27.3.235:5601"
  # Kibana Space ID
  # ID of the Kibana Space into which the dashboards should be loaded. By default,
  # the Default Space will be used.
  #space.id:
#============================= Elastic Cloud ==================================
# These settings simplify using Filebeat with the Elastic Cloud (https://cloud.elastic.co/).
# The cloud.id setting overwrites the `output.elasticsearch.hosts` and
# `setup.kibana.host` options.
# You can find the `cloud.id` in the Elastic Cloud web UI.
#cloud.id:
# The cloud.auth setting overwrites the `output.elasticsearch.username` and
# `output.elasticsearch.password` settings. The format is `<user>:<pass>`.
#cloud.auth:
#================================ Outputs =====================================
# Configure what output to use when sending the data collected by the beat.
#-------------------------- Elasticsearch output ------------------------------
# output.elasticsearch:
# # Array of hosts to connect to.
# # hosts: ["10.27.3.235:9200"]
# hosts: ["http://10.27.3.235:9200"]
# index: "filebeatSYS-%{[agent.version]}-%{+yyyy.MM.dd}"
# setup.template:
# name: 'api-access'
# pattern: 'api-access-*'
# enabled: false
#
# # Optional protocol and basic auth credentials.
# #protocol: "https"
# #username: "elastic"
# #password: "changeme"
# #index: "filebeat-%{+yyyy.MM.dd}"
#-------------------------- MongoDB output ------------------------------
output.mongodb:
  enabled: true
  # URL format, according to mgo.v2 doc : [mongodb://][user:pass@]host1[:port1][,host2[:port2],...][/database][?options]
  # More info : https://godoc.org/gopkg.in/mgo.v2#Dial
  hosts: ["mongodb://<my-db-url-inserted-here>:27017"]
  # The mongodb database to push to
  db: "<my-db-here>"
  # The database collection to push to
  # Could be configured like key/keys of the Redis output : https://www.elastic.co/guide/en/beats/filebeat/current/redis-output.html#_key_2
  collection: "filebeat"
  # https://www.elastic.co/guide/en/beats/filebeat/current/redis-output.html#_loadbalance
  loadbalance: true
  # https://www.elastic.co/guide/en/beats/filebeat/current/redis-output.html#_timeout_4
  timeout: 5s
  # https://www.elastic.co/guide/en/beats/filebeat/current/redis-output.html#_max_retries_4
  max_retries: 5
  # https://www.elastic.co/guide/en/beats/filebeat/current/redis-output.html#_bulk_max_size_4
  bulk_max_size: 2048
#----------------------------- Logstash output --------------------------------
#output.logstash:
# The Logstash hosts
#hosts: ["localhost:5044"]
# Optional SSL. By default is off.
# List of root certificates for HTTPS server verifications
#ssl.certificate_authorities: ["/etc/pki/root/ca.pem"]
# Certificate for SSL client authentication
#ssl.certificate: "/etc/pki/client/cert.pem"
# Client Certificate Key
#ssl.key: "/etc/pki/client/cert.key"
#================================ Processors =====================================
# Configure processors to enhance or manipulate events generated by the beat.
processors:
  - add_host_metadata: ~
  - add_cloud_metadata: ~
  - add_docker_metadata: ~
  - add_kubernetes_metadata: ~
#================================ Logging =====================================
# Sets log level. The default log level is info.
# Available log levels are: error, warning, info, debug
#logging.level: debug
# At debug level, you can selectively enable logging only for some components.
# To enable all selectors use ["*"]. Examples of other selectors are "beat",
# "publish", "service".
#logging.selectors: ["*"]
#============================== X-Pack Monitoring ===============================
# filebeat can export internal metrics to a central Elasticsearch monitoring
# cluster. This requires xpack monitoring to be enabled in Elasticsearch. The
# reporting is disabled by default.
# Set to true to enable the monitoring reporter.
#monitoring.enabled: false
# Sets the UUID of the Elasticsearch cluster under which monitoring data for this
# Filebeat instance will appear in the Stack Monitoring UI. If output.elasticsearch
# is enabled, the UUID is derived from the Elasticsearch cluster referenced by output.elasticsearch.
#monitoring.cluster_uuid:
# Uncomment to send the metrics to Elasticsearch. Most settings from the
# Elasticsearch output are accepted here as well.
# Note that the settings should point to your Elasticsearch *monitoring* cluster.
# Any setting that is not set is automatically inherited from the Elasticsearch
# output configuration, so if you have the Elasticsearch output configured such
# that it is pointing to your Elasticsearch monitoring cluster, you can simply
# uncomment the following line.
#monitoring.elasticsearch:
#================================= Migration ==================================
# This allows to enable 6.7 migration aliases
#migration.6_to_7.enabled: true
You get the error
Exiting: error initializing publisher: output type mongodb undefined
because Filebeat does not support this kind of output. Take a look at the output configuration docs for Filebeat: there is no output for MongoDB mentioned. Filebeat supports only the following outputs:
Elasticsearch
Logstash
Kafka
Redis
File
Console
Elastic Cloud
By defining
output.mongodb:
Filebeat crashes, because 'mongodb' is an unknown/undefined configuration field in the output element.
"Does anyone here have a different, fail-safe approach to this requirement, where I want to redirect Filebeat output directly to a Mongo database?"
Logstash has a dedicated MongoDB output plugin. So you could send the data from Filebeat to Logstash, which then sends it on to your MongoDB (not direct, but a valid workaround).
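A minimal sketch of that workaround, assuming the logstash-output-mongodb plugin has been installed (bin/logstash-plugin install logstash-output-mongodb); the URI, database, and collection values are the placeholders from the config above:

input {
  beats {
    port => 5044
  }
}
output {
  mongodb {
    uri => "mongodb://<my-db-url-inserted-here>:27017"
    database => "<my-db-here>"
    collection => "filebeat"
  }
}

On the Filebeat side you would then drop the output.mongodb section and enable output.logstash with hosts: ["<logstash-host>:5044"].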

Unable to connect to VM and k8s pod in Hazelcast

Is it possible to activate normal (TCP/IP) discovery as well as Kubernetes discovery at the same time? What I am trying to do: I have different services already running and connected to Hazelcast, and I want one service to connect to a service that is running on a VM which is not part of the k8s cluster.
<?xml version="1.0" encoding="UTF-8"?>
<!--
  ~ Copyright 2017 Red Hat, Inc.
  ~
  ~ Red Hat licenses this file to you under the Apache License, version 2.0
  ~ (the "License"); you may not use this file except in compliance with the
  ~ License. You may obtain a copy of the License at:
  ~
  ~ http://www.apache.org/licenses/LICENSE-2.0
  ~
  ~ Unless required by applicable law or agreed to in writing, software
  ~ distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
  ~ WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
  ~ License for the specific language governing permissions and limitations
  ~ under the License.
-->
<hazelcast xsi:schemaLocation="http://www.hazelcast.com/schema/config hazelcast-config-3.10.xsd"
           xmlns="http://www.hazelcast.com/schema/config"
           xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance">
    <properties>
        <property name="hazelcast.mancenter.enabled">false</property>
        <property name="hazelcast.memcache.enabled">false</property>
        <property name="hazelcast.rest.enabled">false</property>
        <property name="hazelcast.wait.seconds.before.join">5</property>
        <property name="hazelcast.socket.bind.any">false</property>
    </properties>
    <management-center enabled="false">http://localhost:8080/mancenter</management-center>
    <network>
        <port auto-increment="true" port-count="10000">5730</port>
        <outbound-ports>
            <!--
                Allowed port range when connecting to other nodes.
                0 or * means use system provided port.
            -->
            <ports>0</ports>
        </outbound-ports>
        <join>
            <multicast enabled="false">
                <multicast-group>224.2.2.3</multicast-group>
                <multicast-port>54327</multicast-port>
            </multicast>
            <tcp-ip enabled="true">
                <interface>10.0.2.17</interface>
                <interface>10.0.4.21</interface>
            </tcp-ip>
            <aws enabled="false">
                <access-key>my-access-key</access-key>
                <secret-key>my-secret-key</secret-key>
                <!-- optional, default is us-east-1 -->
                <region>us-west-1</region>
                <!-- optional, default is ec2.amazonaws.com. If set, region shouldn't be set as it will override this property -->
                <host-header>ec2.amazonaws.com</host-header>
                <!-- optional, only instances belonging to this group will be discovered, default will try all running instances -->
                <security-group-name>hazelcast-sg</security-group-name>
                <tag-key>type</tag-key>
                <tag-value>hz-nodes</tag-value>
            </aws>
        </join>
        <interfaces enabled="false">
            <interface>10.10.1.*</interface>
        </interfaces>
        <ssl enabled="false"/>
        <socket-interceptor enabled="false"/>
        <symmetric-encryption enabled="false">
            <!--
                encryption algorithm such as
                DES/ECB/PKCS5Padding,
                PBEWithMD5AndDES,
                AES/CBC/PKCS5Padding,
                Blowfish,
                DESede
            -->
            <algorithm>PBEWithMD5AndDES</algorithm>
            <!-- salt value to use when generating the secret key -->
            <salt>thesalt</salt>
            <!-- pass phrase to use when generating the secret key -->
            <password>thepass</password>
            <!-- iteration count to use when generating the secret key -->
            <iteration-count>19</iteration-count>
        </symmetric-encryption>
    </network>
    <partition-group enabled="false"/>
    <executor-service name="default">
        <pool-size>16</pool-size>
        <!-- Queue capacity. 0 means Integer.MAX_VALUE. -->
        <queue-capacity>0</queue-capacity>
    </executor-service>
    <multimap name="__vertx.subs">
        <!--
            Number of backups. If 1 is set as the backup-count for example,
            then all entries of the map will be copied to another JVM for
            fail-safety. 0 means no backup.
        -->
        <backup-count>1</backup-count>
    </multimap>
    <map name="__vertx.haInfo">
        <!--
            Number of backups. If 1 is set as the backup-count for example,
            then all entries of the map will be copied to another JVM for
            fail-safety. 0 means no backup.
        -->
        <backup-count>1</backup-count>
        <!--
            Maximum number of seconds for each entry to stay in the map. Entries that are
            older than <time-to-live-seconds> and not updated for <time-to-live-seconds>
            will get automatically evicted from the map.
            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
        -->
        <time-to-live-seconds>0</time-to-live-seconds>
        <!--
            Maximum number of seconds for each entry to stay idle in the map. Entries that are
            idle(not touched) for more than <max-idle-seconds> will get
            automatically evicted from the map. Entry is touched if get, put or containsKey is called.
            Any integer between 0 and Integer.MAX_VALUE. 0 means infinite. Default is 0.
        -->
        <max-idle-seconds>0</max-idle-seconds>
        <!--
            Valid values are:
            NONE (no eviction),
            LRU (Least Recently Used),
            LFU (Least Frequently Used).
            NONE is the default.
        -->
        <eviction-policy>NONE</eviction-policy>
        <!--
            Maximum size of the map. When max size is reached,
            map is evicted based on the policy defined.
            Any integer between 0 and Integer.MAX_VALUE. 0 means
            Integer.MAX_VALUE. Default is 0.
        -->
        <max-size policy="PER_NODE">0</max-size>
        <!--
            When max. size is reached, specified percentage of
            the map will be evicted. Any integer between 0 and 100.
            If 25 is set for example, 25% of the entries will
            get evicted.
        -->
        <eviction-percentage>25</eviction-percentage>
        <!--
            While recovering from split-brain (network partitioning),
            map entries in the small cluster will merge into the bigger cluster
            based on the policy set here. When an entry merge into the
            cluster, there might an existing entry with the same key already.
            Values of these entries might be different for that same key.
            Which value should be set for the key? Conflict is resolved by
            the policy set here. Default policy is PutIfAbsentMapMergePolicy
            There are built-in merge policies such as
            com.hazelcast.map.merge.PassThroughMergePolicy; entry will be added if there is no existing entry for the key.
            com.hazelcast.map.merge.PutIfAbsentMapMergePolicy; entry will be added if the merging entry doesn't exist in the cluster.
            com.hazelcast.map.merge.HigherHitsMapMergePolicy; entry with the higher hits wins.
            com.hazelcast.map.merge.LatestUpdateMapMergePolicy; entry with the latest update wins.
        -->
        <merge-policy>com.hazelcast.map.merge.LatestUpdateMapMergePolicy</merge-policy>
    </map>
    <!-- Used internally in Vert.x to implement async locks -->
    <semaphore name="__vertx.*">
        <initial-permits>1</initial-permits>
    </semaphore>
</hazelcast>
No, it's not possible to have multiple discovery strategies (joiners) enabled at the same time. Please check this for details.
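A common fallback, assuming the VM and the pods can route to each other directly (all addresses below are placeholders): pick the TCP/IP joiner alone and list every member explicitly, including the VM that lives outside the cluster:

<join>
    <multicast enabled="false"/>
    <tcp-ip enabled="true">
        <!-- placeholder: pod-routable address inside k8s -->
        <member>10.0.4.21:5730</member>
        <!-- placeholder: VM outside the k8s cluster -->
        <member>10.0.2.17:5730</member>
    </tcp-ip>
</join>

This is only a sketch: it trades Kubernetes auto-discovery for static addresses, so pod IP churn has to be handled separately (for example via a stable service address).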

How does JBoss load the login-config.xml file?

I was examining an application to try to improve its response time when I came across the following excerpt from the file "jboss-4.2.3.GA\server\default\conf\login-config.xml":
<application-policy name="domainroles">
    <authentication>
        <login-module code="org.jboss.security.auth.spi.DatabaseServerLoginModule" flag="required">
            <module-option name="dsJndiName">java:jdbc/PostgreAPP</module-option>
            <module-option name="principalsQuery">select "TX_PASS" from appbd."TB_SYSTEM" where "NM_SYSTEM"=?</module-option>
            <module-option name="rolesQuery">select "NM_TRANSACTION" || "CD_OPTION", 'Roles' from appbd."TB_TRANSATION" where "ID_TR" in (select "ID_TR" from appbd."TB_TR_SYSTEM" where "ID_SYSTEM" in (select "ID_SYSTEM" from appbd."TB_SYSTEM" where "NM_SYSTEM" = ?))</module-option>
        </login-module>
    </authentication>
</application-policy>
My question is: for each incoming web service request, when JBoss checks whether the user can access that particular service, does it run a select against the database, or does it load everything while starting the server?
If I replace the SELECT with a properties file, will I get a significant improvement in performance?
Note: the application receives 5000 requests per minute.
Thank you
JBoss does not run the select on every request, nor does it load everything at startup: the JaasSecurityManagerService caches authentication results to avoid constant access to the security store associated with the login modules, and queries the database only on a cache miss. The default cache policy is a time-based policy controlled by the DefaultCacheTimeout attribute.
<!-- JAAS security manager and realm mapping -->
<mbean code="org.jboss.security.plugins.JaasSecurityManagerService"
       name="jboss.security:service=JaasSecurityManager">
    <attribute name="SecurityManagerClassName">org.jboss.security.plugins.JaasSecurityManager</attribute>
    <attribute name="DefaultUnauthenticatedPrincipal">anonymous</attribute>
    <!-- DefaultCacheTimeout: Specifies the default timed cache policy timeout
         in seconds.
         If you want to disable caching of security credentials, set this to 0 to
         force authentication to occur every time. This has no affect if the
         AuthenticationCacheJndiName has been changed from the default value.
    -->
    <attribute name="DefaultCacheTimeout">1800</attribute>
    <!-- DefaultCacheResolution: Specifies the default timed cache policy
         resolution in seconds. This controls the interval at which the cache
         current timestamp is updated and should be less than the DefaultCacheTimeout
         in order for the timeout to be meaningful. This has no affect if the
         AuthenticationCacheJndiName has been changed from the default value.
    -->
    <attribute name="DefaultCacheResolution">60</attribute>
</mbean>
DefaultCacheTimeout specifies the default timed cache policy timeout in seconds. The default value is 1800 seconds (30 minutes).
The value you use for the timeout is a trade-off between frequent authentication operations and how long credential information may be out of sync with respect to the security information store. If you want to disable caching of security credentials, set this to 0 to force authentication to occur every time. This has no effect if the AuthenticationCacheJndiName has been changed from the default value.
As for changing the SELECT to a properties file: it depends on the number of users and roles, the filesystem, etc.
Also, the database itself will cache repeated queries.
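If you do want to try the properties-file route, here is a hypothetical replacement policy using JBoss's built-in UsersRolesLoginModule (it reads users.properties and roles.properties from the classpath; the file names shown are the module's defaults):

<application-policy name="domainroles">
    <authentication>
        <login-module code="org.jboss.security.auth.spi.UsersRolesLoginModule" flag="required">
            <module-option name="usersProperties">users.properties</module-option>
            <module-option name="rolesProperties">roles.properties</module-option>
        </login-module>
    </authentication>
</application-policy>

Given the 30-minute authentication cache above, though, the store is only hit on cache misses, so the performance gain from switching may be small.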

Solr 3.1 JBoss server deployment failed

When I deploy Solr 3.1 to the JBoss application server (version 6.0 Final), I get the following exception message:
Failed to create Resource solr.war - cause: java.lang.Exception:Failed to start deployment [vfs:///D:/jboss-6.0.0.Final/server/default/deploy/solr.war] during deployment of 'solr.war' - cause: java.lang.RuntimeException:org.jboss.deployers.client.spi.IncompleteDeploymentException: Summary of incomplete deployments (SEE PREVIOUS ERRORS FOR DETAILS): * DEPLOYMENTS IN ERROR: Name -> Error vfs:///D:/jboss-6.0.0.Final/server/default/deploy/solr.war -> org.jboss.deployers.spi.DeploymentException: Error creating managed object for vfs:///D:/jboss-6.0.0.Final/server/default/deploy/solr.war DEPLOYMENTS IN ERROR: Deployment "vfs:///D:/jboss-6.0.0.Final/server/default/deploy/solr.war" is in error due to the following reason(s): org.xml.sax.SAXException: Element type "tlibversion" must be declared. # vfs:///D:/jboss-6.0.0.Final/server/default/deploy/solr.war/WEB-INF/lib/velocity-tools-2.0-beta3.jar/META-INF/velocity-view.tld[22,16] ->
I wonder why this error occurred.
I tried to deploy both Solr version 1.4 and 4.0 to the same server and no error occurred.
(My deploy method: use the JBoss AS 6 Admin Console and add "solr.war" as a new resource for a standalone web application.)
Thank you for your attention; any help is appreciated.
Me again :) ... good news, I fixed it. I just edited this file: solr.war\WEB-INF\lib\velocity-tools-2.0-beta3.jar\META-INF\velocity-view.tld
and changed it to this (you can copy and paste it as is). The original TLD apparently used the old JSP 1.1 element name tlibversion, which the JSP 1.2 DTD does not declare; hence the SAX validation error:
<?xml version="1.0" encoding="UTF-8"?>
<!DOCTYPE taglib PUBLIC "-//Sun Microsystems, Inc.//DTD JSP Tag Library 1.2//EN" "http://java.sun.com/dtd/web-jsptaglibrary_1_2.dtd">
<!--
    Licensed to the Apache Software Foundation (ASF) under one
    or more contributor license agreements. See the NOTICE file
    distributed with this work for additional information
    regarding copyright ownership. The ASF licenses this file
    to you under the Apache License, Version 2.0 (the
    "License"); you may not use this file except in compliance
    with the License. You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

    Unless required by applicable law or agreed to in writing,
    software distributed under the License is distributed on an
    "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
    KIND, either express or implied. See the License for the
    specific language governing permissions and limitations
    under the License.
-->
<taglib>
    <tlib-version>1.0</tlib-version>
    <jsp-version>1.2</jsp-version>
    <short-name>velocity</short-name>
    <uri>http://velocity.apache.org/velocity-view</uri>
    <display-name>VelocityView Tag</display-name>
    <description><![CDATA[Support for using Velocity and VelocityTools within JSP files and tags.
        This makes it trivial to render VTL (Velocity Template Language)
        or process a Velocity template from within JSP using the current
        context. This also provides the typical VelocityView support
        for accessing and configuring both custom and provided
        VelocityTools.]]></description>
    <tag>
        <name>view</name>
        <tag-class>org.apache.velocity.tools.view.jsp.VelocityViewTag</tag-class>
        <body-content>tagdependent</body-content>
        <attribute>
            <name>id</name>
            <required>false</required>
            <rtexprvalue>true</rtexprvalue>
            <description><![CDATA[A id unique to this usage of the VelocityViewTag. This id is used to uniquely identify this tag in log messages and hopefully at some point serve as a key under which any body for this tag may be cached as an already-parsed template for improved performance. If no id is specified, then a unique is automatically generated, though that will understandably be less useful in log messages.]]></description>
        </attribute>
        <attribute>
            <name>var</name>
            <required>false</required>
            <rtexprvalue>true</rtexprvalue>
            <description><![CDATA[A variable name whose value should be set to the rendered result of this tag.]]></description>
        </attribute>
        <attribute>
            <name>scope</name>
            <required>false</required>
            <rtexprvalue>true</rtexprvalue>
            <description><![CDATA[This property is meaningless unless a 'var' attribute is also set. When it is, this determines the scope into which the resulting variable is set.]]></description>
        </attribute>
        <attribute>
            <name>template</name>
            <required>false</required>
            <rtexprvalue>true</rtexprvalue>
            <description><![CDATA[The name of a template to be requested from the configured Velocity resource loaders and rendered into the page (or variable if the 'var' attribute is set) using the current context. If this tag also has body content, then the body will be rendered first and placed into the context used to render the template as '$bodyContent'; this approximates the "two-pass render" used by the VelocityLayoutServlet.]]></description>
        </attribute>
        <attribute>
            <name>bodyContentKey</name>
            <required>false</required>
            <rtexprvalue>true</rtexprvalue>
            <description><![CDATA[This property is meaningless unless a 'template' attribute is set and the tag has body content in it. When it is, this changes the key under which the rendered result of the body content is placed into the context for use by the specified template. The default value is "bodyContent" and should be sufficient for nearly all users.]]></description>
        </attribute>
    </tag>
</taglib>