MySQL not connecting to sphinxsearch - sphinx

I am unable to connect to sphinxsearch with the mysql client:
mysql -h0 -P3306
ERROR 2003 (HY000): Can't connect to MySQL server on '0' (111)
How can I fix this error?
This is my config file, sphinx.conf.
Do I need to start any service?
source src1
{
type = mysql
sql_host = localhost
sql_user = root
sql_pass = india#123
sql_db = test
sql_port = 3306
sql_query = \
SELECT id, group_id, UNIX_TIMESTAMP(date_added) AS date_added, title, content \
FROM documents
sql_attr_uint = group_id
sql_attr_timestamp = date_added
sql_ranged_throttle = 0
sql_query_info = SELECT * FROM documents WHERE id=$id
}
source src1throttled : src1
{
sql_ranged_throttle = 100
}
index test1
{
source = src1
path = /var/lib/sphinxsearch/data/test1
docinfo = extern
dict = keywords
mlock = 0
morphology = none
min_word_len = 1
html_strip = 0
}
index test1stemmed : test1
{
path = /var/lib/sphinxsearch/data/test1stemmed
morphology = stem_en
}
index dist1
{
type = distributed
local = test1
local = test1stemmed
agent = localhost:9313:remote1
agent = localhost:9314:remote2,remote3
agent_connect_timeout = 1000
agent_query_timeout = 3000
}
index rt
{
type = rt
path = /var/lib/sphinxsearch/data/rt
rt_field = title
rt_field = content
rt_attr_uint = gid
}
indexer
{
mem_limit = 128M
}
searchd
{
listen = 9312
listen = 9306:mysql41
log = /var/log/sphinxsearch/searchd.log
query_log = /var/log/sphinxsearch/query.log
read_timeout = 5
client_timeout = 300
max_children = 30
persistent_connections_limit = 30
pid_file = /var/run/sphinxsearch/searchd.pid
seamless_rotate = 1
preopen_indexes = 1
unlink_old = 1
mva_updates_pool = 1M
max_packet_size = 8M
max_filters = 256
max_filter_values = 4096
max_batch_queries = 32
workers = threads # for RT to work
}
common
{
}

mysql -h0 -P3306
There you are trying to connect to port 3306. Why?
You seem to have searchd listening on port 9306
listen = 9306:mysql41
... and yes, you will need searchd actually running. The 'service' may be called different things in different distributions.
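For example, assuming the Debian/Ubuntu package where the service is called sphinxsearch (the name is an assumption; check your distribution), something along these lines should get you connected:
sudo service sphinxsearch start    # start searchd (service name may differ, e.g. plain 'searchd')
mysql -h0 -P9306                   # connect to the SphinxQL (mysql41) listener from your config, not 3306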

Make sure the sphinx user has permission to write to this folder!
chown -R sphinx:sphinx /var/lib/sphinxsearch/
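A quick way to sanity-check the permissions afterwards (a sketch, assuming the daemon runs as the sphinx user from the chown above):
ls -ld /var/lib/sphinxsearch/data
sudo -u sphinx touch /var/lib/sphinxsearch/data/.write_test && echo "sphinx user can write here"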

Related

magento2 cron not reindexing

I have created a crontab with these commands on my Ubuntu server with Plesk 12.5:
MAILTO=""
SHELL="/bin/bash"
*/1 * * * * php -c -f /var/www/vhosts/system/domainname.com/etc/php.ini /var/www/vhosts/domainname.com/httpdocs/store/bin/magento cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/magento.cron.log&
MAILTO=""
SHELL="/bin/bash"
*/1 * * * * php -c -f /var/www/vhosts/system/domainname.com/etc/php.ini /var/www/vhosts/domainname.com/httpdocs/store/update/cron.php > /var/www/vhosts/domainname.com/httpdocs/store/var/log/update.cron.log&
MAILTO=""
SHELL="/bin/bash"
*/1 * * * * php -c -f /var/www/vhosts/system/domainname.com/etc/php.ini /var/www/vhosts/domainname.com/httpdocs/store/bin/magento setup:cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/setup.cron.log&
When run, it creates three files (magento.cron.log, update.cron.log, setup.cron.log), and all three contain the same text:
; ATTENTION!
;
; DO NOT MODIFY THIS FILE BECAUSE IT WAS GENERATED AUTOMATICALLY,
; SO ALL YOUR CHANGES WILL BE LOST THE NEXT TIME THE FILE IS GENERATED.
[PHP]
soap.wsdl_cache_limit = 5
cli_server.color = On
mysql.allow_persistent = On
mysqli.max_persistent = -1
mysql.connect_timeout = 60
session.use_only_cookies = 1
register_argc_argv = Off
mssql.min_error_severity = 10
open_basedir = "/var/www/vhosts/mydomainname.com/:/tmp/"
session.name = PHPSESSID
mysqlnd.collect_statistics = On
session.hash_function = 0
session.gc_probability = 0
log_errors_max_len = 1024
mssql.secure_connection = Off
pgsql.max_links = -1
variables_order = "GPCS"
ldap.max_links = -1
sybct.allow_persistent = On
max_input_time = 60
odbc.max_links = -1
session.save_handler = files
session.save_path = "/var/lib/php5"
mysqli.cache_size = 2000
pgsql.auto_reset_persistent = Off
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
auto_prepend_file =
sybct.min_client_severity = 10
pgsql.max_persistent = -1
auto_globals_jit = On
soap.wsdl_cache_ttl = 86400
allow_url_fopen = On
zend.enable_gc = On
mysqli.allow_persistent = On
tidy.clean_output = Off
display_startup_errors = Off
user_dir =
session.cookie_lifetime = 0
mysqli.max_links = -1
default_socket_timeout = 60
session.serialize_handler = php
session.hash_bits_per_character = 5
unserialize_callback_func =
pdo_mysql.cache_size = 2000
default_mimetype = "text/html"
session.cache_expire = 180
max_execution_time = 30
mail.add_x_header = On
upload_max_filesize = 2M
ibase.max_links = -1
zlib.output_compression = Off
ignore_repeated_errors = Off
odbc.max_persistent = -1
file_uploads = On
ibase.max_persistent = -1
mysqli.reconnect = Off
mssql.allow_persistent = On
mysql.max_persistent = -1
mssql.max_links = -1
session.use_trans_sid = 0
mysql.default_socket =
always_populate_raw_post_data = -1
mysql.max_links = -1
odbc.defaultbinmode = 1
sybct.max_persistent = -1
output_buffering = 4096
ibase.timeformat = "%H:%M:%S"
doc_root =
log_errors = On
mysql.default_host =
default_charset = "UTF-8"
request_order = "GP"
display_errors = Off
mysqli.default_socket =
mysqli.default_pw =
html_errors = On
mssql.compatibility_mode = Off
ibase.allow_persistent = 1
sybct.min_server_severity = 10
mysql.allow_local_infile = On
post_max_size = 8M
asp_tags = Off
memory_limit = 512M
short_open_tag = Off
SMTP = localhost
precision = 14
session.use_strict_mode = 0
session.gc_maxlifetime = 1440
allow_url_include = Off
mysqli.default_host =
mysqli.default_user =
session.referer_check =
pgsql.log_notice = 0
mysql.default_port =
pgsql.ignore_notice = 0
mysql.trace_mode = Off
ibase.timestampformat = "%Y-%m-%d %H:%M:%S"
engine = On
odbc.allow_persistent = On
ibase.dateformat = "%Y-%m-%d"
track_errors = Off
max_file_uploads = 20
pgsql.allow_persistent = On
session.auto_start = 0
auto_append_file =
disable_classes =
pdo_mysql.default_socket =
mysql.default_password =
url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=fakeentry"
smtp_port = 25
sql.safe_mode = Off
session.cookie_path = /
expose_php = On
report_memleaks = On
session.gc_divisor = 1000
mssql.max_persistent = -1
serialize_precision = 17
odbc.check_persistent = On
sybct.max_links = -1
mysqlnd.collect_memory_statistics = Off
session.cookie_domain =
session.cookie_httponly =
session.cache_limiter = nocache
enable_dl = Off
mysqli.default_port = 3306
disable_functions =
odbc.defaultlrl = 4096
soap.wsdl_cache_enabled = 1
soap.wsdl_cache_dir = "/tmp"
mssql.min_message_severity = 10
session.use_cookies = 1
mysql.default_user =
mysql.cache_size = 2000
implicit_flush = Off
ignore_repeated_source = Off
bcmath.scale = 0
But when I enter the Magento manager, it keeps giving the message "One or more indexers are invalid. Make sure your Magento cron job is running."
I do not understand. What is it that is not working?
Thanks
You have gotten the flags for php wrong. It should be
*/1 * * * * php -c /var/www/vhosts/system/domainname.com/etc/php.ini -f /var/www/vhosts/domainname.com/httpdocs/store/bin/magento cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/magento.cron.log&
Also provide the full path to php, which can be found with the which php command.
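For example, if which php prints /usr/bin/php (that path is only an assumption; use whatever it reports on your server), the first crontab entry becomes:
*/1 * * * * /usr/bin/php -c /var/www/vhosts/system/domainname.com/etc/php.ini -f /var/www/vhosts/domainname.com/httpdocs/store/bin/magento cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/magento.cron.log&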

Problems when extracting data with Flume/Coordinator - HUE

I'm new to the Hadoop world and I'm having some trouble with my final data.
My goal is to extract data from a Facebook page (I'm using the restfb API) with Flume; the data then goes into HDFS, where Hive generates the final data. This happens every hour. All of this runs on HUE.
I don't know why, but some days I succeed in extracting data for the whole day, and other days I can only extract data for a few hours.
This is the data from Flume:
As you can see, on 03/21 I could only extract the first 4 hours of the day, while on 03/22 I could extract the whole day.
Some more info: my Flume config from Cloudera Manager:
FacebookAgent.sources = FacebookPageFansCity FacebookPageFansGenderAge FacebookPageFans FacebookPagePosts FacebookPageViews
FacebookAgent.channels = MemoryChannelFacebookPageFansCity MemoryChannelFacebookPageFansGenderAge MemoryChannelFacebookPageFans MemoryChannelFacebookPagePosts MemoryChannelFacebookPageViews
FacebookAgent.sinks = HDFSFacebookPageFansCity HDFSFacebookPageFansGenderAge HDFSFacebookPageFans HDFSFacebookPagePosts HDFSFacebookPageViews
# FacebookPageFansCity
FacebookAgent.sources.FacebookPageFansCity.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageFansCitySource
FacebookAgent.sources.FacebookPageFansCity.channels = MemoryChannelFacebookPageFansCity
FacebookAgent.sources.FacebookPageFansCity.appId = null
FacebookAgent.sources.FacebookPageFansCity.appSecret = null
FacebookAgent.sources.FacebookPageFansCity.accessToken = *confidential*
FacebookAgent.sources.FacebookPageFansCity.pageId = *confidential*
FacebookAgent.sources.FacebookPageFansCity.proxyEnabled = false
FacebookAgent.sources.FacebookPageFansCity.proxyHost = null
FacebookAgent.sources.FacebookPageFansCity.proxyPort = -1
FacebookAgent.sources.FacebookPageFansCity.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageFansCity.channel = MemoryChannelFacebookPageFansCity
FacebookAgent.sinks.HDFSFacebookPageFansCity.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pagefanscity/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansCity.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageFansCity.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansCity.transactionCapacity = 1000
# FacebookPageFansGenderAge
FacebookAgent.sources.FacebookPageFansGenderAge.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageFansGenderAgeSource
FacebookAgent.sources.FacebookPageFansGenderAge.channels = MemoryChannelFacebookPageFansGenderAge
FacebookAgent.sources.FacebookPageFansGenderAge.appId = null
FacebookAgent.sources.FacebookPageFansGenderAge.appSecret = null
FacebookAgent.sources.FacebookPageFansGenderAge.accessToken = *confidential*
FacebookAgent.sources.FacebookPageFansGenderAge.pageId = *confidential*
FacebookAgent.sources.FacebookPageFansGenderAge.proxyEnabled = false
FacebookAgent.sources.FacebookPageFansGenderAge.proxyHost = null
FacebookAgent.sources.FacebookPageFansGenderAge.proxyPort = -1
FacebookAgent.sources.FacebookPageFansGenderAge.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.channel = MemoryChannelFacebookPageFansGenderAge
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pagefansgenderage/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansGenderAge.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageFansGenderAge.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansGenderAge.transactionCapacity = 1000
# FacebookPageFans
FacebookAgent.sources.FacebookPageFans.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageFansSource
FacebookAgent.sources.FacebookPageFans.channels = MemoryChannelFacebookPageFans
FacebookAgent.sources.FacebookPageFans.appId = null
FacebookAgent.sources.FacebookPageFans.appSecret = null
FacebookAgent.sources.FacebookPageFans.accessToken = *confidential*
FacebookAgent.sources.FacebookPageFans.pageId = *confidential*
FacebookAgent.sources.FacebookPageFans.proxyEnabled = false
FacebookAgent.sources.FacebookPageFans.proxyHost = null
FacebookAgent.sources.FacebookPageFans.proxyPort = -1
FacebookAgent.sources.FacebookPageFans.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageFans.channel = MemoryChannelFacebookPageFans
FacebookAgent.sinks.HDFSFacebookPageFans.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pagefans/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFans.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageFans.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFans.transactionCapacity = 1000
# FacebookPagePosts
FacebookAgent.sources.FacebookPagePosts.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPagePostsSource
FacebookAgent.sources.FacebookPagePosts.channels = MemoryChannelFacebookPagePosts
FacebookAgent.sources.FacebookPagePosts.appId = null
FacebookAgent.sources.FacebookPagePosts.appSecret = null
FacebookAgent.sources.FacebookPagePosts.accessToken = *confidential*
FacebookAgent.sources.FacebookPagePosts.pageId = *confidential*
FacebookAgent.sources.FacebookPagePosts.proxyEnabled = false
FacebookAgent.sources.FacebookPagePosts.proxyHost = null
FacebookAgent.sources.FacebookPagePosts.proxyPort = -1
FacebookAgent.sources.FacebookPagePosts.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPagePosts.channel = MemoryChannelFacebookPagePosts
FacebookAgent.sinks.HDFSFacebookPagePosts.type = hdfs
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pageposts/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPagePosts.type = memory
FacebookAgent.channels.MemoryChannelFacebookPagePosts.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPagePosts.transactionCapacity = 5000
# FacebookPageViews
FacebookAgent.sources.FacebookPageViews.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageViewsSource
FacebookAgent.sources.FacebookPageViews.channels = MemoryChannelFacebookPageViews
FacebookAgent.sources.FacebookPageViews.appId = null
FacebookAgent.sources.FacebookPageViews.appSecret = null
FacebookAgent.sources.FacebookPageViews.accessToken = *confidential*
FacebookAgent.sources.FacebookPageViews.pageId = *confidential*
FacebookAgent.sources.FacebookPageViews.proxyEnabled = false
FacebookAgent.sources.FacebookPageViews.proxyHost = null
FacebookAgent.sources.FacebookPageViews.proxyPort = -1
FacebookAgent.sources.FacebookPageViews.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageViews.channel = MemoryChannelFacebookPageViews
FacebookAgent.sinks.HDFSFacebookPageViews.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pageviews/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageViews.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageViews.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageViews.transactionCapacity = 1000
Can anybody help me?
UPDATE
My coordinator from Oozie

Sphinx internal error/ query not send to searchd

I'm trying to use Sphinx with a service called questasy (nobody will know it). Our Dutch colleagues did this before, and the software definitely gives us the ability to run searches via Sphinx.
So here is the problem I have:
I set up the questasy portal, enabled the questasy usage, and the portal runs perfectly.
I unpacked Sphinx to C:/Sphinx and created the /data and /log directories.
I set up the config file and ran the indexer. It works.
I installed searchd as a service with the config, and it works and runs.
BUT now, when I try to search in the portal, it shows me a message like "internal error. Please try again later". When I look into the Query.log there is nothing in it, so I think the query isn't sent to the searchd service. I checked the config and the port it is listening on, and everything is the same as our colleagues have it.
Does anybody know about a common bug, problem, or something like this that we missed?
Here is my .conf:
# Questasy configuration file for sphinx
#
# To handle the Sphinx requirement that every document have a unique 32-bit ID,
# use a unique number for each index as the first 8 bits, and then use
# the normal index from the database for the last 24 bits.
# Here is the list of "index ids"
# 1 - English Question Text
# 2 - Dutch Question Text
# 3 - Concepts
# 4 - Variables
# 5 - Study Units
# 6 - Publications
#
# The full index will combine all of these indexes
#
# COMMANDS
# To index all of the files (when searchd is not running), use the command:
# indexer.exe --config qbase.conf --all
# To index all of the files (when searchd is running), use the command:
# indexer.exe --config qbase.conf --all --rotate
# Set up searchd as a service with the command
# searchd.exe --install --config c:\full\path\to\qbase.conf
# Stop searchd service with the command
# searchd.exe --stop --config c:\full\path\to\qbase.conf
# Remove searchd service with the command
# searchd.exe --delete --config c:\full\path\to\qbase.conf
# To just run searchd for development/testing
# searchd.exe --config qbase.conf
# base class with basic connection information
source base_source
{
type = mysql
sql_host = localhost
sql_user = root
sql_pass =
sql_db = questasy
sql_port = 3306 # optional, default is 3306
}
# Query for English Question Text
source questions_english : base_source
{
sql_query = SELECT ((1<<24)|QuestionItem.id) as id, StudyUnit.id as study_unit_id, QuestionItem.lh_text_1 as question_text, GROUP_CONCAT(Code.lt_label_1 SEPARATOR ' ') as answer_text FROM `question_items` AS `QuestionItem` LEFT JOIN `question_schemes` AS `QuestionScheme` ON (`QuestionItem`.`question_scheme_id` = `QuestionScheme`.`id`) LEFT JOIN `data_collections` AS `DataCollection` ON (`DataCollection`.`id` = `QuestionScheme`.`data_collection_id`) LEFT JOIN `study_units` AS `StudyUnit` ON (`StudyUnit`.`id` = `DataCollection`.`study_unit_id`) LEFT JOIN `response_domains` AS `ResponseDomain` ON (`QuestionItem`.`response_domain_id` = `ResponseDomain`.`id`) LEFT JOIN `code_schemes` As `CodeScheme` ON (`ResponseDomain`.`code_scheme_id` = `CodeScheme`.`id` AND `ResponseDomain`.`domain_type`=4) LEFT JOIN `codes` AS `Code` ON (`Code`.`code_scheme_id` = `CodeScheme`.`id`) WHERE `StudyUnit`.`published` >= 20 GROUP BY QuestionItem.id
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/question_items/view/',$id) AS URL
}
# Query for Dutch Question Text
source questions_dutch : base_source
{
sql_query = SELECT ((2<<24)|QuestionItem.id) as id, StudyUnit.id as study_unit_id, QuestionItem.lh_text_2 as question_text, GROUP_CONCAT(Code.lt_label_2 SEPARATOR ' ') as answer_text FROM `question_items` AS `QuestionItem` LEFT JOIN `question_schemes` AS `QuestionScheme` ON (`QuestionItem`.`question_scheme_id` = `QuestionScheme`.`id`) LEFT JOIN `data_collections` AS `DataCollection` ON (`DataCollection`.`id` = `QuestionScheme`.`data_collection_id`) LEFT JOIN `study_units` AS `StudyUnit` ON (`StudyUnit`.`id` = `DataCollection`.`study_unit_id`) LEFT JOIN `response_domains` AS `ResponseDomain` ON (`QuestionItem`.`response_domain_id` = `ResponseDomain`.`id`) LEFT JOIN `code_schemes` As `CodeScheme` ON (`ResponseDomain`.`code_scheme_id` = `CodeScheme`.`id` AND `ResponseDomain`.`domain_type`=4) LEFT JOIN `codes` AS `Code` ON (`Code`.`code_scheme_id` = `CodeScheme`.`id`) WHERE `StudyUnit`.`published` >= 20 GROUP BY QuestionItem.id
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/question_items/view/',$id) AS URL
}
# Query for Concepts
source concepts : base_source
{
sql_query = SELECT ((3<<24)|Concept.id) as id, Concept.lt_label_1 as concept_label, Concept.lh_description_1 as concept_description FROM `concepts` AS `Concept`
# sql_query_info = SELECT CONCAT('/concepts/view/',$id) AS URL
}
# Query for Data Variable
source variables : base_source
{
sql_query = SELECT ((4<<24)|DataVariable.id) as id, StudyUnit.id as study_unit_id, DataVariable.name as variable_name, DataVariable.lh_label_1 as variable_label FROM `data_variables` AS `DataVariable` LEFT JOIN `variable_schemes` AS `VariableScheme` ON (`DataVariable`.`variable_scheme_id` = `VariableScheme`.`id`) LEFT JOIN `base_logical_products` AS `BaseLogicalProduct` ON (`BaseLogicalProduct`.`id` = `VariableScheme`.`base_logical_product_id`) LEFT JOIN `study_units` AS `StudyUnit` ON (`StudyUnit`.`id` = `BaseLogicalProduct`.`study_unit_id`) WHERE `StudyUnit`.`published` >= 15
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/data_variables/view/',$id) AS URL
}
# Query for Study Units
source study_units : base_source
{
sql_query = SELECT ((5<<24)|StudyUnit.id) as id, StudyUnit.id as study_unit_id, StudyUnit.fulltitle as study_unit_name, StudyUnit.subtitle as study_unit_subtitle, StudyUnit.alternate_title AS study_unit_alternatetitle, StudyUnit.lh_note_1 as study_unit_note, StudyUnit.lh_purpose_1 as study_unit_purpose, StudyUnit.lh_abstract_1 as study_unit_abstract, StudyUnit.creator as study_unit_creator FROM study_units AS StudyUnit WHERE `StudyUnit`.`published` >= 10
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/study_units/view/',$id) AS URL
}
# Query for Publications
source publications : base_source
{
sql_query = SELECT ((6<<24)|Publication.id) as id, Publication.id as publication_id, Publication.title as publication_name, Publication.subtitle as publication_subtitle, Publication.creator as publication_creator, Publication.contributor as publication_contributor, Publication.abstract as publication_abstract, Publication.lh_note_1 as publication_note, Publication.source as publication_source FROM publications AS Publication WHERE NOT(`Publication`.`accepted_timestamp` IS NULL)
# sql_query_info = SELECT CONCAT('/publications/view/',$id) AS URL
}
# Query for Hosted Files - Other materials
source other_materials : base_source
{
sql_query = SELECT ((7<<24)|HostedFile.id) as id, OtherMaterial.title as hosted_file_title, HostedFile.name as hosted_file_name, StudyUnit.id as study_unit_id FROM `hosted_files` as `HostedFile`, `other_materials` as OtherMaterial, `study_units` as `StudyUnit` WHERE OtherMaterial.hosted_file_id = HostedFile.id AND OtherMaterial.study_unit_id = StudyUnit.id AND `StudyUnit`.`published` >= 20
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/hosted_files/download/',$id) AS URL
}
# Query for Hosted Files - Datasets
source physical_instances : base_source
{
sql_query = SELECT ((8<<24)|HostedFile.id) as id, PhysicalInstance.name as hosted_file_name, StudyUnit.id as study_unit_id FROM `hosted_files` as `HostedFile`, `physical_instances` as PhysicalInstance, `study_units` as `StudyUnit` WHERE PhysicalInstance.hosted_file_id = HostedFile.id AND PhysicalInstance.study_unit_id = StudyUnit.id AND `StudyUnit`.`published` >= 20
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/hosted_files/download/',$id) AS URL
}
# Query for Physical Data Products (Variable Schemes)
source physical_data_products : base_source
{
sql_query = SELECT ((9<<24)| PhysicalDataProduct.id) as id, PhysicalDataProduct.name FROM `physical_data_products` AS `PhysicalDataProduct`, `study_units` as `StudyUnit` WHERE PhysicalDataProduct.study_unit_id = StudyUnit.id AND PhysicalDataProduct.deleted = 0 AND StudyUnit.published >= 20
}
# English Question Text Index
index questions_english_index
{
source = questions_english
path = C:\Sphinx\data\questions_english_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Dutch Question Text Index
index questions_dutch_index
{
source = questions_dutch
path = C:\Sphinx\data\questions_dutch_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Concept Index
index concepts_index
{
source = concepts
path = C:\Sphinx\data\concepts_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Variable Index
index variables_index
{
source = variables
path = C:\Sphinx\data\variables_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Study Unit Index
index study_units_index
{
source = study_units
path = C:\Sphinx\data\study_units_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Publication Index
index publications_index
{
source = publications
path = C:\Sphinx\data\publications_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Other Materials Index
index other_materials_index
{
source = other_materials
path = C:\Sphinx\data\other_materials_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Datasets file Index
index physical_instances_index
{
source = physical_instances
path = C:\Sphinx\data\physical_instances_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Datasets Index
index physical_data_products_index
{
source = physical_data_products
path = C:\Sphinx\data\physical_data_products_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Full Index - merge all of the other indexes
index full_index
{
type = distributed
local = questions_english_index
local = questions_dutch_index
local = concepts_index
local = variables_index
local = study_units_index
local = publications_index
local = other_materials_index
local = physical_instances_index
local = physical_data_products_index
}
indexer
{
# memory limit, in bytes, kilobytes (16384K) or megabytes (256M)
# optional, default is 32M, max is 2047M, recommended is 256M to 1024M
mem_limit = 256M
# maximum IO calls per second (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iops = 40
# maximum IO call size, bytes (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iosize = 1048576
}
# Settings for the searchd service
searchd
{
# port = 3312
log = C:\Sphinx\log\searchd.log
query_log = C:\Sphinx\log\query.log
pid_file = C:\Sphinx\log\searchd.pid
listen = 127.0.0.1
}
# C:\Sphinx\bin\searchd --config C:\xampp\htdocs\sphinx\vendors\questasy.conf
Thanks in advance

To match multiple lines of the snmpwalk result

Below is the snmpwalk command I am issuing; the result it displays is what I have to match.
When I tried to store the result in an array and then match it, it didn't work.
It goes like this:
snmpwalk -mALL -v2c -cpublic 10.126.143.249 ifname     <===== the command I issue
This is the result it gives (which I have to match)
IF-MIB::ifName.2 = STRING: port ethernet 1/1
IF-MIB::ifName.3 = STRING: port ethernet 1/2
IF-MIB::ifName.67108865 = STRING: SKB
IF-MIB::ifName.67108866 = STRING: i1
IF-MIB::ifName.134217732 = STRING: LINK
IF-MIB::ifName.134217735 = STRING: port ethernet 1/1 dot1q pvc 200 1/2/7
===========================================================================================
Below is the code I tried:
sub snmpwalk_ifName() {
my $rs;
my @array=("IF-MIB::ifName.* = STRING: port ethernet *
IF-MIB::ifName.* = STRING: port ethernet *
IF-MIB::ifName.* = STRING: SKB
IF-MIB::ifName.* = STRING: i1
IF-MIB::ifName.* = STRING: LINK
IF-MIB::ifName.* = STRING: port ethernet * dot1q pvc 200 *");
my %out= $::device2->send_cmds("snmpwalk -mALL -v2c -cpublic $::DEVICE1{ADMIN_IP} ifname");
$rs = Match::Match_Output (
OUTPUT => $out{OUTPUT},
EXP_RESULT => @array);
$::test->checkPoint( RESULT => $rs,
MSG => "CHECKPOINT for verifying snmpwalk output");
}
Not sure what you are trying to match, but here is a one-liner to hopefully get you thinking (and it is fun to play):
Initial output:
snmpwalk -mALL -v2c -c lab 192.168.1.65 ifname
IF-MIB::ifName.4 = STRING: lsi
IF-MIB::ifName.5 = STRING: dsc
IF-MIB::ifName.6 = STRING: lo0
IF-MIB::ifName.7 = STRING: tap
IF-MIB::ifName.8 = STRING: gre
IF-MIB::ifName.9 = STRING: ipip
IF-MIB::ifName.10 = STRING: pime
IF-MIB::ifName.11 = STRING: pimd
IF-MIB::ifName.12 = STRING: mtun
IF-MIB::ifName.16 = STRING: lo0.0
IF-MIB::ifName.21 = STRING: lo0.16384
IF-MIB::ifName.22 = STRING: lo0.16385
IF-MIB::ifName.248 = STRING: lo0.32768
IF-MIB::ifName.501 = STRING: pp0
IF-MIB::ifName.502 = STRING: st0
IF-MIB::ifName.503 = STRING: ppd0
IF-MIB::ifName.504 = STRING: ppe0
IF-MIB::ifName.505 = STRING: vlan
IF-MIB::ifName.506 = STRING: ge-0/0/0
IF-MIB::ifName.507 = STRING: ge-0/0/1
IF-MIB::ifName.508 = STRING: ge-0/0/0.0
IF-MIB::ifName.509 = STRING: sp-0/0/0
IF-MIB::ifName.510 = STRING: sp-0/0/0.0
IF-MIB::ifName.511 = STRING: gr-0/0/0
IF-MIB::ifName.512 = STRING: sp-0/0/0.16383
IF-MIB::ifName.513 = STRING: ip-0/0/0
IF-MIB::ifName.514 = STRING: lsq-0/0/0
IF-MIB::ifName.515 = STRING: mt-0/0/0
IF-MIB::ifName.516 = STRING: lt-0/0/0
IF-MIB::ifName.517 = STRING: ge-0/0/1.0
IF-MIB::ifName.518 = STRING: ge-0/0/2
IF-MIB::ifName.519 = STRING: ge-0/0/2.0
IF-MIB::ifName.520 = STRING: ge-0/0/3
IF-MIB::ifName.521 = STRING: ge-0/0/4
IF-MIB::ifName.522 = STRING: ge-0/0/3.0
Put the interface names into an array with a one-liner:
snmpwalk -mALL -v2c -c lab 192.168.1.65 ifname | \
perl -wnE 'push @each_line,(split " ",$_)[-1];END {say $_ for @each_line}'
Output:
lsi
dsc
lo0
tap
gre
ipip
pime
pimd
mtun
lo0.0
lo0.16384
lo0.16385
lo0.32768
pp0
st0
ppd0
ppe0
vlan
ge-0/0/0
ge-0/0/1
ge-0/0/0.0
sp-0/0/0
sp-0/0/0.0
gr-0/0/0
sp-0/0/0.16383
ip-0/0/0
lsq-0/0/0
mt-0/0/0
lt-0/0/0
ge-0/0/1.0
ge-0/0/2
ge-0/0/2.0
ge-0/0/3
ge-0/0/4
ge-0/0/3.0
Or Index + name:
snmpwalk -mALL -v2c -c lab 192.168.1.65 ifname |
perl -wnE 'say "index = $1 , Int Name = $2" if /ifName\.(\d+).*?:\s(.*)/'
Output:
index = 4 , Int Name = lsi
index = 5 , Int Name = dsc
index = 6 , Int Name = lo0
index = 7 , Int Name = tap
index = 8 , Int Name = gre
index = 9 , Int Name = ipip
index = 10 , Int Name = pime
index = 11 , Int Name = pimd
index = 12 , Int Name = mtun
index = 16 , Int Name = lo0.0
index = 21 , Int Name = lo0.16384
index = 22 , Int Name = lo0.16385
index = 248 , Int Name = lo0.32768
index = 501 , Int Name = pp0
index = 502 , Int Name = st0
index = 503 , Int Name = ppd0
index = 504 , Int Name = ppe0
index = 505 , Int Name = vlan
index = 506 , Int Name = ge-0/0/0
index = 507 , Int Name = ge-0/0/1
index = 508 , Int Name = ge-0/0/0.0
index = 509 , Int Name = sp-0/0/0
index = 510 , Int Name = sp-0/0/0.0
index = 511 , Int Name = gr-0/0/0
index = 512 , Int Name = sp-0/0/0.16383
index = 513 , Int Name = ip-0/0/0
index = 514 , Int Name = lsq-0/0/0
index = 515 , Int Name = mt-0/0/0
index = 516 , Int Name = lt-0/0/0
index = 517 , Int Name = ge-0/0/1.0
index = 518 , Int Name = ge-0/0/2
index = 519 , Int Name = ge-0/0/2.0
index = 520 , Int Name = ge-0/0/3
index = 521 , Int Name = ge-0/0/4
index = 522 , Int Name = ge-0/0/3.0
Anyway, Perl is great for quick one-liner parsing.
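If the end goal is simply to check that every expected entry shows up somewhere in the walk output, here is a rough sketch in the same spirit (the substrings are lifted from the expected lines in your question; adapt them to whatever your Match::Match_Output helper expects):
snmpwalk -mALL -v2c -cpublic 10.126.143.249 ifname | \
perl -wnE 'BEGIN { @want = ("port ethernet 1/1", "port ethernet 1/2", "STRING: SKB", "STRING: i1", "STRING: LINK", "dot1q pvc 200") }
  for my $i (0 .. $#want) { $seen[$i] = 1 if index($_, $want[$i]) >= 0 }
  END { say(($seen[$_] ? "found:   " : "MISSING: ") . $want[$_]) for 0 .. $#want }'
Each line of the walk is checked against every expected substring, and the END block reports which expectations were never seen.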

circusctl incr <name> [<nbprocess>] does not increment by nbprocess

Here is my circus.ini
[circus]
check_delay = 5
endpoint = tcp://127.0.0.1:5555
pubsub_endpoint = tcp://127.0.0.1:5556
stats_endpoint = tcp://127.0.0.1:5557
httpd = False
debug = False
[watcher:sample1]
cmd = /worker/sample1.php
warmup_delay = 0
numprocesses = 10
[watcher:sample2]
cmd = /worker/sample2.php
warmup_delay = 0
numprocesses = 10
[plugin:flapping]
use = circus.plugins.flapping.Flapping
retry_in = 3
max_retry = 2
I am trying to increase the number of processes by 2 (nbprocess) for sample1. I tried
circusctl incr sample1 2
But circus always increases it by 1, not by 2 (nbprocess). Any ideas?
Fixed by the author. Here is the reported bug.
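For reference, once you are on a version with that fix, the behaviour can be checked from the shell roughly like this (circusctl's numprocesses subcommand is used here only to show the count before and after; adjust the watcher name to your setup):
circusctl numprocesses sample1   # reports 10, per circus.ini above
circusctl incr sample1 2         # request 2 extra processes
circusctl numprocesses sample1   # should now report 12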