My Doxygen version: 1.8.20.
Doxygen picks up the comments on functions, variables, and so on. But if, for example, I comment an if statement, the comment is not taken into account in the doc. I would like a comment section for each piece of code that I comment on. I want to be able to retrieve the comments that are not attached to functions and variables:
/**
 * Description: if comment
 */
if ($tri != 0) {
    if ($tri == -1)
        $tri = 0;
    $_SESSION["stat"]["tri"] = $tri;
}
elseif (isset($_SESSION["stat"]["tri"])) {
    $tri = $_SESSION["stat"]["tri"];
}
$smarty->assign("tri", $tri);
I have tested the \internal command, but it does not work...
I can't find the exact case I'm looking for in the documentation.
However, I just want to display the comments that interest me in addition to the functions and variables. I am sure it is possible; maybe something needs to change in my configuration.
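The closest built-in behavior I know of is in-body documentation: when HIDE_IN_BODY_DOCS is NO (as in my configuration below), Doxygen appends comment blocks found inside a function's body to that function's detailed description. A minimal sketch, assuming the if lives inside a documented function (updateSort is a made-up name):

/**
 * Stores the current sort order in the session.
 */
function updateSort($tri)
{
    /** In-body block: Doxygen appends this text to the
     *  detailed description of updateSort(). */
    if ($tri != 0) {
        $_SESSION["stat"]["tri"] = $tri;
    }
}

This only helps for code inside documented functions, though; a comment on a top-level statement like the snippet above still has nothing to attach to.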
Here is my configuration, in case anyone has an idea:
# Doxyfile 1.8.20
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = projectName
PROJECT_NUMBER =
PROJECT_BRIEF = ""
PROJECT_LOGO = ./images/logo.png
OUTPUT_DIRECTORY = ../testdoc
CREATE_SUBDIRS = NO
ALLOW_UNICODE_NAMES = NO
OUTPUT_LANGUAGE = French
OUTPUT_TEXT_DIRECTION = None
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF = "The $name class" \
"The $name widget" \
"The $name file" \
is \
provides \
specifies \
contains \
represents \
a \
an \
the
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = YES
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = YES
JAVADOC_BANNER = NO
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
PYTHON_DOCSTRING = YES
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 4
ALIASES =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = YES
OPTIMIZE_FOR_FORTRAN = NO
OPTIMIZE_OUTPUT_VHDL = NO
OPTIMIZE_OUTPUT_SLICE = NO
EXTENSION_MAPPING =
MARKDOWN_SUPPORT = YES
TOC_INCLUDE_HEADINGS = 5
AUTOLINK_SUPPORT = YES
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
SIP_SUPPORT = NO
IDL_PROPERTY_SUPPORT = YES
DISTRIBUTE_GROUP_DOC = NO
GROUP_NESTED_COMPOUNDS = NO
SUBGROUPING = YES
INLINE_GROUPED_CLASSES = NO
INLINE_SIMPLE_STRUCTS = NO
TYPEDEF_HIDES_STRUCT = NO
LOOKUP_CACHE_SIZE = 0
NUM_PROC_THREADS = 1
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
EXTRACT_PRIV_VIRTUAL = NO
EXTRACT_PACKAGE = NO
EXTRACT_STATIC = YES
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = YES
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = NO
INTERNAL_DOCS = YES
CASE_SENSE_NAMES = NO
HIDE_SCOPE_NAMES = YES
HIDE_COMPOUND_REFERENCE= NO
SHOW_INCLUDE_FILES = YES
SHOW_GROUPED_MEMB_INC = NO
FORCE_LOCAL_INCLUDES = NO
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_MEMBERS_CTORS_1ST = YES
SORT_GROUP_NAMES = YES
SORT_BY_SCOPE_NAME = YES
STRICT_PROTO_MATCHING = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS = INTERNAL
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
SHOW_FILES = YES
SHOW_NAMESPACES = YES
FILE_VERSION_FILTER =
LAYOUT_FILE =
CITE_BIB_FILES =
#---------------------------------------------------------------------------
# Configuration options related to warning and progress messages
#---------------------------------------------------------------------------
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_AS_ERROR = NO
WARN_FORMAT = "$file:$line: $text"
WARN_LOGFILE =
#---------------------------------------------------------------------------
# Configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = .
INPUT_ENCODING = UTF-8
FILE_PATTERNS = *.c \
*.cc \
*.cxx \
*.cpp \
*.c++ \
*.java \
*.ii \
*.ixx \
*.ipp \
*.i++ \
*.inl \
*.idl \
*.ddl \
*.odl \
*.h \
*.hh \
*.hxx \
*.hpp \
*.h++ \
*.cs \
*.d \
*.php \
*.php4 \
*.php5 \
*.phtml \
*.inc \
*.m \
*.markdown \
*.md \
*.mm \
*.dox \
*.py \
*.pyw \
*.f90 \
*.f95 \
*.f03 \
*.f08 \
*.f \
*.for \
*.tcl \
*.vhd \
*.vhdl \
*.ucf \
*.qsf \
*.ice
RECURSIVE = YES
EXCLUDE = ./cache \
./templates_c \
./lib \
./log \
./tests \
"./en production" \
./sysop/templates_c
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS =
EXCLUDE_SYMBOLS =
EXAMPLE_PATH =
EXAMPLE_PATTERNS = *
EXAMPLE_RECURSIVE = NO
IMAGE_PATH =
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
FILTER_SOURCE_PATTERNS =
USE_MDFILE_AS_MAINPAGE =
#---------------------------------------------------------------------------
# Configuration options related to source browsing
#---------------------------------------------------------------------------
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = NO
REFERENCED_BY_RELATION = NO
REFERENCES_RELATION = NO
REFERENCES_LINK_SOURCE = YES
SOURCE_TOOLTIPS = YES
USE_HTAGS = NO
VERBATIM_HEADERS = YES
CLANG_ASSISTED_PARSING = NO
CLANG_OPTIONS =
CLANG_DATABASE_PATH =
#---------------------------------------------------------------------------
# Configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
ALPHABETICAL_INDEX = YES
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the HTML output
#---------------------------------------------------------------------------
GENERATE_HTML = YES
HTML_OUTPUT = .
HTML_FILE_EXTENSION = .html
HTML_HEADER =
HTML_FOOTER =
HTML_STYLESHEET =
HTML_EXTRA_STYLESHEET =
HTML_EXTRA_FILES =
HTML_COLORSTYLE_HUE = 220
HTML_COLORSTYLE_SAT = 100
HTML_COLORSTYLE_GAMMA = 80
HTML_TIMESTAMP = YES
HTML_DYNAMIC_MENUS = YES
HTML_DYNAMIC_SECTIONS = YES
HTML_INDEX_NUM_ENTRIES = 100
GENERATE_DOCSET = NO
DOCSET_FEEDNAME = "Doxygen generated docs"
DOCSET_BUNDLE_ID = org.doxygen.Project
DOCSET_PUBLISHER_ID = org.doxygen.Publisher
DOCSET_PUBLISHER_NAME = Publisher
GENERATE_HTMLHELP = NO
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
CHM_INDEX_ENCODING =
BINARY_TOC = NO
TOC_EXPAND = NO
GENERATE_QHP = NO
QCH_FILE =
QHP_NAMESPACE = org.doxygen.Project
QHP_VIRTUAL_FOLDER = doc
QHP_CUST_FILTER_NAME =
QHP_CUST_FILTER_ATTRS =
QHP_SECT_FILTER_ATTRS =
QHG_LOCATION =
GENERATE_ECLIPSEHELP = NO
ECLIPSE_DOC_ID = org.doxygen.Project
DISABLE_INDEX = NO
GENERATE_TREEVIEW = NO
ENUM_VALUES_PER_LINE = 4
TREEVIEW_WIDTH = 250
EXT_LINKS_IN_WINDOW = NO
HTML_FORMULA_FORMAT = png
FORMULA_FONTSIZE = 10
FORMULA_TRANSPARENT = YES
FORMULA_MACROFILE =
USE_MATHJAX = NO
MATHJAX_FORMAT = HTML-CSS
MATHJAX_RELPATH = https://cdnjs.cloudflare.com/ajax/libs/mathjax/2.7.5/
MATHJAX_EXTENSIONS =
MATHJAX_CODEFILE =
SEARCHENGINE = YES
SERVER_BASED_SEARCH = NO
EXTERNAL_SEARCH = NO
SEARCHENGINE_URL =
SEARCHDATA_FILE = searchdata.xml
EXTERNAL_SEARCH_ID =
EXTRA_SEARCH_MAPPINGS =
#---------------------------------------------------------------------------
# Configuration options related to the LaTeX output
#---------------------------------------------------------------------------
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME =
MAKEINDEX_CMD_NAME = makeindex
LATEX_MAKEINDEX_CMD = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4
EXTRA_PACKAGES =
LATEX_HEADER =
LATEX_FOOTER =
LATEX_EXTRA_STYLESHEET =
LATEX_EXTRA_FILES =
PDF_HYPERLINKS = YES
USE_PDFLATEX = YES
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
LATEX_SOURCE_CODE = NO
LATEX_BIB_STYLE = plain
LATEX_TIMESTAMP = NO
LATEX_EMOJI_DIRECTORY =
#---------------------------------------------------------------------------
# Configuration options related to the RTF output
#---------------------------------------------------------------------------
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
RTF_SOURCE_CODE = NO
#---------------------------------------------------------------------------
# Configuration options related to the man page output
#---------------------------------------------------------------------------
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_SUBDIR =
MAN_LINKS = NO
#---------------------------------------------------------------------------
# Configuration options related to the XML output
#---------------------------------------------------------------------------
GENERATE_XML = NO
XML_OUTPUT = xml
XML_PROGRAMLISTING = YES
XML_NS_MEMB_FILE_SCOPE = NO
#---------------------------------------------------------------------------
# Configuration options related to the DOCBOOK output
#---------------------------------------------------------------------------
GENERATE_DOCBOOK = NO
DOCBOOK_OUTPUT = docbook
DOCBOOK_PROGRAMLISTING = NO
#---------------------------------------------------------------------------
# Configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# Configuration options related to Sqlite3 output
#---------------------------------------------------------------------------
#---------------------------------------------------------------------------
# Configuration options related to the Perl module output
#---------------------------------------------------------------------------
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = NO
EXPAND_ONLY_PREDEF = NO
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED =
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration options related to external references
#---------------------------------------------------------------------------
TAGFILES =
GENERATE_TAGFILE =
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
EXTERNAL_PAGES = YES
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
CLASS_DIAGRAMS = YES
DIA_PATH =
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = NO
DOT_NUM_THREADS = 0
DOT_FONTNAME = Helvetica
DOT_FONTSIZE = 10
DOT_FONTPATH =
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
UML_LIMIT_NUM_FIELDS = 10
TEMPLATE_RELATIONS = NO
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
INTERACTIVE_SVG = NO
DOT_PATH =
DOTFILE_DIRS =
MSCFILE_DIRS =
DIAFILE_DIRS =
PLANTUML_JAR_PATH =
PLANTUML_CFG_FILE =
PLANTUML_INCLUDE_PATH =
DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
DOT_TRANSPARENT = NO
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES
Given the following macro definition:
#if defined(__GNUC__) && __GNUC__ >= 4
#define WL_PRINTF(x, y) __attribute__((__format__(__printf__, x, y)))
#else
#define WL_PRINTF(x, y)
#endif
And given the following use, as a gcc function attribute:
typedef void (*wl_log_func_t)(const char *, va_list) WL_PRINTF(1, 0);
Doxygen seems to be truncating part of the function attribute in the generated output (screenshots of the truncated typedef omitted).
Doxygen also truncates it similarly in other places where I use the macro as a function attribute, so the problem seems consistent (it is not specific to this being a typedef). It documents the macro itself just fine.
My .doxygen config is:
PROJECT_NAME = "Wayland"
PROJECT_NUMBER = 1.12.90
OUTPUT_DIRECTORY = ../../doc/doxygen
JAVADOC_AUTOBRIEF = YES
TAB_SIZE = 8
QUIET = YES
HTML_TIMESTAMP = YES
GENERATE_LATEX = NO
MAN_LINKS = YES
PREDEFINED = WL_EXPORT=
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
DOT_MULTI_TARGETS = YES
ALIASES += comment{1}="/* \1 *<!-- -->/"
OPTIMIZE_OUTPUT_FOR_C = YES
EXTRACT_ALL = YES
EXTRACT_STATIC = YES
GENERATE_HTML = NO
GENERATE_XML = NO
GENERATE_MAN = NO
Is there some neat way to trick Doxygen into not truncating this?
This seems to be a constraint (pronounced 'bug') in Doxygen that causes it to truncate. The alternative is to avoid generating the function attribute entirely by adding the macro to the PREDEFINED config.
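A minimal sketch of that workaround against the config above, using Doxygen's PREDEFINED name(args)=definition syntax to expand the macro to nothing so the attribute never reaches the parser:

PREDEFINED = WL_EXPORT= \
             "WL_PRINTF(x, y)="

With MACRO_EXPANSION = YES and EXPAND_ONLY_PREDEF = YES already set as above, the typedef is then documented without the attribute.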
I have created a crontab with these commands on my Ubuntu server with Plesk 12.5:
MAILTO=""
SHELL="/bin/bash"
*/1 * * * * php -c -f /var/www/vhosts/system/domainname.com/etc/php.ini /var/www/vhosts/domainname.com/httpdocs/store/bin/magento cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/magento.cron.log&
MAILTO=""
SHELL="/bin/bash"
*/1 * * * * php -c -f /var/www/vhosts/system/domainname.com/etc/php.ini /var/www/vhosts/domainname.com/httpdocs/store/update/cron.php > /var/www/vhosts/domainname.com/httpdocs/store/var/log/update.cron.log&
MAILTO=""
SHELL="/bin/bash"
*/1 * * * * php -c -f /var/www/vhosts/system/domainname.com/etc/php.ini /var/www/vhosts/domainname.com/httpdocs/store/bin/magento setup:cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/setup.cron.log&
When run, it creates three files (magento.cron.log, update.cron.log, setup.cron.log), and all three contain the same text:
; ATTENTION!
;
; DO NOT MODIFY THIS FILE BECAUSE IT WAS GENERATED AUTOMATICALLY,
; SO ALL YOUR CHANGES WILL BE LOST THE NEXT TIME THE FILE IS GENERATED.

[PHP]
soap.wsdl_cache_limit = 5
cli_server.color = On
mysql.allow_persistent = On
mysqli.max_persistent = -1
mysql.connect_timeout = 60
session.use_only_cookies = 1
register_argc_argv = Off
mssql.min_error_severity = 10
open_basedir = "/var/www/vhosts/mydomainname.com/:/tmp/"
session.name = PHPSESSID
mysqlnd.collect_statistics = On
session.hash_function = 0
session.gc_probability = 0
log_errors_max_len = 1024
mssql.secure_connection = Off
pgsql.max_links = -1
variables_order = "GPCS"
ldap.max_links = -1
sybct.allow_persistent = On
max_input_time = 60
odbc.max_links = -1
session.save_handler = files
session.save_path = "/var/lib/php5"
mysqli.cache_size = 2000
pgsql.auto_reset_persistent = Off
error_reporting = E_ALL & ~E_DEPRECATED & ~E_STRICT
auto_prepend_file =
sybct.min_client_severity = 10
pgsql.max_persistent = -1
auto_globals_jit = On
soap.wsdl_cache_ttl = 86400
allow_url_fopen = On
zend.enable_gc = On
mysqli.allow_persistent = On
tidy.clean_output = Off
display_startup_errors = Off
user_dir =
session.cookie_lifetime = 0
mysqli.max_links = -1
default_socket_timeout = 60
session.serialize_handler = php
session.hash_bits_per_character = 5
unserialize_callback_func =
pdo_mysql.cache_size = 2000
default_mimetype = "text/html"
session.cache_expire = 180
max_execution_time = 30
mail.add_x_header = On
upload_max_filesize = 2M
ibase.max_links = -1
zlib.output_compression = Off
ignore_repeated_errors = Off
odbc.max_persistent = -1
file_uploads = On
ibase.max_persistent = -1
mysqli.reconnect = Off
mssql.allow_persistent = On
mysql.max_persistent = -1
mssql.max_links = -1
session.use_trans_sid = 0
mysql.default_socket =
always_populate_raw_post_data = -1
mysql.max_links = -1
odbc.defaultbinmode = 1
sybct.max_persistent = -1
output_buffering = 4096
ibase.timeformat = "%H:%M:%S"
doc_root =
log_errors = On
mysql.default_host =
default_charset = "UTF-8"
request_order = "GP"
display_errors = Off
mysqli.default_socket =
mysqli.default_pw =
html_errors = On
mssql.compatibility_mode = Off
ibase.allow_persistent = 1
sybct.min_server_severity = 10
mysql.allow_local_infile = On
post_max_size = 8M
asp_tags = Off
memory_limit = 512M
short_open_tag = Off
SMTP = localhost
precision = 14
session.use_strict_mode = 0
session.gc_maxlifetime = 1440
allow_url_include = Off
mysqli.default_host =
mysqli.default_user =
session.referer_check =
pgsql.log_notice = 0
mysql.default_port =
pgsql.ignore_notice = 0
mysql.trace_mode = Off
ibase.timestampformat = "%Y-%m-%d %H:%M:%S"
engine = On
odbc.allow_persistent = On
ibase.dateformat = "%Y-%m-%d"
track_errors = Off
max_file_uploads = 20
pgsql.allow_persistent = On
session.auto_start = 0
auto_append_file =
disable_classes =
pdo_mysql.default_socket =
mysql.default_password =
url_rewriter.tags = "a=href,area=href,frame=src,input=src,form=fakeentry"
smtp_port = 25
sql.safe_mode = Off
session.cookie_path = /
expose_php = On
report_memleaks = On
session.gc_divisor = 1000
mssql.max_persistent = -1
serialize_precision = 17
odbc.check_persistent = On
sybct.max_links = -1
mysqlnd.collect_memory_statistics = Off
session.cookie_domain =
session.cookie_httponly =
session.cache_limiter = nocache
enable_dl = Off
mysqli.default_port = 3306
disable_functions =
odbc.defaultlrl = 4096
soap.wsdl_cache_enabled = 1
soap.wsdl_cache_dir = "/tmp"
mssql.min_message_severity = 10
session.use_cookies = 1
mysql.default_user =
mysql.cache_size = 2000
implicit_flush = Off
ignore_repeated_source = Off
bcmath.scale = 0
But the Magento manager keeps giving the message "One or more indexers are invalid. Make sure your Magento cron job is running."
I do not understand. What is it that is not working?
Thanks
You have gotten the flags for php wrong. It should be:
*/1 * * * * php -c /var/www/vhosts/system/domainname.com/etc/php.ini -f /var/www/vhosts/domainname.com/httpdocs/store/bin/magento cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/magento.cron.log&
Also, provide the full path to php, which can be found with the "which php" command.
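For example, assuming php resolves to /usr/bin/php on your server (verify with "which php"), the first entry would become:

*/1 * * * * /usr/bin/php -c /var/www/vhosts/system/domainname.com/etc/php.ini -f /var/www/vhosts/domainname.com/httpdocs/store/bin/magento cron:run > /var/www/vhosts/domainname.com/httpdocs/store/var/log/magento.cron.log 2>&1

(The 2>&1 is optional; it just captures errors in the same log.) The other two entries need the same -c/-f reordering.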
I'm new to the Hadoop world and I'm having some trouble with my final data.
My purpose is to extract data from a Facebook page (I'm using the restfb API) with Flume; the data then goes to HDFS, where it is used by Hive to generate the final data. This happens every hour. All of this runs on Hue.
I don't know why, but sometimes I succeed in extracting data for the whole day, and some days I can only extract data for a few hours.
This is the data from Flume (screenshot of the hourly output directories omitted): on 03/21 I could only extract the first 4 hours of the day, while on 03/22 I could extract the whole day.
Some more info. My Flume config from Cloudera Manager:
FacebookAgent.sources = FacebookPageFansCity FacebookPageFansGenderAge FacebookPageFans FacebookPagePosts FacebookPageViews
FacebookAgent.channels = MemoryChannelFacebookPageFansCity MemoryChannelFacebookPageFansGenderAge MemoryChannelFacebookPageFans MemoryChannelFacebookPagePosts MemoryChannelFacebookPageViews
FacebookAgent.sinks = HDFSFacebookPageFansCity HDFSFacebookPageFansGenderAge HDFSFacebookPageFans HDFSFacebookPagePosts HDFSFacebookPageViews
# FacebookPageFansCity
FacebookAgent.sources.FacebookPageFansCity.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageFansCitySource
FacebookAgent.sources.FacebookPageFansCity.channels = MemoryChannelFacebookPageFansCity
FacebookAgent.sources.FacebookPageFansCity.appId = null
FacebookAgent.sources.FacebookPageFansCity.appSecret = null
FacebookAgent.sources.FacebookPageFansCity.accessToken = *confidential*
FacebookAgent.sources.FacebookPageFansCity.pageId = *confidential*
FacebookAgent.sources.FacebookPageFansCity.proxyEnabled = false
FacebookAgent.sources.FacebookPageFansCity.proxyHost = null
FacebookAgent.sources.FacebookPageFansCity.proxyPort = -1
FacebookAgent.sources.FacebookPageFansCity.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageFansCity.channel = MemoryChannelFacebookPageFansCity
FacebookAgent.sinks.HDFSFacebookPageFansCity.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pagefanscity/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageFansCity.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansCity.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageFansCity.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansCity.transactionCapacity = 1000
# FacebookPageFansGenderAge
FacebookAgent.sources.FacebookPageFansGenderAge.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageFansGenderAgeSource
FacebookAgent.sources.FacebookPageFansGenderAge.channels = MemoryChannelFacebookPageFansGenderAge
FacebookAgent.sources.FacebookPageFansGenderAge.appId = null
FacebookAgent.sources.FacebookPageFansGenderAge.appSecret = null
FacebookAgent.sources.FacebookPageFansGenderAge.accessToken = *confidential*
FacebookAgent.sources.FacebookPageFansGenderAge.pageId = *confidential*
FacebookAgent.sources.FacebookPageFansGenderAge.proxyEnabled = false
FacebookAgent.sources.FacebookPageFansGenderAge.proxyHost = null
FacebookAgent.sources.FacebookPageFansGenderAge.proxyPort = -1
FacebookAgent.sources.FacebookPageFansGenderAge.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.channel = MemoryChannelFacebookPageFansGenderAge
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pagefansgenderage/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageFansGenderAge.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansGenderAge.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageFansGenderAge.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFansGenderAge.transactionCapacity = 1000
# FacebookPageFans
FacebookAgent.sources.FacebookPageFans.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageFansSource
FacebookAgent.sources.FacebookPageFans.channels = MemoryChannelFacebookPageFans
FacebookAgent.sources.FacebookPageFans.appId = null
FacebookAgent.sources.FacebookPageFans.appSecret = null
FacebookAgent.sources.FacebookPageFans.accessToken = *confidential*
FacebookAgent.sources.FacebookPageFans.pageId = *confidential*
FacebookAgent.sources.FacebookPageFans.proxyEnabled = false
FacebookAgent.sources.FacebookPageFans.proxyHost = null
FacebookAgent.sources.FacebookPageFans.proxyPort = -1
FacebookAgent.sources.FacebookPageFans.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageFans.channel = MemoryChannelFacebookPageFans
FacebookAgent.sinks.HDFSFacebookPageFans.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pagefans/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageFans.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFans.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageFans.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageFans.transactionCapacity = 1000
# FacebookPagePosts
FacebookAgent.sources.FacebookPagePosts.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPagePostsSource
FacebookAgent.sources.FacebookPagePosts.channels = MemoryChannelFacebookPagePosts
FacebookAgent.sources.FacebookPagePosts.appId = null
FacebookAgent.sources.FacebookPagePosts.appSecret = null
FacebookAgent.sources.FacebookPagePosts.accessToken = *confidential*
FacebookAgent.sources.FacebookPagePosts.pageId = *confidential*
FacebookAgent.sources.FacebookPagePosts.proxyEnabled = false
FacebookAgent.sources.FacebookPagePosts.proxyHost = null
FacebookAgent.sources.FacebookPagePosts.proxyPort = -1
FacebookAgent.sources.FacebookPagePosts.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPagePosts.channel = MemoryChannelFacebookPagePosts
FacebookAgent.sinks.HDFSFacebookPagePosts.type = hdfs
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pageposts/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPagePosts.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPagePosts.type = memory
FacebookAgent.channels.MemoryChannelFacebookPagePosts.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPagePosts.transactionCapacity = 5000
# FacebookPageViews
FacebookAgent.sources.FacebookPageViews.type = br.com.tsystems.hadoop.flume.source.restfb.FacebookPageViewsSource
FacebookAgent.sources.FacebookPageViews.channels = MemoryChannelFacebookPageViews
FacebookAgent.sources.FacebookPageViews.appId = null
FacebookAgent.sources.FacebookPageViews.appSecret = null
FacebookAgent.sources.FacebookPageViews.accessToken = *confidential*
FacebookAgent.sources.FacebookPageViews.pageId = *confidential*
FacebookAgent.sources.FacebookPageViews.proxyEnabled = false
FacebookAgent.sources.FacebookPageViews.proxyHost = null
FacebookAgent.sources.FacebookPageViews.proxyPort = -1
FacebookAgent.sources.FacebookPageViews.refreshInterval = 3600
FacebookAgent.sinks.HDFSFacebookPageViews.channel = MemoryChannelFacebookPageViews
FacebookAgent.sinks.HDFSFacebookPageViews.type = hdfs
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.path = hdfs://hdoop01:8020/user/flume/pocfacebook/pageviews/%Y%m%d%H
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.fileType = DataStream
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.writeFormat = Text
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.batchSize = 1000
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.rollSize = 0
FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.rollCount = 10000
FacebookAgent.channels.MemoryChannelFacebookPageViews.type = memory
FacebookAgent.channels.MemoryChannelFacebookPageViews.capacity = 10000
FacebookAgent.channels.MemoryChannelFacebookPageViews.transactionCapacity = 1000
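One assumption I have not verified: the %Y%m%d%H escapes in hdfs.path require a timestamp header on each event, and since the restfb sources are custom I don't know whether they always set one. A sketch of the HDFS sink setting that falls back to the local clock instead (shown for one sink, the same would apply to the others):

FacebookAgent.sinks.HDFSFacebookPageViews.hdfs.useLocalTimeStamp = true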
Can anybody help me?
UPDATE
My coordinator from Oozie (screenshot omitted).
I'm trying to use Sphinx with a service called questasy (nobody will know it). Our Dutch colleagues did this before, and the software definitely gives us the ability to run searches via Sphinx.
So here's the problem I've got:
I set up the questasy portal, enabled the questasy usage and the portal runs perfectly.
I unpacked Sphinx to C:/Sphinx, created the /data and /log directories.
I set up the config file and ran the indexer. It works.
I installed searchd as a service with the config and it works and runs.
BUT now when I try to search in the portal, it shows me a message like "internal error. Please try again later". When I look into the query.log there is nothing in it, so I think the query isn't sent to the searchd service. I checked the config, I checked the port it is listening on, and everything is just like our colleagues have it.
Does anybody know about a common bug or problem or something like this that we missed?
Here is my .conf:
# Questasy configuration file for sphinx
#
# To handle the Sphinx requirement that every document have a unique 32-bit ID,
# use a unique number for each index as the first 8 bits, and then use
# the normal index from the database for the last 24 bits.
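# For example, with index id 1 (English Question Text) and database row id 42,
# the document id becomes (1<<24)|42 = 16777216 + 42 = 16777258 (illustrative values).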
# Here is the list of "index ids"
# 1 - English Question Text
# 2 - Dutch Question Text
# 3 - Concepts
# 4 - Variables
# 5 - Study Units
# 6 - Publications
#
# The full index will combine all of these indexes
#
# COMMANDS
# To index all of the files (when searchd is not running), use the command:
# indexer.exe --config qbase.conf --all
# To index all of the files (when searchd is running), use the command:
# indexer.exe --config qbase.conf --all --rotate
# Set up searchd as a service with the command
# searchd.exe --install --config c:\full\path\to\qbase.conf
# Stop searchd service with the command
# searchd.exe --stop --config c:\full\path\to\qbase.conf
# Remove searchd service with the command
# searchd.exe --delete --config c:\full\path\to\qbase.conf
# To just run searchd for development/testing
# searchd.exe --config qbase.conf
# base class with basic connection information
source base_source
{
type = mysql
sql_host = localhost
sql_user = root
sql_pass =
sql_db = questasy
sql_port = 3306 # optional, default is 3306
}
# Query for English Question Text
source questions_english : base_source
{
sql_query = SELECT ((1<<24)|QuestionItem.id) as id, StudyUnit.id as study_unit_id, QuestionItem.lh_text_1 as question_text, GROUP_CONCAT(Code.lt_label_1 SEPARATOR ' ') as answer_text FROM `question_items` AS `QuestionItem` LEFT JOIN `question_schemes` AS `QuestionScheme` ON (`QuestionItem`.`question_scheme_id` = `QuestionScheme`.`id`) LEFT JOIN `data_collections` AS `DataCollection` ON (`DataCollection`.`id` = `QuestionScheme`.`data_collection_id`) LEFT JOIN `study_units` AS `StudyUnit` ON (`StudyUnit`.`id` = `DataCollection`.`study_unit_id`) LEFT JOIN `response_domains` AS `ResponseDomain` ON (`QuestionItem`.`response_domain_id` = `ResponseDomain`.`id`) LEFT JOIN `code_schemes` As `CodeScheme` ON (`ResponseDomain`.`code_scheme_id` = `CodeScheme`.`id` AND `ResponseDomain`.`domain_type`=4) LEFT JOIN `codes` AS `Code` ON (`Code`.`code_scheme_id` = `CodeScheme`.`id`) WHERE `StudyUnit`.`published` >= 20 GROUP BY QuestionItem.id
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/question_items/view/',$id) AS URL
}
# Query for Dutch Question Text
source questions_dutch : base_source
{
sql_query = SELECT ((2<<24)|QuestionItem.id) as id, StudyUnit.id as study_unit_id, QuestionItem.lh_text_2 as question_text, GROUP_CONCAT(Code.lt_label_2 SEPARATOR ' ') as answer_text FROM `question_items` AS `QuestionItem` LEFT JOIN `question_schemes` AS `QuestionScheme` ON (`QuestionItem`.`question_scheme_id` = `QuestionScheme`.`id`) LEFT JOIN `data_collections` AS `DataCollection` ON (`DataCollection`.`id` = `QuestionScheme`.`data_collection_id`) LEFT JOIN `study_units` AS `StudyUnit` ON (`StudyUnit`.`id` = `DataCollection`.`study_unit_id`) LEFT JOIN `response_domains` AS `ResponseDomain` ON (`QuestionItem`.`response_domain_id` = `ResponseDomain`.`id`) LEFT JOIN `code_schemes` As `CodeScheme` ON (`ResponseDomain`.`code_scheme_id` = `CodeScheme`.`id` AND `ResponseDomain`.`domain_type`=4) LEFT JOIN `codes` AS `Code` ON (`Code`.`code_scheme_id` = `CodeScheme`.`id`) WHERE `StudyUnit`.`published` >= 20 GROUP BY QuestionItem.id
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/question_items/view/',$id) AS URL
}
# Query for Concepts
source concepts : base_source
{
sql_query = SELECT ((3<<24)|Concept.id) as id, Concept.lt_label_1 as concept_label, Concept.lh_description_1 as concept_description FROM `concepts` AS `Concept`
# sql_query_info = SELECT CONCAT('/concepts/view/',$id) AS URL
}
# Query for Data Variable
source variables : base_source
{
sql_query = SELECT ((4<<24)|DataVariable.id) as id, StudyUnit.id as study_unit_id, DataVariable.name as variable_name, DataVariable.lh_label_1 as variable_label FROM `data_variables` AS `DataVariable` LEFT JOIN `variable_schemes` AS `VariableScheme` ON (`DataVariable`.`variable_scheme_id` = `VariableScheme`.`id`) LEFT JOIN `base_logical_products` AS `BaseLogicalProduct` ON (`BaseLogicalProduct`.`id` = `VariableScheme`.`base_logical_product_id`) LEFT JOIN `study_units` AS `StudyUnit` ON (`StudyUnit`.`id` = `BaseLogicalProduct`.`study_unit_id`) WHERE `StudyUnit`.`published` >= 15
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/data_variables/view/',$id) AS URL
}
# Query for Study Units
source study_units : base_source
{
sql_query = SELECT ((5<<24)|StudyUnit.id) as id, StudyUnit.id as study_unit_id, StudyUnit.fulltitle as study_unit_name, StudyUnit.subtitle as study_unit_subtitle, StudyUnit.alternate_title AS study_unit_alternatetitle, StudyUnit.lh_note_1 as study_unit_note, StudyUnit.lh_purpose_1 as study_unit_purpose, StudyUnit.lh_abstract_1 as study_unit_abstract, StudyUnit.creator as study_unit_creator FROM study_units AS StudyUnit WHERE `StudyUnit`.`published` >= 10
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/study_units/view/',$id) AS URL
}
# Query for Publications
source publications : base_source
{
sql_query = SELECT ((6<<24)|Publication.id) as id, Publication.id as publication_id, Publication.title as publication_name, Publication.subtitle as publication_subtitle, Publication.creator as publication_creator, Publication.contributor as publication_contributor, Publication.abstract as publication_abstract, Publication.lh_note_1 as publication_note, Publication.source as publication_source FROM publications AS Publication WHERE NOT(`Publication`.`accepted_timestamp` IS NULL)
# sql_query_info = SELECT CONCAT('/publications/view/',$id) AS URL
}
# Query for Hosted Files - Other materials
source other_materials : base_source
{
sql_query = SELECT ((7<<24)|HostedFile.id) as id, OtherMaterial.title as hosted_file_title, HostedFile.name as hosted_file_name, StudyUnit.id as study_unit_id FROM `hosted_files` as `HostedFile`, `other_materials` as OtherMaterial, `study_units` as `StudyUnit` WHERE OtherMaterial.hosted_file_id = HostedFile.id AND OtherMaterial.study_unit_id = StudyUnit.id AND `StudyUnit`.`published` >= 20
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/hosted_files/download/',$id) AS URL
}
# Query for Hosted Files - Datasets
source physical_instances : base_source
{
sql_query = SELECT ((8<<24)|HostedFile.id) as id, PhysicalInstance.name as hosted_file_name, StudyUnit.id as study_unit_id FROM `hosted_files` as `HostedFile`, `physical_instances` as PhysicalInstance, `study_units` as `StudyUnit` WHERE PhysicalInstance.hosted_file_id = HostedFile.id AND PhysicalInstance.study_unit_id = StudyUnit.id AND `StudyUnit`.`published` >= 20
sql_attr_uint = study_unit_id
# sql_query_info = SELECT CONCAT('/hosted_files/download/',$id) AS URL
}
# Query for Physical Data Products (Variable Schemes)
source physical_data_products : base_source
{
sql_query = SELECT ((9<<24)| PhysicalDataProduct.id) as id, PhysicalDataProduct.name FROM `physical_data_products` AS `PhysicalDataProduct`, `study_units` as `StudyUnit` WHERE PhysicalDataProduct.study_unit_id = StudyUnit.id AND PhysicalDataProduct.deleted = 0 AND StudyUnit.published >= 20
}
# English Question Text Index
index questions_english_index
{
source = questions_english
path = C:\Sphinx\data\questions_english_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Dutch Question Text Index
index questions_dutch_index
{
source = questions_dutch
path = C:\Sphinx\data\questions_dutch_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Concept Index
index concepts_index
{
source = concepts
path = C:\Sphinx\data\concepts_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Variable Index
index variables_index
{
source = variables
path = C:\Sphinx\data\variables_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Study Unit Index
index study_units_index
{
source = study_units
path = C:\Sphinx\data\study_units_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Publication Index
index publications_index
{
source = publications
path = C:\Sphinx\data\publications_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Other Materials Index
index other_materials_index
{
source = other_materials
path = C:\Sphinx\data\other_materials_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Datasets file Index
index physical_instances_index
{
source = physical_instances
path = C:\Sphinx\data\physical_instances_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Datasets Index
index physical_data_products_index
{
source = physical_data_products
path = C:\Sphinx\data\physical_data_products_index
docinfo = extern
mlock = 0
morphology = stem_en
min_word_len = 3
min_prefix_len = 0
min_infix_len = 3
# enable_star = 1
html_strip = 1
# charset_type = utf-8
}
# Full Index - merge all of the other indexes
index full_index
{
type = distributed
local = questions_english_index
local = questions_dutch_index
local = concepts_index
local = variables_index
local = study_units_index
local = publications_index
local = other_materials_index
local = physical_instances_index
local = physical_data_products_index
}
indexer
{
# memory limit, in bytes, kilobytes (16384K) or megabytes (256M)
# optional, default is 32M, max is 2047M, recommended is 256M to 1024M
mem_limit = 256M
# maximum IO calls per second (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iops = 40
# maximum IO call size, bytes (for I/O throttling)
# optional, default is 0 (unlimited)
#
# max_iosize = 1048576
}
# Settings for the searchd service
searchd
{
# port = 3312
log = C:\Sphinx\log\searchd.log
query_log = C:\Sphinx\log\query.log
pid_file = C:\Sphinx\log\searchd.pid
listen = 127.0.0.1
}
# C:\Sphinx\bin\searchd --config C:\xampp\htdocs\sphinx\vendors\questasy.conf
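One thing I am not sure about: with the port line commented out, searchd listens on its compiled-in default port (9312 on recent Sphinx versions) rather than 3312, so if the portal expects 3312 it may need to be pinned explicitly. A sketch of that change inside the searchd section (3312 is just the value from the commented-out line):

listen = 127.0.0.1:3312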
Thanks in advance