-rwxr-xr-x  configure  4
-rw-r--r--  configure.ac  2
-rw-r--r--  rcynic-ng/Doxyfile  1679
-rw-r--r--  rcynic-ng/Makefile.in  69
-rw-r--r--  rcynic-ng/README  452
-rw-r--r--  rcynic-ng/defstack.awk  58
-rwxr-xr-x  rcynic-ng/installation-scripts/darwin/RCynic/RCynic  75
-rw-r--r--  rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist  19
-rw-r--r--  rcynic-ng/installation-scripts/darwin/install.sh  193
-rw-r--r--  rcynic-ng/installation-scripts/deinstall.sh  5
-rw-r--r--  rcynic-ng/installation-scripts/freebsd/install.sh  151
-rwxr-xr-x  rcynic-ng/installation-scripts/freebsd/rc.d.rcynic  71
-rw-r--r--  rcynic-ng/installation-scripts/install.sh  14
-rw-r--r--  rcynic-ng/installation-scripts/linux/install.sh.in  236
-rw-r--r--  rcynic-ng/installation-scripts/sample-rcynic.conf  27
-rwxr-xr-x  rcynic-ng/make-tal.sh  42
-rw-r--r--  rcynic-ng/rcynic.c  3580
-rw-r--r--  rcynic-ng/rcynic.xsl  221
-rw-r--r--  rcynic-ng/sample-trust-anchors/README  13
-rw-r--r--  rcynic-ng/sample-trust-anchors/afrinic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/apnic.cer  bin 0 -> 8069 bytes
-rw-r--r--  rcynic-ng/sample-trust-anchors/apnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/bbn-testbed.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/lacnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal  9
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-apnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-arin.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-ripe.tal  8
-rw-r--r--  rcynic-ng/show.awk  75
-rw-r--r--  rcynic-ng/show.sh.in  20
-rw-r--r--  rcynic-ng/show.xsl  55
-rw-r--r--  rcynic-ng/static-rsync/Makefile.in  41
-rw-r--r--  rcynic-ng/static-rsync/README  15
-rw-r--r--  rcynic-ng/static-rsync/patches/patch-CVE-2007-4091  60
-rw-r--r--  rcynic-ng/static-rsync/rsync-2.6.9.tar.gz  bin 0 -> 811841 bytes
36 files changed, 7249 insertions, 1 deletions
diff --git a/configure b/configure
index 4009abb4..8dac5494 100755
--- a/configure
+++ b/configure
@@ -4264,7 +4264,7 @@ test $build_django = yes && RPKID_SUBDIRS="$RPKID_SUBDIRS portal-gui"
-ac_config_files="$ac_config_files Makefile rcynic/Makefile rcynic/static-rsync/Makefile utils/Makefile utils/find_roa/Makefile utils/hashdir/Makefile utils/print_rpki_manifest/Makefile utils/print_roa/Makefile utils/uri/Makefile"
+ac_config_files="$ac_config_files Makefile rcynic/Makefile rcynic/static-rsync/Makefile rcynic-ng/Makefile rcynic-ng/static-rsync/Makefile utils/Makefile utils/find_roa/Makefile utils/hashdir/Makefile utils/print_rpki_manifest/Makefile utils/print_roa/Makefile utils/uri/Makefile"
case $host_os in
@@ -5151,6 +5151,8 @@ do
"Makefile") CONFIG_FILES="$CONFIG_FILES Makefile" ;;
"rcynic/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic/Makefile" ;;
"rcynic/static-rsync/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic/static-rsync/Makefile" ;;
+ "rcynic-ng/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic-ng/Makefile" ;;
+ "rcynic-ng/static-rsync/Makefile") CONFIG_FILES="$CONFIG_FILES rcynic-ng/static-rsync/Makefile" ;;
"utils/Makefile") CONFIG_FILES="$CONFIG_FILES utils/Makefile" ;;
"utils/find_roa/Makefile") CONFIG_FILES="$CONFIG_FILES utils/find_roa/Makefile" ;;
"utils/hashdir/Makefile") CONFIG_FILES="$CONFIG_FILES utils/hashdir/Makefile" ;;
diff --git a/configure.ac b/configure.ac
index ca1ee198..e88ef987 100644
--- a/configure.ac
+++ b/configure.ac
@@ -318,6 +318,8 @@ AC_SUBST(RPKID_SUBDIRS)
AC_CONFIG_FILES([Makefile
rcynic/Makefile
rcynic/static-rsync/Makefile
+ rcynic-ng/Makefile
+ rcynic-ng/static-rsync/Makefile
utils/Makefile
utils/find_roa/Makefile
utils/hashdir/Makefile
diff --git a/rcynic-ng/Doxyfile b/rcynic-ng/Doxyfile
new file mode 100644
index 00000000..c52ab27b
--- /dev/null
+++ b/rcynic-ng/Doxyfile
@@ -0,0 +1,1679 @@
+# Doxyfile 1.7.3
+
+# This file describes the settings to be used by the documentation system
+# doxygen (www.doxygen.org) for a project.
+#
+# All text after a hash (#) is considered a comment and will be ignored.
+# The format is:
+# TAG = value [value, ...]
+# For lists items can also be appended using:
+# TAG += value [value, ...]
+# Values that contain spaces should be placed between quotes (" ").
+
+#---------------------------------------------------------------------------
+# Project related configuration options
+#---------------------------------------------------------------------------
+
+# This tag specifies the encoding used for all characters in the config file
+# that follow. The default is UTF-8 which is also the encoding used for all
+# text before the first occurrence of this tag. Doxygen uses libiconv (or the
+# iconv built into libc) for the transcoding. See
+# http://www.gnu.org/software/libiconv for the list of possible encodings.
+
+DOXYFILE_ENCODING = UTF-8
+
+# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
+# by quotes) that should identify the project.
+
+PROJECT_NAME = rcynic
+
+# The PROJECT_NUMBER tag can be used to enter a project or revision number.
+# This could be handy for archiving the generated documentation or
+# if some version control system is used.
+
+PROJECT_NUMBER = 1.0
+
+# Using the PROJECT_BRIEF tag one can provide an optional one line description for a project that appears at the top of each page and should give viewer a quick idea about the purpose of the project. Keep the description short.
+
+PROJECT_BRIEF =
+
+# With the PROJECT_LOGO tag one can specify an logo or icon that is
+# included in the documentation. The maximum height of the logo should not
+# exceed 55 pixels and the maximum width should not exceed 200 pixels.
+# Doxygen will copy the logo to the output directory.
+
+PROJECT_LOGO =
+
+# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
+# base path where the generated documentation will be put.
+# If a relative path is entered, it will be relative to the location
+# where doxygen was started. If left blank the current directory will be used.
+
+OUTPUT_DIRECTORY = doc
+
+# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
+# 4096 sub-directories (in 2 levels) under the output directory of each output
+# format and will distribute the generated files over these directories.
+# Enabling this option can be useful when feeding doxygen a huge amount of
+# source files, where putting all generated files in the same directory would
+# otherwise cause performance problems for the file system.
+
+CREATE_SUBDIRS = NO
+
+# The OUTPUT_LANGUAGE tag is used to specify the language in which all
+# documentation generated by doxygen is written. Doxygen will use this
+# information to generate all constant output in the proper language.
+# The default language is English, other supported languages are:
+# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
+# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
+# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
+# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
+# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
+# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
+
+OUTPUT_LANGUAGE = English
+
+# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
+# include brief member descriptions after the members that are listed in
+# the file and class documentation (similar to JavaDoc).
+# Set to NO to disable this.
+
+BRIEF_MEMBER_DESC = YES
+
+# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
+# the brief description of a member or function before the detailed description.
+# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
+# brief descriptions will be completely suppressed.
+
+REPEAT_BRIEF = YES
+
+# This tag implements a quasi-intelligent brief description abbreviator
+# that is used to form the text in various listings. Each string
+# in this list, if found as the leading text of the brief description, will be
+# stripped from the text and the result after processing the whole list, is
+# used as the annotated text. Otherwise, the brief description is used as-is.
+# If left blank, the following values are used ("$name" is automatically
+# replaced with the name of the entity): "The $name class" "The $name widget"
+# "The $name file" "is" "provides" "specifies" "contains"
+# "represents" "a" "an" "the"
+
+ABBREVIATE_BRIEF =
+
+# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
+# Doxygen will generate a detailed section even if there is only a brief
+# description.
+
+ALWAYS_DETAILED_SEC = NO
+
+# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
+# inherited members of a class in the documentation of that class as if those
+# members were ordinary class members. Constructors, destructors and assignment
+# operators of the base classes will not be shown.
+
+INLINE_INHERITED_MEMB = NO
+
+# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
+# path before files name in the file list and in the header files. If set
+# to NO the shortest path that makes the file name unique will be used.
+
+FULL_PATH_NAMES = YES
+
+# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
+# can be used to strip a user-defined part of the path. Stripping is
+# only done if one of the specified strings matches the left-hand part of
+# the path. The tag can be used to show relative paths in the file list.
+# If left blank the directory from which doxygen is run is used as the
+# path to strip.
+
+STRIP_FROM_PATH =
+
+# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
+# the path mentioned in the documentation of a class, which tells
+# the reader which header file to include in order to use a class.
+# If left blank only the name of the header file containing the class
+# definition is used. Otherwise one should specify the include paths that
+# are normally passed to the compiler using the -I flag.
+
+STRIP_FROM_INC_PATH =
+
+# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
+# (but less readable) file names. This can be useful if your file system
+# doesn't support long names like on DOS, Mac, or CD-ROM.
+
+SHORT_NAMES = NO
+
+# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
+# will interpret the first line (until the first dot) of a JavaDoc-style
+# comment as the brief description. If set to NO, the JavaDoc
+# comments will behave just like regular Qt-style comments
+# (thus requiring an explicit @brief command for a brief description.)
+
+JAVADOC_AUTOBRIEF = NO
+
+# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
+# interpret the first line (until the first dot) of a Qt-style
+# comment as the brief description. If set to NO, the comments
+# will behave just like regular Qt-style comments (thus requiring
+# an explicit \brief command for a brief description.)
+
+QT_AUTOBRIEF = NO
+
+# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
+# treat a multi-line C++ special comment block (i.e. a block of //! or ///
+# comments) as a brief description. This used to be the default behaviour.
+# The new default is to treat a multi-line C++ comment block as a detailed
+# description. Set this tag to YES if you prefer the old behaviour instead.
+
+MULTILINE_CPP_IS_BRIEF = NO
+
+# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
+# member inherits the documentation from any documented member that it
+# re-implements.
+
+INHERIT_DOCS = YES
+
+# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
+# a new page for each member. If set to NO, the documentation of a member will
+# be part of the file/class/namespace that contains it.
+
+SEPARATE_MEMBER_PAGES = YES
+
+# The TAB_SIZE tag can be used to set the number of spaces in a tab.
+# Doxygen uses this value to replace tabs by spaces in code fragments.
+
+TAB_SIZE = 8
+
+# This tag can be used to specify a number of aliases that acts
+# as commands in the documentation. An alias has the form "name=value".
+# For example adding "sideeffect=\par Side Effects:\n" will allow you to
+# put the command \sideeffect (or @sideeffect) in the documentation, which
+# will result in a user-defined paragraph with heading "Side Effects:".
+# You can put \n's in the value part of an alias to insert newlines.
+
+ALIASES =
+
+# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
+# sources only. Doxygen will then generate output that is more tailored for C.
+# For instance, some of the names that are used will be different. The list
+# of all members will be omitted, etc.
+
+OPTIMIZE_OUTPUT_FOR_C = YES
+
+# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
+# sources only. Doxygen will then generate output that is more tailored for
+# Java. For instance, namespaces will be presented as packages, qualified
+# scopes will look different, etc.
+
+OPTIMIZE_OUTPUT_JAVA = NO
+
+# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
+# sources only. Doxygen will then generate output that is more tailored for
+# Fortran.
+
+OPTIMIZE_FOR_FORTRAN = NO
+
+# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
+# sources. Doxygen will then generate output that is tailored for
+# VHDL.
+
+OPTIMIZE_OUTPUT_VHDL = NO
+
+# Doxygen selects the parser to use depending on the extension of the files it
+# parses. With this tag you can assign which parser to use for a given extension.
+# Doxygen has a built-in mapping, but you can override or extend it using this
+# tag. The format is ext=language, where ext is a file extension, and language
+# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
+# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
+# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
+# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
+# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
+
+EXTENSION_MAPPING =
+
+# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
+# to include (a tag file for) the STL sources as input, then you should
+# set this tag to YES in order to let doxygen match functions declarations and
+# definitions whose arguments contain STL classes (e.g. func(std::string); v.s.
+# func(std::string) {}). This also makes the inheritance and collaboration
+# diagrams that involve STL classes more complete and accurate.
+
+BUILTIN_STL_SUPPORT = NO
+
+# If you use Microsoft's C++/CLI language, you should set this option to YES to
+# enable parsing support.
+
+CPP_CLI_SUPPORT = NO
+
+# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
+# Doxygen will parse them like normal C++ but will assume all classes use public
+# instead of private inheritance when no explicit protection keyword is present.
+
+SIP_SUPPORT = NO
+
+# For Microsoft's IDL there are propget and propput attributes to indicate getter
+# and setter methods for a property. Setting this option to YES (the default)
+# will make doxygen replace the get and set methods by a property in the
+# documentation. This will only work if the methods are indeed getting or
+# setting a simple type. If this is not the case, or you want to show the
+# methods anyway, you should set this option to NO.
+
+IDL_PROPERTY_SUPPORT = YES
+
+# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
+# tag is set to YES, then doxygen will reuse the documentation of the first
+# member in the group (if any) for the other members of the group. By default
+# all members of a group must be documented explicitly.
+
+DISTRIBUTE_GROUP_DOC = NO
+
+# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
+# the same type (for instance a group of public functions) to be put as a
+# subgroup of that type (e.g. under the Public Functions section). Set it to
+# NO to prevent subgrouping. Alternatively, this can be done per class using
+# the \nosubgrouping command.
+
+SUBGROUPING = YES
+
+# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
+# is documented as struct, union, or enum with the name of the typedef. So
+# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
+# with name TypeT. When disabled the typedef will appear as a member of a file,
+# namespace, or class. And the struct will be named TypeS. This can typically
+# be useful for C code in case the coding convention dictates that all compound
+# types are typedef'ed and only the typedef is referenced, never the tag name.
+
+TYPEDEF_HIDES_STRUCT = NO
+
+# The SYMBOL_CACHE_SIZE determines the size of the internal cache use to
+# determine which symbols to keep in memory and which to flush to disk.
+# When the cache is full, less often used symbols will be written to disk.
+# For small to medium size projects (<1000 input files) the default value is
+# probably good enough. For larger projects a too small cache size can cause
+# doxygen to be busy swapping symbols to and from disk most of the time
+# causing a significant performance penalty.
+# If the system has enough physical memory increasing the cache will improve the
+# performance by keeping more symbols in memory. Note that the value works on
+# a logarithmic scale so increasing the size by one will roughly double the
+# memory usage. The cache size is given by this formula:
+# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
+# corresponding to a cache size of 2^16 = 65536 symbols
+
+SYMBOL_CACHE_SIZE = 0
+
+#---------------------------------------------------------------------------
+# Build related configuration options
+#---------------------------------------------------------------------------
+
+# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
+# documentation are documented, even if no documentation was available.
+# Private class members and static file members will be hidden unless
+# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
+
+EXTRACT_ALL = YES
+
+# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
+# will be included in the documentation.
+
+EXTRACT_PRIVATE = YES
+
+# If the EXTRACT_STATIC tag is set to YES all static members of a file
+# will be included in the documentation.
+
+EXTRACT_STATIC = YES
+
+# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
+# defined locally in source files will be included in the documentation.
+# If set to NO only classes defined in header files are included.
+
+EXTRACT_LOCAL_CLASSES = YES
+
+# This flag is only useful for Objective-C code. When set to YES local
+# methods, which are defined in the implementation section but not in
+# the interface are included in the documentation.
+# If set to NO (the default) only methods in the interface are included.
+
+EXTRACT_LOCAL_METHODS = NO
+
+# If this flag is set to YES, the members of anonymous namespaces will be
+# extracted and appear in the documentation as a namespace called
+# 'anonymous_namespace{file}', where file will be replaced with the base
+# name of the file that contains the anonymous namespace. By default
+# anonymous namespaces are hidden.
+
+EXTRACT_ANON_NSPACES = YES
+
+# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
+# undocumented members of documented classes, files or namespaces.
+# If set to NO (the default) these members will be included in the
+# various overviews, but no documentation section is generated.
+# This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_MEMBERS = NO
+
+# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
+# undocumented classes that are normally visible in the class hierarchy.
+# If set to NO (the default) these classes will be included in the various
+# overviews. This option has no effect if EXTRACT_ALL is enabled.
+
+HIDE_UNDOC_CLASSES = NO
+
+# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
+# friend (class|struct|union) declarations.
+# If set to NO (the default) these declarations will be included in the
+# documentation.
+
+HIDE_FRIEND_COMPOUNDS = NO
+
+# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
+# documentation blocks found inside the body of a function.
+# If set to NO (the default) these blocks will be appended to the
+# function's detailed documentation block.
+
+HIDE_IN_BODY_DOCS = NO
+
+# The INTERNAL_DOCS tag determines if documentation
+# that is typed after a \internal command is included. If the tag is set
+# to NO (the default) then the documentation will be excluded.
+# Set it to YES to include the internal documentation.
+
+INTERNAL_DOCS = NO
+
+# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
+# file names in lower-case letters. If set to YES upper-case letters are also
+# allowed. This is useful if you have classes or files whose names only differ
+# in case and if your file system supports case sensitive file names. Windows
+# and Mac users are advised to set this option to NO.
+
+CASE_SENSE_NAMES = YES
+
+# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
+# will show members with their full class and namespace scopes in the
+# documentation. If set to YES the scope will be hidden.
+
+HIDE_SCOPE_NAMES = NO
+
+# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
+# will put a list of the files that are included by a file in the documentation
+# of that file.
+
+SHOW_INCLUDE_FILES = NO
+
+# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
+# will list include files with double quotes in the documentation
+# rather than with sharp brackets.
+
+FORCE_LOCAL_INCLUDES = NO
+
+# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
+# is inserted in the documentation for inline members.
+
+INLINE_INFO = YES
+
+# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
+# will sort the (detailed) documentation of file and class members
+# alphabetically by member name. If set to NO the members will appear in
+# declaration order.
+
+SORT_MEMBER_DOCS = YES
+
+# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
+# brief documentation of file, namespace and class members alphabetically
+# by member name. If set to NO (the default) the members will appear in
+# declaration order.
+
+SORT_BRIEF_DOCS = NO
+
+# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
+# will sort the (brief and detailed) documentation of class members so that
+# constructors and destructors are listed first. If set to NO (the default)
+# the constructors will appear in the respective orders defined by
+# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
+# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
+# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
+
+SORT_MEMBERS_CTORS_1ST = NO
+
+# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
+# hierarchy of group names into alphabetical order. If set to NO (the default)
+# the group names will appear in their defined order.
+
+SORT_GROUP_NAMES = NO
+
+# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
+# sorted by fully-qualified names, including namespaces. If set to
+# NO (the default), the class list will be sorted only by class name,
+# not including the namespace part.
+# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
+# Note: This option applies only to the class list, not to the
+# alphabetical list.
+
+SORT_BY_SCOPE_NAME = NO
+
+# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do proper type resolution of all parameters of a function it will reject a
+# match between the prototype and the implementation of a member function even if there is only one candidate or it is obvious which candidate to choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING doxygen
+# will still accept a match between prototype and implementation in such cases.
+
+STRICT_PROTO_MATCHING = NO
+
+# The GENERATE_TODOLIST tag can be used to enable (YES) or
+# disable (NO) the todo list. This list is created by putting \todo
+# commands in the documentation.
+
+GENERATE_TODOLIST = YES
+
+# The GENERATE_TESTLIST tag can be used to enable (YES) or
+# disable (NO) the test list. This list is created by putting \test
+# commands in the documentation.
+
+GENERATE_TESTLIST = YES
+
+# The GENERATE_BUGLIST tag can be used to enable (YES) or
+# disable (NO) the bug list. This list is created by putting \bug
+# commands in the documentation.
+
+GENERATE_BUGLIST = YES
+
+# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
+# disable (NO) the deprecated list. This list is created by putting
+# \deprecated commands in the documentation.
+
+GENERATE_DEPRECATEDLIST= YES
+
+# The ENABLED_SECTIONS tag can be used to enable conditional
+# documentation sections, marked by \if sectionname ... \endif.
+
+ENABLED_SECTIONS =
+
+# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
+# the initial value of a variable or macro consists of for it to appear in
+# the documentation. If the initializer consists of more lines than specified
+# here it will be hidden. Use a value of 0 to hide initializers completely.
+# The appearance of the initializer of individual variables and macros in the
+# documentation can be controlled using \showinitializer or \hideinitializer
+# command in the documentation regardless of this setting.
+
+MAX_INITIALIZER_LINES = 30
+
+# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
+# at the bottom of the documentation of classes and structs. If set to YES the
+# list will mention the files that were used to generate the documentation.
+
+SHOW_USED_FILES = YES
+
+# If the sources in your project are distributed over multiple directories
+# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
+# in the documentation. The default is NO.
+
+SHOW_DIRECTORIES = NO
+
+# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
+# This will remove the Files entry from the Quick Index and from the
+# Folder Tree View (if specified). The default is YES.
+
+SHOW_FILES = YES
+
+# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
+# Namespaces page.
+# This will remove the Namespaces entry from the Quick Index
+# and from the Folder Tree View (if specified). The default is YES.
+
+SHOW_NAMESPACES = YES
+
+# The FILE_VERSION_FILTER tag can be used to specify a program or script that
+# doxygen should invoke to get the current version for each file (typically from
+# the version control system). Doxygen will invoke the program by executing (via
+# popen()) the command <command> <input-file>, where <command> is the value of
+# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
+# provided by doxygen. Whatever the program writes to standard output
+# is used as the file version. See the manual for examples.
+
+FILE_VERSION_FILTER =
+
+# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
+# by doxygen. The layout file controls the global structure of the generated
+# output files in an output format independent way. The create the layout file
+# that represents doxygen's defaults, run doxygen with the -l option.
+# You can optionally specify a file name after the option, if omitted
+# DoxygenLayout.xml will be used as the name of the layout file.
+
+LAYOUT_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to warning and progress messages
+#---------------------------------------------------------------------------
+
+# The QUIET tag can be used to turn on/off the messages that are generated
+# by doxygen. Possible values are YES and NO. If left blank NO is used.
+
+QUIET = YES
+
+# The WARNINGS tag can be used to turn on/off the warning messages that are
+# generated by doxygen. Possible values are YES and NO. If left blank
+# NO is used.
+
+WARNINGS = YES
+
+# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
+# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
+# automatically be disabled.
+
+WARN_IF_UNDOCUMENTED = YES
+
+# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
+# potential errors in the documentation, such as not documenting some
+# parameters in a documented function, or documenting parameters that
+# don't exist or using markup commands wrongly.
+
+WARN_IF_DOC_ERROR = YES
+
+# The WARN_NO_PARAMDOC option can be enabled to get warnings for
+# functions that are documented, but have no documentation for their parameters
+# or return value. If set to NO (the default) doxygen will only warn about
+# wrong or incomplete parameter documentation, but not about the absence of
+# documentation.
+
+WARN_NO_PARAMDOC = NO
+
+# The WARN_FORMAT tag determines the format of the warning messages that
+# doxygen can produce. The string should contain the $file, $line, and $text
+# tags, which will be replaced by the file and line number from which the
+# warning originated and the warning text. Optionally the format may contain
+# $version, which will be replaced by the version of the file (if it could
+# be obtained via FILE_VERSION_FILTER)
+
+WARN_FORMAT = "$file:$line: $text"
+
+# The WARN_LOGFILE tag can be used to specify a file to which warning
+# and error messages should be written. If left blank the output is written
+# to stderr.
+
+WARN_LOGFILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the input files
+#---------------------------------------------------------------------------
+
+# The INPUT tag can be used to specify the files and/or directories that contain
+# documented source files. You may enter file names like "myfile.cpp" or
+# directories like "/usr/src/myproject". Separate the files or directories
+# with spaces.
+
+INPUT = rcynic.c
+
+# This tag can be used to specify the character encoding of the source files
+# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
+# also the default input encoding. Doxygen uses libiconv (or the iconv built
+# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
+# the list of possible encodings.
+
+INPUT_ENCODING = UTF-8
+
+# If the value of the INPUT tag contains directories, you can use the
+# FILE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank the following patterns are tested:
+# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
+# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
+# *.f90 *.f *.for *.vhd *.vhdl
+
+FILE_PATTERNS =
+
+# The RECURSIVE tag can be used to turn specify whether or not subdirectories
+# should be searched for input files as well. Possible values are YES and NO.
+# If left blank NO is used.
+
+RECURSIVE = NO
+
+# The EXCLUDE tag can be used to specify files and/or directories that should
+# excluded from the INPUT source files. This way you can easily exclude a
+# subdirectory from a directory tree whose root is specified with the INPUT tag.
+
+EXCLUDE =
+
+# The EXCLUDE_SYMLINKS tag can be used select whether or not files or
+# directories that are symbolic links (a Unix file system feature) are excluded
+# from the input.
+
+EXCLUDE_SYMLINKS = NO
+
+# If the value of the INPUT tag contains directories, you can use the
+# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
+# certain files from those directories. Note that the wildcards are matched
+# against the file with absolute path, so to exclude all test directories
+# for example use the pattern */test/*
+
+EXCLUDE_PATTERNS =
+
+# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
+# (namespaces, classes, functions, etc.) that should be excluded from the
+# output. The symbol name can be a fully qualified name, a word, or if the
+# wildcard * is used, a substring. Examples: ANamespace, AClass,
+# AClass::ANamespace, ANamespace::*Test
+
+EXCLUDE_SYMBOLS =
+
+# The EXAMPLE_PATH tag can be used to specify one or more files or
+# directories that contain example code fragments that are included (see
+# the \include command).
+
+EXAMPLE_PATH =
+
+# If the value of the EXAMPLE_PATH tag contains directories, you can use the
+# EXAMPLE_PATTERNS tag to specify one or more wildcard pattern (like *.cpp
+# and *.h) to filter out the source-files in the directories. If left
+# blank all files are included.
+
+EXAMPLE_PATTERNS =
+
+# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
+# searched for input files to be used with the \include or \dontinclude
+# commands irrespective of the value of the RECURSIVE tag.
+# Possible values are YES and NO. If left blank NO is used.
+
+EXAMPLE_RECURSIVE = NO
+
+# The IMAGE_PATH tag can be used to specify one or more files or
+# directories that contain image that are included in the documentation (see
+# the \image command).
+
+IMAGE_PATH =
+
+# The INPUT_FILTER tag can be used to specify a program that doxygen should
+# invoke to filter for each input file. Doxygen will invoke the filter program
+# by executing (via popen()) the command <filter> <input-file>, where <filter>
+# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
+# input file. Doxygen will then use the output that the filter program writes
+# to standard output.
+# If FILTER_PATTERNS is specified, this tag will be
+# ignored.
+
+INPUT_FILTER =
+
+# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
+# basis.
+# Doxygen will compare the file name with each pattern and apply the
+# filter if there is a match.
+# The filters are a list of the form:
+# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
+# info on how filters are used. If FILTER_PATTERNS is empty or if
+# non of the patterns match the file name, INPUT_FILTER is applied.
+
+FILTER_PATTERNS =
+
+# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
+# INPUT_FILTER) will be used to filter the input files when producing source
+# files to browse (i.e. when SOURCE_BROWSER is set to YES).
+
+FILTER_SOURCE_FILES = NO
+
+# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
+# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
+# and it is also possible to disable source filtering for a specific pattern
+# using *.ext= (so without naming a filter). This option only has effect when
+# FILTER_SOURCE_FILES is enabled.
+
+FILTER_SOURCE_PATTERNS =
+
+#---------------------------------------------------------------------------
+# configuration options related to source browsing
+#---------------------------------------------------------------------------
+
+# If the SOURCE_BROWSER tag is set to YES then a list of source files will
+# be generated. Documented entities will be cross-referenced with these sources.
+# Note: To get rid of all source code in the generated output, make sure also
+# VERBATIM_HEADERS is set to NO.
+
+SOURCE_BROWSER = YES
+
+# Setting the INLINE_SOURCES tag to YES will include the body
+# of functions and classes directly in the documentation.
+
+INLINE_SOURCES = NO
+
+# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
+# doxygen to hide any special comment blocks from generated source code
+# fragments. Normal C and C++ comments will always remain visible.
+
+STRIP_CODE_COMMENTS = NO
+
+# If the REFERENCED_BY_RELATION tag is set to YES
+# then for each documented function all documented
+# functions referencing it will be listed.
+
+REFERENCED_BY_RELATION = YES
+
+# If the REFERENCES_RELATION tag is set to YES
+# then for each documented function all documented entities
+# called/used by that function will be listed.
+
+REFERENCES_RELATION = YES
+
+# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
+# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
+# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
+# link to the source code.
+# Otherwise they will link to the documentation.
+
+REFERENCES_LINK_SOURCE = YES
+
+# If the USE_HTAGS tag is set to YES then the references to source code
+# will point to the HTML generated by the htags(1) tool instead of doxygen
+# built-in source browser. The htags tool is part of GNU's global source
+# tagging system (see http://www.gnu.org/software/global/global.html). You
+# will need version 4.8.6 or higher.
+
+USE_HTAGS = NO
+
+# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
+# will generate a verbatim copy of the header file for each class for
+# which an include is specified. Set to NO to disable this.
+
+VERBATIM_HEADERS = YES
+
+#---------------------------------------------------------------------------
+# configuration options related to the alphabetical class index
+#---------------------------------------------------------------------------
+
+# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
+# of all compounds will be generated. Enable this if the project
+# contains a lot of classes, structs, unions or interfaces.
+
+ALPHABETICAL_INDEX = YES
+
+# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
+# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
+# in which this list will be split (can be a number in the range [1..20])
+
+COLS_IN_ALPHA_INDEX = 5
+
+# In case all classes in a project start with a common prefix, all
+# classes will be put under the same header in the alphabetical index.
+# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
+# should be ignored while generating the index headers.
+
+IGNORE_PREFIX =
+
+#---------------------------------------------------------------------------
+# configuration options related to the HTML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
+# generate HTML output.
+
+GENERATE_HTML = YES
+
+# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `html' will be used as the default path.
+
+HTML_OUTPUT = html
+
+# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
+# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
+# doxygen will generate files with .html extension.
+
+HTML_FILE_EXTENSION = .html
+
+# The HTML_HEADER tag can be used to specify a personal HTML header for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard header.
+
+HTML_HEADER =
+
+# The HTML_FOOTER tag can be used to specify a personal HTML footer for
+# each generated HTML page. If it is left blank doxygen will generate a
+# standard footer.
+
+HTML_FOOTER =
+
+# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
+# style sheet that is used by each HTML page. It can be used to
+# fine-tune the look of the HTML output. If the tag is left blank doxygen
+# will generate a default style sheet. Note that doxygen will try to copy
+# the style sheet file to the HTML output directory, so don't put your own
+# stylesheet in the HTML output directory as well, or it will be erased!
+
+HTML_STYLESHEET =
+
+# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
+# Doxygen will adjust the colors in the stylesheet and background images
+# according to this color. Hue is specified as an angle on a colorwheel,
+# see http://en.wikipedia.org/wiki/Hue for more information.
+# For instance the value 0 represents red, 60 is yellow, 120 is green,
+# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
+# The allowed range is 0 to 359.
+
+HTML_COLORSTYLE_HUE = 220
+
+# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
+# the colors in the HTML output. For a value of 0 the output will use
+# grayscales only. A value of 255 will produce the most vivid colors.
+
+HTML_COLORSTYLE_SAT = 100
+
+# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
+# the luminance component of the colors in the HTML output. Values below
+# 100 gradually make the output lighter, whereas values above 100 make
+# the output darker. The value divided by 100 is the actual gamma applied,
+# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
+# and 100 does not change the gamma.
+
+HTML_COLORSTYLE_GAMMA = 80
+
+# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
+# page will contain the date and time when the page was generated. Setting
+# this to NO can help when comparing the output of multiple runs.
+
+HTML_TIMESTAMP = YES
+
+# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
+# files or namespaces will be aligned in HTML using tables. If set to
+# NO a bullet list will be used.
+
+HTML_ALIGN_MEMBERS = YES
+
+# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
+# documentation will contain sections that can be hidden and shown after the
+# page has loaded. For this to work a browser that supports
+# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox
+# Netscape 6.0+, Internet explorer 5.0+, Konqueror, or Safari).
+
+HTML_DYNAMIC_SECTIONS = NO
+
+# If the GENERATE_DOCSET tag is set to YES, additional index files
+# will be generated that can be used as input for Apple's Xcode 3
+# integrated development environment, introduced with OSX 10.5 (Leopard).
+# To create a documentation set, doxygen will generate a Makefile in the
+# HTML output directory. Running make will produce the docset in that
+# directory and running "make install" will install the docset in
+# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
+# it at startup.
+# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
+# for more information.
+
+GENERATE_DOCSET = NO
+
+# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
+# feed. A documentation feed provides an umbrella under which multiple
+# documentation sets from a single provider (such as a company or product suite)
+# can be grouped.
+
+DOCSET_FEEDNAME = "Doxygen generated docs"
+
+# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
+# should uniquely identify the documentation set bundle. This should be a
+# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
+# will append .docset to the name.
+
+DOCSET_BUNDLE_ID = org.doxygen.Project
+
+# When GENERATE_PUBLISHER_ID tag specifies a string that should uniquely identify
+# the documentation publisher. This should be a reverse domain-name style
+# string, e.g. com.mycompany.MyDocSet.documentation.
+
+DOCSET_PUBLISHER_ID = org.doxygen.Publisher
+
+# The GENERATE_PUBLISHER_NAME tag identifies the documentation publisher.
+
+DOCSET_PUBLISHER_NAME = Publisher
+
+# If the GENERATE_HTMLHELP tag is set to YES, additional index files
+# will be generated that can be used as input for tools like the
+# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
+# of the generated HTML documentation.
+
+GENERATE_HTMLHELP = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
+# be used to specify the file name of the resulting .chm file. You
+# can add a path in front of the file if the result should not be
+# written to the html output directory.
+
+CHM_FILE =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
+# be used to specify the location (absolute path including file name) of
+# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
+# the HTML help compiler on the generated index.hhp.
+
+HHC_LOCATION =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
+# controls if a separate .chi index file is generated (YES) or that
+# it should be included in the master .chm file (NO).
+
+GENERATE_CHI = NO
+
+# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
+# is used to encode HtmlHelp index (hhk), content (hhc) and project file
+# content.
+
+CHM_INDEX_ENCODING =
+
+# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
+# controls whether a binary table of contents is generated (YES) or a
+# normal table of contents (NO) in the .chm file.
+
+BINARY_TOC = NO
+
+# The TOC_EXPAND flag can be set to YES to add extra items for group members
+# to the contents of the HTML help documentation and to the tree view.
+
+TOC_EXPAND = NO
+
+# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
+# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
+# that can be used as input for Qt's qhelpgenerator to generate a
+# Qt Compressed Help (.qch) of the generated HTML documentation.
+
+GENERATE_QHP = NO
+
+# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
+# be used to specify the file name of the resulting .qch file.
+# The path specified is relative to the HTML output folder.
+
+QCH_FILE =
+
+# The QHP_NAMESPACE tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#namespace
+
+QHP_NAMESPACE =
+
+# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
+# Qt Help Project output. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#virtual-folders
+
+QHP_VIRTUAL_FOLDER = doc
+
+# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
+# add. For more information please see
+# http://doc.trolltech.com/qthelpproject.html#custom-filters
+
+QHP_CUST_FILTER_NAME =
+
+# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
+# custom filter to add. For more information please see
+# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
+# Qt Help Project / Custom Filters</a>.
+
+QHP_CUST_FILTER_ATTRS =
+
+# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
+# project's
+# filter section matches.
+# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
+# Qt Help Project / Filter Attributes</a>.
+
+QHP_SECT_FILTER_ATTRS =
+
+# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
+# be used to specify the location of Qt's qhelpgenerator.
+# If non-empty doxygen will try to run qhelpgenerator on the generated
+# .qhp file.
+
+QHG_LOCATION =
+
+# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
+# will be generated, which together with the HTML files, form an Eclipse help
+# plugin. To install this plugin and make it available under the help contents
+# menu in Eclipse, the contents of the directory containing the HTML and XML
+# files needs to be copied into the plugins directory of eclipse. The name of
+# the directory within the plugins directory should be the same as
+# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
+# the help appears.
+
+GENERATE_ECLIPSEHELP = NO
+
+# A unique identifier for the eclipse help plugin. When installing the plugin
+# the directory name containing the HTML and XML files should also have
+# this name.
+
+ECLIPSE_DOC_ID = org.doxygen.Project
+
+# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
+# top of each HTML page. The value NO (the default) enables the index and
+# the value YES disables it.
+
+DISABLE_INDEX = NO
+
+# This tag can be used to set the number of enum values (range [0,1..20])
+# that doxygen will group on one line in the generated HTML documentation.
+# Note that a value of 0 will completely suppress the enum values from appearing in the overview section.
+
+ENUM_VALUES_PER_LINE = 4
+
+# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
+# structure should be generated to display hierarchical information.
+# If the tag value is set to YES, a side panel will be generated
+# containing a tree-like index structure (just like the one that
+# is generated for HTML Help). For this to work a browser that supports
+# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
+# Windows users are probably better off using the HTML help feature.
+
+GENERATE_TREEVIEW = NO
+
+# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
+# and Class Hierarchy pages using a tree view instead of an ordered list.
+
+USE_INLINE_TREES = NO
+
+# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
+# used to set the initial width (in pixels) of the frame in which the tree
+# is shown.
+
+TREEVIEW_WIDTH = 250
+
+# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
+# links to external symbols imported via tag files in a separate window.
+
+EXT_LINKS_IN_WINDOW = NO
+
+# Use this tag to change the font size of Latex formulas included
+# as images in the HTML documentation. The default is 10. Note that
+# when you change the font size after a successful doxygen run you need
+# to manually remove any form_*.png images from the HTML output directory
+# to force them to be regenerated.
+
+FORMULA_FONTSIZE = 10
+
+# Use the FORMULA_TRANPARENT tag to determine whether or not the images
+# generated for formulas are transparent PNGs. Transparent PNGs are
+# not supported properly for IE 6.0, but are supported on all modern browsers.
+# Note that when changing this option you need to delete any form_*.png files
+# in the HTML output before the changes have effect.
+
+FORMULA_TRANSPARENT = YES
+
+# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
+# (see http://www.mathjax.org) which uses client side Javascript for the
+# rendering instead of using prerendered bitmaps. Use this if you do not
+# have LaTeX installed or if you want to formulas look prettier in the HTML
+# output. When enabled you also need to install MathJax separately and
+# configure the path to it using the MATHJAX_RELPATH option.
+
+USE_MATHJAX = NO
+
+# When MathJax is enabled you need to specify the location relative to the
+# HTML output directory using the MATHJAX_RELPATH option. The destination
+# directory should contain the MathJax.js script. For instance, if the mathjax
+# directory is located at the same level as the HTML output directory, then
+# MATHJAX_RELPATH should be ../mathjax. The default value points to the mathjax.org site, so you can quickly see the result without installing
+# MathJax, but it is strongly recommended to install a local copy of MathJax
+# before deployment.
+
+MATHJAX_RELPATH = http://www.mathjax.org/mathjax
+
+# When the SEARCHENGINE tag is enabled doxygen will generate a search box
+# for the HTML output. The underlying search engine uses javascript
+# and DHTML and should work on any modern browser. Note that when using
+# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
+# (GENERATE_DOCSET) there is already a search function so this one should
+# typically be disabled. For large projects the javascript based search engine
+# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
+
+SEARCHENGINE = YES
+
+# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
+# implemented using a PHP enabled web server instead of at the web client
+# using Javascript. Doxygen will generate the search PHP script and index
+# file to put on the web server. The advantage of the server
+# based approach is that it scales better to large projects and allows
+# full text search. The disadvantages are that it is more difficult to setup
+# and does not have live searching capabilities.
+
+SERVER_BASED_SEARCH = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the LaTeX output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
+# generate Latex output.
+
+GENERATE_LATEX = YES
+
+# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `latex' will be used as the default path.
+
+LATEX_OUTPUT = latex
+
+# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
+# invoked. If left blank `latex' will be used as the default command name.
+# Note that when enabling USE_PDFLATEX this option is only used for
+# generating bitmaps for formulas in the HTML output, but not in the
+# Makefile that is written to the output directory.
+
+LATEX_CMD_NAME = latex
+
+# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
+# generate index for LaTeX. If left blank `makeindex' will be used as the
+# default command name.
+
+MAKEINDEX_CMD_NAME = makeindex
+
+# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
+# LaTeX documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_LATEX = YES
+
+# The PAPER_TYPE tag can be used to set the paper type that is used
+# by the printer. Possible values are: a4, letter, legal and
+# executive. If left blank a4wide will be used.
+
+PAPER_TYPE = letter
+
+# The EXTRA_PACKAGES tag can be to specify one or more names of LaTeX
+# packages that should be included in the LaTeX output.
+
+EXTRA_PACKAGES =
+
+# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
+# the generated latex document. The header should contain everything until
+# the first chapter. If it is left blank doxygen will generate a
+# standard header. Notice: only use this tag if you know what you are doing!
+
+LATEX_HEADER =
+
+# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
+# is prepared for conversion to pdf (using ps2pdf). The pdf file will
+# contain links (just like the HTML output) instead of page references
+# This makes the output suitable for online browsing using a pdf viewer.
+
+PDF_HYPERLINKS = YES
+
+# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
+# plain latex in the generated Makefile. Set this option to YES to get a
+# higher quality PDF documentation.
+
+USE_PDFLATEX = YES
+
+# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode.
+# command to the generated LaTeX files. This will instruct LaTeX to keep
+# running if errors occur, instead of asking the user for help.
+# This option is also used when generating formulas in HTML.
+
+LATEX_BATCHMODE = YES
+
+# If LATEX_HIDE_INDICES is set to YES then doxygen will not
+# include the index chapters (such as File Index, Compound Index, etc.)
+# in the output.
+
+LATEX_HIDE_INDICES = YES
+
+# If LATEX_SOURCE_CODE is set to YES then doxygen will include
+# source code with syntax highlighting in the LaTeX output.
+# Note that which sources are shown also depends on other settings
+# such as SOURCE_BROWSER.
+
+LATEX_SOURCE_CODE = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the RTF output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
+# The RTF output is optimized for Word 97 and may not look very pretty with
+# other RTF readers or editors.
+
+GENERATE_RTF = NO
+
+# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `rtf' will be used as the default path.
+
+RTF_OUTPUT = rtf
+
+# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
+# RTF documents. This may be useful for small projects and may help to
+# save some trees in general.
+
+COMPACT_RTF = NO
+
+# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
+# will contain hyperlink fields. The RTF file will
+# contain links (just like the HTML output) instead of page references.
+# This makes the output suitable for online browsing using WORD or other
+# programs which support those fields.
+# Note: wordpad (write) and others do not support links.
+
+RTF_HYPERLINKS = NO
+
+# Load stylesheet definitions from file. Syntax is similar to doxygen's
+# config file, i.e. a series of assignments. You only have to provide
+# replacements, missing definitions are set to their default value.
+
+RTF_STYLESHEET_FILE =
+
+# Set optional variables used in the generation of an rtf document.
+# Syntax is similar to doxygen's config file.
+
+RTF_EXTENSIONS_FILE =
+
+#---------------------------------------------------------------------------
+# configuration options related to the man page output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
+# generate man pages
+
+GENERATE_MAN = NO
+
+# The MAN_OUTPUT tag is used to specify where the man pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `man' will be used as the default path.
+
+MAN_OUTPUT = man
+
+# The MAN_EXTENSION tag determines the extension that is added to
+# the generated man pages (default is the subroutine's section .3)
+
+MAN_EXTENSION = .3
+
+# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
+# then it will generate one additional man file for each entity
+# documented in the real man page(s). These additional files
+# only source the real man page, but without them the man command
+# would be unable to find the correct page. The default is NO.
+
+MAN_LINKS = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the XML output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_XML tag is set to YES Doxygen will
+# generate an XML file that captures the structure of
+# the code including all documentation.
+
+GENERATE_XML = NO
+
+# The XML_OUTPUT tag is used to specify where the XML pages will be put.
+# If a relative path is entered the value of OUTPUT_DIRECTORY will be
+# put in front of it. If left blank `xml' will be used as the default path.
+
+XML_OUTPUT = xml
+
+# The XML_SCHEMA tag can be used to specify an XML schema,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_SCHEMA =
+
+# The XML_DTD tag can be used to specify an XML DTD,
+# which can be used by a validating XML parser to check the
+# syntax of the XML files.
+
+XML_DTD =
+
+# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
+# dump the program listings (including syntax highlighting
+# and cross-referencing information) to the XML output. Note that
+# enabling this will significantly increase the size of the XML output.
+
+XML_PROGRAMLISTING = YES
+
+#---------------------------------------------------------------------------
+# configuration options for the AutoGen Definitions output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
+# generate an AutoGen Definitions (see autogen.sf.net) file
+# that captures the structure of the code including all
+# documentation. Note that this feature is still experimental
+# and incomplete at the moment.
+
+GENERATE_AUTOGEN_DEF = NO
+
+#---------------------------------------------------------------------------
+# configuration options related to the Perl module output
+#---------------------------------------------------------------------------
+
+# If the GENERATE_PERLMOD tag is set to YES Doxygen will
+# generate a Perl module file that captures the structure of
+# the code including all documentation. Note that this
+# feature is still experimental and incomplete at the
+# moment.
+
+GENERATE_PERLMOD = NO
+
+# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
+# the necessary Makefile rules, Perl scripts and LaTeX code to be able
+# to generate PDF and DVI output from the Perl module output.
+
+PERLMOD_LATEX = NO
+
+# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
+# nicely formatted so it can be parsed by a human reader. This is useful
+# if you want to understand what is going on. On the other hand, if this
+# tag is set to NO the size of the Perl module output will be much smaller
+# and Perl will parse it just the same.
+
+PERLMOD_PRETTY = YES
+
+# The names of the make variables in the generated doxyrules.make file
+# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
+# This is useful so different doxyrules.make files included by the same
+# Makefile don't overwrite each other's variables.
+
+PERLMOD_MAKEVAR_PREFIX =
+
+#---------------------------------------------------------------------------
+# Configuration options related to the preprocessor
+#---------------------------------------------------------------------------
+
+# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
+# evaluate all C-preprocessor directives found in the sources and include
+# files.
+
+ENABLE_PREPROCESSING = YES
+
+# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
+# names in the source code. If set to NO (the default) only conditional
+# compilation will be performed. Macro expansion can be done in a controlled
+# way by setting EXPAND_ONLY_PREDEF to YES.
+
+MACRO_EXPANSION = NO
+
+# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
+# then the macro expansion is limited to the macros specified with the
+# PREDEFINED and EXPAND_AS_DEFINED tags.
+
+EXPAND_ONLY_PREDEF = NO
+
+# If the SEARCH_INCLUDES tag is set to YES (the default) the include files
+# in the INCLUDE_PATH (see below) will be searched if a #include is found.
+
+SEARCH_INCLUDES = YES
+
+# The INCLUDE_PATH tag can be used to specify one or more directories that
+# contain include files that are not input files but should be processed by
+# the preprocessor.
+
+INCLUDE_PATH =
+
+# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
+# patterns (like *.h and *.hpp) to filter out the header-files in the
+# directories. If left blank, the patterns specified with FILE_PATTERNS will
+# be used.
+
+INCLUDE_FILE_PATTERNS =
+
+# The PREDEFINED tag can be used to specify one or more macro names that
+# are defined before the preprocessor is started (similar to the -D option of
+# gcc). The argument of the tag is a list of macros of the form: name
+# or name=definition (no spaces). If the definition and the = are
+# omitted =1 is assumed. To prevent a macro definition from being
+# undefined via #undef or recursively expanded use the := operator
+# instead of the = operator.
+
+PREDEFINED = DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
+
+# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
+# this tag can be used to specify a list of macro names that should be
+# expanded. The macro definition that is found in the sources will be used.
+# Use the PREDEFINED tag if you want to use a different macro definition
+# that overrules the definition found in the source code.
+
+EXPAND_AS_DEFINED =
+
+# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
+# doxygen's preprocessor will remove all references to function-like macros
+# that are alone on a line, have an all uppercase name, and do not end with a
+# semicolon, because these will confuse the parser if not removed.
+
+SKIP_FUNCTION_MACROS = YES
+
+#---------------------------------------------------------------------------
+# Configuration::additions related to external references
+#---------------------------------------------------------------------------
+
+# The TAGFILES option can be used to specify one or more tagfiles.
+# Optionally an initial location of the external documentation
+# can be added for each tagfile. The format of a tag file without
+# this location is as follows:
+#
+# TAGFILES = file1 file2 ...
+# Adding location for the tag files is done as follows:
+#
+# TAGFILES = file1=loc1 "file2 = loc2" ...
+# where "loc1" and "loc2" can be relative or absolute paths or
+# URLs. If a location is present for each tag, the installdox tool
+# does not have to be run to correct the links.
+# Note that each tag file must have a unique name
+# (where the name does NOT include the path)
+# If a tag file is not located in the directory in which doxygen
+# is run, you must also specify the path to the tagfile here.
+
+TAGFILES =
+
+# When a file name is specified after GENERATE_TAGFILE, doxygen will create
+# a tag file that is based on the input files it reads.
+
+GENERATE_TAGFILE =
+
+# If the ALLEXTERNALS tag is set to YES all external classes will be listed
+# in the class index. If set to NO only the inherited external classes
+# will be listed.
+
+ALLEXTERNALS = NO
+
+# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
+# in the modules index. If set to NO, only the current project's groups will
+# be listed.
+
+EXTERNAL_GROUPS = YES
+
+# The PERL_PATH should be the absolute path and name of the perl script
+# interpreter (i.e. the result of `which perl').
+
+PERL_PATH = /usr/bin/perl
+
+#---------------------------------------------------------------------------
+# Configuration options related to the dot tool
+#---------------------------------------------------------------------------
+
+# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
+# generate an inheritance diagram (in HTML, RTF and LaTeX) for classes with base
+# or super classes. Setting the tag to NO turns the diagrams off. Note that
+# this option also works with HAVE_DOT disabled, but it is recommended to
+# install and use dot, since it yields more powerful graphs.
+
+CLASS_DIAGRAMS = YES
+
+# You can define message sequence charts within doxygen comments using the \msc
+# command. Doxygen will then run the mscgen tool (see
+# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
+# documentation. The MSCGEN_PATH tag allows you to specify the directory where
+# the mscgen tool resides. If left empty the tool is assumed to be found in the
+# default search path.
+
+MSCGEN_PATH =
+
+# If set to YES, the inheritance and collaboration graphs will hide
+# inheritance and usage relations if the target is undocumented
+# or is not a class.
+
+HIDE_UNDOC_RELATIONS = NO
+
+# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
+# available from the path. This tool is part of Graphviz, a graph visualization
+# toolkit from AT&T and Lucent Bell Labs. The other options in this section
+# have no effect if this option is set to NO (the default)
+
+HAVE_DOT = YES
+
+# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
+# allowed to run in parallel. When set to 0 (the default) doxygen will
+# base this on the number of processors available in the system. You can set it
+# explicitly to a value larger than 0 to get control over the balance
+# between CPU load and processing speed.
+
+DOT_NUM_THREADS = 0
+
+# By default doxygen will write a font called Helvetica to the output
+# directory and reference it in all dot files that doxygen generates.
+# When you want a differently looking font you can specify the font name
+# using DOT_FONTNAME. You need to make sure dot is able to find the font,
+# which can be done by putting it in a standard location or by setting the
+# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
+# containing the font.
+
+DOT_FONTNAME = FreeSans
+
+# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
+# The default size is 10pt.
+
+DOT_FONTSIZE = 10
+
+# By default doxygen will tell dot to use the output directory to look for the
+# FreeSans.ttf font (which doxygen will put there itself). If you specify a
+# different font using DOT_FONTNAME you can set the path where dot
+# can find it using this tag.
+
+DOT_FONTPATH =
+
+# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect inheritance relations. Setting this tag to YES will force
+# the CLASS_DIAGRAMS tag to NO.
+
+CLASS_GRAPH = YES
+
+# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for each documented class showing the direct and
+# indirect implementation dependencies (inheritance, containment, and
+# class references variables) of the class with other documented classes.
+
+COLLABORATION_GRAPH = YES
+
+# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
+# will generate a graph for groups, showing the direct groups dependencies
+
+GROUP_GRAPHS = YES
+
+# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
+# collaboration diagrams in a style similar to the OMG's Unified Modeling
+# Language.
+
+UML_LOOK = YES
+
+# If set to YES, the inheritance and collaboration graphs will show the
+# relations between templates and their instances.
+
+TEMPLATE_RELATIONS = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
+# tags are set to YES then doxygen will generate a graph for each documented
+# file showing the direct and indirect include dependencies of the file with
+# other documented files.
+
+INCLUDE_GRAPH = NO
+
+# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
+# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
+# documented header file showing the documented files that directly or
+# indirectly include this file.
+
+INCLUDED_BY_GRAPH = YES
+
+# If the CALL_GRAPH and HAVE_DOT options are set to YES then
+# doxygen will generate a call dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable call graphs
+# for selected functions only using the \callgraph command.
+
+CALL_GRAPH = YES
+
+# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
+# doxygen will generate a caller dependency graph for every global function
+# or class method. Note that enabling this option will significantly increase
+# the time of a run. So in most cases it will be better to enable caller
+# graphs for selected functions only using the \callergraph command.
+
+CALLER_GRAPH = YES
+
+# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
+# will generate a graphical hierarchy of all classes instead of a textual one.
+
+GRAPHICAL_HIERARCHY = YES
+
+# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
+# then doxygen will show the dependencies a directory has on other directories
+# in a graphical way. The dependency relations are determined by the #include
+# relations between the files in the directories.
+
+DIRECTORY_GRAPH = YES
+
+# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
+# generated by dot. Possible values are png, jpg, gif or svg.
+# If left blank png will be used.
+
+DOT_IMAGE_FORMAT = png
+
+# The tag DOT_PATH can be used to specify the path where the dot tool can be
+# found. If left blank, it is assumed the dot tool can be found in the path.
+
+DOT_PATH =
+
+# The DOTFILE_DIRS tag can be used to specify one or more directories that
+# contain dot files that are included in the documentation (see the
+# \dotfile command).
+
+DOTFILE_DIRS =
+
+# The MSCFILE_DIRS tag can be used to specify one or more directories that
+# contain msc files that are included in the documentation (see the
+# \mscfile command).
+
+MSCFILE_DIRS =
+
+# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
+# nodes that will be shown in the graph. If the number of nodes in a graph
+# becomes larger than this value, doxygen will truncate the graph, which is
+# visualized by representing a node as a red box. Note that if the number
+# of direct children of the root node in a graph is already larger than
+# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
+# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
+
+DOT_GRAPH_MAX_NODES = 50
+
+# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
+# graphs generated by dot. A depth value of 3 means that only nodes reachable
+# from the root by following a path via at most 3 edges will be shown. Nodes
+# that lie further from the root node will be omitted. Note that setting this
+# option to 1 or 2 may greatly reduce the computation time needed for large
+# code bases. Also note that the size of a graph can be further restricted by
+# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
+
+MAX_DOT_GRAPH_DEPTH = 0
+
+# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
+# background. This is disabled by default, because dot on Windows does not
+# seem to support this out of the box. Warning: Depending on the platform used,
+# enabling this option may lead to badly anti-aliased labels on the edges of
+# a graph (i.e. they become hard to read).
+
+DOT_TRANSPARENT = NO
+
+# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
+# files in one run (i.e. multiple -o and -T options on the command line). This
+# makes dot run faster, but since only newer versions of dot (>1.8.10)
+# support this, this feature is disabled by default.
+
+DOT_MULTI_TARGETS = NO
+
+# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
+# generate a legend page explaining the meaning of the various boxes and
+# arrows in the dot generated graphs.
+
+GENERATE_LEGEND = YES
+
+# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
+# remove the intermediate dot files that are used to generate
+# the various graphs.
+
+DOT_CLEANUP = YES
diff --git a/rcynic-ng/Makefile.in b/rcynic-ng/Makefile.in
new file mode 100644
index 00000000..5c32563f
--- /dev/null
+++ b/rcynic-ng/Makefile.in
@@ -0,0 +1,69 @@
+# $Id$
+
+NAME = rcynic
+
+BIN = ${NAME}
+SRC = ${NAME}.c
+OBJ = ${NAME}.o
+
+GEN = defstack.h
+HDR = ${GEN}
+
+CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations
+LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
+LIBS = @LIBS@
+
+AWK = @AWK@
+
+abs_top_srcdir = @abs_top_srcdir@
+abs_top_builddir = @abs_top_builddir@
+
+host_os = @host_os@
+
+all: ${BIN}
+
+clean:
+ cd static-rsync; ${MAKE} $@
+ rm -f ${BIN} ${OBJ} ${GEN}
+
+${BIN}: ${SRC} ${HDR}
+ ${CC} ${CFLAGS} -o $@ ${SRC} ${LDFLAGS} ${LIBS}
+
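+# defstack.h is generated entirely by the BEGIN block of defstack.awk;
+# the script reads no input, it just prints the safestack macro text.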
+defstack.h: defstack.awk
+	${AWK} -f defstack.awk >$@
+
+test: ${BIN}
+ if test -r rcynic.conf; then ./${BIN} -j 0 && echo && ./show.sh; else echo No rcynic.conf, skipping test; fi
+
+install: ${BIN} installation-scripts/install.sh
+ cd installation-scripts; host_os="${host_os}"; DESTDIR="${DESTDIR}"; . ./install.sh
+
+uninstall deinstall:
+ cd installation-scripts; host_os="${host_os}"; DESTDIR="${DESTDIR}"; . ./deinstall.sh
+
+distclean: clean docclean
+ cd static-rsync; ${MAKE} $@
+ rm -f show.sh installation-scripts/linux/install.sh Makefile
+
+tags: TAGS
+
+TAGS: ${SRC}
+ etags ${SRC}
+
+# Doc stuff right now is just internals doc, of interest only to
+# programmers. Real doc for rcynic is still the README. This may
+# change some day.
+
+html:
+ TZ='' doxygen
+
+pdf: html
+ cd doc/latex && TZ='' ${MAKE} pdf && ln -f refman.pdf ../manual.pdf
+
+tgz: html
+ cd doc && tar -cf - html | gzip -9 >manual.tar.gz
+
+docs: html pdf tgz
+
+docclean:
+ rm -rf doc
diff --git a/rcynic-ng/README b/rcynic-ng/README
new file mode 100644
index 00000000..27a2e455
--- /dev/null
+++ b/rcynic-ng/README
@@ -0,0 +1,452 @@
+-*- Text -*- $Id$
+
+"Cynical rsync" -- fetch and validate RPKI certificates.
+
+To build this you will need to link it against an OpenSSL libcrypto
+that has support for the RFC 3779 extensions. See ../openssl/README.
+
+I developed this code on FreeBSD 6-STABLE. It is also known to work
+on Ubuntu (8.10) and Mac OS X (Snow Leopard). In theory it
+should run on any reasonably POSIX-like system. As far as I know I
+have not used any seriously non-portable features, but neither have I
+done a POSIX reference manual lookup for every function call. Please
+report any portability problems.
+
+All certificates and CRLs are in DER format, with filenames derived
+from the RPKI rsync URIs at which the data are published. At some
+point I'll probably write a companion program to convert a tree of DER
+into the hashed directory of PEM format that most OpenSSL applications
+expect.
+
+All configuration is via an OpenSSL-style configuration file, except
+for selection of the name of the configuration file itself. A few of
+the parameters can also be set from the command line, to simplify
+testing. The default name for the configuration file is rcynic.conf; you
+can override this with the -c option on the command line. The config
+file uses OpenSSL's config file syntax, and you can set OpenSSL
+library configuration paramaters (eg, "engine" settings) in the config
+file as well. rcynic's own configuration parameters are in a section
+called "[rcynic]".
+
+Most configuration parameters are optional and have defaults that
+should do something reasonable if you are running rcynic in a test
+directory. If you're running it as a system program, perhaps under
+cron, you'll want to set additional parameters to tell rcynic where to
+find its data and where to write its output.
+
+The one thing you MUST specify in the config file in order for the
+program to do anything useful is the file name of one or more trust
+anchors. Trust anchors for this program are represented as
+DER-formatted X.509 objects that look just like certificates, except
+that they're trust anchors. To date I have only tested this code with
+self-signed trust anchors; in theory this is not required, but in
+practice the code may require tweaks to support other trust anchors.
+
+Example of a minimal config file:
+
+ [rcynic]
+
+ trust-anchor.0 = trust-anchors/apnic-trust-anchor.cer
+ trust-anchor.1 = trust-anchors/ripe-ripe-trust-anchor.cer
+ trust-anchor.2 = trust-anchors/ripe-arin-trust-anchor.cer
+
+By default, rcynic uses three writable directory trees:
+
+- unauthenticated Raw data fetched via rsync. In order to take
+ full advantage of rsync's optimized transfers,
+ you should preserve and reuse this directory
+ across rcynic runs, so that rcynic need not
+ re-fetch data that have not changed.
+
+- authenticated Data that rcynic has checked. This is the
+ real output of the process.
+
+- old_authenticated Saved results from immediately previous rcynic
+ run, used when attempting to recover from
+ certain kinds of errors.
+
+rcynic renames the authenticated tree to become the old_authenticated
+tree when it starts up, then builds a new authenticated tree.
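+
+With the default settings (see the configuration variables below), the
+resulting layout looks roughly like this:
+
+  rcynic-data/
+    authenticated/        output of the run that just finished
+    authenticated.old/    output of the previous run
+    unauthenticated/      raw rsync mirror, reused across runs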
+
+rcynic copies the trust anchors themselves into the top level
+directory of the authenticated tree as xxxxxxxx.n.cer, where xxxxxxxx
+and n are the OpenSSL object name hash and index within the resulting
+virtual hash bucket (the same as the c_hash Perl script that comes
+with OpenSSL would produce), and ".cer" is the literal string ".cer".
+The reason for this is that trust anchors, by definition, are not
+fetched automatically, and thus do not really have publication URIs in
+the sense that every other object in these trees does. So rcynic uses
+a naming scheme which ensures (a) that each trust anchor has a unique
+name within the output tree and (b) that trust anchors cannot be
+confused with certificates: trust anchors always go in the top level
+of the tree, data fetched via rsync always go in subdirectories.
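+
+If you want to predict the name a particular trust anchor will get, the
+hash can be computed with the OpenSSL command line tool; for example
+(the file name and hash value here are made up):
+
+  $ openssl x509 -inform DER -in apnic-trust-anchor.cer -noout -hash
+  5ba712a3
+
+which would make the installed copy 5ba712a3.0.cer (or .1.cer, .2.cer,
+and so on if several trust anchors fall into the same hash bucket).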
+
+As currently implemented, rcynic does not attempt to maintain an
+in-memory cache of objects it might need again later. It does keep an
+internal cache of the URIs from which it has already fetched data in
+this pass, and it keeps a stack containing the current certificate
+chain as it does its validation walk. All other data (eg, CRLs) are
+freed immediately after use and read from disk again as needed. From
+a database design standpoint, this is not very efficient, but as
+rcynic's main bottlenecks are expected to be crypto and network
+operations, it seemed best to keep the design as simple as possible,
+at least until execution profiling demonstrates a real issue.
+
+Usage and configuration:
+
+Logging levels:
+
+rcynic has its own system of logging levels, similar to what syslog()
+uses but customized to the specific task rcynic performs. Levels:
+
+ log_sys_err Error from operating system or library
+ log_usage_err Bad usage (local configuration error)
+ log_data_err Bad data (broken certificates or CRLs)
+ log_telemetry Normal chatter about rcynic's progress
+ log_verbose Extra verbose chatter
+ log_debug Only useful when debugging
+
+Command line options:
+
+ -c configfile Path to configuration file (default: rcynic.conf)
+ -l loglevel Logging level (default: log_telemetry)
+ -s Log via syslog
+ -e Log via stderr when also using syslog
+ -j Start-up jitter interval (see below; default: 600)
+ -V Print rcynic's version to standard output and exit
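+
+For example, a test run from the build directory might look like this
+(the config file name and logging level are just illustrations):
+
+  $ ./rcynic -c rcynic.conf -l log_verbose -j 0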
+
+Configuration file:
+
+rcynic uses the OpenSSL libcrypto configuration file mechanism. All
+libcrypto configuration options (eg, for engine support) are
+available. All rcynic-specific options are in the "[rcynic]"
+section. You -must- have a configuration file in order for rcynic to
+do anything useful, as the configuration file is the only way to list
+your trust anchors.
+
+Configuration variables:
+
+authenticated Path to output directory (where rcynic should
+ place objects it has been able to validate).
+ Default: rcynic-data/authenticated
+
+old-authenticated Path to which rcynic should rename the output
+ directory (if any) from the previous rcynic
+ run. rcynic preserves the previous run's
+ output directory both as a backup data source
+ for the current run and also so that you don't
+ lose all your state if rcynic chokes and
+ dies. Default: rcynic-data/authenticated.old
+
+
+unauthenticated Path to directory where rcynic should store
+			unauthenticated data retrieved via rsync.
+ Unless something goes horribly wrong, you want
+ rcynic to preserve and reuse this directory
+ across runs to minimize the network traffic
+ necessary to bring your repository mirror up
+ to date. Default: rcynic-data/unauthenticated
+
+rsync-timeout How long (in seconds) to let rsync run before
+ terminating the rsync process, or zero for no
+ timeout. You want this timeout to be fairly
+ long, to avoid terminating rsync connections
+ prematurely. It's present to let you defend
+ against evil rsync server operators who try to
+ tarpit your connection as a form of denial of
+ service attack on rcynic. Default: no timeout
+			(but this may change, best to set it explicitly).
+
+
+rsync-program Path to the rsync program. Default: rsync,
+ but you should probably set this variable
+ rather than just trusting the PATH environment
+ variable to be set correctly.
+
+log-level Same as -l option on command line. Command
+ line setting overrides config file setting.
+ Default: log_telemetry
+
+use-syslog Same as -s option on command line. Command
+ line setting overrides config file setting.
+ Values: true or false. Default: false
+
+use-stderr Same as -e option on command line. Command
+ line setting overrides config file setting.
+ Values: true or false. Default: false, but
+ if neither use-syslog nor use-stderr is set,
+ log output will go to stderr.
+
+syslog-facility Syslog facility to use. Default: local0
+
+
+syslog-priority-xyz (where xyz is an rcynic logging level, above)
+ Override the syslog priority value to use when
+ logging messages at this rcynic level.
+ Defaults:
+
+ syslog-priority-log_sys_err: err
+ syslog-priority-log_usage_err: err
+ syslog-priority-log_data_err: notice
+ syslog-priority-log_telemetry: info
+ syslog-priority-log_verbose: info
+ syslog-priority-log_debug: debug
+
+jitter Startup jitter interval, same as -j option on
+ command line. Jitter interval, specified in
+ number of seconds. rcynic will pick a random
+ number within the interval from zero to this
+ value, and will delay for that many seconds on
+ startup. The purpose of this is to spread the
+ load from large numbers of rcynic clients all
+ running under cron with synchronized clocks,
+ in particular to avoid hammering the RPKI
+ rsync servers into the ground at midnight UTC.
+ Default: 600
+
+lockfile Name of lockfile, or empty for no lock. If
+ you run rcynic under cron, you should use this
+ parameter to set a lockfile so that successive
+ instances of rcynic don't stomp on each other.
+ Default: no lock
+
+xml-summary Enable output of a per-host summary at the
+ end of an rcynic run in XML format. Some
+ users prefer this to the log_telemetry style
+ of logging, or just want it in addition to
+ logging. Value: filename to which XML summary
+ should be written; "-" will send XML summary
+ to stdout. Default: no XML summary
+
+allow-stale-crl Allow use of CRLs which are past their
+ nextUpdate timestamp. This is probably
+ harmless, but since it may be an early warning
+ of problems, it's configurable.
+ Values: true or false. Default: true
+
+prune Clean up old files corresponding to URIs that
+ rcynic did not see at all during this run.
+ rcynic invokes rsync with the --delete option
+ to clean up old objects from collections that
+ rcynic revisits, but if a URI changes so that
+ rcynic never visits the old collection again,
+ old files will remain in the local mirror
+ indefinitely unless you enable this option.
+ Values: true or false. Default: true
+
+allow-stale-manifest Allow use of manifests which are past their
+ nextUpdate timestamp. This is probably
+ harmless, but since it may be an early warning
+ of problems, it's configurable.
+ Values: true or false. Default: true
+
+require-crl-in-manifest Reject manifests which don't list the CRL
+ covering the manifest EE certificate.
+ Values: true or false. Default: false
+
+allow-non-self-signed-trust-anchor
+ Experimental. Attempts to work around OpenSSL's
+ strong preference for self-signed trust
+ anchors. Do not use this unless you really know
+ what you are doing.
+ Values: true or false. Default: false
+
+trust-anchor Specify one RPKI trust anchor, represented as
+ a local file containing an X.509 certificate
+ in DER format. Value of this option is the
+ pathname of the file. No default.
+
+trust-anchor-uri-with-key
+ Specify one RPKI trust anchor, represented as
+ an rsync URI and a local file containing the
+ RSA public key of the X.509 object specified
+ by the URI. The RSA public key should be in
+ DER format. Value for this option consists of
+ the URI and the filename of the public key, in
+ that order, separated by whitespace. No
+ default.
+
+trust-anchor-locator
+ Specify one RPKI trust anchor, represented as
+ a local file containing an rsync URI and the
+ RSA public key of the X.509 object specified
+ by the URI. First line of the file is the
+ URI, remainder is the public key in Base64
+ encoded DER format. Value of this option
+ is the pathname of the file. No default.
+
+indirect-trust-anchor
+ An old name for trust-anchor-locator, kept for
+			backwards compatibility. See description of
+ trust-anchor-locator. The name was changed
+ to keep members of the IETF SIDR WG happy.
+
+trust-anchor-uri-with-key and indirect-trust-anchor are semantically
+identical; the difference is just how the URI and public key are
+stored.
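+
+Putting several of the above together, a config file for a cron-driven
+setup might look something like this (all path and file names here are
+just examples):
+
+ [rcynic]
+
+ rsync-program = /usr/bin/rsync
+ lockfile = rcynic-data/lock
+ jitter = 600
+ xml-summary = rcynic.xml
+ trust-anchor-locator.1 = trust-anchors/example.tal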
+
+There's a companion XSLT template in rcynic.xsl, which translates what
+the xml-summary option writes into HTML.
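+
+Any XSLT processor should be able to apply it; with xsltproc (not part
+of this package, just one common choice) that would be something like:
+
+  $ xsltproc rcynic.xsl rcynic.xml > rcynic.html
+
+where rcynic.xml is whatever file you named with the xml-summary option.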
+
+
+
+Running rcynic chrooted
+
+[This is only a sketch, needs details and finicky proofreading]
+
+rcynic does not include any direct support for running chrooted, but
+is designed to be (relatively) easy to run in a chroot jail. Here's
+how.
+
+You'll either need statically linked copies of rcynic and rsync, or
+you'll need to figure out which shared libraries these programs need
+(try using the "ldd" command). Here we assume statically linked
+binaries, because that's simpler.
+
+You'll need a chroot wrapper program. Your platform may already have
+one (FreeBSD does -- /usr/sbin/chroot), but if you don't, you can
+download Wietse Venema's "chrootuid" program from:
+
+ ftp://ftp.porcupine.org/pub/security/chrootuid1.3.tar.gz
+
+Warning: The chroot program included in at least some Linux
+distributions is not adequate to this task; you need a wrapper that
+knows how to drop privileges after performing the chroot() operation
+itself. If in doubt, use chrootuid.
+
+Unfortunately, the precise details of setting up a proper chroot jail
+vary wildly from one system to another, so the following instructions
+will likely not be a precise match for the preferred way of doing this
+on any particular platform. We have sample scripts that do the right
+thing for FreeBSD, feel free to contribute such scripts for other
+platforms.
+
+Step 1: Build the static binaries. You might want to test them at
+this stage too, although you can defer that until after you've got the
+jail built.
+
+Step 2: Create a userid under which to run rcynic. Here we'll assume
+that you've created a user "rcynic", whose default group is also named
+"rcynic". Do not add any other userids to the rcynic group unless you
+really know what you are doing.
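+
+On FreeBSD, for instance, the supplied install script does roughly the
+equivalent of:
+
+  # pw groupadd rcynic
+  # pw useradd rcynic -g rcynic -d /nonexistent -s /usr/sbin/nologin -c "RPKI validation system"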
+
+Step 3: Build the jail. You'll need, at minimum, a directory in which
+to put the binaries, a subdirectory tree that's writable by the userid
+which will be running rcynic and rsync, your trust anchors, and
+whatever device inodes the various libraries need on your system.
+Most likely the devices that matter will be /dev/null, /dev/random,
+and /dev/urandom; if you're running a FreeBSD system with devfs, you
+do this by mounting and configuring a devfs instance in the jail; on
+other platforms you probably use the mknod program or something.
+
+Important: other than the directories that you want rcynic and rsync
+to be able to modify, -nothing- in the initial jail setup should be
+writable by the rcynic userid. In particular, rcynic and rsync should
+-not- be allowed to modify: their own binary images, any of the
+configuration files, or your trust anchors. It's simplest just to
+have root own all the files and directories that rcynic and rsync are
+not allowed to modify.
+
+Sample jail tree, assuming that we're putting all of this under
+/var/rcynic:
+
+ # mkdir /var/rcynic
+ # mkdir /var/rcynic/bin
+ # mkdir /var/rcynic/data
+ # mkdir /var/rcynic/dev
+ # mkdir /var/rcynic/etc
+ # mkdir /var/rcynic/etc/trust-anchors
+
+Copy your trust anchors into /var/rcynic/etc/trust-anchors.
+
+Copy the statically linked rcynic and rsync into /var/rcynic/bin.
+
+Copy /etc/resolv.conf and /etc/localtime (if it exists) into
+/var/rcynic/etc.
+
+Write an rcynic configuration file as /var/rcynic/etc/rcynic.conf
+(path names in this file must match the jail setup, more below).
+
+ # chmod -R go-w /var/rcynic
+ # chown -R root:wheel /var/rcynic
+ # chown -R rcynic:rcynic /var/rcynic/data
+
+If you're using devfs, arrange for it to be mounted at
+/var/rcynic/dev; otherwise, create whatever device inodes you need in
+/var/rcynic/dev and make sure that they have sane permissions (copying
+whatever permissions are used in your system /dev directory should
+suffice).
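+
+On Linux, one simple way to populate the jail's /dev (this is what the
+supplied Linux install script does) is to copy the existing inodes with
+cpio:
+
+  # (cd /dev && ls null zero random urandom | cpio -puv /var/rcynic/dev)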
+
+rcynic.conf to match this configuration:
+
+ [rcynic]
+
+ trust-anchor.1 = /etc/trust-anchors/ta-1.cer
+ trust-anchor.2 = /etc/trust-anchors/ta-2.cer
+ trust-anchor.3 = /etc/trust-anchors/ta-3.cer
+
+ rsync-program = /bin/rsync
+ authenticated = /data/authenticated
+ old-authenticated = /data/authenticated.old
+ unauthenticated = /data/unauthenticated
+
+Once you've got all this set up, you're ready to try running rcynic in
+the jail. Try it from the command line first, then if that works, you
+should be able to run it under cron.
+
+Note: chroot, chrootuid, and other programs of this type are usually
+intended to be run by root, and should -not- be setuid programs unless
+you -really- know what you are doing.
+
+Sample command line:
+
+ # /usr/local/bin/chrootuid /var/rcynic rcynic /bin/rcynic -s -c /etc/rcynic.conf
+
+Note that we use absolute pathnames everywhere. This is not an
+accident. Programs running in jails under cron should not make
+assumptions about the current working directory or environment
+variable settings, and programs running in chroot jails would need
+different PATH settings anyway. Best just to specify everything.
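+
+So the corresponding crontab entry for root would look something like
+this (the minute value is just an example, please pick your own):
+
+  42 * * * *	/usr/local/bin/chrootuid /var/rcynic rcynic /bin/rcynic -s -c /etc/rcynic.conf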
+
+Building static binaries:
+
+On FreeBSD, building a statically linked rsync is easy: just set the
+environment variable LDFLAGS='-static' before building the rsync port
+and the right thing will happen. Since this is really just GNU
+configure picking up the environment variable, the same trick should
+work on other platforms.
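+
+For example, assuming the stock ports tree location:
+
+  # cd /usr/ports/net/rsync && env LDFLAGS='-static' make install clean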
+
+For simplicity, I've taken the same approach with rcynic, so
+
+ $ make LDFLAGS='-static'
+
+should work. Except that you don't even have to do that: static
+linking is the default, because I run it jailed.
+
+syslog:
+
+Depending on your syslogd configuration, syslog may not work properly
+with rcynic in a chroot jail. On FreeBSD, the easiest way to fix this
+is to add the following lines to /etc/rc.conf:
+
+ altlog_proglist="named rcynic"
+ rcynic_chrootdir="/var/rcynic"
+ rcynic_enable="YES"
+
+
+
+If you're using the experimental trust-anchor-uri-with-key trust
+anchor format, you'll need a copy of the public key in DER format.
+One can extract this from an X.509 format trust anchor using the
+OpenSSL command line tool, but the path is poorly documented. Try
+something like this:
+
+ $ openssl x509 -inform DER -in foo.cer -pubkey -noout | openssl rsa -outform DER -pubin -out foo.key
+
+The important bits here are:
+
+a) You're asking the x509 command to extract the public key and send
+ it (in PEM format) to stdout without the rest of the certificate
+
+b) You're asking the rsa command to read a public key (in PEM format)
+ on stdin, convert it to DER format and write it out.
diff --git a/rcynic-ng/defstack.awk b/rcynic-ng/defstack.awk
new file mode 100644
index 00000000..fce44f4f
--- /dev/null
+++ b/rcynic-ng/defstack.awk
@@ -0,0 +1,58 @@
+# $Id$
+#
+# Copyright (C) 2011 Internet Systems Consortium ("ISC")
+#
+# Permission to use, copy, modify, and distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+function print_line(name, line)
+{
+ gsub(/%/, name, line);
+ print line;
+}
+
+function define_stack(name)
+{
+ print_line(name, "/*");
+ print_line(name, " * Safestack macros for %.");
+ print_line(name, " */");
+ print_line(name, "#define sk_%_new(st) SKM_sk_new(%, (st))");
+ print_line(name, "#define sk_%_new_null() SKM_sk_new_null(%)");
+ print_line(name, "#define sk_%_free(st) SKM_sk_free(%, (st))");
+ print_line(name, "#define sk_%_num(st) SKM_sk_num(%, (st))");
+ print_line(name, "#define sk_%_value(st, i) SKM_sk_value(%, (st), (i))");
+ print_line(name, "#define sk_%_set(st, i, val) SKM_sk_set(%, (st), (i), (val))");
+ print_line(name, "#define sk_%_zero(st) SKM_sk_zero(%, (st))");
+ print_line(name, "#define sk_%_push(st, val) SKM_sk_push(%, (st), (val))");
+ print_line(name, "#define sk_%_unshift(st, val) SKM_sk_unshift(%, (st), (val))");
+ print_line(name, "#define sk_%_find(st, val) SKM_sk_find(%, (st), (val))");
+ print_line(name, "#define sk_%_find_ex(st, val) SKM_sk_find_ex(%, (st), (val))");
+ print_line(name, "#define sk_%_delete(st, i) SKM_sk_delete(%, (st), (i))");
+ print_line(name, "#define sk_%_delete_ptr(st, ptr) SKM_sk_delete_ptr(%, (st), (ptr))");
+ print_line(name, "#define sk_%_insert(st, val, i) SKM_sk_insert(%, (st), (val), (i))");
+ print_line(name, "#define sk_%_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(%, (st), (cmp))");
+ print_line(name, "#define sk_%_dup(st) SKM_sk_dup(%, st)");
+ print_line(name, "#define sk_%_pop_free(st, free_func) SKM_sk_pop_free(%, (st), (free_func))");
+ print_line(name, "#define sk_%_shift(st) SKM_sk_shift(%, (st))");
+ print_line(name, "#define sk_%_pop(st) SKM_sk_pop(%, (st))");
+ print_line(name, "#define sk_%_sort(st) SKM_sk_sort(%, (st))");
+ print_line(name, "#define sk_%_is_sorted(st) SKM_sk_is_sorted(%, (st))");
+ print_line(name, "");
+}
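+
+# For example, define_stack("FileAndHash") expands the template lines
+# above into output like:
+#
+#   #define sk_FileAndHash_num(st)      SKM_sk_num(FileAndHash, (st))
+#   #define sk_FileAndHash_value(st, i) SKM_sk_value(FileAndHash, (st), (i))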
+
+BEGIN {
+ define_stack("HOST_MIB_COUNTER");
+ define_stack("VALIDATION_STATUS");
+ define_stack("FileAndHash");
+ define_stack("ROAIPAddress");
+ define_stack("ROAIPAddressFamily");
+}
diff --git a/rcynic-ng/installation-scripts/darwin/RCynic/RCynic b/rcynic-ng/installation-scripts/darwin/RCynic/RCynic
new file mode 100755
index 00000000..bf36d6e5
--- /dev/null
+++ b/rcynic-ng/installation-scripts/darwin/RCynic/RCynic
@@ -0,0 +1,75 @@
+#!/bin/sh -
+#
+# $Id$
+#
+. /etc/rc.common
+
+name="rcynic"
+start_cmd="rcynic_start"
+stop_cmd="rcynic_stop"
+
+: ${rcynic_jaildir="/var/rcynic"}
+: ${rcynic_user="rcynic"}
+: ${rcynic_group="rcynic"}
+
+StartService()
+{
+ /bin/test -d "${rcynic_jaildir}" || /bin/mkdir "${rcynic_jaildir}"
+ /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
+
+ /usr/sbin/mtree -deU -p "${rcynic_jaildir}" <<EOF
+
+ /set type=dir uname=root gname=wheel mode=0555
+ .
+ bin
+ ..
+ dev
+ ..
+ etc
+ trust-anchors
+ ..
+ ..
+ usr
+ lib
+ system
+ ..
+ ..
+ ..
+ var
+ run
+ ..
+ ..
+ data uname=${rcynic_user} gname=${rcynic_group} mode=0755
+ ..
+ ..
+EOF
+
+ /bin/chmod -R a-w "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
+ /usr/sbin/chown -R root:wheel "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
+
+ if ! /sbin/mount_devfs devfs "${rcynic_jaildir}/dev"; then
+ echo "Mounting devfs on ${rcynic_jaildir}/dev failed..."
+ exit 1
+ fi
+
+ for i in /etc/localtime /etc/resolv.conf; do
+ j="${rcynic_jaildir}${i}"
+ if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
+ /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
+ fi
+ done
+
+ /bin/ln -f /var/run/mDNSResponder "${rcynic_jaildir}/var/run/mDNSResponder"
+}
+
+StopService()
+{
+ /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
+}
+
+RestartService()
+{
+ StartService
+}
+
+RunService "$1"
diff --git a/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist b/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist
new file mode 100644
index 00000000..ca46b676
--- /dev/null
+++ b/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist
@@ -0,0 +1,19 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
+<plist version="1.0">
+<dict>
+ <key>Description</key>
+ <string>RCynic Setup</string>
+ <key>OrderPreference</key>
+ <string>None</string>
+ <key>Provides</key>
+ <array>
+ <string>RCynic</string>
+ </array>
+ <key>Uses</key>
+ <array>
+ <string>Network</string>
+ <string>Resolver</string>
+ </array>
+ </dict>
+</plist>
diff --git a/rcynic-ng/installation-scripts/darwin/install.sh b/rcynic-ng/installation-scripts/darwin/install.sh
new file mode 100644
index 00000000..1f0dfe7d
--- /dev/null
+++ b/rcynic-ng/installation-scripts/darwin/install.sh
@@ -0,0 +1,193 @@
+#!/bin/sh -
+# $Id$
+#
+# Create a chroot jail for rcynic.
+#
+# This is approximately what a package installation script might do if
+# this were a native package. Perhaps some day it will be.
+
+: ${jaildir="${DESTDIR}/var/rcynic"}
+: ${jailuser="rcynic"}
+: ${jailgroup="rcynic"}
+: ${jailname="RPKI Validation System"}
+: ${setupcron="YES"}
+
+echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
+
+if /usr/bin/dscl . -read "/Groups/${jailgroup}" >/dev/null 2>&1
+then
+ echo "You already have a group \"${jailgroup}\", so I will use it."
+elif gid="$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $2 >= gid {gid = 1 + $2} END {print gid}')" &&
+ /usr/bin/dscl . -create "/Groups/${jailgroup}" &&
+ /usr/bin/dscl . -create "/Groups/${jailgroup}" RealName "${jailname}" &&
+ /usr/bin/dscl . -create "/Groups/${jailgroup}" PrimaryGroupID "$gid" &&
+ /usr/bin/dscl . -create "/Groups/${jailgroup}" GeneratedUID "$(/usr/bin/uuidgen)" &&
+ /usr/bin/dscl . -create "/Groups/${jailgroup}" Password "*"
+then
+ echo "Added group \"${jailgroup}\"."
+else
+ echo "Adding group \"${jailgroup}\" failed..."
+ echo "Please create it, then try again."
+ exit 1
+fi
+
+if /usr/bin/dscl . -read "/Users/${jailuser}" >/dev/null 2>&1
+then
+ echo "You already have a user \"${jailuser}\", so I will use it."
+elif uid="$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $2 >= uid {uid = 1 + $2} END {print uid}')" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" UserShell "/usr/bin/false" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" RealName "${jailname}" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" UniqueID "$uid" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" PrimaryGroupID "$gid" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" NFSHomeDirectory "/var/empty" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" GeneratedUID "$(/usr/bin/uuidgen)" &&
+ /usr/bin/dscl . -create "/Users/${jailuser}" Password "*"
+then
+ echo "Added user \"${jailuser}\"."
+else
+ echo "Adding user \"${jailuser}\" failed..."
+ echo "Please create it, then try again."
+ exit 1
+fi
+
+if ! /bin/test -d "${jaildir}"; then
+ /bin/mkdir "${jaildir}"
+fi
+
+if /usr/bin/install -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic &&
+ /usr/bin/install -o root -g wheel -m 555 RCynic/RCynic RCynic/StartupParameters.plist ${DESTDIR}/Library/StartupItems/RCynic; then
+ echo "Installed ${DESTDIR}/Library/StartupItems/RCynic"
+else
+ echo "Installing ${DESTDIR}/Library/StartupItems/RCynic failed"
+ exit 1
+fi
+
+echo "Running ${DESTDIR}/Library/StartupItems/RCynic/RCynic to set up directories"
+
+if ! rcynic_jaildir="$jaildir" rcynic_user="$jailuser" rcynic_group="$jailgroup" /Library/StartupItems/RCynic/RCynic start; then
+ echo "Directory setup failed"
+ exit 1
+fi
+
+if /bin/test -r "$jaildir/etc/rcynic.conf"; then
+ echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
+elif /usr/bin/install -m 444 -o root -g wheel -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
+ echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
+ for i in ../../sample-trust-anchors/*.tal; do
+ j="$jaildir/etc/trust-anchors/${i##*/}"
+ /bin/test -r "$i" || continue
+ /bin/test -r "$j" && continue
+ echo "Installing $i as $j"
+ /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
+ done
+ j=1
+ for i in $jaildir/etc/trust-anchors/*.tal; do
+ echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
+ j=$((j+1))
+ done
+else
+ echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
+ exit 1
+fi
+
+echo "Installing rcynic as ${jaildir}/bin/rcynic"
+
+/usr/bin/install -m 555 -o root -g wheel -p ../../rcynic "${jaildir}/bin/rcynic"
+
+if /bin/test -x "$jaildir/bin/rsync"; then
+ echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
+elif /usr/bin/install -m 555 -o root -g wheel -p /usr/bin/rsync "${jaildir}/bin/rsync"; then
+ echo "Installed ${jaildir}/bin/rsync"
+else
+ echo "Installing ${jaildir}/bin/rsync failed"
+ exit 1
+fi
+
+echo "Copying required shared libraries"
+
+shared_libraries="${jaildir}/bin/rcynic ${jaildir}/bin/rsync"
+while true
+do
+ closure="$(/usr/bin/otool -L ${shared_libraries} | /usr/bin/awk '/:$/ {next} {print $1}' | /usr/bin/sort -u)"
+ if test "x$shared_libraries" = "x$closure"
+ then
+ break
+ else
+ shared_libraries="$closure"
+ fi
+done
+
+for shared in /usr/lib/dyld $shared_libraries
+do
+ if /bin/test -r "${jaildir}/${shared}"
+ then
+ echo "You already have a \"${jaildir}/${shared}\", so I will use it"
+ elif /usr/bin/install -m 555 -o root -g wheel -p "${shared}" "${jaildir}/${shared}"
+ then
+ echo "Copied ${shared} into ${jaildir}"
+ else
+ echo "Unable to copy ${shared} into ${jaildir}"
+ exit 1
+ fi
+done
+
+if /usr/bin/install -m 444 -o root -g wheel -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
+ echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
+else
+ echo "Installing rcynic.xsl failed"
+ exit 1
+fi
+
+echo "Setting up root's crontab to run jailed rcynic"
+
+case "$setupcron" in
+YES|yes)
+ /usr/bin/crontab -l -u root 2>/dev/null |
+ /usr/bin/awk -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
+ BEGIN {
+ cmd = "exec /usr/sbin/chroot -u " jailuser " -g " jailgroup " " jaildir;
+ cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
+ }
+ $0 !~ cmd {
+ print;
+ }
+ END {
+ "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
+ printf "%u * * * *\t%s\n", $1 % 60, cmd;
+ }' |
+ /usr/bin/crontab -u root -
+ /bin/cat <<EOF
+
+ crontab is set up to run rcynic hourly, at a randomly selected
+ minute (to spread load on the rsync servers). Please do NOT
+ adjust this to run on the hour. In particular please do NOT
+ adjust this to run at midnight UTC.
+EOF
+ ;;
+
+*)
+ /bin/cat <<EOF
+
+ You'll need to add a crontab entry running the following command as root:
+
+ /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
+
+ Please try to pick a random time for this, don't just run it on the hour,
+ or at local midnight, or, worst of all, at midnight UTC.
+
+EOF
+ ;;
+
+esac
+
+/bin/cat <<EOF
+
+ Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
+ If you did not install your own trust anchors, a default set
+ of SAMPLE trust anchors may have been installed for you, but
+ you, the relying party, are the only one who can decide
+ whether you trust those anchors. rcynic will not do anything
+ useful without good trust anchors.
+
+EOF
diff --git a/rcynic-ng/installation-scripts/deinstall.sh b/rcynic-ng/installation-scripts/deinstall.sh
new file mode 100644
index 00000000..113794a4
--- /dev/null
+++ b/rcynic-ng/installation-scripts/deinstall.sh
@@ -0,0 +1,5 @@
+#!/bin/sh -
+# $Id$
+
+echo Sorry, automated deinstallation of rcynic is not implemented yet
+exit 1
diff --git a/rcynic-ng/installation-scripts/freebsd/install.sh b/rcynic-ng/installation-scripts/freebsd/install.sh
new file mode 100644
index 00000000..6c3453cb
--- /dev/null
+++ b/rcynic-ng/installation-scripts/freebsd/install.sh
@@ -0,0 +1,151 @@
+#!/bin/sh -
+# $Id$
+#
+# Create a chroot jail for rcynic.
+#
+# This is approximately what a pkg-install script might do if this were
+# a FreeBSD port. Perhaps some day it will be.
+
+: ${jaildir="${DESTDIR}/var/rcynic"}
+: ${jailuser="rcynic"}
+: ${jailgroup="rcynic"}
+: ${setupcron="NO"}
+
+echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
+
+if /usr/sbin/pw groupshow "${jailgroup}" 2>/dev/null; then
+ echo "You already have a group \"${jailgroup}\", so I will use it."
+elif /usr/sbin/pw groupadd ${jailgroup}; then
+ echo "Added group \"${jailgroup}\"."
+else
+ echo "Adding group \"${jailgroup}\" failed..."
+ echo "Please create it, then try again."
+ exit 1
+fi
+
+if /usr/sbin/pw usershow "${jailuser}" 2>/dev/null; then
+ echo "You already have a user \"${jailuser}\", so I will use it."
+elif /usr/sbin/pw useradd ${jailuser} -g ${jailgroup} -h - -d /nonexistant -s /usr/sbin/nologin -c "RPKI validation system"; then
+ echo "Added user \"${jailuser}\"."
+else
+ echo "Adding user \"${jailuser}\" failed..."
+ echo "Please create it, then try again."
+ exit 1
+fi
+
+if ! /bin/test -d "${jaildir}"; then
+ /bin/mkdir "${jaildir}"
+fi
+
+if /usr/bin/install -m 555 -o root -g wheel -p rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic; then
+ echo "Installed rc.d.rcynic as ${DESTDIR}/usr/local/etc/rc.d/rcynic"
+else
+ echo "Installing ${DESTDIR}/usr/local/etc/rc.d/rcynic failed"
+ exit 1
+fi
+
+echo "Running /usr/local/etc/rc.d/rcynic to set up directories"
+
+if ! rcynic_jaildir="$jaildir" rcynic_user="$jailuser" rcynic_group="$jailgroup" /bin/sh /usr/local/etc/rc.d/rcynic start; then
+ echo "Directory setup failed"
+ exit 1
+fi
+
+if /bin/test -r "$jaildir/etc/rcynic.conf"; then
+ echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
+elif /usr/bin/install -m 444 -o root -g wheel -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
+ echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
+ for i in ../../sample-trust-anchors/*.tal; do
+ j="$jaildir/etc/trust-anchors/${i##*/}"
+ /bin/test -r "$i" || continue
+ /bin/test -r "$j" && continue
+ echo "Installing $i as $j"
+ /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
+ done
+ j=1
+ for i in $jaildir/etc/trust-anchors/*.tal; do
+ echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
+ j=$((j+1))
+ done
+else
+ echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
+ exit 1
+fi
+
+echo "Installing rcynic as ${jaildir}/bin/rcynic"
+
+/usr/bin/install -m 555 -o root -g wheel -p ../../rcynic "${jaildir}/bin/rcynic"
+
+if /bin/test ! -x "$jaildir/bin/rsync" -a ! -x ../../static-rsync/rsync; then
+ echo "Building static rsync for jail, this may take a little while"
+ (cd ../../static-rsync && exec make)
+fi
+
+if /bin/test -x "$jaildir/bin/rsync"; then
+ echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
+elif /usr/bin/install -m 555 -o root -g wheel -p ../../static-rsync/rsync "${jaildir}/bin/rsync"; then
+ echo "Installed static rsync as \"${jaildir}/bin/rsync\""
+else
+ echo "Installing static rsync failed"
+ exit 1
+fi
+
+if /usr/bin/install -m 444 -o root -g wheel -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
+ echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
+else
+ echo "Installing rcynic.xsl failed"
+ exit 1
+fi
+
+echo "Setting up root's crontab to run jailed rcynic"
+
+case "$setupcron" in
+YES|yes)
+ /usr/bin/crontab -l -u root 2>/dev/null |
+ /usr/bin/awk -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
+ BEGIN {
+ cmd = "exec /usr/sbin/chroot -u " jailuser " -g " jailgroup " " jaildir;
+ cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
+ }
+ $0 !~ cmd {
+ print;
+ }
+ END {
+ "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
+ printf "%u * * * *\t%s\n", $1 % 60, cmd;
+ }' |
+ /usr/bin/crontab -u root -
+ /bin/cat <<EOF
+
+ crontab is set up to run rcynic hourly, at a randomly selected
+ minute (to spread load on the rsync servers). Please do NOT
+ adjust this to run on the hour. In particular please do NOT
+ adjust this to run at midnight UTC.
+EOF
+ ;;
+
+*)
+ /bin/cat <<EOF
+
+ You'll need to add a crontab entry running the following command as root:
+
+ /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
+
+ Please try to pick a random time for this, don't just run it on the hour,
+ or at local midnight, or, worst of all, at midnight UTC.
+
+EOF
+ ;;
+
+esac
+
+/bin/cat <<EOF
+
+ Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
+ If you did not install your own trust anchors, a default set
+ of SAMPLE trust anchors may have been installed for you, but
+ you, the relying party, are the only one who can decide
+ whether you trust those anchors. rcynic will not do anything
+ useful without good trust anchors.
+
+EOF
diff --git a/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic b/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic
new file mode 100755
index 00000000..b86c9b81
--- /dev/null
+++ b/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic
@@ -0,0 +1,71 @@
+#!/bin/sh -
+#
+# $Id$
+#
+# PROVIDE: rcynic
+# REQUIRE: DAEMON
+# KEYWORD: nojail
+
+. /etc/rc.subr
+
+name="rcynic"
+start_cmd="rcynic_start"
+stop_cmd="rcynic_stop"
+
+: ${rcynic_jaildir="/var/rcynic"}
+: ${rcynic_user="rcynic"}
+: ${rcynic_group="rcynic"}
+
+rcynic_start()
+{
+ /bin/test -d "${rcynic_jaildir}" || /bin/mkdir "${rcynic_jaildir}"
+ /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
+
+ /usr/sbin/mtree -deU -p "${rcynic_jaildir}" <<EOF
+
+ /set type=dir uname=root gname=wheel mode=0555
+ .
+ bin
+ ..
+ dev
+ ..
+ etc
+ trust-anchors
+ ..
+ ..
+ var
+ run
+ ..
+ ..
+ data uname=${rcynic_user} gname=${rcynic_group} mode=0755
+ ..
+ ..
+EOF
+
+ /bin/chmod -R a-w "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
+ /usr/sbin/chown -R root:wheel "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
+
+ if ! /sbin/mount -t devfs dev "${rcynic_jaildir}/dev"; then
+ echo "Mounting devfs on ${rcynic_jaildir}/dev failed..."
+ exit 1
+ fi
+
+ /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply hide
+ /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply path null unhide
+ /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply path random unhide
+
+ for i in /etc/localtime /etc/resolv.conf; do
+ j="${rcynic_jaildir}${i}"
+ if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
+ /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
+ fi
+ done
+}
+
+rcynic_stop()
+{
+ /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
+}
+
+load_rc_config $name
+run_rc_command "$1"
diff --git a/rcynic-ng/installation-scripts/install.sh b/rcynic-ng/installation-scripts/install.sh
new file mode 100644
index 00000000..8c708a28
--- /dev/null
+++ b/rcynic-ng/installation-scripts/install.sh
@@ -0,0 +1,14 @@
+#!/bin/sh -
+# $Id$
+
+set -e
+
+case "${host_os}" in
+
+freebsd*) cd freebsd; . ./install.sh;;
+darwin*) cd darwin; . ./install.sh;;
+linux*) cd linux; . ./install.sh;;
+
+*) echo 1>&2 "Don't know how to install rcynic jail on platform ${host_os}"
+ exit 1;;
+esac
diff --git a/rcynic-ng/installation-scripts/linux/install.sh.in b/rcynic-ng/installation-scripts/linux/install.sh.in
new file mode 100644
index 00000000..f0bdc505
--- /dev/null
+++ b/rcynic-ng/installation-scripts/linux/install.sh.in
@@ -0,0 +1,236 @@
+#!/bin/sh -
+# $Id$
+#
+# Create a chroot jail for rcynic.
+#
+# This is approximately what a package installation script might do.
+
+: ${jaildir="${DESTDIR}/var/rcynic"}
+: ${jailuser="rcynic"}
+: ${jailgroup="rcynic"}
+: ${setupcron="YES"}
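+
+# The settings above use the ": ${var=default}" idiom, so each can be
+# overridden by setting the corresponding variable (eg, jaildir or
+# setupcron) in the environment before this script runs.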
+
+AWK='@AWK@'
+SORT='@SORT@'
+
+echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
+
+if ${AWK} -F: -v jailgroup="${jailgroup}" 'BEGIN {status = 1} $1 == jailgroup {status = 0} END {exit status}' /etc/group
+then
+ echo "You already have a group \"${jailgroup}\", so I will use it."
+elif /usr/sbin/groupadd ${jailgroup}
+then
+ echo "Added group \"${jailgroup}\"."
+else
+ echo "Adding group \"${jailgroup}\" failed..."
+ echo "Please create it, then try again."
+ exit 1
+fi
+
+# The useradd program on CentOS 5.x uses the -n flag instead of -N to
+# avoid creating the associated group.
+nogroup='-N'
+if test -f /etc/redhat-release; then
+ read vendor release version < /etc/redhat-release
+ if test $vendor = CentOS; then
+ nogroup='-n'
+ fi
+fi
+
+if ${AWK} -F: -v jailuser="${jailuser}" 'BEGIN {status = 1} $1 == jailuser {status = 0} END {exit status}' /etc/passwd
+then
+ echo "You already have a user \"${jailuser}\", so I will use it."
+elif /usr/sbin/useradd -g ${jailgroup} -M $nogroup -d "${jaildir}" -s /sbin/nologin -c "RPKI validation system" ${jailuser}
+then
+ echo "Added user \"${jailuser}\"."
+else
+ echo "Adding user \"${jailuser}\" failed..."
+ echo "Please create it, then try again."
+ exit 1
+fi
+
+# test for x86_64 target
+if test -d /lib64; then
+ libdir=/lib64
+else
+ libdir=/lib
+fi
+
+echo "Building directories"
+
+if ! /bin/mkdir -p -v -m 555 \
+ "${jaildir}/bin" \
+ "${jaildir}/dev" \
+ "${jaildir}/etc/trust-anchors" \
+ "${jaildir}/${libdir}" \
+ "${jaildir}/usr/lib" \
+ "${jaildir}/data"
+then
+ echo "Unable to build directories under \"${jaildir}\", please fix this then try again."
+ exit 1
+fi
+
+echo "Installing device inodes"
+
+if ! (cd /dev; /bin/ls null zero random urandom | /bin/cpio -puv "${jaildir}/dev")
+then
+ echo "Unable to install device inodes in ${jaildir}/dev/, please fix this then try again"
+ exit 1
+fi
+
+echo "Copying files from /etc"
+
+for i in /etc/localtime /etc/resolv.conf /etc/passwd /etc/group
+do
+ j="${jaildir}${i}"
+ if test -r "$i" &&
+ ! /usr/bin/cmp -s "$i" "$j" &&
+ ! /bin/cp -p "$i" "$j"
+ then
+ echo "Unable to copy $i to ${jaildir}, please fix this then try again"
+ exit 1
+ fi
+done
+
+echo "Whacking file permissions"
+
+if ! /bin/chmod -R a-w "${jaildir}/bin" "${jaildir}/etc" ||
+ ! /bin/chmod -R 755 "${jaildir}/data" ||
+ ! /bin/chown -R root:root "${jaildir}/bin" "${jaildir}/etc" ||
+ ! /bin/chown -R "${jailuser}:${jailgroup}" "${jaildir}/data"
+then
+ echo "Unable to set file permissions and ownerships correctly, please fix this and try again"
+ exit 1
+fi
+
+if test -r "$jaildir/etc/rcynic.conf"; then
+ echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
+elif /usr/bin/install -m 444 -o root -g root -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
+ echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
+ for i in ../../sample-trust-anchors/*.tal; do
+ j="$jaildir/etc/trust-anchors/${i##*/}"
+ test -r "$i" || continue
+ test -r "$j" && continue
+ echo "Installing $i as $j"
+ /usr/bin/install -m 444 -o root -g root -p "$i" "$j"
+ done
+ j=1
+ for i in $jaildir/etc/trust-anchors/*.tal; do
+ echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
+ j=$((j+1))
+ done
+else
+ echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
+ exit 1
+fi
+
+echo "Installing rcynic as ${jaildir}/bin/rcynic"
+
+/usr/bin/install -m 555 -o root -g root -p ../../rcynic "${jaildir}/bin/rcynic"
+
+if test -x "$jaildir/bin/rsync"; then
+ echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
+elif /usr/bin/install -m 555 -o root -g root -p /usr/bin/rsync "${jaildir}/bin/rsync"; then
+ echo "Installed ${jaildir}/bin/rsync"
+else
+ echo "Installing ${jaildir}/bin/rsync failed"
+ exit 1
+fi
+
+echo "Copying required shared libraries"
+
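+# Compute the transitive closure of shared library dependencies: run
+# ldd over the current set of binaries and libraries, collect any new
+# library paths it reports, and repeat until the set stops growing.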
+shared_libraries="${jaildir}/bin/rcynic ${jaildir}/bin/rsync"
+while true
+do
+ closure="$(/usr/bin/ldd ${shared_libraries} |
+ ${AWK} -v "rcynic=${jaildir}/bin/rcynic" -v "rsync=${jaildir}/bin/rsync" \
+ '{sub(/:$/, "")} $0 == rcynic || $0 == rsync {next} {for (i = 1; i <= NF; i++) if ($i ~ /^\//) print $i}' |
+ ${SORT} -u)"
+ if test "x$shared_libraries" = "x$closure"
+ then
+ break
+ else
+ shared_libraries="$closure"
+ fi
+done
+
+# Under CentOS 5.5, rsync requires libresolv, but ldd doesn't show
+# it.
+if test -f ${libdir}/libresolv.so.2; then
+ shared_libraries="${shared_libraries} ${libdir}/libresolv.so.2"
+fi
+
+for shared in ${libdir}/ld*.so $shared_libraries ${libdir}/libnss*.so.*
+do
+ if test -r "${jaildir}/${shared}"
+ then
+ echo "You already have a \"${jaildir}${shared}\", so I will use it"
+ elif /usr/bin/install -m 555 -o root -g root -d "${jaildir}${shared%/*}" &&
+ /usr/bin/install -m 555 -o root -g root -p "${shared}" "${jaildir}${shared}"
+ then
+ echo "Copied ${shared} into ${jaildir}"
+ else
+ echo "Unable to copy ${shared} into ${jaildir}"
+ exit 1
+ fi
+done
+
+if /usr/bin/install -m 444 -o root -g root -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
+ echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
+else
+ echo "Installing rcynic.xsl failed"
+ exit 1
+fi
+
+echo "Setting up root's crontab to run jailed rcynic"
+
+case "$setupcron" in
+YES|yes)
+ /usr/bin/crontab -l -u root 2>/dev/null |
+ ${AWK} -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
+ BEGIN {
+ cmd = "exec /usr/sbin/chroot --userspec=" jailuser ":" jailgroup " " jaildir;
+ cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
+ }
+ $0 !~ cmd {
+ print;
+ }
+ END {
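+        # Two random bytes from /dev/random, reduced modulo 60, pick
+        # the minute at which the hourly cron entry fires.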
+ "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
+ printf "%u * * * *\t%s\n", $1 % 60, cmd;
+ }' |
+ /usr/bin/crontab -u root -
+ /bin/cat <<EOF
+
+ crontab is set up to run rcynic hourly, at a randomly selected
+ minute (to spread load on the rsync servers). Please do NOT
+ adjust this to run on the hour. In particular please do NOT
+ adjust this to run at midnight UTC.
+EOF
+ ;;
+
+*)
+ /bin/cat <<EOF
+
+ You'll need to add a crontab entry running the following command as root:
+
+    /usr/sbin/chroot --userspec=$jailuser:$jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
+
+ Please try to pick a random time for this, don't just run it on the hour,
+ or at local midnight, or, worst of all, at midnight UTC.
+
+EOF
+ ;;
+
+esac
+
+/bin/cat <<EOF
+
+ Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
+ If you did not install your own trust anchors, a default set
+ of SAMPLE trust anchors may have been installed for you, but
+ you, the relying party, are the only one who can decide
+ whether you trust those anchors. rcynic will not do anything
+ useful without good trust anchors.
+
+EOF
diff --git a/rcynic-ng/installation-scripts/sample-rcynic.conf b/rcynic-ng/installation-scripts/sample-rcynic.conf
new file mode 100644
index 00000000..6e464e7e
--- /dev/null
+++ b/rcynic-ng/installation-scripts/sample-rcynic.conf
@@ -0,0 +1,27 @@
+# $Id$
+#
+# Sample rcynic configuration file for a chroot-jailed environment
+
+[rcynic]
+rsync-program = /bin/rsync
+authenticated = /data/authenticated
+old-authenticated = /data/authenticated.old
+unauthenticated = /data/unauthenticated
+lockfile = /data/lock
+jitter = 600
+use-syslog = true
+log-level = log_usage_err
+
+# You need to specify some trust anchors here, eg:
+
+#trust-anchor.1 = /etc/trust-anchors/ta-1.cer
+#trust-anchor.2 = /etc/trust-anchors/ta-2.cer
+
+# or, using the "Trust Anchor Locator" form:
+
+#trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
+#trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
+
+# The choice between these two formats depends largely on the policies
+# of the entity generating the corresponding trust anchor, i.e., it will
+# probably be made for you by the generating entity.
diff --git a/rcynic-ng/make-tal.sh b/rcynic-ng/make-tal.sh
new file mode 100755
index 00000000..854a76b9
--- /dev/null
+++ b/rcynic-ng/make-tal.sh
@@ -0,0 +1,42 @@
+#!/bin/sh -
+# $Id$
+#
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Generate a trust anchor locator (TAL) given the rsync URI for a
+# self-signed RFC 3779 certificate.
+#
+# Usage: make-tal.sh uri [local_copy_of_certificate]
+#
+# The optional second parameter is the name of a local copy of the
+# certificate to be checked against the copy retrieved from the URI;
+# if present, this should be a local X.509 file in DER format.
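+#
+# Example (hypothetical URI and output file):
+#
+#   sh make-tal.sh rsync://rpki.example.net/root.cer > example.tal
+#
+# The TAL is written to stdout: the rsync URI, a blank line, then the
+# certificate's public key in Base64 (the PEM body without the
+# BEGIN/END markers).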
+
+case "$1" in rsync://*) :;; *) echo 1>&2 "\"$1\" is not a rsync URI"; exit 1;; esac
+
+tmpfile="make-tal.tmp.$$"
+trap "rm -f $tmpfile" 0 1 2 15
+
+rsync "$1" "$tmpfile" || exit
+
+if test -n "$2"
+then
+ diff -q "$tmpfile" "$2" || exit
+fi
+
+echo "$1"
+echo
+openssl x509 -inform DER -in "$tmpfile" -pubkey -noout |
+awk '!/-----(BEGIN|END)/'
diff --git a/rcynic-ng/rcynic.c b/rcynic-ng/rcynic.c
new file mode 100644
index 00000000..0d855105
--- /dev/null
+++ b/rcynic-ng/rcynic.c
@@ -0,0 +1,3580 @@
+/*
+ * Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ *
+ * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
+ *
+ * Permission to use, copy, modify, and distribute this software for any
+ * purpose with or without fee is hereby granted, provided that the above
+ * copyright notice and this permission notice appear in all copies.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+ * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ * AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+ * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ * PERFORMANCE OF THIS SOFTWARE.
+ */
+
+/* $Id$ */
+
+/**
+ * @mainpage
+ *
+ * "Cynical rsync": Recursively walk RPKI tree using rsync to pull
+ * data from remote sites, validating certificates and CRLs as we go.
+ *
+ * Doxygen doesn't quite know what to make of a one-file C program,
+ * and ends up putting most of the interesting data @link rcynic.c
+ * here. @endlink
+ */
+
+#include <assert.h>
+#include <stdio.h>
+#include <stdlib.h>
+#include <stdarg.h>
+#include <unistd.h>
+#include <sys/types.h>
+#include <sys/time.h>
+#include <sys/file.h>
+#include <errno.h>
+#include <sys/signal.h>
+#include <sys/wait.h>
+#include <time.h>
+#include <string.h>
+#include <sys/stat.h>
+#include <dirent.h>
+#include <limits.h>
+#include <fcntl.h>
+#include <signal.h>
+
+#define SYSLOG_NAMES /* defines CODE prioritynames[], facilitynames[] */
+#include <syslog.h>
+
+#include <openssl/bio.h>
+#include <openssl/pem.h>
+#include <openssl/err.h>
+#include <openssl/x509.h>
+#include <openssl/x509v3.h>
+#include <openssl/safestack.h>
+#include <openssl/conf.h>
+#include <openssl/rand.h>
+#include <openssl/asn1t.h>
+#include <openssl/cms.h>
+
+#include "defstack.h"
+
+#ifndef FILENAME_MAX
+#define FILENAME_MAX 1024
+#endif
+
+#define SCHEME_RSYNC ("rsync://")
+#define SIZEOF_RSYNC (sizeof(SCHEME_RSYNC) - 1)
+
+/**
+ * Maximum length of a URI.
+ */
+#define URI_MAX (FILENAME_MAX + SIZEOF_RSYNC)
+
+/**
+ * Maximum number of times we try to kill an inferior process before
+ * giving up.
+ */
+#define KILL_MAX 10
+
+#ifndef HOST_NAME_MAX
+#define HOST_NAME_MAX 256
+#endif
+
+/**
+ * Version number of XML summary output.
+ */
+#define XML_SUMMARY_VERSION 1
+
+/**
+ * How much buffer space do we need for a raw address?
+ */
+#define ADDR_RAW_BUF_LEN 16
+
+/**
+ * Logging levels. Same general idea as syslog(), but our own
+ * categories based on what makes sense for this program.  Default
+ * mappings to syslog() priorities are here because it's the easiest
+ * way to make sure that we assign a syslog level to each of ours.
+ */
+
+#define LOG_LEVELS \
+ QQ(log_sys_err, LOG_ERR) /* Error from OS or library */ \
+ QQ(log_usage_err, LOG_ERR) /* Bad usage (local error) */ \
+ QQ(log_data_err, LOG_NOTICE) /* Bad data, no biscuit */ \
+ QQ(log_telemetry, LOG_INFO) /* Normal progress chatter */ \
+ QQ(log_verbose, LOG_INFO) /* Extra chatter */ \
+ QQ(log_debug, LOG_DEBUG) /* Only useful when debugging */
+
+#define QQ(x,y) x ,
+typedef enum log_level { LOG_LEVELS LOG_LEVEL_T_MAX } log_level_t;
+#undef QQ
+
+#define QQ(x,y) { #x , x },
+static const struct {
+ const char *name;
+ log_level_t value;
+} log_levels[] = {
+ LOG_LEVELS
+};
+#undef QQ
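+
+/*
+ * Illustration of the X-macro expansion above: with QQ(x,y) defined as
+ * "x ," the LOG_LEVELS list expands to the enumerators log_sys_err,
+ * log_usage_err, ..., log_debug; with QQ(x,y) defined as "{ #x , x },"
+ * it expands to the name/value table that configure_logmsg() uses to
+ * parse log level names from the configuration.
+ */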
+
+/**
+ * MIB counters derived from OpenSSL. Long list of validation failure
+ * codes from OpenSSL (crypto/x509/x509_vfy.h).
+ */
+
+#define MIB_COUNTERS_FROM_OPENSSL \
+ QV(X509_V_ERR_UNABLE_TO_GET_CRL) \
+ QV(X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE) \
+ QV(X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE) \
+ QV(X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY) \
+ QV(X509_V_ERR_CERT_SIGNATURE_FAILURE) \
+ QV(X509_V_ERR_CRL_SIGNATURE_FAILURE) \
+ QV(X509_V_ERR_CERT_NOT_YET_VALID) \
+ QV(X509_V_ERR_CERT_HAS_EXPIRED) \
+ QV(X509_V_ERR_CRL_NOT_YET_VALID) \
+ QV(X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD) \
+ QV(X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD) \
+ QV(X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD) \
+ QV(X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD) \
+ QV(X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) \
+ QV(X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) \
+ QV(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) \
+ QV(X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE) \
+ QV(X509_V_ERR_CERT_CHAIN_TOO_LONG) \
+ QV(X509_V_ERR_CERT_REVOKED) \
+ QV(X509_V_ERR_INVALID_CA) \
+ QV(X509_V_ERR_PATH_LENGTH_EXCEEDED) \
+ QV(X509_V_ERR_INVALID_PURPOSE) \
+ QV(X509_V_ERR_CERT_UNTRUSTED) \
+ QV(X509_V_ERR_CERT_REJECTED) \
+ QV(X509_V_ERR_AKID_SKID_MISMATCH) \
+ QV(X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH) \
+ QV(X509_V_ERR_KEYUSAGE_NO_CERTSIGN) \
+ QV(X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER) \
+ QV(X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION) \
+ QV(X509_V_ERR_KEYUSAGE_NO_CRL_SIGN) \
+ QV(X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION) \
+ QV(X509_V_ERR_INVALID_NON_CA) \
+ QV(X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED) \
+ QV(X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE) \
+ QV(X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED) \
+ QV(X509_V_ERR_INVALID_EXTENSION) \
+ QV(X509_V_ERR_INVALID_POLICY_EXTENSION) \
+ QV(X509_V_ERR_NO_EXPLICIT_POLICY) \
+ QV(X509_V_ERR_UNNESTED_RESOURCE)
+
+/**
+ * MIB counters specific to rcynic. "validation_ok" is not used as a
+ * counter, but is used as a validation status code.
+ */
+
+#define MIB_COUNTERS \
+ QG(validation_ok, "OK") \
+ QG(backup_cert_accepted, "Backup certificates accepted") \
+ QB(backup_cert_rejected, "Backup certificates rejected") \
+ QG(backup_crl_accepted, "Backup CRLs accepted") \
+ QB(backup_crl_rejected, "Backup CRLs rejected") \
+ QG(current_cert_accepted, "Current certificates accepted") \
+ QB(current_cert_rejected, "Current certificates rejected") \
+ QG(current_crl_accepted, "Current CRLs accepted") \
+ QB(current_crl_rejected, "Current CRLs rejected") \
+ QG(current_manifest_accepted, "Current Manifests accepted") \
+ QB(current_manifest_rejected, "Current Manifests rejected") \
+ QG(backup_manifest_accepted, "Backup Manifests accepted") \
+ QB(backup_manifest_rejected, "Backup Manifests rejected") \
+ QB(rsync_failed, "rsync transfers failed") \
+ QG(rsync_succeeded, "rsync transfers succeeded") \
+ QB(rsync_timed_out, "rsync transfers timed out") \
+ QW(stale_crl, "Stale CRLs") \
+ QB(malformed_sia, "Malformed SIA extensions") \
+ QB(sia_missing, "SIA extensions missing") \
+ QB(aia_missing, "AIA extensions missing") \
+ QB(crldp_missing, "CRLDP extensions missing") \
+ QB(aia_mismatch, "Mismatched AIA extensions") \
+ QB(unknown_verify_error, "Unknown OpenSSL verify error") \
+ QG(current_cert_recheck, "Certificates rechecked") \
+ QB(manifest_invalid_ee, "Invalid manifest certificates") \
+ QB(manifest_invalid_cms, "Manifest validation failures") \
+ QB(manifest_decode_error, "Manifest decode errors") \
+ QW(stale_manifest, "Stale manifests") \
+ QB(manifest_not_yet_valid, "Manifests not yet valid") \
+ QB(manifest_bad_econtenttype, "Bad manifest eContentType") \
+ QB(manifest_missing_signer, "Missing manifest signers") \
+ QB(manifest_missing_crldp, "Missing manifest CRLDP") \
+ QB(manifest_malformed_crldp, "Malformed manifest CRLDP") \
+ QB(certificate_digest_mismatch, "Certificate digest mismatches") \
+ QB(crl_digest_mismatch, "CRL digest mismatches") \
+ QB(crl_not_in_manifest, "CRL not listed in manifest") \
+ QB(roa_invalid_ee, "Invalid ROA certificates") \
+ QB(roa_invalid_cms, "ROA validation failures") \
+ QB(roa_decode_error, "ROA decode errors") \
+ QB(roa_bad_econtenttype, "Bad ROA eContentType") \
+ QB(roa_missing_signer, "Missing ROA signers") \
+ QB(roa_digest_mismatch, "ROA digest mismatches") \
+ QG(current_roa_accepted, "Current ROAs accepted") \
+ QB(current_roa_rejected, "Current ROAs rejected") \
+ QG(backup_roa_accepted, "Backup ROAs accepted") \
+ QB(backup_roa_rejected, "Backup ROAs rejected") \
+ QB(malformed_roa_addressfamily, "Malformed ROA addressFamilys") \
+ QB(manifest_wrong_version, "Wrong manifest versions") \
+ QB(roa_wrong_version, "Wrong ROA versions") \
+ QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \
+ QB(uri_too_long, "URI too long") \
+  QB(malformed_crldp,		"Malformed CRLDP extension")	\
+ QB(certificate_bad_signature, "Bad certificate signature") \
+ QB(certificate_bad_crl, "Bad certificate CRL") \
+ QB(manifest_bad_crl, "Manifest has bad CRL") \
+ QB(roa_resources_malformed, "ROA resources malformed") \
+ QB(roa_bad_afi, "ROA contains bad AFI value") \
+ QB(roa_not_nested, "ROA resources not in EE") \
+ QB(roa_bad_crl, "ROA EE has bad CRL") \
+ QB(ghostbuster_digest_mismatch, "Ghostbuster digest mismatches") \
+ QB(ghostbuster_bad_econtenttype, "Bad Ghostbuster eContentType") \
+ QB(ghostbuster_invalid_cms, "Ghostbuster validation failures") \
+ QB(ghostbuster_missing_signer, "Missing Ghostbuster signers") \
+ QB(ghostbuster_bad_crl, "Ghostbuster EE has bad CRL") \
+ QB(ghostbuster_invalid_ee, "Invalid Ghostbuster certificates") \
+ QG(current_ghostbuster_accepted, "Current Ghostbusters accepted") \
+ QB(current_ghostbuster_rejected, "Current Ghostbusters rejected") \
+ QG(backup_ghostbuster_accepted, "Backup Ghostbusters accepted") \
+ QB(backup_ghostbuster_rejected, "Backup Ghostbusters rejected") \
+ QB(disallowed_extension, "Disallowed X.509v3 extension") \
+ QB(crldp_mismatch, "CRLDP doesn't match issuer's SIA") \
+ QB(manifest_missing, "Manifest pointer missing") \
+ QB(manifest_mismatch, "Manifest doesn't match SIA") \
+ QB(trust_anchor_with_crldp, "Trust anchor can't have CRLDP") \
+ QW(object_not_in_manifest, "Object not in manifest") \
+ MIB_COUNTERS_FROM_OPENSSL
+
+#define QV(x) QB(mib_openssl_##x, 0)
+
+static const char
+ mib_counter_kind_good[] = "good",
+ mib_counter_kind_warn[] = "warn",
+ mib_counter_kind_bad[] = "bad";
+
+#define QG(x,y) mib_counter_kind_good ,
+#define QW(x,y) mib_counter_kind_warn ,
+#define QB(x,y) mib_counter_kind_bad ,
+static const char * const mib_counter_kind[] = { MIB_COUNTERS NULL };
+#undef QB
+#undef QW
+#undef QG
+
+#define QG(x,y) QQ(x,y)
+#define QW(x,y) QQ(x,y)
+#define QB(x,y) QQ(x,y)
+
+#define QQ(x,y) x ,
+typedef enum mib_counter { MIB_COUNTERS MIB_COUNTER_T_MAX } mib_counter_t;
+#undef QQ
+
+#define QQ(x,y) y ,
+static const char * const mib_counter_desc[] = { MIB_COUNTERS NULL };
+#undef QQ
+
+#define QQ(x,y) #x ,
+static const char * const mib_counter_label[] = { MIB_COUNTERS NULL };
+#undef QQ
+
+#undef QV
+
+#define QQ(x,y) 0 ,
+#define QV(x) x ,
+static const long mib_counter_openssl[] = { MIB_COUNTERS 0 };
+#undef QV
+#undef QQ
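+
+/*
+ * The expansions above build four parallel tables indexed by
+ * mib_counter_t: mib_counter_kind[] ("good"/"warn"/"bad"),
+ * mib_counter_desc[] (human-readable description, NULL for the
+ * OpenSSL-derived counters), mib_counter_label[] (symbolic name), and
+ * mib_counter_openssl[] (matching OpenSSL verify error code, zero for
+ * rcynic's own counters).
+ */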
+
+/**
+ * Per-host MIB counter object.
+ * hostname[] must be first element.
+ */
+typedef struct host_mib_counter {
+ char hostname[URI_MAX];
+ unsigned long counters[MIB_COUNTER_T_MAX];
+} HOST_MIB_COUNTER;
+
+DECLARE_STACK_OF(HOST_MIB_COUNTER)
+
+/**
+ * Per-URI validation status object.
+ */
+typedef struct validation_status {
+ char uri[URI_MAX];
+ time_t timestamp;
+ mib_counter_t code;
+} VALIDATION_STATUS;
+
+DECLARE_STACK_OF(VALIDATION_STATUS)
+
+/**
+ * Structure to hold data parsed out of a certificate.
+ */
+typedef struct certinfo {
+ int ca, ta;
+ char uri[URI_MAX], sia[URI_MAX], aia[URI_MAX], crldp[URI_MAX], manifest[URI_MAX];
+} certinfo_t;
+
+/**
+ * Program context that would otherwise be a mess of global variables.
+ */
+typedef struct rcynic_ctx {
+ char *authenticated, *old_authenticated, *unauthenticated;
+ char *jane, *rsync_program;
+ STACK_OF(OPENSSL_STRING) *rsync_cache, *backup_cache, *stale_cache;
+ STACK_OF(HOST_MIB_COUNTER) *host_counters;
+ STACK_OF(VALIDATION_STATUS) *validation_status;
+ int indent, use_syslog, allow_stale_crl, allow_stale_manifest, use_links;
+ int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX];
+ int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest;
+ log_level_t log_level;
+ X509_STORE *x509_store;
+} rcynic_ctx_t;
+
+/**
+ * Extended context for verify callbacks. This is a wrapper around
+ * OpenSSL's X509_STORE_CTX, and the embedded X509_STORE_CTX @em must be
+ * the first element of this structure in order for the evil cast to
+ * do the right thing. This is ugly but safe, as the C language
+ * promises us that the address of the first element of a structure is
+ * the same as the address of the structure.
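+ * check_x509_cb() below relies on this: it receives a plain
+ * X509_STORE_CTX pointer from OpenSSL and casts it back to
+ * rcynic_x509_store_ctx_t to recover the rcynic context and subject
+ * certinfo.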
+ */
+typedef struct rcynic_x509_store_ctx {
+ X509_STORE_CTX ctx; /* Must be first */
+ const rcynic_ctx_t *rc;
+ const certinfo_t *subject;
+} rcynic_x509_store_ctx_t;
+
+/**
+ * Subversion ID data.
+ */
+static const char svn_id[] = "$Id$";
+
+/*
+ * ASN.1 Object identifiers in form suitable for use with oid_cmp()
+ */
+
+/** 1.3.6.1.5.5.7.48.2 */
+static const unsigned char id_ad_caIssuers[] =
+ {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x2};
+
+/** 1.3.6.1.5.5.7.48.5 */
+static const unsigned char id_ad_caRepository[] =
+ {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x5};
+
+/** 1.3.6.1.5.5.7.48.10 */
+static const unsigned char id_ad_rpkiManifest[] =
+ {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0xa};
+
+/** 1.2.840.113549.1.9.16.1.24 */
+static const unsigned char id_ct_routeOriginAttestation[] =
+ {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x18};
+
+/** 1.2.840.113549.1.9.16.1.26 */
+static const unsigned char id_ct_rpkiManifest[] =
+ {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x1a};
+
+/** 1.2.840.113549.1.9.16.1.35 */
+static const unsigned char id_ct_rpkiGhostbusters[] =
+ {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x23};
+
+/** 2.16.840.1.101.3.4.2.1 */
+static const unsigned char id_sha256[] =
+ {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01};
+
+/**
+ * RPKI certificate policy OID in form suitable for use with
+ * X509_VERIFY_PARAM_add0_policy().
+ */
+static const char rpki_policy_oid[] = "1.3.6.1.5.5.7.14.2";
+
+
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ */
+static void OPENSSL_STRING_free(OPENSSL_STRING s)
+{
+ if (s)
+ free(s);
+}
+
+/**
+ * Wrapper around an idiom we use with OPENSSL_STRING stacks. There's
+ * a bug in the current sk_OPENSSL_STRING_delete() macro that casts
+ * the return value to the wrong type, so we cast it to something
+ * innocuous here and avoid using that macro elsewhere.
+ */
+static void sk_OPENSSL_STRING_remove(STACK_OF(OPENSSL_STRING) *sk, const char *str)
+{
+ OPENSSL_STRING_free((void *) sk_OPENSSL_STRING_delete(sk, sk_OPENSSL_STRING_find(sk, str)));
+}
+
+/**
+ * Allocate a new HOST_MIB_COUNTER object.
+ */
+static HOST_MIB_COUNTER *HOST_MIB_COUNTER_new(void)
+{
+ HOST_MIB_COUNTER *h = malloc(sizeof(*h));
+ if (h)
+ memset(h, 0, sizeof(*h));
+ return h;
+}
+
+/**
+ * Allocate a new VALIDATION_STATUS object.
+ */
+static VALIDATION_STATUS *VALIDATION_STATUS_new(void)
+{
+ VALIDATION_STATUS *v = malloc(sizeof(*v));
+ if (v)
+ memset(v, 0, sizeof(*v));
+ return v;
+}
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ */
+static void HOST_MIB_COUNTER_free(HOST_MIB_COUNTER *h)
+{
+ if (h)
+ free(h);
+}
+
+/**
+ * Type-safe wrapper around free() to keep safestack macros happy.
+ */
+static void VALIDATION_STATUS_free(VALIDATION_STATUS *v)
+{
+ if (v)
+ free(v);
+}
+
+
+
+/*
+ * ASN.1 templates. Not sure that ASN1_EXP_OPT() is the right macro
+ * for these defaulted "version" fields, but it's what the examples
+ * for this construction use. Probably doesn't matter since this
+ * program only decodes manifests, never encodes them.
+ *
+ * Putting this section under conditional compilation is a hack to
+ * keep Doxygen's parser from becoming hopelessly confused by the
+ * weird OpenSSL ASN.1 macros. Someday perhaps I'll have time to
+ * track down the problem in Doxygen's parser, but this works for now.
+ */
+
+#ifndef DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
+
+typedef struct FileAndHash_st {
+ ASN1_IA5STRING *file;
+ ASN1_BIT_STRING *hash;
+} FileAndHash;
+
+DECLARE_STACK_OF(FileAndHash)
+
+ASN1_SEQUENCE(FileAndHash) = {
+ ASN1_SIMPLE(FileAndHash, file, ASN1_IA5STRING),
+ ASN1_SIMPLE(FileAndHash, hash, ASN1_BIT_STRING)
+} ASN1_SEQUENCE_END(FileAndHash)
+
+typedef struct Manifest_st {
+ ASN1_INTEGER *version, *manifestNumber;
+ ASN1_GENERALIZEDTIME *thisUpdate, *nextUpdate;
+ ASN1_OBJECT *fileHashAlg;
+ STACK_OF(FileAndHash) *fileList;
+} Manifest;
+
+ASN1_SEQUENCE(Manifest) = {
+ ASN1_EXP_OPT(Manifest, version, ASN1_INTEGER, 0),
+ ASN1_SIMPLE(Manifest, manifestNumber, ASN1_INTEGER),
+ ASN1_SIMPLE(Manifest, thisUpdate, ASN1_GENERALIZEDTIME),
+ ASN1_SIMPLE(Manifest, nextUpdate, ASN1_GENERALIZEDTIME),
+ ASN1_SIMPLE(Manifest, fileHashAlg, ASN1_OBJECT),
+ ASN1_SEQUENCE_OF(Manifest, fileList, FileAndHash)
+} ASN1_SEQUENCE_END(Manifest)
+
+DECLARE_ASN1_FUNCTIONS(FileAndHash)
+DECLARE_ASN1_FUNCTIONS(Manifest)
+
+IMPLEMENT_ASN1_FUNCTIONS(FileAndHash)
+IMPLEMENT_ASN1_FUNCTIONS(Manifest)
+
+typedef struct ROAIPAddress_st {
+ ASN1_BIT_STRING *IPAddress;
+ ASN1_INTEGER *maxLength;
+} ROAIPAddress;
+
+DECLARE_STACK_OF(ROAIPAddress)
+
+ASN1_SEQUENCE(ROAIPAddress) = {
+ ASN1_SIMPLE(ROAIPAddress, IPAddress, ASN1_BIT_STRING),
+ ASN1_OPT(ROAIPAddress, maxLength, ASN1_INTEGER)
+} ASN1_SEQUENCE_END(ROAIPAddress)
+
+typedef struct ROAIPAddressFamily_st {
+ ASN1_OCTET_STRING *addressFamily;
+ STACK_OF(ROAIPAddress) *addresses;
+} ROAIPAddressFamily;
+
+DECLARE_STACK_OF(ROAIPAddressFamily)
+
+ASN1_SEQUENCE(ROAIPAddressFamily) = {
+ ASN1_SIMPLE(ROAIPAddressFamily, addressFamily, ASN1_OCTET_STRING),
+ ASN1_SEQUENCE_OF(ROAIPAddressFamily, addresses, ROAIPAddress)
+} ASN1_SEQUENCE_END(ROAIPAddressFamily)
+
+typedef struct ROA_st {
+ ASN1_INTEGER *version, *asID;
+ STACK_OF(ROAIPAddressFamily) *ipAddrBlocks;
+} ROA;
+
+ASN1_SEQUENCE(ROA) = {
+ ASN1_EXP_OPT(ROA, version, ASN1_INTEGER, 0),
+ ASN1_SIMPLE(ROA, asID, ASN1_INTEGER),
+ ASN1_SEQUENCE_OF(ROA, ipAddrBlocks, ROAIPAddressFamily)
+} ASN1_SEQUENCE_END(ROA)
+
+DECLARE_ASN1_FUNCTIONS(ROAIPAddress)
+DECLARE_ASN1_FUNCTIONS(ROAIPAddressFamily)
+DECLARE_ASN1_FUNCTIONS(ROA)
+
+IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddress)
+IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddressFamily)
+IMPLEMENT_ASN1_FUNCTIONS(ROA)
+
+#endif /* DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION */
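+
+/*
+ * The ASN1_SEQUENCE()/IMPLEMENT_ASN1_FUNCTIONS() macros above generate
+ * the usual OpenSSL entry points for these types (Manifest_new(),
+ * Manifest_free(), d2i_Manifest(), i2d_Manifest(), and likewise for the
+ * ROA types), so the manifest and ROA eContent can be decoded elsewhere
+ * in this program.
+ */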
+
+
+
+/**
+ * Logging.
+ */
+static void vlogmsg(const rcynic_ctx_t *rc,
+ const log_level_t level,
+ const char *fmt,
+ va_list ap)
+{
+ char tad[sizeof("00:00:00")+1];
+ time_t tad_time;
+
+ assert(rc && fmt);
+
+ if (rc->log_level < level)
+ return;
+
+ if (rc->use_syslog) {
+ vsyslog(rc->priority[level], fmt, ap);
+ } else {
+ time(&tad_time);
+ strftime(tad, sizeof(tad), "%H:%M:%S", localtime(&tad_time));
+ fprintf(stderr, "%s: ", tad);
+ if (rc->jane)
+ fprintf(stderr, "%s: ", rc->jane);
+ if (rc->indent)
+ fprintf(stderr, "%*s", rc->indent, " ");
+ vfprintf(stderr, fmt, ap);
+ putc('\n', stderr);
+ }
+}
+
+/**
+ * Logging.
+ */
+static void logmsg(const rcynic_ctx_t *rc,
+ const log_level_t level,
+ const char *fmt, ...)
+{
+ va_list ap;
+ va_start(ap, fmt);
+ vlogmsg(rc, level, fmt, ap);
+ va_end(ap);
+}
+
+/**
+ * Print OpenSSL library errors.
+ */
+static void log_openssl_errors(const rcynic_ctx_t *rc)
+{
+ const char *data, *file;
+ unsigned long code;
+ char error[256];
+ int flags, line;
+
+  if (rc->log_level < log_verbose)
+ return;
+
+ while ((code = ERR_get_error_line_data(&file, &line, &data, &flags))) {
+ ERR_error_string_n(code, error, sizeof(error));
+ if (data && (flags & ERR_TXT_STRING))
+      logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s: %s", file, line, error, data);
+    else
+      logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error);
+ }
+}
+
+/**
+ * Configure logging.
+ */
+static int configure_logmsg(rcynic_ctx_t *rc, const char *name)
+{
+ int i;
+
+ assert(rc && name);
+
+ for (i = 0; i < sizeof(log_levels)/sizeof(*log_levels); i++) {
+ if (!strcmp(name, log_levels[i].name)) {
+ rc->log_level = log_levels[i].value;
+ return 1;
+ }
+ }
+
+ logmsg(rc, log_usage_err, "Bad log level %s", name);
+ return 0;
+}
+
+/**
+ * Configure syslog.
+ */
+static int configure_syslog(const rcynic_ctx_t *rc,
+ int *result,
+ const CODE *table,
+ const char *name)
+{
+ assert(result && table && name);
+
+ while (table->c_name && strcmp(table->c_name, name))
+ table++;
+
+ if (table->c_name) {
+ *result = table->c_val;
+ return 1;
+ } else {
+ logmsg(rc, log_usage_err, "Bad syslog code %s", name);
+ return 0;
+ }
+}
+
+/**
+ * Configure boolean variable.
+ */
+static int configure_boolean(const rcynic_ctx_t *rc,
+ int *result,
+ const char *val)
+{
+ assert(rc && result && val);
+
+ switch (*val) {
+ case 'y': case 'Y': case 't': case 'T': case '1':
+ *result = 1;
+ return 1;
+ case 'n': case 'N': case 'f': case 'F': case '0':
+ *result = 0;
+ return 1;
+ default:
+ logmsg(rc, log_usage_err, "Bad boolean value %s", val);
+ return 0;
+ }
+}
+
+/**
+ * Configure integer variable.
+ */
+static int configure_integer(const rcynic_ctx_t *rc,
+ int *result,
+ const char *val)
+{
+ long res;
+ char *p;
+
+ assert(rc && result && val);
+
+ res = strtol(val, &p, 10);
+
+ if (*val != '\0' && *p == '\0') {
+ *result = (int) res;
+ return 1;
+ } else {
+ logmsg(rc, log_usage_err, "Bad integer value %s", val);
+ return 0;
+ }
+}
+
+
+
+/**
+ * Make a directory if it doesn't already exist.
+ */
+static int mkdir_maybe(const rcynic_ctx_t *rc, const char *name)
+{
+ char *b, buffer[FILENAME_MAX];
+
+ assert(name != NULL);
+ if (strlen(name) >= sizeof(buffer)) {
+ logmsg(rc, log_data_err, "Pathname %s too long", name);
+ return 0;
+ }
+ strcpy(buffer, name);
+ b = buffer[0] == '/' ? buffer + 1 : buffer;
+ if ((b = strrchr(b, '/')) == NULL)
+ return 1;
+ *b = '\0';
+ if (!mkdir_maybe(rc, buffer)) {
+ logmsg(rc, log_sys_err, "Failed to make directory %s", buffer);
+ return 0;
+ }
+ if (!access(buffer, F_OK))
+ return 1;
+ logmsg(rc, log_verbose, "Creating directory %s", buffer);
+ return mkdir(buffer, 0777) == 0;
+}
+
+/**
+ * strdup() a string and push it onto a stack.
+ */
+static int sk_OPENSSL_STRING_push_strdup(STACK_OF(OPENSSL_STRING) *sk, const char *str)
+{
+ OPENSSL_STRING s = strdup(str);
+
+ if (s && sk_OPENSSL_STRING_push(sk, s))
+ return 1;
+ if (s)
+ free(s);
+ return 0;
+}
+
+/**
+ * Compare two URI strings, for OpenSSL STACK operations.
+ */
+
+static int uri_cmp(const char * const *a, const char * const *b)
+{
+ return strcmp(*a, *b);
+}
+
+/**
+ * Is string an rsync URI?
+ */
+static int is_rsync(const char *uri)
+{
+ return uri && !strncmp(uri, SCHEME_RSYNC, SIZEOF_RSYNC);
+}
+
+/**
+ * Convert an rsync URI to a filename, checking for evil character
+ * sequences. NB: This routine can't call mib_increment(), because
+ * mib_increment() calls it, so errors detected here only go into
+ * the log, not the MIB.
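+ *
+ * For example (hypothetical URI), with prefix "unauthenticated/" the
+ * URI "rsync://rpki.example.net/repo/ca.cer" becomes the filename
+ * "unauthenticated/rpki.example.net/repo/ca.cer".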
+ */
+static int uri_to_filename(const rcynic_ctx_t *rc,
+ const char *uri,
+ char *buffer,
+ const size_t buflen,
+ const char *prefix)
+{
+ const char *u;
+ size_t n;
+
+ buffer[0] = '\0';
+
+ if (!is_rsync(uri)) {
+ logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri);
+ return 0;
+ }
+
+ u = uri + SIZEOF_RSYNC;
+ n = strlen(u);
+
+ if (u[0] == '/' || u[0] == '.' || strstr(u, "/../") ||
+ (n >= 3 && !strcmp(u + n - 3, "/.."))) {
+ logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri);
+ return 0;
+ }
+
+ if (prefix)
+ n += strlen(prefix);
+
+ if (n >= buflen) {
+ logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri);
+ return 0;
+ }
+
+ if (prefix) {
+ strcpy(buffer, prefix);
+ strcat(buffer, u);
+ } else {
+ strcpy(buffer, u);
+ }
+
+ return 1;
+}
+
+/**
+ * OID comparison.
+ */
+static int oid_cmp(const ASN1_OBJECT *obj, const unsigned char *oid, const size_t oidlen)
+{
+ assert(obj != NULL && oid != NULL);
+ if (obj->length != oidlen)
+ return obj->length - oidlen;
+ else
+ return memcmp(obj->data, oid, oidlen);
+}
+
+/**
+ * Host MIB counter comparison.
+ */
+static int host_mib_counter_cmp(const HOST_MIB_COUNTER * const *a, const HOST_MIB_COUNTER * const *b)
+{
+ return strcasecmp((*a)->hostname, (*b)->hostname);
+}
+
+/**
+ * MIB counter manipulation.
+ */
+static void mib_increment(const rcynic_ctx_t *rc,
+ const char *uri,
+ const mib_counter_t counter)
+{
+ HOST_MIB_COUNTER *h = NULL, hn;
+ char *s;
+
+ assert(rc && uri && strlen(uri) < URI_MAX);
+
+ if (!rc->host_counters)
+ return;
+
+ memset(&hn, 0, sizeof(hn));
+
+ if (!uri_to_filename(rc, uri, hn.hostname, sizeof(hn.hostname), NULL)) {
+ logmsg(rc, log_data_err, "Couldn't convert URI %s to hostname", uri);
+ return;
+ }
+
+ if ((s = strchr(hn.hostname, '/')) != NULL)
+ *s = '\0';
+
+ h = sk_HOST_MIB_COUNTER_value(rc->host_counters,
+ sk_HOST_MIB_COUNTER_find(rc->host_counters,
+ &hn));
+ if (!h) {
+ if ((h = HOST_MIB_COUNTER_new()) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate MIB counters for %s", uri);
+ return;
+ }
+ strcpy(h->hostname, hn.hostname);
+ if (!sk_HOST_MIB_COUNTER_push(rc->host_counters, h)) {
+ logmsg(rc, log_sys_err, "Couldn't store MIB counters for %s", uri);
+ free(h);
+ return;
+ }
+ }
+
+ h->counters[counter]++;
+}
+
+/**
+ * Add a validation status entry to internal log.
+ */
+static void log_validation_status(const rcynic_ctx_t *rc,
+ const char *uri,
+ const mib_counter_t code)
+{
+ VALIDATION_STATUS *v = NULL;
+
+ assert(rc && uri && strlen(uri) < URI_MAX);
+
+ if (!rc->validation_status)
+ return;
+
+ if ((v = VALIDATION_STATUS_new()) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri);
+ goto punt;
+ }
+
+ strcpy(v->uri, uri);
+ v->timestamp = time(0);
+ v->code = code;
+
+ if (!sk_VALIDATION_STATUS_push(rc->validation_status, v)) {
+ logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri);
+ goto punt;
+ }
+
+ v = NULL;
+
+ punt:
+ if (v)
+ free(v);
+}
+
+/**
+ * Reject an object.
+ */
+static void reject(const rcynic_ctx_t *rc,
+ const char *uri,
+ const mib_counter_t code,
+ const char *fmt, ...)
+{
+ char format[URI_MAX * 2];
+ va_list ap;
+
+ assert(fmt && strlen(fmt) + sizeof("Rejected %s") < sizeof(format));
+ snprintf(format, sizeof(format), "Rejected %s %s", uri, fmt);
+ log_validation_status(rc, uri, code);
+ va_start(ap, fmt);
+ vlogmsg(rc, log_data_err, format, ap);
+ va_end(ap);
+}
+
+/**
+ * Copy a file
+ */
+static int cp(const char *source, const char *target)
+{
+ FILE *in = NULL, *out = NULL;
+ int c, ret = 0;
+
+ if ((in = fopen(source, "rb")) == NULL ||
+ (out = fopen(target, "wb")) == NULL)
+ goto done;
+
+ while ((c = getc(in)) != EOF)
+ if (putc(c, out) == EOF)
+ goto done;
+
+ ret = 1;
+
+ done:
+ ret &= !(in != NULL && fclose(in) == EOF);
+ ret &= !(out != NULL && fclose(out) == EOF);
+ return ret;
+}
+
+/**
+ * Link a file
+ */
+static int ln(const char *source, const char *target)
+{
+ unlink(target);
+ return link(source, target) == 0;
+}
+
+/**
+ * Install an object. It'd be nice if we could just use link(), but
+ * that would require us to trust rsync never to do anything bad. For
+ * now we just copy in the simplest way possible. Come back to this
+ * if profiling shows a hotspot here.
+ *
+ * Well, ok, profiling didn't show an issue, but inode exhaustion did.
+ * So we now make copy vs link a configuration choice.
+ */
+static int install_object(const rcynic_ctx_t *rc,
+ const char *uri,
+ const char *source)
+{
+ char target[FILENAME_MAX];
+
+ if (!uri_to_filename(rc, uri, target, sizeof(target), rc->authenticated)) {
+ logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri);
+ return 0;
+ }
+
+ if (!mkdir_maybe(rc, target)) {
+ logmsg(rc, log_sys_err, "Couldn't create directory for %s", target);
+ return 0;
+ }
+
+ if (rc->use_links ? !ln(source, target) : !cp(source, target)) {
+ logmsg(rc, log_sys_err, "Couldn't %s %s to %s",
+ (rc->use_links ? "link" : "copy"), source, target);
+ return 0;
+ }
+ log_validation_status(rc, uri, validation_ok);
+ logmsg(rc, log_telemetry, "Accepted %s", uri);
+ return 1;
+}
+
+/**
+ * Check str for a trailing suffix.
+ */
+static int endswith(const char *str, const char *suffix)
+{
+ size_t len_str, len_suffix;
+ assert(str != NULL && suffix != NULL);
+ len_str = strlen(str);
+ len_suffix = strlen(suffix);
+ return len_str >= len_suffix && !strcmp(str + len_str - len_suffix, suffix);
+}
+
+/**
+ * Check str for a prefix.
+ */
+static int startswith(const char *str, const char *prefix)
+{
+ size_t len_str, len_prefix;
+ assert(str != NULL && prefix != NULL);
+ len_str = strlen(str);
+ len_prefix = strlen(prefix);
+ return len_str >= len_prefix && !strncmp(str, prefix, len_prefix);
+}
+
+
+/**
+ * Set a directory name, making sure it has the trailing slash we
+ * require in various other routines.
+ */
+static void set_directory(char **out, const char *in)
+{
+ int need_slash;
+ size_t n;
+ char *s;
+
+ assert(in && out);
+ n = strlen(in);
+ assert(n > 0);
+ need_slash = in[n - 1] != '/';
+ s = malloc(n + need_slash + 1);
+ assert(s != NULL);
+ strcpy(s, in);
+ if (need_slash)
+ strcat(s, "/");
+ if (*out)
+ free(*out);
+ *out = s;
+}
+
+/**
+ * Remove a directory tree, like rm -rf.
+ */
+static int rm_rf(const char *name)
+{
+ char path[FILENAME_MAX];
+ struct dirent *d;
+ size_t len;
+ DIR *dir;
+ int ret = 0, need_slash;
+
+ assert(name);
+ len = strlen(name);
+ assert(len > 0 && len < sizeof(path));
+ need_slash = name[len - 1] != '/';
+
+ if (rmdir(name) == 0)
+ return 1;
+
+ switch (errno) {
+ case ENOENT:
+ return 1;
+ case ENOTEMPTY:
+ break;
+ default:
+ return 0;
+ }
+
+ if ((dir = opendir(name)) == NULL)
+ return 0;
+
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
+ continue;
+ if (len + strlen(d->d_name) + need_slash >= sizeof(path))
+ goto done;
+ strcpy(path, name);
+ if (need_slash)
+ strcat(path, "/");
+ strcat(path, d->d_name);
+ switch (d->d_type) {
+ case DT_DIR:
+ if (!rm_rf(path))
+ goto done;
+ continue;
+ default:
+ if (unlink(path) < 0)
+ goto done;
+ continue;
+ }
+ }
+
+ ret = rmdir(name) == 0;
+
+ done:
+ closedir(dir);
+ return ret;
+}
+
+
+
+/**
+ * Maintain a cache of URIs we've already fetched.
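+ * A string counts as cached if it, or any parent directory of it, is
+ * already present in the cache; eg, a (hypothetical)
+ * "rpki.example.net/repo/sub/ca.cer" is covered by a cached
+ * "rpki.example.net/repo".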
+ */
+static int rsync_cached_string(const rcynic_ctx_t *rc,
+ const char *string)
+{
+ char *s, buffer[URI_MAX];
+
+ assert(rc && rc->rsync_cache && strlen(string) < sizeof(buffer));
+ strcpy(buffer, string);
+ if ((s = strrchr(buffer, '/')) != NULL && s[1] == '\0')
+ *s = '\0';
+ while (sk_OPENSSL_STRING_find(rc->rsync_cache, buffer) < 0) {
+ if ((s = strrchr(buffer, '/')) == NULL)
+ return 0;
+ *s = '\0';
+ }
+ return 1;
+}
+
+/**
+ * Check whether a particular URI has been cached.
+ */
+static int rsync_cached_uri(const rcynic_ctx_t *rc,
+ const char *uri)
+{
+ return is_rsync(uri) && rsync_cached_string(rc, uri + SIZEOF_RSYNC);
+}
+
+
+/**
+ * Run rsync. This is fairly nasty, because we need to:
+ *
+ * @li Construct the argument list for rsync;
+ *
+ * @li Run rsync in a child process;
+ *
+ * @li Sit listening to rsync's output, logging whatever we get;
+ *
+ * @li Impose an optional time limit on rsync's execution time;
+ *
+ * @li Clean up after the child process, its output pipe, and the timer; and
+ *
+ * @li Keep track of which URIs we've already fetched, so we don't
+ * have to do it again.
+ *
+ * Taken all together, this is pretty icky. Breaking it into separate
+ * functions wouldn't help much. Don't read this on a full stomach.
+ */
+static int rsync(const rcynic_ctx_t *rc,
+ const char * const *args,
+ const char *uri)
+{
+ static const char *rsync_cmd[] = {
+ "rsync", "--update", "--times", "--copy-links", "--itemize-changes", NULL
+ };
+
+ const char *argv[100];
+ char *s, *b, buffer[URI_MAX * 4], path[FILENAME_MAX];
+ int i, n, ret, pipe_fds[2], argc = 0, pid_status = -1;
+ time_t now, deadline;
+ struct timeval tv;
+ pid_t pid, wpid;
+ fd_set rfds;
+
+ assert(rc && uri);
+
+ memset(argv, 0, sizeof(argv));
+
+ for (i = 0; rsync_cmd[i]; i++) {
+ assert(argc < sizeof(argv)/sizeof(*argv));
+ argv[argc++] = rsync_cmd[i];
+ }
+ if (args) {
+ for (i = 0; args[i]; i++) {
+ assert(argc < sizeof(argv)/sizeof(*argv));
+ argv[argc++] = args[i];
+ }
+ }
+
+ if (rc->rsync_program)
+ argv[0] = rc->rsync_program;
+
+ if (!uri_to_filename(rc, uri, path, sizeof(path), rc->unauthenticated)) {
+ logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", uri);
+ return 0;
+ }
+
+ assert(argc < sizeof(argv)/sizeof(*argv));
+ argv[argc++] = uri;
+
+ assert(argc < sizeof(argv)/sizeof(*argv));
+ argv[argc++] = path;
+
+ assert(strlen(uri) > SIZEOF_RSYNC);
+ if (rsync_cached_uri(rc, uri)) {
+ logmsg(rc, log_verbose, "rsync cache hit for %s", uri);
+ return 1;
+ }
+
+ if (!mkdir_maybe(rc, path)) {
+ logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path);
+ return 0;
+ }
+
+ logmsg(rc, log_telemetry, "Fetching %s", uri);
+
+ for (i = 0; i < argc; i++)
+ logmsg(rc, log_verbose, "rsync argv[%d]: %s", i, argv[i]);
+
+ if (pipe(pipe_fds) < 0) {
+ logmsg(rc, log_sys_err, "pipe() failed: %s", strerror(errno));
+ return 0;
+ }
+
+ if ((i = fcntl(pipe_fds[0], F_GETFL, 0)) == -1 ||
+ fcntl(pipe_fds[0], F_SETFL, i | O_NONBLOCK) == -1) {
+ logmsg(rc, log_sys_err,
+ "Couldn't set rsync's output stream non-blocking: %s",
+ strerror(errno));
+ close(pipe_fds[0]);
+ close(pipe_fds[1]);
+ return 0;
+ }
+
+ switch ((pid = vfork())) {
+ case -1:
+ logmsg(rc, log_sys_err, "vfork() failed: %s", strerror(errno));
+ close(pipe_fds[0]);
+ close(pipe_fds[1]);
+ return 0;
+ case 0:
+#define whine(msg) write(2, msg, sizeof(msg) - 1)
+ close(pipe_fds[0]);
+ if (dup2(pipe_fds[1], 1) < 0)
+ whine("dup2(1) failed\n");
+ else if (dup2(pipe_fds[1], 2) < 0)
+ whine("dup2(2) failed\n");
+ else if (execvp(argv[0], (char * const *) argv) < 0)
+ whine("execvp() failed\n");
+ whine("last system error: ");
+ write(2, strerror(errno), strlen(strerror(errno)));
+ whine("\n");
+ _exit(1);
+#undef whine
+ }
+
+ close(pipe_fds[1]);
+
+ now = time(0);
+ deadline = now + rc->rsync_timeout;
+
+ n = -1;
+ i = 0;
+ while ((wpid = waitpid(pid, &pid_status, WNOHANG)) == 0 &&
+ (!rc->rsync_timeout || (now = time(0)) < deadline)) {
+ FD_ZERO(&rfds);
+ FD_SET(pipe_fds[0], &rfds);
+ if (rc->rsync_timeout) {
+ tv.tv_sec = deadline - now;
+ tv.tv_usec = 0;
+ n = select(pipe_fds[0] + 1, &rfds, NULL, NULL, &tv);
+ } else {
+ n = select(pipe_fds[0] + 1, &rfds, NULL, NULL, NULL);
+ }
+ if (n == 0 || (n < 0 && errno == EINTR))
+ continue;
+ if (n < 0)
+ break;
+ while ((n = read(pipe_fds[0], buffer + i, sizeof(buffer) - i - 1)) > 0) {
+ n += i;
+ assert(n < sizeof(buffer));
+ buffer[n] = '\0';
+ for (b = buffer; (s = strchr(b, '\n')) != NULL; b = s) {
+ *s++ = '\0';
+ logmsg(rc, log_telemetry, "%s", b);
+ }
+ i = strlen(b);
+ assert(i < sizeof(buffer) && b + i < buffer + sizeof(buffer));
+ if (b == buffer && i == sizeof(buffer) - 1) {
+ logmsg(rc, log_telemetry, "%s\\", b);
+ i = 0;
+ }
+ if (i > 0) {
+ memmove(buffer, b, i);
+ }
+ }
+ if (n == 0 || (n < 0 && errno != EAGAIN))
+ break;
+ }
+
+ close(pipe_fds[0]);
+
+ assert(i >= 0 && i < sizeof(buffer));
+ if (i) {
+ buffer[i] = '\0';
+ logmsg(rc, log_telemetry, "%s", buffer);
+ }
+
+ if (n < 0 && errno != EAGAIN)
+ logmsg(rc, log_sys_err, "Problem reading rsync's output: %s",
+ strerror(errno));
+
+ if (rc->rsync_timeout && now >= deadline)
+ logmsg(rc, log_data_err,
+ "Fetch of %s took longer than %d seconds, terminating fetch",
+ uri, rc->rsync_timeout);
+
+ assert(pid > 0);
+ for (i = 0; i < KILL_MAX && wpid == 0; i++) {
+ if ((wpid = waitpid(pid, &pid_status, 0)) != 0 && WIFEXITED(pid_status))
+ break;
+ kill(pid, SIGTERM);
+ }
+
+ if (WEXITSTATUS(pid_status)) {
+ logmsg(rc, log_data_err, "rsync exited with status %d fetching %s",
+ WEXITSTATUS(pid_status), uri);
+ ret = 0;
+ mib_increment(rc, uri, (rc->rsync_timeout && now >= deadline
+ ? rsync_timed_out
+ : rsync_failed));
+ } else {
+ ret = 1;
+ mib_increment(rc, uri, rsync_succeeded);
+ }
+
+ assert(strlen(uri) > SIZEOF_RSYNC);
+ strcpy(buffer, uri + SIZEOF_RSYNC);
+ if ((s = strrchr(buffer, '/')) != NULL && s[1] == '\0')
+ *s = '\0';
+ if (!sk_OPENSSL_STRING_push_strdup(rc->rsync_cache, buffer))
+ logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri);
+
+ return ret;
+}
+
+/**
+ * rsync a single file (CRL, manifest, ROA, whatever).
+ */
+static int rsync_file(const rcynic_ctx_t *rc, const char *uri)
+{
+ return rsync(rc, NULL, uri);
+}
+
+/**
+ * rsync an entire subtree, generally rooted at a SIA collection.
+ */
+static int rsync_tree(const rcynic_ctx_t *rc, const char *uri)
+{
+ static const char * const rsync_args[] = { "--recursive", "--delete", NULL };
+ return rsync(rc, rsync_args, uri);
+}
+
+
+
+/**
+ * Clean up old stuff from previous rsync runs. --delete doesn't help
+ * if the URI changes and we never visit the old URI again.
+ */
+static int prune_unauthenticated(const rcynic_ctx_t *rc,
+ const char *name,
+ const size_t baselen)
+{
+ char path[FILENAME_MAX];
+ struct dirent *d;
+ size_t len;
+ DIR *dir;
+ int need_slash;
+
+ assert(rc && name && baselen > 0);
+ len = strlen(name);
+ assert(len >= baselen && len < sizeof(path));
+ need_slash = name[len - 1] != '/';
+
+ if (rsync_cached_string(rc, name + baselen)) {
+ logmsg(rc, log_debug, "prune: cache hit for %s, not cleaning", name);
+ return 1;
+ }
+
+ if (rmdir(name) == 0) {
+ logmsg(rc, log_debug, "prune: removed %s", name);
+ return 1;
+ }
+
+ switch (errno) {
+ case ENOENT:
+ logmsg(rc, log_debug, "prune: nonexistant %s", name);
+ return 1;
+ case ENOTEMPTY:
+ break;
+ default:
+ logmsg(rc, log_debug, "prune: other error %s: %s", name, strerror(errno));
+ return 0;
+ }
+
+ if ((dir = opendir(name)) == NULL)
+ return 0;
+
+ while ((d = readdir(dir)) != NULL) {
+ if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
+ continue;
+ if (len + strlen(d->d_name) + need_slash >= sizeof(path)) {
+ logmsg(rc, log_debug, "prune: %s%s%s too long", name, (need_slash ? "/" : ""), d->d_name);
+ goto done;
+ }
+ strcpy(path, name);
+ if (need_slash)
+ strcat(path, "/");
+ strcat(path, d->d_name);
+ switch (d->d_type) {
+ case DT_DIR:
+ if (!prune_unauthenticated(rc, path, baselen))
+ goto done;
+ continue;
+ default:
+ if (rsync_cached_string(rc, path + baselen)) {
+ logmsg(rc, log_debug, "prune: cache hit %s", path);
+ continue;
+ }
+ if (unlink(path) < 0) {
+ logmsg(rc, log_debug, "prune: removing %s failed: %s", path, strerror(errno));
+ goto done;
+ }
+ logmsg(rc, log_debug, "prune: removed %s", path);
+ continue;
+ }
+ }
+
+ if (rmdir(name) < 0 && errno != ENOTEMPTY)
+ logmsg(rc, log_debug, "prune: couldn't remove %s: %s", name, strerror(errno));
+
+ done:
+ closedir(dir);
+ return !d;
+}
+
+
+
+/**
+ * Read a DER object using a BIO pipeline that hashes the file content
+ * as we read it. Returns the internal form of the parsed DER object,
+ * sets the hash buffer (if specified) as a side effect. The default
+ * hash algorithm is SHA-256.
+ */
+static void *read_file_with_hash(const char *filename,
+ const ASN1_ITEM *it,
+ const EVP_MD *md,
+ unsigned char *hash,
+ const size_t hashlen)
+{
+ void *result = NULL;
+ BIO *b;
+
+ if ((b = BIO_new_file(filename, "rb")) == NULL)
+ goto error;
+
+ if (hash != NULL) {
+ BIO *b2 = BIO_new(BIO_f_md());
+ if (b2 == NULL)
+ goto error;
+ if (md == NULL)
+ md = EVP_sha256();
+ if (!BIO_set_md(b2, md)) {
+ BIO_free(b2);
+ goto error;
+ }
+ BIO_push(b2, b);
+ b = b2;
+ }
+
+ if ((result = ASN1_item_d2i_bio(it, b, NULL)) == NULL)
+ goto error;
+
+ if (hash != NULL) {
+ memset(hash, 0, hashlen);
+ BIO_gets(b, (char *) hash, hashlen);
+ }
+
+ error:
+ BIO_free_all(b);
+ return result;
+}
+
+/**
+ * Read and hash a certificate.
+ */
+static X509 *read_cert(const char *filename, unsigned char *hash, const size_t hashlen)
+{
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash, hashlen);
+}
+
+/**
+ * Read and hash a CRL.
+ */
+static X509_CRL *read_crl(const char *filename, unsigned char *hash, const size_t hashlen)
+{
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash, hashlen);
+}
+
+/**
+ * Read and hash a CMS message.
+ */
+static CMS_ContentInfo *read_cms(const char *filename, unsigned char *hash, const size_t hashlen)
+{
+ return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash, hashlen);
+}
+
+
+
+/**
+ * Extract CRLDP data from a certificate.
+ */
+static void extract_crldp_uri(const rcynic_ctx_t *rc,
+ const char *uri,
+ const STACK_OF(DIST_POINT) *crldp,
+ char *result,
+ const int resultlen)
+{
+ DIST_POINT *d;
+ int i;
+
+ assert(crldp);
+
+ if (sk_DIST_POINT_num(crldp) != 1) {
+ logmsg(rc, log_data_err, "CRLDistributionPoints sequence length is %d (should be 1) for %s",
+ sk_DIST_POINT_num(crldp), uri);
+ mib_increment(rc, uri, malformed_crldp);
+ return;
+ }
+
+ d = sk_DIST_POINT_value(crldp, 0);
+
+ if (d->reasons || d->CRLissuer || !d->distpoint || d->distpoint->type != 0) {
+ logmsg(rc, log_data_err, "CRLDP does not match RPKI certificate profile for %s", uri);
+ mib_increment(rc, uri, malformed_crldp);
+ return;
+ }
+
+ for (i = 0; i < sk_GENERAL_NAME_num(d->distpoint->name.fullname); i++) {
+ GENERAL_NAME *n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i);
+ assert(n != NULL);
+ if (n->type != GEN_URI) {
+ logmsg(rc, log_data_err, "CRLDP contains non-URI GeneralName for %s", uri);
+ mib_increment(rc, uri, malformed_crldp);
+ return;
+ }
+ if (!is_rsync((char *) n->d.uniformResourceIdentifier->data)) {
+ logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
+ (char *) n->d.uniformResourceIdentifier->data, uri);
+ continue;
+ }
+ if (resultlen <= n->d.uniformResourceIdentifier->length) {
+ logmsg(rc, log_data_err, "Skipping improbably long URI %s for %s",
+ (char *) n->d.uniformResourceIdentifier->data, uri);
+ mib_increment(rc, uri, uri_too_long);
+ continue;
+ }
+ strcpy(result, (char *) n->d.uniformResourceIdentifier->data);
+ return;
+ }
+}
+
+/**
+ * Extract SIA or AIA data from a certificate.
+ */
+static void extract_access_uri(const rcynic_ctx_t *rc,
+ const char *uri,
+ const AUTHORITY_INFO_ACCESS *xia,
+ const unsigned char *oid,
+ const int oidlen,
+ char *result,
+ const int resultlen)
+{
+ int i;
+
+ if (!xia)
+ return;
+
+ for (i = 0; i < sk_ACCESS_DESCRIPTION_num(xia); i++) {
+ ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(xia, i);
+ assert(a != NULL);
+ if (a->location->type != GEN_URI)
+ return;
+ if (oid_cmp(a->method, oid, oidlen))
+ continue;
+ if (!is_rsync((char *) a->location->d.uniformResourceIdentifier->data)) {
+ logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
+ a->location->d.uniformResourceIdentifier->data, uri);
+ continue;
+ }
+ if (resultlen <= a->location->d.uniformResourceIdentifier->length) {
+ logmsg(rc, log_data_err, "Skipping improbably long URI %s for %s",
+ a->location->d.uniformResourceIdentifier->data, uri);
+ mib_increment(rc, uri, uri_too_long);
+ continue;
+ }
+ strcpy(result, (char *) a->location->d.uniformResourceIdentifier->data);
+ return;
+ }
+}
+
+/**
+ * Parse interesting stuff from a certificate.
+ */
+static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const char *uri)
+{
+ STACK_OF(DIST_POINT) *crldp;
+ AUTHORITY_INFO_ACCESS *xia;
+
+ assert(x != NULL && c != NULL && uri != NULL);
+ memset(c, 0, sizeof(*c));
+
+ c->ca = X509_check_ca(x) == 1;
+
+ assert(strlen(uri) < sizeof(c->uri));
+ strcpy(c->uri, uri);
+
+ if ((xia = X509_get_ext_d2i(x, NID_info_access, NULL, NULL)) != NULL) {
+ extract_access_uri(rc, uri, xia, id_ad_caIssuers, sizeof(id_ad_caIssuers), c->aia, sizeof(c->aia));
+ sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
+ }
+
+ if ((xia = X509_get_ext_d2i(x, NID_sinfo_access, NULL, NULL)) != NULL) {
+ extract_access_uri(rc, uri, xia, id_ad_caRepository, sizeof(id_ad_caRepository), c->sia, sizeof(c->sia));
+ extract_access_uri(rc, uri, xia, id_ad_rpkiManifest, sizeof(id_ad_rpkiManifest), c->manifest, sizeof(c->manifest));
+ sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
+ }
+
+ if ((crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL)) != NULL) {
+ extract_crldp_uri(rc, uri, crldp, c->crldp, sizeof(c->crldp));
+ sk_DIST_POINT_pop_free(crldp, DIST_POINT_free);
+ }
+}
+
+
+
+/**
+ * Attempt to read and check one CRL from disk.
+ */
+
+static X509_CRL *check_crl_1(const rcynic_ctx_t *rc,
+ const char *uri,
+ char *path, const int pathlen,
+ const char *prefix,
+ X509 *issuer,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ unsigned char hashbuf[EVP_MAX_MD_SIZE];
+ X509_CRL *crl = NULL;
+ EVP_PKEY *pkey;
+ int ret;
+
+ assert(uri && path && issuer && hashlen <= sizeof(hashbuf));
+
+ if (!uri_to_filename(rc, uri, path, pathlen, prefix))
+ goto punt;
+
+ if (hash)
+ crl = read_crl(path, hashbuf, sizeof(hashbuf));
+ else
+ crl = read_crl(path, NULL, 0);
+
+ if (!crl)
+ goto punt;
+
+ if (hash && memcmp(hashbuf, hash, hashlen)) {
+ reject(rc, uri, crl_digest_mismatch,
+ "because digest of CRL did not match value from manifest");
+ goto punt;
+ }
+
+ if ((pkey = X509_get_pubkey(issuer)) == NULL)
+ goto punt;
+ ret = X509_CRL_verify(crl, pkey);
+ EVP_PKEY_free(pkey);
+
+ if (ret > 0)
+ return crl;
+
+ punt:
+ X509_CRL_free(crl);
+ return NULL;
+}
+
+/**
+ * Check whether we already have a particular CRL, attempt to fetch it
+ * and check issuer's signature if we don't.
+ */
+static X509_CRL *check_crl(const rcynic_ctx_t *rc,
+ const char *uri,
+ X509 *issuer,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ char path[FILENAME_MAX];
+ X509_CRL *crl;
+
+ if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
+ (crl = read_crl(path, NULL, 0)) != NULL)
+ return crl;
+
+ logmsg(rc, log_telemetry, "Checking CRL %s", uri);
+
+ assert(rsync_cached_uri(rc, uri));
+
+ if ((crl = check_crl_1(rc, uri, path, sizeof(path), rc->unauthenticated,
+ issuer, hash, hashlen))) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, current_crl_accepted);
+ return crl;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, current_crl_rejected);
+ }
+
+ if ((crl = check_crl_1(rc, uri, path, sizeof(path), rc->old_authenticated,
+ issuer, hash, hashlen))) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, backup_crl_accepted);
+ return crl;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, backup_crl_rejected);
+ }
+
+ return NULL;
+}
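+
+/*
+ * Note on the lookup order used above (and by the manifest, ROA and
+ * Ghostbuster checks further down): first reuse an object already
+ * validated into rc->authenticated during this run, then try the
+ * freshly rsynced copy under rc->unauthenticated, and finally fall
+ * back to the previous run's validated copy under
+ * rc->old_authenticated.  The directories themselves are whatever the
+ * "authenticated", "unauthenticated" and "old-authenticated" options
+ * in rcynic.conf select.
+ */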
+
+
+
+/**
+ * Validation callback function for use with X509_verify_cert().
+ */
+static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
+{
+ rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx;
+ mib_counter_t counter;
+
+ assert(rctx != NULL);
+
+ switch (ctx->error) {
+ case X509_V_OK:
+ return ok;
+
+ case X509_V_ERR_SUBJECT_ISSUER_MISMATCH:
+ /*
+ * Informational events, not really errors. ctx->check_issued()
+ * is called in many places where failure to find an issuer is not
+ * a failure for the calling function. Just leave these alone.
+ */
+ return ok;
+
+ case X509_V_ERR_CRL_HAS_EXPIRED:
+ /*
+ * This may not be an error at all. CRLs don't really "expire",
+ * although the signatures over them do. What OpenSSL really
+ * means by this error is just "it's now later than this source
+     * said it intended to publish a new CRL".  Unclear whether this
+ * should be an error; current theory is that it should not be.
+ */
+ if (rctx->rc->allow_stale_crl) {
+ ok = 1;
+ if (sk_OPENSSL_STRING_find(rctx->rc->stale_cache, rctx->subject->crldp) >= 0)
+ return ok;
+ if (!sk_OPENSSL_STRING_push_strdup(rctx->rc->stale_cache, rctx->subject->crldp))
+ logmsg(rctx->rc, log_sys_err,
+ "Couldn't cache stale CRLDP %s, blundering onward", rctx->subject->crldp);
+ }
+ logmsg(rctx->rc, log_data_err, "Stale CRL %s", rctx->subject->crldp);
+ if (ok)
+ mib_increment(rctx->rc, rctx->subject->uri, stale_crl);
+ else
+ reject(rctx->rc, rctx->subject->uri, stale_crl, "due to stale CRL %s", rctx->subject->crldp);
+ return ok;
+
+ case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
+ /*
+ * This is another error that's only an error in the strange world
+ * of OpenSSL, but a more serious one. By default, OpenSSL
+ * expects all trust anchors to be self-signed. This is not a
+ * PKIX requirement, it's just an OpenSSL thing, but one violates
+ * it at one's peril, because the only way to convince OpenSSL to
+ * allow a non-self-signed trust anchor is to intercept this
+ * "error" in the verify callback handler.
+ *
+ * So this program supports non-self-signed trust anchors, but be
+ * warned that enabling this feature may cause this program's
+ * output not to work with other OpenSSL-based applications.
+ */
+ if (rctx->rc->allow_non_self_signed_trust_anchor)
+ ok = 1;
+ if (ok)
+ mib_increment(rctx->rc, rctx->subject->uri, trust_anchor_not_self_signed);
+ else
+ reject(rctx->rc, rctx->subject->uri, trust_anchor_not_self_signed,
+ "because trust anchor was not self-signed");
+ return ok;
+
+ /*
+   * Select the correct MIB counter for each known OpenSSL verify error
+ * except the ones we handle explicitly above, then fall through to
+ * common handling for all of these.
+ */
+#define QV(x) \
+ case x: \
+ counter = mib_openssl_##x; \
+ break;
+ MIB_COUNTERS_FROM_OPENSSL;
+#undef QV
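+
+  /*
+   * For example (purely illustrative): if MIB_COUNTERS_FROM_OPENSSL
+   * includes an entry QV(X509_V_ERR_CERT_HAS_EXPIRED), the macro
+   * above expands it to
+   *
+   *   case X509_V_ERR_CERT_HAS_EXPIRED:
+   *     counter = mib_openssl_X509_V_ERR_CERT_HAS_EXPIRED;
+   *     break;
+   */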
+
+ default:
+ counter = unknown_verify_error;
+ break;
+ }
+
+ if (ok)
+ mib_increment(rctx->rc, rctx->subject->uri, counter);
+ else
+ reject(rctx->rc, rctx->subject->uri, counter,
+ "due to validation failure at depth %d: %s",
+ ctx->error_depth,
+ X509_verify_cert_error_string(ctx->error));
+
+ return ok;
+}
+
+/**
+ * Check crypto aspects of a certificate, including policy checks
+ * and RFC 3779 path validation.
+ */
+static int check_x509(const rcynic_ctx_t *rc,
+ STACK_OF(X509) *certs,
+ X509 *x,
+ const certinfo_t *subject)
+{
+ rcynic_x509_store_ctx_t rctx;
+ STACK_OF(X509_CRL) *crls = NULL;
+ EVP_PKEY *pkey = NULL;
+ X509_CRL *crl = NULL;
+ X509 *issuer;
+ int ret = 0;
+
+ assert(rc && certs && x && subject && subject->crldp[0]);
+
+ issuer = sk_X509_value(certs, sk_X509_num(certs) - 1);
+ assert(issuer != NULL);
+
+ if (!X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, x, NULL))
+ return 0;
+ rctx.rc = rc;
+ rctx.subject = subject;
+
+ if (subject->ta) {
+
+ if (subject->crldp[0]) {
+ reject(rc, subject->uri, trust_anchor_with_crldp,
+ "because it's a trust anchor but has a CRLDP extension");
+ goto done;
+ }
+
+ } else {
+
+ if ((pkey = X509_get_pubkey(issuer)) == NULL || X509_verify(x, pkey) <= 0) {
+ reject(rc, subject->uri, certificate_bad_signature,
+ "because it failed signature check prior to CRL fetch");
+ goto done;
+ }
+
+ if ((crl = check_crl(rc, subject->crldp, issuer, NULL, 0)) == NULL) {
+ reject(rc, subject->uri, certificate_bad_crl,
+ "due to bad CRL %s", subject->crldp);
+ goto done;
+ }
+ }
+
+ if ((crls = sk_X509_CRL_new_null()) == NULL ||
+ !sk_X509_CRL_push(crls, crl)) {
+ logmsg(rc, log_sys_err,
+ "Internal allocation error setting up CRL for validation");
+ goto done;
+ }
+ crl = NULL;
+
+ X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
+ X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
+ X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
+
+ X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
+ X509_V_FLAG_CRL_CHECK |
+ X509_V_FLAG_POLICY_CHECK |
+ X509_V_FLAG_EXPLICIT_POLICY |
+ X509_V_FLAG_X509_STRICT);
+
+ X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
+
+ if (X509_verify_cert(&rctx.ctx) <= 0) {
+ /*
+ * Redundant error message?
+ */
+ logmsg(rc, log_data_err, "Validation failure for %s",
+ subject->uri[0] ? subject->uri : subject->ta ? "[Trust anchor]" : "[???]");
+ goto done;
+ }
+
+ ret = 1;
+
+ done:
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+ X509_STORE_CTX_cleanup(&rctx.ctx);
+ EVP_PKEY_free(pkey);
+ X509_CRL_free(crl);
+
+ return ret;
+}
+
+/**
+ * Check whether extensions in a certificate are allowed by profile.
+ * Also returns failure in a few null-pointer cases that can't
+ * possibly conform to profile.
+ */
+static int check_cert_only_allowed_extensions(const X509 *x, const int allow_eku)
+{
+ int i;
+
+ if (x == NULL || x->cert_info == NULL || x->cert_info->extensions == NULL)
+ return 0;
+
+ for (i = 0; i < sk_X509_EXTENSION_num(x->cert_info->extensions); i++) {
+ switch (OBJ_obj2nid(sk_X509_EXTENSION_value(x->cert_info->extensions,
+ i)->object)) {
+ case NID_basic_constraints:
+ case NID_subject_key_identifier:
+ case NID_authority_key_identifier:
+ case NID_key_usage:
+ case NID_crl_distribution_points:
+ case NID_info_access:
+ case NID_sinfo_access:
+ case NID_certificate_policies:
+ case NID_sbgp_ipAddrBlock:
+ case NID_sbgp_autonomousSysNum:
+ continue;
+ case NID_ext_key_usage:
+ if (allow_eku)
+ continue;
+ else
+ return 0;
+ default:
+ return 0;
+ }
+ }
+
+ return 1;
+}
+
+/**
+ * Check a certificate for conformance to the RPKI certificate profile.
+ */
+static X509 *check_cert_1(const rcynic_ctx_t *rc,
+ const char *uri,
+ char *path,
+ const int pathlen,
+ const char *prefix,
+ STACK_OF(X509) *certs,
+ const certinfo_t *issuer,
+ certinfo_t *subject,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ unsigned char hashbuf[EVP_MAX_MD_SIZE];
+ X509 *x = NULL;
+
+ assert(uri && path && certs && issuer && subject);
+
+ if (!uri_to_filename(rc, uri, path, pathlen, prefix)) {
+ logmsg(rc, log_data_err, "Can't convert URI %s to filename", uri);
+ return NULL;
+ }
+
+ if (access(path, R_OK))
+ return NULL;
+
+ if (hash)
+ x = read_cert(path, hashbuf, sizeof(hashbuf));
+ else
+ x = read_cert(path, NULL, 0);
+
+ if (!x) {
+ logmsg(rc, log_sys_err, "Can't read certificate %s", path);
+ goto punt;
+ }
+
+ if (hash && memcmp(hashbuf, hash, hashlen)) {
+ reject(rc, uri, certificate_digest_mismatch,
+ "because digest did not match value in manifest");
+ goto punt;
+ }
+
+ parse_cert(rc, x, subject, uri);
+
+ if (subject->sia[0] && subject->sia[strlen(subject->sia) - 1] != '/') {
+ reject(rc, uri, malformed_sia,
+ "due to malformed SIA %s", subject->sia);
+ goto punt;
+ }
+
+ if (!subject->aia[0]) {
+ reject(rc, uri, aia_missing, "due to missing AIA extension");
+ goto punt;
+ }
+
+ if (!issuer->ta && strcmp(issuer->uri, subject->aia)) {
+ reject(rc, uri, aia_mismatch,
+ "because AIA %s doesn't match parent", subject->aia);
+ goto punt;
+ }
+
+ if (subject->ca && !subject->sia[0]) {
+ reject(rc, uri, sia_missing,
+ "because SIA extension repository pointer is missing");
+ goto punt;
+ }
+
+ if (!subject->crldp[0]) {
+ reject(rc, uri, crldp_missing, "because CRLDP extension is missing");
+ goto punt;
+ }
+
+ if (subject->ca && !startswith(subject->crldp, issuer->sia)) {
+ reject(rc, uri, crldp_mismatch,
+ "because CRLDP %s points outside issuer's publication point %s",
+ subject->crldp, issuer->sia);
+ goto punt;
+ }
+
+ if (subject->ca && !subject->manifest[0]) {
+ reject(rc, uri, manifest_missing,
+ "because SIA extension manifest pointer is missing");
+ goto punt;
+ }
+
+ if (subject->ca && !startswith(subject->manifest, subject->sia)) {
+ reject(rc, uri, manifest_mismatch,
+ "because SIA manifest %s points outside publication point %s",
+ subject->manifest, subject->sia);
+ goto punt;
+ }
+
+ if (!check_cert_only_allowed_extensions(x, !subject->ca)) {
+ reject(rc, uri, disallowed_extension,
+ "due to disallowed X.509v3 extension");
+ goto punt;
+ }
+
+ if (!check_x509(rc, certs, x, subject)) {
+ /*
+ * Redundant error message?
+ */
+ logmsg(rc, log_data_err, "Certificate %s failed validation", uri);
+ goto punt;
+ }
+
+ return x;
+
+ punt:
+ X509_free(x);
+ return NULL;
+}
+
+/**
+ * Try to find a good copy of a certificate either in fresh data or in
+ * backup data from a previous run of this program.
+ */
+static X509 *check_cert(rcynic_ctx_t *rc,
+ char *uri,
+ STACK_OF(X509) *certs,
+ const certinfo_t *issuer,
+ certinfo_t *subject,
+ const char *prefix,
+ const int backup,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ char path[FILENAME_MAX];
+ X509 *x;
+
+ assert(rc && uri && certs && issuer && subject && prefix);
+
+ /*
+ * If target file already exists and we're not here to recheck with
+ * better data, just get out now.
+ */
+
+ if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
+ !access(path, R_OK)) {
+ if (backup || sk_OPENSSL_STRING_find(rc->backup_cache, uri) < 0)
+ return NULL;
+ mib_increment(rc, uri, current_cert_recheck);
+ logmsg(rc, log_telemetry, "Rechecking %s", uri);
+ } else {
+ logmsg(rc, log_telemetry, "Checking %s", uri);
+ }
+
+ rc->indent++;
+
+ if ((x = check_cert_1(rc, uri, path, sizeof(path), prefix,
+ certs, issuer, subject, hash, hashlen)) != NULL) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri,
+ (backup ? backup_cert_accepted : current_cert_accepted));
+ if (!backup)
+ sk_OPENSSL_STRING_remove(rc->backup_cache, uri);
+ else if (!sk_OPENSSL_STRING_push_strdup(rc->backup_cache, uri))
+ logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri);
+
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri,
+ (backup ? backup_cert_rejected : current_cert_rejected));
+ }
+
+ rc->indent--;
+
+ return x;
+}
+
+
+
+/**
+ * Read and check one manifest from disk.
+ */
+static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
+ const char *uri,
+ char *path,
+ const int pathlen,
+ const char *prefix,
+ STACK_OF(X509) *certs)
+{
+ CMS_ContentInfo *cms = NULL;
+ const ASN1_OBJECT *eContentType = NULL;
+ STACK_OF(X509) *signers = NULL;
+ STACK_OF(X509_CRL) *crls = NULL;
+ X509_CRL *crl = NULL;
+ Manifest *manifest = NULL, *result = NULL;
+ BIO *bio = NULL;
+ rcynic_x509_store_ctx_t rctx;
+ certinfo_t certinfo;
+ int i, initialized_store_ctx = 0;
+ FileAndHash *fah = NULL;
+ char *crl_tail;
+
+ assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
+
+ if (!uri_to_filename(rc, uri, path, pathlen, prefix) ||
+ (cms = read_cms(path, NULL, 0)) == NULL)
+ goto done;
+
+ if ((eContentType = CMS_get0_eContentType(cms)) == NULL ||
+ oid_cmp(eContentType, id_ct_rpkiManifest, sizeof(id_ct_rpkiManifest))) {
+ reject(rc, uri, manifest_bad_econtenttype,
+ "due to bad manifest eContentType");
+ goto done;
+ }
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri);
+ goto done;
+ }
+
+ if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
+ reject(rc, uri, manifest_invalid_cms,
+ "due to validation failure for manifest CMS message");
+ goto done;
+ }
+
+ if ((signers = CMS_get0_signers(cms)) == NULL || sk_X509_num(signers) != 1) {
+ reject(rc, uri, manifest_missing_signer,
+           "because couldn't extract manifest EE certificate from CMS");
+ goto done;
+ }
+
+ parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri);
+
+ if (!certinfo.crldp[0]) {
+ reject(rc, uri, manifest_missing_crldp,
+ "due to missing CRLDP in manifest EE certificate");
+ goto done;
+ }
+
+ if ((crl_tail = strrchr(certinfo.crldp, '/')) == NULL) {
+ reject(rc, uri, manifest_malformed_crldp,
+ "due to malformed CRLDP %s in manifest EE certificate",
+ certinfo.crldp);
+ goto done;
+ }
+ crl_tail++;
+
+ if ((manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL)) == NULL) {
+ reject(rc, uri, manifest_decode_error, "because unable to decode manifest");
+ goto done;
+ }
+
+ if (manifest->version) {
+ reject(rc, uri, manifest_wrong_version,
+ "because manifest version should be defaulted zero, not %ld",
+ ASN1_INTEGER_get(manifest->version));
+ goto done;
+ }
+
+ if (X509_cmp_current_time(manifest->thisUpdate) > 0) {
+ reject(rc, uri, manifest_not_yet_valid, "because manifest not yet valid");
+ goto done;
+ }
+
+ if (X509_cmp_current_time(manifest->nextUpdate) < 0 &&
+ sk_OPENSSL_STRING_find(rc->stale_cache, uri) < 0) {
+ if (!sk_OPENSSL_STRING_push_strdup(rc->stale_cache, uri))
+ logmsg(rc, log_sys_err, "Couldn't cache stale manifest %s, blundering onward", uri);
+ if (!rc->allow_stale_manifest) {
+ reject(rc, uri, stale_manifest,
+ "because it is a stale manifest");
+ goto done;
+ }
+ logmsg(rc, log_data_err, "Stale manifest %s", uri);
+ mib_increment(rc, uri, stale_manifest);
+ }
+
+ if (manifest->fileHashAlg == NULL ||
+ oid_cmp(manifest->fileHashAlg, id_sha256, sizeof(id_sha256)))
+ goto done;
+
+ for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++)
+ if (!strcmp((char *) fah->file->data, crl_tail))
+ break;
+
+ if (fah) {
+ crl = check_crl(rc, certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1),
+ fah->hash->data, fah->hash->length);
+ } else if (rc->require_crl_in_manifest) {
+ reject(rc, uri, crl_not_in_manifest,
+ "because CRL %s missing from manifest", certinfo.crldp);
+ goto done;
+ } else {
+ logmsg(rc, log_data_err, "Manifest %s is missing entry for CRL %s", uri, certinfo.crldp);
+ mib_increment(rc, uri, crl_not_in_manifest);
+ crl = check_crl(rc, certinfo.crldp,
+ sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0);
+ }
+
+ if (!crl) {
+ reject(rc, uri, manifest_bad_crl, "due to bad manifest CRL %s", certinfo.crldp);
+ goto done;
+ }
+
+ if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl))
+ goto done;
+ crl = NULL;
+
+ if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
+ goto done;
+
+ rctx.rc = rc;
+ rctx.subject = &certinfo;
+
+ X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
+ X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
+ X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
+
+ X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
+ X509_V_FLAG_CRL_CHECK |
+ X509_V_FLAG_POLICY_CHECK |
+ X509_V_FLAG_EXPLICIT_POLICY |
+ X509_V_FLAG_X509_STRICT);
+
+ X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
+
+ if (X509_verify_cert(&rctx.ctx) <= 0) {
+ /*
+ * Redundant error message?
+ */
+    logmsg(rc, log_data_err, "Validation failure for manifest %s EE certificate", uri);
+ mib_increment(rc, uri, manifest_invalid_ee);
+ goto done;
+ }
+
+ result = manifest;
+ manifest = NULL;
+
+ done:
+ if (initialized_store_ctx)
+ X509_STORE_CTX_cleanup(&rctx.ctx);
+ BIO_free(bio);
+ Manifest_free(manifest);
+ CMS_ContentInfo_free(cms);
+ sk_X509_free(signers);
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+
+ return result;
+}
+
+/**
+ * Check whether we already have a particular manifest, attempt to fetch it
+ * and check issuer's signature if we don't.
+ */
+static Manifest *check_manifest(const rcynic_ctx_t *rc,
+ const char *uri,
+ STACK_OF(X509) *certs)
+{
+ CMS_ContentInfo *cms = NULL;
+ Manifest *manifest = NULL;
+ char path[FILENAME_MAX];
+ BIO *bio = NULL;
+
+ if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
+ (cms = read_cms(path, NULL, 0)) != NULL &&
+      (bio = BIO_new(BIO_s_mem())) != NULL &&
+ CMS_verify(cms, NULL, NULL, NULL, bio,
+ CMS_NO_SIGNER_CERT_VERIFY |
+ CMS_NO_ATTR_VERIFY |
+ CMS_NO_CONTENT_VERIFY) > 0)
+ manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL);
+
+ CMS_ContentInfo_free(cms);
+ BIO_free(bio);
+
+ if (manifest != NULL)
+ return manifest;
+
+ logmsg(rc, log_telemetry, "Checking manifest %s", uri);
+
+ assert(rsync_cached_uri(rc, uri));
+
+ if ((manifest = check_manifest_1(rc, uri, path, sizeof(path),
+ rc->unauthenticated, certs))) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, current_manifest_accepted);
+ return manifest;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, current_manifest_rejected);
+ }
+
+ if ((manifest = check_manifest_1(rc, uri, path, sizeof(path),
+ rc->old_authenticated, certs))) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, backup_manifest_accepted);
+ return manifest;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, backup_manifest_rejected);
+ }
+
+ return NULL;
+}
+
+
+
+/**
+ * Extract a ROA prefix from the ASN.1 bitstring encoding.
+ */
+static int extract_roa_prefix(unsigned char *addr,
+ unsigned *prefixlen,
+ const ASN1_BIT_STRING *bs,
+ const unsigned afi)
+{
+ unsigned length;
+
+ switch (afi) {
+ case IANA_AFI_IPV4: length = 4; break;
+ case IANA_AFI_IPV6: length = 16; break;
+ default: return 0;
+ }
+
+ if (bs->length < 0 || bs->length > length)
+ return 0;
+
+ if (bs->length > 0) {
+ memcpy(addr, bs->data, bs->length);
+ if ((bs->flags & 7) != 0) {
+ unsigned char mask = 0xFF >> (8 - (bs->flags & 7));
+ addr[bs->length - 1] &= ~mask;
+ }
+ }
+
+ memset(addr + bs->length, 0, length - bs->length);
+
+ *prefixlen = (bs->length * 8) - (bs->flags & 7);
+
+ return 1;
+}
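+
+/*
+ * Worked example (hypothetical prefix): 10.64.0.0/10 arrives as a
+ * two-octet BIT STRING 0x0A 0x40 with (bs->flags & 7) == 6 unused
+ * bits in the final octet.  The code above copies the two octets,
+ * masks off the 6 unused bits (a no-op here, they are already zero),
+ * zero-fills the remaining 2 bytes of the 4-byte IPv4 buffer, and
+ * returns *prefixlen = 2 * 8 - 6 = 10.
+ */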
+
+/**
+ * Read and check one ROA from disk.
+ */
+static int check_roa_1(const rcynic_ctx_t *rc,
+ const char *uri,
+ char *path,
+ const int pathlen,
+ const char *prefix,
+ STACK_OF(X509) *certs,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ unsigned char hashbuf[EVP_MAX_MD_SIZE], addrbuf[ADDR_RAW_BUF_LEN];
+ const ASN1_OBJECT *eContentType = NULL;
+ STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL;
+ STACK_OF(X509_CRL) *crls = NULL;
+ STACK_OF(X509) *signers = NULL;
+ CMS_ContentInfo *cms = NULL;
+ X509_CRL *crl = NULL;
+ ROA *roa = NULL;
+ BIO *bio = NULL;
+ rcynic_x509_store_ctx_t rctx;
+ certinfo_t certinfo;
+ int i, j, initialized_store_ctx = 0, result = 0;
+ unsigned afi, *safi = NULL, safi_, prefixlen;
+ ROAIPAddressFamily *rf;
+ ROAIPAddress *ra;
+
+ assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
+
+ if (!uri_to_filename(rc, uri, path, pathlen, prefix))
+ goto error;
+
+ if (hash)
+ cms = read_cms(path, hashbuf, sizeof(hashbuf));
+ else
+ cms = read_cms(path, NULL, 0);
+
+ if (!cms)
+ goto error;
+
+ if (hash && memcmp(hashbuf, hash, hashlen)) {
+ reject(rc, uri, roa_digest_mismatch,
+ "because ROA does not match manifest digest");
+ goto error;
+ }
+
+ if (!(eContentType = CMS_get0_eContentType(cms)) ||
+ oid_cmp(eContentType, id_ct_routeOriginAttestation,
+ sizeof(id_ct_routeOriginAttestation))) {
+ reject(rc, uri, roa_bad_econtenttype,
+ "because ROA has bad eContentType");
+ goto error;
+ }
+
+ if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri);
+ goto error;
+ }
+
+ if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
+ reject(rc, uri, roa_invalid_cms, "because ROA CMS failed validation");
+ goto error;
+ }
+
+ if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
+ reject(rc, uri, roa_missing_signer,
+ "because couldn't extract CMS signer from ROA");
+ goto error;
+ }
+
+ parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri);
+
+ if (!(roa = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL))) {
+ reject(rc, uri, roa_decode_error, "because could not decode ROA");
+ goto error;
+ }
+
+ if (roa->version) {
+ reject(rc, uri, roa_wrong_version,
+ "because ROA version should be defaulted zero, not %ld",
+ ASN1_INTEGER_get(roa->version));
+ goto error;
+ }
+
+ /*
+ * ROA issuer doesn't need rights to the ASN, so we don't need to
+ * check the asID field.
+ */
+
+ ee_resources = X509_get_ext_d2i(sk_X509_value(signers, 0), NID_sbgp_ipAddrBlock, NULL, NULL);
+
+ /*
+ * Extract prefixes from ROA and convert them into a resource set.
+ */
+
+ if (!(roa_resources = sk_IPAddressFamily_new_null()))
+ goto error;
+
+ for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) {
+ rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i);
+    if (!rf || !rf->addressFamily) {
+      reject(rc, uri, malformed_roa_addressfamily,
+             "because ROA addressFamily is missing");
+      goto error;
+    }
+    if (rf->addressFamily->length < 2 || rf->addressFamily->length > 3) {
+      reject(rc, uri, malformed_roa_addressfamily,
+             "because ROA addressFamily length should be 2 or 3, not %lu",
+             (unsigned long) rf->addressFamily->length);
+      goto error;
+    }
+ afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]);
+ if (rf->addressFamily->length == 3)
+ *(safi = &safi_) = rf->addressFamily->data[2];
+ for (j = 0; j < sk_ROAIPAddress_num(rf->addresses); j++) {
+ ra = sk_ROAIPAddress_value(rf->addresses, j);
+ if (!ra ||
+ !extract_roa_prefix(addrbuf, &prefixlen, ra->IPAddress, afi) ||
+ !v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen)) {
+ reject(rc, uri, roa_resources_malformed,
+ "because ROA resources appear malformed");
+ goto error;
+ }
+ }
+ }
+
+ /*
+ * ROAs can include nested prefixes, so direct translation to
+ * resource sets could include overlapping ranges, which is illegal.
+ * So we have to remove nested stuff before whacking into canonical
+ * form. Fortunately, this is relatively easy, since we know these
+ * are just prefixes, not ranges: in a list of prefixes sorted by
+ * the RFC 3779 rules, the first element of a set of nested prefixes
+ * will always be the least specific.
+ */
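+  /*
+   * Hypothetical example: a ROA listing both 10.0.0.0/8 and
+   * 10.1.0.0/16 sorts the /8 first under the RFC 3779 rules; the
+   * /8's maximum address (10.255.255.255) covers the /16's maximum
+   * (10.1.255.255), so the loop below deletes the nested /16 before
+   * canonicalization.
+   */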
+
+ for (i = 0; i < sk_IPAddressFamily_num(roa_resources); i++) {
+ IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i);
+
+ if ((afi = v3_addr_get_afi(f)) == 0) {
+ reject(rc, uri, roa_bad_afi,
+ "because found bad AFI while extracting data from ROA");
+ goto error;
+ }
+
+ if (f->ipAddressChoice->type == IPAddressChoice_addressesOrRanges) {
+ IPAddressOrRanges *aors = f->ipAddressChoice->u.addressesOrRanges;
+
+ sk_IPAddressOrRange_sort(aors);
+
+ for (j = 0; j < sk_IPAddressOrRange_num(aors) - 1; j++) {
+ IPAddressOrRange *a = sk_IPAddressOrRange_value(aors, j);
+ IPAddressOrRange *b = sk_IPAddressOrRange_value(aors, j + 1);
+ unsigned char a_min[ADDR_RAW_BUF_LEN], a_max[ADDR_RAW_BUF_LEN];
+ unsigned char b_min[ADDR_RAW_BUF_LEN], b_max[ADDR_RAW_BUF_LEN];
+ int length;
+
+ if ((length = v3_addr_get_range(a, afi, a_min, a_max, ADDR_RAW_BUF_LEN)) == 0 ||
+ (length = v3_addr_get_range(b, afi, b_min, b_max, ADDR_RAW_BUF_LEN)) == 0) {
+ reject(rc, uri, roa_resources_malformed, "because ROA resources appear malformed");
+ goto error;
+ }
+
+ if (memcmp(a_max, b_max, length) >= 0) {
+ (void) sk_IPAddressOrRange_delete(aors, j + 1);
+ IPAddressOrRange_free(b);
+ --j;
+ }
+ }
+ }
+ }
+
+ if (!v3_addr_canonize(roa_resources)) {
+ reject(rc, uri, roa_resources_malformed, "because ROA resources appear malformed");
+ goto error;
+ }
+
+ if (!v3_addr_subset(roa_resources, ee_resources)) {
+ reject(rc, uri, roa_not_nested,
+ "because ROA's resources are not a subset of its signing EE certificate's resources");
+ goto error;
+ }
+
+ if (!(crl = check_crl(rc, certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
+ reject(rc, uri, roa_bad_crl, "because ROA EE certificate has bad CRL %s", certinfo.crldp);
+ goto error;
+ }
+
+ if (!(crls = sk_X509_CRL_new_null()) || !sk_X509_CRL_push(crls, crl))
+ goto error;
+ crl = NULL;
+
+ if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
+ goto error;
+
+ rctx.rc = rc;
+ rctx.subject = &certinfo;
+
+ X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
+ X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
+ X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
+
+ X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
+ X509_V_FLAG_CRL_CHECK |
+ X509_V_FLAG_POLICY_CHECK |
+ X509_V_FLAG_EXPLICIT_POLICY |
+ X509_V_FLAG_X509_STRICT);
+
+ X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
+
+ if (X509_verify_cert(&rctx.ctx) <= 0) {
+ /*
+ * Redundant error message?
+ */
+    logmsg(rc, log_data_err, "Validation failure for ROA %s EE certificate", uri);
+ mib_increment(rc, uri, roa_invalid_ee);
+ goto error;
+ }
+
+ result = 1;
+
+ error:
+ if (initialized_store_ctx)
+ X509_STORE_CTX_cleanup(&rctx.ctx);
+ BIO_free(bio);
+ ROA_free(roa);
+ CMS_ContentInfo_free(cms);
+ sk_X509_free(signers);
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+ sk_IPAddressFamily_pop_free(roa_resources, IPAddressFamily_free);
+ sk_IPAddressFamily_pop_free(ee_resources, IPAddressFamily_free);
+
+ return result;
+}
+
+/**
+ * Check whether we already have a particular ROA, attempt to fetch it
+ * and check issuer's signature if we don't.
+ */
+static void check_roa(const rcynic_ctx_t *rc,
+ const char *uri,
+ STACK_OF(X509) *certs,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ char path[FILENAME_MAX];
+
+ if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
+ !access(path, F_OK))
+ return;
+
+ logmsg(rc, log_telemetry, "Checking ROA %s", uri);
+
+ assert(rsync_cached_uri(rc, uri));
+
+ if (check_roa_1(rc, uri, path, sizeof(path), rc->unauthenticated,
+ certs, hash, hashlen)) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, current_roa_accepted);
+ return;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, current_roa_rejected);
+ }
+
+ if (check_roa_1(rc, uri, path, sizeof(path), rc->old_authenticated,
+ certs, hash, hashlen)) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, backup_roa_accepted);
+ return;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, backup_roa_rejected);
+ }
+}
+
+
+
+/**
+ * Read and check one Ghostbuster record from disk.
+ */
+static int check_ghostbuster_1(const rcynic_ctx_t *rc,
+ const char *uri,
+ char *path,
+ const int pathlen,
+ const char *prefix,
+ STACK_OF(X509) *certs,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ unsigned char hashbuf[EVP_MAX_MD_SIZE];
+ const ASN1_OBJECT *eContentType = NULL;
+ STACK_OF(X509_CRL) *crls = NULL;
+ STACK_OF(X509) *signers = NULL;
+ CMS_ContentInfo *cms = NULL;
+ X509_CRL *crl = NULL;
+ BIO *bio = NULL;
+ rcynic_x509_store_ctx_t rctx;
+ certinfo_t certinfo;
+ int initialized_store_ctx = 0, result = 0;
+
+ assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
+
+ if (!uri_to_filename(rc, uri, path, pathlen, prefix))
+ goto error;
+
+ if (hash)
+ cms = read_cms(path, hashbuf, sizeof(hashbuf));
+ else
+ cms = read_cms(path, NULL, 0);
+
+ if (!cms)
+ goto error;
+
+ if (hash && memcmp(hashbuf, hash, hashlen)) {
+ reject(rc, uri, ghostbuster_digest_mismatch,
+ "because Ghostbuster record does not match manifest digest");
+ goto error;
+ }
+
+ if (!(eContentType = CMS_get0_eContentType(cms)) ||
+ oid_cmp(eContentType, id_ct_rpkiGhostbusters,
+ sizeof(id_ct_rpkiGhostbusters))) {
+ reject(rc, uri, ghostbuster_bad_econtenttype,
+ "because Ghostbuster record has bad eContentType");
+ goto error;
+ }
+
+#if 0
+ /*
+ * May want this later if we're going to inspect the VCard. For now,
+ * just leave this NULL and the right thing should happen.
+ */
+ if ((bio = BIO_new(BIO_s_mem())) == NULL) {
+ logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri);
+ goto error;
+ }
+#endif
+
+ if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
+ reject(rc, uri, ghostbuster_invalid_cms, "because Ghostbuster record CMS failed validation");
+ goto error;
+ }
+
+ if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
+ reject(rc, uri, ghostbuster_missing_signer,
+ "because couldn't extract CMS signer from Ghostbuster record");
+ goto error;
+ }
+
+ parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri);
+
+#if 0
+ /*
+ * Here is where we would read the VCard from the bio returned by
+ * CMS_verify() so that we could check the VCard.
+ */
+#endif
+
+ if (!(crl = check_crl(rc, certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
+ reject(rc, uri, ghostbuster_bad_crl, "because Ghostbuster record EE certificate has bad CRL %s", certinfo.crldp);
+ goto error;
+ }
+
+ if (!(crls = sk_X509_CRL_new_null()) || !sk_X509_CRL_push(crls, crl))
+ goto error;
+ crl = NULL;
+
+ if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
+ goto error;
+
+ rctx.rc = rc;
+ rctx.subject = &certinfo;
+
+ X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
+ X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
+ X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
+
+ X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
+ X509_V_FLAG_CRL_CHECK |
+ X509_V_FLAG_POLICY_CHECK |
+ X509_V_FLAG_EXPLICIT_POLICY |
+ X509_V_FLAG_X509_STRICT);
+
+ X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
+
+ if (X509_verify_cert(&rctx.ctx) <= 0) {
+ /*
+ * Redundant error message?
+ */
+ logmsg(rc, log_data_err, "Validation failure for Ghostbuster record %s EE certificate", uri);
+ mib_increment(rc, uri, ghostbuster_invalid_ee);
+ goto error;
+ }
+
+ result = 1;
+
+ error:
+ if (initialized_store_ctx)
+ X509_STORE_CTX_cleanup(&rctx.ctx);
+ BIO_free(bio);
+ CMS_ContentInfo_free(cms);
+ sk_X509_free(signers);
+ sk_X509_CRL_pop_free(crls, X509_CRL_free);
+
+ return result;
+}
+
+/**
+ * Check whether we already have a particular Ghostbuster record,
+ * attempt to fetch it and check issuer's signature if we don't.
+ */
+static void check_ghostbuster(const rcynic_ctx_t *rc,
+ const char *uri,
+ STACK_OF(X509) *certs,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ char path[FILENAME_MAX];
+
+ if (uri_to_filename(rc, uri, path, sizeof(path), rc->authenticated) &&
+ !access(path, F_OK))
+ return;
+
+ logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri);
+
+ assert(rsync_cached_uri(rc, uri));
+
+ if (check_ghostbuster_1(rc, uri, path, sizeof(path), rc->unauthenticated,
+ certs, hash, hashlen)) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, current_ghostbuster_accepted);
+ return;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, current_ghostbuster_rejected);
+ }
+
+ if (check_ghostbuster_1(rc, uri, path, sizeof(path), rc->old_authenticated,
+ certs, hash, hashlen)) {
+ install_object(rc, uri, path);
+ mib_increment(rc, uri, backup_ghostbuster_accepted);
+ return;
+ } else if (!access(path, F_OK)) {
+ mib_increment(rc, uri, backup_ghostbuster_rejected);
+ }
+}
+
+
+
+static void walk_cert(rcynic_ctx_t *rc,
+ const certinfo_t *parent,
+ STACK_OF(X509) *certs);
+
+/**
+ * Recursive walk of certificate hierarchy (core of the program). The
+ * daisy chain recursion is to avoid having to duplicate the stack
+ * manipulation and error handling.
+ */
+static void walk_cert_1(rcynic_ctx_t *rc,
+ char *uri,
+ STACK_OF(X509) *certs,
+ const certinfo_t *parent,
+ const char *prefix,
+ const int backup,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ certinfo_t child;
+ X509 *x;
+
+ if ((x = check_cert(rc, uri, certs, parent, &child, prefix, backup, hash, hashlen)) == NULL)
+ return;
+
+ if (!sk_X509_push(certs, x)) {
+ logmsg(rc, log_sys_err,
+ "Internal allocation failure recursing over certificate");
+ return;
+ }
+
+ walk_cert(rc, &child, certs);
+ X509_free(sk_X509_pop(certs));
+}
+
+/**
+ * Recursive walk of certificate hierarchy (core of the program). The
+ * daisy chain recursion is to avoid having to duplicate the stack
+ * manipulation and error handling.
+ */
+static void walk_cert_2(rcynic_ctx_t *rc,
+ char *uri,
+ STACK_OF(X509) *certs,
+ const certinfo_t *parent,
+ const char *prefix,
+ const int backup,
+ const unsigned char *hash,
+ const size_t hashlen)
+{
+ if (endswith(uri, ".cer"))
+ walk_cert_1(rc, uri, certs, parent, prefix, backup, hash, hashlen);
+ else if (endswith(uri, ".roa"))
+ check_roa(rc, uri, certs, hash, hashlen);
+ else if (endswith(uri, ".gbr"))
+ check_ghostbuster(rc, uri, certs, hash, hashlen);
+ else if (!endswith(uri, ".crl"))
+ logmsg(rc, log_telemetry, "Don't know how to check object %s, ignoring", uri);
+}
+
+/**
+ * Recursive walk of certificate hierarchy (core of the program). The
+ * daisy chain recursion is to avoid having to duplicate the stack
+ * manipulation and error handling.
+ */
+static void walk_cert_3(rcynic_ctx_t *rc,
+ STACK_OF(X509) *certs,
+ const certinfo_t *parent,
+ const char *prefix,
+ const int backup,
+ Manifest *manifest)
+{
+ char uri[URI_MAX], path[FILENAME_MAX];
+ FileAndHash *fah;
+ STACK_OF(OPENSSL_STRING) *stray_ducks = NULL;
+ DIR *dir = NULL;
+ struct dirent *d;
+ int i;
+
+ /*
+ * Pull all non-directory filenames from the publication point directory.
+ */
+ if ((stray_ducks = sk_OPENSSL_STRING_new(uri_cmp)) == NULL)
+ logmsg(rc, log_sys_err, "Couldn't allocate stray_ducks stack");
+ else if (!uri_to_filename(rc, parent->sia, path, sizeof(path), prefix) || (dir = opendir(path)) == NULL)
+ logmsg(rc, log_data_err, "Couldn't list directory %s, skipping check for out-of-manifest data", path);
+ else
+ while ((d = readdir(dir)) != NULL)
+ if (d->d_type != DT_DIR && !sk_OPENSSL_STRING_push_strdup(stray_ducks, d->d_name))
+ logmsg(rc, log_sys_err, "Couldn't strdup() string \"%s\", blundering onwards", d->d_name);
+
+ if (dir != NULL)
+ closedir(dir);
+
+ /*
+ * Loop over manifest, checking everything it lists. Remove any
+ * filenames we find in the manifest from our list of objects found
+ * in the publication point directory, so we don't check stuff twice.
+ */
+ for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++) {
+ sk_OPENSSL_STRING_remove(stray_ducks, (char *) fah->file->data);
+ if (strlen(parent->sia) + strlen((char *) fah->file->data) >= sizeof(uri)) {
+ logmsg(rc, log_data_err, "URI %s%s too long, skipping", parent->sia, fah->file->data);
+ } else {
+ strcpy(uri, parent->sia);
+ strcat(uri, (char *) fah->file->data);
+ walk_cert_2(rc, uri, certs, parent, prefix, backup, fah->hash->data, fah->hash->length);
+ }
+ }
+
+ /*
+ * Whine about and maybe check any object that was in the directory
+ * but not in the manifest, except for the manifest itself.
+ */
+ for (i = 0; i < sk_OPENSSL_STRING_num(stray_ducks); i++) {
+ char *s = sk_OPENSSL_STRING_value(stray_ducks, i);
+ if (strlen(parent->sia) + strlen(s) >= sizeof(uri)) {
+ logmsg(rc, log_data_err, "URI %s%s too long, skipping", parent->sia, s);
+ continue;
+ }
+ strcpy(uri, parent->sia);
+ strcat(uri, s);
+ if (!strcmp(uri, parent->manifest))
+ continue;
+ logmsg(rc, log_telemetry, "Object %s present in publication directory but not in manifest", uri);
+ mib_increment(rc, uri, object_not_in_manifest);
+ if (rc->allow_object_not_in_manifest)
+ walk_cert_2(rc, uri, certs, parent, prefix, backup, NULL, 0);
+ }
+
+ sk_OPENSSL_STRING_pop_free(stray_ducks, OPENSSL_STRING_free);
+}
+
+/**
+ * Recursive walk of certificate hierarchy (core of the program). The
+ * daisy chain recursion is to avoid having to duplicate the stack
+ * manipulation and error handling.
+ */
+static void walk_cert(rcynic_ctx_t *rc,
+ const certinfo_t *parent,
+ STACK_OF(X509) *certs)
+{
+ assert(parent && certs);
+
+ if (parent->sia[0] && parent->ca) {
+ int n_cert = sk_X509_num(certs);
+ Manifest *manifest = NULL;
+
+ rc->indent++;
+
+ rsync_tree(rc, parent->sia);
+
+ if (!parent->manifest[0]) {
+
+ logmsg(rc, log_data_err, "Parent certificate does not specify a manifest, skipping collection");
+
+ } else if ((manifest = check_manifest(rc, parent->manifest, certs)) == NULL) {
+
+ logmsg(rc, log_data_err, "Couldn't get manifest %s, skipping collection", parent->manifest);
+
+ } else {
+
+ logmsg(rc, log_debug, "Walking unauthenticated store");
+ walk_cert_3(rc, certs, parent, rc->unauthenticated, 0, manifest);
+ logmsg(rc, log_debug, "Done walking unauthenticated store");
+
+ logmsg(rc, log_debug, "Walking old authenticated store");
+ walk_cert_3(rc, certs, parent, rc->old_authenticated, 1, manifest);
+ logmsg(rc, log_debug, "Done walking old authenticated store");
+
+ Manifest_free(manifest);
+ }
+
+ assert(sk_X509_num(certs) == n_cert);
+
+ rc->indent--;
+ }
+}
+
+
+
+/**
+ * Main program. Parse command line, read config file, iterate over
+ * trust anchors found via config file and do a tree walk for each
+ * trust anchor.
+ */
+int main(int argc, char *argv[])
+{
+ int opt_jitter = 0, use_syslog = 0, use_stderr = 0, syslog_facility = 0;
+ int opt_syslog = 0, opt_stderr = 0, opt_level = 0, prune = 1;
+ char *cfg_file = "rcynic.conf";
+ char *lockfile = NULL, *xmlfile = NULL;
+ int c, i, j, ret = 1, jitter = 600, lockfd = -1;
+ STACK_OF(CONF_VALUE) *cfg_section = NULL;
+ STACK_OF(X509) *certs = NULL;
+ CONF *cfg_handle = NULL;
+ time_t start = 0, finish;
+ unsigned long hash;
+ rcynic_ctx_t rc;
+ unsigned delay;
+ long eline = 0;
+ BIO *bio = NULL;
+
+ memset(&rc, 0, sizeof(rc));
+
+ if ((rc.jane = strrchr(argv[0], '/')) == NULL)
+ rc.jane = argv[0];
+ else
+ rc.jane++;
+
+ set_directory(&rc.authenticated, "rcynic-data/authenticated/");
+ set_directory(&rc.old_authenticated, "rcynic-data/authenticated.old/");
+ set_directory(&rc.unauthenticated, "rcynic-data/unauthenticated/");
+ rc.log_level = log_telemetry;
+ rc.allow_stale_crl = 1;
+ rc.allow_stale_manifest = 1;
+
+#define QQ(x,y) rc.priority[x] = y;
+ LOG_LEVELS;
+#undef QQ
+
+ OpenSSL_add_all_algorithms();
+ ERR_load_crypto_strings();
+
+ while ((c = getopt(argc, argv, "c:l:sej:V")) > 0) {
+ switch (c) {
+ case 'c':
+ cfg_file = optarg;
+ break;
+ case 'l':
+ opt_level = 1;
+ if (!configure_logmsg(&rc, optarg))
+ goto done;
+ break;
+ case 's':
+ use_syslog = opt_syslog = 1;
+ break;
+ case 'e':
+ use_stderr = opt_stderr = 1;
+ break;
+ case 'j':
+ if (!configure_integer(&rc, &jitter, optarg))
+ goto done;
+ opt_jitter = 1;
+ break;
+ case 'V':
+ puts(svn_id);
+ ret = 0;
+ goto done;
+ default:
+ logmsg(&rc, log_usage_err,
+ "usage: %s [-c configfile] [-s] [-e] [-l loglevel] [-j jitter] [-V]",
+ rc.jane);
+ goto done;
+ }
+ }
+
+ if ((cfg_handle = NCONF_new(NULL)) == NULL) {
+    logmsg(&rc, log_sys_err, "Couldn't create CONF object");
+ goto done;
+ }
+
+ if (NCONF_load(cfg_handle, cfg_file, &eline) <= 0) {
+ if (eline <= 0)
+ logmsg(&rc, log_usage_err, "Couldn't load config file %s", cfg_file);
+ else
+ logmsg(&rc, log_usage_err, "Error on line %ld of config file %s", eline, cfg_file);
+ goto done;
+ }
+
+ if (CONF_modules_load(cfg_handle, NULL, 0) <= 0) {
+ logmsg(&rc, log_sys_err, "Couldn't configure OpenSSL");
+ goto done;
+ }
+
+ if ((cfg_section = NCONF_get_section(cfg_handle, "rcynic")) == NULL) {
+ logmsg(&rc, log_usage_err, "Couldn't load rcynic section from config file");
+ goto done;
+ }
+
+ for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
+ CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
+
+ assert(val && val->name && val->value);
+
+ if (!name_cmp(val->name, "authenticated"))
+ set_directory(&rc.authenticated, val->value);
+
+ else if (!name_cmp(val->name, "old-authenticated"))
+ set_directory(&rc.old_authenticated, val->value);
+
+ else if (!name_cmp(val->name, "unauthenticated"))
+ set_directory(&rc.unauthenticated, val->value);
+
+ else if (!name_cmp(val->name, "rsync-timeout") &&
+ !configure_integer(&rc, &rc.rsync_timeout, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "rsync-program"))
+ rc.rsync_program = strdup(val->value);
+
+ else if (!name_cmp(val->name, "lockfile"))
+ lockfile = strdup(val->value);
+
+ else if (!opt_jitter &&
+ !name_cmp(val->name, "jitter") &&
+ !configure_integer(&rc, &jitter, val->value))
+ goto done;
+
+ else if (!opt_level &&
+ !name_cmp(val->name, "log-level") &&
+ !configure_logmsg(&rc, val->value))
+ goto done;
+
+ else if (!opt_syslog &&
+ !name_cmp(val->name, "use-syslog") &&
+ !configure_boolean(&rc, &use_syslog, val->value))
+ goto done;
+
+ else if (!opt_stderr &&
+ !name_cmp(val->name, "use-stderr") &&
+ !configure_boolean(&rc, &use_stderr, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "syslog-facility") &&
+ !configure_syslog(&rc, &syslog_facility,
+ facilitynames, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "xml-summary"))
+ xmlfile = strdup(val->value);
+
+ else if (!name_cmp(val->name, "allow-stale-crl") &&
+ !configure_boolean(&rc, &rc.allow_stale_crl, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-stale-manifest") &&
+ !configure_boolean(&rc, &rc.allow_stale_manifest, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-non-self-signed-trust-anchor") &&
+ !configure_boolean(&rc, &rc.allow_non_self_signed_trust_anchor, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "require-crl-in-manifest") &&
+ !configure_boolean(&rc, &rc.require_crl_in_manifest, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "allow-object-not-in-manifest") &&
+ !configure_boolean(&rc, &rc.allow_object_not_in_manifest, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "use-links") &&
+ !configure_boolean(&rc, &rc.use_links, val->value))
+ goto done;
+
+ else if (!name_cmp(val->name, "prune") &&
+ !configure_boolean(&rc, &prune, val->value))
+ goto done;
+
+ /*
+ * Ugly, but the easiest way to handle all these strings.
+ */
+
+#define QQ(x,y) \
+ else if (!name_cmp(val->name, "syslog-priority-" #x) && \
+ !configure_syslog(&rc, &rc.priority[x], \
+ prioritynames, val->value)) \
+ goto done;
+
+ LOG_LEVELS; /* the semicolon is for emacs */
+
+#undef QQ
+
+ }
+
+ if ((rc.rsync_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate rsync_cache stack");
+ goto done;
+ }
+
+ if ((rc.backup_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate backup_cache stack");
+ goto done;
+ }
+
+ if ((rc.stale_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate stale_cache stack");
+ goto done;
+ }
+
+ if (xmlfile != NULL) {
+ if ((rc.host_counters = sk_HOST_MIB_COUNTER_new(host_mib_counter_cmp)) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate host_counters stack");
+ goto done;
+ }
+ if ((rc.validation_status = sk_VALIDATION_STATUS_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate validation_status stack");
+ goto done;
+ }
+ }
+
+ if ((certs = sk_X509_new_null()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate certificate stack");
+ goto done;
+ }
+
+ if ((rc.x509_store = X509_STORE_new()) == NULL) {
+ logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE");
+ goto done;
+ }
+
+ rc.use_syslog = use_syslog;
+
+ if (use_syslog)
+ openlog(rc.jane,
+ LOG_PID | (use_stderr ? LOG_PERROR : 0),
+ (syslog_facility ? syslog_facility : LOG_LOCAL0));
+
+ if (jitter > 0) {
+ if (RAND_bytes((unsigned char *) &delay, sizeof(delay)) <= 0) {
+ logmsg(&rc, log_sys_err, "Couldn't read random bytes");
+ goto done;
+ }
+ delay %= jitter;
+ logmsg(&rc, log_telemetry, "Delaying %u seconds before startup", delay);
+ while (delay > 0)
+ delay = sleep(delay);
+ }
+
+ if (lockfile &&
+ ((lockfd = open(lockfile, O_RDWR|O_CREAT|O_NONBLOCK, 0666)) < 0 ||
+ lockf(lockfd, F_TLOCK, 0) < 0)) {
+ if (lockfd >= 0 && errno == EAGAIN)
+ logmsg(&rc, log_telemetry, "Lock %s held by another process", lockfile);
+ else
+ logmsg(&rc, log_sys_err, "Problem locking %s: %s", lockfile, strerror(errno));
+ lockfd = -1;
+ goto done;
+ }
+
+ start = time(0);
+ logmsg(&rc, log_telemetry, "Starting");
+
+ if (!rm_rf(rc.old_authenticated)) {
+ logmsg(&rc, log_sys_err, "Couldn't remove %s: %s",
+ rc.old_authenticated, strerror(errno));
+ goto done;
+ }
+
+ if (rename(rc.authenticated, rc.old_authenticated) < 0 &&
+ errno != ENOENT) {
+ logmsg(&rc, log_sys_err, "Couldn't rename %s to %s: %s",
+           rc.authenticated, rc.old_authenticated, strerror(errno));
+ goto done;
+ }
+
+ if (!access(rc.authenticated, F_OK) || !mkdir_maybe(&rc, rc.authenticated)) {
+ logmsg(&rc, log_sys_err, "Couldn't prepare directory %s: %s",
+ rc.authenticated, strerror(errno));
+ goto done;
+ }
+
+ for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
+ CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
+ char path1[FILENAME_MAX], path2[FILENAME_MAX], uri[URI_MAX];
+ certinfo_t ta_info;
+ X509 *x = NULL;
+
+ assert(val && val->name && val->value);
+
+ if (!name_cmp(val->name, "trust-anchor")) {
+ /*
+ * Old local file trust anchor method.
+ */
+ logmsg(&rc, log_telemetry, "Processing trust anchor from local file %s", val->value);
+ if (strlen(val->value) >= sizeof(path1)) {
+ logmsg(&rc, log_usage_err, "Trust anchor path name too long %s", val->value);
+ goto done;
+ }
+ strcpy(path1, val->value);
+ if ((x = read_cert(path1, NULL, 0)) == NULL) {
+ logmsg(&rc, log_usage_err, "Couldn't read trust anchor %s", path1);
+ goto done;
+ }
+ hash = X509_subject_name_hash(x);
+ for (j = 0; j < INT_MAX; j++) {
+ if (snprintf(path2, sizeof(path2), "%s%lx.%d.cer",
+                     rc.authenticated, hash, j) >= sizeof(path2)) {
+ logmsg(&rc, log_sys_err,
+ "Couldn't construct path name for trust anchor %s", path1);
+ goto done;
+ }
+ if (access(path2, F_OK))
+ break;
+ }
+ if (j == INT_MAX) {
+ logmsg(&rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1);
+ goto done;
+ }
+ }
+
+ if (!name_cmp(val->name, "trust-anchor-uri-with-key") ||
+ !name_cmp(val->name, "indirect-trust-anchor") ||
+ !name_cmp(val->name, "trust-anchor-locator")) {
+ /*
+ * Newfangled URI + public key method. Two different versions
+ * of essentially the same mechanism.
+ *
+ * NB: EVP_PKEY_cmp() returns 1 for success, not 0 like every
+ * other xyz_cmp() function in the entire OpenSSL library.
+ * Go figure.
+ */
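+      /*
+       * Sketch of the trust-anchor-locator file format (hypothetical
+       * example, not a real trust anchor): the first line carries the
+       * rsync URI of the self-signed certificate, the remaining lines
+       * carry the Base64-encoded DER SubjectPublicKeyInfo of its key:
+       *
+       *   rsync://rpki.example.net/repo/root.cer
+       *   MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA...
+       *   ...more Base64 lines...
+       *
+       * The older trust-anchor-uri-with-key form instead puts the URI
+       * and the name of a DER public key file on the config line itself.
+       */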
+ int unified = (!name_cmp(val->name, "indirect-trust-anchor") ||
+ !name_cmp(val->name, "trust-anchor-locator"));
+ EVP_PKEY *pkey = NULL, *xpkey = NULL;
+ char *fn;
+ if (unified) {
+ fn = val->value;
+ bio = BIO_new_file(fn, "r");
+ if (!bio || BIO_gets(bio, uri, sizeof(uri)) <= 0) {
+ logmsg(&rc, log_usage_err, "Couldn't read trust anchor URI from %s", fn);
+ goto done;
+ }
+ uri[strcspn(uri, " \t\r\n")] = '\0';
+ bio = BIO_push(BIO_new(BIO_f_base64()), bio);
+ } else {
+ j = strcspn(val->value, " \t");
+ if (j >= sizeof(uri)) {
+ logmsg(&rc, log_usage_err, "Trust anchor URI too long %s", val->value);
+ goto done;
+ }
+ memcpy(uri, val->value, j);
+ uri[j] = '\0';
+ j += strspn(val->value + j, " \t");
+ fn = val->value + j;
+ bio = BIO_new_file(fn, "rb");
+ }
+ if (!uri_to_filename(&rc, uri, path1, sizeof(path1), rc.unauthenticated) ||
+ !uri_to_filename(&rc, uri, path2, sizeof(path2), rc.authenticated)) {
+ logmsg(&rc, log_usage_err, "Couldn't convert trust anchor URI %s to filename", uri);
+ goto done;
+ }
+ logmsg(&rc, log_telemetry, "Processing trust anchor from URI %s", uri);
+ if (!rsync_file(&rc, uri)) {
+ logmsg(&rc, log_data_err, "Could not fetch trust anchor from %s", uri);
+ continue;
+ }
+ if (bio)
+ pkey = d2i_PUBKEY_bio(bio, NULL);
+ BIO_free_all(bio);
+ bio = NULL;
+ if (!pkey) {
+ logmsg(&rc, log_usage_err, "Couldn't read trust anchor public key for %s from %s", uri, fn);
+ goto done;
+ }
+ if ((x = read_cert(path1, NULL, 0)) == NULL)
+ logmsg(&rc, log_data_err, "Couldn't read trust anchor %s", path1);
+ if (x && (xpkey = X509_get_pubkey(x)) == NULL)
+ logmsg(&rc, log_data_err, "Rejected %s because couldn't read public key from trust anchor locator", uri);
+ j = (xpkey && EVP_PKEY_cmp(pkey, xpkey) == 1);
+ EVP_PKEY_free(pkey);
+ EVP_PKEY_free(xpkey);
+ if (!j) {
+ logmsg(&rc, log_data_err, "Rejected %s because known public key didn't match trust anchor locator", uri);
+ X509_free(x);
+ continue;
+ }
+ }
+
+ if (!x)
+ continue;
+
+ logmsg(&rc, log_telemetry, "Copying trust anchor %s to %s", path1, path2);
+
+ if (!mkdir_maybe(&rc, path2) ||
+ !(rc.use_links ? ln(path1, path2) : cp(path1, path2))) {
+ logmsg(&rc, log_sys_err, "Couldn't %s trust anchor %s",
+ (rc.use_links ? "link" : "copy"), path1);
+ goto done;
+ }
+
+ parse_cert(&rc, x, &ta_info, "");
+ ta_info.ta = 1;
+ sk_X509_push(certs, x);
+
+ if (ta_info.crldp[0] && !check_x509(&rc, certs, x, &ta_info)) {
+ logmsg(&rc, log_data_err, "Couldn't get CRL for trust anchor %s", path1);
+ } else {
+ walk_cert(&rc, &ta_info, certs);
+ }
+
+ X509_free(sk_X509_pop(certs));
+ assert(sk_X509_num(certs) == 0);
+ }
+
+ if (prune && !prune_unauthenticated(&rc, rc.unauthenticated,
+ strlen(rc.unauthenticated))) {
+ logmsg(&rc, log_sys_err, "Trouble pruning old unauthenticated data");
+ goto done;
+ }
+
+ ret = 0;
+
+ done:
+ log_openssl_errors(&rc);
+
+ if (xmlfile != NULL) {
+
+ char tad[sizeof("2006-10-13T11:22:33Z") + 1];
+ char hostname[HOST_NAME_MAX];
+ time_t tad_time = time(0);
+ struct tm *tad_tm = gmtime(&tad_time);
+ int ok = 1, use_stdout = !strcmp(xmlfile, "-");
+ FILE *f = NULL;
+
+ strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
+
+ ok &= gethostname(hostname, sizeof(hostname)) == 0;
+
+ if (use_stdout)
+ f = stdout;
+ else if (ok)
+ ok &= (f = fopen(xmlfile, "w")) != NULL;
+
+ if (ok)
+ logmsg(&rc, log_telemetry, "Writing XML summary to %s",
+ (use_stdout ? "standard output" : xmlfile));
+
+ if (ok)
+ ok &= fprintf(f, "<?xml version=\"1.0\" ?>\n"
+ "<rcynic-summary date=\"%s\" rcynic-version=\"%s\""
+ " summary-version=\"%d\" reporting-hostname=\"%s\">\n"
+ " <labels>\n"
+ " <hostname>Publication Repository</hostname>\n",
+ tad, svn_id, XML_SUMMARY_VERSION, hostname) != EOF;
+
+ for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
+ ok &= fprintf(f, " <%s kind=\"%s\">%s</%s>\n",
+ mib_counter_label[j], mib_counter_kind[j],
+ (mib_counter_desc[j]
+ ? mib_counter_desc[j]
+ : X509_verify_cert_error_string(mib_counter_openssl[j])),
+ mib_counter_label[j]) != EOF;
+
+ if (ok)
+ ok &= fprintf(f, " </labels>\n") != EOF;
+
+ for (i = 0; ok && i < sk_HOST_MIB_COUNTER_num(rc.host_counters); i++) {
+ HOST_MIB_COUNTER *h = sk_HOST_MIB_COUNTER_value(rc.host_counters, i);
+ assert(h);
+
+ if (ok)
+ ok &= fprintf(f, " <host>\n <hostname>%s</hostname>\n",
+ h->hostname) != EOF;
+
+ for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
+ ok &= fprintf(f, " <%s>%lu</%s>\n", mib_counter_label[j],
+ h->counters[j], mib_counter_label[j]) != EOF;
+
+ if (ok)
+ ok &= fprintf(f, " </host>\n") != EOF;
+ }
+
+
+ for (i = 0; ok && i < sk_VALIDATION_STATUS_num(rc.validation_status); i++) {
+ VALIDATION_STATUS *v = sk_VALIDATION_STATUS_value(rc.validation_status, i);
+ assert(v);
+
+ tad_tm = gmtime(&v->timestamp);
+ strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
+
+ ok &= fprintf(f, " <validation_status timestamp=\"%s\" status=\"%s\">%s</validation_status>\n",
+ tad, mib_counter_label[v->code], v->uri) != EOF;
+ }
+
+ if (ok)
+ ok &= fprintf(f, "</rcynic-summary>\n") != EOF;
+
+ if (f && !use_stdout)
+ ok &= fclose(f) != EOF;
+
+ if (!ok)
+ logmsg(&rc, log_sys_err, "Couldn't write XML summary to %s: %s",
+ xmlfile, strerror(errno));
+
+ }
+
+ /*
+ * Do NOT free cfg_section, NCONF_free() takes care of that
+ */
+ sk_X509_pop_free(certs, X509_free);
+ sk_OPENSSL_STRING_pop_free(rc.rsync_cache, OPENSSL_STRING_free);
+ sk_OPENSSL_STRING_pop_free(rc.backup_cache, OPENSSL_STRING_free);
+ sk_OPENSSL_STRING_pop_free(rc.stale_cache, OPENSSL_STRING_free);
+ sk_HOST_MIB_COUNTER_pop_free(rc.host_counters, HOST_MIB_COUNTER_free);
+ sk_VALIDATION_STATUS_pop_free(rc.validation_status, VALIDATION_STATUS_free);
+ X509_STORE_free(rc.x509_store);
+ NCONF_free(cfg_handle);
+ CONF_modules_free();
+ BIO_free(bio);
+ EVP_cleanup();
+ ERR_free_strings();
+ free(rc.authenticated);
+ free(rc.old_authenticated);
+ free(rc.unauthenticated);
+ if (rc.rsync_program)
+ free(rc.rsync_program);
+ if (lockfile && lockfd >= 0)
+ unlink(lockfile);
+ if (lockfile)
+ free(lockfile);
+ if (xmlfile)
+ free(xmlfile);
+
+ if (start) {
+ finish = time(0);
+ logmsg(&rc, log_telemetry,
+ "Finished, elapsed time %d:%02d:%02d",
+ (finish - start) / 3600,
+ (finish - start) / 60 % 60,
+ (finish - start) % 60);
+ }
+
+ return ret;
+}
diff --git a/rcynic-ng/rcynic.xsl b/rcynic-ng/rcynic.xsl
new file mode 100644
index 00000000..25aa7b2c
--- /dev/null
+++ b/rcynic-ng/rcynic.xsl
@@ -0,0 +1,221 @@
+<?xml version="1.0"?>
+<!--
+ - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+ -
+ - Portions copyright (C) 2006 American Registry for Internet Numbers ("ARIN")
+ -
+ - Permission to use, copy, modify, and distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+ -->
+
+<!-- $Id$ -->
+
+<!--
+ - XSL stylesheet to render rcynic's xml-summary output as basic (X)HTML.
+ -
+ - This is a bit more complicated than strictly necessary, because I wanted
+ - the ability to drop out columns that are nothing but zeros.
+ - There's probably some clever way of using XPath to simplify this,
+ - but I don't expect the data sets to be large enough for performance
+ - to be an issue here. Feel free to show me how to do better.
+ -->
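+
+<!--
+  - The input document produced by rcynic's xml-summary option has
+  - roughly this shape (hypothetical, abbreviated example; the element
+  - names under labels and host come from rcynic's MIB counter labels):
+  -
+  -   <rcynic-summary date="2011-01-01T00:00:00Z" rcynic-version="..."
+  -                   summary-version="..." reporting-hostname="host.example.org">
+  -     <labels>
+  -       <hostname>Publication Repository</hostname>
+  -       <current_cert_accepted kind="good">...</current_cert_accepted>
+  -     </labels>
+  -     <host>
+  -       <hostname>rpki.example.net</hostname>
+  -       <current_cert_accepted>42</current_cert_accepted>
+  -     </host>
+  -     <validation_status timestamp="..." status="...">rsync://...</validation_status>
+  -   </rcynic-summary>
+  -->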
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
+ version="1.0"
+ xmlns:exslt="http://exslt.org/common"
+ exclude-result-prefixes="exslt">
+
+ <xsl:output omit-xml-declaration="yes" indent="yes" method="xml" encoding="US-ASCII"
+ doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN"
+ doctype-system="http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"/>
+
+ <xsl:param name="refresh" select="1800"/>
+ <xsl:param name="suppress-zero-columns" select="1"/>
+ <xsl:param name="show-total" select="1"/>
+ <xsl:param name="use-colors" select="1"/>
+ <xsl:param name="show-detailed-status" select="0"/>
+ <xsl:param name="show-problems" select="1"/>
+ <xsl:param name="show-summary" select="1"/>
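+
+  <!-- One way to apply this stylesheet from the command line
+       (illustrative; the input file is whatever the xml-summary
+       option in rcynic.conf names):
+
+         xsltproc rcynic.xsl rcynic.xml > rcynic.html
+
+       The parameters above can be overridden with xsltproc's
+       "param" option. -->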
+
+ <xsl:variable name="sums">
+ <xsl:for-each select="rcynic-summary/labels/*">
+ <x sum="{sum(/rcynic-summary/host/*[name() = name(current()) and . != 0])}"/>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <xsl:variable name="moods">
+ <xsl:for-each select="rcynic-summary/host">
+ <xsl:variable name="hostname" select="hostname"/>
+ <xsl:for-each select="*[not(self::hostname)]">
+ <xsl:variable name="label" select="name()"/>
+ <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $label]/@kind"/>
+ <x hostname="{$hostname}" mood="{$mood}">
+ <xsl:value-of select="."/>
+ </x>
+ </xsl:for-each>
+ </xsl:for-each>
+ </xsl:variable>
+
+ <xsl:template match="/">
+ <xsl:comment>Generators</xsl:comment>
+ <xsl:comment><xsl:value-of select="rcynic-summary/@rcynic-version"/></xsl:comment>
+ <xsl:comment>$Id$</xsl:comment>
+ <html>
+ <xsl:variable name="title">
+ <xsl:text>rcynic summary </xsl:text>
+ <xsl:value-of select="rcynic-summary/@date"/>
+ </xsl:variable>
+ <head>
+ <title>
+ <xsl:value-of select="$title"/>
+ </title>
+ <xsl:if test="$refresh != 0">
+ <meta http-equiv="Refresh" content="{$refresh}"/>
+ </xsl:if>
+ <style type="text/css">
+ td { text-align: center; padding: 4px }
+ td.uri { text-align: left }
+ <xsl:if test="$use-colors != 0">
+ tr.good { background-color: #77ff77 }
+ tr.warn { background-color: yellow }
+ tr.bad { background-color: #ff5500 }
+ </xsl:if>
+ </style>
+ </head>
+ <body>
+
+ <xsl:if test="$show-summary != 0">
+ <h1>
+ <xsl:value-of select="$title"/>
+ </h1>
+ <table class="summary" rules="all">
+ <thead>
+ <tr>
+ <xsl:for-each select="rcynic-summary/labels/*">
+ <xsl:variable name="p" select="position()"/>
+ <xsl:if test="$suppress-zero-columns = 0 or position() = 1 or exslt:node-set($sums)/x[$p]/@sum &gt; 0">
+ <td><b><xsl:apply-templates/></b></td>
+ </xsl:if>
+ </xsl:for-each>
+ </tr>
+ </thead>
+ <tbody>
+ <xsl:for-each select="rcynic-summary/host">
+ <xsl:sort order="descending" data-type="number" select="sum(*[not(self::hostname)])"/>
+ <xsl:sort order="ascending" data-type="text" select="hostname"/>
+ <xsl:variable name="hostname" select="hostname"/>
+ <xsl:variable name="goodness" select="sum(exslt:node-set($moods)/x[@hostname = $hostname and @mood = 'good'])"/>
+ <xsl:variable name="badness" select="sum(exslt:node-set($moods)/x[@hostname = $hostname and @mood = 'bad'])"/>
+ <xsl:variable name="warnings" select="sum(exslt:node-set($moods)/x[@hostname = $hostname and @mood = 'warn'])"/>
+ <xsl:variable name="mood">
+ <xsl:choose>
+ <xsl:when test="$goodness != 0 and $warnings = 0 and $badness = 0">good</xsl:when>
+ <xsl:when test="$goodness + $warnings != 0">warn</xsl:when>
+ <xsl:otherwise>bad</xsl:otherwise>
+ </xsl:choose>
+ </xsl:variable>
+ <tr class="{$mood}">
+ <xsl:for-each select="*">
+ <xsl:variable name="p" select="position()"/>
+ <xsl:if test="$suppress-zero-columns = 0 or position() = 1 or exslt:node-set($sums)/x[$p]/@sum &gt; 0">
+ <td><xsl:if test=". != 0"><xsl:apply-templates/></xsl:if></td>
+ </xsl:if>
+ </xsl:for-each>
+ </tr>
+ </xsl:for-each>
+ <xsl:if test="$show-total != 0">
+ <tr>
+ <td><b>Total</b></td>
+ <xsl:for-each select="exslt:node-set($sums)/x[position() &gt; 1]">
+ <xsl:if test="$suppress-zero-columns = 0 or @sum &gt; 0">
+ <td><b><xsl:value-of select="@sum"/></b></td>
+ </xsl:if>
+ </xsl:for-each>
+ </tr>
+ </xsl:if>
+ </tbody>
+ </table>
+ </xsl:if>
+
+ <xsl:if test="$show-problems != 0">
+ <br/>
+ <h1>Problems</h1>
+ <table class="problems" rules="all" >
+ <thead>
+ <tr>
+ <td class="status"><b>Status</b></td>
+ <td class="uri"><b>URI</b></td>
+ </tr>
+ </thead>
+ <tbody>
+ <xsl:for-each select="rcynic-summary/validation_status">
+ <xsl:variable name="status" select="@status"/>
+ <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
+ <xsl:if test="$mood != 'good'">
+ <tr>
+ <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
+ <td class="uri"><xsl:value-of select="."/></td>
+ </tr>
+ </xsl:if>
+ </xsl:for-each>
+ </tbody>
+ </table>
+ </xsl:if>
+
+ <xsl:if test="$show-detailed-status != 0">
+ <br/>
+ <h1>Validation Status</h1>
+ <table class="details" rules="all" >
+ <thead>
+ <tr>
+ <td class="timestamp"><b>Timestamp</b></td>
+ <td class="status"><b>Status</b></td>
+ <td class="uri"><b>URI</b></td>
+ </tr>
+ </thead>
+ <tbody>
+ <xsl:for-each select="rcynic-summary/validation_status">
+ <xsl:variable name="status" select="@status"/>
+ <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
+ <tr class="{$mood}">
+ <td class="timestamp"><xsl:value-of select="@timestamp"/></td>
+ <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
+ <td class="uri"><xsl:value-of select="."/></td>
+ </tr>
+ </xsl:for-each>
+ </tbody>
+ </table>
+ </xsl:if>
+
+ </body>
+ </html>
+ </xsl:template>
+
+</xsl:stylesheet>
+
+<!--
+ - Local variables:
+ - mode: sgml
+ - End:
+ -->
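The stylesheet above can be applied with any XSLT 1.0 processor that supports
the exslt node-set extension; with xsltproc, the parameters declared near the
top (refresh, suppress-zero-columns, show-detailed-status, and so on) can be
overridden on the command line.  A minimal sketch, assuming the summary file
is called rcynic.xml and xsltproc is available:

    # Render the XML summary as XHTML, disabling the meta-refresh and
    # turning on the detailed per-object status table.
    xsltproc --param refresh 0 \
             --param show-detailed-status 1 \
             rcynic.xsl rcynic.xml > rcynic.html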
diff --git a/rcynic-ng/sample-trust-anchors/README b/rcynic-ng/sample-trust-anchors/README
new file mode 100644
index 00000000..b0c45dd3
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/README
@@ -0,0 +1,13 @@
+$Id$
+
+These are SAMPLE trust anchors for testing rcynic. Any resemblance
+between these files and real trust anchors living, dead, or wandering
+the night in ghostly torment is purely coincidental.
+
+Remember: it's only a trust anchor if a relying party says it is.
+You're the relying party, so it's your call.
+
+The .cer files (if any) are self-signed X.509 trust anchors.
+
+The .tal files are "trust anchor locators", formerly known as
+"indirect trust anchors" -- see draft-ietf-sidr-ta.
diff --git a/rcynic-ng/sample-trust-anchors/afrinic.tal b/rcynic-ng/sample-trust-anchors/afrinic.tal
new file mode 100644
index 00000000..0f202c73
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/afrinic.tal
@@ -0,0 +1,8 @@
+rsync://rpki.afrinic.net/repository/AfriNIC.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxsAqAhWIO+ON2Ef9oRDM
+pKxv+AfmSLIdLWJtjrvUyDxJPBjgR+kVrOHUeTaujygFUp49tuN5H2C1rUuQavTH
+vve6xNF5fU3OkTcqEzMOZy+ctkbde2SRMVdvbO22+TH9gNhKDc9l7Vu01qU4LeJH
+k3X0f5uu5346YrGAOSv6AaYBXVgXxa0s9ZvgqFpim50pReQe/WI3QwFKNgpPzfQL
+6Y7fDPYdYaVOXPXSKtx7P4s4KLA/ZWmRL/bobw/i2fFviAGhDrjqqqum+/9w1hEl
+L/vqihVnV18saKTnLvkItA/Bf5i11Yhw2K7qv573YWxyuqCknO/iYLTR1DToBZcZ
+UQIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/apnic.cer b/rcynic-ng/sample-trust-anchors/apnic.cer
new file mode 100644
index 00000000..bbb70cac
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/apnic.cer
Binary files differ
diff --git a/rcynic-ng/sample-trust-anchors/apnic.tal b/rcynic-ng/sample-trust-anchors/apnic.tal
new file mode 100644
index 00000000..9e061061
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/apnic.tal
@@ -0,0 +1,8 @@
+rsync://rpki.apnic.net/repository/APNIC.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr36frxrKWB/g+b2F4TSN
+TJYm5vcq0QjSb6Sas3O84doujITy5FmAbcFlTUc5m/hhDrWO6OBowAFmLt8KowCj
+Gf0aBFGeXULYiqcZFCiP1LPqvS2M5WN5PLrz88f21EUpqsm+Fq/5fwdH6OHZvSbC
+BE2grVw1tQM5LCSqCs4vmrdvGi1tVcFF2FlDFaCpKN7p0Eg7YmLedf4ANkbpNxqk
+mjbDVybK1qL6p6j85uBTbPoGHHr/AUw2UXdnl1Z+mPj1lZv01/AQe7X4KobJW+Tc
+KljtK6Zccs6+/LiC1zh59VHdbIn1K4KyXVTeNAOHzu3ZOOlcTMMfO0dCnqtKoab/
+eQIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/bbn-testbed.tal b/rcynic-ng/sample-trust-anchors/bbn-testbed.tal
new file mode 100644
index 00000000..21302ea5
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/bbn-testbed.tal
@@ -0,0 +1,8 @@
+rsync://rpki.bbn.com/rpki/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvEjkk0oC2N8Ij+ruAkrX
+cI7lSOIG1sQLC8xJJqklMlOBLao+3If0AVKSEP5+pb/hJwpXPomt6Lm4+fuZ6vX/
+q15llQL3YWkHZ49t/X47yCqxvM0txRaZafffbk6lJrofKhKKrYEcrr/rHiG+IQXn
+U1MGaQ/a8DfRDWiCQxonh7CthrLi7tr+QI9s9fDvvHPCK9OmmhZvCUDOydf+/vMn
+VShQ57KsUFcuZ0EX9UwsaIGCYGyvsYwk54UtoIGovg2IavfZK8ai0/5XEVpvKQiR
+8AixZpg5bSe7555+LhzpL5+LdqX6/hVRah0JrdR8KiXvr16Kmcitj+JLqVc0Wkd8
+zwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/lacnic.tal b/rcynic-ng/sample-trust-anchors/lacnic.tal
new file mode 100644
index 00000000..77a142d2
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/lacnic.tal
@@ -0,0 +1,8 @@
+rsync://repository.lacnic.net/rpki/lacnic/RTA_LACNIC_RPKI.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1AuR49ZoKS59Vnpq8M0X
+djeV3ROqtElwx6sNmUXvWBFPQlZLs2tR5/0MwprIWRi91WnMBVWjsECcLBe7Pu+u
+V/tTvPMJRXm/c+l8nR+FhAj7pn4M5A2pHFBndCPc1UrFD+BLACx9DSNiUjzKr1t7
+wjHTW+F0NMnZ9g9hKdxDNCFi66BGx2f3TTW3uGns/IPfkxrRCeYtJcBpQ5mKoc8g
+QOndiEG/33uXDS9EOe1dycmnaw9EQqxqHp+Bj0TIVoFyfDNuT+soJ3uwtQr2g5Ys
+AIxJtmBAZrLj+acmLeQrYC0xQuK118dSAS9r6GSm476m2aGEYtb083fLodeYSEjM
+/wIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal b/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal
new file mode 100644
index 00000000..ebdb9f45
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal
@@ -0,0 +1,9 @@
+rsync://rpki.ripe.net/ta/ripe-ncc-ta.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0URYSGqUz2m
+yBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZxIgPPV
+2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNc
+Krmit8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6
+Qc1ZHMFVFb385IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXub
+ASQ9KUWqJ0+Ot3QCXr4LXECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk
+1uQKAyHUbn/tXvP8lrjAibGzVsXDT2L0x4Edx+QdixPgOji3gBMyL2V
+wIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-apnic.tal b/rcynic-ng/sample-trust-anchors/testbed-apnic.tal
new file mode 100644
index 00000000..3e57b05e
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/testbed-apnic.tal
@@ -0,0 +1,8 @@
+rsync://apnic.rpki.net/rpki/apnic/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
+GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
+Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
+nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
+BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
+ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
+aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal b/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal
new file mode 100644
index 00000000..ae16a302
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal
@@ -0,0 +1,8 @@
+rsync://apnicrpki.rand.apnic.net/repository/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApQofhU9VeRGZxlNgkrFR
+ShSIkGtRUwg1mkISvieM2oxA6LFhHoqFV25BdK/3IhOh/0I/E9WX7uWPD72MFGBB
+LsFc6JpZFkvg8+9KUIHquk46wn4bEvA8xMsPbGo+mK0CAkqOdHQSemC7nqUlR9VH
+5zH1t8aYS4mrVN59LfClmiEE7QElgmVyvccfaEd+gMGaxsDvSylWIdvVxYfFG2rB
+LiEsmfXwdn2a8b1Zx5eaFD80XV9Z7h15ESP1epSScGzMp2XB0FYMC3f9k7l5sydX
+Rj5BYo1rWaM+Y6AoHe/d8G0xm6iX6b/JqT4Ywdt8/7DbsweaIk73VnpnXrYATCt9
+VwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-arin.tal b/rcynic-ng/sample-trust-anchors/testbed-arin.tal
new file mode 100644
index 00000000..5ec14f36
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/testbed-arin.tal
@@ -0,0 +1,8 @@
+rsync://arin.rpki.net/rpki/arin/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
+GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
+Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
+nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
+BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
+ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
+aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-ripe.tal b/rcynic-ng/sample-trust-anchors/testbed-ripe.tal
new file mode 100644
index 00000000..d52aba62
--- /dev/null
+++ b/rcynic-ng/sample-trust-anchors/testbed-ripe.tal
@@ -0,0 +1,8 @@
+rsync://ripe.rpki.net/rpki/ripe/root.cer
+MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
+GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
+Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
+nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
+BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
+ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
+aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/show.awk b/rcynic-ng/show.awk
new file mode 100644
index 00000000..2e13dce2
--- /dev/null
+++ b/rcynic-ng/show.awk
@@ -0,0 +1,75 @@
+# $Id$
+#
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Prettyprint tab-delimited rcynic summary data. This is intended as
+# a postprocessor for the output of show.xsl.
+
+BEGIN {
+ FS = "\t";
+}
+
+NR == 1 {
+ nf = NF;
+ for (i = 1; i <= nf; i++) {
+ nh = split($i, h, /[ \t]+/);
+ for (j = 1; j <= nh; j++) {
+ head[i, j] = h[j];
+ if (length(h[j]) > width[i])
+ width[i] = length(h[j]);
+ }
+ }
+}
+
+NR > 1 {
+ nr = NR - 1;
+ for (j = 1; j <= NF; j++) {
+ data[nr, j] = $j;
+ sum[j] += $j;
+ if (length($j) > width[j])
+ width[j] = length($j);
+ }
+}
+
+END {
+ for (i = 1;; i++) {
+ blank = 1;
+ for (j = 2; j <= nf; j++)
+ if (head[j, i] && sum[j] > 0)
+ blank = 0;
+ if (blank)
+ break;
+ for (j = 1; j <= nf; j++)
+ if (j == 1)
+ printf "%*s", width[j], head[j, i];
+ else if (sum[j] > 0)
+ printf " %*s", width[j], head[j, i];
+ print "";
+ }
+ for (i = 1; i <= nr; i++) {
+ for (j = 1; j <= nf; j++)
+ if (j == 1)
+ printf "%*s", width[j], data[i, j];
+ else if (sum[j] > 0)
+ printf " %*s", width[j], data[i, j];
+ print "";
+ }
+ for (j = 1; j <= nf; j++)
+ if (j == 1)
+ printf "%*s", width[j], "Total";
+ else if (sum[j] > 0)
+ printf " %*s", width[j], sum[j];
+ print "";
+}
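A toy run makes the column handling visible: any column whose sum is zero
(here "bad") is dropped from the formatted output and a Total row is
appended.  The hostname and counts are invented, and the command assumes it
is run from the directory containing show.awk:

    printf 'hostname\tgood\tbad\nrpki.example.net\t3\t0\n' | awk -f show.awk

    # prints roughly:
    #         hostname good
    # rpki.example.net    3
    #            Total    3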
diff --git a/rcynic-ng/show.sh.in b/rcynic-ng/show.sh.in
new file mode 100644
index 00000000..c62a325b
--- /dev/null
+++ b/rcynic-ng/show.sh.in
@@ -0,0 +1,20 @@
+#!/bin/sh -
+# $Id$
+#
+# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+#
+# Permission to use, copy, modify, and/or distribute this software for any
+# purpose with or without fee is hereby granted, provided that the above
+# copyright notice and this permission notice appear in all copies.
+#
+# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+# PERFORMANCE OF THIS SOFTWARE.
+
+# Prettyprint rcynic summary data as flat text.
+
+@XSLTPROC@ @abs_builddir@/show.xsl ${1-rcynic.xml} | @AWK@ -f @abs_builddir@/show.awk
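Once configure has substituted @XSLTPROC@, @AWK@ and @abs_builddir@, the
script reduces to a two-stage pipeline; with invented paths it comes out
roughly as:

    # show.xsl flattens rcynic.xml into tab-separated text; show.awk lines
    # the columns up and drops the all-zero ones.
    xsltproc /usr/local/src/rcynic-ng/show.xsl rcynic.xml |
        awk -f /usr/local/src/rcynic-ng/show.awk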
diff --git a/rcynic-ng/show.xsl b/rcynic-ng/show.xsl
new file mode 100644
index 00000000..c6362f78
--- /dev/null
+++ b/rcynic-ng/show.xsl
@@ -0,0 +1,55 @@
+<?xml version="1.0"?>
+<!--
+ - Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
+ -
+ - Permission to use, copy, modify, and/or distribute this software for any
+ - purpose with or without fee is hereby granted, provided that the above
+ - copyright notice and this permission notice appear in all copies.
+ -
+ - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
+ - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
+ - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
+ - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
+ - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
+ - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
+ - PERFORMANCE OF THIS SOFTWARE.
+ -->
+
+<!-- $Id$ -->
+
+<!--
+ - XSL stylesheet to render rcynic's xml-summary output as tab-delimited text.
+ -->
+
+<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
+
+ <xsl:output method="text" encoding="US-ASCII"/>
+
+ <xsl:template match="/">
+ <xsl:for-each select="rcynic-summary/labels/*">
+ <xsl:if test="position() != 1">
+ <xsl:text>&#9;</xsl:text>
+ </xsl:if>
+ <xsl:apply-templates/>
+ </xsl:for-each>
+ <xsl:text>&#10;</xsl:text>
+ <xsl:for-each select="rcynic-summary/host">
+ <xsl:sort order="descending" data-type="number" select="sum(*[not(self::hostname)])"/>
+ <xsl:sort order="ascending" data-type="text" select="hostname"/>
+ <xsl:for-each select="*">
+ <xsl:if test="position() != 1">
+ <xsl:text>&#9;</xsl:text>
+ </xsl:if>
+ <xsl:apply-templates/>
+ </xsl:for-each>
+ <xsl:text>&#10;</xsl:text>
+ </xsl:for-each>
+ </xsl:template>
+
+</xsl:stylesheet>
+
+<!--
+ - Local variables:
+ - mode: sgml
+ - End:
+ -->
diff --git a/rcynic-ng/static-rsync/Makefile.in b/rcynic-ng/static-rsync/Makefile.in
new file mode 100644
index 00000000..9c8556e0
--- /dev/null
+++ b/rcynic-ng/static-rsync/Makefile.in
@@ -0,0 +1,41 @@
+# $Id$
+
+VERSION = 2.6.9
+
+CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
+CFG_ARG =
+
+TARBALL = rsync-${VERSION}.tar.gz
+DIRNAME = rsync-${VERSION}
+
+CFG_LOG = > ../config.log 2>&1
+BIN_LOG = > ../build.log 2>&1
+
+BIN = rsync
+
+all: ${BIN}
+
+${BIN}: ${DIRNAME}/${BIN}
+ ln ${DIRNAME}/${BIN} $@
+ file $@
+
+${DIRNAME}/${BIN}: configured.stamp
+ cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
+
+extracted.stamp: ${TARBALL}
+ gzip -c -d ${TARBALL} | tar -xf -
+ touch $@
+
+patched.stamp: extracted.stamp
+ for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
+ touch $@
+
+configured.stamp: patched.stamp
+ cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
+ touch $@
+
+clean:
+ rm -rf ${BIN} ${DIRNAME} *.stamp *.log
+
+distclean: clean
+ rm -f Makefile
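The stamp files give this Makefile a strictly linear flow: extract the
bundled tarball, apply whatever lives in patches/patch-*, configure with the
static link flags, build, then hard-link the binary into the current
directory.  A typical invocation, with the log names coming from CFG_LOG and
BIN_LOG above:

    make            # leaves ./rsync plus config.log and build.log here
    file rsync      # the link rule runs this too, as a quick sanity check
    make clean      # removes the binary, the unpacked tree, stamps and logs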
diff --git a/rcynic-ng/static-rsync/README b/rcynic-ng/static-rsync/README
new file mode 100644
index 00000000..9ff5afa8
--- /dev/null
+++ b/rcynic-ng/static-rsync/README
@@ -0,0 +1,15 @@
+$Id$
+
+Hack to build a static rsync binary suitable for use in a chroot jail.
+
+The default configuration is for gcc, since that's the most widely
+used compiler on the platforms we use. I've provided hooks intended
+to make it simple to support other compilers just by overriding make
+variables on the command line: if you need to do something more
+drastic than this to get your compiler working, please tell me.
+
+If your platform doesn't support static binaries at all, you're on
+your own (and should whine at your OS vendor, as this is nuts).
+
+We try to stick with rsync release code, but apply security patches
+when necessary.
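Since command-line assignments override the Makefile's defaults, using a
different compiler is just a matter of redefining CFG_ENV (and, if needed,
CFG_ARG) when invoking make.  A sketch for a hypothetical clang build -- the
compiler name and flags are assumptions, not tested configurations:

    # configure picks up CC, CFLAGS and LDFLAGS from its environment.
    make CFG_ENV="CC=clang CFLAGS='-O2' LDFLAGS='-static'"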
diff --git a/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091 b/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091
new file mode 100644
index 00000000..201af96a
--- /dev/null
+++ b/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091
@@ -0,0 +1,60 @@
+--- sender.c 2006-09-20 03:53:32.000000000 +0200
++++ sender.c 2007-07-25 15:33:05.000000000 +0200
+@@ -123,6 +123,7 @@
+ char fname[MAXPATHLEN];
+ struct file_struct *file;
+ unsigned int offset;
++ size_t l = 0;
+
+ if (ndx < 0 || ndx >= the_file_list->count)
+ return;
+@@ -133,6 +134,20 @@
+ file->dir.root, "/", NULL);
+ } else
+ offset = 0;
++
++ l = offset + 1;
++ if (file) {
++ if (file->dirname)
++ l += strlen(file->dirname);
++ if (file->basename)
++ l += strlen(file->basename);
++ }
++
++ if (l >= sizeof(fname)) {
++ rprintf(FERROR, "Overlong pathname\n");
++ exit_cleanup(RERR_FILESELECT);
++ }
++
+ f_name(file, fname + offset);
+ if (remove_source_files) {
+ if (do_unlink(fname) == 0) {
+@@ -224,6 +239,7 @@
+ enum logcode log_code = log_before_transfer ? FLOG : FINFO;
+ int f_xfer = write_batch < 0 ? batch_fd : f_out;
+ int i, j;
++ size_t l = 0;
+
+ if (verbose > 2)
+ rprintf(FINFO, "send_files starting\n");
+@@ -259,6 +275,20 @@
+ fname[offset++] = '/';
+ } else
+ offset = 0;
++
++ l = offset + 1;
++ if (file) {
++ if (file->dirname)
++ l += strlen(file->dirname);
++ if (file->basename)
++ l += strlen(file->basename);
++ }
++
++ if (l >= sizeof(fname)) {
++ rprintf(FERROR, "Overlong pathname\n");
++ exit_cleanup(RERR_FILESELECT);
++ }
++
+ fname2 = f_name(file, fname + offset);
+
+ if (verbose > 2)
diff --git a/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz b/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz
new file mode 100644
index 00000000..6377f639
--- /dev/null
+++ b/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz
Binary files differ