Diffstat (limited to 'rcynic-ng')
-rw-r--r--  rcynic-ng/Doxyfile  1679
-rw-r--r--  rcynic-ng/Makefile.in  77
-rw-r--r--  rcynic-ng/README  582
-rw-r--r--  rcynic-ng/bio_f_linebreak.c  268
-rw-r--r--  rcynic-ng/bio_f_linebreak.h  10
-rw-r--r--  rcynic-ng/defasn1.h  140
-rw-r--r--  rcynic-ng/defstack.awk  71
-rwxr-xr-x  rcynic-ng/installation-scripts/darwin/RCynic/RCynic  75
-rw-r--r--  rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist  19
-rw-r--r--  rcynic-ng/installation-scripts/darwin/install.sh  193
-rw-r--r--  rcynic-ng/installation-scripts/deinstall.sh  5
-rw-r--r--  rcynic-ng/installation-scripts/freebsd/install.sh  151
-rwxr-xr-x  rcynic-ng/installation-scripts/freebsd/rc.d.rcynic  71
-rw-r--r--  rcynic-ng/installation-scripts/install.sh  14
-rw-r--r--  rcynic-ng/installation-scripts/linux/install.sh.in  236
-rw-r--r--  rcynic-ng/installation-scripts/sample-rcynic.conf  27
-rwxr-xr-x  rcynic-ng/make-tal.sh  42
-rw-r--r--  rcynic-ng/rcynic.c  4644
-rw-r--r--  rcynic-ng/rcynic.xsl  312
-rw-r--r--  rcynic-ng/sample-trust-anchors/README  13
-rw-r--r--  rcynic-ng/sample-trust-anchors/afrinic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/apnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/bbn-testbed.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/lacnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal  9
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-apnic.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-arin.tal  8
-rw-r--r--  rcynic-ng/sample-trust-anchors/testbed-ripe.tal  8
-rw-r--r--  rcynic-ng/show.awk  93
-rw-r--r--  rcynic-ng/show.sh.in  20
-rw-r--r--  rcynic-ng/show.xsl  59
-rw-r--r--  rcynic-ng/static-rsync/Makefile.in  41
-rw-r--r--  rcynic-ng/static-rsync/README  15
-rw-r--r--  rcynic-ng/static-rsync/patches/patch-CVE-2007-4091  60
-rw-r--r--  rcynic-ng/static-rsync/rsync-2.6.9.tar.gz  bin  811841 -> 0 bytes
-rw-r--r--  rcynic-ng/validation_status.xsl  47
37 files changed, 0 insertions, 9037 deletions
diff --git a/rcynic-ng/Doxyfile b/rcynic-ng/Doxyfile
deleted file mode 100644
index bac51c4f..00000000
--- a/rcynic-ng/Doxyfile
+++ /dev/null
@@ -1,1679 +0,0 @@
-# Doxyfile 1.7.3
-
-# This file describes the settings to be used by the documentation system
-# doxygen (www.doxygen.org) for a project.
-#
-# All text after a hash (#) is considered a comment and will be ignored.
-# The format is:
-# TAG = value [value, ...]
-# For lists items can also be appended using:
-# TAG += value [value, ...]
-# Values that contain spaces should be placed between quotes (" ").
-
-#---------------------------------------------------------------------------
-# Project related configuration options
-#---------------------------------------------------------------------------
-
-# This tag specifies the encoding used for all characters in the config file
-# that follow. The default is UTF-8 which is also the encoding used for all
-# text before the first occurrence of this tag. Doxygen uses libiconv (or the
-# iconv built into libc) for the transcoding. See
-# http://www.gnu.org/software/libiconv for the list of possible encodings.
-
-DOXYFILE_ENCODING = UTF-8
-
-# The PROJECT_NAME tag is a single word (or a sequence of words surrounded
-# by quotes) that should identify the project.
-
-PROJECT_NAME = rcynic
-
-# The PROJECT_NUMBER tag can be used to enter a project or revision number.
-# This could be handy for archiving the generated documentation or
-# if some version control system is used.
-
-PROJECT_NUMBER = 1.0
-
-# Using the PROJECT_BRIEF tag one can provide an optional one line description
-# for a project that appears at the top of each page and should give the
-# viewer a quick idea about the purpose of the project. Keep the description
-# short.
-
-PROJECT_BRIEF =
-
-# With the PROJECT_LOGO tag one can specify a logo or icon that is
-# included in the documentation. The maximum height of the logo should not
-# exceed 55 pixels and the maximum width should not exceed 200 pixels.
-# Doxygen will copy the logo to the output directory.
-
-PROJECT_LOGO =
-
-# The OUTPUT_DIRECTORY tag is used to specify the (relative or absolute)
-# base path where the generated documentation will be put.
-# If a relative path is entered, it will be relative to the location
-# where doxygen was started. If left blank the current directory will be used.
-
-OUTPUT_DIRECTORY = doc
-
-# If the CREATE_SUBDIRS tag is set to YES, then doxygen will create
-# 4096 sub-directories (in 2 levels) under the output directory of each output
-# format and will distribute the generated files over these directories.
-# Enabling this option can be useful when feeding doxygen a huge amount of
-# source files, where putting all generated files in the same directory would
-# otherwise cause performance problems for the file system.
-
-CREATE_SUBDIRS = NO
-
-# The OUTPUT_LANGUAGE tag is used to specify the language in which all
-# documentation generated by doxygen is written. Doxygen will use this
-# information to generate all constant output in the proper language.
-# The default language is English, other supported languages are:
-# Afrikaans, Arabic, Brazilian, Catalan, Chinese, Chinese-Traditional,
-# Croatian, Czech, Danish, Dutch, Esperanto, Farsi, Finnish, French, German,
-# Greek, Hungarian, Italian, Japanese, Japanese-en (Japanese with English
-# messages), Korean, Korean-en, Lithuanian, Norwegian, Macedonian, Persian,
-# Polish, Portuguese, Romanian, Russian, Serbian, Serbian-Cyrillic, Slovak,
-# Slovene, Spanish, Swedish, Ukrainian, and Vietnamese.
-
-OUTPUT_LANGUAGE = English
-
-# If the BRIEF_MEMBER_DESC tag is set to YES (the default) Doxygen will
-# include brief member descriptions after the members that are listed in
-# the file and class documentation (similar to JavaDoc).
-# Set to NO to disable this.
-
-BRIEF_MEMBER_DESC = YES
-
-# If the REPEAT_BRIEF tag is set to YES (the default) Doxygen will prepend
-# the brief description of a member or function before the detailed description.
-# Note: if both HIDE_UNDOC_MEMBERS and BRIEF_MEMBER_DESC are set to NO, the
-# brief descriptions will be completely suppressed.
-
-REPEAT_BRIEF = YES
-
-# This tag implements a quasi-intelligent brief description abbreviator
-# that is used to form the text in various listings. Each string
-# in this list, if found as the leading text of the brief description, will be
-# stripped from the text and the result after processing the whole list, is
-# used as the annotated text. Otherwise, the brief description is used as-is.
-# If left blank, the following values are used ("$name" is automatically
-# replaced with the name of the entity): "The $name class" "The $name widget"
-# "The $name file" "is" "provides" "specifies" "contains"
-# "represents" "a" "an" "the"
-
-ABBREVIATE_BRIEF =
-
-# If the ALWAYS_DETAILED_SEC and REPEAT_BRIEF tags are both set to YES then
-# Doxygen will generate a detailed section even if there is only a brief
-# description.
-
-ALWAYS_DETAILED_SEC = NO
-
-# If the INLINE_INHERITED_MEMB tag is set to YES, doxygen will show all
-# inherited members of a class in the documentation of that class as if those
-# members were ordinary class members. Constructors, destructors and assignment
-# operators of the base classes will not be shown.
-
-INLINE_INHERITED_MEMB = NO
-
-# If the FULL_PATH_NAMES tag is set to YES then Doxygen will prepend the full
-# path before files name in the file list and in the header files. If set
-# to NO the shortest path that makes the file name unique will be used.
-
-FULL_PATH_NAMES = YES
-
-# If the FULL_PATH_NAMES tag is set to YES then the STRIP_FROM_PATH tag
-# can be used to strip a user-defined part of the path. Stripping is
-# only done if one of the specified strings matches the left-hand part of
-# the path. The tag can be used to show relative paths in the file list.
-# If left blank the directory from which doxygen is run is used as the
-# path to strip.
-
-STRIP_FROM_PATH =
-
-# The STRIP_FROM_INC_PATH tag can be used to strip a user-defined part of
-# the path mentioned in the documentation of a class, which tells
-# the reader which header file to include in order to use a class.
-# If left blank only the name of the header file containing the class
-# definition is used. Otherwise one should specify the include paths that
-# are normally passed to the compiler using the -I flag.
-
-STRIP_FROM_INC_PATH =
-
-# If the SHORT_NAMES tag is set to YES, doxygen will generate much shorter
-# (but less readable) file names. This can be useful if your file system
-# doesn't support long names like on DOS, Mac, or CD-ROM.
-
-SHORT_NAMES = NO
-
-# If the JAVADOC_AUTOBRIEF tag is set to YES then Doxygen
-# will interpret the first line (until the first dot) of a JavaDoc-style
-# comment as the brief description. If set to NO, the JavaDoc
-# comments will behave just like regular Qt-style comments
-# (thus requiring an explicit @brief command for a brief description.)
-
-JAVADOC_AUTOBRIEF = NO
-
-# If the QT_AUTOBRIEF tag is set to YES then Doxygen will
-# interpret the first line (until the first dot) of a Qt-style
-# comment as the brief description. If set to NO, the comments
-# will behave just like regular Qt-style comments (thus requiring
-# an explicit \brief command for a brief description.)
-
-QT_AUTOBRIEF = NO
-
-# The MULTILINE_CPP_IS_BRIEF tag can be set to YES to make Doxygen
-# treat a multi-line C++ special comment block (i.e. a block of //! or ///
-# comments) as a brief description. This used to be the default behaviour.
-# The new default is to treat a multi-line C++ comment block as a detailed
-# description. Set this tag to YES if you prefer the old behaviour instead.
-
-MULTILINE_CPP_IS_BRIEF = NO
-
-# If the INHERIT_DOCS tag is set to YES (the default) then an undocumented
-# member inherits the documentation from any documented member that it
-# re-implements.
-
-INHERIT_DOCS = YES
-
-# If the SEPARATE_MEMBER_PAGES tag is set to YES, then doxygen will produce
-# a new page for each member. If set to NO, the documentation of a member will
-# be part of the file/class/namespace that contains it.
-
-SEPARATE_MEMBER_PAGES = YES
-
-# The TAB_SIZE tag can be used to set the number of spaces in a tab.
-# Doxygen uses this value to replace tabs by spaces in code fragments.
-
-TAB_SIZE = 8
-
-# This tag can be used to specify a number of aliases that acts
-# as commands in the documentation. An alias has the form "name=value".
-# For example adding "sideeffect=\par Side Effects:\n" will allow you to
-# put the command \sideeffect (or @sideeffect) in the documentation, which
-# will result in a user-defined paragraph with heading "Side Effects:".
-# You can put \n's in the value part of an alias to insert newlines.
-
-ALIASES =
-
-# Set the OPTIMIZE_OUTPUT_FOR_C tag to YES if your project consists of C
-# sources only. Doxygen will then generate output that is more tailored for C.
-# For instance, some of the names that are used will be different. The list
-# of all members will be omitted, etc.
-
-OPTIMIZE_OUTPUT_FOR_C = YES
-
-# Set the OPTIMIZE_OUTPUT_JAVA tag to YES if your project consists of Java
-# sources only. Doxygen will then generate output that is more tailored for
-# Java. For instance, namespaces will be presented as packages, qualified
-# scopes will look different, etc.
-
-OPTIMIZE_OUTPUT_JAVA = NO
-
-# Set the OPTIMIZE_FOR_FORTRAN tag to YES if your project consists of Fortran
-# sources only. Doxygen will then generate output that is more tailored for
-# Fortran.
-
-OPTIMIZE_FOR_FORTRAN = NO
-
-# Set the OPTIMIZE_OUTPUT_VHDL tag to YES if your project consists of VHDL
-# sources. Doxygen will then generate output that is tailored for
-# VHDL.
-
-OPTIMIZE_OUTPUT_VHDL = NO
-
-# Doxygen selects the parser to use depending on the extension of the files it
-# parses. With this tag you can assign which parser to use for a given extension.
-# Doxygen has a built-in mapping, but you can override or extend it using this
-# tag. The format is ext=language, where ext is a file extension, and language
-# is one of the parsers supported by doxygen: IDL, Java, Javascript, CSharp, C,
-# C++, D, PHP, Objective-C, Python, Fortran, VHDL, C, C++. For instance to make
-# doxygen treat .inc files as Fortran files (default is PHP), and .f files as C
-# (default is Fortran), use: inc=Fortran f=C. Note that for custom extensions
-# you also need to set FILE_PATTERNS otherwise the files are not read by doxygen.
-
-EXTENSION_MAPPING =
-
-# If you use STL classes (i.e. std::string, std::vector, etc.) but do not want
-# to include (a tag file for) the STL sources as input, then you should
-# set this tag to YES in order to let doxygen match functions declarations and
-# definitions whose arguments contain STL classes (e.g. func(std::string); vs.
-# func(std::string) {}). This also makes the inheritance and collaboration
-# diagrams that involve STL classes more complete and accurate.
-
-BUILTIN_STL_SUPPORT = NO
-
-# If you use Microsoft's C++/CLI language, you should set this option to YES to
-# enable parsing support.
-
-CPP_CLI_SUPPORT = NO
-
-# Set the SIP_SUPPORT tag to YES if your project consists of sip sources only.
-# Doxygen will parse them like normal C++ but will assume all classes use public
-# instead of private inheritance when no explicit protection keyword is present.
-
-SIP_SUPPORT = NO
-
-# For Microsoft's IDL there are propget and propput attributes to indicate getter
-# and setter methods for a property. Setting this option to YES (the default)
-# will make doxygen replace the get and set methods by a property in the
-# documentation. This will only work if the methods are indeed getting or
-# setting a simple type. If this is not the case, or you want to show the
-# methods anyway, you should set this option to NO.
-
-IDL_PROPERTY_SUPPORT = YES
-
-# If member grouping is used in the documentation and the DISTRIBUTE_GROUP_DOC
-# tag is set to YES, then doxygen will reuse the documentation of the first
-# member in the group (if any) for the other members of the group. By default
-# all members of a group must be documented explicitly.
-
-DISTRIBUTE_GROUP_DOC = NO
-
-# Set the SUBGROUPING tag to YES (the default) to allow class member groups of
-# the same type (for instance a group of public functions) to be put as a
-# subgroup of that type (e.g. under the Public Functions section). Set it to
-# NO to prevent subgrouping. Alternatively, this can be done per class using
-# the \nosubgrouping command.
-
-SUBGROUPING = YES
-
-# When TYPEDEF_HIDES_STRUCT is enabled, a typedef of a struct, union, or enum
-# is documented as struct, union, or enum with the name of the typedef. So
-# typedef struct TypeS {} TypeT, will appear in the documentation as a struct
-# with name TypeT. When disabled the typedef will appear as a member of a file,
-# namespace, or class. And the struct will be named TypeS. This can typically
-# be useful for C code in case the coding convention dictates that all compound
-# types are typedef'ed and only the typedef is referenced, never the tag name.
-
-TYPEDEF_HIDES_STRUCT = NO
-
-# The SYMBOL_CACHE_SIZE determines the size of the internal cache used to
-# determine which symbols to keep in memory and which to flush to disk.
-# When the cache is full, less often used symbols will be written to disk.
-# For small to medium size projects (<1000 input files) the default value is
-# probably good enough. For larger projects a too small cache size can cause
-# doxygen to be busy swapping symbols to and from disk most of the time
-# causing a significant performance penalty.
-# If the system has enough physical memory increasing the cache will improve the
-# performance by keeping more symbols in memory. Note that the value works on
-# a logarithmic scale so increasing the size by one will roughly double the
-# memory usage. The cache size is given by this formula:
-# 2^(16+SYMBOL_CACHE_SIZE). The valid range is 0..9, the default is 0,
-# corresponding to a cache size of 2^16 = 65536 symbols
-
-SYMBOL_CACHE_SIZE = 0
-
-#---------------------------------------------------------------------------
-# Build related configuration options
-#---------------------------------------------------------------------------
-
-# If the EXTRACT_ALL tag is set to YES doxygen will assume all entities in
-# documentation are documented, even if no documentation was available.
-# Private class members and static file members will be hidden unless
-# the EXTRACT_PRIVATE and EXTRACT_STATIC tags are set to YES
-
-EXTRACT_ALL = YES
-
-# If the EXTRACT_PRIVATE tag is set to YES all private members of a class
-# will be included in the documentation.
-
-EXTRACT_PRIVATE = YES
-
-# If the EXTRACT_STATIC tag is set to YES all static members of a file
-# will be included in the documentation.
-
-EXTRACT_STATIC = YES
-
-# If the EXTRACT_LOCAL_CLASSES tag is set to YES classes (and structs)
-# defined locally in source files will be included in the documentation.
-# If set to NO only classes defined in header files are included.
-
-EXTRACT_LOCAL_CLASSES = YES
-
-# This flag is only useful for Objective-C code. When set to YES local
-# methods, which are defined in the implementation section but not in
-# the interface are included in the documentation.
-# If set to NO (the default) only methods in the interface are included.
-
-EXTRACT_LOCAL_METHODS = NO
-
-# If this flag is set to YES, the members of anonymous namespaces will be
-# extracted and appear in the documentation as a namespace called
-# 'anonymous_namespace{file}', where file will be replaced with the base
-# name of the file that contains the anonymous namespace. By default
-# anonymous namespaces are hidden.
-
-EXTRACT_ANON_NSPACES = YES
-
-# If the HIDE_UNDOC_MEMBERS tag is set to YES, Doxygen will hide all
-# undocumented members of documented classes, files or namespaces.
-# If set to NO (the default) these members will be included in the
-# various overviews, but no documentation section is generated.
-# This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_MEMBERS = NO
-
-# If the HIDE_UNDOC_CLASSES tag is set to YES, Doxygen will hide all
-# undocumented classes that are normally visible in the class hierarchy.
-# If set to NO (the default) these classes will be included in the various
-# overviews. This option has no effect if EXTRACT_ALL is enabled.
-
-HIDE_UNDOC_CLASSES = NO
-
-# If the HIDE_FRIEND_COMPOUNDS tag is set to YES, Doxygen will hide all
-# friend (class|struct|union) declarations.
-# If set to NO (the default) these declarations will be included in the
-# documentation.
-
-HIDE_FRIEND_COMPOUNDS = NO
-
-# If the HIDE_IN_BODY_DOCS tag is set to YES, Doxygen will hide any
-# documentation blocks found inside the body of a function.
-# If set to NO (the default) these blocks will be appended to the
-# function's detailed documentation block.
-
-HIDE_IN_BODY_DOCS = NO
-
-# The INTERNAL_DOCS tag determines if documentation
-# that is typed after a \internal command is included. If the tag is set
-# to NO (the default) then the documentation will be excluded.
-# Set it to YES to include the internal documentation.
-
-INTERNAL_DOCS = NO
-
-# If the CASE_SENSE_NAMES tag is set to NO then Doxygen will only generate
-# file names in lower-case letters. If set to YES upper-case letters are also
-# allowed. This is useful if you have classes or files whose names only differ
-# in case and if your file system supports case sensitive file names. Windows
-# and Mac users are advised to set this option to NO.
-
-CASE_SENSE_NAMES = YES
-
-# If the HIDE_SCOPE_NAMES tag is set to NO (the default) then Doxygen
-# will show members with their full class and namespace scopes in the
-# documentation. If set to YES the scope will be hidden.
-
-HIDE_SCOPE_NAMES = NO
-
-# If the SHOW_INCLUDE_FILES tag is set to YES (the default) then Doxygen
-# will put a list of the files that are included by a file in the documentation
-# of that file.
-
-SHOW_INCLUDE_FILES = NO
-
-# If the FORCE_LOCAL_INCLUDES tag is set to YES then Doxygen
-# will list include files with double quotes in the documentation
-# rather than with sharp brackets.
-
-FORCE_LOCAL_INCLUDES = NO
-
-# If the INLINE_INFO tag is set to YES (the default) then a tag [inline]
-# is inserted in the documentation for inline members.
-
-INLINE_INFO = YES
-
-# If the SORT_MEMBER_DOCS tag is set to YES (the default) then doxygen
-# will sort the (detailed) documentation of file and class members
-# alphabetically by member name. If set to NO the members will appear in
-# declaration order.
-
-SORT_MEMBER_DOCS = YES
-
-# If the SORT_BRIEF_DOCS tag is set to YES then doxygen will sort the
-# brief documentation of file, namespace and class members alphabetically
-# by member name. If set to NO (the default) the members will appear in
-# declaration order.
-
-SORT_BRIEF_DOCS = NO
-
-# If the SORT_MEMBERS_CTORS_1ST tag is set to YES then doxygen
-# will sort the (brief and detailed) documentation of class members so that
-# constructors and destructors are listed first. If set to NO (the default)
-# the constructors will appear in the respective orders defined by
-# SORT_MEMBER_DOCS and SORT_BRIEF_DOCS.
-# This tag will be ignored for brief docs if SORT_BRIEF_DOCS is set to NO
-# and ignored for detailed docs if SORT_MEMBER_DOCS is set to NO.
-
-SORT_MEMBERS_CTORS_1ST = NO
-
-# If the SORT_GROUP_NAMES tag is set to YES then doxygen will sort the
-# hierarchy of group names into alphabetical order. If set to NO (the default)
-# the group names will appear in their defined order.
-
-SORT_GROUP_NAMES = NO
-
-# If the SORT_BY_SCOPE_NAME tag is set to YES, the class list will be
-# sorted by fully-qualified names, including namespaces. If set to
-# NO (the default), the class list will be sorted only by class name,
-# not including the namespace part.
-# Note: This option is not very useful if HIDE_SCOPE_NAMES is set to YES.
-# Note: This option applies only to the class list, not to the
-# alphabetical list.
-
-SORT_BY_SCOPE_NAME = NO
-
-# If the STRICT_PROTO_MATCHING option is enabled and doxygen fails to do
-# proper type resolution of all parameters of a function it will reject a
-# match between the prototype and the implementation of a member function
-# even if there is only one candidate or it is obvious which candidate to
-# choose by doing a simple string match. By disabling STRICT_PROTO_MATCHING
-# doxygen will still accept a match between prototype and implementation in
-# such cases.
-
-STRICT_PROTO_MATCHING = NO
-
-# The GENERATE_TODOLIST tag can be used to enable (YES) or
-# disable (NO) the todo list. This list is created by putting \todo
-# commands in the documentation.
-
-GENERATE_TODOLIST = YES
-
-# The GENERATE_TESTLIST tag can be used to enable (YES) or
-# disable (NO) the test list. This list is created by putting \test
-# commands in the documentation.
-
-GENERATE_TESTLIST = YES
-
-# The GENERATE_BUGLIST tag can be used to enable (YES) or
-# disable (NO) the bug list. This list is created by putting \bug
-# commands in the documentation.
-
-GENERATE_BUGLIST = YES
-
-# The GENERATE_DEPRECATEDLIST tag can be used to enable (YES) or
-# disable (NO) the deprecated list. This list is created by putting
-# \deprecated commands in the documentation.
-
-GENERATE_DEPRECATEDLIST= YES
-
-# The ENABLED_SECTIONS tag can be used to enable conditional
-# documentation sections, marked by \if sectionname ... \endif.
-
-ENABLED_SECTIONS =
-
-# The MAX_INITIALIZER_LINES tag determines the maximum number of lines
-# the initial value of a variable or macro consists of for it to appear in
-# the documentation. If the initializer consists of more lines than specified
-# here it will be hidden. Use a value of 0 to hide initializers completely.
-# The appearance of the initializer of individual variables and macros in the
-# documentation can be controlled using \showinitializer or \hideinitializer
-# command in the documentation regardless of this setting.
-
-MAX_INITIALIZER_LINES = 30
-
-# Set the SHOW_USED_FILES tag to NO to disable the list of files generated
-# at the bottom of the documentation of classes and structs. If set to YES the
-# list will mention the files that were used to generate the documentation.
-
-SHOW_USED_FILES = YES
-
-# If the sources in your project are distributed over multiple directories
-# then setting the SHOW_DIRECTORIES tag to YES will show the directory hierarchy
-# in the documentation. The default is NO.
-
-SHOW_DIRECTORIES = NO
-
-# Set the SHOW_FILES tag to NO to disable the generation of the Files page.
-# This will remove the Files entry from the Quick Index and from the
-# Folder Tree View (if specified). The default is YES.
-
-SHOW_FILES = YES
-
-# Set the SHOW_NAMESPACES tag to NO to disable the generation of the
-# Namespaces page.
-# This will remove the Namespaces entry from the Quick Index
-# and from the Folder Tree View (if specified). The default is YES.
-
-SHOW_NAMESPACES = YES
-
-# The FILE_VERSION_FILTER tag can be used to specify a program or script that
-# doxygen should invoke to get the current version for each file (typically from
-# the version control system). Doxygen will invoke the program by executing (via
-# popen()) the command <command> <input-file>, where <command> is the value of
-# the FILE_VERSION_FILTER tag, and <input-file> is the name of an input file
-# provided by doxygen. Whatever the program writes to standard output
-# is used as the file version. See the manual for examples.
-
-FILE_VERSION_FILTER =
-
-# The LAYOUT_FILE tag can be used to specify a layout file which will be parsed
-# by doxygen. The layout file controls the global structure of the generated
-# output files in an output format independent way. To create the layout file
-# that represents doxygen's defaults, run doxygen with the -l option.
-# You can optionally specify a file name after the option, if omitted
-# DoxygenLayout.xml will be used as the name of the layout file.
-
-LAYOUT_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to warning and progress messages
-#---------------------------------------------------------------------------
-
-# The QUIET tag can be used to turn on/off the messages that are generated
-# by doxygen. Possible values are YES and NO. If left blank NO is used.
-
-QUIET = YES
-
-# The WARNINGS tag can be used to turn on/off the warning messages that are
-# generated by doxygen. Possible values are YES and NO. If left blank
-# NO is used.
-
-WARNINGS = YES
-
-# If WARN_IF_UNDOCUMENTED is set to YES, then doxygen will generate warnings
-# for undocumented members. If EXTRACT_ALL is set to YES then this flag will
-# automatically be disabled.
-
-WARN_IF_UNDOCUMENTED = YES
-
-# If WARN_IF_DOC_ERROR is set to YES, doxygen will generate warnings for
-# potential errors in the documentation, such as not documenting some
-# parameters in a documented function, or documenting parameters that
-# don't exist or using markup commands wrongly.
-
-WARN_IF_DOC_ERROR = YES
-
-# The WARN_NO_PARAMDOC option can be enabled to get warnings for
-# functions that are documented, but have no documentation for their parameters
-# or return value. If set to NO (the default) doxygen will only warn about
-# wrong or incomplete parameter documentation, but not about the absence of
-# documentation.
-
-WARN_NO_PARAMDOC = NO
-
-# The WARN_FORMAT tag determines the format of the warning messages that
-# doxygen can produce. The string should contain the $file, $line, and $text
-# tags, which will be replaced by the file and line number from which the
-# warning originated and the warning text. Optionally the format may contain
-# $version, which will be replaced by the version of the file (if it could
-# be obtained via FILE_VERSION_FILTER)
-
-WARN_FORMAT = "$file:$line: $text"
-
-# The WARN_LOGFILE tag can be used to specify a file to which warning
-# and error messages should be written. If left blank the output is written
-# to stderr.
-
-WARN_LOGFILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the input files
-#---------------------------------------------------------------------------
-
-# The INPUT tag can be used to specify the files and/or directories that contain
-# documented source files. You may enter file names like "myfile.cpp" or
-# directories like "/usr/src/myproject". Separate the files or directories
-# with spaces.
-
-INPUT = rcynic.c
-
-# This tag can be used to specify the character encoding of the source files
-# that doxygen parses. Internally doxygen uses the UTF-8 encoding, which is
-# also the default input encoding. Doxygen uses libiconv (or the iconv built
-# into libc) for the transcoding. See http://www.gnu.org/software/libiconv for
-# the list of possible encodings.
-
-INPUT_ENCODING = UTF-8
-
-# If the value of the INPUT tag contains directories, you can use the
-# FILE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank the following patterns are tested:
-# *.c *.cc *.cxx *.cpp *.c++ *.d *.java *.ii *.ixx *.ipp *.i++ *.inl *.h *.hh
-# *.hxx *.hpp *.h++ *.idl *.odl *.cs *.php *.php3 *.inc *.m *.mm *.dox *.py
-# *.f90 *.f *.for *.vhd *.vhdl
-
-FILE_PATTERNS =
-
-# The RECURSIVE tag can be used to specify whether or not subdirectories
-# should be searched for input files as well. Possible values are YES and NO.
-# If left blank NO is used.
-
-RECURSIVE = NO
-
-# The EXCLUDE tag can be used to specify files and/or directories that should
-# be excluded from the INPUT source files. This way you can easily exclude a
-# subdirectory from a directory tree whose root is specified with the INPUT tag.
-
-EXCLUDE =
-
-# The EXCLUDE_SYMLINKS tag can be used to select whether or not files or
-# directories that are symbolic links (a Unix file system feature) are excluded
-# from the input.
-
-EXCLUDE_SYMLINKS = NO
-
-# If the value of the INPUT tag contains directories, you can use the
-# EXCLUDE_PATTERNS tag to specify one or more wildcard patterns to exclude
-# certain files from those directories. Note that the wildcards are matched
-# against the file with absolute path, so to exclude all test directories
-# for example use the pattern */test/*
-
-EXCLUDE_PATTERNS =
-
-# The EXCLUDE_SYMBOLS tag can be used to specify one or more symbol names
-# (namespaces, classes, functions, etc.) that should be excluded from the
-# output. The symbol name can be a fully qualified name, a word, or if the
-# wildcard * is used, a substring. Examples: ANamespace, AClass,
-# AClass::ANamespace, ANamespace::*Test
-
-EXCLUDE_SYMBOLS =
-
-# The EXAMPLE_PATH tag can be used to specify one or more files or
-# directories that contain example code fragments that are included (see
-# the \include command).
-
-EXAMPLE_PATH =
-
-# If the value of the EXAMPLE_PATH tag contains directories, you can use the
-# EXAMPLE_PATTERNS tag to specify one or more wildcard patterns (like *.cpp
-# and *.h) to filter out the source-files in the directories. If left
-# blank all files are included.
-
-EXAMPLE_PATTERNS =
-
-# If the EXAMPLE_RECURSIVE tag is set to YES then subdirectories will be
-# searched for input files to be used with the \include or \dontinclude
-# commands irrespective of the value of the RECURSIVE tag.
-# Possible values are YES and NO. If left blank NO is used.
-
-EXAMPLE_RECURSIVE = NO
-
-# The IMAGE_PATH tag can be used to specify one or more files or
-# directories that contain images that are included in the documentation (see
-# the \image command).
-
-IMAGE_PATH =
-
-# The INPUT_FILTER tag can be used to specify a program that doxygen should
-# invoke to filter for each input file. Doxygen will invoke the filter program
-# by executing (via popen()) the command <filter> <input-file>, where <filter>
-# is the value of the INPUT_FILTER tag, and <input-file> is the name of an
-# input file. Doxygen will then use the output that the filter program writes
-# to standard output. If FILTER_PATTERNS is specified, this tag will be
-# ignored.
-
-INPUT_FILTER =
-
-# The FILTER_PATTERNS tag can be used to specify filters on a per file pattern
-# basis. Doxygen will compare the file name with each pattern and apply the
-# filter if there is a match. The filters are a list of the form:
-# pattern=filter (like *.cpp=my_cpp_filter). See INPUT_FILTER for further
-# info on how filters are used. If FILTER_PATTERNS is empty or if
-# none of the patterns match the file name, INPUT_FILTER is applied.
-
-FILTER_PATTERNS =
-
-# If the FILTER_SOURCE_FILES tag is set to YES, the input filter (if set using
-# INPUT_FILTER) will be used to filter the input files when producing source
-# files to browse (i.e. when SOURCE_BROWSER is set to YES).
-
-FILTER_SOURCE_FILES = NO
-
-# The FILTER_SOURCE_PATTERNS tag can be used to specify source filters per file
-# pattern. A pattern will override the setting for FILTER_PATTERN (if any)
-# and it is also possible to disable source filtering for a specific pattern
-# using *.ext= (so without naming a filter). This option only has effect when
-# FILTER_SOURCE_FILES is enabled.
-
-FILTER_SOURCE_PATTERNS =
-
-#---------------------------------------------------------------------------
-# configuration options related to source browsing
-#---------------------------------------------------------------------------
-
-# If the SOURCE_BROWSER tag is set to YES then a list of source files will
-# be generated. Documented entities will be cross-referenced with these sources.
-# Note: To get rid of all source code in the generated output, make sure also
-# VERBATIM_HEADERS is set to NO.
-
-SOURCE_BROWSER = YES
-
-# Setting the INLINE_SOURCES tag to YES will include the body
-# of functions and classes directly in the documentation.
-
-INLINE_SOURCES = NO
-
-# Setting the STRIP_CODE_COMMENTS tag to YES (the default) will instruct
-# doxygen to hide any special comment blocks from generated source code
-# fragments. Normal C and C++ comments will always remain visible.
-
-STRIP_CODE_COMMENTS = NO
-
-# If the REFERENCED_BY_RELATION tag is set to YES
-# then for each documented function all documented
-# functions referencing it will be listed.
-
-REFERENCED_BY_RELATION = YES
-
-# If the REFERENCES_RELATION tag is set to YES
-# then for each documented function all documented entities
-# called/used by that function will be listed.
-
-REFERENCES_RELATION = YES
-
-# If the REFERENCES_LINK_SOURCE tag is set to YES (the default)
-# and SOURCE_BROWSER tag is set to YES, then the hyperlinks from
-# functions in REFERENCES_RELATION and REFERENCED_BY_RELATION lists will
-# link to the source code.
-# Otherwise they will link to the documentation.
-
-REFERENCES_LINK_SOURCE = YES
-
-# If the USE_HTAGS tag is set to YES then the references to source code
-# will point to the HTML generated by the htags(1) tool instead of doxygen
-# built-in source browser. The htags tool is part of GNU's global source
-# tagging system (see http://www.gnu.org/software/global/global.html). You
-# will need version 4.8.6 or higher.
-
-USE_HTAGS = NO
-
-# If the VERBATIM_HEADERS tag is set to YES (the default) then Doxygen
-# will generate a verbatim copy of the header file for each class for
-# which an include is specified. Set to NO to disable this.
-
-VERBATIM_HEADERS = YES
-
-#---------------------------------------------------------------------------
-# configuration options related to the alphabetical class index
-#---------------------------------------------------------------------------
-
-# If the ALPHABETICAL_INDEX tag is set to YES, an alphabetical index
-# of all compounds will be generated. Enable this if the project
-# contains a lot of classes, structs, unions or interfaces.
-
-ALPHABETICAL_INDEX = YES
-
-# If the alphabetical index is enabled (see ALPHABETICAL_INDEX) then
-# the COLS_IN_ALPHA_INDEX tag can be used to specify the number of columns
-# in which this list will be split (can be a number in the range [1..20])
-
-COLS_IN_ALPHA_INDEX = 5
-
-# In case all classes in a project start with a common prefix, all
-# classes will be put under the same header in the alphabetical index.
-# The IGNORE_PREFIX tag can be used to specify one or more prefixes that
-# should be ignored while generating the index headers.
-
-IGNORE_PREFIX =
-
-#---------------------------------------------------------------------------
-# configuration options related to the HTML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_HTML tag is set to YES (the default) Doxygen will
-# generate HTML output.
-
-GENERATE_HTML = YES
-
-# The HTML_OUTPUT tag is used to specify where the HTML docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `html' will be used as the default path.
-
-HTML_OUTPUT = html
-
-# The HTML_FILE_EXTENSION tag can be used to specify the file extension for
-# each generated HTML page (for example: .htm,.php,.asp). If it is left blank
-# doxygen will generate files with .html extension.
-
-HTML_FILE_EXTENSION = .html
-
-# The HTML_HEADER tag can be used to specify a personal HTML header for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard header.
-
-HTML_HEADER =
-
-# The HTML_FOOTER tag can be used to specify a personal HTML footer for
-# each generated HTML page. If it is left blank doxygen will generate a
-# standard footer.
-
-HTML_FOOTER =
-
-# The HTML_STYLESHEET tag can be used to specify a user-defined cascading
-# style sheet that is used by each HTML page. It can be used to
-# fine-tune the look of the HTML output. If the tag is left blank doxygen
-# will generate a default style sheet. Note that doxygen will try to copy
-# the style sheet file to the HTML output directory, so don't put your own
-# stylesheet in the HTML output directory as well, or it will be erased!
-
-HTML_STYLESHEET =
-
-# The HTML_COLORSTYLE_HUE tag controls the color of the HTML output.
-# Doxygen will adjust the colors in the stylesheet and background images
-# according to this color. Hue is specified as an angle on a colorwheel,
-# see http://en.wikipedia.org/wiki/Hue for more information.
-# For instance the value 0 represents red, 60 is yellow, 120 is green,
-# 180 is cyan, 240 is blue, 300 purple, and 360 is red again.
-# The allowed range is 0 to 359.
-
-HTML_COLORSTYLE_HUE = 220
-
-# The HTML_COLORSTYLE_SAT tag controls the purity (or saturation) of
-# the colors in the HTML output. For a value of 0 the output will use
-# grayscales only. A value of 255 will produce the most vivid colors.
-
-HTML_COLORSTYLE_SAT = 100
-
-# The HTML_COLORSTYLE_GAMMA tag controls the gamma correction applied to
-# the luminance component of the colors in the HTML output. Values below
-# 100 gradually make the output lighter, whereas values above 100 make
-# the output darker. The value divided by 100 is the actual gamma applied,
-# so 80 represents a gamma of 0.8, The value 220 represents a gamma of 2.2,
-# and 100 does not change the gamma.
-
-HTML_COLORSTYLE_GAMMA = 80
-
-# If the HTML_TIMESTAMP tag is set to YES then the footer of each generated HTML
-# page will contain the date and time when the page was generated. Setting
-# this to NO can help when comparing the output of multiple runs.
-
-HTML_TIMESTAMP = YES
-
-# If the HTML_ALIGN_MEMBERS tag is set to YES, the members of classes,
-# files or namespaces will be aligned in HTML using tables. If set to
-# NO a bullet list will be used.
-
-HTML_ALIGN_MEMBERS = YES
-
-# If the HTML_DYNAMIC_SECTIONS tag is set to YES then the generated HTML
-# documentation will contain sections that can be hidden and shown after the
-# page has loaded. For this to work a browser that supports
-# JavaScript and DHTML is required (for instance Mozilla 1.0+, Firefox,
-# Netscape 6.0+, Internet Explorer 5.0+, Konqueror, or Safari).
-
-HTML_DYNAMIC_SECTIONS = NO
-
-# If the GENERATE_DOCSET tag is set to YES, additional index files
-# will be generated that can be used as input for Apple's Xcode 3
-# integrated development environment, introduced with OSX 10.5 (Leopard).
-# To create a documentation set, doxygen will generate a Makefile in the
-# HTML output directory. Running make will produce the docset in that
-# directory and running "make install" will install the docset in
-# ~/Library/Developer/Shared/Documentation/DocSets so that Xcode will find
-# it at startup.
-# See http://developer.apple.com/tools/creatingdocsetswithdoxygen.html
-# for more information.
-
-GENERATE_DOCSET = NO
-
-# When GENERATE_DOCSET tag is set to YES, this tag determines the name of the
-# feed. A documentation feed provides an umbrella under which multiple
-# documentation sets from a single provider (such as a company or product suite)
-# can be grouped.
-
-DOCSET_FEEDNAME = "Doxygen generated docs"
-
-# When GENERATE_DOCSET tag is set to YES, this tag specifies a string that
-# should uniquely identify the documentation set bundle. This should be a
-# reverse domain-name style string, e.g. com.mycompany.MyDocSet. Doxygen
-# will append .docset to the name.
-
-DOCSET_BUNDLE_ID = org.doxygen.Project
-
-# The DOCSET_PUBLISHER_ID tag specifies a string that should uniquely identify
-# the documentation publisher. This should be a reverse domain-name style
-# string, e.g. com.mycompany.MyDocSet.documentation.
-
-DOCSET_PUBLISHER_ID = org.doxygen.Publisher
-
-# The DOCSET_PUBLISHER_NAME tag identifies the documentation publisher.
-
-DOCSET_PUBLISHER_NAME = Publisher
-
-# If the GENERATE_HTMLHELP tag is set to YES, additional index files
-# will be generated that can be used as input for tools like the
-# Microsoft HTML help workshop to generate a compiled HTML help file (.chm)
-# of the generated HTML documentation.
-
-GENERATE_HTMLHELP = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_FILE tag can
-# be used to specify the file name of the resulting .chm file. You
-# can add a path in front of the file if the result should not be
-# written to the html output directory.
-
-CHM_FILE =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the HHC_LOCATION tag can
-# be used to specify the location (absolute path including file name) of
-# the HTML help compiler (hhc.exe). If non-empty doxygen will try to run
-# the HTML help compiler on the generated index.hhp.
-
-HHC_LOCATION =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the GENERATE_CHI flag
-# controls if a separate .chi index file is generated (YES) or that
-# it should be included in the master .chm file (NO).
-
-GENERATE_CHI = NO
-
-# If the GENERATE_HTMLHELP tag is set to YES, the CHM_INDEX_ENCODING
-# is used to encode HtmlHelp index (hhk), content (hhc) and project file
-# content.
-
-CHM_INDEX_ENCODING =
-
-# If the GENERATE_HTMLHELP tag is set to YES, the BINARY_TOC flag
-# controls whether a binary table of contents is generated (YES) or a
-# normal table of contents (NO) in the .chm file.
-
-BINARY_TOC = NO
-
-# The TOC_EXPAND flag can be set to YES to add extra items for group members
-# to the contents of the HTML help documentation and to the tree view.
-
-TOC_EXPAND = NO
-
-# If the GENERATE_QHP tag is set to YES and both QHP_NAMESPACE and
-# QHP_VIRTUAL_FOLDER are set, an additional index file will be generated
-# that can be used as input for Qt's qhelpgenerator to generate a
-# Qt Compressed Help (.qch) of the generated HTML documentation.
-
-GENERATE_QHP = NO
-
-# If the QHG_LOCATION tag is specified, the QCH_FILE tag can
-# be used to specify the file name of the resulting .qch file.
-# The path specified is relative to the HTML output folder.
-
-QCH_FILE =
-
-# The QHP_NAMESPACE tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#namespace
-
-QHP_NAMESPACE =
-
-# The QHP_VIRTUAL_FOLDER tag specifies the namespace to use when generating
-# Qt Help Project output. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#virtual-folders
-
-QHP_VIRTUAL_FOLDER = doc
-
-# If QHP_CUST_FILTER_NAME is set, it specifies the name of a custom filter to
-# add. For more information please see
-# http://doc.trolltech.com/qthelpproject.html#custom-filters
-
-QHP_CUST_FILTER_NAME =
-
-# The QHP_CUST_FILT_ATTRS tag specifies the list of the attributes of the
-# custom filter to add. For more information please see
-# <a href="http://doc.trolltech.com/qthelpproject.html#custom-filters">
-# Qt Help Project / Custom Filters</a>.
-
-QHP_CUST_FILTER_ATTRS =
-
-# The QHP_SECT_FILTER_ATTRS tag specifies the list of the attributes this
-# project's filter section matches.
-# <a href="http://doc.trolltech.com/qthelpproject.html#filter-attributes">
-# Qt Help Project / Filter Attributes</a>.
-
-QHP_SECT_FILTER_ATTRS =
-
-# If the GENERATE_QHP tag is set to YES, the QHG_LOCATION tag can
-# be used to specify the location of Qt's qhelpgenerator.
-# If non-empty doxygen will try to run qhelpgenerator on the generated
-# .qhp file.
-
-QHG_LOCATION =
-
-# If the GENERATE_ECLIPSEHELP tag is set to YES, additional index files
-# will be generated, which together with the HTML files, form an Eclipse help
-# plugin. To install this plugin and make it available under the help contents
-# menu in Eclipse, the contents of the directory containing the HTML and XML
-# files needs to be copied into the plugins directory of eclipse. The name of
-# the directory within the plugins directory should be the same as
-# the ECLIPSE_DOC_ID value. After copying Eclipse needs to be restarted before
-# the help appears.
-
-GENERATE_ECLIPSEHELP = NO
-
-# A unique identifier for the eclipse help plugin. When installing the plugin
-# the directory name containing the HTML and XML files should also have
-# this name.
-
-ECLIPSE_DOC_ID = org.doxygen.Project
-
-# The DISABLE_INDEX tag can be used to turn on/off the condensed index at
-# top of each HTML page. The value NO (the default) enables the index and
-# the value YES disables it.
-
-DISABLE_INDEX = NO
-
-# This tag can be used to set the number of enum values (range [0,1..20])
-# that doxygen will group on one line in the generated HTML documentation.
-# Note that a value of 0 will completely suppress the enum values from
-# appearing in the overview section.
-
-ENUM_VALUES_PER_LINE = 4
-
-# The GENERATE_TREEVIEW tag is used to specify whether a tree-like index
-# structure should be generated to display hierarchical information.
-# If the tag value is set to YES, a side panel will be generated
-# containing a tree-like index structure (just like the one that
-# is generated for HTML Help). For this to work a browser that supports
-# JavaScript, DHTML, CSS and frames is required (i.e. any modern browser).
-# Windows users are probably better off using the HTML help feature.
-
-GENERATE_TREEVIEW = NO
-
-# By enabling USE_INLINE_TREES, doxygen will generate the Groups, Directories,
-# and Class Hierarchy pages using a tree view instead of an ordered list.
-
-USE_INLINE_TREES = NO
-
-# If the treeview is enabled (see GENERATE_TREEVIEW) then this tag can be
-# used to set the initial width (in pixels) of the frame in which the tree
-# is shown.
-
-TREEVIEW_WIDTH = 250
-
-# When the EXT_LINKS_IN_WINDOW option is set to YES doxygen will open
-# links to external symbols imported via tag files in a separate window.
-
-EXT_LINKS_IN_WINDOW = NO
-
-# Use this tag to change the font size of Latex formulas included
-# as images in the HTML documentation. The default is 10. Note that
-# when you change the font size after a successful doxygen run you need
-# to manually remove any form_*.png images from the HTML output directory
-# to force them to be regenerated.
-
-FORMULA_FONTSIZE = 10
-
-# Use the FORMULA_TRANSPARENT tag to determine whether or not the images
-# generated for formulas are transparent PNGs. Transparent PNGs are
-# not supported properly for IE 6.0, but are supported on all modern browsers.
-# Note that when changing this option you need to delete any form_*.png files
-# in the HTML output before the changes have effect.
-
-FORMULA_TRANSPARENT = YES
-
-# Enable the USE_MATHJAX option to render LaTeX formulas using MathJax
-# (see http://www.mathjax.org) which uses client side Javascript for the
-# rendering instead of using prerendered bitmaps. Use this if you do not
-# have LaTeX installed or if you want the formulas to look prettier in the HTML
-# output. When enabled you also need to install MathJax separately and
-# configure the path to it using the MATHJAX_RELPATH option.
-
-USE_MATHJAX = NO
-
-# When MathJax is enabled you need to specify the location relative to the
-# HTML output directory using the MATHJAX_RELPATH option. The destination
-# directory should contain the MathJax.js script. For instance, if the mathjax
-# directory is located at the same level as the HTML output directory, then
-# MATHJAX_RELPATH should be ../mathjax. The default value points to the
-# mathjax.org site, so you can quickly see the result without installing
-# MathJax, but it is strongly recommended to install a local copy of MathJax
-# before deployment.
-
-MATHJAX_RELPATH = http://www.mathjax.org/mathjax
-
-# When the SEARCHENGINE tag is enabled doxygen will generate a search box
-# for the HTML output. The underlying search engine uses javascript
-# and DHTML and should work on any modern browser. Note that when using
-# HTML help (GENERATE_HTMLHELP), Qt help (GENERATE_QHP), or docsets
-# (GENERATE_DOCSET) there is already a search function so this one should
-# typically be disabled. For large projects the javascript based search engine
-# can be slow, then enabling SERVER_BASED_SEARCH may provide a better solution.
-
-SEARCHENGINE = YES
-
-# When the SERVER_BASED_SEARCH tag is enabled the search engine will be
-# implemented using a PHP enabled web server instead of at the web client
-# using Javascript. Doxygen will generate the search PHP script and index
-# file to put on the web server. The advantage of the server
-# based approach is that it scales better to large projects and allows
-# full text search. The disadvantages are that it is more difficult to setup
-# and does not have live searching capabilities.
-
-SERVER_BASED_SEARCH = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the LaTeX output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_LATEX tag is set to YES (the default) Doxygen will
-# generate Latex output.
-
-GENERATE_LATEX = YES
-
-# The LATEX_OUTPUT tag is used to specify where the LaTeX docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `latex' will be used as the default path.
-
-LATEX_OUTPUT = latex
-
-# The LATEX_CMD_NAME tag can be used to specify the LaTeX command name to be
-# invoked. If left blank `latex' will be used as the default command name.
-# Note that when enabling USE_PDFLATEX this option is only used for
-# generating bitmaps for formulas in the HTML output, but not in the
-# Makefile that is written to the output directory.
-
-LATEX_CMD_NAME = latex
-
-# The MAKEINDEX_CMD_NAME tag can be used to specify the command name to
-# generate index for LaTeX. If left blank `makeindex' will be used as the
-# default command name.
-
-MAKEINDEX_CMD_NAME = makeindex
-
-# If the COMPACT_LATEX tag is set to YES Doxygen generates more compact
-# LaTeX documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_LATEX = YES
-
-# The PAPER_TYPE tag can be used to set the paper type that is used
-# by the printer. Possible values are: a4, letter, legal and
-# executive. If left blank a4wide will be used.
-
-PAPER_TYPE = letter
-
-# The EXTRA_PACKAGES tag can be used to specify one or more names of LaTeX
-# packages that should be included in the LaTeX output.
-
-EXTRA_PACKAGES =
-
-# The LATEX_HEADER tag can be used to specify a personal LaTeX header for
-# the generated latex document. The header should contain everything until
-# the first chapter. If it is left blank doxygen will generate a
-# standard header. Notice: only use this tag if you know what you are doing!
-
-LATEX_HEADER =
-
-# If the PDF_HYPERLINKS tag is set to YES, the LaTeX that is generated
-# is prepared for conversion to pdf (using ps2pdf). The pdf file will
-# contain links (just like the HTML output) instead of page references
-# This makes the output suitable for online browsing using a pdf viewer.
-
-PDF_HYPERLINKS = YES
-
-# If the USE_PDFLATEX tag is set to YES, pdflatex will be used instead of
-# plain latex in the generated Makefile. Set this option to YES to get a
-# higher quality PDF documentation.
-
-USE_PDFLATEX = YES
-
-# If the LATEX_BATCHMODE tag is set to YES, doxygen will add the \\batchmode
-# command to the generated LaTeX files. This will instruct LaTeX to keep
-# running if errors occur, instead of asking the user for help.
-# This option is also used when generating formulas in HTML.
-
-LATEX_BATCHMODE = YES
-
-# If LATEX_HIDE_INDICES is set to YES then doxygen will not
-# include the index chapters (such as File Index, Compound Index, etc.)
-# in the output.
-
-LATEX_HIDE_INDICES = YES
-
-# If LATEX_SOURCE_CODE is set to YES then doxygen will include
-# source code with syntax highlighting in the LaTeX output.
-# Note that which sources are shown also depends on other settings
-# such as SOURCE_BROWSER.
-
-LATEX_SOURCE_CODE = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the RTF output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_RTF tag is set to YES Doxygen will generate RTF output
-# The RTF output is optimized for Word 97 and may not look very pretty with
-# other RTF readers or editors.
-
-GENERATE_RTF = NO
-
-# The RTF_OUTPUT tag is used to specify where the RTF docs will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `rtf' will be used as the default path.
-
-RTF_OUTPUT = rtf
-
-# If the COMPACT_RTF tag is set to YES Doxygen generates more compact
-# RTF documents. This may be useful for small projects and may help to
-# save some trees in general.
-
-COMPACT_RTF = NO
-
-# If the RTF_HYPERLINKS tag is set to YES, the RTF that is generated
-# will contain hyperlink fields. The RTF file will
-# contain links (just like the HTML output) instead of page references.
-# This makes the output suitable for online browsing using WORD or other
-# programs which support those fields.
-# Note: wordpad (write) and others do not support links.
-
-RTF_HYPERLINKS = NO
-
-# Load stylesheet definitions from file. Syntax is similar to doxygen's
-# config file, i.e. a series of assignments. You only have to provide
-# replacements, missing definitions are set to their default value.
-
-RTF_STYLESHEET_FILE =
-
-# Set optional variables used in the generation of an rtf document.
-# Syntax is similar to doxygen's config file.
-
-RTF_EXTENSIONS_FILE =
-
-#---------------------------------------------------------------------------
-# configuration options related to the man page output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_MAN tag is set to YES (the default) Doxygen will
-# generate man pages
-
-GENERATE_MAN = NO
-
-# The MAN_OUTPUT tag is used to specify where the man pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `man' will be used as the default path.
-
-MAN_OUTPUT = man
-
-# The MAN_EXTENSION tag determines the extension that is added to
-# the generated man pages (default is the subroutine's section .3)
-
-MAN_EXTENSION = .3
-
-# If the MAN_LINKS tag is set to YES and Doxygen generates man output,
-# then it will generate one additional man file for each entity
-# documented in the real man page(s). These additional files
-# only source the real man page, but without them the man command
-# would be unable to find the correct page. The default is NO.
-
-MAN_LINKS = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the XML output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_XML tag is set to YES Doxygen will
-# generate an XML file that captures the structure of
-# the code including all documentation.
-
-GENERATE_XML = NO
-
-# The XML_OUTPUT tag is used to specify where the XML pages will be put.
-# If a relative path is entered the value of OUTPUT_DIRECTORY will be
-# put in front of it. If left blank `xml' will be used as the default path.
-
-XML_OUTPUT = xml
-
-# The XML_SCHEMA tag can be used to specify an XML schema,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_SCHEMA =
-
-# The XML_DTD tag can be used to specify an XML DTD,
-# which can be used by a validating XML parser to check the
-# syntax of the XML files.
-
-XML_DTD =
-
-# If the XML_PROGRAMLISTING tag is set to YES Doxygen will
-# dump the program listings (including syntax highlighting
-# and cross-referencing information) to the XML output. Note that
-# enabling this will significantly increase the size of the XML output.
-
-XML_PROGRAMLISTING = YES
-
-#---------------------------------------------------------------------------
-# configuration options for the AutoGen Definitions output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_AUTOGEN_DEF tag is set to YES Doxygen will
-# generate an AutoGen Definitions (see autogen.sf.net) file
-# that captures the structure of the code including all
-# documentation. Note that this feature is still experimental
-# and incomplete at the moment.
-
-GENERATE_AUTOGEN_DEF = NO
-
-#---------------------------------------------------------------------------
-# configuration options related to the Perl module output
-#---------------------------------------------------------------------------
-
-# If the GENERATE_PERLMOD tag is set to YES Doxygen will
-# generate a Perl module file that captures the structure of
-# the code including all documentation. Note that this
-# feature is still experimental and incomplete at the
-# moment.
-
-GENERATE_PERLMOD = NO
-
-# If the PERLMOD_LATEX tag is set to YES Doxygen will generate
-# the necessary Makefile rules, Perl scripts and LaTeX code to be able
-# to generate PDF and DVI output from the Perl module output.
-
-PERLMOD_LATEX = NO
-
-# If the PERLMOD_PRETTY tag is set to YES the Perl module output will be
-# nicely formatted so it can be parsed by a human reader. This is useful
-# if you want to understand what is going on. On the other hand, if this
-# tag is set to NO the size of the Perl module output will be much smaller
-# and Perl will parse it just the same.
-
-PERLMOD_PRETTY = YES
-
-# The names of the make variables in the generated doxyrules.make file
-# are prefixed with the string contained in PERLMOD_MAKEVAR_PREFIX.
-# This is useful so different doxyrules.make files included by the same
-# Makefile don't overwrite each other's variables.
-
-PERLMOD_MAKEVAR_PREFIX =
-
-#---------------------------------------------------------------------------
-# Configuration options related to the preprocessor
-#---------------------------------------------------------------------------
-
-# If the ENABLE_PREPROCESSING tag is set to YES (the default) Doxygen will
-# evaluate all C-preprocessor directives found in the sources and include
-# files.
-
-ENABLE_PREPROCESSING = YES
-
-# If the MACRO_EXPANSION tag is set to YES Doxygen will expand all macro
-# names in the source code. If set to NO (the default) only conditional
-# compilation will be performed. Macro expansion can be done in a controlled
-# way by setting EXPAND_ONLY_PREDEF to YES.
-
-MACRO_EXPANSION = YES
-
-# If the EXPAND_ONLY_PREDEF and MACRO_EXPANSION tags are both set to YES
-# then the macro expansion is limited to the macros specified with the
-# PREDEFINED and EXPAND_AS_DEFINED tags.
-
-EXPAND_ONLY_PREDEF = NO
-
-# If the SEARCH_INCLUDES tag is set to YES (the default) the includes files
-# in the INCLUDE_PATH (see below) will be searched if a #include is found.
-
-SEARCH_INCLUDES = YES
-
-# The INCLUDE_PATH tag can be used to specify one or more directories that
-# contain include files that are not input files but should be processed by
-# the preprocessor.
-
-INCLUDE_PATH =
-
-# You can use the INCLUDE_FILE_PATTERNS tag to specify one or more wildcard
-# patterns (like *.h and *.hpp) to filter out the header-files in the
-# directories. If left blank, the patterns specified with FILE_PATTERNS will
-# be used.
-
-INCLUDE_FILE_PATTERNS =
-
-# The PREDEFINED tag can be used to specify one or more macro names that
-# are defined before the preprocessor is started (similar to the -D option of
-# gcc). The argument of the tag is a list of macros of the form: name
-# or name=definition (no spaces). If the definition and the = are
-# omitted =1 is assumed. To prevent a macro definition from being
-# undefined via #undef or recursively expanded use the := operator
-# instead of the = operator.
-
-PREDEFINED = DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
-
-# If the MACRO_EXPANSION and EXPAND_ONLY_PREDEF tags are set to YES then
-# this tag can be used to specify a list of macro names that should be expanded.
-# The macro definition that is found in the sources will be used.
-# Use the PREDEFINED tag if you want to use a different macro definition
-# that overrules the definition found in the source code.
-
-EXPAND_AS_DEFINED =
-
-# If the SKIP_FUNCTION_MACROS tag is set to YES (the default) then
-# doxygen's preprocessor will remove all references to function-like macros
-# that are alone on a line, have an all uppercase name, and do not end with a
-# semicolon, because these will confuse the parser if not removed.
-
-SKIP_FUNCTION_MACROS = YES
-
-#---------------------------------------------------------------------------
-# Configuration::additions related to external references
-#---------------------------------------------------------------------------
-
-# The TAGFILES option can be used to specify one or more tagfiles.
-# Optionally an initial location of the external documentation
-# can be added for each tagfile. The format of a tag file without
-# this location is as follows:
-#
-# TAGFILES = file1 file2 ...
-# Adding location for the tag files is done as follows:
-#
-# TAGFILES = file1=loc1 "file2 = loc2" ...
-# where "loc1" and "loc2" can be relative or absolute paths or
-# URLs. If a location is present for each tag, the installdox tool
-# does not have to be run to correct the links.
-# Note that each tag file must have a unique name
-# (where the name does NOT include the path)
-# If a tag file is not located in the directory in which doxygen
-# is run, you must also specify the path to the tagfile here.
-
-TAGFILES =
-
-# When a file name is specified after GENERATE_TAGFILE, doxygen will create
-# a tag file that is based on the input files it reads.
-
-GENERATE_TAGFILE =
-
-# If the ALLEXTERNALS tag is set to YES all external classes will be listed
-# in the class index. If set to NO only the inherited external classes
-# will be listed.
-
-ALLEXTERNALS = NO
-
-# If the EXTERNAL_GROUPS tag is set to YES all external groups will be listed
-# in the modules index. If set to NO, only the current project's groups will
-# be listed.
-
-EXTERNAL_GROUPS = YES
-
-# The PERL_PATH should be the absolute path and name of the perl script
-# interpreter (i.e. the result of `which perl').
-
-PERL_PATH = /usr/bin/perl
-
-#---------------------------------------------------------------------------
-# Configuration options related to the dot tool
-#---------------------------------------------------------------------------
-
-# If the CLASS_DIAGRAMS tag is set to YES (the default) Doxygen will
-# generate a inheritance diagram (in HTML, RTF and LaTeX) for classes with base
-# or super classes. Setting the tag to NO turns the diagrams off. Note that
-# this option also works with HAVE_DOT disabled, but it is recommended to
-# install and use dot, since it yields more powerful graphs.
-
-CLASS_DIAGRAMS = YES
-
-# You can define message sequence charts within doxygen comments using the \msc
-# command. Doxygen will then run the mscgen tool (see
-# http://www.mcternan.me.uk/mscgen/) to produce the chart and insert it in the
-# documentation. The MSCGEN_PATH tag allows you to specify the directory where
-# the mscgen tool resides. If left empty the tool is assumed to be found in the
-# default search path.
-
-MSCGEN_PATH =
-
-# If set to YES, the inheritance and collaboration graphs will hide
-# inheritance and usage relations if the target is undocumented
-# or is not a class.
-
-HIDE_UNDOC_RELATIONS = NO
-
-# If you set the HAVE_DOT tag to YES then doxygen will assume the dot tool is
-# available from the path. This tool is part of Graphviz, a graph visualization
-# toolkit from AT&T and Lucent Bell Labs. The other options in this section
-# have no effect if this option is set to NO (the default)
-
-HAVE_DOT = YES
-
-# The DOT_NUM_THREADS specifies the number of dot invocations doxygen is
-# allowed to run in parallel. When set to 0 (the default) doxygen will
-# base this on the number of processors available in the system. You can set it
-# explicitly to a value larger than 0 to get control over the balance
-# between CPU load and processing speed.
-
-DOT_NUM_THREADS = 0
-
-# By default doxygen will write a font called Helvetica to the output
-# directory and reference it in all dot files that doxygen generates.
-# When you want a differently looking font you can specify the font name
-# using DOT_FONTNAME. You need to make sure dot is able to find the font,
-# which can be done by putting it in a standard location or by setting the
-# DOTFONTPATH environment variable or by setting DOT_FONTPATH to the directory
-# containing the font.
-
-DOT_FONTNAME = FreeSans
-
-# The DOT_FONTSIZE tag can be used to set the size of the font of dot graphs.
-# The default size is 10pt.
-
-DOT_FONTSIZE = 10
-
-# By default doxygen will tell dot to use the output directory to look for the
-# FreeSans.ttf font (which doxygen will put there itself). If you specify a
-# different font using DOT_FONTNAME you can set the path where dot
-# can find it using this tag.
-
-DOT_FONTPATH =
-
-# If the CLASS_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect inheritance relations. Setting this tag to YES will force the
-# CLASS_DIAGRAMS tag to NO.
-
-CLASS_GRAPH = YES
-
-# If the COLLABORATION_GRAPH and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for each documented class showing the direct and
-# indirect implementation dependencies (inheritance, containment, and
-# class references variables) of the class with other documented classes.
-
-COLLABORATION_GRAPH = YES
-
-# If the GROUP_GRAPHS and HAVE_DOT tags are set to YES then doxygen
-# will generate a graph for groups, showing the direct groups dependencies
-
-GROUP_GRAPHS = YES
-
-# If the UML_LOOK tag is set to YES doxygen will generate inheritance and
-# collaboration diagrams in a style similar to the OMG's Unified Modeling
-# Language.
-
-UML_LOOK = YES
-
-# If set to YES, the inheritance and collaboration graphs will show the
-# relations between templates and their instances.
-
-TEMPLATE_RELATIONS = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDE_GRAPH, and HAVE_DOT
-# tags are set to YES then doxygen will generate a graph for each documented
-# file showing the direct and indirect include dependencies of the file with
-# other documented files.
-
-INCLUDE_GRAPH = NO
-
-# If the ENABLE_PREPROCESSING, SEARCH_INCLUDES, INCLUDED_BY_GRAPH, and
-# HAVE_DOT tags are set to YES then doxygen will generate a graph for each
-# documented header file showing the documented files that directly or
-# indirectly include this file.
-
-INCLUDED_BY_GRAPH = YES
-
-# If the CALL_GRAPH and HAVE_DOT options are set to YES then
-# doxygen will generate a call dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable call graphs
-# for selected functions only using the \callgraph command.
-
-CALL_GRAPH = YES
-
-# If the CALLER_GRAPH and HAVE_DOT tags are set to YES then
-# doxygen will generate a caller dependency graph for every global function
-# or class method. Note that enabling this option will significantly increase
-# the time of a run. So in most cases it will be better to enable caller
-# graphs for selected functions only using the \callergraph command.
-
-CALLER_GRAPH = YES
-
-# If the GRAPHICAL_HIERARCHY and HAVE_DOT tags are set to YES then doxygen
-# will generate a graphical hierarchy of all classes instead of a textual one.
-
-GRAPHICAL_HIERARCHY = YES
-
-# If the DIRECTORY_GRAPH, SHOW_DIRECTORIES and HAVE_DOT tags are set to YES
-# then doxygen will show the dependencies a directory has on other directories
-# in a graphical way. The dependency relations are determined by the #include
-# relations between the files in the directories.
-
-DIRECTORY_GRAPH = YES
-
-# The DOT_IMAGE_FORMAT tag can be used to set the image format of the images
-# generated by dot. Possible values are png, jpg, gif or svg.
-# If left blank png will be used.
-
-DOT_IMAGE_FORMAT = png
-
-# The tag DOT_PATH can be used to specify the path where the dot tool can be
-# found. If left blank, it is assumed the dot tool can be found in the path.
-
-DOT_PATH =
-
-# The DOTFILE_DIRS tag can be used to specify one or more directories that
-# contain dot files that are included in the documentation (see the
-# \dotfile command).
-
-DOTFILE_DIRS =
-
-# The MSCFILE_DIRS tag can be used to specify one or more directories that
-# contain msc files that are included in the documentation (see the
-# \mscfile command).
-
-MSCFILE_DIRS =
-
-# The DOT_GRAPH_MAX_NODES tag can be used to set the maximum number of
-# nodes that will be shown in the graph. If the number of nodes in a graph
-# becomes larger than this value, doxygen will truncate the graph, which is
-# visualized by representing a node as a red box. Note that if the
-# number of direct children of the root node in a graph is already larger than
-# DOT_GRAPH_MAX_NODES then the graph will not be shown at all. Also note
-# that the size of a graph can be further restricted by MAX_DOT_GRAPH_DEPTH.
-
-DOT_GRAPH_MAX_NODES = 50
-
-# The MAX_DOT_GRAPH_DEPTH tag can be used to set the maximum depth of the
-# graphs generated by dot. A depth value of 3 means that only nodes reachable
-# from the root by following a path via at most 3 edges will be shown. Nodes
-# that lay further from the root node will be omitted. Note that setting this
-# option to 1 or 2 may greatly reduce the computation time needed for large
-# code bases. Also note that the size of a graph can be further restricted by
-# DOT_GRAPH_MAX_NODES. Using a depth of 0 means no depth restriction.
-
-MAX_DOT_GRAPH_DEPTH = 0
-
-# Set the DOT_TRANSPARENT tag to YES to generate images with a transparent
-# background. This is disabled by default, because dot on Windows does not
-# seem to support this out of the box. Warning: Depending on the platform used,
-# enabling this option may lead to badly anti-aliased labels on the edges of
-# a graph (i.e. they become hard to read).
-
-DOT_TRANSPARENT = NO
-
-# Set the DOT_MULTI_TARGETS tag to YES to allow dot to generate multiple output
-# files in one run (i.e. multiple -o and -T options on the command line). This
-# makes dot run faster, but since only newer versions of dot (>1.8.10)
-# support this, this feature is disabled by default.
-
-DOT_MULTI_TARGETS = NO
-
-# If the GENERATE_LEGEND tag is set to YES (the default) Doxygen will
-# generate a legend page explaining the meaning of the various boxes and
-# arrows in the dot generated graphs.
-
-GENERATE_LEGEND = YES
-
-# If the DOT_CLEANUP tag is set to YES (the default) Doxygen will
-# remove the intermediate dot files that are used to generate
-# the various graphs.
-
-DOT_CLEANUP = YES
diff --git a/rcynic-ng/Makefile.in b/rcynic-ng/Makefile.in
deleted file mode 100644
index 3b86a510..00000000
--- a/rcynic-ng/Makefile.in
+++ /dev/null
@@ -1,77 +0,0 @@
-# $Id$
-
-NAME = rcynic
-
-BIN = ${NAME}
-SRC = ${NAME}.c
-OBJ = ${NAME}.o
-
-HDR = defasn1.h
-GEN = defstack.h
-
-OBJS = ${OBJ} bio_f_linebreak.o
-
-CFLAGS = @CFLAGS@ -Wall -Wshadow -Wmissing-prototypes -Wmissing-declarations -Werror-implicit-function-declaration
-LDFLAGS = @LDFLAGS@ @LD_STATIC_FLAG@
-LIBS = @LIBS@
-
-AWK = @AWK@
-XSLTPROC = @XSLTPROC@
-
-abs_top_srcdir = @abs_top_srcdir@
-abs_top_builddir = @abs_top_builddir@
-
-host_os = @host_os@
-
-all: ${BIN}
-
-clean:
- cd static-rsync; ${MAKE} $@
- rm -f ${BIN} ${OBJS} ${GEN}
-
-${OBJ}: ${SRC} ${HDR} ${GEN}
-
-${BIN}: ${OBJS}
- ${CC} ${CFLAGS} -o $@ ${OBJS} ${LDFLAGS} ${LIBS}
-
-defstack.h: defstack.awk ${SRC} ${HDR}
-	${AWK} -f defstack.awk ${SRC} ${HDR} >$@
-
-test: ${BIN}
- if test -r rcynic.conf; then ./${BIN} -j 0 && echo && ./show.sh; else echo No rcynic.conf, skipping test; fi
-
-install: ${BIN} installation-scripts/install.sh
- cd installation-scripts; host_os="${host_os}"; DESTDIR="${DESTDIR}"; . ./install.sh
-
-uninstall deinstall:
- cd installation-scripts; host_os="${host_os}"; DESTDIR="${DESTDIR}"; . ./deinstall.sh
-
-distclean: clean docclean
- cd static-rsync; ${MAKE} $@
- rm -f show.sh installation-scripts/linux/install.sh Makefile
-
-tags: TAGS
-
-TAGS: ${SRC} ${HDR} ${GEN}
- etags ${SRC} ${HDR} ${GEN}
-
-rcynic.html: rcynic.xml rcynic.xsl
- ${XSLTPROC} -o $@ rcynic.xsl rcynic.xml
-
-# Doc stuff right now is just internals doc, of interest only to
-# programmers. Real doc for rcynic is still the README. This may
-# change some day.
-
-html:
- TZ='' doxygen
-
-pdf: html
- cd doc/latex && TZ='' ${MAKE} pdf && ln -f refman.pdf ../manual.pdf
-
-tgz: html
- cd doc && tar -cf - html | gzip -9 >manual.tar.gz
-
-docs: html pdf tgz
-
-docclean:
- rm -rf doc
diff --git a/rcynic-ng/README b/rcynic-ng/README
deleted file mode 100644
index fb810e6e..00000000
--- a/rcynic-ng/README
+++ /dev/null
@@ -1,582 +0,0 @@
--*- Text -*- $Id$
-
-"Cynical rsync" -- fetch and validate RPKI certificates.
-
-To build this you will need to link it against an OpenSSL libcrypto
-that has support for the RFC 3779 extensions. See ../openssl/README.
-
-I developed this code on FreeBSD 6-STABLE. It is also known to work
-on Ubuntu (8.10) and Mac OS X (Snow Leopard). In theory it
-should run on any reasonably POSIX-like system. As far as I know I
-have not used any seriously non-portable features, but neither have I
-done a POSIX reference manual lookup for every function call. Please
-report any portability problems.
-
-All certificates and CRLs are in DER format, with filenames derived
-from the RPKI rsync URIs at which the data are published. See
-../utils/ and ../rtr-origin/ for tools that use rcynic's output.
-
-All configuration is via an OpenSSL-style configuration file, except
-for selection of the name of the configuration file itself. A few of
-the parameters can also be set from the command line, to simplify
-testing. The default name for the configuration file is rcynic.conf; you
-can override this with the -c option on the command line. The config
-file uses OpenSSL's config file syntax, and you can set OpenSSL
-library configuration paramaters (eg, "engine" settings) in the config
-file as well. rcynic's own configuration parameters are in a section
-called "[rcynic]".
-
-Most configuration parameters are optional and have defaults that
-should do something reasonable if you are running rcynic in a test
-directory. If you're running it as a system program, perhaps under
-cron, you'll want to set additional parameters to tell rcynic where to
-find its data and where to write its output.
-
-The one thing you MUST specify in the config file in order for the
-program to do anything useful is the file name of one or more trust
-anchors. Trust anchors for this program are represented as
-DER-formatted X.509 objects that look just like certificates, except
-that they're trust anchors.
-
-Strictly speaking, trust anchors do not need to be self-signed, but
-many programs (including OpenSSL) assume that trust anchors will be
-self-signed. See the allow-non-self-signed-trust-anchor configuration
-option if you need to use a non-self-signed trust anchor, but be
-warned that the results, while technically correct, may not be useful.
-
-There are two ways of specifying trust anchors:
-
-- Via the "trust-anchor" directive, to name a local file containing
- the DER-encoded trust anchor.
-
-- Via the "trust-anchor-locator" directive, to name a local file
- containing a "trust anchor locator" (TAL). See draft-ietf-sidr-ta
- for details [update this once RFC has been issued].
-
-In most cases, except perhaps for testing, you will want to use trust
-anchor locators, since they allow the trust anchor itself to be
-updated without requiring reconfiguration of rcynic.
-
-See the make-tal.sh script in this directory if you need to generate
-your own TAL file for a trust anchor.
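-
-The details of how to invoke make-tal.sh are in the script itself; as a
-rough illustration (the URI here is just a placeholder, not a real
-trust anchor), generating a TAL for a certificate published at a known
-rsync URI looks something like:
-
-    $ sh make-tal.sh rsync://rpki.example.org/ta/root.cer >example.tal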
-
-As of this writing, there is still no global trust
-anchor for the RPKI system, so you have to specify separate trust
-anchors for each RIR that's publishing data.
-
-Example of a minimal config file:
-
- [rcynic]
-
- trust-anchor-locator.0 = trust-anchors/apnic.tal
- trust-anchor-locator.1 = trust-anchors/ripe.tal
- trust-anchor-locator.2 = trust-anchors/afrinic.tal
- trust-anchor-locator.3 = trust-anchors/lacnic.tal
-
-Eventually, this should all be collapsed into a single trust anchor,
-so that relying parties (people running tools like rcynic) don't need
-to sort out this sort of issue, at which point the above
-configuration can become something like:
-
- [rcynic]
-
- trust-anchor-locator = trust-anchors/iana.tal
-
-
-By default, rcynic uses two writable directory trees:
-
-- unauthenticated Raw data fetched via rsync. In order to take
- full advantage of rsync's optimized transfers,
- you should preserve and reuse this directory
- across rcynic runs, so that rcynic need not
- re-fetch data that have not changed.
-
-- authenticated Data that rcynic has checked. This is the
- real output of the process.
-
-authenticated is really a symbolic link to a directory with a name of
-the form authenticated.<timestamp>, where <timestamp> is an ISO 8601
-timestamp like 2001-04-01T01:23:45Z. rcynic creates a new timestamped
-directory every time it runs, and moves the symbolic link as an atomic
-operation when the validation process completes. The intent is that
-authenticated always points to the most recent usable validation
-results, so that programs which use rcynic's output don't need to
-worry about whether an rcynic run is in progress.
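-
-For example (the timestamp is of course just illustrative), after at
-least one successful run you might see something like:
-
-    $ ls -ld rcynic-data/authenticated
-    ... rcynic-data/authenticated -> authenticated.2001-04-01T01:23:45Z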
-
-rcynic stores trust anchors specified via the trust-anchor-locator
-directive in the unauthenticated tree just like any other fetched
-object, and copies them into the authenticated tree just like any
-other object once they pass rcynic's checks.
-
-rcynic copies trust anchors specified via the "trust-anchor" directive
-into the top level directory of the authenticated tree as xxxxxxxx.n.cer,
-where xxxxxxxx and n are the OpenSSL object name hash and index within
-the resulting virtual hash bucket (the same as the c_hash Perl script
-that comes with OpenSSL would produce), and ".cer" is the literal
-string ".cer". The reason for this is that these trust anchors, by
-definition, are not fetched automatically, and thus do not really have
-publication URIs in the sense that every other object in these trees
-does. So rcynic uses a naming scheme which ensures (a) that each trust
-anchor has a unique name within the output tree and (b) that trust
-anchors cannot be confused with certificates: trust anchors always go
-in the top level of the tree, while data fetched via rsync always go
-in subdirectories.
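-
-If you want to know in advance what name a particular trust anchor will
-get, the hash portion should match what the OpenSSL command line tool
-prints for that certificate, something like:
-
-    $ openssl x509 -inform DER -in trust-anchor.cer -noout -hash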
-
-As currently implemented, rcynic does not attempt to maintain an
-in-memory cache of objects it might need again later. It does keep an
-internal cache of the URIs from which it has already fetched data in
-this pass, and it keeps a stack containing the current certificate
-chain as it does its validation walk. All other data (eg, CRLs) are
-freed immediately after use and read from disk again as needed. From
-a database design standpoint, this is not very efficient, but as
-rcynic's main bottlenecks are expected to be crypto and network
-operations, it seemed best to keep the design as simple as possible,
-at least until execution profiling demonstrates a real issue here.
-
-Usage and configuration:
-
-Logging levels:
-
-rcynic has its own system of logging levels, similar to what syslog()
-uses but customized to the specific task rcynic performs. Levels:
-
- log_sys_err Error from operating system or library
- log_usage_err Bad usage (local configuration error)
- log_data_err Bad data (broken certificates or CRLs)
- log_telemetry Normal chatter about rcynic's progress
- log_verbose Extra verbose chatter
- log_debug Only useful when debugging
-
-Command line options:
-
- -c configfile Path to configuration file (default: rcynic.conf)
- -l loglevel Logging level (default: log_data_err)
- -s Log via syslog
- -e Log via stderr when also using syslog
- -j Start-up jitter interval (see below; default: 600)
- -V Print rcynic's version to standard output and exit
-
-Configuration file:
-
-rcynic uses the OpenSSL libcrypto configuration file mechanism. All
-libcrypto configuration options (eg, for engine support) are
-available. All rcynic-specific options are in the "[rcynic]"
-section. You -must- have a configuration file in order for rcynic to
-do anything useful, as the configuration file is the only way to list
-your trust anchors.
-
-Configuration variables:
-
-authenticated Path to output directory (where rcynic should
- place objects it has been able to validate).
-
- Default: rcynic-data/authenticated
-
-
-unauthenticated Path to directory where rcynic should store
-			unauthenticated data retrieved via rsync.
- Unless something goes horribly wrong, you want
- rcynic to preserve and reuse this directory
- across runs to minimize the network traffic
- necessary to bring your repository mirror up
- to date.
-
- Default: rcynic-data/unauthenticated
-
-
-rsync-timeout How long (in seconds) to let rsync run before
- terminating the rsync process, or zero for no
- timeout. You want this timeout to be fairly
- long, to avoid terminating rsync connections
- prematurely. It's present to let you defend
- against evil rsync server operators who try to
- tarpit your connection as a form of denial of
- service attack on rcynic.
-
- Default: 300 seconds.
-
-
-max-parallel-fetches Upper limit on the number of copies of rsync
- that rcynic is allowed to run at once. Used
- properly, this can speed up synchronization
- considerably when fetching from repositories
- built with sub-optimal tree layouts or when
- dealing with unreachable repositories. Used
- improperly, this option can generate excessive
- load on repositories, cause synchronization to
- be interrupted by firewalls, and generally
-			create a public nuisance. Use with
- caution.
-
- As of this writing, values in the range 2-4
- are reasonably safe. At least one RIR
- currently refuses service at settings above 4,
- and another RIR appears to be running some
- kind of firewall that silently blocks
-			connections when it decides that the
- connection rate is excessive.
-
- rcynic can't really detect all of the possible
- problems created by excessive values of this
-			parameter, but if rcynic's report shows
-			both successful and skipped retrievals
-			from the same repository host,
- that's a pretty good hint that something is
- wrong, and an excessive value here is a good
- first guess as to the cause.
-
- Default: 1
-
-
-rsync-program Path to the rsync program.
-
- Default: rsync, but you should probably set
- this variable rather than just trusting the
- PATH environment variable to be set correctly.
-
-
-log-level Same as -l option on command line. Command
- line setting overrides config file setting.
-
-			Default: log_data_err
-
-
-use-syslog Same as -s option on command line. Command
- line setting overrides config file setting.
-
- Values: true or false.
-
- Default: false
-
-
-use-stderr Same as -e option on command line. Command
- line setting overrides config file setting.
-
- Values: true or false.
-
- Default: false, but if neither use-syslog nor
- use-stderr is set, log output goes to stderr.
-
-
-syslog-facility Syslog facility to use.
-
- Default: local0
-
-
-
-syslog-priority-xyz (where xyz is an rcynic logging level, above)
- Override the syslog priority value to use when
- logging messages at this rcynic level.
-
- Defaults:
-
- syslog-priority-log_sys_err: err
- syslog-priority-log_usage_err: err
- syslog-priority-log_data_err: notice
- syslog-priority-log_telemetry: info
- syslog-priority-log_verbose: info
- syslog-priority-log_debug: debug
-
-
-jitter Startup jitter interval, same as -j option on
- command line. Jitter interval, specified in
- number of seconds. rcynic will pick a random
- number within the interval from zero to this
- value, and will delay for that many seconds on
- startup. The purpose of this is to spread the
- load from large numbers of rcynic clients all
- running under cron with synchronized clocks,
- in particular to avoid hammering the RPKI
- rsync servers into the ground at midnight UTC.
-
- Default: 600
-
-
-lockfile Name of lockfile, or empty for no lock. If
- you run rcynic under cron, you should use this
- parameter to set a lockfile so that successive
- instances of rcynic don't stomp on each other.
-
- Default: no lock
-
-xml-summary Enable output of a per-host summary at the
- end of an rcynic run in XML format. Some
- users prefer this to the log_telemetry style
- of logging, or just want it in addition to
- logging. Value: filename to which XML summary
- should be written; "-" will send XML summary
- to stdout.
-
- Default: no XML summary
-
-
-allow-stale-crl Allow use of CRLs which are past their
- nextUpdate timestamp. This is probably
- harmless, but since it may be an early warning
- of problems, it's configurable.
-
- Values: true or false.
-
- Default: true
-
-
-prune Clean up old files corresponding to URIs that
- rcynic did not see at all during this run.
- rcynic invokes rsync with the --delete option
- to clean up old objects from collections that
- rcynic revisits, but if a URI changes so that
- rcynic never visits the old collection again,
- old files will remain in the local mirror
- indefinitely unless you enable this option.
-
- Values: true or false.
-
- Default: true
-
-
-allow-stale-manifest Allow use of manifests which are past their
- nextUpdate timestamp. This is probably
- harmless, but since it may be an early warning
- of problems, it's configurable.
-
- Values: true or false.
-
- Default: true
-
-
-require-crl-in-manifest Reject manifests which don't list the CRL
- covering the manifest EE certificate.
-
- Values: true or false.
-
- Default: false
-
-
-allow-non-self-signed-trust-anchor
- Experimental. Attempts to work around OpenSSL's
- strong preference for self-signed trust
- anchors. Do not use this unless you really know
- what you are doing.
-
- Values: true or false.
-
- Default: false
-
-
-run-rsync
-
- Whether to run rsync to fetch data. You don't
- want to change this except when building
- complex topologies where rcynic running on one
- set of machines acts as aggregators for
- another set of validators. A large ISP might
- want to build such a topology so that they
- could have a local validation cache in each
- POP while minimizing load on the global
- repository system and maintaining some degree
-			of internal consistency between POPs. In such
- cases, one might want the rcynic instances in
- the POPs to validate data fetched from the
- aggregators via an external process, without
- the POP rcynic instances attempting to fetch
- anything themselves.
-
- Don't touch this unless you really know what
- you're doing.
-
- Values: true or false.
-
- Default: true
-
-
-use-links Whether to use hard links rather than copying
- valid objects from the unauthenticated to
- authenticated tree. Using links is slightly
- more fragile (anything that stomps on the
- unauthenticated file also stomps on the
- authenticated file) but is a bit faster and
- reduces the number of inodes consumed by a
- large data collection. At the moment, copying
- is the default behavior, but this may change
- in the future.
-
- Values: true or false.
-
- Default: false
-
-
-trust-anchor Specify one RPKI trust anchor, represented as
- a local file containing an X.509 certificate
- in DER format. Value of this option is the
- pathname of the file.
-
- No default.
-
-
-trust-anchor-locator
- Specify one RPKI trust anchor, represented as
- a local file containing an rsync URI and the
- RSA public key of the X.509 object specified
- by the URI. First line of the file is the
- URI, remainder is the public key in Base64
- encoded DER format. Value of this option
- is the pathname of the file.
-
- No default.
-
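-As an illustration (the URI below is a placeholder, not a real trust
-anchor), a TAL is just a small text file of the form:
-
-    rsync://rpki.example.org/ta/root.cer
-    <Base64 encoded DER public key, possibly spanning several lines>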
-
-There's a companion XSLT template in rcynic.xsl, which translates what
-the xml-summary option writes into HTML.
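-
-For example, if you pointed xml-summary at a file called rcynic.xml
-(the name is arbitrary), something like this should produce an HTML
-report:
-
-    $ xsltproc -o rcynic.html rcynic.xsl rcynic.xml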
-
-
-
-Running rcynic chrooted
-
-This is an attempt to describe the process of setting up rcynic in a
-chrooted environment. The installation scripts that ship with rcynic
-attempt to do this automatically for the platforms we support, but the
-process is somewhat finicky, so some explanation seems in order. If
-you're running on one of the supported platforms, the following steps
-may be handled for you by the Makefiles, but you may still want to
-understand what all this is trying to do.
-
-rcynic itself does not include any direct support for running
-chrooted, but is designed to be (relatively) easy to run in a chroot
-jail. Here's how.
-
-You'll either need statically linked copies of rcynic and rsync, or
-you'll need to figure out which shared libraries these programs need
-(try using the "ldd" command). Here we assume statically linked
-binaries, because that's simpler.
-
-You'll need a chroot wrapper program. Your platform may already have
-one (FreeBSD does -- /usr/sbin/chroot), but if you don't, you can
-download Wietse Venema's "chrootuid" program from:
-
- ftp://ftp.porcupine.org/pub/security/chrootuid1.3.tar.gz
-
-Warning: The chroot program included in at least some Linux
-distributions is not adequate to this task; you need a wrapper that
-knows how to drop privileges after performing the chroot() operation
-itself. If in doubt, use chrootuid.
-
-Unfortunately, the precise details of setting up a proper chroot jail
-vary wildly from one system to another, so the following instructions
-will likely not be a precise match for the preferred way of doing this
-on any particular platform. We have sample scripts that do the right
-thing for FreeBSD, feel free to contribute such scripts for other
-platforms.
-
-Step 1: Build the static binaries. You might want to test them at
-this stage too, although you can defer that until after you've got the
-jail built.
-
-Step 2: Create a userid under which to run rcynic. Here we'll assume
-that you've created a user "rcynic", whose default group is also named
-"rcynic". Do not add any other userids to the rcynic group unless you
-really know what you are doing.
-
-Step 3: Build the jail. You'll need, at minimum, a directory in which
-to put the binaries, a subdirectory tree that's writable by the userid
-which will be running rcynic and rsync, your trust anchors, and
-whatever device inodes the various libraries need on your system.
-Most likely the devices that matter will be /dev/null, /dev/random,
-and /dev/urandom; if you're running a FreeBSD system with devfs, you
-do this by mounting and configuring a devfs instance in the jail; on
-other platforms you probably use the mknod program or something.
-
-Important: other than the directories that you want rcynic and rsync
-to be able to modify, -nothing- in the initial jail setup should be
-writable by the rcynic userid. In particular, rcynic and rsync should
--not- be allowed to modify: their own binary images, any of the
-configuration files, or your trust anchors. It's simplest just to
-have root own all the files and directories that rcynic and rsync are
-not allowed to modify, and make sure that the permissions for all of
-those directories and files make them writable only by root.
-
-Sample jail tree, assuming that we're putting all of this under
-/var/rcynic:
-
- # mkdir /var/rcynic
- # mkdir /var/rcynic/bin
- # mkdir /var/rcynic/data
- # mkdir /var/rcynic/dev
- # mkdir /var/rcynic/etc
- # mkdir /var/rcynic/etc/trust-anchors
-
-Copy your trust anchors into /var/rcynic/etc/trust-anchors.
-
-Copy the statically linked rcynic and rsync into /var/rcynic/bin.
-
-Copy /etc/resolv.conf and /etc/localtime (if it exists) into
-/var/rcynic/etc.
-
-Write an rcynic configuration file as /var/rcynic/etc/rcynic.conf
-(path names in this file must match the jail setup, more below).
-
- # chmod -R go-w /var/rcynic
- # chown -R root:wheel /var/rcynic
- # chown -R rcynic:rcynic /var/rcynic/data
-
-If you're using devfs, arrange for it to be mounted at
-/var/rcynic/dev; otherwise, create whatever device inodes you need in
-/var/rcynic/dev and make sure that they have sane permissions (copying
-whatever permissions are used in your system /dev directory should
-suffice).
-
-rcynic.conf to match this configuration:
-
- [rcynic]
-
- trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
- trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
- trust-anchor-locator.3 = /etc/trust-anchors/ta-3.tal
-
- rsync-program = /bin/rsync
- authenticated = /data/authenticated
- unauthenticated = /data/unauthenticated
-
-Once you've got all this set up, you're ready to try running rcynic in
-the jail. Try it from the command line first, then if that works, you
-should be able to run it under cron.
-
-Note: chroot, chrootuid, and other programs of this type are usually
-intended to be run by root, and should -not- be setuid programs unless
-you -really- know what you are doing.
-
-Sample command line:
-
- # /usr/local/bin/chrootuid /var/rcynic rcynic /bin/rcynic -s -c /etc/rcynic.conf
-
-Note that we use absolute pathnames everywhere. This is not an
-accident. Programs running in jails under cron should not make
-assumptions about the current working directory or environment
-variable settings, and programs running in chroot jails would need
-different PATH settings anyway. Best just to specify everything.
-
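-So a root crontab entry might look something like this (the schedule
-shown here is only an example, and the paths must of course match your
-own installation):
-
-    30  */4  *  *  *  /usr/local/bin/chrootuid /var/rcynic rcynic /bin/rcynic -s -c /etc/rcynic.conf
-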
-Building static binaries:
-
-On FreeBSD, building a statically linked rsync is easy: just set the
-environment variable LDFLAGS='-static' before building the rsync port
-and the right thing will happen. Since this is really just GNU
-configure picking up the environment variable, the same trick should
-work on other platforms...except that some compilers don't support
--static, and some platforms are missing some or all of the non-shared
-libraries you'd need to link the resulting binary.
-
-For simplicity, I've taken the same approach with rcynic, so
-
- $ make LDFLAGS='-static'
-
-should work. Except that you don't even have to do that: static
-linking is the default where supported, because I run it jailed.
-
-syslog:
-
-Depending on your syslogd configuration, syslog may not work properly
-with rcynic in a chroot jail. On FreeBSD, the easiest way to fix this
-is to add the following lines to /etc/rc.conf:
-
- altlog_proglist="named rcynic"
- rcynic_chrootdir="/var/rcynic"
- rcynic_enable="YES"
diff --git a/rcynic-ng/bio_f_linebreak.c b/rcynic-ng/bio_f_linebreak.c
deleted file mode 100644
index 0376456e..00000000
--- a/rcynic-ng/bio_f_linebreak.c
+++ /dev/null
@@ -1,268 +0,0 @@
-/* $Id$ */
-
-/** @file bio_f_linebreak.c
- *
- * This implements a trivial filter BIO (see OpenSSL manual) which
- * does one rather silly thing: on read, it inserts line break into
- * the input stream at regular intervals.
- *
- * You might reasonably ask why anyone would want such a thing. The
- * answer is that OpenSSL's Base64 filter BIO has two input modes,
- * neither of which is really useful for reading generalized Base64
- * input. In one mode, it requires line breaks at most every 79
- * characters; in the other mode, it requires that there be no
- * whitespace of any kind at all. These modes work for the things
- * that OpenSSL itself does with Base64 decoding, but fail miserably
- * when used to read free-form Base64 text.
- *
- * The real solution would be to rewrite OpenSSL's Base64 filter to
- * support a third mode in which it accepts generalized Base64 text,
- * but that's been suggested before and nothing has been done about
- * it, probably because OpenSSL's Base64 implementation is completely
- * line-oriented and rather nasty.
- *
- * So this filter is a stop-gap to let us get the job done. Since it
- * uses a (relatively) well-defined OpenSSL internal API, it should be
- * reasonably stable.
- *
- * 98% of the code in this module is derived from "null filter" BIO
- * that ships with OpenSSL (BIO_TYPE_NULL_FILTER), so I consider this
- * to be a derivative work, thus am leaving it under OpenSSL's license.
- */
-
-/* Original crypto/bio/bf_null.c code was:
- *
- * Copyright (C) 1995-1998 Eric Young (eay@cryptsoft.com)
- * All rights reserved.
- *
- * This package is an SSL implementation written
- * by Eric Young (eay@cryptsoft.com).
- * The implementation was written so as to conform with Netscapes SSL.
- *
- * This library is free for commercial and non-commercial use as long as
- * the following conditions are aheared to. The following conditions
- * apply to all code found in this distribution, be it the RC4, RSA,
- * lhash, DES, etc., code; not just the SSL code. The SSL documentation
- * included with this distribution is covered by the same copyright terms
- * except that the holder is Tim Hudson (tjh@cryptsoft.com).
- *
- * Copyright remains Eric Young's, and as such any Copyright notices in
- * the code are not to be removed.
- * If this package is used in a product, Eric Young should be given attribution
- * as the author of the parts of the library used.
- * This can be in the form of a textual message at program startup or
- * in documentation (online or textual) provided with the package.
- *
- * Redistribution and use in source and binary forms, with or without
- * modification, are permitted provided that the following conditions
- * are met:
- * 1. Redistributions of source code must retain the copyright
- * notice, this list of conditions and the following disclaimer.
- * 2. Redistributions in binary form must reproduce the above copyright
- * notice, this list of conditions and the following disclaimer in the
- * documentation and/or other materials provided with the distribution.
- * 3. All advertising materials mentioning features or use of this software
- * must display the following acknowledgement:
- * "This product includes cryptographic software written by
- * Eric Young (eay@cryptsoft.com)"
- * The word 'cryptographic' can be left out if the rouines from the library
- * being used are not cryptographic related :-).
- * 4. If you include any Windows specific code (or a derivative thereof) from
- * the apps directory (application code) you must include an acknowledgement:
- * "This product includes software written by Tim Hudson (tjh@cryptsoft.com)"
- *
- * THIS SOFTWARE IS PROVIDED BY ERIC YOUNG ``AS IS'' AND
- * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
- * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
- * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
- * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
- * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
- * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
- * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
- * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
- * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
- * SUCH DAMAGE.
- *
- * The licence and distribution terms for any publically available version or
- * derivative of this code cannot be changed. i.e. this code cannot simply be
- * copied and put under another distribution licence
- * [including the GNU Public Licence.]
- */
-
-#include <stdio.h>
-#include <errno.h>
-#include <string.h>
-#include <openssl/bio.h>
-
-#include "bio_f_linebreak.h"
-
-#ifndef BIO_TYPE_LINEBREAK_FILTER
-#define BIO_TYPE_LINEBREAK_FILTER (99 | BIO_TYPE_FILTER)
-#endif
-
-#ifndef LINEBREAK_MAX_LINE
-#define LINEBREAK_MAX_LINE 72 /* We break anything longer than this */
-#endif
-
-static int linebreak_new(BIO *b)
-{
- b->init = 1;
- b->ptr = NULL;
- b->flags = 0;
- b->num = 0;
- return 1;
-}
-
-static int linebreak_free(BIO *b)
-{
- return b != NULL;
-}
-
-static int linebreak_read(BIO *b, char *out, int outl)
-{
- int ret = 0, want, n, i;
-
- if (out == NULL || b->next_bio == NULL || outl <= 0)
- return 0;
-
- while (outl > 0) {
-
- if (b->num >= LINEBREAK_MAX_LINE) {
- b->num = 0;
- *out++ = '\n';
- outl--;
- ret++;
- continue;
- }
-
- want = LINEBREAK_MAX_LINE - b->num;
- if (want > outl)
- want = outl;
-
- n = BIO_read(b->next_bio, out, want);
-
- BIO_clear_retry_flags(b);
- BIO_copy_next_retry(b);
-
- if (n > 0) {
- for (i = n - 1; i >= 0; i--)
- if (out[i] == '\n')
- break;
- if (i >= 0)
- b->num = n - i - 1;
- else
- b->num += n;
- out += n;
- outl -= n;
- ret += n;
- continue;
- }
-
- if (ret == 0)
- ret = n;
- break;
-
- }
-
- return ret;
-}
-
-static int linebreak_write(BIO *b, const char *in, int inl)
-{
- int ret = 0;
-
- if (in == NULL || inl <= 0 || b->next_bio == NULL)
- return 0;
-
- ret = BIO_write(b->next_bio, in, inl);
-
- BIO_clear_retry_flags(b);
- BIO_copy_next_retry(b);
-
- return ret;
-}
-
-static long linebreak_ctrl(BIO *b, int cmd, long num, void *ptr)
-{
- long ret;
-
- if (b->next_bio == NULL)
- return 0;
-
- switch (cmd) {
-
- case BIO_C_DO_STATE_MACHINE:
- BIO_clear_retry_flags(b);
- ret = BIO_ctrl(b->next_bio, cmd, num, ptr);
- BIO_copy_next_retry(b);
- return ret;
-
- case BIO_CTRL_DUP:
- return 0;
-
- default:
- return BIO_ctrl(b->next_bio, cmd, num, ptr);
-
- }
-}
-
-static long linebreak_callback_ctrl(BIO *b, int cmd, bio_info_cb *fp)
-{
- if (b->next_bio == NULL)
- return 0;
- else
- return BIO_callback_ctrl(b->next_bio, cmd, fp);
-}
-
-static int linebreak_puts(BIO *bp, const char *str)
-{
- if (bp->next_bio == NULL)
- return 0;
- else
- return BIO_puts(bp->next_bio, str);
-}
-
-static BIO_METHOD methods_linebreak = {
- BIO_TYPE_LINEBREAK_FILTER,
- "Linebreak filter",
- linebreak_write,
- linebreak_read,
- linebreak_puts,
- NULL, /* No linebreak_gets() */
- linebreak_ctrl,
- linebreak_new,
- linebreak_free,
- linebreak_callback_ctrl,
-};
-
-BIO_METHOD *BIO_f_linebreak(void)
-{
- return &methods_linebreak;
-}
-
-
-#ifdef __BIO_F_LINEBREAK_UNIT_TEST__
-
-int main (int argc, char *argv[])
-{
- BIO *ich = BIO_new_fd(0, 1);
- BIO *och = BIO_new_fd(1, 1);
- BIO *fch = BIO_new(BIO_f_linebreak());
- char buffer[4098];
- int n;
-
- if (ich == NULL || och == NULL || fch == NULL)
- return 1;
-
- BIO_push(fch, ich);
- ich = fch;
-
- while ((n = BIO_read(ich, buffer, sizeof(buffer))) > 0)
- BIO_write(och, buffer, n);
-
- BIO_free_all(ich);
- BIO_free_all(och);
- return 0;
-}
-
-#endif
diff --git a/rcynic-ng/bio_f_linebreak.h b/rcynic-ng/bio_f_linebreak.h
deleted file mode 100644
index b5becfa6..00000000
--- a/rcynic-ng/bio_f_linebreak.h
+++ /dev/null
@@ -1,10 +0,0 @@
-/* $Id$ */
-
-#ifndef __BIO_F_LINEBREAK__
-#define __BIO_F_LINEBREAK__
-
-#include <openssl/bio.h>
-
-BIO_METHOD *BIO_f_linebreak(void);
-
-#endif /* __BIO_F_LINEBREAK__ */
diff --git a/rcynic-ng/defasn1.h b/rcynic-ng/defasn1.h
deleted file mode 100644
index c14e0ce5..00000000
--- a/rcynic-ng/defasn1.h
+++ /dev/null
@@ -1,140 +0,0 @@
-/*
- * Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- *
- * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/* $Id$ */
-
-#ifndef __DEFASN1_H__
-#define __DEFASN1_H__
-
-#include <openssl/bio.h>
-#include <openssl/pem.h>
-#include <openssl/err.h>
-#include <openssl/x509.h>
-#include <openssl/x509v3.h>
-#include <openssl/safestack.h>
-#include <openssl/conf.h>
-#include <openssl/rand.h>
-#include <openssl/asn1t.h>
-#include <openssl/cms.h>
-
-/*
- * ASN.1 templates. Not sure that ASN1_EXP_OPT() is the right macro
- * for these defaulted "version" fields, but it's what the examples
- * for this construction use. Probably doesn't matter since this
- * program only decodes manifests, never encodes them.
- *
- * Putting this section under conditional compilation is a hack to
- * keep Doxygen's parser from becoming hopelessly confused by the
- * weird OpenSSL ASN.1 macros. Someday perhaps I'll have time to
- * track down the problem in Doxygen's parser, but this works for now.
- */
-
-#ifndef DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION
-
-typedef struct FileAndHash_st {
- ASN1_IA5STRING *file;
- ASN1_BIT_STRING *hash;
-} FileAndHash;
-
-DECLARE_STACK_OF(FileAndHash)
-
-ASN1_SEQUENCE(FileAndHash) = {
- ASN1_SIMPLE(FileAndHash, file, ASN1_IA5STRING),
- ASN1_SIMPLE(FileAndHash, hash, ASN1_BIT_STRING)
-} ASN1_SEQUENCE_END(FileAndHash)
-
-typedef struct Manifest_st {
- ASN1_INTEGER *version, *manifestNumber;
- ASN1_GENERALIZEDTIME *thisUpdate, *nextUpdate;
- ASN1_OBJECT *fileHashAlg;
- STACK_OF(FileAndHash) *fileList;
-} Manifest;
-
-ASN1_SEQUENCE(Manifest) = {
- ASN1_EXP_OPT(Manifest, version, ASN1_INTEGER, 0),
- ASN1_SIMPLE(Manifest, manifestNumber, ASN1_INTEGER),
- ASN1_SIMPLE(Manifest, thisUpdate, ASN1_GENERALIZEDTIME),
- ASN1_SIMPLE(Manifest, nextUpdate, ASN1_GENERALIZEDTIME),
- ASN1_SIMPLE(Manifest, fileHashAlg, ASN1_OBJECT),
- ASN1_SEQUENCE_OF(Manifest, fileList, FileAndHash)
-} ASN1_SEQUENCE_END(Manifest)
-
-DECLARE_ASN1_FUNCTIONS(FileAndHash)
-DECLARE_ASN1_FUNCTIONS(Manifest)
-
-IMPLEMENT_ASN1_FUNCTIONS(FileAndHash)
-IMPLEMENT_ASN1_FUNCTIONS(Manifest)
-
-typedef struct ROAIPAddress_st {
- ASN1_BIT_STRING *IPAddress;
- ASN1_INTEGER *maxLength;
-} ROAIPAddress;
-
-DECLARE_STACK_OF(ROAIPAddress)
-
-ASN1_SEQUENCE(ROAIPAddress) = {
- ASN1_SIMPLE(ROAIPAddress, IPAddress, ASN1_BIT_STRING),
- ASN1_OPT(ROAIPAddress, maxLength, ASN1_INTEGER)
-} ASN1_SEQUENCE_END(ROAIPAddress)
-
-typedef struct ROAIPAddressFamily_st {
- ASN1_OCTET_STRING *addressFamily;
- STACK_OF(ROAIPAddress) *addresses;
-} ROAIPAddressFamily;
-
-DECLARE_STACK_OF(ROAIPAddressFamily)
-
-ASN1_SEQUENCE(ROAIPAddressFamily) = {
- ASN1_SIMPLE(ROAIPAddressFamily, addressFamily, ASN1_OCTET_STRING),
- ASN1_SEQUENCE_OF(ROAIPAddressFamily, addresses, ROAIPAddress)
-} ASN1_SEQUENCE_END(ROAIPAddressFamily)
-
-typedef struct ROA_st {
- ASN1_INTEGER *version, *asID;
- STACK_OF(ROAIPAddressFamily) *ipAddrBlocks;
-} ROA;
-
-ASN1_SEQUENCE(ROA) = {
- ASN1_EXP_OPT(ROA, version, ASN1_INTEGER, 0),
- ASN1_SIMPLE(ROA, asID, ASN1_INTEGER),
- ASN1_SEQUENCE_OF(ROA, ipAddrBlocks, ROAIPAddressFamily)
-} ASN1_SEQUENCE_END(ROA)
-
-DECLARE_ASN1_FUNCTIONS(ROAIPAddress)
-DECLARE_ASN1_FUNCTIONS(ROAIPAddressFamily)
-DECLARE_ASN1_FUNCTIONS(ROA)
-
-IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddress)
-IMPLEMENT_ASN1_FUNCTIONS(ROAIPAddressFamily)
-IMPLEMENT_ASN1_FUNCTIONS(ROA)
-
-#endif /* DOXYGEN_GETS_HOPELESSLY_CONFUSED_BY_THIS_SECTION */
-
-#endif /* __DEFASN1_H__ */
diff --git a/rcynic-ng/defstack.awk b/rcynic-ng/defstack.awk
deleted file mode 100644
index 4593cb33..00000000
--- a/rcynic-ng/defstack.awk
+++ /dev/null
@@ -1,71 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2011 Internet Systems Consortium ("ISC")
-#
-# Permission to use, copy, modify, and distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-function print_line(name, line)
-{
- gsub(/%/, name, line);
- print line;
-}
-
-function define_stack(name)
-{
- print_line(name, "/*");
- print_line(name, " * Safestack macros for %.");
- print_line(name, " */");
- print_line(name, "#define sk_%_new(st) SKM_sk_new(%, (st))");
- print_line(name, "#define sk_%_new_null() SKM_sk_new_null(%)");
- print_line(name, "#define sk_%_free(st) SKM_sk_free(%, (st))");
- print_line(name, "#define sk_%_num(st) SKM_sk_num(%, (st))");
- print_line(name, "#define sk_%_value(st, i) SKM_sk_value(%, (st), (i))");
- print_line(name, "#define sk_%_set(st, i, val) SKM_sk_set(%, (st), (i), (val))");
- print_line(name, "#define sk_%_zero(st) SKM_sk_zero(%, (st))");
- print_line(name, "#define sk_%_push(st, val) SKM_sk_push(%, (st), (val))");
- print_line(name, "#define sk_%_unshift(st, val) SKM_sk_unshift(%, (st), (val))");
- print_line(name, "#define sk_%_find(st, val) SKM_sk_find(%, (st), (val))");
- print_line(name, "#define sk_%_find_ex(st, val) SKM_sk_find_ex(%, (st), (val))");
- print_line(name, "#define sk_%_delete(st, i) SKM_sk_delete(%, (st), (i))");
- print_line(name, "#define sk_%_delete_ptr(st, ptr) SKM_sk_delete_ptr(%, (st), (ptr))");
- print_line(name, "#define sk_%_insert(st, val, i) SKM_sk_insert(%, (st), (val), (i))");
- print_line(name, "#define sk_%_set_cmp_func(st, cmp) SKM_sk_set_cmp_func(%, (st), (cmp))");
- print_line(name, "#define sk_%_dup(st) SKM_sk_dup(%, st)");
- print_line(name, "#define sk_%_pop_free(st, free_func) SKM_sk_pop_free(%, (st), (free_func))");
- print_line(name, "#define sk_%_shift(st) SKM_sk_shift(%, (st))");
- print_line(name, "#define sk_%_pop(st) SKM_sk_pop(%, (st))");
- print_line(name, "#define sk_%_sort(st) SKM_sk_sort(%, (st))");
- print_line(name, "#define sk_%_is_sorted(st) SKM_sk_is_sorted(%, (st))");
- print_line(name, "");
-}
-
-BEGIN {
- print "/*";
- print " * Automatically generated, do not edit.";
- print " * Generator $Id$";
- print " */";
- print "";
- print "#ifndef __DEFSTACK_H__";
- print "#define __DEFSTACK_H__";
- print "";
-}
-
-/DECLARE_STACK_OF/ {
- sub(/^[ \t]+/, "");
- if (split($0, a, /[() \t]+/) > 1 && a[1] == "DECLARE_STACK_OF")
- define_stack(a[2]);
-}
-
-END {
- print "#endif /* __DEFSTACK_H__ */";
-}
diff --git a/rcynic-ng/installation-scripts/darwin/RCynic/RCynic b/rcynic-ng/installation-scripts/darwin/RCynic/RCynic
deleted file mode 100755
index bf36d6e5..00000000
--- a/rcynic-ng/installation-scripts/darwin/RCynic/RCynic
+++ /dev/null
@@ -1,75 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-. /etc/rc.common
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_jaildir="/var/rcynic"}
-: ${rcynic_user="rcynic"}
-: ${rcynic_group="rcynic"}
-
-StartService()
-{
- /bin/test -d "${rcynic_jaildir}" || /bin/mkdir "${rcynic_jaildir}"
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-
- /usr/sbin/mtree -deU -p "${rcynic_jaildir}" <<EOF
-
- /set type=dir uname=root gname=wheel mode=0555
- .
- bin
- ..
- dev
- ..
- etc
- trust-anchors
- ..
- ..
- usr
- lib
- system
- ..
- ..
- ..
- var
- run
- ..
- ..
- data uname=${rcynic_user} gname=${rcynic_group} mode=0755
- ..
- ..
-EOF
-
- /bin/chmod -R a-w "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
- /usr/sbin/chown -R root:wheel "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
-
- if ! /sbin/mount_devfs devfs "${rcynic_jaildir}/dev"; then
- echo "Mounting devfs on ${rcynic_jaildir}/dev failed..."
- exit 1
- fi
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_jaildir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-
- /bin/ln -f /var/run/mDNSResponder "${rcynic_jaildir}/var/run/mDNSResponder"
-}
-
-StopService()
-{
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-}
-
-RestartService()
-{
- StartService
-}
-
-RunService "$1"
diff --git a/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist b/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist
deleted file mode 100644
index ca46b676..00000000
--- a/rcynic-ng/installation-scripts/darwin/RCynic/StartupParameters.plist
+++ /dev/null
@@ -1,19 +0,0 @@
-<?xml version="1.0" encoding="UTF-8"?>
-<!DOCTYPE plist PUBLIC "-//Apple Computer//DTD PLIST 1.0//EN" "http://www.apple.com/DTDs/PropertyList-1.0.dtd">
-<plist version="1.0">
-<dict>
- <key>Description</key>
- <string>RCynic Setup</string>
- <key>OrderPreference</key>
- <string>None</string>
- <key>Provides</key>
- <array>
- <string>RCynic</string>
- </array>
- <key>Uses</key>
- <array>
- <string>Network</string>
- <string>Resolver</string>
- </array>
- </dict>
-</plist>
diff --git a/rcynic-ng/installation-scripts/darwin/install.sh b/rcynic-ng/installation-scripts/darwin/install.sh
deleted file mode 100644
index 1f0dfe7d..00000000
--- a/rcynic-ng/installation-scripts/darwin/install.sh
+++ /dev/null
@@ -1,193 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Create a chroot jail for rcynic.
-#
-# This is approximately what a pkg-install script might do if this were
-# a FreeBSD port. Perhaps some day it will be.
-
-: ${jaildir="${DESTDIR}/var/rcynic"}
-: ${jailuser="rcynic"}
-: ${jailgroup="rcynic"}
-: ${jailname="RPKI Validation System"}
-: ${setupcron="YES"}
-
-echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
-
-if /usr/bin/dscl . -read "/Groups/${jailgroup}" >/dev/null 2>&1
-then
- echo "You already have a group \"${jailgroup}\", so I will use it."
-elif gid="$(/usr/bin/dscl . -list /Groups PrimaryGroupID | /usr/bin/awk 'BEGIN {gid = 501} $2 >= gid {gid = 1 + $2} END {print gid}')" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" RealName "${jailname}" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" PrimaryGroupID "$gid" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" GeneratedUID "$(/usr/bin/uuidgen)" &&
- /usr/bin/dscl . -create "/Groups/${jailgroup}" Password "*"
-then
- echo "Added group \"${jailgroup}\"."
-else
- echo "Adding group \"${jailgroup}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if /usr/bin/dscl . -read "/Users/${jailuser}" >/dev/null 2>&1
-then
- echo "You already have a user \"${jailuser}\", so I will use it."
-elif uid="$(/usr/bin/dscl . -list /Users UniqueID | /usr/bin/awk 'BEGIN {uid = 501} $2 >= uid {uid = 1 + $2} END {print uid}')" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" UserShell "/usr/bin/false" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" RealName "${jailname}" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" UniqueID "$uid" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" PrimaryGroupID "$gid" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" NFSHomeDirectory "/var/empty" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" GeneratedUID "$(/usr/bin/uuidgen)" &&
- /usr/bin/dscl . -create "/Users/${jailuser}" Password "*"
-then
- echo "Added user \"${jailuser}\"."
-else
- echo "Adding user \"${jailuser}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if ! /bin/test -d "${jaildir}"; then
- /bin/mkdir "${jaildir}"
-fi
-
-if /usr/bin/install -o root -g wheel -d ${DESTDIR}/Library/StartupItems/RCynic &&
- /usr/bin/install -o root -g wheel -m 555 RCynic/RCynic RCynic/StartupParameters.plist ${DESTDIR}/Library/StartupItems/RCynic; then
- echo "Installed ${DESTDIR}/Library/StartupItems/RCynic"
-else
- echo "Installing ${DESTDIR}/Library/StartupItems/RCynic failed"
- exit 1
-fi
-
-echo "Running ${DESTDIR}/Library/StartupItems/RCynic/RCynic to set up directories"
-
-if ! rcynic_jaildir="$jaildir" rcynic_user="$jailuser" rcynic_group="$jailgroup" /Library/StartupItems/RCynic/RCynic start; then
- echo "Directory setup failed"
- exit 1
-fi
-
-if /bin/test -r "$jaildir/etc/rcynic.conf"; then
- echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
-elif /usr/bin/install -m 444 -o root -g wheel -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
- echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
- for i in ../../sample-trust-anchors/*.tal; do
- j="$jaildir/etc/trust-anchors/${i##*/}"
- /bin/test -r "$i" || continue
- /bin/test -r "$j" && continue
- echo "Installing $i as $j"
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- done
- j=1
- for i in $jaildir/etc/trust-anchors/*.tal; do
- echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
- j=$((j+1))
- done
-else
- echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
- exit 1
-fi
-
-echo "Installing rcynic as ${jaildir}/bin/rcynic"
-
-/usr/bin/install -m 555 -o root -g wheel -p ../../rcynic "${jaildir}/bin/rcynic"
-
-if /bin/test -x "$jaildir/bin/rsync"; then
- echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
-elif /usr/bin/install -m 555 -o root -g wheel -p /usr/bin/rsync "${jaildir}/bin/rsync"; then
- echo "Installed ${jaildir}/bin/rsync"
-else
- echo "Installing ${jaildir}/bin/rsync failed"
- exit 1
-fi
-
-echo "Copying required shared libraries"
-
-shared_libraries="${jaildir}/bin/rcynic ${jaildir}/bin/rsync"
-while true
-do
- closure="$(/usr/bin/otool -L ${shared_libraries} | /usr/bin/awk '/:$/ {next} {print $1}' | /usr/bin/sort -u)"
- if test "x$shared_libraries" = "x$closure"
- then
- break
- else
- shared_libraries="$closure"
- fi
-done
-
-for shared in /usr/lib/dyld $shared_libraries
-do
- if /bin/test -r "${jaildir}/${shared}"
- then
- echo "You already have a \"${jaildir}/${shared}\", so I will use it"
- elif /usr/bin/install -m 555 -o root -g wheel -p "${shared}" "${jaildir}/${shared}"
- then
- echo "Copied ${shared} into ${jaildir}"
- else
- echo "Unable to copy ${shared} into ${jaildir}"
- exit 1
- fi
-done
-
-if /usr/bin/install -m 444 -o root -g wheel -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
- echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
-else
- echo "Installing rcynic.xsl failed"
- exit 1
-fi
-
-echo "Setting up root's crontab to run jailed rcynic"
-
-case "$setupcron" in
-YES|yes)
- /usr/bin/crontab -l -u root 2>/dev/null |
- /usr/bin/awk -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
- BEGIN {
- cmd = "exec /usr/sbin/chroot -u " jailuser " -g " jailgroup " " jaildir;
- cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
- }
- $0 !~ cmd {
- print;
- }
- END {
- "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
- printf "%u * * * *\t%s\n", $1 % 60, cmd;
- }' |
- /usr/bin/crontab -u root -
- /bin/cat <<EOF
-
- crontab is set up to run rcynic hourly, at a randomly selected
- minute (to spread load on the rsync servers). Please do NOT
- adjust this to run on the hour. In particular please do NOT
- adjust this to run at midnight UTC.
-EOF
- ;;
-
-*)
- /bin/cat <<EOF
-
- You'll need to add a crontab entry running the following command as root:
-
- /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
-
- Please try to pick a random time for this, don't just run it on the hour,
- or at local midnight, or, worst of all, at midnight UTC.
-
-EOF
- ;;
-
-esac
-
-/bin/cat <<EOF
-
- Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
- If you did not install your own trust anchors, a default set
- of SAMPLE trust anchors may have been installed for you, but
- you, the relying party, are the only one who can decide
- whether you trust those anchors. rcynic will not do anything
- useful without good trust anchors.
-
-EOF
diff --git a/rcynic-ng/installation-scripts/deinstall.sh b/rcynic-ng/installation-scripts/deinstall.sh
deleted file mode 100644
index 113794a4..00000000
--- a/rcynic-ng/installation-scripts/deinstall.sh
+++ /dev/null
@@ -1,5 +0,0 @@
-#!/bin/sh -
-# $Id$
-
-echo Sorry, automated deinstallation of rcynic is not implemented yet
-exit 1
diff --git a/rcynic-ng/installation-scripts/freebsd/install.sh b/rcynic-ng/installation-scripts/freebsd/install.sh
deleted file mode 100644
index 6c3453cb..00000000
--- a/rcynic-ng/installation-scripts/freebsd/install.sh
+++ /dev/null
@@ -1,151 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Create a chroot jail for rcynic.
-#
-# This is approximately what a pkg-install script might do if this were
-# a FreeBSD port. Perhaps some day it will be.
-
-: ${jaildir="${DESTDIR}/var/rcynic"}
-: ${jailuser="rcynic"}
-: ${jailgroup="rcynic"}
-: ${setupcron="NO"}
-
-echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
-
-if /usr/sbin/pw groupshow "${jailgroup}" 2>/dev/null; then
- echo "You already have a group \"${jailgroup}\", so I will use it."
-elif /usr/sbin/pw groupadd ${jailgroup}; then
- echo "Added group \"${jailgroup}\"."
-else
- echo "Adding group \"${jailgroup}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if /usr/sbin/pw usershow "${jailuser}" 2>/dev/null; then
- echo "You already have a user \"${jailuser}\", so I will use it."
-elif /usr/sbin/pw useradd ${jailuser} -g ${jailgroup} -h - -d /nonexistent -s /usr/sbin/nologin -c "RPKI validation system"; then
- echo "Added user \"${jailuser}\"."
-else
- echo "Adding user \"${jailuser}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-if ! /bin/test -d "${jaildir}"; then
- /bin/mkdir "${jaildir}"
-fi
-
-if /usr/bin/install -m 555 -o root -g wheel -p rc.d.rcynic ${DESTDIR}/usr/local/etc/rc.d/rcynic; then
- echo "Installed rc.d.rcynic as ${DESTDIR}/usr/local/etc/rc.d/rcynic"
-else
- echo "Installing ${DESTDIR}/usr/local/etc/rc.d/rcynic failed"
- exit 1
-fi
-
-echo "Running /usr/local/etc/rc.d/rcynic to set up directories"
-
-if ! rcynic_jaildir="$jaildir" rcynic_user="$jailuser" rcynic_group="$jailgroup" /bin/sh /usr/local/etc/rc.d/rcynic start; then
- echo "Directory setup failed"
- exit 1
-fi
-
-if /bin/test -r "$jaildir/etc/rcynic.conf"; then
- echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
-elif /usr/bin/install -m 444 -o root -g wheel -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
- echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
- for i in ../../sample-trust-anchors/*.tal; do
- j="$jaildir/etc/trust-anchors/${i##*/}"
- /bin/test -r "$i" || continue
- /bin/test -r "$j" && continue
- echo "Installing $i as $j"
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- done
- j=1
- for i in $jaildir/etc/trust-anchors/*.tal; do
- echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
- j=$((j+1))
- done
-else
- echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
- exit 1
-fi
-
-echo "Installing rcynic as ${jaildir}/bin/rcynic"
-
-/usr/bin/install -m 555 -o root -g wheel -p ../../rcynic "${jaildir}/bin/rcynic"
-
-if /bin/test ! -x "$jaildir/bin/rsync" -a ! -x ../../static-rsync/rsync; then
- echo "Building static rsync for jail, this may take a little while"
- (cd ../../static-rsync && exec make)
-fi
-
-if /bin/test -x "$jaildir/bin/rsync"; then
- echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
-elif /usr/bin/install -m 555 -o root -g wheel -p ../../static-rsync/rsync "${jaildir}/bin/rsync"; then
- echo "Installed static rsync as \"${jaildir}/bin/rsync\""
-else
- echo "Installing static rsync failed"
- exit 1
-fi
-
-if /usr/bin/install -m 444 -o root -g wheel -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
- echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
-else
- echo "Installing rcynic.xsl failed"
- exit 1
-fi
-
-echo "Setting up root's crontab to run jailed rcynic"
-
-case "$setupcron" in
-YES|yes)
- /usr/bin/crontab -l -u root 2>/dev/null |
- /usr/bin/awk -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
- BEGIN {
- cmd = "exec /usr/sbin/chroot -u " jailuser " -g " jailgroup " " jaildir;
- cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
- }
- $0 !~ cmd {
- print;
- }
- END {
- "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
- printf "%u * * * *\t%s\n", $1 % 60, cmd;
- }' |
- /usr/bin/crontab -u root -
- /bin/cat <<EOF
-
- crontab is set up to run rcynic hourly, at a randomly selected
- minute (to spread load on the rsync servers). Please do NOT
- adjust this to run on the hour. In particular please do NOT
- adjust this to run at midnight UTC.
-EOF
- ;;
-
-*)
- /bin/cat <<EOF
-
- You'll need to add a crontab entry running the following command as root:
-
- /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
-
- Please try to pick a random time for this, don't just run it on the hour,
- or at local midnight, or, worst of all, at midnight UTC.
-
-EOF
- ;;
-
-esac
-
-/bin/cat <<EOF
-
- Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
- If you did not install your own trust anchors, a default set
- of SAMPLE trust anchors may have been installed for you, but
- you, the relying party, are the only one who can decide
- whether you trust those anchors. rcynic will not do anything
- useful without good trust anchors.
-
-EOF
diff --git a/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic b/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic
deleted file mode 100755
index b86c9b81..00000000
--- a/rcynic-ng/installation-scripts/freebsd/rc.d.rcynic
+++ /dev/null
@@ -1,71 +0,0 @@
-#!/bin/sh -
-#
-# $Id$
-#
-# PROVIDE: rcynic
-# REQUIRE: DAEMON
-# KEYWORD: nojail
-
-. /etc/rc.subr
-
-name="rcynic"
-start_cmd="rcynic_start"
-stop_cmd="rcynic_stop"
-
-: ${rcynic_jaildir="/var/rcynic"}
-: ${rcynic_user="rcynic"}
-: ${rcynic_group="rcynic"}
-
-rcynic_start()
-{
- /bin/test -d "${rcynic_jaildir}" || /bin/mkdir "${rcynic_jaildir}"
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-
- /usr/sbin/mtree -deU -p "${rcynic_jaildir}" <<EOF
-
- /set type=dir uname=root gname=wheel mode=0555
- .
- bin
- ..
- dev
- ..
- etc
- trust-anchors
- ..
- ..
- var
- run
- ..
- ..
- data uname=${rcynic_user} gname=${rcynic_group} mode=0755
- ..
- ..
-EOF
-
- /bin/chmod -R a-w "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
- /usr/sbin/chown -R root:wheel "${rcynic_jaildir}/bin" "${rcynic_jaildir}/etc"
-
- if ! /sbin/mount -t devfs dev "${rcynic_jaildir}/dev"; then
- echo "Mounting devfs on ${rcynic_jaildir}/dev failed..."
- exit 1
- fi
-
- /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply hide
- /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply path null unhide
- /sbin/devfs -m "${rcynic_jaildir}/dev" rule apply path random unhide
-
- for i in /etc/localtime /etc/resolv.conf; do
- j="${rcynic_jaildir}${i}"
- if /bin/test -r "$i" && ! /usr/bin/cmp -s "$i" "$j"; then
- /usr/bin/install -m 444 -o root -g wheel -p "$i" "$j"
- fi
- done
-}
-
-rcynic_stop()
-{
- /sbin/umount "${rcynic_jaildir}/dev" 2>/dev/null
-}
-
-load_rc_config $name
-run_rc_command "$1"
diff --git a/rcynic-ng/installation-scripts/install.sh b/rcynic-ng/installation-scripts/install.sh
deleted file mode 100644
index 8c708a28..00000000
--- a/rcynic-ng/installation-scripts/install.sh
+++ /dev/null
@@ -1,14 +0,0 @@
-#!/bin/sh -
-# $Id$
-
-set -e
-
-case "${host_os}" in
-
-freebsd*) cd freebsd; . ./install.sh;;
-darwin*) cd darwin; . ./install.sh;;
-linux*) cd linux; . ./install.sh;;
-
-*) echo 1>&2 "Don't know how to install rcynic jail on platform ${host_os}"
- exit 1;;
-esac
diff --git a/rcynic-ng/installation-scripts/linux/install.sh.in b/rcynic-ng/installation-scripts/linux/install.sh.in
deleted file mode 100644
index f0bdc505..00000000
--- a/rcynic-ng/installation-scripts/linux/install.sh.in
+++ /dev/null
@@ -1,236 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Create a chroot jail for rcynic.
-#
-# This is approximately what a package installation script might do.
-
-: ${jaildir="${DESTDIR}/var/rcynic"}
-: ${jailuser="rcynic"}
-: ${jailgroup="rcynic"}
-: ${setupcron="YES"}
-
-AWK='@AWK@'
-SORT='@SORT@'
-
-echo "Setting up \"${jaildir}\" as a chroot jail for rcynic."
-
-if ${AWK} -F: -v jailgroup="${jailgroup}" 'BEGIN {status = 1} $1 == jailgroup {status = 0} END {exit status}' /etc/group
-then
- echo "You already have a group \"${jailgroup}\", so I will use it."
-elif /usr/sbin/groupadd ${jailgroup}
-then
- echo "Added group \"${jailgroup}\"."
-else
- echo "Adding group \"${jailgroup}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-# The adduser program on CentOS 5.x uses the -n flag instead of -N to
-# avoid creating the associated group.
-nogroup='-N'
-if test -f /etc/redhat-release; then
- read vendor release version < /etc/redhat-release
- if test $vendor = CentOS; then
- nogroup='-n'
- fi
-fi
-
-if ${AWK} -F: -v jailuser="${jailuser}" 'BEGIN {status = 1} $1 == jailuser {status = 0} END {exit status}' /etc/passwd
-then
- echo "You already have a user \"${jailuser}\", so I will use it."
-elif /usr/sbin/useradd -g ${jailgroup} -M $nogroup -d "${jaildir}" -s /sbin/nologin -c "RPKI validation system" ${jailuser}
-then
- echo "Added user \"${jailuser}\"."
-else
- echo "Adding user \"${jailuser}\" failed..."
- echo "Please create it, then try again."
- exit 1
-fi
-
-# test for x86_64 target
-if test -d /lib64; then
- libdir=/lib64
-else
- libdir=/lib
-fi
-
-echo "Building directories"
-
-if ! /bin/mkdir -p -v -m 555 \
- "${jaildir}/bin" \
- "${jaildir}/dev" \
- "${jaildir}/etc/trust-anchors" \
- "${jaildir}/${libdir}" \
- "${jaildir}/usr/lib" \
- "${jaildir}/data"
-then
- echo "Unable to build directories under \"${jaildir}\", please fix this then try again."
- exit 1
-fi
-
-echo "Installing device inodes"
-
-if ! (cd /dev; /bin/ls null zero random urandom | /bin/cpio -puv "${jaildir}/dev")
-then
- echo "Unable to install device inodes in ${jaildir}/dev/, please fix this then try again"
- exit 1
-fi
-
-echo "Copying files from /etc"
-
-for i in /etc/localtime /etc/resolv.conf /etc/passwd /etc/group
-do
- j="${jaildir}${i}"
- if test -r "$i" &&
- ! /usr/bin/cmp -s "$i" "$j" &&
- ! /bin/cp -p "$i" "$j"
- then
- echo "Unable to copy $i to ${jaildir}, please fix this then try again"
- exit 1
- fi
-done
-
-echo "Whacking file permissions"
-
-if ! /bin/chmod -R a-w "${jaildir}/bin" "${jaildir}/etc" ||
- ! /bin/chmod -R 755 "${jaildir}/data" ||
- ! /bin/chown -R root:root "${jaildir}/bin" "${jaildir}/etc" ||
- ! /bin/chown -R "${jailuser}:${jailgroup}" "${jaildir}/data"
-then
- echo "Unable to set file permissions and ownerships correctly, please fix this and try again"
- exit 1
-fi
-
-if test -r "$jaildir/etc/rcynic.conf"; then
- echo "You already have config file \"${jaildir}/etc/rcynic.conf\", so I will use it."
-elif /usr/bin/install -m 444 -o root -g root -p ../sample-rcynic.conf "${jaildir}/etc/rcynic.conf"; then
- echo "Installed minimal ${jaildir}/etc/rcynic.conf, adding SAMPLE trust anchors"
- for i in ../../sample-trust-anchors/*.tal; do
- j="$jaildir/etc/trust-anchors/${i##*/}"
- test -r "$i" || continue
- test -r "$j" && continue
- echo "Installing $i as $j"
- /usr/bin/install -m 444 -o root -g root -p "$i" "$j"
- done
- j=1
- for i in $jaildir/etc/trust-anchors/*.tal; do
- echo >>"${jaildir}/etc/rcynic.conf" "trust-anchor-locator.$j = /etc/trust-anchors/${i##*/}"
- j=$((j+1))
- done
-else
- echo "Installing minimal ${jaildir}/etc/rcynic.conf failed"
- exit 1
-fi
-
-echo "Installing rcynic as ${jaildir}/bin/rcynic"
-
-/usr/bin/install -m 555 -o root -g root -p ../../rcynic "${jaildir}/bin/rcynic"
-
-if test -x "$jaildir/bin/rsync"; then
- echo "You already have an executable \"$jaildir/bin/rsync\", so I will use it"
-elif /usr/bin/install -m 555 -o root -g root -p /usr/bin/rsync "${jaildir}/bin/rsync"; then
- echo "Installed ${jaildir}/bin/rsync"
-else
- echo "Installing ${jaildir}/bin/rsync failed"
- exit 1
-fi
-
-echo "Copying required shared libraries"
-
-shared_libraries="${jaildir}/bin/rcynic ${jaildir}/bin/rsync"
-while true
-do
- closure="$(/usr/bin/ldd ${shared_libraries} |
- ${AWK} -v "rcynic=${jaildir}/bin/rcynic" -v "rsync=${jaildir}/bin/rsync" \
- '{sub(/:$/, "")} $0 == rcynic || $0 == rsync {next} {for (i = 1; i <= NF; i++) if ($i ~ /^\//) print $i}' |
- ${SORT} -u)"
- if test "x$shared_libraries" = "x$closure"
- then
- break
- else
- shared_libraries="$closure"
- fi
-done
-
-# Under CentOS 5.5, rsync requires libresolv, but ldd doesn't show
-# it.
-if test -f ${libdir}/libresolv.so.2; then
- shared_libraries="${shared_libraries} ${libdir}/libresolv.so.2"
-fi
-
-for shared in ${libdir}/ld*.so $shared_libraries ${libdir}/libnss*.so.*
-do
- if test -r "${jaildir}/${shared}"
- then
- echo "You already have a \"${jaildir}${shared}\", so I will use it"
- elif /usr/bin/install -m 555 -o root -g root -d "${jaildir}${shared%/*}" &&
- /usr/bin/install -m 555 -o root -g root -p "${shared}" "${jaildir}${shared}"
- then
- echo "Copied ${shared} into ${jaildir}"
- else
- echo "Unable to copy ${shared} into ${jaildir}"
- exit 1
- fi
-done
-
-if /usr/bin/install -m 444 -o root -g root -p ../../rcynic.xsl "${jaildir}/etc/rcynic.xsl"; then
- echo "Installed rcynic.xsl as \"${jaildir}/etc/rcynic.xsl\""
-else
- echo "Installing rcynic.xsl failed"
- exit 1
-fi
-
-echo "Setting up root's crontab to run jailed rcynic"
-
-case "$setupcron" in
-YES|yes)
- /usr/bin/crontab -l -u root 2>/dev/null |
- ${AWK} -v "jailuser=$jailuser" -v "jailgroup=$jailgroup" -v "jaildir=$jaildir" '
- BEGIN {
- cmd = "exec /usr/sbin/chroot --userspec=" jailuser ":" jailgroup " " jaildir;
- cmd = cmd " /bin/rcynic -c /etc/rcynic.conf";
- }
- $0 !~ cmd {
- print;
- }
- END {
- "/usr/bin/hexdump -n 2 -e \"\\\"%u\\\\\\n\\\"\" /dev/random" | getline;
- printf "%u * * * *\t%s\n", $1 % 60, cmd;
- }' |
- /usr/bin/crontab -u root -
- /bin/cat <<EOF
-
- crontab is set up to run rcynic hourly, at a randomly selected
- minute (to spread load on the rsync servers). Please do NOT
- adjust this to run on the hour. In particular please do NOT
- adjust this to run at midnight UTC.
-EOF
- ;;
-
-*)
- /bin/cat <<EOF
-
- You'll need to add a crontab entry running the following command as root:
-
- /usr/sbin/chroot -u $jailuser -g $jailgroup $jaildir /bin/rcynic -c /etc/rcynic.conf
-
- Please try to pick a random time for this, don't just run it on the hour,
- or at local midnight, or, worst of all, at midnight UTC.
-
-EOF
- ;;
-
-esac
-
-/bin/cat <<EOF
-
- Jail set up. You may need to customize $jaildir/etc/rcynic.conf.
- If you did not install your own trust anchors, a default set
- of SAMPLE trust anchors may have been installed for you, but
- you, the relying party, are the only one who can decide
- whether you trust those anchors. rcynic will not do anything
- useful without good trust anchors.
-
-EOF
diff --git a/rcynic-ng/installation-scripts/sample-rcynic.conf b/rcynic-ng/installation-scripts/sample-rcynic.conf
deleted file mode 100644
index 6e464e7e..00000000
--- a/rcynic-ng/installation-scripts/sample-rcynic.conf
+++ /dev/null
@@ -1,27 +0,0 @@
-# $Id$
-#
-# Sample rcynic configuration file for FreeBSD jailed environment
-
-[rcynic]
-rsync-program = /bin/rsync
-authenticated = /data/authenticated
-old-authenticated = /data/authenticated.old
-unauthenticated = /data/unauthenticated
-lockfile = /data/lock
-jitter = 600
-use-syslog = true
-log-level = log_usage_err
-
-# You need to specify some trust anchors here, eg:
-
-#trust-anchor.1 = /etc/trust-anchors/ta-1.cer
-#trust-anchor.2 = /etc/trust-anchors/ta-2.cer
-
-# or, using the "Trust Anchor Locator" form:
-
-#trust-anchor-locator.1 = /etc/trust-anchors/ta-1.tal
-#trust-anchor-locator.2 = /etc/trust-anchors/ta-2.tal
-
-# The choice between these two formats depends largely on the policies
-# of the entity generating the corresponding trust anchor, ie, will
-# probably be made for you by the generating entity.
diff --git a/rcynic-ng/make-tal.sh b/rcynic-ng/make-tal.sh
deleted file mode 100755
index 854a76b9..00000000
--- a/rcynic-ng/make-tal.sh
+++ /dev/null
@@ -1,42 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# Generate a trust anchor locator (TAL) given the rsync URI for a
-# self-signed RFC 3779 certificate.
-#
-# Usage: make-tal.sh uri [local_copy_of_certificate]
-#
-# The optional second parameter is the name of a local copy of the
-# certificate to be checked against the copy retrieved from the URI;
-# if present, this should be a local X.509 file in DER format.
-
-case "$1" in rsync://*) :;; *) echo 1>&2 "\"$1\" is not a rsync URI"; exit 1;; esac
-
-tmpfile="make-tal.tmp.$$"
-trap "rm -f $tmpfile" 0 1 2 15
-
-rsync "$1" "$tmpfile" || exit
-
-if test -n "$2"
-then
- diff -q "$tmpfile" "$2" || exit
-fi
-
-echo "$1"
-echo
-openssl x509 -inform DER -in "$tmpfile" -pubkey -noout |
-awk '!/-----(BEGIN|END)/'
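For illustration, the resulting TAL is plain text: the rsync URI on one line, a blank line, then the Base64 body of the certificate's public key, i.e. the output of "openssl x509 -pubkey" with the BEGIN/END marker lines stripped. The URI below is a placeholder:

    rsync://rpki.example.org/root.cer

    <Base64-encoded subjectPublicKeyInfo, typically wrapped
     across several lines by openssl>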
diff --git a/rcynic-ng/rcynic.c b/rcynic-ng/rcynic.c
deleted file mode 100644
index 09fe1360..00000000
--- a/rcynic-ng/rcynic.c
+++ /dev/null
@@ -1,4644 +0,0 @@
-/*
- * Copyright (C) 2009--2011 Internet Systems Consortium ("ISC")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- *
- * Portions copyright (C) 2006--2008 American Registry for Internet Numbers ("ARIN")
- *
- * Permission to use, copy, modify, and distribute this software for any
- * purpose with or without fee is hereby granted, provided that the above
- * copyright notice and this permission notice appear in all copies.
- *
- * THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
- * REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- * AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
- * INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- * LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- * OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- * PERFORMANCE OF THIS SOFTWARE.
- */
-
-/* $Id$ */
-
-/**
- * @mainpage
- *
- * "Cynical rsync": Recursively walk RPKI tree using rsync to pull
- * data from remote sites, validating certificates and CRLs as we go.
- *
- * Doxygen doesn't quite know what to make of a one-file C program,
- * and ends up putting most of the interesting data @link rcynic.c
- * here. @endlink
- */
-
-#include <assert.h>
-#include <stdio.h>
-#include <stdlib.h>
-#include <stdarg.h>
-#include <unistd.h>
-#include <sys/types.h>
-#include <sys/time.h>
-#include <sys/file.h>
-#include <errno.h>
-#include <sys/signal.h>
-#include <sys/wait.h>
-#include <time.h>
-#include <string.h>
-#include <sys/stat.h>
-#include <dirent.h>
-#include <limits.h>
-#include <fcntl.h>
-#include <signal.h>
-#include <utime.h>
-#include <glob.h>
-#include <sys/param.h>
-
-#define SYSLOG_NAMES /* defines CODE prioritynames[], facilitynames[] */
-#include <syslog.h>
-
-#include <openssl/bio.h>
-#include <openssl/pem.h>
-#include <openssl/err.h>
-#include <openssl/x509.h>
-#include <openssl/x509v3.h>
-#include <openssl/safestack.h>
-#include <openssl/conf.h>
-#include <openssl/rand.h>
-#include <openssl/asn1t.h>
-#include <openssl/cms.h>
-
-#include "bio_f_linebreak.h"
-
-#include "defstack.h"
-#include "defasn1.h"
-
-#if !defined(FILENAME_MAX) && defined(PATH_MAX) && PATH_MAX > 1024
-#define FILENAME_MAX PATH_MAX
-#elif !defined(FILENAME_MAX)
-#define FILENAME_MAX 1024
-#endif
-
-#define SCHEME_RSYNC ("rsync://")
-#define SIZEOF_RSYNC (sizeof(SCHEME_RSYNC) - 1)
-
-/**
- * Maximum length of a URI.
- */
-#define URI_MAX (FILENAME_MAX + SIZEOF_RSYNC)
-
-/**
- * Maximum number of times we try to kill an inferior process before
- * giving up.
- */
-#define KILL_MAX 10
-
-#ifndef HOSTNAME_MAX
-#define HOSTNAME_MAX 256
-#endif
-
-/**
- * Version number of XML summary output.
- */
-#define XML_SUMMARY_VERSION 1
-
-/**
- * How much buffer space do we need for a raw address?
- */
-#define ADDR_RAW_BUF_LEN 16
-
-/**
- * Logging levels. Same general idea as syslog(), but our own
- * categories based on what makes sense for this program.  Default
- * mappings to syslog() priorities are here because it's the easiest
- * way to make sure that we assign a syslog level to each of ours.
- */
-
-#define LOG_LEVELS \
- QQ(log_sys_err, LOG_ERR) /* Error from OS or library */ \
- QQ(log_usage_err, LOG_ERR) /* Bad usage (local error) */ \
- QQ(log_data_err, LOG_NOTICE) /* Bad data, no biscuit */ \
- QQ(log_telemetry, LOG_INFO) /* Normal progress chatter */ \
- QQ(log_verbose, LOG_INFO) /* Extra chatter */ \
- QQ(log_debug, LOG_DEBUG) /* Only useful when debugging */
-
-#define QQ(x,y) x ,
-typedef enum log_level { LOG_LEVELS LOG_LEVEL_T_MAX } log_level_t;
-#undef QQ
-
-#define QQ(x,y) { #x , x },
-static const struct {
- const char *name;
- log_level_t value;
-} log_levels[] = {
- LOG_LEVELS
-};
-#undef QQ
-
-/**
- * MIB counters derived from OpenSSL. Long list of validation failure
- * codes from OpenSSL (crypto/x509/x509_vfy.h).
- */
-
-#define MIB_COUNTERS_FROM_OPENSSL \
- QV(X509_V_ERR_UNABLE_TO_GET_CRL) \
- QV(X509_V_ERR_UNABLE_TO_DECRYPT_CERT_SIGNATURE) \
- QV(X509_V_ERR_UNABLE_TO_DECRYPT_CRL_SIGNATURE) \
- QV(X509_V_ERR_UNABLE_TO_DECODE_ISSUER_PUBLIC_KEY) \
- QV(X509_V_ERR_CERT_SIGNATURE_FAILURE) \
- QV(X509_V_ERR_CRL_SIGNATURE_FAILURE) \
- QV(X509_V_ERR_CERT_NOT_YET_VALID) \
- QV(X509_V_ERR_CERT_HAS_EXPIRED) \
- QV(X509_V_ERR_CRL_NOT_YET_VALID) \
- QV(X509_V_ERR_ERROR_IN_CERT_NOT_BEFORE_FIELD) \
- QV(X509_V_ERR_ERROR_IN_CERT_NOT_AFTER_FIELD) \
- QV(X509_V_ERR_ERROR_IN_CRL_LAST_UPDATE_FIELD) \
- QV(X509_V_ERR_ERROR_IN_CRL_NEXT_UPDATE_FIELD) \
- QV(X509_V_ERR_DEPTH_ZERO_SELF_SIGNED_CERT) \
- QV(X509_V_ERR_SELF_SIGNED_CERT_IN_CHAIN) \
- QV(X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT_LOCALLY) \
- QV(X509_V_ERR_UNABLE_TO_VERIFY_LEAF_SIGNATURE) \
- QV(X509_V_ERR_CERT_CHAIN_TOO_LONG) \
- QV(X509_V_ERR_CERT_REVOKED) \
- QV(X509_V_ERR_INVALID_CA) \
- QV(X509_V_ERR_PATH_LENGTH_EXCEEDED) \
- QV(X509_V_ERR_INVALID_PURPOSE) \
- QV(X509_V_ERR_CERT_UNTRUSTED) \
- QV(X509_V_ERR_CERT_REJECTED) \
- QV(X509_V_ERR_AKID_SKID_MISMATCH) \
- QV(X509_V_ERR_AKID_ISSUER_SERIAL_MISMATCH) \
- QV(X509_V_ERR_KEYUSAGE_NO_CERTSIGN) \
- QV(X509_V_ERR_UNABLE_TO_GET_CRL_ISSUER) \
- QV(X509_V_ERR_UNHANDLED_CRITICAL_EXTENSION) \
- QV(X509_V_ERR_KEYUSAGE_NO_CRL_SIGN) \
- QV(X509_V_ERR_UNHANDLED_CRITICAL_CRL_EXTENSION) \
- QV(X509_V_ERR_INVALID_NON_CA) \
- QV(X509_V_ERR_PROXY_PATH_LENGTH_EXCEEDED) \
- QV(X509_V_ERR_KEYUSAGE_NO_DIGITAL_SIGNATURE) \
- QV(X509_V_ERR_PROXY_CERTIFICATES_NOT_ALLOWED) \
- QV(X509_V_ERR_INVALID_EXTENSION) \
- QV(X509_V_ERR_INVALID_POLICY_EXTENSION) \
- QV(X509_V_ERR_NO_EXPLICIT_POLICY) \
- QV(X509_V_ERR_UNNESTED_RESOURCE)
-
-/**
- * MIB counters specific to rcynic.
- */
-
-#define MIB_COUNTERS \
- MIB_COUNTERS_FROM_OPENSSL \
- QB(aia_mismatch, "Mismatched AIA extension") \
- QB(aia_missing, "AIA extension missing") \
- QB(certificate_bad_crl, "Bad certificate CRL") \
- QB(certificate_bad_signature, "Bad certificate signature") \
- QB(certificate_digest_mismatch, "Certificate digest mismatch") \
- QB(certificate_failed_validation, "Certificate failed validation") \
- QB(crl_digest_mismatch, "CRL digest mismatch") \
- QB(crl_not_in_manifest, "CRL not listed in manifest") \
- QB(crl_not_yet_valid, "CRL not yet valid") \
- QB(crldp_mismatch, "CRLDP doesn't match issuer's SIA") \
- QB(crldp_missing, "CRLDP extension missing") \
- QB(disallowed_extension, "Disallowed X.509v3 extension") \
- QB(ghostbuster_bad_crl, "Ghostbuster EE has bad CRL") \
- QB(ghostbuster_bad_econtenttype, "Bad Ghostbuster eContentType") \
- QB(ghostbuster_digest_mismatch, "Ghostbuster digest mismatch") \
- QB(ghostbuster_invalid_cms, "Ghostbuster validation failure") \
- QB(ghostbuster_invalid_ee, "Invalid Ghostbuster certificate") \
- QB(ghostbuster_missing_signer, "Missing Ghostbuster signer") \
- QB(hash_too_long, "Hash value is too long") \
- QB(malformed_crldp, "Malformed CRLDP extension") \
- QB(malformed_roa_addressfamily, "Malformed ROA addressFamily") \
- QB(malformed_sia, "Malformed SIA extension") \
- QB(manifest_bad_econtenttype, "Bad manifest eContentType") \
- QB(manifest_decode_error, "Manifest decode error") \
- QB(manifest_invalid_cms, "Manifest validation failure") \
- QB(manifest_invalid_ee, "Invalid manifest certificate") \
- QB(manifest_malformed_crldp, "Malformed manifest CRLDP") \
- QB(manifest_mismatch, "Manifest doesn't match SIA") \
- QB(manifest_missing, "Manifest pointer missing") \
- QB(manifest_missing_crldp, "Missing manifest CRLDP") \
- QB(manifest_missing_signer, "Missing manifest signer") \
- QB(manifest_not_yet_valid, "Manifest not yet valid") \
- QB(manifest_wrong_version, "Wrong manifest version") \
- QB(object_rejected, "Object rejected") \
- QB(roa_bad_afi, "ROA contains bad AFI value") \
- QB(roa_bad_crl, "ROA EE has bad CRL") \
- QB(roa_bad_econtenttype, "Bad ROA eContentType") \
- QB(roa_decode_error, "ROA decode error") \
- QB(roa_digest_mismatch, "ROA digest mismatch") \
- QB(roa_invalid_cms, "ROA validation failure") \
- QB(roa_invalid_ee, "Invalid ROA certificate") \
- QB(roa_missing_signer, "Missing ROA signer") \
- QB(roa_not_nested, "ROA resource not in EE") \
- QB(roa_resources_malformed, "ROA resources malformed") \
- QB(roa_wrong_version, "Wrong ROA version") \
- QB(rsync_failed, "rsync transfer failed") \
- QB(rsync_timed_out, "rsync transfer timed out") \
- QB(sia_missing, "SIA extension missing") \
- QB(trust_anchor_key_mismatch, "Trust anchor key mismatch") \
- QB(trust_anchor_with_crldp, "Trust anchor can't have CRLDP") \
- QB(unknown_verify_error, "Unknown OpenSSL verify error") \
- QB(unreadable_trust_anchor, "Unreadable trust anchor") \
- QB(unreadable_trust_anchor_locator, "Unreadable trust anchor locator") \
- QB(uri_too_long, "URI too long") \
- QW(nonconformant_issuer_name, "Nonconformant X.509 issuer name") \
- QW(nonconformant_subject_name, "Nonconformant X.509 subject name") \
- QW(rsync_skipped, "rsync transfer skipped") \
- QW(stale_crl, "Stale CRL") \
- QW(stale_manifest, "Stale manifest") \
- QW(tainted_by_stale_crl, "Tainted by stale CRL") \
- QW(tainted_by_stale_manifest, "Tainted by stale manifest") \
- QW(tainted_by_not_being_in_manifest, "Tainted by not being in manifest") \
- QW(trust_anchor_not_self_signed, "Trust anchor not self-signed") \
- QW(unknown_object_type_skipped, "Unknown object type skipped") \
- QG(current_cert_recheck, "Certificate rechecked") \
- QG(object_accepted, "Object accepted") \
- QG(rsync_succeeded, "rsync transfer succeeded") \
- QG(validation_ok, "OK")
-
-#define QV(x) QB(mib_openssl_##x, 0)
-
-static const char
- mib_counter_kind_good[] = "good",
- mib_counter_kind_warn[] = "warn",
- mib_counter_kind_bad[] = "bad";
-
-#define QG(x,y) mib_counter_kind_good ,
-#define QW(x,y) mib_counter_kind_warn ,
-#define QB(x,y) mib_counter_kind_bad ,
-static const char * const mib_counter_kind[] = { MIB_COUNTERS NULL };
-#undef QB
-#undef QW
-#undef QG
-
-#define QG(x,y) QQ(x,y)
-#define QW(x,y) QQ(x,y)
-#define QB(x,y) QQ(x,y)
-
-#define QQ(x,y) x ,
-typedef enum mib_counter { MIB_COUNTERS MIB_COUNTER_T_MAX } mib_counter_t;
-#undef QQ
-
-#define QQ(x,y) y ,
-static const char * const mib_counter_desc[] = { MIB_COUNTERS NULL };
-#undef QQ
-
-#define QQ(x,y) #x ,
-static const char * const mib_counter_label[] = { MIB_COUNTERS NULL };
-#undef QQ
-
-#undef QV
-
-#define QQ(x,y) 0 ,
-#define QV(x) x ,
-static const long mib_counter_openssl[] = { MIB_COUNTERS 0 };
-#undef QV
-#undef QQ
-
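To make the X-macro plumbing above concrete, here is a partial hand-expansion (illustrative only) showing what one QB entry and one QV entry contribute to the generated tables:

    /*
     * QB(aia_missing, "AIA extension missing") contributes:
     *   aia_missing               to enum mib_counter
     *   "AIA extension missing"   to mib_counter_desc[]
     *   "aia_missing"             to mib_counter_label[]
     *   mib_counter_kind_bad      to mib_counter_kind[]
     *   0                         to mib_counter_openssl[]
     *
     * QV(X509_V_ERR_CERT_REVOKED) expands to
     * QB(mib_openssl_X509_V_ERR_CERT_REVOKED, 0) everywhere except in
     * mib_counter_openssl[], so its description slot is NULL while its
     * mib_counter_openssl[] slot holds the numeric X509_V_ERR_CERT_REVOKED
     * value; that is what lets the logging code fall back to
     * X509_verify_cert_error_string() for counters derived from OpenSSL
     * verify errors.
     */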
-/**
- * Object sources. We always try to get fresh copies of objects using
- * rsync, but if that fails we try using backup copies from what
- * worked the last time we were run. This means that a URI
- * potentially represents two different objects, so we need to
- * distinguish them for tracking purposes in our validation log.
- */
-
-#define OBJECT_GENERATIONS \
- QQ(null) \
- QQ(current) \
- QQ(backup)
-
-#define QQ(x) object_generation_##x ,
-typedef enum object_generation { OBJECT_GENERATIONS OBJECT_GENERATION_MAX } object_generation_t;
-#undef QQ
-
-#define QQ(x) #x ,
-static const char * const object_generation_label[] = { OBJECT_GENERATIONS NULL };
-#undef QQ
-
-/**
- * Type-safe string wrapper for URIs.
- */
-typedef struct { char s[URI_MAX]; } uri_t;
-
-/**
- * Type-safe string wrapper for filename paths.
- */
-typedef struct { char s[FILENAME_MAX]; } path_t;
-
-/**
- * Type-safe string wrapper for hostnames.
- */
-typedef struct { char s[HOSTNAME_MAX]; } hostname_t;
-
-/**
- * Type-safe wrapper for hash buffers.
- */
-typedef struct { unsigned char h[EVP_MAX_MD_SIZE]; } hashbuf_t;
-
-/**
- * Per-URI validation status object.
- * uri must be first element.
- */
-typedef struct validation_status {
- uri_t uri;
- object_generation_t generation;
- time_t timestamp;
- unsigned char events[(MIB_COUNTER_T_MAX + 7) / 8];
-} validation_status_t;
-
-DECLARE_STACK_OF(validation_status_t)
-
-/**
- * Structure to hold data parsed out of a certificate.
- */
-typedef struct certinfo {
- int ca, ta;
- object_generation_t generation;
- uri_t uri, sia, aia, crldp, manifest;
-} certinfo_t;
-
-typedef struct rcynic_ctx rcynic_ctx_t;
-
-/**
- * States that a walk_ctx_t can be in.
- */
-typedef enum {
- walk_state_initial, /**< Initial state */
- walk_state_rsync, /**< rsyncing certinfo.sia */
- walk_state_ready, /**< Ready to traverse outputs */
- walk_state_current, /**< prefix = rc->unauthenticated */
- walk_state_backup, /**< prefix = rc->old_authenticated */
- walk_state_done /**< Done walking this cert's outputs */
-} walk_state_t;
-
-/**
- * Context for certificate tree walks. This includes all the stuff
- * that we would keep as automatic variables on the call stack if we
- * didn't have to use callbacks to support multiple rsync processes.
- */
-typedef struct walk_ctx {
- unsigned refcount;
- certinfo_t certinfo;
- X509 *cert;
- Manifest *manifest;
- STACK_OF(OPENSSL_STRING) *filenames;
- int manifest_iteration, filename_iteration, stale_manifest;
- walk_state_t state;
-} walk_ctx_t;
-
-DECLARE_STACK_OF(walk_ctx_t)
-
-/**
- * Return codes from rsync functions.
- */
-typedef enum {
- rsync_status_done, /* Request completed */
- rsync_status_failed, /* Request failed */
- rsync_status_timed_out, /* Request timed out */
- rsync_status_pending, /* Request in progress */
- rsync_status_skipped /* Request not attempted */
-} rsync_status_t;
-
-/**
- * Context for asynchronous rsync.
- */
-typedef struct rsync_ctx {
- uri_t uri;
- void (*handler)(const rcynic_ctx_t *, const struct rsync_ctx *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *);
- STACK_OF(walk_ctx_t) *wsk;
- enum {
- rsync_state_initial, /* Must be first */
- rsync_state_running,
- rsync_state_conflict_wait,
- rsync_state_retry_wait,
- rsync_state_terminating
- } state;
- enum {
- rsync_problem_none, /* Must be first */
- rsync_problem_timed_out,
- rsync_problem_refused
- } problem;
- unsigned tries;
- pid_t pid;
- int fd;
- time_t started, deadline;
- char buffer[URI_MAX * 4];
- size_t buflen;
-} rsync_ctx_t;
-
-DECLARE_STACK_OF(rsync_ctx_t)
-
-/**
- * Deferred task.
- */
-typedef struct task {
- void (*handler)(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *);
- STACK_OF(walk_ctx_t) *wsk;
-} task_t;
-
-DECLARE_STACK_OF(task_t)
-
-/**
- * Extended context for verify callbacks. This is a wrapper around
- * OpenSSL's X509_STORE_CTX, and the embedded X509_STORE_CTX @em must be
- * the first element of this structure in order for the evil cast to
- * do the right thing. This is ugly but safe, as the C language
- * promises us that the address of the first element of a structure is
- * the same as the address of the structure.
- */
-typedef struct rcynic_x509_store_ctx {
- X509_STORE_CTX ctx; /* Must be first */
- const rcynic_ctx_t *rc;
- const certinfo_t *subject;
-} rcynic_x509_store_ctx_t;
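A minimal sketch of the cast in use, assuming a verify callback registered against an X509_STORE_CTX that is embedded as the first member of this wrapper (the callback name is hypothetical):

    static int hypothetical_verify_cb(int ok, X509_STORE_CTX *ctx)
    {
      /* Safe only because the X509_STORE_CTX really is the wrapper's first member. */
      rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx;

      if (!ok)
        logmsg(rctx->rc, log_data_err, "Verification failed for %s", rctx->subject->uri.s);
      return ok;
    }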
-
-/**
- * Program context that would otherwise be a mess of global variables.
- */
-struct rcynic_ctx {
- path_t authenticated, old_authenticated, new_authenticated, unauthenticated;
- char *jane, *rsync_program;
- STACK_OF(OPENSSL_STRING) *rsync_cache, *backup_cache, *dead_host_cache;
- STACK_OF(validation_status_t) *validation_status;
- STACK_OF(rsync_ctx_t) *rsync_queue;
- STACK_OF(task_t) *task_queue;
- int use_syslog, allow_stale_crl, allow_stale_manifest, use_links;
- int require_crl_in_manifest, rsync_timeout, priority[LOG_LEVEL_T_MAX];
- int allow_non_self_signed_trust_anchor, allow_object_not_in_manifest;
- int max_parallel_fetches, max_retries, retry_wait_min, run_rsync;
- log_level_t log_level;
- X509_STORE *x509_store;
-};
-
-
-/**
- * Subversion ID data.
- */
-static const char svn_id[] = "$Id$";
-
-/*
- * ASN.1 Object identifiers in form suitable for use with oid_cmp()
- */
-
-/** 1.3.6.1.5.5.7.48.2 */
-static const unsigned char id_ad_caIssuers[] =
- {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x2};
-
-/** 1.3.6.1.5.5.7.48.5 */
-static const unsigned char id_ad_caRepository[] =
- {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0x5};
-
-/** 1.3.6.1.5.5.7.48.10 */
-static const unsigned char id_ad_rpkiManifest[] =
- {0x2b, 0x6, 0x1, 0x5, 0x5, 0x7, 0x30, 0xa};
-
-/** 1.2.840.113549.1.9.16.1.24 */
-static const unsigned char id_ct_routeOriginAttestation[] =
- {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x18};
-
-/** 1.2.840.113549.1.9.16.1.26 */
-static const unsigned char id_ct_rpkiManifest[] =
- {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x1a};
-
-/** 1.2.840.113549.1.9.16.1.35 */
-static const unsigned char id_ct_rpkiGhostbusters[] =
- {0x2a, 0x86, 0x48, 0x86, 0xf7, 0x0d, 0x01, 0x09, 0x10, 0x01, 0x23};
-
-/** 2.16.840.1.101.3.4.2.1 */
-static const unsigned char id_sha256[] =
- {0x60, 0x86, 0x48, 0x01, 0x65, 0x03, 0x04, 0x02, 0x01};
-
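These arrays are just the DER content octets of the dotted OIDs, which is why oid_cmp() further down can compare them directly against the data field of an ASN1_OBJECT. As a worked example:

    /*
     * 1.3.6.1.5.5.7.48.10 (id-ad-rpkiManifest) encodes as:
     *   arcs 1.3   ->  40*1 + 3 = 43  ->  0x2b
     *   6 1 5 5 7  ->  0x06 0x01 0x05 0x05 0x07
     *   48         ->  0x30
     *   10         ->  0x0a
     * which is exactly the id_ad_rpkiManifest[] initializer above.  Arcs of
     * 128 or more use base-128 continuation octets, e.g. 840 becomes
     * 0x86 0x48 in the id_ct_* and id_sha256 OIDs.
     */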
-/**
- * RPKI certificate policy OID in form suitable for use with
- * X509_VERIFY_PARAM_add0_policy().
- */
-static const char rpki_policy_oid[] = "1.3.6.1.5.5.7.14.2";
-
-/**
- * Suffix we use temporarily during the symlink shuffle. Could be
- * almost anything, but we want to do the length check early, before
- * we waste a lot of work we'll just have to throw away, so we just
- * wire in something short and obvious.
- */
-static const char authenticated_symlink_suffix[] = ".new";
-
-
-
-/**
- * Type-safe wrapper around free() to keep safestack macros happy.
- */
-static void OPENSSL_STRING_free(OPENSSL_STRING s)
-{
- if (s)
- free(s);
-}
-
-/**
- * Wrapper around an idiom we use with OPENSSL_STRING stacks. There's
- * a bug in the current sk_OPENSSL_STRING_delete() macro that casts
- * the return value to the wrong type, so we cast it to something
- * innocuous here and avoid using that macro elsewhere.
- */
-static void sk_OPENSSL_STRING_remove(STACK_OF(OPENSSL_STRING) *sk, const char *str)
-{
- OPENSSL_STRING_free((void *) sk_OPENSSL_STRING_delete(sk, sk_OPENSSL_STRING_find(sk, str)));
-}
-
-/**
- * Allocate a new validation_status_t object.
- */
-static validation_status_t *validation_status_t_new(void)
-{
- validation_status_t *v = malloc(sizeof(*v));
- if (v)
- memset(v, 0, sizeof(*v));
- return v;
-}
-
-/**
- * Type-safe wrapper around free() to keep safestack macros happy.
- */
-static void validation_status_t_free(validation_status_t *v)
-{
- if (v)
- free(v);
-}
-
-
-
-/*
- * GCC attributes to help catch format string errors.
- */
-
-#ifdef __GNUC__
-
-static void logmsg(const rcynic_ctx_t *rc,
- const log_level_t level,
- const char *fmt, ...)
- __attribute__ ((format (printf, 3, 4)));
-#endif
-
-/**
- * Logging.
- */
-static void vlogmsg(const rcynic_ctx_t *rc,
- const log_level_t level,
- const char *fmt,
- va_list ap)
-{
- char tad[sizeof("00:00:00")+1];
- time_t tad_time;
-
- assert(rc && fmt);
-
- if (rc->log_level < level)
- return;
-
- if (rc->use_syslog) {
- vsyslog(rc->priority[level], fmt, ap);
- } else {
- time(&tad_time);
- strftime(tad, sizeof(tad), "%H:%M:%S", localtime(&tad_time));
- fprintf(stderr, "%s: ", tad);
- if (rc->jane)
- fprintf(stderr, "%s: ", rc->jane);
- vfprintf(stderr, fmt, ap);
- putc('\n', stderr);
- }
-}
-
-/**
- * Logging.
- */
-static void logmsg(const rcynic_ctx_t *rc,
- const log_level_t level,
- const char *fmt, ...)
-{
- va_list ap;
- va_start(ap, fmt);
- vlogmsg(rc, level, fmt, ap);
- va_end(ap);
-}
-
-/**
- * Print OpenSSL library errors.
- */
-static void log_openssl_errors(const rcynic_ctx_t *rc)
-{
- const char *data, *file;
- unsigned long code;
- char error[256];
- int flags, line;
-
- if (rc->log_level < log_verbose)
- return;
-
- while ((code = ERR_get_error_line_data(&file, &line, &data, &flags))) {
- ERR_error_string_n(code, error, sizeof(error));
- if (data && (flags & ERR_TXT_STRING))
- logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s: %s", file, line, error, data);
- else
- logmsg(rc, log_sys_err, "OpenSSL error %s:%d: %s", file, line, error);
- }
-}
-
-/**
- * Configure logging.
- */
-static int configure_logmsg(rcynic_ctx_t *rc, const char *name)
-{
- int i;
-
- assert(rc && name);
-
- for (i = 0; i < sizeof(log_levels)/sizeof(*log_levels); i++) {
- if (!strcmp(name, log_levels[i].name)) {
- rc->log_level = log_levels[i].value;
- return 1;
- }
- }
-
- logmsg(rc, log_usage_err, "Bad log level %s", name);
- return 0;
-}
-
-/**
- * Configure syslog.
- */
-static int configure_syslog(const rcynic_ctx_t *rc,
- int *result,
- const CODE *table,
- const char *name)
-{
- assert(result && table && name);
-
- while (table->c_name && strcmp(table->c_name, name))
- table++;
-
- if (table->c_name) {
- *result = table->c_val;
- return 1;
- } else {
- logmsg(rc, log_usage_err, "Bad syslog code %s", name);
- return 0;
- }
-}
-
-/**
- * Configure boolean variable.
- */
-static int configure_boolean(const rcynic_ctx_t *rc,
- int *result,
- const char *val)
-{
- assert(rc && result && val);
-
- switch (*val) {
- case 'y': case 'Y': case 't': case 'T': case '1':
- *result = 1;
- return 1;
- case 'n': case 'N': case 'f': case 'F': case '0':
- *result = 0;
- return 1;
- default:
- logmsg(rc, log_usage_err, "Bad boolean value %s", val);
- return 0;
- }
-}
-
-/**
- * Configure integer variable.
- */
-static int configure_integer(const rcynic_ctx_t *rc,
- int *result,
- const char *val)
-{
- long res;
- char *p;
-
- assert(rc && result && val);
-
- res = strtol(val, &p, 10);
-
- if (*val != '\0' && *p == '\0') {
- *result = (int) res;
- return 1;
- } else {
- logmsg(rc, log_usage_err, "Bad integer value %s", val);
- return 0;
- }
-}
-
-
-
-/**
- * Make a directory if it doesn't already exist.
- */
-static int mkdir_maybe(const rcynic_ctx_t *rc, const path_t *name)
-{
- path_t path;
- char *s;
-
- assert(name != NULL);
- if (strlen(name->s) >= sizeof(path.s)) {
- logmsg(rc, log_data_err, "Pathname %s too long", name->s);
- return 0;
- }
- strcpy(path.s, name->s);
- s = path.s[0] == '/' ? path.s + 1 : path.s;
- if ((s = strrchr(s, '/')) == NULL)
- return 1;
- *s = '\0';
- if (!mkdir_maybe(rc, &path)) {
- logmsg(rc, log_sys_err, "Failed to make directory %s", path.s);
- return 0;
- }
- if (!access(path.s, F_OK))
- return 1;
- logmsg(rc, log_verbose, "Creating directory %s", path.s);
- return mkdir(path.s, 0777) == 0;
-}
-
-/**
- * strdup() a string and push it onto a stack.
- */
-static int sk_OPENSSL_STRING_push_strdup(STACK_OF(OPENSSL_STRING) *sk, const char *str)
-{
- OPENSSL_STRING s = strdup(str);
-
- if (s && sk_OPENSSL_STRING_push(sk, s))
- return 1;
- if (s)
- free(s);
- return 0;
-}
-
-/**
- * Compare two URI strings, for OpenSSL STACK operations.
- */
-
-static int uri_cmp(const char * const *a, const char * const *b)
-{
- return strcmp(*a, *b);
-}
-
-/**
- * Is string an rsync URI?
- */
-static int is_rsync(const char *uri)
-{
- return uri && !strncmp(uri, SCHEME_RSYNC, SIZEOF_RSYNC);
-}
-
-/**
- * Convert an rsync URI to a filename, checking for evil character
- * sequences. NB: This routine can't call mib_increment(), because
- * mib_increment() calls it, so errors detected here only go into
- * the log, not the MIB.
- */
-static int uri_to_filename(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix)
-{
- const char *u;
- size_t n;
-
- path->s[0] = '\0';
-
- if (!is_rsync(uri->s)) {
- logmsg(rc, log_telemetry, "%s is not an rsync URI, not converting to filename", uri->s);
- return 0;
- }
-
- u = uri->s + SIZEOF_RSYNC;
- n = strlen(u);
-
- if (u[0] == '/' || u[0] == '.' || strstr(u, "/../") ||
- (n >= 3 && !strcmp(u + n - 3, "/.."))) {
- logmsg(rc, log_data_err, "Dangerous URI %s, not converting to filename", uri->s);
- return 0;
- }
-
- if (prefix)
- n += strlen(prefix->s);
-
- if (n >= sizeof(path->s)) {
- logmsg(rc, log_data_err, "URI %s too long, not converting to filename", uri->s);
- return 0;
- }
-
- if (prefix) {
- strcpy(path->s, prefix->s);
- strcat(path->s, u);
- } else {
- strcpy(path->s, u);
- }
-
- return 1;
-}
-
-/**
- * Extract a hostname from a URI.
- */
-static int uri_to_hostname(const uri_t *uri,
- hostname_t *hostname)
-{
- size_t n;
-
- if (!uri || !hostname || !is_rsync(uri->s) ||
- (n = strcspn(uri->s + SIZEOF_RSYNC, "/")) >= sizeof(hostname->s))
- return 0;
-
- strncpy(hostname->s, uri->s + SIZEOF_RSYNC, n);
- hostname->s[n] = '\0';
- return 1;
-}
-
-/**
- * OID comparison.
- */
-static int oid_cmp(const ASN1_OBJECT *obj, const unsigned char *oid, const size_t oidlen)
-{
- assert(obj != NULL && oid != NULL);
- if (obj->length != oidlen)
- return obj->length - oidlen;
- else
- return memcmp(obj->data, oid, oidlen);
-}
-
-/**
- * Get value of code in a validation_status_t.
- */
-static int validation_status_get_code(const validation_status_t *v,
- const mib_counter_t code)
-{
- assert(v && code < MIB_COUNTER_T_MAX);
- return (v->events[code / 8] & (1 << (code % 8))) != 0;
-}
-
-/**
- * Set value of code in a validation_status_t.
- */
-static void validation_status_set_code(validation_status_t *v,
- const mib_counter_t code,
- int value)
-{
- assert(v && code < MIB_COUNTER_T_MAX);
- if (value)
- v->events[code / 8] |= (1 << (code % 8));
- else
- v->events[code / 8] &= ~(1 << (code % 8));
-}
-
-/**
- * Add a validation status entry to internal log.
- */
-static void log_validation_status(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const mib_counter_t code,
- const object_generation_t generation)
-{
- validation_status_t v_, *v = NULL;
- int was_set;
-
- assert(rc && uri && code < MIB_COUNTER_T_MAX && generation < OBJECT_GENERATION_MAX);
-
- if (!rc->validation_status)
- return;
-
- memset(&v_, 0, sizeof(v_));
- v_.uri = *uri;
- v_.generation = generation;
-
- v = sk_validation_status_t_value(rc->validation_status, sk_validation_status_t_find(rc->validation_status, &v_));
- if (v == NULL) {
- if ((v = validation_status_t_new()) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate validation status entry for %s", uri->s);
- return;
- }
- *v = v_;
- if (!sk_validation_status_t_push(rc->validation_status, v)) {
- logmsg(rc, log_sys_err, "Couldn't store validation status entry for %s", uri->s);
- free(v);
- return;
- }
- }
-
- was_set = validation_status_get_code(v, code);
-
- v->timestamp = time(0);
- validation_status_set_code(v, code, 1);
-
- if (!was_set)
- logmsg(rc, log_verbose, "Recording \"%s\" for %s%s%s",
- (mib_counter_desc[code]
- ? mib_counter_desc[code]
- : X509_verify_cert_error_string(mib_counter_openssl[code])),
- (generation != object_generation_null ? object_generation_label[generation] : ""),
- (generation != object_generation_null ? " " : ""),
- uri->s);
-}
-
-/**
- * Validation status object comparison.
- */
-static int validation_status_cmp(const validation_status_t * const *a, const validation_status_t * const *b)
-{
- int cmp = strcmp((*a)->uri.s, (*b)->uri.s);
- if (cmp)
- return cmp;
- cmp = (int) ((*a)->generation) - (int) ((*b)->generation);
- if (cmp)
- return cmp;
- return 0;
-}
-
-/**
- * Copy or link a file, as the case may be.
- */
-static int cp_ln(const rcynic_ctx_t *rc, const path_t *source, const path_t *target)
-{
- struct stat statbuf;
- struct utimbuf utimebuf;
- FILE *in = NULL, *out = NULL;
- int c, ok = 0;
-
- if (rc->use_links) {
- (void) unlink(target->s);
- ok = link(source->s, target->s) == 0;
- if (!ok)
- logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
- source->s, target->s, strerror(errno));
- return ok;
- }
-
- if ((in = fopen(source->s, "rb")) == NULL ||
- (out = fopen(target->s, "wb")) == NULL)
- goto done;
-
- while ((c = getc(in)) != EOF)
- if (putc(c, out) == EOF)
- goto done;
-
- ok = 1;
-
- done:
- ok &= !(in != NULL && fclose(in) == EOF);
- ok &= !(out != NULL && fclose(out) == EOF);
-
- if (!ok) {
- logmsg(rc, log_sys_err, "Couldn't copy %s to %s: %s",
- source->s, target->s, strerror(errno));
- return ok;
- }
-
- /*
- * Preserve the file modification time to allow for detection of
- * changed objects in the authenticated directory. Failure to reset
- * the times is not optimal, but is also not critical, thus no
- * failure return.
- */
- if (stat(source->s, &statbuf) < 0 ||
- (utimebuf.actime = statbuf.st_atime,
- utimebuf.modtime = statbuf.st_mtime,
- utime(target->s, &utimebuf) < 0))
- logmsg(rc, log_sys_err, "Couldn't copy inode timestamp from %s to %s: %s",
- source->s, target->s, strerror(errno));
-
- return ok;
-}
-
-/**
- * Install an object.
- */
-static int install_object(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const path_t *source,
- const mib_counter_t code,
- const object_generation_t generation)
-{
- path_t target;
-
- if (!uri_to_filename(rc, uri, &target, &rc->new_authenticated)) {
- logmsg(rc, log_data_err, "Couldn't generate installation name for %s", uri->s);
- return 0;
- }
-
- if (!mkdir_maybe(rc, &target)) {
- logmsg(rc, log_sys_err, "Couldn't create directory for %s", target.s);
- return 0;
- }
-
- if (!cp_ln(rc, source, &target))
- return 0;
- log_validation_status(rc, uri, code, generation);
- return 1;
-}
-
-/**
- * Check str for a suffix.
- */
-static int endswith(const char *str, const char *suffix)
-{
- size_t len_str, len_suffix;
- assert(str != NULL && suffix != NULL);
- len_str = strlen(str);
- len_suffix = strlen(suffix);
- return len_str >= len_suffix && !strcmp(str + len_str - len_suffix, suffix);
-}
-
-/**
- * Check str for a prefix.
- */
-static int startswith(const char *str, const char *prefix)
-{
- size_t len_str, len_prefix;
- assert(str != NULL && prefix != NULL);
- len_str = strlen(str);
- len_prefix = strlen(prefix);
- return len_str >= len_prefix && !strncmp(str, prefix, len_prefix);
-}
-
-
-/**
- * Set a directory name, adding or stripping trailing slash as needed.
- */
-static int set_directory(const rcynic_ctx_t *rc, path_t *out, const char *in, const int want_slash)
-{
- int has_slash, need_slash;
- size_t n;
-
- assert(rc && in && out);
-
- n = strlen(in);
-
- if (n == 0) {
- logmsg(rc, log_usage_err, "Empty path");
- return 0;
- }
-
- has_slash = in[n - 1] == '/';
-
- need_slash = want_slash && !has_slash;
-
- if (n + need_slash + 1 > sizeof(out->s)) {
- logmsg(rc, log_usage_err, "Path \"%s\" too long", in);
- return 0;
- }
-
- strcpy(out->s, in);
- if (need_slash)
- strcat(out->s, "/");
- else if (has_slash && !want_slash)
- out->s[n - 1] = '\0';
-
- return 1;
-}
-
-/**
- * Remove a directory tree, like rm -rf.
- */
-static int rm_rf(const path_t *name)
-{
- path_t path;
- struct dirent *d;
- size_t len;
- DIR *dir;
- int ret = 0, need_slash;
-
- assert(name);
- len = strlen(name->s);
- assert(len > 0 && len < sizeof(path.s));
- need_slash = name->s[len - 1] != '/';
-
- if (rmdir(name->s) == 0)
- return 1;
-
- switch (errno) {
- case ENOENT:
- return 1;
- case ENOTEMPTY:
- break;
- default:
- return 0;
- }
-
- if ((dir = opendir(name->s)) == NULL)
- return 0;
-
- while ((d = readdir(dir)) != NULL) {
- if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
- continue;
- if (len + strlen(d->d_name) + need_slash >= sizeof(path.s))
- goto done;
- strcpy(path.s, name->s);
- if (need_slash)
- strcat(path.s, "/");
- strcat(path.s, d->d_name);
- switch (d->d_type) {
- case DT_DIR:
- if (!rm_rf(&path))
- goto done;
- continue;
- default:
- if (unlink(path.s) < 0)
- goto done;
- continue;
- }
- }
-
- ret = rmdir(name->s) == 0;
-
- done:
- closedir(dir);
- return ret;
-}
-
-/**
- * Construct names for the directories not directly settable by the
- * user.
- *
- * This function also checks for an old-style rc->authenticated
- * directory, to simplify upgrade from older versions of rcynic.
- */
-static int construct_directory_names(rcynic_ctx_t *rc)
-{
- struct stat st;
- ssize_t n;
- path_t p;
- time_t t = time(0);
-
- p = rc->authenticated;
-
- n = strlen(p.s);
-
- if (n + sizeof(authenticated_symlink_suffix) >= sizeof(p.s)) {
- logmsg(rc, log_usage_err, "Symlink name would be too long");
- return 0;
- }
-
- if (strftime(p.s + n, sizeof(p.s) - n - 1, ".%Y-%m-%dT%H:%M:%SZ", gmtime(&t)) == 0) {
- logmsg(rc, log_usage_err, "Generated path with timestamp would be too long");
- return 0;
- }
-
- if (!set_directory(rc, &rc->new_authenticated, p.s, 1))
- return 0;
-
- if (!set_directory(rc, &rc->old_authenticated, rc->authenticated.s, 1))
- return 0;
-
- if (lstat(rc->authenticated.s, &st) == 0 && (st.st_mode & S_IFDIR) != 0 &&
- strlen(rc->authenticated.s) + sizeof(".old") < sizeof(p.s)) {
- p = rc->authenticated;
- strcat(p.s, ".old");
- rm_rf(&p);
- (void) rename(rc->authenticated.s, p.s);
- }
-
- if (lstat(rc->authenticated.s, &st) == 0 && (st.st_mode & S_IFDIR) != 0) {
- logmsg(rc, log_usage_err,
- "Existing %s directory is in the way, please remove it",
- rc->authenticated.s);
- return 0;
- }
-
- return 1;
-}
-
-/**
- * Do final symlink shuffle and cleanup of output directories.
- */
-static int finalize_directories(const rcynic_ctx_t *rc)
-{
- path_t path, sym, real_old, real_new;
- const char *dir;
- size_t n;
- glob_t g;
- int i;
-
- if (!realpath(rc->old_authenticated.s, real_old.s))
- real_old.s[0] = '\0';
-
- if (!realpath(rc->new_authenticated.s, real_new.s))
-    real_new.s[0] = '\0';
-
- path = rc->new_authenticated;
-
- n = strlen(path.s);
- assert(n > 1 && path.s[n - 1] == '/');
- path.s[n - 1] = '\0';
-
- if ((dir = strrchr(path.s, '/')) == NULL)
- dir = path.s;
- else
- dir++;
-
- sym = rc->authenticated;
-
- assert(strlen(sym.s) + sizeof(authenticated_symlink_suffix) < sizeof(sym.s));
- strcat(sym.s, authenticated_symlink_suffix);
-
- (void) unlink(sym.s);
-
- if (symlink(dir, sym.s) < 0) {
- logmsg(rc, log_sys_err, "Couldn't link %s to %s: %s",
- sym.s, dir, strerror(errno));
- return 0;
- }
-
- if (rename(sym.s, rc->authenticated.s) < 0) {
- logmsg(rc, log_sys_err, "Couldn't rename %s to %s: %s",
- sym.s, rc->authenticated.s, strerror(errno));
- return 0;
- }
-
- path = rc->authenticated;
- assert(strlen(path.s) + sizeof(".*") < sizeof(path.s));
- strcat(path.s, ".*");
-
- memset(&g, 0, sizeof(g));
-
- if (real_new.s[0] && glob(path.s, 0, 0, &g) == 0)
- for (i = 0; i < g.gl_pathc; i++)
- if (realpath(g.gl_pathv[i], path.s) &&
- strcmp(path.s, real_old.s) &&
- strcmp(path.s, real_new.s))
- rm_rf(&path);
-
- return 1;
-}
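-
-/*
- * Illustrative example with hypothetical paths: if authenticated is
- * /var/rcynic/authenticated, a run started at 2011-06-01T00:00:00Z
- * writes its results into /var/rcynic/authenticated.2011-06-01T00:00:00Z/,
- * then atomically points the /var/rcynic/authenticated symlink at that
- * tree.  Any other /var/rcynic/authenticated.* trees are removed,
- * except the one the symlink pointed to before this run, which is
- * kept as backup data for the next run.
- */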
-
-
-
-/**
- * Check to see whether a hostname is in the dead host cache.
- */
-static int dead_host_check(const rcynic_ctx_t *rc, const uri_t *uri)
-{
- hostname_t hostname;
-
- assert(rc && uri && rc->dead_host_cache);
-
- return (uri_to_hostname(uri, &hostname) &&
- sk_OPENSSL_STRING_find(rc->dead_host_cache, hostname.s) >= 0);
-}
-
-
-/**
- * Add an entry to the dead host cache.
- */
-static void dead_host_add(const rcynic_ctx_t *rc, const uri_t *uri)
-{
- hostname_t hostname;
-
- assert(rc && uri && rc->dead_host_cache);
-
- if (dead_host_check(rc, uri))
- return;
-
- if (!uri_to_hostname(uri, &hostname))
- return;
-
- (void) sk_OPENSSL_STRING_push_strdup(rc->dead_host_cache, hostname.s);
-}
-
-
-
-/**
- * Test whether a pair of URIs "conflict", that is, whether attempting
- * to rsync both of them in parallel might cause
- * unpredictable behavior. Might need a better name for this test.
- *
- * Returns non-zero iff the two URIs "conflict".
- */
-static int conflicting_uris(const uri_t *a, const uri_t *b)
-{
- size_t len_a, len_b;
-
- assert(a && is_rsync(a->s) && b && is_rsync(b->s));
-
- len_a = strlen(a->s);
- len_b = strlen(b->s);
-
- assert(len_a < sizeof(a->s) && len_b < sizeof(b->s));
-
- return !strncmp(a->s, b->s, len_a < len_b ? len_a : len_b);
-}
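-
-/*
- * Worked example with hypothetical URIs: because the test compares
- * the shorter URI as a prefix of the longer one,
- * rsync://example.net/repo/ conflicts with
- * rsync://example.net/repo/sub/obj.cer, while
- * rsync://example.net/repo/ and rsync://example.org/repo/ do not
- * conflict.
- */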
-
-
-
-/**
- * Read non-directory filenames from a directory, so we can check to
- * see what's missing from a manifest.
- */
-static STACK_OF(OPENSSL_STRING) *directory_filenames(const rcynic_ctx_t *rc,
- const walk_state_t state,
- const uri_t *uri)
-{
- STACK_OF(OPENSSL_STRING) *result = NULL;
- path_t path;
- const path_t *prefix = NULL;
- DIR *dir = NULL;
- struct dirent *d;
- int ok = 0;
-
- assert(rc && uri);
-
- switch (state) {
- case walk_state_current:
- prefix = &rc->unauthenticated;
- break;
- case walk_state_backup:
- prefix = &rc->old_authenticated;
- break;
- default:
- goto done;
- }
-
- if (!uri_to_filename(rc, uri, &path, prefix) ||
- (dir = opendir(path.s)) == NULL ||
- (result = sk_OPENSSL_STRING_new(uri_cmp)) == NULL)
- goto done;
-
- while ((d = readdir(dir)) != NULL)
- if (d->d_type != DT_DIR && !sk_OPENSSL_STRING_push_strdup(result, d->d_name))
- goto done;
-
- ok = 1;
-
- done:
- if (dir != NULL)
- closedir(dir);
-
- if (ok)
- return result;
-
- sk_OPENSSL_STRING_pop_free(result, OPENSSL_STRING_free);
- return NULL;
-}
-
-
-
-/**
- * Increment walk context reference count.
- */
-static void walk_ctx_attach(walk_ctx_t *w)
-{
- if (w != NULL) {
- w->refcount++;
- assert(w->refcount != 0);
- }
-}
-
-/**
- * Decrement walk context reference count; freeing the context if the
- * reference count is now zero.
- */
-static void walk_ctx_detach(walk_ctx_t *w)
-{
- if (w != NULL && --(w->refcount) == 0) {
- assert(w->refcount == 0);
- X509_free(w->cert);
- Manifest_free(w->manifest);
- sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
- free(w);
- }
-}
-
-/**
- * Return top context of a walk context stack.
- */
-static walk_ctx_t *walk_ctx_stack_head(STACK_OF(walk_ctx_t) *wsk)
-{
- return sk_walk_ctx_t_value(wsk, sk_walk_ctx_t_num(wsk) - 1);
-}
-
-/**
- * Walk context iterator. Think of this as the thing you call in the
- * third clause of a conceptual "for" loop: this reinitializes as
- * necessary for the next pass through the loop.
- *
- * This is still under construction, but the general idea is that we have
- * several state variables in a walk context which collectively define
- * the current pass, product URI, etc, and we want to be able to
- * iterate through this sequence via the event system. So we need a
- * function which steps to the next state.
- */
-static void walk_ctx_loop_next(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
-
- assert(rc && wsk && w);
-
- if (w->manifest && w->manifest_iteration + 1 < sk_FileAndHash_num(w->manifest->fileList)) {
- w->manifest_iteration++;
- return;
- }
-
- if (w->filenames && w->filename_iteration + 1 < sk_OPENSSL_STRING_num(w->filenames)) {
- w->filename_iteration++;
- return;
- }
-
- if (w->state < walk_state_done) {
- w->state++;
- w->manifest_iteration = 0;
- w->filename_iteration = 0;
- sk_OPENSSL_STRING_pop_free(w->filenames, OPENSSL_STRING_free);
- w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
- }
-}
-
-/**
- * Whether we're done iterating over a walk context. Think of this as
- * the thing you call (negated) in the second clause of a conceptual
- * "for" loop.
- */
-static int walk_ctx_loop_done(STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- return wsk == NULL || w == NULL || w->state >= walk_state_done;
-}
-
-static Manifest *check_manifest(const rcynic_ctx_t *rc,
- STACK_OF(walk_ctx_t) *wsk);
-
-/**
- * Loop initializer for walk context. Think of this as the thing you
- * call in the first clause of a conceptual "for" loop.
- */
-static void walk_ctx_loop_init(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
-
- assert(rc && wsk && w && w->state == walk_state_ready);
-
- assert(w->manifest == NULL);
- if ((w->manifest = check_manifest(rc, wsk)) == NULL)
- logmsg(rc, log_telemetry, "Couldn't get manifest %s, blundering onward", w->certinfo.manifest.s);
-
- assert(w->filenames == NULL);
- w->filenames = directory_filenames(rc, w->state, &w->certinfo.sia);
-
- w->stale_manifest = w->manifest != NULL && X509_cmp_current_time(w->manifest->nextUpdate) < 0;
-
- w->manifest_iteration = 0;
- w->filename_iteration = 0;
- w->state++;
-
- assert(w->state == walk_state_current);
-
- while (!walk_ctx_loop_done(wsk) &&
- (w->manifest == NULL || w->manifest_iteration >= sk_FileAndHash_num(w->manifest->fileList)) &&
- (w->filenames == NULL || w->filename_iteration >= sk_OPENSSL_STRING_num(w->filenames)))
- walk_ctx_loop_next(rc, wsk);
-}
-
-/**
- * Extract URI and hash values from walk context.
- */
-static int walk_ctx_loop_this(const rcynic_ctx_t *rc,
- STACK_OF(walk_ctx_t) *wsk,
- uri_t *uri,
- const unsigned char **hash,
- size_t *hashlen)
-{
- const walk_ctx_t *w = walk_ctx_stack_head(wsk);
- const char *name = NULL;
- FileAndHash *fah = NULL;
-
- assert(rc && wsk && w && uri && hash && hashlen);
-
- if (w->manifest != NULL && w->manifest_iteration < sk_FileAndHash_num(w->manifest->fileList)) {
- fah = sk_FileAndHash_value(w->manifest->fileList, w->manifest_iteration);
- name = (const char *) fah->file->data;
- } else if (w->filenames != NULL && w->filename_iteration < sk_OPENSSL_STRING_num(w->filenames)) {
- name = sk_OPENSSL_STRING_value(w->filenames, w->filename_iteration);
- }
-
- if (name == NULL) {
- logmsg(rc, log_sys_err, "Can't find a URI in walk context, this shouldn't happen: state %d, manifest_iteration %d, filename_iteration %d",
- (int) w->state, w->manifest_iteration, w->filename_iteration);
- return 0;
- }
-
- if (strlen(w->certinfo.sia.s) + strlen(name) >= sizeof(uri->s)) {
-    logmsg(rc, log_data_err, "URI %s%s too long, skipping", w->certinfo.sia.s, name);
- return 0;
- }
-
- strcpy(uri->s, w->certinfo.sia.s);
- strcat(uri->s, name);
-
- if (fah != NULL) {
- sk_OPENSSL_STRING_remove(w->filenames, name);
- *hash = fah->hash->data;
- *hashlen = fah->hash->length;
- } else {
- *hash = NULL;
- *hashlen = 0;
- }
-
- return 1;
-}
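-
-/*
- * Sketch (not compiled) of how the loop primitives above compose into
- * the conceptual "for" loop described earlier.  process_one_object()
- * is a hypothetical callback standing in for whatever the caller does
- * with each URI; in the real program the iteration is driven by the
- * event system rather than a literal loop.
- */
-#if 0
-static void walk_ctx_loop_example(const rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
-  uri_t uri;
-  const unsigned char *hash = NULL;
-  size_t hashlen = 0;
-
-  for (walk_ctx_loop_init(rc, wsk); !walk_ctx_loop_done(wsk); walk_ctx_loop_next(rc, wsk))
-    if (walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen))
-      process_one_object(rc, wsk, &uri, hash, hashlen); /* hypothetical */
-}
-#endif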
-
-/**
- * Create a new walk context stack.
- */
-static STACK_OF(walk_ctx_t) *walk_ctx_stack_new(void)
-{
- return sk_walk_ctx_t_new_null();
-}
-
-/**
- * Push a walk context onto a walk context stack, return the new context.
- */
-static walk_ctx_t *walk_ctx_stack_push(STACK_OF(walk_ctx_t) *wsk,
- X509 *x,
- const certinfo_t *certinfo)
-{
- walk_ctx_t *w;
-
- if (x == NULL || certinfo == NULL)
- return NULL;
-
- if ((w = malloc(sizeof(*w))) == NULL)
- return NULL;
-
- memset(w, 0, sizeof(*w));
- w->cert = x;
- w->certinfo = *certinfo;
-
- if (!sk_walk_ctx_t_push(wsk, w)) {
- free(w);
- return NULL;
- }
-
- walk_ctx_attach(w);
- return w;
-}
-
-/**
- * Pop and discard a walk context from a walk context stack.
- */
-static void walk_ctx_stack_pop(STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_detach(sk_walk_ctx_t_pop(wsk));
-}
-
-/**
- * Clone a stack of walk contexts.
- */
-static STACK_OF(walk_ctx_t) *walk_ctx_stack_clone(STACK_OF(walk_ctx_t) *old_wsk)
-{
- STACK_OF(walk_ctx_t) *new_wsk;
- int i;
- if (old_wsk == NULL || (new_wsk = sk_walk_ctx_t_dup(old_wsk)) == NULL)
- return NULL;
- for (i = 0; i < sk_walk_ctx_t_num(new_wsk); i++)
- walk_ctx_attach(sk_walk_ctx_t_value(new_wsk, i));
- return new_wsk;
-}
-
-/**
- * Extract certificate stack from walk context stack. Returns a newly
- * created STACK_OF(X509) pointing to the existing cert objects (ie,
- * this is a shallow copy, so only free the STACK_OF(X509), not the
- * certificates themselves).
- */
-static STACK_OF(X509) *walk_ctx_stack_certs(STACK_OF(walk_ctx_t) *wsk)
-{
- STACK_OF(X509) *xsk = sk_X509_new_null();
- walk_ctx_t *w;
- int i;
-
- for (i = 0; i < sk_walk_ctx_t_num(wsk); i++)
- if ((w = sk_walk_ctx_t_value(wsk, i)) == NULL ||
- (w->cert != NULL && !sk_X509_push(xsk, w->cert)))
- goto fail;
-
- return xsk;
-
- fail:
- sk_X509_free(xsk);
- return NULL;
-}
-
-/**
- * Free a walk context stack, decrementing reference counts of each
- * frame on it.
- */
-static void walk_ctx_stack_free(STACK_OF(walk_ctx_t) *wsk)
-{
- sk_walk_ctx_t_pop_free(wsk, walk_ctx_detach);
-}
-
-
-
-static int rsync_count_running(const rcynic_ctx_t *);
-
-/**
- * Add a task to the task queue.
- */
-static int task_add(const rcynic_ctx_t *rc,
- void (*handler)(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *),
- STACK_OF(walk_ctx_t) *wsk)
-{
- task_t *t = malloc(sizeof(*t));
-
- assert(rc && rc->task_queue && handler);
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- if (!t)
- return 0;
-
- t->handler = handler;
- t->wsk = wsk;
-
- if (sk_task_t_push(rc->task_queue, t))
- return 1;
-
- free(t);
- return 0;
-}
-
-/**
- * Run tasks until queue is empty.
- */
-static void task_run_q(rcynic_ctx_t *rc)
-{
- task_t *t;
- assert(rc && rc->task_queue);
- while ((t = sk_task_t_shift(rc->task_queue)) != NULL) {
- t->handler(rc, t->wsk);
- free(t);
- }
-}
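-
-/*
- * Sketch (not compiled) of typical task queue use: queue a handler
- * against a walk context stack, then drain the queue in FIFO order.
- * process_walk() is a hypothetical handler with the required
- * signature, standing in for the real walk functions.
- */
-#if 0
-static void task_queue_example(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
-  if (!task_add(rc, process_walk, wsk))
-    logmsg(rc, log_sys_err, "Couldn't queue task");
-  task_run_q(rc);
-}
-#endif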
-
-
-
-/**
- * Record that we've already synced a particular rsync URI.
- */
-static void rsync_cache_add(const rcynic_ctx_t *rc, const uri_t *uri)
-{
- uri_t uribuf;
- char *s;
-
- assert(rc && uri && rc->rsync_cache);
- uribuf = *uri;
- while ((s = strrchr(uribuf.s, '/')) != NULL && s[1] == '\0')
- *s = '\0';
- assert(strlen(uribuf.s) > SIZEOF_RSYNC);
- if (!sk_OPENSSL_STRING_push_strdup(rc->rsync_cache, uribuf.s + SIZEOF_RSYNC))
- logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri->s);
-}
-
-/**
- * Check whether a string (an rsync URI with its leading scheme
- * stripped) is covered by the cache of URIs we've already fetched.
- */
-static int rsync_cached_string(const rcynic_ctx_t *rc,
- const char *string)
-{
- char *s, buffer[URI_MAX];
-
- assert(rc && rc->rsync_cache && strlen(string) < sizeof(buffer));
- strcpy(buffer, string);
- if ((s = strrchr(buffer, '/')) != NULL && s[1] == '\0')
- *s = '\0';
- while (sk_OPENSSL_STRING_find(rc->rsync_cache, buffer) < 0) {
- if ((s = strrchr(buffer, '/')) == NULL)
- return 0;
- *s = '\0';
- }
- return 1;
-}
-
-/**
- * Check whether a particular URI has been cached.
- */
-static int rsync_cached_uri(const rcynic_ctx_t *rc,
- const uri_t *uri)
-{
- return is_rsync(uri->s) && rsync_cached_string(rc, uri->s + SIZEOF_RSYNC);
-}
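-
-/*
- * Example of the cache's prefix semantics, with hypothetical URIs:
- * once rsync_cache_add() has recorded example.net/repo (the URI with
- * its leading rsync:// stripped), rsync_cached_uri() reports a hit
- * for rsync://example.net/repo/sub/obj.cer, because the lookup strips
- * trailing path components until it finds a cached ancestor.
- */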
-
-/**
- * Return count of how many rsync contexts are currently running.
- */
-static int rsync_count_running(const rcynic_ctx_t *rc)
-{
- const rsync_ctx_t *ctx;
- int i, n = 0;
-
- assert(rc && rc->rsync_queue);
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- switch (ctx->state) {
- case rsync_state_running:
- case rsync_state_terminating:
- n++;
- default:
- continue;
- }
- }
-
- return n;
-}
-
-/**
- * Test whether an rsync context conflicts with anything that's
- * currently runable.
- */
-static int rsync_conflicts(const rcynic_ctx_t *rc,
- const rsync_ctx_t *ctx)
-{
- const rsync_ctx_t *c;
- int i;
-
- assert(rc && ctx && rc->rsync_queue);
-
- for (i = 0; (c = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (c != ctx &&
- (c->state == rsync_state_initial ||
- c->state == rsync_state_running) &&
- conflicting_uris(&c->uri, &ctx->uri))
- return 1;
-
- return 0;
-}
-
-/**
- * Test whether an rsync context is runable at this time.
- */
-static int rsync_runable(const rcynic_ctx_t *rc,
- const rsync_ctx_t *ctx)
-{
- assert(rc && ctx);
-
- switch (ctx->state) {
-
- case rsync_state_initial:
- case rsync_state_running:
- return 1;
-
- case rsync_state_retry_wait:
- return ctx->deadline <= time(0);
-
- case rsync_state_terminating:
- return 0;
-
- case rsync_state_conflict_wait:
- return !rsync_conflicts(rc, ctx);
- }
-
- return 0;
-}
-
-/**
- * Return count of runable rsync contexts.
- */
-static int rsync_count_runable(const rcynic_ctx_t *rc)
-{
- const rsync_ctx_t *ctx;
- int i, n = 0;
-
- assert(rc && rc->rsync_queue);
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (rsync_runable(rc, ctx))
- n++;
-
- return n;
-}
-
-/**
- * Run an rsync process.
- */
-static void rsync_run(const rcynic_ctx_t *rc,
- rsync_ctx_t *ctx)
-{
- static const char * const rsync_cmd[] = {
- "rsync", "--update", "--times", "--copy-links", "--itemize-changes"
- };
- static const char * const rsync_tree_args[] = {
- "--recursive", "--delete"
- };
-
- const char *argv[10];
- path_t path;
- int i, argc = 0, flags, pipe_fds[2];
-
- pipe_fds[0] = pipe_fds[1] = -1;
-
- assert(rc && ctx && ctx->pid == 0 && ctx->state != rsync_state_running && rsync_runable(rc, ctx));
-
- assert(rsync_count_running(rc) < rc->max_parallel_fetches);
-
- logmsg(rc, log_telemetry, "Fetching %s", ctx->uri.s);
-
- memset(argv, 0, sizeof(argv));
-
- for (i = 0; i < sizeof(rsync_cmd)/sizeof(*rsync_cmd); i++) {
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = rsync_cmd[i];
- }
- if (endswith(ctx->uri.s, "/")) {
- for (i = 0; i < sizeof(rsync_tree_args)/sizeof(*rsync_tree_args); i++) {
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = rsync_tree_args[i];
- }
- }
-
- if (rc->rsync_program)
- argv[0] = rc->rsync_program;
-
- if (!uri_to_filename(rc, &ctx->uri, &path, &rc->unauthenticated)) {
- logmsg(rc, log_data_err, "Couldn't extract filename from URI: %s", ctx->uri.s);
- goto lose;
- }
-
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = ctx->uri.s;
-
- assert(argc < sizeof(argv)/sizeof(*argv));
- argv[argc++] = path.s;
-
- if (!mkdir_maybe(rc, &path)) {
- logmsg(rc, log_sys_err, "Couldn't make target directory: %s", path.s);
- goto lose;
- }
-
- for (i = 0; i < argc; i++)
- logmsg(rc, log_verbose, "rsync argv[%d]: %s", i, argv[i]);
-
- if (pipe(pipe_fds) < 0) {
- logmsg(rc, log_sys_err, "pipe() failed: %s", strerror(errno));
- goto lose;
- }
- ctx->fd = pipe_fds[0];
-
- if ((flags = fcntl(ctx->fd, F_GETFL, 0)) == -1) {
- logmsg(rc, log_sys_err, "fcntl(F_GETFL) failed: %s",
- strerror(errno));
- goto lose;
- }
- flags |= O_NONBLOCK;
- if (fcntl(ctx->fd, F_SETFL, flags) == -1) {
- logmsg(rc, log_sys_err, "fcntl(F_SETFL) failed: %s",
- strerror(errno));
- goto lose;
- }
-
- switch ((ctx->pid = vfork())) {
-
- case -1:
- logmsg(rc, log_sys_err, "vfork() failed: %s", strerror(errno));
- goto lose;
-
- case 0:
- /*
- * Child
- */
-#define whine(msg) ((void) write(2, msg, sizeof(msg) - 1))
- if (close(pipe_fds[0]) < 0)
- whine("close(pipe_fds[0]) failed\n");
- else if (dup2(pipe_fds[1], 1) < 0)
- whine("dup2(pipe_fds[1], 1) failed\n");
- else if (dup2(pipe_fds[1], 2) < 0)
- whine("dup2(pipe_fds[1], 2) failed\n");
- else if (close(pipe_fds[1]) < 0)
- whine("close(pipe_fds[1]) failed\n");
- else if (execvp(argv[0], (char * const *) argv) < 0)
- whine("execvp(argv[0], (char * const *) argv) failed\n");
- whine("last system error: ");
- write(2, strerror(errno), strlen(strerror(errno)));
- whine("\n");
- _exit(1);
-#undef whine
-
- default:
- /*
- * Parent
- */
- (void) close(pipe_fds[1]);
- pipe_fds[1] = -1;
- ctx->state = rsync_state_running;
- ctx->problem = rsync_problem_none;
- if (rc->rsync_timeout)
- ctx->deadline = time(0) + rc->rsync_timeout;
- logmsg(rc, log_debug, "Subprocess %u started, queued %d, runable %d, running %d, max %d, URI %s",
- (unsigned) ctx->pid, sk_rsync_ctx_t_num(rc->rsync_queue), rsync_count_runable(rc), rsync_count_running(rc), rc->max_parallel_fetches, ctx->uri.s);
- if (ctx->handler)
- ctx->handler(rc, ctx, rsync_status_pending, &ctx->uri, ctx->wsk);
- return;
-
- }
-
- lose:
- if (pipe_fds[0] != -1)
- (void) close(pipe_fds[0]);
- if (pipe_fds[1] != -1)
- (void) close(pipe_fds[1]);
- if (rc->rsync_queue && ctx)
- (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
- if (ctx && ctx->handler)
- ctx->handler(rc, ctx, rsync_status_failed, &ctx->uri, ctx->wsk);
- if (ctx)
- free(ctx);
-}
-
-/**
- * Process one line of rsync's output. This is a separate function
- * primarily to centralize scraping for magic error strings.
- */
-static void do_one_rsync_log_line(const rcynic_ctx_t *rc,
- rsync_ctx_t *ctx)
-{
- unsigned u;
- char *s;
-
- /*
- * Send line to our log unless it's empty.
- */
- if (ctx->buffer[strspn(ctx->buffer, " \t\n\r")] != '\0')
- logmsg(rc, log_telemetry, "rsync[%u]: %s", ctx->pid, ctx->buffer);
-
- /*
- * Check for magic error strings
- */
- if ((s = strstr(ctx->buffer, "@ERROR: max connections")) != NULL) {
- ctx->problem = rsync_problem_refused;
- if (sscanf(s, "@ERROR: max connections (%u) reached -- try again later", &u) == 1)
- logmsg(rc, log_debug, "Subprocess %u reported limit of %u for %s", ctx->pid, u, ctx->uri.s);
- }
-}
-
-/**
- * Construct select() arguments.
- */
-static int rsync_construct_select(const rcynic_ctx_t *rc,
- const time_t now,
- fd_set *rfds,
- struct timeval *tv)
-{
- rsync_ctx_t *ctx;
- time_t when = 0;
- int i, n = 0;
-
- assert(rc && rc->rsync_queue && rfds && tv);
-
- FD_ZERO(rfds);
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- switch (ctx->state) {
-
- case rsync_state_running:
- if (ctx->fd >= 0) {
- FD_SET(ctx->fd, rfds);
- if (ctx->fd > n)
- n = ctx->fd;
- }
- if (!rc->rsync_timeout)
- continue;
- /* Fall through */
-
- case rsync_state_retry_wait:
- if (when == 0 || ctx->deadline < when)
- when = ctx->deadline;
- /* Fall through */
-
- default:
- continue;
- }
- }
-
- tv->tv_sec = when ? when - now : 0;
- tv->tv_usec = 0;
- return n;
-}
-
-/**
- * Manager for queue of rsync tasks in progress.
- *
- * General plan here is to process one completed child, or output
- * accumulated from children, or block if there is absolutely nothing
- * to do, on the theory that the caller had nothing to do either or would
- * not have called us. Once we've done something allegedly useful, we
- * return, because this is not the event loop; if and when the event
- * loop has nothing more important to do, we'll be called again.
- *
- * So this is the only place where the program blocks waiting for
- * children, but we only do it when we know there's nothing else
- * useful that we could be doing while we wait.
- */
-static void rsync_mgr(const rcynic_ctx_t *rc)
-{
- time_t now = time(0);
- int i, n, pid_status = -1;
- rsync_ctx_t *ctx = NULL;
- struct timeval tv;
- fd_set rfds;
- pid_t pid;
- char *s;
-
- assert(rc && rc->rsync_queue);
-
- /*
- * Check for exited subprocesses.
- */
-
- while ((pid = waitpid(-1, &pid_status, WNOHANG)) > 0) {
-
- /*
- * Child exited, handle it.
- */
-
- logmsg(rc, log_debug, "Subprocess %d exited with status %d", pid, WEXITSTATUS(pid_status));
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (ctx->pid == pid)
- break;
- if (ctx == NULL) {
- assert(i == sk_rsync_ctx_t_num(rc->rsync_queue));
- logmsg(rc, log_sys_err, "Couldn't find rsync context for pid %d", pid);
- continue;
- }
-
- close(ctx->fd);
- ctx->fd = -1;
-
- if (ctx->buflen > 0) {
- assert(ctx->buflen < sizeof(ctx->buffer));
- ctx->buffer[ctx->buflen] = '\0';
- do_one_rsync_log_line(rc, ctx);
- ctx->buflen = 0;
- }
-
- switch (WEXITSTATUS(pid_status)) {
-
- case 0:
- log_validation_status(rc, &ctx->uri,
- (ctx->problem == rsync_problem_timed_out
- ? rsync_timed_out
- : rsync_succeeded),
- object_generation_null);
- break;
-
- case 5: /* "Error starting client-server protocol" */
- /*
- * Handle remote rsyncd refusing to talk to us because we've
- * exceeded its connection limit. Back off for a short
- * interval, then retry.
- */
- if (ctx->problem == rsync_problem_refused && ctx->tries < rc->max_retries) {
- unsigned char r;
- if (!RAND_bytes(&r, sizeof(r)))
- r = 60;
- ctx->deadline = time(0) + rc->retry_wait_min + r;
- ctx->state = rsync_state_retry_wait;
- ctx->problem = rsync_problem_none;
- ctx->pid = 0;
- ctx->tries++;
- logmsg(rc, log_telemetry, "Scheduling retry for %s", ctx->uri.s);
- continue;
- }
-
- /* Otherwise, fall through */
-
- case 2: /* "Protocol incompatibility" */
- case 4: /* "Requested action not supported" */
- case 10: /* "Error in socket I/O" */
- case 11: /* "Error in file I/O" */
- case 12: /* "Error in rsync protocol data stream" */
- case 21: /* "Some error returned by waitpid()" */
- case 30: /* "Timeout in data send/receive" */
- case 35: /* "Timeout waiting for daemon connection" */
- logmsg(rc, log_telemetry, "Adding %s to dead host cache", ctx->uri.s);
- dead_host_add(rc, &ctx->uri);
-
- /* Fall through */
-
- default:
- logmsg(rc, log_data_err, "rsync %u exited with status %d fetching %s",
- (unsigned) pid, WEXITSTATUS(pid_status), ctx->uri.s);
- log_validation_status(rc, &ctx->uri,
- (rc->rsync_timeout && now >= ctx->deadline
- ? rsync_timed_out
- : rsync_failed),
- object_generation_null);
- break;
- }
-
- rsync_cache_add(rc, &ctx->uri);
- if (ctx->handler)
- ctx->handler(rc, ctx, (ctx->problem == rsync_problem_timed_out
- ? rsync_status_timed_out
- : WEXITSTATUS(pid_status) != 0
- ? rsync_status_failed
- : rsync_status_done),
- &ctx->uri, ctx->wsk);
- (void) sk_rsync_ctx_t_delete_ptr(rc->rsync_queue, ctx);
- free(ctx);
- ctx = NULL;
- }
-
- if (pid == -1 && errno != EINTR && errno != ECHILD)
- logmsg(rc, log_sys_err, "waitpid() returned error: %s", strerror(errno));
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- /*
- * Look for rsync contexts that have become runable.
- */
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i)
- if (ctx->state != rsync_state_running &&
- rsync_runable(rc, ctx) &&
- rsync_count_running(rc) < rc->max_parallel_fetches)
- rsync_run(rc, ctx);
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- /*
- * Check for log text from subprocesses.
- */
-
- n = rsync_construct_select(rc, now, &rfds, &tv);
-
- if (n > 0 || tv.tv_sec)
- n = select(n + 1, &rfds, NULL, NULL, tv.tv_sec ? &tv : NULL);
-
- if (n > 0) {
-
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- if (ctx->fd <= 0 || !FD_ISSET(ctx->fd, &rfds))
- continue;
-
- assert(ctx->buflen < sizeof(ctx->buffer) - 1);
-
- while ((n = read(ctx->fd, ctx->buffer + ctx->buflen, sizeof(ctx->buffer) - 1 - ctx->buflen)) > 0) {
- ctx->buflen += n;
- assert(ctx->buflen < sizeof(ctx->buffer));
- ctx->buffer[ctx->buflen] = '\0';
-
- while ((s = strchr(ctx->buffer, '\n')) != NULL) {
- *s++ = '\0';
- do_one_rsync_log_line(rc, ctx);
- assert(s > ctx->buffer && s < ctx->buffer + sizeof(ctx->buffer));
- ctx->buflen -= s - ctx->buffer;
- assert(ctx->buflen < sizeof(ctx->buffer));
- if (ctx->buflen > 0)
- memmove(ctx->buffer, s, ctx->buflen);
- ctx->buffer[ctx->buflen] = '\0';
- }
-
- if (ctx->buflen == sizeof(ctx->buffer) - 1) {
- ctx->buffer[sizeof(ctx->buffer) - 1] = '\0';
- do_one_rsync_log_line(rc, ctx);
- ctx->buflen = 0;
- }
- }
-
- if (n == 0) {
- (void) close(ctx->fd);
- ctx->fd = -1;
- }
- }
- }
-
- assert(rsync_count_running(rc) <= rc->max_parallel_fetches);
-
- /*
- * Deal with children that have been running too long.
- */
- if (rc->rsync_timeout) {
- for (i = 0; (ctx = sk_rsync_ctx_t_value(rc->rsync_queue, i)) != NULL; ++i) {
- int sig;
- if (ctx->pid <= 0 || now < ctx->deadline)
- continue;
- sig = ctx->tries++ < KILL_MAX ? SIGTERM : SIGKILL;
- if (ctx->state != rsync_state_terminating) {
- ctx->problem = rsync_problem_timed_out;
- ctx->state = rsync_state_terminating;
- ctx->tries = 0;
- logmsg(rc, log_telemetry, "Subprocess %u is taking too long fetching %s, whacking it", (unsigned) ctx->pid, ctx->uri.s);
- dead_host_add(rc, &ctx->uri);
- } else if (sig == SIGTERM) {
- logmsg(rc, log_telemetry, "Whacking subprocess %u again", (unsigned) ctx->pid);
- } else {
- logmsg(rc, log_telemetry, "Whacking subprocess %u with big hammer", (unsigned) ctx->pid);
- }
- (void) kill(ctx->pid, sig);
- ctx->deadline = now + 1;
- }
- }
-}
-
-/**
- * Set up rsync context and attempt to start it.
- */
-static void rsync_init(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- void (*handler)(const rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *))
-{
- rsync_ctx_t *ctx = NULL;
-
- assert(rc && uri && strlen(uri->s) > SIZEOF_RSYNC);
-
- if (!rc->run_rsync) {
- logmsg(rc, log_verbose, "rsync disabled, skipping %s", uri->s);
- rsync_cache_add(rc, uri);
- if (handler)
- handler(rc, NULL, rsync_status_skipped, uri, wsk);
- return;
- }
-
- if (rsync_cached_uri(rc, uri)) {
- logmsg(rc, log_verbose, "rsync cache hit for %s", uri->s);
- if (handler)
- handler(rc, NULL, rsync_status_done, uri, wsk);
- return;
- }
-
- if (dead_host_check(rc, uri)) {
- logmsg(rc, log_verbose, "Dead host cache hit for %s", uri->s);
- rsync_cache_add(rc, uri);
- if (handler)
- handler(rc, NULL, rsync_status_skipped, uri, wsk);
- return;
- }
-
- if ((ctx = malloc(sizeof(*ctx))) == NULL) {
-    logmsg(rc, log_sys_err, "malloc(rsync_ctx_t) failed");
- if (handler)
- handler(rc, NULL, rsync_status_failed, uri, wsk);
- return;
- }
-
- memset(ctx, 0, sizeof(*ctx));
- ctx->uri = *uri;
- ctx->handler = handler;
- ctx->wsk = wsk;
- ctx->fd = -1;
-
- if (!sk_rsync_ctx_t_push(rc->rsync_queue, ctx)) {
- logmsg(rc, log_sys_err, "Couldn't push rsync state object onto queue, punting %s", ctx->uri.s);
- if (handler)
- handler(rc, ctx, rsync_status_failed, uri, wsk);
- free(ctx);
- return;
- }
-
- if (rsync_conflicts(rc, ctx)) {
- logmsg(rc, log_debug, "New rsync context %s is feeling conflicted", ctx->uri.s);
- ctx->state = rsync_state_conflict_wait;
- }
-
-
-#if 0
-  if (rsync_runable(rc, ctx) && rsync_count_running(rc) < rc->max_parallel_fetches)
- rsync_run(rc, ctx);
-#endif
-}
-
-/**
- * rsync a single file (trust anchor, CRL, manifest, ROA, whatever).
- */
-static void rsync_file(const rcynic_ctx_t *rc,
- const uri_t *uri)
-{
- assert(!endswith(uri->s, "/"));
- rsync_init(rc, uri, NULL, NULL);
-}
-
-/**
- * rsync an entire subtree, generally rooted at a SIA collection.
- */
-static void rsync_tree(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- void (*handler)(const rcynic_ctx_t *, const rsync_ctx_t *, const rsync_status_t, const uri_t *, STACK_OF(walk_ctx_t) *))
-{
- assert(endswith(uri->s, "/"));
- rsync_init(rc, uri, wsk, handler);
-}
-
-
-
-/**
- * Clean up old stuff from previous rsync runs. --delete doesn't help
- * if the URI changes and we never visit the old URI again.
- */
-static int prune_unauthenticated(const rcynic_ctx_t *rc,
- const path_t *name,
- const size_t baselen)
-{
- path_t path;
- struct dirent *d;
- size_t len;
- DIR *dir;
- int need_slash;
-
- assert(rc && name && baselen > 0);
- len = strlen(name->s);
- assert(len >= baselen && len < sizeof(path.s));
- need_slash = name->s[len - 1] != '/';
-
- if (rsync_cached_string(rc, name->s + baselen)) {
- logmsg(rc, log_debug, "prune: cache hit for %s, not cleaning", name->s);
- return 1;
- }
-
- if (rmdir(name->s) == 0) {
- logmsg(rc, log_debug, "prune: removed %s", name->s);
- return 1;
- }
-
- switch (errno) {
- case ENOENT:
-    logmsg(rc, log_debug, "prune: nonexistent %s", name->s);
- return 1;
- case ENOTEMPTY:
- break;
- default:
- logmsg(rc, log_debug, "prune: other error %s: %s", name->s, strerror(errno));
- return 0;
- }
-
- if ((dir = opendir(name->s)) == NULL)
- return 0;
-
- while ((d = readdir(dir)) != NULL) {
- if (d->d_name[0] == '.' && (d->d_name[1] == '\0' || (d->d_name[1] == '.' && d->d_name[2] == '\0')))
- continue;
-    if (len + strlen(d->d_name) + need_slash >= sizeof(path.s)) {
- logmsg(rc, log_debug, "prune: %s%s%s too long", name->s, (need_slash ? "/" : ""), d->d_name);
- goto done;
- }
- strcpy(path.s, name->s);
- if (need_slash)
- strcat(path.s, "/");
- strcat(path.s, d->d_name);
- switch (d->d_type) {
- case DT_DIR:
- if (!prune_unauthenticated(rc, &path, baselen))
- goto done;
- continue;
- default:
- if (rsync_cached_string(rc, path.s + baselen)) {
- logmsg(rc, log_debug, "prune: cache hit %s", path.s);
- continue;
- }
- if (unlink(path.s) < 0) {
- logmsg(rc, log_debug, "prune: removing %s failed: %s", path.s, strerror(errno));
- goto done;
- }
- logmsg(rc, log_debug, "prune: removed %s", path.s);
- continue;
- }
- }
-
- if (rmdir(name->s) < 0 && errno != ENOTEMPTY)
- logmsg(rc, log_debug, "prune: couldn't remove %s: %s", name->s, strerror(errno));
-
- done:
- closedir(dir);
- return !d;
-}
-
-
-
-/**
- * Read a DER object using a BIO pipeline that hashes the file content
- * as we read it. Returns the internal form of the parsed DER object,
- * sets the hash buffer (if specified) as a side effect. The default
- * hash algorithm is SHA-256.
- */
-static void *read_file_with_hash(const path_t *filename,
- const ASN1_ITEM *it,
- const EVP_MD *md,
- hashbuf_t *hash)
-{
- void *result = NULL;
- BIO *b;
-
- if ((b = BIO_new_file(filename->s, "rb")) == NULL)
- goto error;
-
- if (hash != NULL) {
- BIO *b2 = BIO_new(BIO_f_md());
- if (b2 == NULL)
- goto error;
- if (md == NULL)
- md = EVP_sha256();
- if (!BIO_set_md(b2, md)) {
- BIO_free(b2);
- goto error;
- }
- BIO_push(b2, b);
- b = b2;
- }
-
- if ((result = ASN1_item_d2i_bio(it, b, NULL)) == NULL)
- goto error;
-
- if (hash != NULL) {
- memset(hash, 0, sizeof(*hash));
- BIO_gets(b, (char *) hash, sizeof(hash->h));
- }
-
- error:
- BIO_free_all(b);
- return result;
-}
-
-/**
- * Read and hash a certificate.
- */
-static X509 *read_cert(const path_t *filename, hashbuf_t *hash)
-{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(X509), NULL, hash);
-}
-
-/**
- * Read and hash a CRL.
- */
-static X509_CRL *read_crl(const path_t *filename, hashbuf_t *hash)
-{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(X509_CRL), NULL, hash);
-}
-
-/**
- * Read and hash a CMS message.
- */
-static CMS_ContentInfo *read_cms(const path_t *filename, hashbuf_t *hash)
-{
- return read_file_with_hash(filename, ASN1_ITEM_rptr(CMS_ContentInfo), NULL, hash);
-}
-
-
-
-/**
- * Extract CRLDP data from a certificate.
- */
-static void extract_crldp_uri(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const object_generation_t generation,
- const STACK_OF(DIST_POINT) *crldp,
- uri_t *result)
-{
- DIST_POINT *d;
- int i;
-
- assert(crldp);
-
- if (sk_DIST_POINT_num(crldp) != 1) {
- log_validation_status(rc, uri, malformed_crldp, generation);
- return;
- }
-
- d = sk_DIST_POINT_value(crldp, 0);
-
- if (d->reasons || d->CRLissuer || !d->distpoint || d->distpoint->type != 0) {
- log_validation_status(rc, uri, malformed_crldp, generation);
- return;
- }
-
- for (i = 0; i < sk_GENERAL_NAME_num(d->distpoint->name.fullname); i++) {
- GENERAL_NAME *n = sk_GENERAL_NAME_value(d->distpoint->name.fullname, i);
- assert(n != NULL);
- if (n->type != GEN_URI) {
- log_validation_status(rc, uri, malformed_crldp, generation);
- return;
- }
- if (!is_rsync((char *) n->d.uniformResourceIdentifier->data)) {
- logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
- (char *) n->d.uniformResourceIdentifier->data, uri->s);
- continue;
- }
- if (sizeof(result->s) <= n->d.uniformResourceIdentifier->length) {
- log_validation_status(rc, uri, uri_too_long, generation);
- continue;
- }
- strcpy(result->s, (char *) n->d.uniformResourceIdentifier->data);
- return;
- }
-}
-
-/**
- * Extract SIA or AIA data from a certificate.
- */
-static void extract_access_uri(const rcynic_ctx_t *rc,
- const uri_t *uri,
- const object_generation_t generation,
- const AUTHORITY_INFO_ACCESS *xia,
- const unsigned char *oid,
- const int oidlen,
- uri_t *result)
-{
- int i;
-
- if (!xia)
- return;
-
- for (i = 0; i < sk_ACCESS_DESCRIPTION_num(xia); i++) {
- ACCESS_DESCRIPTION *a = sk_ACCESS_DESCRIPTION_value(xia, i);
- assert(a != NULL);
- if (a->location->type != GEN_URI)
- return;
- if (oid_cmp(a->method, oid, oidlen))
- continue;
- if (!is_rsync((char *) a->location->d.uniformResourceIdentifier->data)) {
- logmsg(rc, log_verbose, "Skipping non-rsync URI %s for %s",
- a->location->d.uniformResourceIdentifier->data, uri->s);
- continue;
- }
- if (sizeof(result->s) <= a->location->d.uniformResourceIdentifier->length) {
- log_validation_status(rc, uri, uri_too_long, generation);
- continue;
- }
- strcpy(result->s, (char *) a->location->d.uniformResourceIdentifier->data);
- return;
- }
-}
-
-/**
- * Parse interesting stuff from a certificate.
- */
-static void parse_cert(const rcynic_ctx_t *rc, X509 *x, certinfo_t *c, const uri_t *uri, const object_generation_t generation)
-{
- STACK_OF(DIST_POINT) *crldp;
- AUTHORITY_INFO_ACCESS *xia;
-
- assert(x != NULL && c != NULL && uri != NULL);
- memset(c, 0, sizeof(*c));
-
- c->ca = X509_check_ca(x) == 1;
- c->uri = *uri;
- c->generation = generation;
-
- if ((xia = X509_get_ext_d2i(x, NID_info_access, NULL, NULL)) != NULL) {
- extract_access_uri(rc, uri, generation, xia, id_ad_caIssuers, sizeof(id_ad_caIssuers), &c->aia);
- sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
- }
-
- if ((xia = X509_get_ext_d2i(x, NID_sinfo_access, NULL, NULL)) != NULL) {
- extract_access_uri(rc, uri, generation, xia, id_ad_caRepository, sizeof(id_ad_caRepository), &c->sia);
- extract_access_uri(rc, uri, generation, xia, id_ad_rpkiManifest, sizeof(id_ad_rpkiManifest), &c->manifest);
- sk_ACCESS_DESCRIPTION_pop_free(xia, ACCESS_DESCRIPTION_free);
- }
-
- if ((crldp = X509_get_ext_d2i(x, NID_crl_distribution_points, NULL, NULL)) != NULL) {
- extract_crldp_uri(rc, uri, generation, crldp, &c->crldp);
- sk_DIST_POINT_pop_free(crldp, DIST_POINT_free);
- }
-}
-
-
-
-/**
- * Attempt to read and check one CRL from disk.
- */
-static X509_CRL *check_crl_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- X509 *issuer,
- const unsigned char *hash,
- const size_t hashlen,
- const object_generation_t generation)
-{
- hashbuf_t hashbuf;
- X509_CRL *crl = NULL;
- EVP_PKEY *pkey;
- int ret;
-
- assert(uri && path && issuer);
-
- if (!uri_to_filename(rc, uri, path, prefix))
- goto punt;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto punt;
- }
-
- if (hash)
- crl = read_crl(path, &hashbuf);
- else
- crl = read_crl(path, NULL);
-
- if (!crl)
- goto punt;
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, crl_digest_mismatch, generation);
- goto punt;
- }
-
- if (X509_cmp_current_time(X509_CRL_get_lastUpdate(crl)) > 0) {
- log_validation_status(rc, uri, crl_not_yet_valid, generation);
- goto punt;
- }
-
- if (X509_cmp_current_time(X509_CRL_get_nextUpdate(crl)) < 0) {
- log_validation_status(rc, uri, stale_crl, generation);
- if (!rc->allow_stale_crl)
- goto punt;
- }
-
- if ((pkey = X509_get_pubkey(issuer)) == NULL)
- goto punt;
- ret = X509_CRL_verify(crl, pkey);
- EVP_PKEY_free(pkey);
-
- if (ret > 0)
- return crl;
-
- punt:
- X509_CRL_free(crl);
- return NULL;
-}
-
-/**
- * Check whether we already have a particular CRL, attempt to fetch it
- * and check issuer's signature if we don't.
- */
-static X509_CRL *check_crl(const rcynic_ctx_t *rc,
- const uri_t *uri,
- X509 *issuer,
- const unsigned char *hash,
- const size_t hashlen)
-{
- path_t path;
- X509_CRL *crl;
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- (crl = read_crl(&path, NULL)) != NULL)
- return crl;
-
- logmsg(rc, log_telemetry, "Checking CRL %s", uri->s);
-
- if ((crl = check_crl_1(rc, uri, &path, &rc->unauthenticated,
- issuer, hash, hashlen, object_generation_current))) {
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- return crl;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if ((crl = check_crl_1(rc, uri, &path, &rc->old_authenticated,
- issuer, hash, hashlen, object_generation_backup))) {
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- return crl;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- return NULL;
-}
-
-
-
-/**
- * Check whether extensions in a certificate are allowed by profile.
- * Also returns failure in a few null-pointer cases that can't
- * possibly conform to profile.
- */
-static int check_allowed_extensions(const X509 *x, const int allow_eku)
-{
- int i;
-
- if (x == NULL || x->cert_info == NULL || x->cert_info->extensions == NULL)
- return 0;
-
- for (i = 0; i < sk_X509_EXTENSION_num(x->cert_info->extensions); i++) {
- switch (OBJ_obj2nid(sk_X509_EXTENSION_value(x->cert_info->extensions,
- i)->object)) {
- case NID_basic_constraints:
- case NID_subject_key_identifier:
- case NID_authority_key_identifier:
- case NID_key_usage:
- case NID_crl_distribution_points:
- case NID_info_access:
- case NID_sinfo_access:
- case NID_certificate_policies:
- case NID_sbgp_ipAddrBlock:
- case NID_sbgp_autonomousSysNum:
- continue;
- case NID_ext_key_usage:
- if (allow_eku)
- continue;
- else
- return 0;
- default:
- return 0;
- }
- }
-
- return 1;
-}
-
-/**
- * Check whether a Distinguished Name conforms to the rescert profile.
- * The profile is very restrictive: it only allows one mandatory
- * CommonName field and one optional SerialNumber field, both of which
- * must be of type PrintableString.
- */
-static int check_allowed_dn(X509_NAME *dn)
-{
- X509_NAME_ENTRY *ne;
- ASN1_STRING *s;
- int loc;
-
- if (dn == NULL)
- return 0;
-
- switch (X509_NAME_entry_count(dn)) {
-
- case 2:
- if ((loc = X509_NAME_get_index_by_NID(dn, NID_serialNumber, -1)) < 0 ||
- (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
- (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
- ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
- return 0;
-
- /* Fall through */
-
- case 1:
- if ((loc = X509_NAME_get_index_by_NID(dn, NID_commonName, -1)) < 0 ||
- (ne = X509_NAME_get_entry(dn, loc)) == NULL ||
- (s = X509_NAME_ENTRY_get_data(ne)) == NULL ||
- ASN1_STRING_type(s) != V_ASN1_PRINTABLESTRING)
- return 0;
-
- return 1;
-
- default:
- return 0;
- }
-}
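-
-/*
- * Hypothetical examples: a subject name of just CN=Example-CA passes,
- * as does CN=Example-CA plus serialNumber=0042, provided both
- * attribute values are encoded as PrintableString; any other
- * attribute, attribute count, or string type is rejected.
- */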
-
-
-
-/**
- * Validation callback function for use with X509_verify_cert().
- */
-static int check_x509_cb(int ok, X509_STORE_CTX *ctx)
-{
- rcynic_x509_store_ctx_t *rctx = (rcynic_x509_store_ctx_t *) ctx;
- mib_counter_t code;
-
- assert(rctx != NULL);
-
- switch (ctx->error) {
- case X509_V_OK:
- return ok;
-
- case X509_V_ERR_SUBJECT_ISSUER_MISMATCH:
- /*
- * Informational events, not really errors. ctx->check_issued()
- * is called in many places where failure to find an issuer is not
- * a failure for the calling function. Just leave these alone.
- */
- return ok;
-
- case X509_V_ERR_CRL_HAS_EXPIRED:
- /*
- * This isn't really an error, exactly. CRLs don't really
- * "expire". What OpenSSL really means by this error is just
- * "it's now later than the issuer said it intended to publish a
- * new CRL". Whether we treat this as an error or not is
- * configurable, see the allow_stale_crl parameter.
- *
- * Deciding whether to allow stale CRLs is check_crl_1()'s job,
- * not ours. By the time this callback occurs, we've already
- * accepted the CRL; this callback is just notifying us that the
- * object being checked is tainted by a stale CRL. So we mark the
- * object as tainted and carry on.
- */
- log_validation_status(rctx->rc, &rctx->subject->uri, tainted_by_stale_crl, rctx->subject->generation);
- ok = 1;
- return ok;
-
- case X509_V_ERR_UNABLE_TO_GET_ISSUER_CERT:
- /*
- * This is another error that's only an error in the strange world
- * of OpenSSL, but a more serious one. By default, OpenSSL
- * expects all trust anchors to be self-signed. This is not a
- * PKIX requirement, it's just an OpenSSL thing, but one violates
- * it at one's peril, because the only way to convince OpenSSL to
- * allow a non-self-signed trust anchor is to intercept this
- * "error" in the verify callback handler.
- *
- * So this program supports non-self-signed trust anchors, but be
- * warned that enabling this feature may cause this program's
- * output not to work with other OpenSSL-based applications.
- */
- if (rctx->rc->allow_non_self_signed_trust_anchor)
- ok = 1;
- log_validation_status(rctx->rc, &rctx->subject->uri, trust_anchor_not_self_signed, rctx->subject->generation);
- return ok;
-
- /*
-   * Select the correct MIB counter for every known OpenSSL verify error
- * except the ones we handle explicitly above, then fall through to
- * common handling for all of these.
- */
-#define QV(x) \
- case x: \
- code = mib_openssl_##x; \
- break;
- MIB_COUNTERS_FROM_OPENSSL;
-#undef QV
-
- default:
- code = unknown_verify_error;
- break;
- }
-
- log_validation_status(rctx->rc, &rctx->subject->uri, code, rctx->subject->generation);
- return ok;
-}
-
-/**
- * Check crypto aspects of a certificate, policy OID, RFC 3779 path
- * validation, and conformance to the RPKI certificate profile.
- */
-static int check_x509(const rcynic_ctx_t *rc,
- STACK_OF(X509) *certs,
- X509 *x,
- const certinfo_t *subject,
- const certinfo_t *issuer_certinfo)
-{
- rcynic_x509_store_ctx_t rctx;
- STACK_OF(X509_CRL) *crls = NULL;
- EVP_PKEY *pkey = NULL;
- X509_CRL *crl = NULL;
- unsigned long flags = (X509_V_FLAG_POLICY_CHECK | X509_V_FLAG_EXPLICIT_POLICY | X509_V_FLAG_X509_STRICT);
- X509 *issuer;
- int ret = 0;
-
- assert(rc && certs && x && subject);
-
- if (!X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, x, NULL))
- return 0;
- rctx.rc = rc;
- rctx.subject = subject;
-
- issuer = sk_X509_value(certs, sk_X509_num(certs) - 1);
- assert(issuer != NULL);
-
- if (subject->sia.s[0] && subject->sia.s[strlen(subject->sia.s) - 1] != '/') {
- log_validation_status(rc, &subject->uri, malformed_sia, subject->generation);
- goto done;
- }
-
- if (!subject->ta && !subject->aia.s[0]) {
- log_validation_status(rc, &subject->uri, aia_missing, subject->generation);
- goto done;
- }
-
- if (!issuer_certinfo->ta && strcmp(issuer_certinfo->uri.s, subject->aia.s)) {
- log_validation_status(rc, &subject->uri, aia_mismatch, subject->generation);
- goto done;
- }
-
- if (subject->ca && !subject->sia.s[0]) {
- log_validation_status(rc, &subject->uri, sia_missing, subject->generation);
- goto done;
- }
-
- if (subject->ca && !subject->manifest.s[0]) {
- log_validation_status(rc, &subject->uri, manifest_missing, subject->generation);
- goto done;
- }
-
- if (subject->ca && !startswith(subject->manifest.s, subject->sia.s)) {
- log_validation_status(rc, &subject->uri, manifest_mismatch, subject->generation);
- goto done;
- }
-
- if (!check_allowed_extensions(x, !subject->ca)) {
- log_validation_status(rc, &subject->uri, disallowed_extension, subject->generation);
- goto done;
- }
-
- if (!check_allowed_dn(X509_get_subject_name(x)))
- log_validation_status(rc, &subject->uri, nonconformant_subject_name, subject->generation);
-
- if (!check_allowed_dn(X509_get_issuer_name(x)))
- log_validation_status(rc, &subject->uri, nonconformant_issuer_name, subject->generation);
-
- if (subject->ta) {
-
- if (subject->crldp.s[0]) {
- log_validation_status(rc, &subject->uri, trust_anchor_with_crldp, subject->generation);
- goto done;
- }
-
- } else {
-
- if (!subject->crldp.s[0]) {
- log_validation_status(rc, &subject->uri, crldp_missing, subject->generation);
- goto done;
- }
-
- if (!subject->ca && !startswith(subject->crldp.s, issuer_certinfo->sia.s)) {
- log_validation_status(rc, &subject->uri, crldp_mismatch, subject->generation);
- goto done;
- }
-
- flags |= X509_V_FLAG_CRL_CHECK;
-
- if ((pkey = X509_get_pubkey(issuer)) == NULL || X509_verify(x, pkey) <= 0) {
- log_validation_status(rc, &subject->uri, certificate_bad_signature, subject->generation);
- goto done;
- }
-
- if ((crl = check_crl(rc, &subject->crldp, issuer, NULL, 0)) == NULL) {
- log_validation_status(rc, &subject->uri, certificate_bad_crl, subject->generation);
- goto done;
- }
-
- if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl)) {
- logmsg(rc, log_sys_err,
- "Internal allocation error setting up CRL for validation");
- goto done;
- }
- crl = NULL;
-
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
-
- }
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param, flags);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- log_validation_status(rc, &subject->uri, certificate_failed_validation, subject->generation);
- goto done;
- }
-
- ret = 1;
-
- done:
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
- X509_STORE_CTX_cleanup(&rctx.ctx);
- EVP_PKEY_free(pkey);
- X509_CRL_free(crl);
-
- return ret;
-}
-
-/**
- * Load certificate, check against manifest, then run it through all
- * the check_x509() tests.
- */
-static X509 *check_cert_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const certinfo_t *issuer,
- certinfo_t *subject,
- const unsigned char *hash,
- const size_t hashlen,
- object_generation_t generation)
-{
- hashbuf_t hashbuf;
- X509 *x = NULL;
-
- assert(uri && path && certs && issuer && subject);
-
- if (!uri_to_filename(rc, uri, path, prefix))
- return NULL;
-
- if (access(path->s, R_OK))
- return NULL;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto punt;
- }
-
- if (hash)
- x = read_cert(path, &hashbuf);
- else
- x = read_cert(path, NULL);
-
- if (!x) {
- logmsg(rc, log_sys_err, "Can't read certificate %s", path->s);
- goto punt;
- }
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, certificate_digest_mismatch, generation);
- goto punt;
- }
-
- parse_cert(rc, x, subject, uri, generation);
-
- if (check_x509(rc, certs, x, subject, issuer))
- return x;
-
- punt:
- X509_free(x);
- return NULL;
-}
-
-/**
- * Try to find a good copy of a certificate either in fresh data or in
- * backup data from a previous run of this program.
- */
-static X509 *check_cert(rcynic_ctx_t *rc,
- uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- certinfo_t *subject,
- const unsigned char *hash,
- const size_t hashlen)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- object_generation_t generation;
- const certinfo_t *issuer = NULL;
- STACK_OF(X509) *certs = NULL;
- const path_t *prefix = NULL;
- path_t path;
- X509 *x;
-
- assert(rc && uri && wsk && w && subject);
-
- issuer = &w->certinfo;
-
- switch (w->state) {
- case walk_state_current:
- prefix = &rc->unauthenticated;
- generation = object_generation_current;
- break;
- case walk_state_backup:
- prefix = &rc->old_authenticated;
- generation = object_generation_backup;
- break;
- default:
- return NULL;
- }
-
- /*
- * If target file already exists and we're not here to recheck with
- * better data, just get out now.
- */
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- !access(path.s, R_OK)) {
- if (w->state == walk_state_backup || sk_OPENSSL_STRING_find(rc->backup_cache, uri->s) < 0)
- return NULL;
- assert(generation == object_generation_current);
- log_validation_status(rc, uri, current_cert_recheck, generation);
- logmsg(rc, log_telemetry, "Rechecking %s", uri->s);
- } else {
- logmsg(rc, log_telemetry, "Checking %s", uri->s);
- }
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return NULL;
-
- if ((x = check_cert_1(rc, uri, &path, prefix, certs, issuer, subject, hash, hashlen, generation)) != NULL) {
- install_object(rc, uri, &path, object_accepted, generation);
- if (w->state == walk_state_current)
- sk_OPENSSL_STRING_remove(rc->backup_cache, uri->s);
- else if (!sk_OPENSSL_STRING_push_strdup(rc->backup_cache, uri->s))
- logmsg(rc, log_sys_err, "Couldn't cache URI %s, blundering onward", uri->s);
-
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, generation);
- }
-
- sk_X509_free(certs);
- certs = NULL;
-
- return x;
-}
-
-
-
-/**
- * Read and check one manifest from disk.
- */
-static Manifest *check_manifest_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const object_generation_t generation)
-{
- CMS_ContentInfo *cms = NULL;
- const ASN1_OBJECT *eContentType = NULL;
- STACK_OF(X509) *signers = NULL;
- STACK_OF(X509_CRL) *crls = NULL;
- X509_CRL *crl = NULL;
- Manifest *manifest = NULL, *result = NULL;
- BIO *bio = NULL;
- rcynic_x509_store_ctx_t rctx;
- certinfo_t certinfo;
- int i, initialized_store_ctx = 0;
- FileAndHash *fah = NULL;
- char *crl_tail;
-
- assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
-
- if (!uri_to_filename(rc, uri, path, prefix) ||
- (cms = read_cms(path, NULL)) == NULL)
- goto done;
-
- if ((eContentType = CMS_get0_eContentType(cms)) == NULL ||
- oid_cmp(eContentType, id_ct_rpkiManifest, sizeof(id_ct_rpkiManifest))) {
- log_validation_status(rc, uri, manifest_bad_econtenttype, generation);
- goto done;
- }
-
- if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for manifest %s", uri->s);
- goto done;
- }
-
- if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- log_validation_status(rc, uri, manifest_invalid_cms, generation);
- goto done;
- }
-
- if ((signers = CMS_get0_signers(cms)) == NULL || sk_X509_num(signers) != 1) {
- log_validation_status(rc, uri, manifest_missing_signer, generation);
- goto done;
- }
-
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
-
- if (!certinfo.crldp.s[0]) {
- log_validation_status(rc, uri, manifest_missing_crldp, generation);
- goto done;
- }
-
- if ((crl_tail = strrchr(certinfo.crldp.s, '/')) == NULL) {
- log_validation_status(rc, uri, manifest_malformed_crldp, generation);
- goto done;
- }
- crl_tail++;
-
- if ((manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL)) == NULL) {
- log_validation_status(rc, uri, manifest_decode_error, generation);
- goto done;
- }
-
- if (manifest->version) {
- log_validation_status(rc, uri, manifest_wrong_version, generation);
- goto done;
- }
-
- if (X509_cmp_current_time(manifest->thisUpdate) > 0) {
- log_validation_status(rc, uri, manifest_not_yet_valid, generation);
- goto done;
- }
-
- if (X509_cmp_current_time(manifest->nextUpdate) < 0) {
- log_validation_status(rc, uri, stale_manifest, generation);
- if (!rc->allow_stale_manifest)
- goto done;
- }
-
- if (manifest->fileHashAlg == NULL ||
- oid_cmp(manifest->fileHashAlg, id_sha256, sizeof(id_sha256)))
- goto done;
-
- for (i = 0; (fah = sk_FileAndHash_value(manifest->fileList, i)) != NULL; i++)
- if (!strcmp((char *) fah->file->data, crl_tail))
- break;
-
- if (fah) {
- crl = check_crl(rc, &certinfo.crldp,
- sk_X509_value(certs, sk_X509_num(certs) - 1),
- fah->hash->data, fah->hash->length);
- } else {
- log_validation_status(rc, uri, crl_not_in_manifest, generation);
- if (rc->require_crl_in_manifest)
- goto done;
- crl = check_crl(rc, &certinfo.crldp,
- sk_X509_value(certs, sk_X509_num(certs) - 1),
- NULL, 0);
- }
-
- if (!crl)
- goto done;
-
- if ((crls = sk_X509_CRL_new_null()) == NULL || !sk_X509_CRL_push(crls, crl))
- goto done;
- crl = NULL;
-
- if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
- goto done;
-
- rctx.rc = rc;
- rctx.subject = &certinfo;
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- /*
- * Redundant error message?
- */
- log_validation_status(rc, uri, manifest_invalid_ee, generation);
- goto done;
- }
-
- result = manifest;
- manifest = NULL;
-
- done:
- if (initialized_store_ctx)
- X509_STORE_CTX_cleanup(&rctx.ctx);
- BIO_free(bio);
- Manifest_free(manifest);
- CMS_ContentInfo_free(cms);
- sk_X509_free(signers);
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
-
- return result;
-}
-
-/**
- * Check whether we already have a particular manifest; if we don't,
- * attempt to fetch it and check the issuer's signature.
- */
-static Manifest *check_manifest(const rcynic_ctx_t *rc,
- STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- CMS_ContentInfo *cms = NULL;
- Manifest *manifest = NULL;
- STACK_OF(X509) *certs = NULL;
- BIO *bio = NULL;
- path_t path;
- uri_t *uri;
-
- assert(rc && wsk && w);
-
- uri = &w->certinfo.manifest;
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- (cms = read_cms(&path, NULL)) != NULL &&
-      (bio = BIO_new(BIO_s_mem())) != NULL &&
- CMS_verify(cms, NULL, NULL, NULL, bio,
- CMS_NO_SIGNER_CERT_VERIFY |
- CMS_NO_ATTR_VERIFY |
- CMS_NO_CONTENT_VERIFY) > 0)
- manifest = ASN1_item_d2i_bio(ASN1_ITEM_rptr(Manifest), bio, NULL);
-
- CMS_ContentInfo_free(cms);
- BIO_free(bio);
-
- if (manifest != NULL)
- return manifest;
-
- logmsg(rc, log_telemetry, "Checking manifest %s", uri->s);
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return NULL;
-
- if (manifest == NULL) {
- if ((manifest = check_manifest_1(rc, uri, &path,
- &rc->unauthenticated, certs, object_generation_current)) != NULL)
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- else if (!access(path.s, F_OK))
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if (manifest == NULL) {
- if ((manifest = check_manifest_1(rc, uri, &path,
- &rc->old_authenticated, certs, object_generation_backup)) != NULL)
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- else if (!access(path.s, F_OK))
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- sk_X509_free(certs);
- certs = NULL;
-
- return manifest;
-}
-
-
-
-/**
- * Extract a ROA prefix from the ASN.1 bitstring encoding.
- */
-static int extract_roa_prefix(unsigned char *addr,
- unsigned *prefixlen,
- const ASN1_BIT_STRING *bs,
- const unsigned afi)
-{
- unsigned length;
-
- switch (afi) {
- case IANA_AFI_IPV4: length = 4; break;
- case IANA_AFI_IPV6: length = 16; break;
- default: return 0;
- }
-
- if (bs->length < 0 || bs->length > length)
- return 0;
-
- if (bs->length > 0) {
- memcpy(addr, bs->data, bs->length);
- if ((bs->flags & 7) != 0) {
- unsigned char mask = 0xFF >> (8 - (bs->flags & 7));
- addr[bs->length - 1] &= ~mask;
- }
- }
-
- memset(addr + bs->length, 0, length - bs->length);
-
- *prefixlen = (bs->length * 8) - (bs->flags & 7);
-
- return 1;
-}
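-
-/*
- * Worked example (hypothetical values): an IPv4 ROAIPAddress whose BIT
- * STRING holds the two octets 0x0A 0x01 with zero unused bits is
- * decoded by the function above to 10.1.0.0/16 (prefix length
- * 2 * 8 - 0), while the octets 0x0A 0x40 with six unused bits decode to
- * 10.64.0.0/10, the unused low bits of the final octet having been
- * masked to zero.
- */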
-
-/**
- * Read and check one ROA from disk.
- */
-static int check_roa_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const unsigned char *hash,
- const size_t hashlen,
- const object_generation_t generation)
-{
- unsigned char addrbuf[ADDR_RAW_BUF_LEN];
- const ASN1_OBJECT *eContentType = NULL;
- STACK_OF(IPAddressFamily) *roa_resources = NULL, *ee_resources = NULL;
- STACK_OF(X509_CRL) *crls = NULL;
- STACK_OF(X509) *signers = NULL;
- CMS_ContentInfo *cms = NULL;
- X509_CRL *crl = NULL;
- hashbuf_t hashbuf;
- ROA *roa = NULL;
- BIO *bio = NULL;
- rcynic_x509_store_ctx_t rctx;
- certinfo_t certinfo;
- int i, j, initialized_store_ctx = 0, result = 0;
- unsigned afi, *safi = NULL, safi_, prefixlen;
- ROAIPAddressFamily *rf;
- ROAIPAddress *ra;
-
- assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
-
- if (!uri_to_filename(rc, uri, path, prefix))
- goto error;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto error;
- }
-
- if (hash)
- cms = read_cms(path, &hashbuf);
- else
- cms = read_cms(path, NULL);
-
- if (!cms)
- goto error;
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, roa_digest_mismatch, generation);
- goto error;
- }
-
- if (!(eContentType = CMS_get0_eContentType(cms)) ||
- oid_cmp(eContentType, id_ct_routeOriginAttestation,
- sizeof(id_ct_routeOriginAttestation))) {
- log_validation_status(rc, uri, roa_bad_econtenttype, generation);
- goto error;
- }
-
- if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for ROA %s", uri->s);
- goto error;
- }
-
- if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- log_validation_status(rc, uri, roa_invalid_cms, generation);
- goto error;
- }
-
- if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
- log_validation_status(rc, uri, roa_missing_signer, generation);
- goto error;
- }
-
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
-
- if (!(roa = ASN1_item_d2i_bio(ASN1_ITEM_rptr(ROA), bio, NULL))) {
- log_validation_status(rc, uri, roa_decode_error, generation);
- goto error;
- }
-
- if (roa->version) {
- log_validation_status(rc, uri, roa_wrong_version, generation);
- goto error;
- }
-
- /*
- * ROA issuer doesn't need rights to the ASN, so we don't need to
- * check the asID field.
- */
-
- ee_resources = X509_get_ext_d2i(sk_X509_value(signers, 0), NID_sbgp_ipAddrBlock, NULL, NULL);
-
- /*
- * Extract prefixes from ROA and convert them into a resource set.
- */
-
- if (!(roa_resources = sk_IPAddressFamily_new_null()))
- goto error;
-
- for (i = 0; i < sk_ROAIPAddressFamily_num(roa->ipAddrBlocks); i++) {
- rf = sk_ROAIPAddressFamily_value(roa->ipAddrBlocks, i);
- if (!rf || !rf->addressFamily || rf->addressFamily->length < 2 || rf->addressFamily->length > 3) {
- log_validation_status(rc, uri, malformed_roa_addressfamily, generation);
- goto error;
- }
- afi = (rf->addressFamily->data[0] << 8) | (rf->addressFamily->data[1]);
- if (rf->addressFamily->length == 3)
- *(safi = &safi_) = rf->addressFamily->data[2];
- for (j = 0; j < sk_ROAIPAddress_num(rf->addresses); j++) {
- ra = sk_ROAIPAddress_value(rf->addresses, j);
- if (!ra ||
- !extract_roa_prefix(addrbuf, &prefixlen, ra->IPAddress, afi) ||
- !v3_addr_add_prefix(roa_resources, afi, safi, addrbuf, prefixlen)) {
- log_validation_status(rc, uri, roa_resources_malformed, generation);
- goto error;
- }
- }
- }
-
- /*
- * ROAs can include nested prefixes, so direct translation to
- * resource sets could include overlapping ranges, which is illegal.
- * So we have to remove nested stuff before whacking into canonical
- * form. Fortunately, this is relatively easy, since we know these
- * are just prefixes, not ranges: in a list of prefixes sorted by
- * the RFC 3779 rules, the first element of a set of nested prefixes
- * will always be the least specific.
- */
-
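-  /*
-   * Illustrative example (hypothetical prefixes): if a ROA listed both
-   * 10.0.0.0/8 and 10.0.1.0/24, the RFC 3779 sort puts 10.0.0.0/8
-   * first, and since its maximum address (10.255.255.255) is not less
-   * than the /24's maximum (10.0.1.255), the loop below deletes the
-   * nested 10.0.1.0/24 entry.
-   */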
- for (i = 0; i < sk_IPAddressFamily_num(roa_resources); i++) {
- IPAddressFamily *f = sk_IPAddressFamily_value(roa_resources, i);
-
- if ((afi = v3_addr_get_afi(f)) == 0) {
- log_validation_status(rc, uri, roa_bad_afi, generation);
- goto error;
- }
-
- if (f->ipAddressChoice->type == IPAddressChoice_addressesOrRanges) {
- IPAddressOrRanges *aors = f->ipAddressChoice->u.addressesOrRanges;
-
- sk_IPAddressOrRange_sort(aors);
-
- for (j = 0; j < sk_IPAddressOrRange_num(aors) - 1; j++) {
- IPAddressOrRange *a = sk_IPAddressOrRange_value(aors, j);
- IPAddressOrRange *b = sk_IPAddressOrRange_value(aors, j + 1);
- unsigned char a_min[ADDR_RAW_BUF_LEN], a_max[ADDR_RAW_BUF_LEN];
- unsigned char b_min[ADDR_RAW_BUF_LEN], b_max[ADDR_RAW_BUF_LEN];
- int length;
-
- if ((length = v3_addr_get_range(a, afi, a_min, a_max, ADDR_RAW_BUF_LEN)) == 0 ||
- (length = v3_addr_get_range(b, afi, b_min, b_max, ADDR_RAW_BUF_LEN)) == 0) {
- log_validation_status(rc, uri, roa_resources_malformed, generation);
- goto error;
- }
-
- if (memcmp(a_max, b_max, length) >= 0) {
- (void) sk_IPAddressOrRange_delete(aors, j + 1);
- IPAddressOrRange_free(b);
- --j;
- }
- }
- }
- }
-
- if (!v3_addr_canonize(roa_resources)) {
- log_validation_status(rc, uri, roa_resources_malformed, generation);
- goto error;
- }
-
- if (!v3_addr_subset(roa_resources, ee_resources)) {
- log_validation_status(rc, uri, roa_not_nested, generation);
- goto error;
- }
-
- if (!(crl = check_crl(rc, &certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
- log_validation_status(rc, uri, roa_bad_crl, generation);
- goto error;
- }
-
- if (!(crls = sk_X509_CRL_new_null()) || !sk_X509_CRL_push(crls, crl))
- goto error;
- crl = NULL;
-
- if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
- goto error;
-
- rctx.rc = rc;
- rctx.subject = &certinfo;
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- /*
- * Redundant error message?
- */
- log_validation_status(rc, uri, roa_invalid_ee, generation);
- goto error;
- }
-
- result = 1;
-
- error:
- if (initialized_store_ctx)
- X509_STORE_CTX_cleanup(&rctx.ctx);
- BIO_free(bio);
- ROA_free(roa);
- CMS_ContentInfo_free(cms);
- sk_X509_free(signers);
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
- sk_IPAddressFamily_pop_free(roa_resources, IPAddressFamily_free);
- sk_IPAddressFamily_pop_free(ee_resources, IPAddressFamily_free);
-
- return result;
-}
-
-/**
- * Check whether we already have a particular ROA; if we don't,
- * attempt to fetch it and check the issuer's signature.
- */
-static void check_roa(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- const unsigned char *hash,
- const size_t hashlen)
-{
- STACK_OF(X509) *certs = NULL;
- path_t path;
-
- assert(rc && uri && wsk);
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- !access(path.s, F_OK))
- return;
-
- logmsg(rc, log_telemetry, "Checking ROA %s", uri->s);
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return;
-
- if (check_roa_1(rc, uri, &path, &rc->unauthenticated,
- certs, hash, hashlen, object_generation_current)) {
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if (check_roa_1(rc, uri, &path, &rc->old_authenticated,
- certs, hash, hashlen, object_generation_backup)) {
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- done:
- sk_X509_free(certs);
-}
-
-
-
-/**
- * Read and check one Ghostbuster record from disk.
- */
-static int check_ghostbuster_1(const rcynic_ctx_t *rc,
- const uri_t *uri,
- path_t *path,
- const path_t *prefix,
- STACK_OF(X509) *certs,
- const unsigned char *hash,
- const size_t hashlen,
- const object_generation_t generation)
-{
- const ASN1_OBJECT *eContentType = NULL;
- STACK_OF(X509_CRL) *crls = NULL;
- STACK_OF(X509) *signers = NULL;
- CMS_ContentInfo *cms = NULL;
- X509_CRL *crl = NULL;
- hashbuf_t hashbuf;
- BIO *bio = NULL;
- rcynic_x509_store_ctx_t rctx;
- certinfo_t certinfo;
- int initialized_store_ctx = 0, result = 0;
-
- assert(rc && uri && path && prefix && certs && sk_X509_num(certs));
-
- if (!uri_to_filename(rc, uri, path, prefix))
- goto error;
-
- if (hashlen > sizeof(hashbuf.h)) {
- log_validation_status(rc, uri, hash_too_long, generation);
- goto error;
- }
-
- if (hash)
- cms = read_cms(path, &hashbuf);
- else
- cms = read_cms(path, NULL);
-
- if (!cms)
- goto error;
-
- if (hash && memcmp(hashbuf.h, hash, hashlen)) {
- log_validation_status(rc, uri, ghostbuster_digest_mismatch, generation);
- goto error;
- }
-
- if (!(eContentType = CMS_get0_eContentType(cms)) ||
- oid_cmp(eContentType, id_ct_rpkiGhostbusters,
- sizeof(id_ct_rpkiGhostbusters))) {
- log_validation_status(rc, uri, ghostbuster_bad_econtenttype, generation);
- goto error;
- }
-
-#if 0
- /*
- * May want this later if we're going to inspect the VCard. For now,
- * just leave this NULL and the right thing should happen.
- */
- if ((bio = BIO_new(BIO_s_mem())) == NULL) {
- logmsg(rc, log_sys_err, "Couldn't allocate BIO for Ghostbuster record %s", uri->s);
- goto error;
- }
-#endif
-
- if (CMS_verify(cms, NULL, NULL, NULL, bio, CMS_NO_SIGNER_CERT_VERIFY) <= 0) {
- log_validation_status(rc, uri, ghostbuster_invalid_cms, generation);
- goto error;
- }
-
- if (!(signers = CMS_get0_signers(cms)) || sk_X509_num(signers) != 1) {
- log_validation_status(rc, uri, ghostbuster_missing_signer, generation);
- goto error;
- }
-
- parse_cert(rc, sk_X509_value(signers, 0), &certinfo, uri, generation);
-
-#if 0
- /*
- * Here is where we would read the VCard from the bio returned by
- * CMS_verify() so that we could check the VCard.
- */
-#endif
-
- if (!(crl = check_crl(rc, &certinfo.crldp, sk_X509_value(certs, sk_X509_num(certs) - 1), NULL, 0))) {
- log_validation_status(rc, uri, ghostbuster_bad_crl, generation);
- goto error;
- }
-
- if (!(crls = sk_X509_CRL_new_null()) || !sk_X509_CRL_push(crls, crl))
- goto error;
- crl = NULL;
-
- if (!(initialized_store_ctx = X509_STORE_CTX_init(&rctx.ctx, rc->x509_store, sk_X509_value(signers, 0), NULL)))
- goto error;
-
- rctx.rc = rc;
- rctx.subject = &certinfo;
-
- X509_STORE_CTX_trusted_stack(&rctx.ctx, certs);
- X509_STORE_CTX_set0_crls(&rctx.ctx, crls);
- X509_STORE_CTX_set_verify_cb(&rctx.ctx, check_x509_cb);
-
- X509_VERIFY_PARAM_set_flags(rctx.ctx.param,
- X509_V_FLAG_CRL_CHECK |
- X509_V_FLAG_POLICY_CHECK |
- X509_V_FLAG_EXPLICIT_POLICY |
- X509_V_FLAG_X509_STRICT);
-
- X509_VERIFY_PARAM_add0_policy(rctx.ctx.param, OBJ_txt2obj(rpki_policy_oid, 1));
-
- if (X509_verify_cert(&rctx.ctx) <= 0) {
- log_validation_status(rc, uri, ghostbuster_invalid_ee, generation);
- goto error;
- }
-
- result = 1;
-
- error:
- if (initialized_store_ctx)
- X509_STORE_CTX_cleanup(&rctx.ctx);
- BIO_free(bio);
- CMS_ContentInfo_free(cms);
- sk_X509_free(signers);
- sk_X509_CRL_pop_free(crls, X509_CRL_free);
-
- return result;
-}
-
-/**
- * Check whether we already have a particular Ghostbuster record; if we
- * don't, attempt to fetch it and check the issuer's signature.
- */
-static void check_ghostbuster(const rcynic_ctx_t *rc,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk,
- const unsigned char *hash,
- const size_t hashlen)
-{
- STACK_OF(X509) *certs = NULL;
- path_t path;
-
- assert(rc && uri && wsk);
-
- if (uri_to_filename(rc, uri, &path, &rc->new_authenticated) &&
- !access(path.s, F_OK))
- return;
-
- logmsg(rc, log_telemetry, "Checking Ghostbuster record %s", uri->s);
-
- if ((certs = walk_ctx_stack_certs(wsk)) == NULL)
- return;
-
- if (check_ghostbuster_1(rc, uri, &path, &rc->unauthenticated,
- certs, hash, hashlen, object_generation_current)) {
- install_object(rc, uri, &path, object_accepted, object_generation_current);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_current);
- }
-
- if (check_ghostbuster_1(rc, uri, &path, &rc->old_authenticated,
- certs, hash, hashlen, object_generation_backup)) {
- install_object(rc, uri, &path, object_accepted, object_generation_backup);
- goto done;
- } else if (!access(path.s, F_OK)) {
- log_validation_status(rc, uri, object_rejected, object_generation_backup);
- }
-
- done:
- sk_X509_free(certs);
-}
-
-
-
-static void walk_cert(rcynic_ctx_t *, STACK_OF(walk_ctx_t) *);
-
-/**
- * rsync callback for fetching SIA tree.
- */
-static void rsync_sia_callback(const rcynic_ctx_t *rc,
- const rsync_ctx_t *ctx,
- const rsync_status_t status,
- const uri_t *uri,
- STACK_OF(walk_ctx_t) *wsk)
-{
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
-
- assert(rc && wsk);
-
- switch (status) {
-
- case rsync_status_pending:
- if (rsync_count_runable(rc) >= rc->max_parallel_fetches)
- return;
-
- assert(rsync_count_running(rc) < rc->max_parallel_fetches);
-
- if ((wsk = walk_ctx_stack_clone(wsk)) == NULL) {
- logmsg(rc, log_sys_err, "walk_ctx_stack_clone() failed, probably memory exhaustion, blundering onwards without forking stack");
- return;
- }
-
- walk_ctx_stack_pop(wsk);
- task_add(rc, walk_cert, wsk);
- return;
-
- case rsync_status_failed:
- log_validation_status(rc, uri, rsync_failed, object_generation_null);
- break;
-
- case rsync_status_timed_out:
- log_validation_status(rc, uri, rsync_timed_out, object_generation_null);
- break;
-
- case rsync_status_skipped:
- log_validation_status(rc, uri, rsync_skipped, object_generation_null);
- break;
-
- case rsync_status_done:
- break;
- }
-
- w->state++;
- task_add(rc, walk_cert, wsk);
-}
-
-/**
- * Recursive walk of certificate hierarchy (core of the program).
- *
- * Walk all products of the current certificate, starting with the
- * ones named in the manifest and continuing with any that we find in
- * the publication directory but which are not named in the manifest.
- *
- * Dispatch to correct checking code for the object named by URI,
- * based on the filename extension in the uri. CRLs are a special
- * case because we've already checked them by the time we get here, so
- * we just ignore them. Other objects are either certificates or
- * CMS-signed objects of one kind or another.
- */
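-/*
- * Concretely, the dispatch below sends ".roa" objects to check_roa(),
- * ".gbr" objects to check_ghostbuster(), and ".cer" objects to
- * check_cert() and then onto the walk stack; ".crl", ".mft", and ".mnf"
- * are skipped here, and anything else is logged as
- * unknown_object_type_skipped.
- */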
-static void walk_cert(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- const unsigned char *hash = NULL;
- object_generation_t generation;
- size_t hashlen;
- walk_ctx_t *w;
- uri_t uri;
-
- assert(rc && wsk);
-
- while ((w = walk_ctx_stack_head(wsk)) != NULL) {
-
- switch (w->state) {
- case walk_state_current:
- generation = object_generation_current;
- break;
- case walk_state_backup:
- generation = object_generation_backup;
- break;
- default:
- generation = object_generation_null;
- break;
- }
-
- switch (w->state) {
-
- case walk_state_initial:
-
- if (!w->certinfo.sia.s[0] || !w->certinfo.ca) {
- w->state = walk_state_done;
- continue;
- }
-
- if (!w->certinfo.manifest.s[0]) {
- log_validation_status(rc, &w->certinfo.uri, manifest_missing, w->certinfo.generation);
- w->state = walk_state_done;
- continue;
- }
-
- w->state++;
- continue;
-
- case walk_state_rsync:
-
- rsync_tree(rc, &w->certinfo.sia, wsk, rsync_sia_callback);
- return;
-
- case walk_state_ready:
-
- walk_ctx_loop_init(rc, wsk); /* sets w->state */
- continue;
-
- case walk_state_current:
- case walk_state_backup:
-
- if (!walk_ctx_loop_this(rc, wsk, &uri, &hash, &hashlen)) {
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".crl") || endswith(uri.s, ".mft") || endswith(uri.s, ".mnf")) {
- walk_ctx_loop_next(rc, wsk);
- continue; /* CRLs and manifests checked elsewhere */
- }
-
- if (hash == NULL)
- log_validation_status(rc, &uri, tainted_by_not_being_in_manifest, generation);
- else if (w->stale_manifest)
- log_validation_status(rc, &uri, tainted_by_stale_manifest, generation);
-
- if (hash == NULL && !rc->allow_object_not_in_manifest) {
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".roa")) {
- check_roa(rc, &uri, wsk, hash, hashlen);
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".gbr")) {
- check_ghostbuster(rc, &uri, wsk, hash, hashlen);
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- if (endswith(uri.s, ".cer")) {
- certinfo_t subject;
- X509 *x = check_cert(rc, &uri, wsk, &subject, hash, hashlen);
- if (!walk_ctx_stack_push(wsk, x, &subject))
- walk_ctx_loop_next(rc, wsk);
- continue;
- }
-
- log_validation_status(rc, &uri, unknown_object_type_skipped, object_generation_null);
- walk_ctx_loop_next(rc, wsk);
- continue;
-
- case walk_state_done:
-
- walk_ctx_stack_pop(wsk); /* Resume our issuer's state */
- continue;
-
- }
- }
-
- assert(walk_ctx_stack_head(wsk) == NULL);
- walk_ctx_stack_free(wsk);
-}
-
-/**
- * Check a trust anchor. Yes, we trust it, by definition, but it
- * still needs to conform to the certificate profile, the
- * self-signature must be correct, etcetera.
- */
-static void check_ta(rcynic_ctx_t *rc, STACK_OF(walk_ctx_t) *wsk)
-{
- STACK_OF(X509) *certs = walk_ctx_stack_certs(wsk);
- walk_ctx_t *w = walk_ctx_stack_head(wsk);
- int ok = 0;
-
- if (certs != NULL && w != NULL)
- ok = check_x509(rc, certs, w->cert, &w->certinfo, &w->certinfo);
-
- sk_X509_free(certs);
-
- if (!ok)
- return;
-
- task_add(rc, walk_cert, wsk);
-
- while (sk_task_t_num(rc->task_queue) > 0 || sk_rsync_ctx_t_num(rc->rsync_queue) > 0) {
- task_run_q(rc);
- rsync_mgr(rc);
- }
-}
-
-
-
-/**
- * Read a trust anchor from disk and compare with known public key.
- * NB: EVP_PKEY_cmp() returns 1 for match, not 0 like every other
- * xyz_cmp() function in the entire OpenSSL library. Go figure.
- */
-static X509 *read_ta(const rcynic_ctx_t *rc, const uri_t *uri, const path_t *path, const EVP_PKEY *pkey, object_generation_t generation)
-{
- EVP_PKEY *xpkey = NULL;
- X509 *x = NULL;
- int match = 0;
-
- if ((x = read_cert(path, NULL)) == NULL || (xpkey = X509_get_pubkey(x)) == NULL) {
- log_validation_status(rc, uri, unreadable_trust_anchor, generation);
- } else {
- match = EVP_PKEY_cmp(pkey, xpkey) == 1;
- if (!match)
- log_validation_status(rc, uri, trust_anchor_key_mismatch, generation);
- }
-
- EVP_PKEY_free(xpkey);
- if (match)
- return x;
- X509_free(x);
- return NULL;
-}
-
-
-
-/**
- * Main program. Parse command line, read config file, iterate over
- * trust anchors found via config file and do a tree walk for each
- * trust anchor.
- */
-int main(int argc, char *argv[])
-{
- int opt_jitter = 0, use_syslog = 0, use_stderr = 0, syslog_facility = 0;
- int opt_syslog = 0, opt_stderr = 0, opt_level = 0, prune = 1;
- char *cfg_file = "rcynic.conf";
- char *lockfile = NULL, *xmlfile = NULL;
- int c, i, j, ret = 1, jitter = 600, lockfd = -1;
- STACK_OF(CONF_VALUE) *cfg_section = NULL;
- STACK_OF(walk_ctx_t) *wsk = NULL;
- CONF *cfg_handle = NULL;
- walk_ctx_t *w = NULL;
- time_t start = 0, finish;
- unsigned long hash;
- rcynic_ctx_t rc;
- unsigned delay;
- long eline = 0;
- BIO *bio = NULL;
-
- memset(&rc, 0, sizeof(rc));
-
- if ((rc.jane = strrchr(argv[0], '/')) == NULL)
- rc.jane = argv[0];
- else
- rc.jane++;
-
- rc.log_level = log_data_err;
- rc.allow_stale_crl = 1;
- rc.allow_stale_manifest = 1;
- rc.max_parallel_fetches = 1;
- rc.max_retries = 3;
- rc.retry_wait_min = 30;
- rc.run_rsync = 1;
- rc.rsync_timeout = 300;
-
-#define QQ(x,y) rc.priority[x] = y;
- LOG_LEVELS;
-#undef QQ
-
- if (!set_directory(&rc, &rc.authenticated, "rcynic-data/authenticated", 0) ||
- !set_directory(&rc, &rc.unauthenticated, "rcynic-data/unauthenticated/", 1))
- goto done;
-
- OpenSSL_add_all_algorithms();
- ERR_load_crypto_strings();
-
- while ((c = getopt(argc, argv, "c:l:sej:V")) > 0) {
- switch (c) {
- case 'c':
- cfg_file = optarg;
- break;
- case 'l':
- opt_level = 1;
- if (!configure_logmsg(&rc, optarg))
- goto done;
- break;
- case 's':
- use_syslog = opt_syslog = 1;
- break;
- case 'e':
- use_stderr = opt_stderr = 1;
- break;
- case 'j':
- if (!configure_integer(&rc, &jitter, optarg))
- goto done;
- opt_jitter = 1;
- break;
- case 'V':
- puts(svn_id);
- ret = 0;
- goto done;
- default:
- logmsg(&rc, log_usage_err,
- "usage: %s [-c configfile] [-s] [-e] [-l loglevel] [-j jitter] [-V]",
- rc.jane);
- goto done;
- }
- }
-
- if ((cfg_handle = NCONF_new(NULL)) == NULL) {
-    logmsg(&rc, log_sys_err, "Couldn't create CONF object");
- goto done;
- }
-
- if (NCONF_load(cfg_handle, cfg_file, &eline) <= 0) {
- if (eline <= 0)
- logmsg(&rc, log_usage_err, "Couldn't load config file %s", cfg_file);
- else
- logmsg(&rc, log_usage_err, "Error on line %ld of config file %s", eline, cfg_file);
- goto done;
- }
-
- if (CONF_modules_load(cfg_handle, NULL, 0) <= 0) {
- logmsg(&rc, log_sys_err, "Couldn't configure OpenSSL");
- goto done;
- }
-
- if ((cfg_section = NCONF_get_section(cfg_handle, "rcynic")) == NULL) {
- logmsg(&rc, log_usage_err, "Couldn't load rcynic section from config file");
- goto done;
- }
-
- for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
- CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
-
- assert(val && val->name && val->value);
-
- if (!name_cmp(val->name, "authenticated") &&
- !set_directory(&rc, &rc.authenticated, val->value, 0))
- goto done;
-
- else if (!name_cmp(val->name, "unauthenticated") &&
- !set_directory(&rc, &rc.unauthenticated, val->value, 1))
- goto done;
-
- else if (!name_cmp(val->name, "rsync-timeout") &&
- !configure_integer(&rc, &rc.rsync_timeout, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "max-parallel-fetches") &&
- !configure_integer(&rc, &rc.max_parallel_fetches, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "rsync-program"))
- rc.rsync_program = strdup(val->value);
-
- else if (!name_cmp(val->name, "lockfile"))
- lockfile = strdup(val->value);
-
- else if (!opt_jitter &&
- !name_cmp(val->name, "jitter") &&
- !configure_integer(&rc, &jitter, val->value))
- goto done;
-
- else if (!opt_level &&
- !name_cmp(val->name, "log-level") &&
- !configure_logmsg(&rc, val->value))
- goto done;
-
- else if (!opt_syslog &&
- !name_cmp(val->name, "use-syslog") &&
- !configure_boolean(&rc, &use_syslog, val->value))
- goto done;
-
- else if (!opt_stderr &&
- !name_cmp(val->name, "use-stderr") &&
- !configure_boolean(&rc, &use_stderr, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "syslog-facility") &&
- !configure_syslog(&rc, &syslog_facility,
- facilitynames, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "xml-summary"))
- xmlfile = strdup(val->value);
-
- else if (!name_cmp(val->name, "allow-stale-crl") &&
- !configure_boolean(&rc, &rc.allow_stale_crl, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "allow-stale-manifest") &&
- !configure_boolean(&rc, &rc.allow_stale_manifest, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "allow-non-self-signed-trust-anchor") &&
- !configure_boolean(&rc, &rc.allow_non_self_signed_trust_anchor, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "require-crl-in-manifest") &&
- !configure_boolean(&rc, &rc.require_crl_in_manifest, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "allow-object-not-in-manifest") &&
- !configure_boolean(&rc, &rc.allow_object_not_in_manifest, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "use-links") &&
- !configure_boolean(&rc, &rc.use_links, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "prune") &&
- !configure_boolean(&rc, &prune, val->value))
- goto done;
-
- else if (!name_cmp(val->name, "run-rsync") &&
- !configure_boolean(&rc, &rc.run_rsync, val->value))
- goto done;
-
- /*
- * Ugly, but the easiest way to handle all these strings.
- */
-
-#define QQ(x,y) \
- else if (!name_cmp(val->name, "syslog-priority-" #x) && \
- !configure_syslog(&rc, &rc.priority[x], \
- prioritynames, val->value)) \
- goto done;
-
- LOG_LEVELS; /* the semicolon is for emacs */
-
-#undef QQ
-
- }
-
- if ((rc.rsync_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate rsync_cache stack");
- goto done;
- }
-
- if ((rc.backup_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate backup_cache stack");
- goto done;
- }
-
- if ((rc.dead_host_cache = sk_OPENSSL_STRING_new(uri_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate dead_host_cache stack");
- goto done;
- }
-
- if (xmlfile != NULL) {
- if ((rc.validation_status = sk_validation_status_t_new(validation_status_cmp)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate validation_status stack");
- goto done;
- }
- }
-
- if ((rc.x509_store = X509_STORE_new()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate X509_STORE");
- goto done;
- }
-
- if ((rc.rsync_queue = sk_rsync_ctx_t_new_null()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate rsync_queue");
- goto done;
- }
-
- if ((rc.task_queue = sk_task_t_new_null()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate task_queue");
- goto done;
- }
-
- rc.use_syslog = use_syslog;
-
- if (use_syslog)
- openlog(rc.jane,
- LOG_PID | (use_stderr ? LOG_PERROR : 0),
- (syslog_facility ? syslog_facility : LOG_LOCAL0));
-
- if (jitter > 0) {
- if (RAND_bytes((unsigned char *) &delay, sizeof(delay)) <= 0) {
- logmsg(&rc, log_sys_err, "Couldn't read random bytes");
- goto done;
- }
- delay %= jitter;
- logmsg(&rc, log_telemetry, "Delaying %u seconds before startup", delay);
- while (delay > 0)
- delay = sleep(delay);
- }
-
- if (lockfile &&
- ((lockfd = open(lockfile, O_RDWR|O_CREAT|O_NONBLOCK, 0666)) < 0 ||
- lockf(lockfd, F_TLOCK, 0) < 0)) {
- if (lockfd >= 0 && errno == EAGAIN)
- logmsg(&rc, log_telemetry, "Lock %s held by another process", lockfile);
- else
- logmsg(&rc, log_sys_err, "Problem locking %s: %s", lockfile, strerror(errno));
- lockfd = -1;
- goto done;
- }
-
- start = time(0);
- logmsg(&rc, log_telemetry, "Starting");
-
- if (!construct_directory_names(&rc))
- goto done;
-
- if (!access(rc.new_authenticated.s, F_OK)) {
- logmsg(&rc, log_sys_err, "Timestamped output directory %s already exists! Clock went backwards?", rc.new_authenticated.s);
- goto done;
- }
-
- if (!mkdir_maybe(&rc, &rc.new_authenticated)) {
- logmsg(&rc, log_sys_err, "Couldn't prepare directory %s: %s",
- rc.new_authenticated.s, strerror(errno));
- goto done;
- }
-
- for (i = 0; i < sk_CONF_VALUE_num(cfg_section); i++) {
- CONF_VALUE *val = sk_CONF_VALUE_value(cfg_section, i);
- object_generation_t generation = object_generation_null;
- path_t path1, path2;
- certinfo_t ta_certinfo;
- uri_t uri;
- X509 *x = NULL;
-
- assert(val && val->name && val->value);
-
- if (!name_cmp(val->name, "trust-anchor-uri-with-key") ||
- !name_cmp(val->name, "indirect-trust-anchor")) {
- /*
- * Obsolete syntax. If you're reading this comment because you
- * had an old rcynic.conf and got this error message:
- *
- * "indirect-trust-anchor" is exactly the same as
- * "trust-anchor-locator", the name was changed to settle a
- * terminology fight in the IETF SIDR WG.
- *
- * "trust-anchor-uri-with-key" is semantically identical to
- * "trust-anchor-locator" (and was the original form of this
- * mechanism), but the syntax and local file format is
- * different.
- *
- * If you're seeing this error, you should just obtain current
- * TAL files. Also see the "make-tal.sh" script.
- */
- logmsg(&rc, log_usage_err,
- "Directive \"%s\" is obsolete -- please use \"trust-anchor-locator\" instead",
- val->name);
- goto done;
- }
-
- if (!name_cmp(val->name, "trust-anchor")) {
- /*
- * Local file trust anchor method.
- */
- logmsg(&rc, log_telemetry, "Processing trust anchor from local file %s", val->value);
- if (strlen(val->value) >= sizeof(path1.s)) {
- logmsg(&rc, log_usage_err, "Trust anchor path name too long %s", val->value);
- goto done;
- }
- strcpy(path1.s, val->value);
-
- /* Construct file:// URI for logging */
- assert(sizeof("file://") < sizeof(uri.s));
- strcpy(uri.s, "file://");
- if (path1.s[0] != '/') {
- if (getcwd(uri.s + strlen(uri.s), sizeof(uri.s) - strlen(uri.s)) == NULL ||
- (!endswith(uri.s, "/") && strlen(uri.s) >= sizeof(uri.s) - 1))
- uri.s[0] = '\0';
- else
- strcat(uri.s, "/");
- }
- if (uri.s[0] != '\0' && strlen(uri.s) + strlen(path1.s) < sizeof(uri.s))
- strcat(uri.s, path1.s);
- else
- uri.s[0] = '\0';
-
- if ((x = read_cert(&path1, NULL)) == NULL) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor, generation);
- continue;
- }
- hash = X509_subject_name_hash(x);
- for (j = 0; j < INT_MAX; j++) {
- if (snprintf(path2.s, sizeof(path2.s), "%s%lx.%d.cer",
- rc.new_authenticated.s, hash, j) == sizeof(path2.s)) {
- logmsg(&rc, log_sys_err,
- "Couldn't construct path name for trust anchor %s", path1.s);
- goto done;
- }
- if (access(path2.s, F_OK))
- break;
- }
- if (j == INT_MAX) {
- logmsg(&rc, log_sys_err, "Couldn't find a free name for trust anchor %s", path1.s);
- goto done;
- }
- }
-
- if (!name_cmp(val->name, "trust-anchor-locator")) {
- /*
- * Trust anchor locator (URI + public key) method.
- */
- EVP_PKEY *pkey = NULL;
- char *fn;
- path_t path3;
-
- fn = val->value;
- bio = BIO_new_file(fn, "r");
- if (!bio || BIO_gets(bio, uri.s, sizeof(uri.s)) <= 0) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
- BIO_free(bio);
- bio = NULL;
- continue;
- }
- uri.s[strcspn(uri.s, " \t\r\n")] = '\0';
- bio = BIO_push(BIO_new(BIO_f_linebreak()), bio);
- bio = BIO_push(BIO_new(BIO_f_base64()), bio);
- if (!uri_to_filename(&rc, &uri, &path1, &rc.unauthenticated) ||
- !uri_to_filename(&rc, &uri, &path2, &rc.new_authenticated) ||
- !uri_to_filename(&rc, &uri, &path3, &rc.old_authenticated)) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
- BIO_free_all(bio);
- bio = NULL;
- continue;
- }
- logmsg(&rc, log_telemetry, "Processing trust anchor from URI %s", uri.s);
- rsync_file(&rc, &uri);
- while (sk_rsync_ctx_t_num(rc.rsync_queue) > 0)
- rsync_mgr(&rc);
- if (bio)
- pkey = d2i_PUBKEY_bio(bio, NULL);
- BIO_free_all(bio);
- bio = NULL;
- if (!pkey) {
- log_validation_status(&rc, &uri, unreadable_trust_anchor_locator, object_generation_null);
- continue;
- }
- generation = object_generation_current;
- if ((x = read_ta(&rc, &uri, &path1, pkey, generation)) == NULL) {
- generation = object_generation_backup;
- path1 = path3;
- x = read_ta(&rc, &uri, &path1, pkey, generation);
- }
- EVP_PKEY_free(pkey);
- if (!x)
- continue;
- }
-
- if (!x)
- continue;
-
- logmsg(&rc, log_telemetry, "Copying trust anchor %s to %s", path1.s, path2.s);
-
- if (!mkdir_maybe(&rc, &path2) || !cp_ln(&rc, &path1, &path2))
- goto done;
-
- if ((wsk = walk_ctx_stack_new()) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't allocate walk context stack");
- goto done;
- }
-
- parse_cert(&rc, x, &ta_certinfo, &uri, generation);
- ta_certinfo.ta = 1;
-
- if ((w = walk_ctx_stack_push(wsk, x, &ta_certinfo)) == NULL) {
- logmsg(&rc, log_sys_err, "Couldn't push walk context stack");
- goto done;
- }
-
- check_ta(&rc, wsk);
- wsk = NULL; /* Ownership of wsk passed to check_ta() */
- }
-
- if (!finalize_directories(&rc))
- goto done;
-
- if (prune && !prune_unauthenticated(&rc, &rc.unauthenticated,
- strlen(rc.unauthenticated.s))) {
- logmsg(&rc, log_sys_err, "Trouble pruning old unauthenticated data");
- goto done;
- }
-
- ret = 0;
-
- done:
- log_openssl_errors(&rc);
-
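-  /*
-   * When an xml-summary file is configured, the document written below
-   * looks roughly like this (attribute values are illustrative):
-   *
-   *   <?xml version="1.0" ?>
-   *   <rcynic-summary date="2011-06-01T00:00:00Z" rcynic-version="..."
-   *                   summary-version="..." reporting-hostname="host.example.org">
-   *     <labels>
-   *       <object_accepted kind="good">Object accepted</object_accepted>
-   *       ...
-   *     </labels>
-   *     <validation_status timestamp="2011-06-01T00:00:00Z" status="object_accepted"
-   *                        generation="current">rsync://example.org/repo/foo.cer</validation_status>
-   *   </rcynic-summary>
-   */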
- if (xmlfile != NULL) {
-
- char tad[sizeof("2006-10-13T11:22:33Z") + 1];
- time_t tad_time = time(0);
- struct tm *tad_tm = gmtime(&tad_time);
- int ok = 1, use_stdout = !strcmp(xmlfile, "-");
- hostname_t hostname;
- mib_counter_t code;
- FILE *f = NULL;
-
- strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
-
- ok &= gethostname(hostname.s, sizeof(hostname.s)) == 0;
-
- if (use_stdout)
- f = stdout;
- else if (ok)
- ok &= (f = fopen(xmlfile, "w")) != NULL;
-
- if (ok)
- logmsg(&rc, log_telemetry, "Writing XML summary to %s",
- (use_stdout ? "standard output" : xmlfile));
-
- if (ok)
- ok &= fprintf(f, "<?xml version=\"1.0\" ?>\n"
- "<rcynic-summary date=\"%s\" rcynic-version=\"%s\""
- " summary-version=\"%d\" reporting-hostname=\"%s\">\n"
- " <labels>\n",
- tad, svn_id, XML_SUMMARY_VERSION, hostname.s) != EOF;
-
- for (j = 0; ok && j < MIB_COUNTER_T_MAX; ++j)
- if (ok)
- ok &= fprintf(f, " <%s kind=\"%s\">%s</%s>\n",
- mib_counter_label[j], mib_counter_kind[j],
- (mib_counter_desc[j]
- ? mib_counter_desc[j]
- : X509_verify_cert_error_string(mib_counter_openssl[j])),
- mib_counter_label[j]) != EOF;
-
- if (ok)
- ok &= fprintf(f, " </labels>\n") != EOF;
-
- for (i = 0; ok && i < sk_validation_status_t_num(rc.validation_status); i++) {
- validation_status_t *v = sk_validation_status_t_value(rc.validation_status, i);
- assert(v);
-
- tad_tm = gmtime(&v->timestamp);
- strftime(tad, sizeof(tad), "%Y-%m-%dT%H:%M:%SZ", tad_tm);
-
- for (code = (mib_counter_t) 0; ok && code < MIB_COUNTER_T_MAX; code++) {
- if (validation_status_get_code(v, code)) {
- if (ok)
- ok &= fprintf(f, " <validation_status timestamp=\"%s\" status=\"%s\"",
- tad, mib_counter_label[code]) != EOF;
- if (ok && (v->generation == object_generation_current ||
- v->generation == object_generation_backup))
- ok &= fprintf(f, " generation=\"%s\"",
- object_generation_label[v->generation]) != EOF;
- if (ok)
- ok &= fprintf(f, ">%s</validation_status>\n", v->uri.s) != EOF;
- }
- }
- }
-
- if (ok)
- ok &= fprintf(f, "</rcynic-summary>\n") != EOF;
-
- if (f && !use_stdout)
- ok &= fclose(f) != EOF;
-
- if (!ok)
- logmsg(&rc, log_sys_err, "Couldn't write XML summary to %s: %s",
- xmlfile, strerror(errno));
-
- }
-
- /*
- * Do NOT free cfg_section, NCONF_free() takes care of that
- */
- sk_OPENSSL_STRING_pop_free(rc.rsync_cache, OPENSSL_STRING_free);
- sk_OPENSSL_STRING_pop_free(rc.backup_cache, OPENSSL_STRING_free);
- sk_OPENSSL_STRING_pop_free(rc.dead_host_cache, OPENSSL_STRING_free);
- sk_validation_status_t_pop_free(rc.validation_status, validation_status_t_free);
- X509_STORE_free(rc.x509_store);
- NCONF_free(cfg_handle);
- CONF_modules_free();
- BIO_free(bio);
- EVP_cleanup();
- ERR_free_strings();
- if (rc.rsync_program)
- free(rc.rsync_program);
- if (lockfile && lockfd >= 0)
- unlink(lockfile);
- if (lockfile)
- free(lockfile);
- if (xmlfile)
- free(xmlfile);
-
- if (start) {
- finish = time(0);
- logmsg(&rc, log_telemetry,
- "Finished, elapsed time %u:%02u:%02u",
- (unsigned) ((finish - start) / 3600),
- (unsigned) ((finish - start) / 60 % 60),
- (unsigned) ((finish - start) % 60));
- }
-
- return ret;
-}
diff --git a/rcynic-ng/rcynic.xsl b/rcynic-ng/rcynic.xsl
deleted file mode 100644
index 487cd2f6..00000000
--- a/rcynic-ng/rcynic.xsl
+++ /dev/null
@@ -1,312 +0,0 @@
-<?xml version="1.0"?>
-<!--
- - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -
- - Portions copyright (C) 2006 American Registry for Internet Numbers ("ARIN")
- -
- - Permission to use, copy, modify, and distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ARIN DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ARIN BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -->
-
-<!-- $Id$ -->
-
-<!--
- - XSL stylesheet to render rcynic's xml-summary output as basic (X)HTML.
- -
- - This is a bit more complicated than strictly necessary, because I wanted
- - the ability to drop out columns that are nothing but zeros.
- - There's probably some clever way of using XPath to simplify this,
- - but I don't expect the data sets to be large enough for performance
- - to be an issue here. Feel free to show me how to do better.
- -->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform"
- version="1.0"
- xmlns:com="http://exslt.org/common"
- xmlns:str="http://exslt.org/strings"
- exclude-result-prefixes="com str">
-
- <xsl:output omit-xml-declaration="yes" indent="yes" method="xml" encoding="US-ASCII"
- doctype-public="-//W3C//DTD XHTML 1.0 Strict//EN"
- doctype-system="http://www.w3.org/TR/xhtml1/DTD/xhtml1-strict.dtd"/>
-
- <xsl:param name="refresh" select="1800"/>
- <xsl:param name="suppress-zero-columns" select="1"/>
- <xsl:param name="show-total" select="1"/>
- <xsl:param name="use-colors" select="1"/>
- <xsl:param name="show-detailed-status" select="1"/>
- <xsl:param name="show-problems" select="0"/>
- <xsl:param name="show-summary" select="1"/>
-
- <xsl:template match="/">
- <xsl:comment>Generators</xsl:comment>
- <xsl:comment><xsl:value-of select="rcynic-summary/@rcynic-version"/></xsl:comment>
- <xsl:comment>$Id$</xsl:comment>
- <html>
- <xsl:variable name="title">
- <xsl:text>rcynic summary </xsl:text>
- <xsl:value-of select="rcynic-summary/@date"/>
- </xsl:variable>
- <head>
- <title>
- <xsl:value-of select="$title"/>
- </title>
- <xsl:if test="$refresh != 0">
- <meta http-equiv="Refresh" content="{$refresh}"/>
- </xsl:if>
- <style type="text/css">
- td { text-align: center; padding: 4px }
- td.uri { text-align: left }
- td.host { text-align: left }
- <xsl:if test="$use-colors != 0">
- tr.good,td.good { background-color: #77ff77 }
- tr.warn,td.warn { background-color: yellow }
- tr.bad,td.bad { background-color: #ff5500 }
- </xsl:if>
- </style>
- </head>
- <body>
- <h1><xsl:value-of select="$title"/></h1>
-
- <!-- Summary output, old host-oriented format -->
- <xsl:if test="$show-summary != 0">
-
- <!-- Collect data we need to display -->
- <xsl:variable name="host-data">
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:sort order="ascending" data-type="text" select="."/>
- <xsl:variable name="uri" select="string(.)"/>
- <xsl:if test="starts-with($uri, 'rsync://')">
- <xsl:variable name="hostname" select="str:tokenize($uri, ':/')[2]"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = current()/@status]/@kind"/>
- <xsl:variable name="fn2">
- <xsl:if test="substring($uri, string-length($uri) - 3, 1) = '.' and @generation != ''">
- <xsl:value-of select="substring($uri, string-length($uri) - 3)"/>
- </xsl:if>
- </xsl:variable>
- <x hostname="{$hostname}" timestamp="{@timestamp}" uri="{$uri}" status="{@status}" mood="{$mood}" fn2="{$fn2}" generation="{@generation}"/>
- </xsl:if>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate set of unique hostnames -->
- <xsl:variable name="unique-hostnames">
- <xsl:for-each select="com:node-set($host-data)/x[not(@hostname = following::x/@hostname)]">
- <x hostname="{@hostname}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate set of unique filename types -->
- <xsl:variable name="unique-fn2s">
- <xsl:for-each select="com:node-set($host-data)/x[not(@fn2 = following::x/@fn2)]">
- <x fn2="{@fn2}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Generation names -->
- <xsl:variable name="unique-generations">
- <xsl:for-each select="com:node-set($host-data)/x[not(@generation = following::x/@generation)]">
- <x generation="{@generation}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate grand totals, figure out which columns to display -->
- <xsl:variable name="totals">
- <xsl:for-each select="rcynic-summary/labels/*">
- <xsl:variable name="sum" select="count(com:node-set($host-data)/x[@status = name(current())])"/>
- <xsl:variable name="show">
- <xsl:choose>
- <xsl:when test="$suppress-zero-columns = 0 or $sum &gt; 0">
- <xsl:text>1</xsl:text>
- </xsl:when>
- <xsl:otherwise>
- <xsl:text>0</xsl:text>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:variable>
- <x name="{name(current())}" sum="{$sum}" text="{.}" show="{$show}" mood="{@kind}"/>
- </xsl:for-each>
- </xsl:variable>
-
- <!-- Calculate how many columns we'll be displaying -->
- <xsl:variable name="columns" select="count(com:node-set($totals)/x[@show = 1])"/>
-
- <!-- Show the total -->
- <xsl:if test="$show-total != 0">
- <br/>
- <h2>Grand Totals</h2>
- <table class="summary" rules="all" border="1">
- <thead>
- <tr>
- <td/> <!-- was hostname -->
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <td><b><xsl:value-of select="@text"/></b></td>
- </xsl:for-each>
- </tr>
- </thead>
- <tbody>
- <tr>
- <td><b>Total</b></td>
- <xsl:for-each select="com:node-set($totals)/x">
- <xsl:if test="$suppress-zero-columns = 0 or @sum &gt; 0">
- <td class="{@mood}"><xsl:value-of select="@sum"/></td>
- </xsl:if>
- </xsl:for-each>
- </tr>
- </tbody>
- </table>
- </xsl:if>
-
- <!-- Generate the HTML -->
- <br/>
- <h2>Summaries by Repository Host</h2>
- <xsl:for-each select="com:node-set($unique-hostnames)/x">
- <xsl:sort order="ascending" data-type="text" select="@hostname"/>
- <xsl:variable name="hostname" select="@hostname"/>
- <br/>
- <h3><xsl:value-of select="$hostname"/></h3>
- <table class="summary" rules="all" border="1">
- <thead>
- <tr>
- <td/> <!-- was hostname -->
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <td><b><xsl:value-of select="@text"/></b></td>
- </xsl:for-each>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="com:node-set($unique-fn2s)/x">
- <xsl:sort order="ascending" data-type="text" select="@fn2"/>
- <xsl:variable name="fn2" select="@fn2"/>
- <xsl:for-each select="com:node-set($unique-generations)/x">
- <xsl:sort order="ascending" data-type="text" select="@generation"/>
- <xsl:variable name="generation" select="@generation"/>
- <xsl:if test="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation])">
- <tr>
- <td><xsl:value-of select="concat($generation, ' ', $fn2)"/></td>
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <xsl:variable name="label" select="@name"/>
- <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @fn2 = $fn2 and @generation = $generation and @status = $label])"/>
- <xsl:choose>
- <xsl:when test="$value != 0">
- <td class="{@mood}">
- <xsl:value-of select="$value"/>
- </td>
- </xsl:when>
- <xsl:otherwise>
- <td/>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:for-each>
- </tr>
- </xsl:if>
- </xsl:for-each>
- </xsl:for-each>
- <tr>
- <td>Total</td>
- <xsl:for-each select="com:node-set($totals)/x[@show = 1]">
- <xsl:variable name="label" select="@name"/>
- <xsl:variable name="value" select="count(com:node-set($host-data)/x[@hostname = $hostname and @status = $label])"/>
- <xsl:choose>
- <xsl:when test="$value != 0">
- <td class="{@mood}">
- <xsl:value-of select="$value"/>
- </td>
- </xsl:when>
- <xsl:otherwise>
- <td/>
- </xsl:otherwise>
- </xsl:choose>
- </xsl:for-each>
- </tr>
- </tbody>
- </table>
- </xsl:for-each>
-
- <!-- "Problems" display -->
- <xsl:if test="$show-problems != 0">
- <br/>
- <h2>Problems</h2>
- <table class="problems" rules="all" border="1" >
- <thead>
- <tr>
- <td class="status"><b>Status</b></td>
- <td class="uri"><b>URI</b></td>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:variable name="status" select="@status"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
- <xsl:if test="$mood != 'good'">
- <tr class="{$mood}">
- <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
- <td class="uri"><xsl:value-of select="."/></td>
- </tr>
- </xsl:if>
- </xsl:for-each>
- </tbody>
- </table>
- </xsl:if>
- </xsl:if>
-
- <!-- Detailed status display -->
- <xsl:if test="$show-detailed-status != 0">
- <br/>
- <h2>Validation Status</h2>
- <table class="details" rules="all" border="1" >
- <thead>
- <tr>
- <td class="timestamp"><b>Timestamp</b></td>
- <td class="generation"><b>Generation</b></td>
- <td class="status"><b>Status</b></td>
- <td class="uri"><b>URI</b></td>
- </tr>
- </thead>
- <tbody>
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:variable name="status" select="@status"/>
- <xsl:variable name="mood" select="/rcynic-summary/labels/*[name() = $status]/@kind"/>
- <tr class="{$mood}">
- <td class="timestamp"><xsl:value-of select="@timestamp"/></td>
- <td class="generation"><xsl:value-of select="@generation"/></td>
- <td class="status"><xsl:value-of select="/rcynic-summary/labels/*[name() = $status] "/></td>
- <td class="uri"><xsl:value-of select="."/></td>
- </tr>
- </xsl:for-each>
- </tbody>
- </table>
- </xsl:if>
-
- </body>
- </html>
- </xsl:template>
-
-</xsl:stylesheet>
-
-<!--
- - Local variables:
- - mode: sgml
- - End:
- -->
diff --git a/rcynic-ng/sample-trust-anchors/README b/rcynic-ng/sample-trust-anchors/README
deleted file mode 100644
index b0c45dd3..00000000
--- a/rcynic-ng/sample-trust-anchors/README
+++ /dev/null
@@ -1,13 +0,0 @@
-$Id$
-
-These are SAMPLE trust anchors for testing rcynic. Any resemblance
-between these files and real trust anchors living, dead, or wandering
-the night in ghostly torment is purely coincidental.
-
-Remember: it's only a trust anchor if a relying party says it is.
-You're the relying party, so it's your call.
-
-The .cer files (if any) are self-signed X.509 trust anchors.
-
-The .tal files are "trust anchor locators", formerly known as
-"indirect trust anchors" -- see draft-ietf-sidr-ta.
diff --git a/rcynic-ng/sample-trust-anchors/afrinic.tal b/rcynic-ng/sample-trust-anchors/afrinic.tal
deleted file mode 100644
index 0f202c73..00000000
--- a/rcynic-ng/sample-trust-anchors/afrinic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://rpki.afrinic.net/repository/AfriNIC.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAxsAqAhWIO+ON2Ef9oRDM
-pKxv+AfmSLIdLWJtjrvUyDxJPBjgR+kVrOHUeTaujygFUp49tuN5H2C1rUuQavTH
-vve6xNF5fU3OkTcqEzMOZy+ctkbde2SRMVdvbO22+TH9gNhKDc9l7Vu01qU4LeJH
-k3X0f5uu5346YrGAOSv6AaYBXVgXxa0s9ZvgqFpim50pReQe/WI3QwFKNgpPzfQL
-6Y7fDPYdYaVOXPXSKtx7P4s4KLA/ZWmRL/bobw/i2fFviAGhDrjqqqum+/9w1hEl
-L/vqihVnV18saKTnLvkItA/Bf5i11Yhw2K7qv573YWxyuqCknO/iYLTR1DToBZcZ
-UQIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/apnic.tal b/rcynic-ng/sample-trust-anchors/apnic.tal
deleted file mode 100644
index 9e061061..00000000
--- a/rcynic-ng/sample-trust-anchors/apnic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://rpki.apnic.net/repository/APNIC.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAr36frxrKWB/g+b2F4TSN
-TJYm5vcq0QjSb6Sas3O84doujITy5FmAbcFlTUc5m/hhDrWO6OBowAFmLt8KowCj
-Gf0aBFGeXULYiqcZFCiP1LPqvS2M5WN5PLrz88f21EUpqsm+Fq/5fwdH6OHZvSbC
-BE2grVw1tQM5LCSqCs4vmrdvGi1tVcFF2FlDFaCpKN7p0Eg7YmLedf4ANkbpNxqk
-mjbDVybK1qL6p6j85uBTbPoGHHr/AUw2UXdnl1Z+mPj1lZv01/AQe7X4KobJW+Tc
-KljtK6Zccs6+/LiC1zh59VHdbIn1K4KyXVTeNAOHzu3ZOOlcTMMfO0dCnqtKoab/
-eQIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/bbn-testbed.tal b/rcynic-ng/sample-trust-anchors/bbn-testbed.tal
deleted file mode 100644
index 21302ea5..00000000
--- a/rcynic-ng/sample-trust-anchors/bbn-testbed.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://rpki.bbn.com/rpki/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAvEjkk0oC2N8Ij+ruAkrX
-cI7lSOIG1sQLC8xJJqklMlOBLao+3If0AVKSEP5+pb/hJwpXPomt6Lm4+fuZ6vX/
-q15llQL3YWkHZ49t/X47yCqxvM0txRaZafffbk6lJrofKhKKrYEcrr/rHiG+IQXn
-U1MGaQ/a8DfRDWiCQxonh7CthrLi7tr+QI9s9fDvvHPCK9OmmhZvCUDOydf+/vMn
-VShQ57KsUFcuZ0EX9UwsaIGCYGyvsYwk54UtoIGovg2IavfZK8ai0/5XEVpvKQiR
-8AixZpg5bSe7555+LhzpL5+LdqX6/hVRah0JrdR8KiXvr16Kmcitj+JLqVc0Wkd8
-zwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/lacnic.tal b/rcynic-ng/sample-trust-anchors/lacnic.tal
deleted file mode 100644
index 77a142d2..00000000
--- a/rcynic-ng/sample-trust-anchors/lacnic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://repository.lacnic.net/rpki/lacnic/RTA_LACNIC_RPKI.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA1AuR49ZoKS59Vnpq8M0X
-djeV3ROqtElwx6sNmUXvWBFPQlZLs2tR5/0MwprIWRi91WnMBVWjsECcLBe7Pu+u
-V/tTvPMJRXm/c+l8nR+FhAj7pn4M5A2pHFBndCPc1UrFD+BLACx9DSNiUjzKr1t7
-wjHTW+F0NMnZ9g9hKdxDNCFi66BGx2f3TTW3uGns/IPfkxrRCeYtJcBpQ5mKoc8g
-QOndiEG/33uXDS9EOe1dycmnaw9EQqxqHp+Bj0TIVoFyfDNuT+soJ3uwtQr2g5Ys
-AIxJtmBAZrLj+acmLeQrYC0xQuK118dSAS9r6GSm476m2aGEYtb083fLodeYSEjM
-/wIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal b/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal
deleted file mode 100644
index ebdb9f45..00000000
--- a/rcynic-ng/sample-trust-anchors/ripe-ncc-root.tal
+++ /dev/null
@@ -1,9 +0,0 @@
-rsync://rpki.ripe.net/ta/ripe-ncc-ta.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA0URYSGqUz2m
-yBsOzeW1jQ6NsxNvlLMyhWknvnl8NiBCs/T/S2XuNKQNZ+wBZxIgPPV
-2pFBFeQAvoH/WK83HwA26V2siwm/MY2nKZ+Olw+wlpzlZ1p3Ipj2eNc
-Krmit8BwBC8xImzuCGaV0jkRB0GZ0hoH6Ml03umLprRsn6v0xOP0+l6
-Qc1ZHMFVFb385IQ7FQQTcVIxrdeMsoyJq9eMkE6DoclHhF/NlSllXub
-ASQ9KUWqJ0+Ot3QCXr4LXECMfkpkVR2TZT+v5v658bHVs6ZxRD1b6Uk
-1uQKAyHUbn/tXvP8lrjAibGzVsXDT2L0x4Edx+QdixPgOji3gBMyL2V
-wIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-apnic.tal b/rcynic-ng/sample-trust-anchors/testbed-apnic.tal
deleted file mode 100644
index 3e57b05e..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-apnic.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://apnic.rpki.net/rpki/apnic/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
-GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
-Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
-nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
-BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
-ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
-aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal b/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal
deleted file mode 100644
index ae16a302..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-apnicrpki.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://apnicrpki.rand.apnic.net/repository/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEApQofhU9VeRGZxlNgkrFR
-ShSIkGtRUwg1mkISvieM2oxA6LFhHoqFV25BdK/3IhOh/0I/E9WX7uWPD72MFGBB
-LsFc6JpZFkvg8+9KUIHquk46wn4bEvA8xMsPbGo+mK0CAkqOdHQSemC7nqUlR9VH
-5zH1t8aYS4mrVN59LfClmiEE7QElgmVyvccfaEd+gMGaxsDvSylWIdvVxYfFG2rB
-LiEsmfXwdn2a8b1Zx5eaFD80XV9Z7h15ESP1epSScGzMp2XB0FYMC3f9k7l5sydX
-Rj5BYo1rWaM+Y6AoHe/d8G0xm6iX6b/JqT4Ywdt8/7DbsweaIk73VnpnXrYATCt9
-VwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-arin.tal b/rcynic-ng/sample-trust-anchors/testbed-arin.tal
deleted file mode 100644
index 5ec14f36..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-arin.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://arin.rpki.net/rpki/arin/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
-GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
-Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
-nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
-BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
-ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
-aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/sample-trust-anchors/testbed-ripe.tal b/rcynic-ng/sample-trust-anchors/testbed-ripe.tal
deleted file mode 100644
index d52aba62..00000000
--- a/rcynic-ng/sample-trust-anchors/testbed-ripe.tal
+++ /dev/null
@@ -1,8 +0,0 @@
-rsync://ripe.rpki.net/rpki/ripe/root.cer
-MIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEAovWQL2lh6knDx
-GUG5hbtCXvvh4AOzjhDkSHlj22gn/1oiM9IeDATIwP44vhQ6L/xvuk7W6
-Kfa5ygmqQ+xOZOwTWPcrUbqaQyPNxokuivzyvqVZVDecOEqs78q58mSp9
-nbtxmLRW7B67SJCBSzfa5XpVyXYEgYAjkk3fpmefU+AcxtxvvHB5OVPIa
-BfPcs80ICMgHQX+fphvute9XLxjfJKJWkhZqZ0v7pZm2uhkcPx1PMGcrG
-ee0WSDC3fr3erLueagpiLsFjwwpX6F+Ms8vqz45H+DKmYKvPSstZjCCq9
-aJ0qANT9OtnfSDOS+aLRPjZryCNyvvBHxZXqj5YCGKtwIDAQAB
diff --git a/rcynic-ng/show.awk b/rcynic-ng/show.awk
deleted file mode 100644
index 84d33cda..00000000
--- a/rcynic-ng/show.awk
+++ /dev/null
@@ -1,93 +0,0 @@
-# $Id$
-#
-# Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# Prettyprint tab-delimited rcynic summary data. This is intended as
-# a postprocessor for show.xsl.
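-#
-# The input is expected to be a block of "label<TAB>column headings"
-# lines, a blank line, and then one tab-delimited record per
-# validation_status whose second field is the status label and whose
-# third field is the object URI; only rsync:// URIs are tallied,
-# grouped by repository hostname.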
-
-BEGIN {
- FS = "\t";
- label[++nlabels] = "hostname";
- head["hostname", 1] = "Repository";
- head["hostname", 2] = "Host";
- width["hostname"] = length(head["hostname", 1]);
-}
-
-!NF {
- got_labels = 1;
- next;
-}
-
-!got_labels {
- label[++nlabels] = $1;
- nh = split($2, h, /[ \t]+/);
- for (i = 1; i <= nh; i++) {
- head[$1, i] = h[i];
- if (length(h[i]) > width[$1])
- width[$1] = length(h[i]);
- }
-}
-
-got_labels && $3 ~ /^rsync:\/\/.+/ {
- hostname = $3;
- sub(/^rsync:\/\//, "", hostname);
- sub(/\/.*$/, "", hostname);
- if (!seen[hostname])
- seen[hostname] = ++nhostnames;
- data[hostname, $2]++;
- total[$2]++;
- if (length(hostname) > width["hostname"])
- width["hostname"] = length(hostname);
-}
-
-END {
-
- for (i = 1;; i++) {
- blank = 1;
- for (j = 1; j <= nlabels; j++)
- if (head[label[j], i] && (j == 1 || total[label[j]] > 0))
- blank = 0;
- if (blank)
- break;
- for (j = 1; j <= nlabels; j++) {
- if (j == 1)
- printf "%-*s", width[label[j]], head[label[j], i];
- else if (total[label[j]] > 0)
- printf " %*s", width[label[j]], head[label[j], i];
- }
- print "";
- }
-
- for (i in seen)
- hostnames[seen[i]] = i;
-
- for (i = 1; i <= nhostnames; i++) {
- for (j = 1; j <= nlabels; j++) {
- if (j == 1)
- printf "%-*s", width[label[j]], hostnames[i];
- else if (total[label[j]] > 0)
- printf " %*s", width[label[j]], data[hostnames[i], label[j]];
- }
- print "";
- }
-
- for (j = 1; j <= nlabels; j++) {
- if (j == 1)
- printf "%-*s", width[label[j]], "Total";
- else if (total[label[j]] > 0)
- printf " %*s", width[label[j]], total[label[j]];
- }
- print "";
-}
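As context for the awk script above: show.awk consumes the tab-delimited stream that show.xsl (further below) emits, namely a block of label definitions (label name, tab, human-readable heading), a blank separator line, and then one record per object with timestamp, status label, rsync URI, and generation. The lines below are a purely illustrative sketch of that intermediate format; the label names, timestamp, and URI are made up, not taken from a real rcynic run, and the fields are separated by single tabs:

    current_object_accepted	Current object accepted
    current_object_rejected	Current object rejected

    2011-03-14T01:23:45Z	current_object_accepted	rsync://rpki.example.org/repository/ca.cer	current

show.awk keys its per-repository counts on the hostname it strips out of the third (URI) field, so any data line whose third field does not start with rsync:// is ignored.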
diff --git a/rcynic-ng/show.sh.in b/rcynic-ng/show.sh.in
deleted file mode 100644
index c62a325b..00000000
--- a/rcynic-ng/show.sh.in
+++ /dev/null
@@ -1,20 +0,0 @@
-#!/bin/sh -
-# $Id$
-#
-# Copyright (C) 2010 Internet Systems Consortium, Inc. ("ISC")
-#
-# Permission to use, copy, modify, and/or distribute this software for any
-# purpose with or without fee is hereby granted, provided that the above
-# copyright notice and this permission notice appear in all copies.
-#
-# THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
-# REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
-# AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
-# INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
-# LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
-# OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
-# PERFORMANCE OF THIS SOFTWARE.
-
-# Prettyprint rcynic summary data as flat text.
-
-@XSLTPROC@ @abs_builddir@/show.xsl ${1-rcynic.xml} | @AWK@ -f @abs_builddir@/show.awk
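The @XSLTPROC@, @abs_builddir@ and @AWK@ tokens above are placeholders that configure substitutes when it generates show.sh from this template. With a hypothetical build directory of /usr/local/build/rcynic-ng, the resulting one-liner would be roughly:

    xsltproc /usr/local/build/rcynic-ng/show.xsl rcynic.xml | awk -f /usr/local/build/rcynic-ng/show.awk

i.e. render the collector's XML summary to tab-delimited text, then feed it to show.awk for the per-repository table.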
diff --git a/rcynic-ng/show.xsl b/rcynic-ng/show.xsl
deleted file mode 100644
index 06d69893..00000000
--- a/rcynic-ng/show.xsl
+++ /dev/null
@@ -1,59 +0,0 @@
-<?xml version="1.0"?>
-<!--
- - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -->
-
-<!-- $Id$ -->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-
- <xsl:output method="text" encoding="US-ASCII"/>
-
- <!-- Translate rcynic XML into tab-delimited flat text -->
- <xsl:template match="/">
-
- <!-- Write labels as two columns: <label> <tab> <text> -->
- <xsl:for-each select="rcynic-summary/labels/*">
- <xsl:value-of select="name()"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="."/>
- <xsl:text>&#10;</xsl:text>
- </xsl:for-each>
-
- <!-- Blank line between sections -->
- <xsl:text>&#10;</xsl:text>
-
-    <!-- Write status as four columns: <timestamp> <tab> <status> <tab> <uri> <tab> <generation> -->
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:sort order="ascending" data-type="text" select="."/>
- <xsl:value-of select="@timestamp"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="@status"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="."/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="@generation"/>
- <xsl:text>&#10;</xsl:text>
- </xsl:for-each>
-
- </xsl:template>
-
-</xsl:stylesheet>
-
-<!--
- - Local variables:
- - mode: sgml
- - End:
- -->
diff --git a/rcynic-ng/static-rsync/Makefile.in b/rcynic-ng/static-rsync/Makefile.in
deleted file mode 100644
index 9c8556e0..00000000
--- a/rcynic-ng/static-rsync/Makefile.in
+++ /dev/null
@@ -1,41 +0,0 @@
-# $Id$
-
-VERSION = 2.6.9
-
-CFG_ENV = CFLAGS='@CFLAGS@' LDFLAGS='@LDFLAGS@ @LD_STATIC_FLAG@'
-CFG_ARG =
-
-TARBALL = rsync-${VERSION}.tar.gz
-DIRNAME = rsync-${VERSION}
-
-CFG_LOG = > ../config.log 2>&1
-BIN_LOG = > ../build.log 2>&1
-
-BIN = rsync
-
-all: ${BIN}
-
-${BIN}: ${DIRNAME}/${BIN}
- ln ${DIRNAME}/${BIN} $@
- file $@
-
-${DIRNAME}/${BIN}: configured.stamp
- cd ${DIRNAME} && ${MAKE} ${BIN_LOG}
-
-extracted.stamp: ${TARBALL}
- gzip -c -d ${TARBALL} | tar -xf -
- touch $@
-
-patched.stamp: extracted.stamp
- for i in patches/patch-*; do if test -f "$$i"; then patch -d ${DIRNAME} <"$$i"; else :; fi; done
- touch $@
-
-configured.stamp: patched.stamp
- cd ${DIRNAME} && ${CFG_ENV} ./configure ${CFG_ARG} ${CFG_LOG}
- touch $@
-
-clean:
- rm -rf ${BIN} ${DIRNAME} *.stamp *.log
-
-distclean: clean
- rm -f Makefile
diff --git a/rcynic-ng/static-rsync/README b/rcynic-ng/static-rsync/README
deleted file mode 100644
index 9ff5afa8..00000000
--- a/rcynic-ng/static-rsync/README
+++ /dev/null
@@ -1,15 +0,0 @@
-$Id$
-
-Hack to build a static rsync binary suitable for use in a chroot jail.
-
-The default configuration is for gcc, since that's the most widely
-used compiler on the platforms we use. I've provided hooks intended
-to make it simple to support other compilers just by overriding make
-variables on the command line; if you need to do something more
-drastic than this to get your compiler working, please tell me.
-
-If your platform doesn't support static binaries at all, you're on
-your own (and should whine at your OS vendor, as this is nuts).
-
-We try to stick with rsync release code, but apply security patches
-when necessary.
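The hooks the README refers to appear to be the CFG_ENV / CFG_ARG make variables in the Makefile.in above. A plausible (untested) invocation for a clang toolchain would override CFG_ENV on the make command line, reusing the variable name from that Makefile:

    make CFG_ENV="CC=clang CFLAGS='-O2' LDFLAGS='-static'"

configure inside the unpacked rsync tree then picks CC, CFLAGS and LDFLAGS up from its environment when building the static binary.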
diff --git a/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091 b/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091
deleted file mode 100644
index 201af96a..00000000
--- a/rcynic-ng/static-rsync/patches/patch-CVE-2007-4091
+++ /dev/null
@@ -1,60 +0,0 @@
---- sender.c 2006-09-20 03:53:32.000000000 +0200
-+++ sender.c 2007-07-25 15:33:05.000000000 +0200
-@@ -123,6 +123,7 @@
- char fname[MAXPATHLEN];
- struct file_struct *file;
- unsigned int offset;
-+ size_t l = 0;
-
- if (ndx < 0 || ndx >= the_file_list->count)
- return;
-@@ -133,6 +134,20 @@
- file->dir.root, "/", NULL);
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- f_name(file, fname + offset);
- if (remove_source_files) {
- if (do_unlink(fname) == 0) {
-@@ -224,6 +239,7 @@
- enum logcode log_code = log_before_transfer ? FLOG : FINFO;
- int f_xfer = write_batch < 0 ? batch_fd : f_out;
- int i, j;
-+ size_t l = 0;
-
- if (verbose > 2)
- rprintf(FINFO, "send_files starting\n");
-@@ -259,6 +275,20 @@
- fname[offset++] = '/';
- } else
- offset = 0;
-+
-+ l = offset + 1;
-+ if (file) {
-+ if (file->dirname)
-+ l += strlen(file->dirname);
-+ if (file->basename)
-+ l += strlen(file->basename);
-+ }
-+
-+ if (l >= sizeof(fname)) {
-+ rprintf(FERROR, "Overlong pathname\n");
-+ exit_cleanup(RERR_FILESELECT);
-+ }
-+
- fname2 = f_name(file, fname + offset);
-
- if (verbose > 2)
diff --git a/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz b/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz
deleted file mode 100644
index 6377f639..00000000
--- a/rcynic-ng/static-rsync/rsync-2.6.9.tar.gz
+++ /dev/null
Binary files differ
diff --git a/rcynic-ng/validation_status.xsl b/rcynic-ng/validation_status.xsl
deleted file mode 100644
index 737b1467..00000000
--- a/rcynic-ng/validation_status.xsl
+++ /dev/null
@@ -1,47 +0,0 @@
-<?xml version="1.0"?>
-<!--
- - Copyright (C) 2010-2011 Internet Systems Consortium, Inc. ("ISC")
- -
- - Permission to use, copy, modify, and/or distribute this software for any
- - purpose with or without fee is hereby granted, provided that the above
- - copyright notice and this permission notice appear in all copies.
- -
- - THE SOFTWARE IS PROVIDED "AS IS" AND ISC DISCLAIMS ALL WARRANTIES WITH
- - REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF MERCHANTABILITY
- - AND FITNESS. IN NO EVENT SHALL ISC BE LIABLE FOR ANY SPECIAL, DIRECT,
- - INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES WHATSOEVER RESULTING FROM
- - LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION OF CONTRACT, NEGLIGENCE
- - OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN CONNECTION WITH THE USE OR
- - PERFORMANCE OF THIS SOFTWARE.
- -->
-
-<!-- $Id$ -->
-
-<!--
- - XSL stylesheet to render rcynic's xml-summary output as tab-delimited text.
- -->
-
-<xsl:stylesheet xmlns:xsl="http://www.w3.org/1999/XSL/Transform" version="1.0">
-
- <xsl:output method="text" encoding="US-ASCII"/>
-
- <xsl:template match="/">
- <xsl:for-each select="rcynic-summary/validation_status">
- <xsl:value-of select="@timestamp"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="@status"/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="."/>
- <xsl:text>&#9;</xsl:text>
- <xsl:value-of select="@generation"/>
- <xsl:text>&#10;</xsl:text>
- </xsl:for-each>
- </xsl:template>
-
-</xsl:stylesheet>
-
-<!--
- - Local variables:
- - mode: sgml
- - End:
- -->
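Like show.xsl, this stylesheet is meant for an XSLT 1.0 processor. A minimal manual invocation, assuming xsltproc is available and rcynic.xml is the collector's summary output, would be something like:

    xsltproc validation_status.xsl rcynic.xml

which prints one tab-separated line (timestamp, status, URI, generation) per validation_status element.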