From b46deb1417dc3596e9ac9fe2fe8cc0b7f42457e7 Mon Sep 17 00:00:00 2001 From: Rob Austein Date: Mon, 26 Oct 2015 06:29:00 +0000 Subject: "Any programmer who fails to comply with the standard naming, formatting, or commenting conventions should be shot. If it so happens that it is inconvenient to shoot him, then he is to be politely requested to recode his program in adherence to the above standard." -- Michael Spier, Digital Equipment Corporation svn path=/branches/tk705/; revision=6152 --- rpki/adns.py | 594 +++--- rpki/cli.py | 402 ++-- rpki/config.py | 412 ++-- rpki/csv_utils.py | 156 +- rpki/daemonize.py | 88 +- rpki/django_settings/common.py | 2 +- rpki/exceptions.py | 154 +- rpki/fields.py | 204 +- rpki/gui/app/forms.py | 198 +- rpki/gui/app/views.py | 78 +- rpki/gui/cacheview/tests.py | 1 - rpki/http_simple.py | 150 +- rpki/ipaddrs.py | 142 +- rpki/irdb/models.py | 704 +++---- rpki/irdb/router.py | 128 +- rpki/irdb/zookeeper.py | 2945 ++++++++++++++--------------- rpki/irdbd.py | 360 ++-- rpki/left_right.py | 10 +- rpki/log.py | 372 ++-- rpki/myrpki.py | 4 +- rpki/mysql_import.py | 8 +- rpki/oids.py | 22 +- rpki/old_irdbd.py | 486 ++--- rpki/pubd.py | 484 ++--- rpki/pubdb/models.py | 476 ++--- rpki/publication.py | 54 +- rpki/publication_control.py | 42 +- rpki/rcynic.py | 434 ++--- rpki/relaxng.py | 76 +- rpki/relaxng_parser.py | 32 +- rpki/resource_set.py | 1968 +++++++++---------- rpki/rootd.py | 810 ++++---- rpki/rpkic.py | 1433 +++++++------- rpki/rpkid.py | 1218 ++++++------ rpki/rpkid_tasks.py | 912 ++++----- rpki/rpkidb/models.py | 4060 ++++++++++++++++++++-------------------- rpki/rtr/bgpdump.py | 482 ++--- rpki/rtr/channels.py | 364 ++-- rpki/rtr/client.py | 816 ++++---- rpki/rtr/generator.py | 948 +++++----- rpki/rtr/main.py | 110 +- rpki/rtr/pdus.py | 960 +++++----- rpki/rtr/server.py | 872 ++++----- rpki/sundial.py | 456 ++--- rpki/up_down.py | 140 +- rpki/x509.py | 3474 +++++++++++++++++----------------- 46 files changed, 14121 insertions(+), 14120 deletions(-) (limited to 'rpki') diff --git a/rpki/adns.py b/rpki/adns.py index c5af3549..b0f235e7 100644 --- a/rpki/adns.py +++ b/rpki/adns.py @@ -31,14 +31,14 @@ import rpki.sundial import rpki.log try: - import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message - import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6 + import dns.resolver, dns.rdatatype, dns.rdataclass, dns.name, dns.message + import dns.inet, dns.exception, dns.query, dns.rcode, dns.ipv4, dns.ipv6 except ImportError: - if __name__ == "__main__": - sys.stderr.write("DNSPython not available, skipping rpki.adns unit test\n") - sys.exit(0) - else: - raise + if __name__ == "__main__": + sys.stderr.write("DNSPython not available, skipping rpki.adns unit test\n") + sys.exit(0) + else: + raise logger = logging.getLogger(__name__) @@ -47,7 +47,7 @@ logger = logging.getLogger(__name__) resolver = dns.resolver.Resolver() if resolver.cache is None: - resolver.cache = dns.resolver.Cache() + resolver.cache = dns.resolver.Cache() ## @var nameservers # Nameservers from resolver.nameservers converted to (af, address) @@ -58,327 +58,327 @@ if resolver.cache is None: nameservers = [] for ns in resolver.nameservers: - try: - nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns))) - continue - except: - pass - try: - nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns))) - continue - except: - pass - logger.error("Couldn't parse nameserver address %r", ns) + try: + nameservers.append((socket.AF_INET, dns.ipv4.inet_aton(ns))) + 
continue + except: + pass + try: + nameservers.append((socket.AF_INET6, dns.ipv6.inet_aton(ns))) + continue + except: + pass + logger.error("Couldn't parse nameserver address %r", ns) class dispatcher(asyncore.dispatcher): - """ - Basic UDP socket reader for use with asyncore. - """ - - def __init__(self, cb, eb, af, bufsize = 65535): - asyncore.dispatcher.__init__(self) - self.cb = cb - self.eb = eb - self.af = af - self.bufsize = bufsize - self.create_socket(af, socket.SOCK_DGRAM) - - def handle_read(self): - """ - Receive a packet, hand it off to query class callback. - """ - - wire, from_address = self.recvfrom(self.bufsize) - self.cb(self.af, from_address[0], from_address[1], wire) - - def handle_error(self): - """ - Pass errors to query class errback. - """ - - self.eb(sys.exc_info()[1]) - - def handle_connect(self): - """ - Quietly ignore UDP "connection" events. - """ - - pass - - def writable(self): """ - We don't need to hear about UDP socket becoming writable. + Basic UDP socket reader for use with asyncore. """ - return False + def __init__(self, cb, eb, af, bufsize = 65535): + asyncore.dispatcher.__init__(self) + self.cb = cb + self.eb = eb + self.af = af + self.bufsize = bufsize + self.create_socket(af, socket.SOCK_DGRAM) + def handle_read(self): + """ + Receive a packet, hand it off to query class callback. + """ -class query(object): - """ - Simplified (no search paths) asynchronous adaptation of - dns.resolver.Resolver.query() (q.v.). - """ - - def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN): - if isinstance(qname, (str, unicode)): - qname = dns.name.from_text(qname) - if isinstance(qtype, str): - qtype = dns.rdatatype.from_text(qtype) - if isinstance(qclass, str): - qclass = dns.rdataclass.from_text(qclass) - assert qname.is_absolute() - self.cb = cb - self.eb = eb - self.qname = qname - self.qtype = qtype - self.qclass = qclass - self.start = time.time() - rpki.async.event_defer(self.go) - - def go(self): - """ - Start running the query. Check our cache before doing network - query; if we find an answer there, just return it. Otherwise - start the network query. - """ - - if resolver.cache: - answer = resolver.cache.get((self.qname, self.qtype, self.qclass)) - else: - answer = None - if answer: - self.cb(self, answer) - else: - self.timer = rpki.async.timer() - self.sockets = {} - self.request = dns.message.make_query(self.qname, self.qtype, self.qclass) - if resolver.keyname is not None: - self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm) - self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload) - self.response = None - self.backoff = 0.10 - self.nameservers = nameservers[:] - self.loop1() - - def loop1(self): - """ - Outer loop. If we haven't got a response yet and still have - nameservers to check, start inner loop. Otherwise, we're done. - """ + wire, from_address = self.recvfrom(self.bufsize) + self.cb(self.af, from_address[0], from_address[1], wire) - self.timer.cancel() - if self.response is None and self.nameservers: - self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2) - else: - self.done1() + def handle_error(self): + """ + Pass errors to query class errback. + """ - def loop2(self, iterator, nameserver): - """ - Inner loop. Send query to next nameserver in our list, unless - we've hit the overall timeout for this query. 
- """ + self.eb(sys.exc_info()[1]) - self.timer.cancel() - try: - timeout = resolver._compute_timeout(self.start) - except dns.resolver.Timeout, e: - self.lose(e) - else: - af, addr = nameserver - if af not in self.sockets: - self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af) - self.sockets[af].sendto(self.request.to_wire(), - (dns.inet.inet_ntop(af, addr), resolver.port)) - self.timer.set_handler(self.socket_timeout) - self.timer.set_errback(self.socket_eb) - self.timer.set(rpki.sundial.timedelta(seconds = timeout)) - - def socket_timeout(self): - """ - No answer from nameserver, move on to next one (inner loop). - """ + def handle_connect(self): + """ + Quietly ignore UDP "connection" events. + """ - self.response = None - self.iterator() + pass - def socket_eb(self, e): - """ - UDP socket signaled error. If it really is some kind of socket - error, handle as if we've timed out on this nameserver; otherwise, - pass error back to caller. - """ + def writable(self): + """ + We don't need to hear about UDP socket becoming writable. + """ - self.timer.cancel() - if isinstance(e, socket.error): - self.response = None - self.iterator() - else: - self.lose(e) + return False - def socket_cb(self, af, from_host, from_port, wire): - """ - Received a packet that might be a DNS message. If it doesn't look - like it came from one of our nameservers, just drop it and leave - the timer running. Otherwise, try parsing it: if it's an answer, - we're done, otherwise handle error appropriately and move on to - next nameserver. - """ - sender = (af, dns.inet.inet_pton(af, from_host)) - if from_port != resolver.port or sender not in self.nameservers: - return - self.timer.cancel() - try: - self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False) - except dns.exception.FormError: - self.nameservers.remove(sender) - else: - rcode = self.response.rcode() - if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN): - self.done1() - return - if rcode != dns.rcode.SERVFAIL: - self.nameservers.remove(sender) - self.response = None - self.iterator() - - def done2(self): +class query(object): """ - Done with inner loop. If we still haven't got an answer and - haven't (yet?) eliminated all of our nameservers, wait a little - while before starting the cycle again, unless we've hit the - timeout threshold for the whole query. + Simplified (no search paths) asynchronous adaptation of + dns.resolver.Resolver.query() (q.v.). """ - if self.response is None and self.nameservers: - try: - delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff)) - self.backoff *= 2 - self.timer.set_handler(self.loop1) - self.timer.set_errback(self.lose) - self.timer.set(delay) - except dns.resolver.Timeout, e: - self.lose(e) - else: - self.loop1() - - def cleanup(self): - """ - Shut down our timer and sockets. - """ + def __init__(self, cb, eb, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN): + if isinstance(qname, (str, unicode)): + qname = dns.name.from_text(qname) + if isinstance(qtype, str): + qtype = dns.rdatatype.from_text(qtype) + if isinstance(qclass, str): + qclass = dns.rdataclass.from_text(qclass) + assert qname.is_absolute() + self.cb = cb + self.eb = eb + self.qname = qname + self.qtype = qtype + self.qclass = qclass + self.start = time.time() + rpki.async.event_defer(self.go) + + def go(self): + """ + Start running the query. 
Check our cache before doing network + query; if we find an answer there, just return it. Otherwise + start the network query. + """ + + if resolver.cache: + answer = resolver.cache.get((self.qname, self.qtype, self.qclass)) + else: + answer = None + if answer: + self.cb(self, answer) + else: + self.timer = rpki.async.timer() + self.sockets = {} + self.request = dns.message.make_query(self.qname, self.qtype, self.qclass) + if resolver.keyname is not None: + self.request.use_tsig(resolver.keyring, resolver.keyname, resolver.keyalgorithm) + self.request.use_edns(resolver.edns, resolver.ednsflags, resolver.payload) + self.response = None + self.backoff = 0.10 + self.nameservers = nameservers[:] + self.loop1() + + def loop1(self): + """ + Outer loop. If we haven't got a response yet and still have + nameservers to check, start inner loop. Otherwise, we're done. + """ + + self.timer.cancel() + if self.response is None and self.nameservers: + self.iterator = rpki.async.iterator(self.nameservers[:], self.loop2, self.done2) + else: + self.done1() + + def loop2(self, iterator, nameserver): + """ + Inner loop. Send query to next nameserver in our list, unless + we've hit the overall timeout for this query. + """ + + self.timer.cancel() + try: + timeout = resolver._compute_timeout(self.start) + except dns.resolver.Timeout, e: + self.lose(e) + else: + af, addr = nameserver + if af not in self.sockets: + self.sockets[af] = dispatcher(self.socket_cb, self.socket_eb, af) + self.sockets[af].sendto(self.request.to_wire(), + (dns.inet.inet_ntop(af, addr), resolver.port)) + self.timer.set_handler(self.socket_timeout) + self.timer.set_errback(self.socket_eb) + self.timer.set(rpki.sundial.timedelta(seconds = timeout)) + + def socket_timeout(self): + """ + No answer from nameserver, move on to next one (inner loop). + """ + + self.response = None + self.iterator() + + def socket_eb(self, e): + """ + UDP socket signaled error. If it really is some kind of socket + error, handle as if we've timed out on this nameserver; otherwise, + pass error back to caller. + """ + + self.timer.cancel() + if isinstance(e, socket.error): + self.response = None + self.iterator() + else: + self.lose(e) + + def socket_cb(self, af, from_host, from_port, wire): + """ + Received a packet that might be a DNS message. If it doesn't look + like it came from one of our nameservers, just drop it and leave + the timer running. Otherwise, try parsing it: if it's an answer, + we're done, otherwise handle error appropriately and move on to + next nameserver. + """ + + sender = (af, dns.inet.inet_pton(af, from_host)) + if from_port != resolver.port or sender not in self.nameservers: + return + self.timer.cancel() + try: + self.response = dns.message.from_wire(wire, keyring = self.request.keyring, request_mac = self.request.mac, one_rr_per_rrset = False) + except dns.exception.FormError: + self.nameservers.remove(sender) + else: + rcode = self.response.rcode() + if rcode in (dns.rcode.NOERROR, dns.rcode.NXDOMAIN): + self.done1() + return + if rcode != dns.rcode.SERVFAIL: + self.nameservers.remove(sender) + self.response = None + self.iterator() + + def done2(self): + """ + Done with inner loop. If we still haven't got an answer and + haven't (yet?) eliminated all of our nameservers, wait a little + while before starting the cycle again, unless we've hit the + timeout threshold for the whole query. 
+ """ + + if self.response is None and self.nameservers: + try: + delay = rpki.sundial.timedelta(seconds = min(resolver._compute_timeout(self.start), self.backoff)) + self.backoff *= 2 + self.timer.set_handler(self.loop1) + self.timer.set_errback(self.lose) + self.timer.set(delay) + except dns.resolver.Timeout, e: + self.lose(e) + else: + self.loop1() + + def cleanup(self): + """ + Shut down our timer and sockets. + """ + + self.timer.cancel() + for s in self.sockets.itervalues(): + s.close() - self.timer.cancel() - for s in self.sockets.itervalues(): - s.close() + def lose(self, e): + """ + Something bad happened. Clean up, then pass error back to caller. + """ + + self.cleanup() + self.eb(self, e) + + def done1(self): + """ + Done with outer loop. If we got a useful answer, cache it, then + pass it back to caller; if we got an error, pass the appropriate + exception back to caller. + """ + + self.cleanup() + try: + if not self.nameservers: + raise dns.resolver.NoNameservers + if self.response.rcode() == dns.rcode.NXDOMAIN: + raise dns.resolver.NXDOMAIN + answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response) + if resolver.cache: + resolver.cache.put((self.qname, self.qtype, self.qclass), answer) + self.cb(self, answer) + except (rpki.async.ExitNow, SystemExit): + raise + except Exception, e: + self.lose(e) - def lose(self, e): - """ - Something bad happened. Clean up, then pass error back to caller. - """ +class getaddrinfo(object): - self.cleanup() - self.eb(self, e) + typemap = { dns.rdatatype.A : socket.AF_INET, + dns.rdatatype.AAAA : socket.AF_INET6 } + + def __init__(self, cb, eb, host, address_families = typemap.values()): + self.cb = cb + self.eb = eb + self.host = host + self.result = [] + self.queries = [query(self.done, self.lose, host, qtype) + for qtype in self.typemap + if self.typemap[qtype] in address_families] + + def done(self, q, answer): + if answer is not None: + for a in answer: + self.result.append((self.typemap[a.rdtype], a.address)) + self.queries.remove(q) + if not self.queries: + self.cb(self.result) - def done1(self): - """ - Done with outer loop. If we got a useful answer, cache it, then - pass it back to caller; if we got an error, pass the appropriate - exception back to caller. 
- """ + def lose(self, q, e): + if isinstance(e, dns.resolver.NoAnswer): + self.done(q, None) + else: + for q in self.queries: + q.cleanup() + self.eb(e) - self.cleanup() - try: - if not self.nameservers: - raise dns.resolver.NoNameservers - if self.response.rcode() == dns.rcode.NXDOMAIN: - raise dns.resolver.NXDOMAIN - answer = dns.resolver.Answer(self.qname, self.qtype, self.qclass, self.response) - if resolver.cache: - resolver.cache.put((self.qname, self.qtype, self.qclass), answer) - self.cb(self, answer) - except (rpki.async.ExitNow, SystemExit): - raise - except Exception, e: - self.lose(e) +if __name__ == "__main__": -class getaddrinfo(object): + rpki.log.init("test-adns") + print "Some adns tests may take a minute or two, please be patient" - typemap = { dns.rdatatype.A : socket.AF_INET, - dns.rdatatype.AAAA : socket.AF_INET6 } - - def __init__(self, cb, eb, host, address_families = typemap.values()): - self.cb = cb - self.eb = eb - self.host = host - self.result = [] - self.queries = [query(self.done, self.lose, host, qtype) - for qtype in self.typemap - if self.typemap[qtype] in address_families] - - def done(self, q, answer): - if answer is not None: - for a in answer: - self.result.append((self.typemap[a.rdtype], a.address)) - self.queries.remove(q) - if not self.queries: - self.cb(self.result) - - def lose(self, q, e): - if isinstance(e, dns.resolver.NoAnswer): - self.done(q, None) - else: - for q in self.queries: - q.cleanup() - self.eb(e) + class test_getaddrinfo(object): -if __name__ == "__main__": + def __init__(self, qname): + self.qname = qname + getaddrinfo(self.done, self.lose, qname) - rpki.log.init("test-adns") - print "Some adns tests may take a minute or two, please be patient" + def done(self, result): + print "getaddrinfo(%s) returned: %s" % ( + self.qname, + ", ".join(str(r) for r in result)) - class test_getaddrinfo(object): + def lose(self, e): + print "getaddrinfo(%s) failed: %r" % (self.qname, e) - def __init__(self, qname): - self.qname = qname - getaddrinfo(self.done, self.lose, qname) + class test_query(object): - def done(self, result): - print "getaddrinfo(%s) returned: %s" % ( - self.qname, - ", ".join(str(r) for r in result)) + def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN): + self.qname = qname + self.qtype = qtype + self.qclass = qclass + query(self.done, self.lose, qname, qtype = qtype, qclass = qclass) - def lose(self, e): - print "getaddrinfo(%s) failed: %r" % (self.qname, e) + def done(self, q, result): + print "query(%s, %s, %s) returned: %s" % ( + self.qname, + dns.rdatatype.to_text(self.qtype), + dns.rdataclass.to_text(self.qclass), + ", ".join(str(r) for r in result)) - class test_query(object): + def lose(self, q, e): + print "getaddrinfo(%s, %s, %s) failed: %r" % ( + self.qname, + dns.rdatatype.to_text(self.qtype), + dns.rdataclass.to_text(self.qclass), + e) - def __init__(self, qname, qtype = dns.rdatatype.A, qclass = dns.rdataclass.IN): - self.qname = qname - self.qtype = qtype - self.qclass = qclass - query(self.done, self.lose, qname, qtype = qtype, qclass = qclass) + if True: + for t in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO): + test_query("subvert-rpki.hactrn.net", t) + test_query("nonexistant.rpki.net") + test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH) - def done(self, q, result): - print "query(%s, %s, %s) returned: %s" % ( - self.qname, - dns.rdatatype.to_text(self.qtype), - dns.rdataclass.to_text(self.qclass), - ", ".join(str(r) for r in result)) + for h in 
("subvert-rpki.hactrn.net", "nonexistant.rpki.net"): + test_getaddrinfo(h) - def lose(self, q, e): - print "getaddrinfo(%s, %s, %s) failed: %r" % ( - self.qname, - dns.rdatatype.to_text(self.qtype), - dns.rdataclass.to_text(self.qclass), - e) - - if True: - for t in (dns.rdatatype.A, dns.rdatatype.AAAA, dns.rdatatype.HINFO): - test_query("subvert-rpki.hactrn.net", t) - test_query("nonexistant.rpki.net") - test_query("subvert-rpki.hactrn.net", qclass = dns.rdataclass.CH) - - for h in ("subvert-rpki.hactrn.net", "nonexistant.rpki.net"): - test_getaddrinfo(h) - - rpki.async.event_loop() + rpki.async.event_loop() diff --git a/rpki/cli.py b/rpki/cli.py index 35999cb0..51ac0367 100644 --- a/rpki/cli.py +++ b/rpki/cli.py @@ -28,244 +28,244 @@ import argparse import traceback try: - import readline - have_readline = True + import readline + have_readline = True except ImportError: - have_readline = False + have_readline = False class BadCommandSyntax(Exception): - "Bad command line syntax." + "Bad command line syntax." class ExitArgparse(Exception): - "Exit method from ArgumentParser." + "Exit method from ArgumentParser." - def __init__(self, message = None, status = 0): - super(ExitArgparse, self).__init__() - self.message = message - self.status = status + def __init__(self, message = None, status = 0): + super(ExitArgparse, self).__init__() + self.message = message + self.status = status class Cmd(cmd.Cmd): - """ - Customized subclass of Python cmd module. - """ + """ + Customized subclass of Python cmd module. + """ - emptyline_repeats_last_command = False + emptyline_repeats_last_command = False - EOF_exits_command_loop = True + EOF_exits_command_loop = True - identchars = cmd.IDENTCHARS + "/-." + identchars = cmd.IDENTCHARS + "/-." - histfile = None + histfile = None - last_command_failed = False + last_command_failed = False - def onecmd(self, line): - """ - Wrap error handling around cmd.Cmd.onecmd(). Might want to do - something kinder than showing a traceback, eventually. - """ + def onecmd(self, line): + """ + Wrap error handling around cmd.Cmd.onecmd(). Might want to do + something kinder than showing a traceback, eventually. + """ - self.last_command_failed = False - try: - return cmd.Cmd.onecmd(self, line) - except SystemExit: - raise - except ExitArgparse, e: - if e.message is not None: - print e.message - self.last_command_failed = e.status != 0 - return False - except BadCommandSyntax, e: - print e - except: - traceback.print_exc() - self.last_command_failed = True - return False - - def do_EOF(self, arg): - if self.EOF_exits_command_loop and self.prompt: - print - return self.EOF_exits_command_loop - - def do_exit(self, arg): - """ - Exit program. - """ + self.last_command_failed = False + try: + return cmd.Cmd.onecmd(self, line) + except SystemExit: + raise + except ExitArgparse, e: + if e.message is not None: + print e.message + self.last_command_failed = e.status != 0 + return False + except BadCommandSyntax, e: + print e + except: + traceback.print_exc() + self.last_command_failed = True + return False + + def do_EOF(self, arg): + if self.EOF_exits_command_loop and self.prompt: + print + return self.EOF_exits_command_loop + + def do_exit(self, arg): + """ + Exit program. + """ + + return True + + do_quit = do_exit + + def emptyline(self): + """ + Handle an empty line. cmd module default is to repeat the last + command, which I find to be violation of the principal of least + astonishment, so my preference is that an empty line does nothing. 
+ """ + + if self.emptyline_repeats_last_command: + cmd.Cmd.emptyline(self) + + def filename_complete(self, text, line, begidx, endidx): + """ + Filename completion handler, with hack to restore what I consider + the normal (bash-like) behavior when one hits the completion key + and there's only one match. + """ + + result = glob.glob(text + "*") + if len(result) == 1: + path = result.pop() + if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))): + result.append(path + os.path.sep) + else: + result.append(path + " ") + return result + + def completenames(self, text, *ignored): + """ + Command name completion handler, with hack to restore what I + consider the normal (bash-like) behavior when one hits the + completion key and there's only one match. + """ + + result = cmd.Cmd.completenames(self, text, *ignored) + if len(result) == 1: + result[0] += " " + return result + + def help_help(self): + """ + Type "help [topic]" for help on a command, + or just "help" for a list of commands. + """ + + self.stdout.write(self.help_help.__doc__ + "\n") + + def complete_help(self, *args): + """ + Better completion function for help command arguments. + """ + + text = args[0] + names = self.get_names() + result = [] + for prefix in ("do_", "help_"): + result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text) and s != "do_EOF") + return result + + if have_readline: + + def cmdloop_with_history(self): + """ + Better command loop, with history file and tweaked readline + completion delimiters. + """ + + old_completer_delims = readline.get_completer_delims() + if self.histfile is not None: + try: + readline.read_history_file(self.histfile) + except IOError: + pass + try: + readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars))) + self.cmdloop() + finally: + if self.histfile is not None and readline.get_current_history_length(): + readline.write_history_file(self.histfile) + readline.set_completer_delims(old_completer_delims) + + else: + + cmdloop_with_history = cmd.Cmd.cmdloop - return True - do_quit = do_exit - def emptyline(self): +def yes_or_no(prompt, default = None, require_full_word = False): """ - Handle an empty line. cmd module default is to repeat the last - command, which I find to be violation of the principal of least - astonishment, so my preference is that an empty line does nothing. + Ask a yes-or-no question. """ - if self.emptyline_repeats_last_command: - cmd.Cmd.emptyline(self) + prompt = prompt.rstrip() + _yes_or_no_prompts[default] + while True: + answer = raw_input(prompt).strip().lower() + if not answer and default is not None: + return default + if answer == "yes" or (not require_full_word and answer.startswith("y")): + return True + if answer == "no" or (not require_full_word and answer.startswith("n")): + return False + print 'Please answer "yes" or "no"' - def filename_complete(self, text, line, begidx, endidx): - """ - Filename completion handler, with hack to restore what I consider - the normal (bash-like) behavior when one hits the completion key - and there's only one match. 
- """ +_yes_or_no_prompts = { + True : ' ("yes" or "no" ["yes"]) ', + False : ' ("yes" or "no" ["no"]) ', + None : ' ("yes" or "no") ' } - result = glob.glob(text + "*") - if len(result) == 1: - path = result.pop() - if os.path.isdir(path) or (os.path.islink(path) and os.path.isdir(os.path.join(path, "."))): - result.append(path + os.path.sep) - else: - result.append(path + " ") - return result - def completenames(self, text, *ignored): +class NonExitingArgumentParser(argparse.ArgumentParser): """ - Command name completion handler, with hack to restore what I - consider the normal (bash-like) behavior when one hits the - completion key and there's only one match. + ArgumentParser tweaked to throw ExitArgparse exception + rather than using sys.exit(), for use with command loop. """ - result = cmd.Cmd.completenames(self, text, *ignored) - if len(result) == 1: - result[0] += " " - return result + def exit(self, status = 0, message = None): + raise ExitArgparse(status = status, message = message) - def help_help(self): - """ - Type "help [topic]" for help on a command, - or just "help" for a list of commands. - """ - - self.stdout.write(self.help_help.__doc__ + "\n") - def complete_help(self, *args): - """ - Better completion function for help command arguments. +def parsecmd(subparsers, *arg_clauses): """ + Decorator to combine the argparse and cmd modules. - text = args[0] - names = self.get_names() - result = [] - for prefix in ("do_", "help_"): - result.extend(s[len(prefix):] for s in names if s.startswith(prefix + text) and s != "do_EOF") - return result + subparsers is an instance of argparse.ArgumentParser (or subclass) which was + returned by calling the .add_subparsers() method on an ArgumentParser instance + intended to handle parsing for the entire program on the command line. - if have_readline: - - def cmdloop_with_history(self): - """ - Better command loop, with history file and tweaked readline - completion delimiters. - """ - - old_completer_delims = readline.get_completer_delims() - if self.histfile is not None: - try: - readline.read_history_file(self.histfile) - except IOError: - pass - try: - readline.set_completer_delims("".join(set(old_completer_delims) - set(self.identchars))) - self.cmdloop() - finally: - if self.histfile is not None and readline.get_current_history_length(): - readline.write_history_file(self.histfile) - readline.set_completer_delims(old_completer_delims) - - else: - - cmdloop_with_history = cmd.Cmd.cmdloop + arg_clauses is a series of defarg() invocations defining arguments to be parsed + by the argparse code. + The decorator will use arg_clauses to construct two separate argparse parser + instances: one will be attached to the global parser as a subparser, the + other will be used to parse arguments for this command when invoked by cmd. + The decorator will replace the original do_whatever method with a wrapped version + which uses the local argparse instance to parse the single string supplied by + the cmd module. -def yes_or_no(prompt, default = None, require_full_word = False): - """ - Ask a yes-or-no question. 
- """ - - prompt = prompt.rstrip() + _yes_or_no_prompts[default] - while True: - answer = raw_input(prompt).strip().lower() - if not answer and default is not None: - return default - if answer == "yes" or (not require_full_word and answer.startswith("y")): - return True - if answer == "no" or (not require_full_word and answer.startswith("n")): - return False - print 'Please answer "yes" or "no"' - -_yes_or_no_prompts = { - True : ' ("yes" or "no" ["yes"]) ', - False : ' ("yes" or "no" ["no"]) ', - None : ' ("yes" or "no") ' } - - -class NonExitingArgumentParser(argparse.ArgumentParser): - """ - ArgumentParser tweaked to throw ExitArgparse exception - rather than using sys.exit(), for use with command loop. - """ - - def exit(self, status = 0, message = None): - raise ExitArgparse(status = status, message = message) + The intent is that, from the command's point of view, all of this should work + pretty much the same way regardless of whether the command was invoked from + the global command line or from within the cmd command loop. Either way, + the command method should get an argparse.Namespace object. + In theory, we could generate a completion handler from the argparse definitions, + much as the separate argcomplete package does. In practice this is a lot of + work and I'm not ready to get into that just yet. + """ -def parsecmd(subparsers, *arg_clauses): - """ - Decorator to combine the argparse and cmd modules. - - subparsers is an instance of argparse.ArgumentParser (or subclass) which was - returned by calling the .add_subparsers() method on an ArgumentParser instance - intended to handle parsing for the entire program on the command line. - - arg_clauses is a series of defarg() invocations defining arguments to be parsed - by the argparse code. - - The decorator will use arg_clauses to construct two separate argparse parser - instances: one will be attached to the global parser as a subparser, the - other will be used to parse arguments for this command when invoked by cmd. - - The decorator will replace the original do_whatever method with a wrapped version - which uses the local argparse instance to parse the single string supplied by - the cmd module. - - The intent is that, from the command's point of view, all of this should work - pretty much the same way regardless of whether the command was invoked from - the global command line or from within the cmd command loop. Either way, - the command method should get an argparse.Namespace object. - - In theory, we could generate a completion handler from the argparse definitions, - much as the separate argcomplete package does. In practice this is a lot of - work and I'm not ready to get into that just yet. 
- """ - - def decorate(func): - assert func.__name__.startswith("do_") - parser = NonExitingArgumentParser(description = func.__doc__, - prog = func.__name__[3:], - add_help = False) - subparser = subparsers.add_parser(func.__name__[3:], - description = func.__doc__, - help = func.__doc__.lstrip().partition("\n")[0]) - for positional, keywords in arg_clauses: - parser.add_argument(*positional, **keywords) - subparser.add_argument(*positional, **keywords) - subparser.set_defaults(func = func) - def wrapped(self, arg): - return func(self, parser.parse_args(shlex.split(arg))) - wrapped.argparser = parser - wrapped.__doc__ = func.__doc__ - return wrapped - return decorate + def decorate(func): + assert func.__name__.startswith("do_") + parser = NonExitingArgumentParser(description = func.__doc__, + prog = func.__name__[3:], + add_help = False) + subparser = subparsers.add_parser(func.__name__[3:], + description = func.__doc__, + help = func.__doc__.lstrip().partition("\n")[0]) + for positional, keywords in arg_clauses: + parser.add_argument(*positional, **keywords) + subparser.add_argument(*positional, **keywords) + subparser.set_defaults(func = func) + def wrapped(self, arg): + return func(self, parser.parse_args(shlex.split(arg))) + wrapped.argparser = parser + wrapped.__doc__ = func.__doc__ + return wrapped + return decorate def cmdarg(*positional, **keywords): - """ - Syntactic sugar to let us use keyword arguments normally when constructing - arguments for deferred calls to argparse.ArgumentParser.add_argument(). - """ + """ + Syntactic sugar to let us use keyword arguments normally when constructing + arguments for deferred calls to argparse.ArgumentParser.add_argument(). + """ - return positional, keywords + return positional, keywords diff --git a/rpki/config.py b/rpki/config.py index 99041259..5dd03a6d 100644 --- a/rpki/config.py +++ b/rpki/config.py @@ -33,10 +33,10 @@ logger = logging.getLogger(__name__) # Default name of config file if caller doesn't specify one explictly. try: - import rpki.autoconf - default_filename = os.path.join(rpki.autoconf.sysconfdir, "rpki.conf") + import rpki.autoconf + default_filename = os.path.join(rpki.autoconf.sysconfdir, "rpki.conf") except ImportError: - default_filename = None + default_filename = None ## @var rpki_conf_envname # Name of environment variable containing config file name. @@ -44,230 +44,230 @@ except ImportError: rpki_conf_envname = "RPKI_CONF" class parser(object): - """ - Extensions to stock Python ConfigParser: - - Read config file and set default section while initializing parser object. - - Support for OpenSSL-style subscripted options and a limited form of - OpenSSL-style indirect variable references (${section::option}). - - get-methods with default values and default section name. - - If no filename is given to the constructor (filename and - set_filename both None), we check for an environment variable naming - the config file, then finally we check for a global config file if - autoconf provided a directory name to check. - - NB: Programs which accept a configuration filename on the command - lines should pass that filename using set_filename so that we can - set the magic environment variable. Constraints from some external - libraries (principally Django) sometimes require library code to - look things up in the configuration file without the knowledge of - the controlling program, but setting the environment variable - insures that everybody's reading from the same script, as it were. 
- """ - - # Odd keyword-only calling sequence is a defense against old code - # that thinks it knows how __init__() handles positional arguments. - - def __init__(self, **kwargs): - section = kwargs.pop("section", None) - allow_missing = kwargs.pop("allow_missing", False) - set_filename = kwargs.pop("set_filename", None) - filename = kwargs.pop("filename", set_filename) - - assert not kwargs, "Unexpected keyword arguments: " + ", ".join("%s = %r" % kv for kv in kwargs.iteritems()) - - if set_filename is not None: - os.environ[rpki_conf_envname] = set_filename - - self.cfg = ConfigParser.RawConfigParser() - self.default_section = section - - self.filename = filename or os.getenv(rpki_conf_envname) or default_filename - - try: - with open(self.filename, "r") as f: - self.cfg.readfp(f) - except IOError: - if allow_missing: - self.filename = None - else: - raise - - - def has_section(self, section): """ - Test whether a section exists. - """ - - return self.cfg.has_section(section) + Extensions to stock Python ConfigParser: + Read config file and set default section while initializing parser object. - def has_option(self, option, section = None): - """ - Test whether an option exists. - """ + Support for OpenSSL-style subscripted options and a limited form of + OpenSSL-style indirect variable references (${section::option}). - if section is None: - section = self.default_section - return self.cfg.has_option(section, option) + get-methods with default values and default section name. + If no filename is given to the constructor (filename and + set_filename both None), we check for an environment variable naming + the config file, then finally we check for a global config file if + autoconf provided a directory name to check. - def multiget(self, option, section = None): + NB: Programs which accept a configuration filename on the command + lines should pass that filename using set_filename so that we can + set the magic environment variable. Constraints from some external + libraries (principally Django) sometimes require library code to + look things up in the configuration file without the knowledge of + the controlling program, but setting the environment variable + insures that everybody's reading from the same script, as it were. """ - Parse OpenSSL-style foo.0, foo.1, ... subscripted options. - Returns iteration of values matching the specified option name. - """ + # Odd keyword-only calling sequence is a defense against old code + # that thinks it knows how __init__() handles positional arguments. - matches = [] - if section is None: - section = self.default_section - if self.cfg.has_option(section, option): - yield self.cfg.get(section, option) - option += "." - matches = [o for o in self.cfg.options(section) if o.startswith(option) and o[len(option):].isdigit()] - matches.sort() - for option in matches: - yield self.cfg.get(section, option) + def __init__(self, **kwargs): + section = kwargs.pop("section", None) + allow_missing = kwargs.pop("allow_missing", False) + set_filename = kwargs.pop("set_filename", None) + filename = kwargs.pop("filename", set_filename) + assert not kwargs, "Unexpected keyword arguments: " + ", ".join("%s = %r" % kv for kv in kwargs.iteritems()) - _regexp = re.compile("\\${(.*?)::(.*?)}") + if set_filename is not None: + os.environ[rpki_conf_envname] = set_filename - def _repl(self, m): - """ - Replacement function for indirect variable substitution. - This is intended for use with re.subn(). 
- """ + self.cfg = ConfigParser.RawConfigParser() + self.default_section = section - section, option = m.group(1, 2) - if section == "ENV": - return os.getenv(option, "") - else: - return self.cfg.get(section, option) + self.filename = filename or os.getenv(rpki_conf_envname) or default_filename + try: + with open(self.filename, "r") as f: + self.cfg.readfp(f) + except IOError: + if allow_missing: + self.filename = None + else: + raise - def get(self, option, default = None, section = None): - """ - Get an option, perhaps with a default value. - """ - if section is None: - section = self.default_section - if default is not None and not self.cfg.has_option(section, option): - return default - val = self.cfg.get(section, option) - while True: - val, modified = self._regexp.subn(self._repl, val, 1) - if not modified: - return val + def has_section(self, section): + """ + Test whether a section exists. + """ + return self.cfg.has_section(section) - def getboolean(self, option, default = None, section = None): - """ - Get a boolean option, perhaps with a default value. - """ - v = self.get(option, default, section) - if isinstance(v, str): - v = v.lower() - if v not in self.cfg._boolean_states: - raise ValueError("Not a boolean: %s" % v) - v = self.cfg._boolean_states[v] - return v + def has_option(self, option, section = None): + """ + Test whether an option exists. + """ + if section is None: + section = self.default_section + return self.cfg.has_option(section, option) - def getint(self, option, default = None, section = None): - """ - Get an integer option, perhaps with a default value. - """ - return int(self.get(option, default, section)) + def multiget(self, option, section = None): + """ + Parse OpenSSL-style foo.0, foo.1, ... subscripted options. + Returns iteration of values matching the specified option name. + """ - def getlong(self, option, default = None, section = None): - """ - Get a long integer option, perhaps with a default value. - """ - - return long(self.get(option, default, section)) - - - def set_global_flags(self): - """ - Consolidated control for all the little global control flags - scattered through the libraries. This isn't a particularly good - place for this function to live, but it has to live somewhere and - making it a method of the config parser from which it gets all of - its data is less silly than the available alternatives. 
- """ - - # pylint: disable=W0621 - import rpki.x509 - import rpki.log - import rpki.daemonize - - for line in self.multiget("configure_logger"): - try: - name, level = line.split() - logging.getLogger(name).setLevel(getattr(logging, level.upper())) - except Exception, e: - logger.warning("Could not process configure_logger line %r: %s", line, e) - - try: - rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs") - except ConfigParser.NoOptionError: - pass - - try: - rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms")) - except OSError, e: - logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_outbound_cms"), e) - except ConfigParser.NoOptionError: - pass - - try: - rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms")) - except OSError, e: - logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_inbound_cms"), e) - except ConfigParser.NoOptionError: - pass - - try: - rpki.x509.XML_CMS_object.check_inbound_schema = self.getboolean("check_inbound_schema") - except ConfigParser.NoOptionError: - pass - - try: - rpki.x509.XML_CMS_object.check_outbound_schema = self.getboolean("check_outbound_schema") - except ConfigParser.NoOptionError: - pass - - try: - rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks") - except ConfigParser.NoOptionError: - pass - - try: - rpki.daemonize.default_pid_directory = self.get("pid_directory") - except ConfigParser.NoOptionError: - pass - - try: - rpki.daemonize.pid_filename = self.get("pid_filename") - except ConfigParser.NoOptionError: - pass - - try: - rpki.x509.generate_insecure_debug_only_rsa_key = rpki.x509.insecure_debug_only_rsa_key_generator(*self.get("insecure-debug-only-rsa-key-db").split()) - except ConfigParser.NoOptionError: - pass - except: # pylint: disable=W0702 - logger.warning("insecure-debug-only-rsa-key-db configured but initialization failed, check for corrupted database file") - - try: - rpki.up_down.content_type = self.get("up_down_content_type") - except ConfigParser.NoOptionError: - pass + matches = [] + if section is None: + section = self.default_section + if self.cfg.has_option(section, option): + yield self.cfg.get(section, option) + option += "." + matches = [o for o in self.cfg.options(section) if o.startswith(option) and o[len(option):].isdigit()] + matches.sort() + for option in matches: + yield self.cfg.get(section, option) + + + _regexp = re.compile("\\${(.*?)::(.*?)}") + + def _repl(self, m): + """ + Replacement function for indirect variable substitution. + This is intended for use with re.subn(). + """ + + section, option = m.group(1, 2) + if section == "ENV": + return os.getenv(option, "") + else: + return self.cfg.get(section, option) + + + def get(self, option, default = None, section = None): + """ + Get an option, perhaps with a default value. + """ + + if section is None: + section = self.default_section + if default is not None and not self.cfg.has_option(section, option): + return default + val = self.cfg.get(section, option) + while True: + val, modified = self._regexp.subn(self._repl, val, 1) + if not modified: + return val + + + def getboolean(self, option, default = None, section = None): + """ + Get a boolean option, perhaps with a default value. 
+ """ + + v = self.get(option, default, section) + if isinstance(v, str): + v = v.lower() + if v not in self.cfg._boolean_states: + raise ValueError("Not a boolean: %s" % v) + v = self.cfg._boolean_states[v] + return v + + + def getint(self, option, default = None, section = None): + """ + Get an integer option, perhaps with a default value. + """ + + return int(self.get(option, default, section)) + + + def getlong(self, option, default = None, section = None): + """ + Get a long integer option, perhaps with a default value. + """ + + return long(self.get(option, default, section)) + + + def set_global_flags(self): + """ + Consolidated control for all the little global control flags + scattered through the libraries. This isn't a particularly good + place for this function to live, but it has to live somewhere and + making it a method of the config parser from which it gets all of + its data is less silly than the available alternatives. + """ + + # pylint: disable=W0621 + import rpki.x509 + import rpki.log + import rpki.daemonize + + for line in self.multiget("configure_logger"): + try: + name, level = line.split() + logging.getLogger(name).setLevel(getattr(logging, level.upper())) + except Exception, e: + logger.warning("Could not process configure_logger line %r: %s", line, e) + + try: + rpki.x509.CMS_object.debug_cms_certs = self.getboolean("debug_cms_certs") + except ConfigParser.NoOptionError: + pass + + try: + rpki.x509.XML_CMS_object.dump_outbound_cms = rpki.x509.DeadDrop(self.get("dump_outbound_cms")) + except OSError, e: + logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_outbound_cms"), e) + except ConfigParser.NoOptionError: + pass + + try: + rpki.x509.XML_CMS_object.dump_inbound_cms = rpki.x509.DeadDrop(self.get("dump_inbound_cms")) + except OSError, e: + logger.warning("Couldn't initialize mailbox %s: %s", self.get("dump_inbound_cms"), e) + except ConfigParser.NoOptionError: + pass + + try: + rpki.x509.XML_CMS_object.check_inbound_schema = self.getboolean("check_inbound_schema") + except ConfigParser.NoOptionError: + pass + + try: + rpki.x509.XML_CMS_object.check_outbound_schema = self.getboolean("check_outbound_schema") + except ConfigParser.NoOptionError: + pass + + try: + rpki.log.enable_tracebacks = self.getboolean("enable_tracebacks") + except ConfigParser.NoOptionError: + pass + + try: + rpki.daemonize.default_pid_directory = self.get("pid_directory") + except ConfigParser.NoOptionError: + pass + + try: + rpki.daemonize.pid_filename = self.get("pid_filename") + except ConfigParser.NoOptionError: + pass + + try: + rpki.x509.generate_insecure_debug_only_rsa_key = rpki.x509.insecure_debug_only_rsa_key_generator(*self.get("insecure-debug-only-rsa-key-db").split()) + except ConfigParser.NoOptionError: + pass + except: # pylint: disable=W0702 + logger.warning("insecure-debug-only-rsa-key-db configured but initialization failed, check for corrupted database file") + + try: + rpki.up_down.content_type = self.get("up_down_content_type") + except ConfigParser.NoOptionError: + pass diff --git a/rpki/csv_utils.py b/rpki/csv_utils.py index 9034e96b..2864693c 100644 --- a/rpki/csv_utils.py +++ b/rpki/csv_utils.py @@ -22,93 +22,93 @@ import csv import os class BadCSVSyntax(Exception): - """ - Bad CSV syntax. - """ + """ + Bad CSV syntax. 
+ """ class csv_reader(object): - """ - Reader for tab-delimited text that's (slightly) friendlier than the - stock Python csv module (which isn't intended for direct use by - humans anyway, and neither was this package originally, but that - seems to be the way that it has evolved...). - - Columns parameter specifies how many columns users of the reader - expect to see; lines with fewer columns will be padded with None - values. - - Original API design for this class courtesy of Warren Kumari, but - don't blame him if you don't like what I did with his ideas. - """ - - def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"): - assert columns is None or isinstance(columns, int) - assert min_columns is None or isinstance(min_columns, int) - if columns is not None and min_columns is None: - min_columns = columns - self.filename = filename - self.columns = columns - self.min_columns = min_columns - self.comment_characters = comment_characters - self.file = open(filename, "r") - - def __iter__(self): - line_number = 0 - for line in self.file: - line_number += 1 - line = line.strip() - if not line or line[0] in self.comment_characters: - continue - fields = line.split() - if self.min_columns is not None and len(fields) < self.min_columns: - raise BadCSVSyntax("%s:%d: Not enough columns in line %r" % (self.filename, line_number, line)) - if self.columns is not None and len(fields) > self.columns: - raise BadCSVSyntax("%s:%d: Too many columns in line %r" % (self.filename, line_number, line)) - if self.columns is not None and len(fields) < self.columns: - fields += tuple(None for i in xrange(self.columns - len(fields))) - yield fields - - def __enter__(self): - return self - - def __exit__(self, _type, value, traceback): - self.file.close() + """ + Reader for tab-delimited text that's (slightly) friendlier than the + stock Python csv module (which isn't intended for direct use by + humans anyway, and neither was this package originally, but that + seems to be the way that it has evolved...). + + Columns parameter specifies how many columns users of the reader + expect to see; lines with fewer columns will be padded with None + values. + + Original API design for this class courtesy of Warren Kumari, but + don't blame him if you don't like what I did with his ideas. 
+ """ + + def __init__(self, filename, columns = None, min_columns = None, comment_characters = "#;"): + assert columns is None or isinstance(columns, int) + assert min_columns is None or isinstance(min_columns, int) + if columns is not None and min_columns is None: + min_columns = columns + self.filename = filename + self.columns = columns + self.min_columns = min_columns + self.comment_characters = comment_characters + self.file = open(filename, "r") + + def __iter__(self): + line_number = 0 + for line in self.file: + line_number += 1 + line = line.strip() + if not line or line[0] in self.comment_characters: + continue + fields = line.split() + if self.min_columns is not None and len(fields) < self.min_columns: + raise BadCSVSyntax("%s:%d: Not enough columns in line %r" % (self.filename, line_number, line)) + if self.columns is not None and len(fields) > self.columns: + raise BadCSVSyntax("%s:%d: Too many columns in line %r" % (self.filename, line_number, line)) + if self.columns is not None and len(fields) < self.columns: + fields += tuple(None for i in xrange(self.columns - len(fields))) + yield fields + + def __enter__(self): + return self + + def __exit__(self, _type, value, traceback): + self.file.close() class csv_writer(object): - """ - Writer object for tab delimited text. We just use the stock CSV - module in excel-tab mode for this. + """ + Writer object for tab delimited text. We just use the stock CSV + module in excel-tab mode for this. - If "renmwo" is set (default), the file will be written to - a temporary name and renamed to the real filename after closing. - """ + If "renmwo" is set (default), the file will be written to + a temporary name and renamed to the real filename after closing. + """ - def __init__(self, filename, renmwo = True): - self.filename = filename - self.renmwo = "%s.~renmwo%d~" % (filename, os.getpid()) if renmwo else filename - self.file = open(self.renmwo, "w") - self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab")) + def __init__(self, filename, renmwo = True): + self.filename = filename + self.renmwo = "%s.~renmwo%d~" % (filename, os.getpid()) if renmwo else filename + self.file = open(self.renmwo, "w") + self.writer = csv.writer(self.file, dialect = csv.get_dialect("excel-tab")) - def __enter__(self): - return self + def __enter__(self): + return self - def __exit__(self, _type, value, traceback): - self.close() + def __exit__(self, _type, value, traceback): + self.close() - def close(self): - """ - Close this writer. - """ + def close(self): + """ + Close this writer. + """ - if self.file is not None: - self.file.close() - self.file = None - if self.filename != self.renmwo: - os.rename(self.renmwo, self.filename) + if self.file is not None: + self.file.close() + self.file = None + if self.filename != self.renmwo: + os.rename(self.renmwo, self.filename) - def __getattr__(self, attr): - """ - Fake inheritance from whatever object csv.writer deigns to give us. - """ + def __getattr__(self, attr): + """ + Fake inheritance from whatever object csv.writer deigns to give us. 
+ """ - return getattr(self.writer, attr) + return getattr(self.writer, attr) diff --git a/rpki/daemonize.py b/rpki/daemonize.py index 6a825566..bd59fca0 100644 --- a/rpki/daemonize.py +++ b/rpki/daemonize.py @@ -80,56 +80,56 @@ default_pid_directory = "/var/run/rpki" pid_filename = None def daemon(nochdir = False, noclose = False, pidfile = None): - """ - Make this program become a daemon, like 4.4BSD daemon(3), and - write its pid out to a file with cleanup on exit. - """ - - if pidfile is None: - if pid_filename is None: - prog = os.path.splitext(os.path.basename(sys.argv[0]))[0] - pidfile = os.path.join(default_pid_directory, "%s.pid" % prog) + """ + Make this program become a daemon, like 4.4BSD daemon(3), and + write its pid out to a file with cleanup on exit. + """ + + if pidfile is None: + if pid_filename is None: + prog = os.path.splitext(os.path.basename(sys.argv[0]))[0] + pidfile = os.path.join(default_pid_directory, "%s.pid" % prog) + else: + pidfile = pid_filename + + old_sighup_action = signal.signal(signal.SIGHUP, signal.SIG_IGN) + + try: + pid = os.fork() + except OSError, e: + sys.exit("fork() failed: %d (%s)" % (e.errno, e.strerror)) else: - pidfile = pid_filename + if pid > 0: + os._exit(0) - old_sighup_action = signal.signal(signal.SIGHUP, signal.SIG_IGN) + if not nochdir: + os.chdir("/") - try: - pid = os.fork() - except OSError, e: - sys.exit("fork() failed: %d (%s)" % (e.errno, e.strerror)) - else: - if pid > 0: - os._exit(0) + os.setsid() - if not nochdir: - os.chdir("/") + if not noclose: + sys.stdout.flush() + sys.stderr.flush() + fd = os.open(os.devnull, os.O_RDWR) + os.dup2(fd, 0) + os.dup2(fd, 1) + os.dup2(fd, 2) + if fd > 2: + os.close(fd) - os.setsid() + signal.signal(signal.SIGHUP, old_sighup_action) - if not noclose: - sys.stdout.flush() - sys.stderr.flush() - fd = os.open(os.devnull, os.O_RDWR) - os.dup2(fd, 0) - os.dup2(fd, 1) - os.dup2(fd, 2) - if fd > 2: - os.close(fd) + def delete_pid_file(): + try: + os.unlink(pidfile) + except OSError: + pass - signal.signal(signal.SIGHUP, old_sighup_action) + atexit.register(delete_pid_file) - def delete_pid_file(): try: - os.unlink(pidfile) - except OSError: - pass - - atexit.register(delete_pid_file) - - try: - f = open(pidfile, "w") - f.write("%d\n" % os.getpid()) - f.close() - except IOError, e: - logger.warning("Couldn't write PID file %s: %s", pidfile, e.strerror) + f = open(pidfile, "w") + f.write("%d\n" % os.getpid()) + f.close() + except IOError, e: + logger.warning("Couldn't write PID file %s: %s", pidfile, e.strerror) diff --git a/rpki/django_settings/common.py b/rpki/django_settings/common.py index 2f676660..3860d40b 100644 --- a/rpki/django_settings/common.py +++ b/rpki/django_settings/common.py @@ -58,7 +58,7 @@ class DatabaseConfigurator(object): default_sql_engine = "mysql" - def configure(self, cfg, section): + def configure(self, cfg, section): # pylint: disable=W0621 self.cfg = cfg self.section = section engine = cfg.get("sql-engine", section = section, diff --git a/rpki/exceptions.py b/rpki/exceptions.py index f456dfc5..cbdb9f83 100644 --- a/rpki/exceptions.py +++ b/rpki/exceptions.py @@ -22,222 +22,222 @@ Exception definitions for RPKI modules. """ class RPKI_Exception(Exception): - "Base class for RPKI exceptions." + "Base class for RPKI exceptions." class NotInDatabase(RPKI_Exception): - "Lookup failed for an object expected to be in the database." + "Lookup failed for an object expected to be in the database." class BadURISyntax(RPKI_Exception): - "Illegal syntax for a URI." 
+ "Illegal syntax for a URI." class BadStatusCode(RPKI_Exception): - "Unrecognized protocol status code." + "Unrecognized protocol status code." class BadQuery(RPKI_Exception): - "Unexpected protocol query." + "Unexpected protocol query." class DBConsistancyError(RPKI_Exception): - "Found multiple matches for a database query that shouldn't ever return that." + "Found multiple matches for a database query that shouldn't ever return that." class CMSVerificationFailed(RPKI_Exception): - "Verification of a CMS message failed." + "Verification of a CMS message failed." class HTTPRequestFailed(RPKI_Exception): - "HTTP request failed." + "HTTP request failed." class DERObjectConversionError(RPKI_Exception): - "Error trying to convert a DER-based object from one representation to another." + "Error trying to convert a DER-based object from one representation to another." class NotACertificateChain(RPKI_Exception): - "Certificates don't form a proper chain." + "Certificates don't form a proper chain." class BadContactURL(RPKI_Exception): - "Error trying to parse contact URL." + "Error trying to parse contact URL." class BadClassNameSyntax(RPKI_Exception): - "Illegal syntax for a class_name." + "Illegal syntax for a class_name." class BadIssueResponse(RPKI_Exception): - "issue_response PDU with wrong number of classes or certificates." + "issue_response PDU with wrong number of classes or certificates." class NotImplementedYet(RPKI_Exception): - "Internal error -- not implemented yet." + "Internal error -- not implemented yet." class BadPKCS10(RPKI_Exception): - "Bad PKCS #10 object." + "Bad PKCS #10 object." class UpstreamError(RPKI_Exception): - "Received an error from upstream." + "Received an error from upstream." class ChildNotFound(RPKI_Exception): - "Could not find specified child in database." + "Could not find specified child in database." class BSCNotFound(RPKI_Exception): - "Could not find specified BSC in database." + "Could not find specified BSC in database." class BadSender(RPKI_Exception): - "Unexpected XML sender value." + "Unexpected XML sender value." class ClassNameMismatch(RPKI_Exception): - "class_name does not match child context." + "class_name does not match child context." class ClassNameUnknown(RPKI_Exception): - "Unknown class_name." + "Unknown class_name." class SKIMismatch(RPKI_Exception): - "SKI value in response does not match request." + "SKI value in response does not match request." class SubprocessError(RPKI_Exception): - "Subprocess returned unexpected error." + "Subprocess returned unexpected error." class BadIRDBReply(RPKI_Exception): - "Unexpected reply to IRDB query." + "Unexpected reply to IRDB query." class NotFound(RPKI_Exception): - "Object not found in database." + "Object not found in database." class MustBePrefix(RPKI_Exception): - "Resource range cannot be expressed as a prefix." + "Resource range cannot be expressed as a prefix." class TLSValidationError(RPKI_Exception): - "TLS certificate validation error." + "TLS certificate validation error." class MultipleTLSEECert(TLSValidationError): - "Received more than one TLS EE certificate." + "Received more than one TLS EE certificate." class ReceivedTLSCACert(TLSValidationError): - "Received CA certificate via TLS." + "Received CA certificate via TLS." class WrongEContentType(RPKI_Exception): - "Received wrong CMS eContentType." + "Received wrong CMS eContentType." class EmptyPEM(RPKI_Exception): - "Couldn't find PEM block to convert." + "Couldn't find PEM block to convert." 
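# Illustrative sketch of how the exception hierarchy above is used: every class
# derives from RPKI_Exception, so code can raise the most specific subclass it
# knows about while callers trap the whole family in one place.  The lookup()
# helper and its published dict are hypothetical, not part of rpki.exceptions.

import logging
import rpki.exceptions

logger = logging.getLogger(__name__)

def lookup(published, uri):
    try:
        return published[uri]
    except KeyError:
        raise rpki.exceptions.NoObjectAtURI("No object published at %r" % uri)

try:
    lookup({}, "rsync://repo.example.org/missing.cer")
except rpki.exceptions.RPKI_Exception, e:
    logger.warning("RPKI operation failed: %s", e)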
class UnexpectedCMSCerts(RPKI_Exception): - "Received CMS certs when not expecting any." + "Received CMS certs when not expecting any." class UnexpectedCMSCRLs(RPKI_Exception): - "Received CMS CRLs when not expecting any." + "Received CMS CRLs when not expecting any." class MissingCMSEEcert(RPKI_Exception): - "Didn't receive CMS EE cert when expecting one." + "Didn't receive CMS EE cert when expecting one." class MissingCMSCRL(RPKI_Exception): - "Didn't receive CMS CRL when expecting one." + "Didn't receive CMS CRL when expecting one." class UnparsableCMSDER(RPKI_Exception): - "Alleged CMS DER wasn't parsable." + "Alleged CMS DER wasn't parsable." class CMSCRLNotSet(RPKI_Exception): - "CMS CRL has not been configured." + "CMS CRL has not been configured." class ServerShuttingDown(RPKI_Exception): - "Server is shutting down." + "Server is shutting down." class NoActiveCA(RPKI_Exception): - "No active ca_detail for specified class." + "No active ca_detail for specified class." class BadClientURL(RPKI_Exception): - "URL given to HTTP client does not match profile." + "URL given to HTTP client does not match profile." class ClientNotFound(RPKI_Exception): - "Could not find specified client in database." + "Could not find specified client in database." class BadExtension(RPKI_Exception): - "Forbidden X.509 extension." + "Forbidden X.509 extension." class ForbiddenURI(RPKI_Exception): - "Forbidden URI, does not start with correct base URI." + "Forbidden URI, does not start with correct base URI." class HTTPClientAborted(RPKI_Exception): - "HTTP client connection closed while in request-sent state." + "HTTP client connection closed while in request-sent state." class BadPublicationReply(RPKI_Exception): - "Unexpected reply to publication query." + "Unexpected reply to publication query." class DuplicateObject(RPKI_Exception): - "Attempt to create an object that already exists." + "Attempt to create an object that already exists." class EmptyROAPrefixList(RPKI_Exception): - "Can't create ROA with an empty prefix list." + "Can't create ROA with an empty prefix list." class NoCoveringCertForROA(RPKI_Exception): - "Couldn't find a covering certificate to generate ROA." + "Couldn't find a covering certificate to generate ROA." class BSCNotReady(RPKI_Exception): - "BSC not yet in a usable state, signing_cert not set." + "BSC not yet in a usable state, signing_cert not set." class HTTPUnexpectedState(RPKI_Exception): - "HTTP event occurred in an unexpected state." + "HTTP event occurred in an unexpected state." class HTTPBadVersion(RPKI_Exception): - "HTTP couldn't parse HTTP version." + "HTTP couldn't parse HTTP version." class HandleTranslationError(RPKI_Exception): - "Internal error translating protocol handle -> SQL id." + "Internal error translating protocol handle -> SQL id." class NoObjectAtURI(RPKI_Exception): - "No object published at specified URI." + "No object published at specified URI." class ExistingObjectAtURI(RPKI_Exception): - "An object has already been published at specified URI." + "An object has already been published at specified URI." class DifferentObjectAtURI(RPKI_Exception): - "An object with a different hash exists at specified URI." + "An object with a different hash exists at specified URI." class CMSContentNotSet(RPKI_Exception): - """ - Inner content of a CMS_object has not been set. If object is known - to be valid, the .extract() method should be able to set the - content; otherwise, only the .verify() method (which checks - signatures) is safe. 
- """ + """ + Inner content of a CMS_object has not been set. If object is known + to be valid, the .extract() method should be able to set the + content; otherwise, only the .verify() method (which checks + signatures) is safe. + """ class HTTPTimeout(RPKI_Exception): - "HTTP connection timed out." + "HTTP connection timed out." class BadIPResource(RPKI_Exception): - "Parse failure for alleged IP resource string." + "Parse failure for alleged IP resource string." class BadROAPrefix(RPKI_Exception): - "Parse failure for alleged ROA prefix string." + "Parse failure for alleged ROA prefix string." class CommandParseFailure(RPKI_Exception): - "Failed to parse command line." + "Failed to parse command line." class CMSCertHasExpired(RPKI_Exception): - "CMS certificate has expired." + "CMS certificate has expired." class TrustedCMSCertHasExpired(RPKI_Exception): - "Trusted CMS certificate has expired." + "Trusted CMS certificate has expired." class MultipleCMSEECert(RPKI_Exception): - "Can't have more than one CMS EE certificate in validation chain." + "Can't have more than one CMS EE certificate in validation chain." class ResourceOverlap(RPKI_Exception): - "Overlapping resources in resource_set." + "Overlapping resources in resource_set." class CMSReplay(RPKI_Exception): - "Possible CMS replay attack detected." + "Possible CMS replay attack detected." class PastNotAfter(RPKI_Exception): - "Requested notAfter value is already in the past." + "Requested notAfter value is already in the past." class NullValidityInterval(RPKI_Exception): - "Requested validity interval is null." + "Requested validity interval is null." class BadX510DN(RPKI_Exception): - "X.510 distinguished name does not match profile." + "X.510 distinguished name does not match profile." class BadAutonomousSystemNumber(RPKI_Exception): - "Bad AutonomousSystem number." + "Bad AutonomousSystem number." class WrongEKU(RPKI_Exception): - "Extended Key Usage extension does not match profile." + "Extended Key Usage extension does not match profile." class UnexpectedUpDownResponse(RPKI_Exception): - "Up-down message is not of the expected type." + "Up-down message is not of the expected type." class BadContentType(RPKI_Exception): - "Bad HTTP Content-Type." + "Bad HTTP Content-Type." diff --git a/rpki/fields.py b/rpki/fields.py index a470e272..1390d4ac 100644 --- a/rpki/fields.py +++ b/rpki/fields.py @@ -35,78 +35,78 @@ logger = logging.getLogger(__name__) class EnumField(models.PositiveSmallIntegerField): - """ - An enumeration type that uses strings in Python and small integers - in SQL. - """ + """ + An enumeration type that uses strings in Python and small integers + in SQL. 
+ """ - description = "An enumeration type" + description = "An enumeration type" - __metaclass__ = models.SubfieldBase + __metaclass__ = models.SubfieldBase - def __init__(self, *args, **kwargs): - if isinstance(kwargs.get("choices"), (tuple, list)) and isinstance(kwargs["choices"][0], (str, unicode)): - kwargs["choices"] = tuple(enumerate(kwargs["choices"], 1)) - # Might need something here to handle string-valued default parameter - models.PositiveSmallIntegerField.__init__(self, *args, **kwargs) - self.enum_i2s = dict(self.flatchoices) - self.enum_s2i = dict((v, k) for k, v in self.flatchoices) + def __init__(self, *args, **kwargs): + if isinstance(kwargs.get("choices"), (tuple, list)) and isinstance(kwargs["choices"][0], (str, unicode)): + kwargs["choices"] = tuple(enumerate(kwargs["choices"], 1)) + # Might need something here to handle string-valued default parameter + models.PositiveSmallIntegerField.__init__(self, *args, **kwargs) + self.enum_i2s = dict(self.flatchoices) + self.enum_s2i = dict((v, k) for k, v in self.flatchoices) - def to_python(self, value): - return self.enum_i2s.get(value, value) + def to_python(self, value): + return self.enum_i2s.get(value, value) - def get_prep_value(self, value): - return self.enum_s2i.get(value, value) + def get_prep_value(self, value): + return self.enum_s2i.get(value, value) class SundialField(models.DateTimeField): - """ - A field type for our customized datetime objects. - """ - __metaclass__ = models.SubfieldBase + """ + A field type for our customized datetime objects. + """ + __metaclass__ = models.SubfieldBase - description = "A datetime type using our customized datetime objects" + description = "A datetime type using our customized datetime objects" - def to_python(self, value): - if isinstance(value, rpki.sundial.pydatetime.datetime): - return rpki.sundial.datetime.from_datetime( - models.DateTimeField.to_python(self, value)) - else: - return value + def to_python(self, value): + if isinstance(value, rpki.sundial.pydatetime.datetime): + return rpki.sundial.datetime.from_datetime( + models.DateTimeField.to_python(self, value)) + else: + return value - def get_prep_value(self, value): - if isinstance(value, rpki.sundial.datetime): - return value.to_datetime() - else: - return value + def get_prep_value(self, value): + if isinstance(value, rpki.sundial.datetime): + return value.to_datetime() + else: + return value class BlobField(models.Field): - """ - Old BLOB field type, predating Django's BinaryField type. + """ + Old BLOB field type, predating Django's BinaryField type. - Do not use, this is only here for backwards compatabilty during migrations. - """ + Do not use, this is only here for backwards compatabilty during migrations. 
+ """ - __metaclass__ = models.SubfieldBase - description = "Raw BLOB type without ASN.1 encoding/decoding" + __metaclass__ = models.SubfieldBase + description = "Raw BLOB type without ASN.1 encoding/decoding" - def __init__(self, *args, **kwargs): - self.blob_type = kwargs.pop("blob_type", None) - kwargs["serialize"] = False - kwargs["blank"] = True - kwargs["default"] = None - models.Field.__init__(self, *args, **kwargs) + def __init__(self, *args, **kwargs): + self.blob_type = kwargs.pop("blob_type", None) + kwargs["serialize"] = False + kwargs["blank"] = True + kwargs["default"] = None + models.Field.__init__(self, *args, **kwargs) - def db_type(self, connection): - if self.blob_type is not None: - return self.blob_type - elif connection.settings_dict['ENGINE'] == "django.db.backends.mysql": - return "LONGBLOB" - elif connection.settings_dict['ENGINE'] == "django.db.backends.posgresql": - return "bytea" - else: - return "BLOB" + def db_type(self, connection): + if self.blob_type is not None: + return self.blob_type + elif connection.settings_dict['ENGINE'] == "django.db.backends.mysql": + return "LONGBLOB" + elif connection.settings_dict['ENGINE'] == "django.db.backends.posgresql": + return "bytea" + else: + return "BLOB" # For reasons which now escape me, I had a few fields in the old @@ -124,70 +124,70 @@ class BlobField(models.Field): # backwards compatability during migrations, class DERField(models.BinaryField): - """ - Field class for DER objects, with automatic translation between - ASN.1 and Python types. This is an abstract class, concrete field - classes are derived from it. - """ - - def __init__(self, *args, **kwargs): - kwargs["blank"] = True - kwargs["default"] = None - super(DERField, self).__init__(*args, **kwargs) - - def deconstruct(self): - name, path, args, kwargs = super(DERField, self).deconstruct() - del kwargs["blank"] - del kwargs["default"] - return name, path, args, kwargs - - def from_db_value(self, value, expression, connection, context): - if value is not None: - value = self.rpki_type(DER = str(value)) - return value - - def to_python(self, value): - value = super(DERField, self).to_python(value) - if value is not None and not isinstance(value, self.rpki_type): - value = self.rpki_type(DER = str(value)) - return value - - def get_prep_value(self, value): - if value is not None: - value = value.get_DER() - return super(DERField, self).get_prep_value(value) + """ + Field class for DER objects, with automatic translation between + ASN.1 and Python types. This is an abstract class, concrete field + classes are derived from it. 
+ """ + + def __init__(self, *args, **kwargs): + kwargs["blank"] = True + kwargs["default"] = None + super(DERField, self).__init__(*args, **kwargs) + + def deconstruct(self): + name, path, args, kwargs = super(DERField, self).deconstruct() + del kwargs["blank"] + del kwargs["default"] + return name, path, args, kwargs + + def from_db_value(self, value, expression, connection, context): + if value is not None: + value = self.rpki_type(DER = str(value)) + return value + + def to_python(self, value): + value = super(DERField, self).to_python(value) + if value is not None and not isinstance(value, self.rpki_type): + value = self.rpki_type(DER = str(value)) + return value + + def get_prep_value(self, value): + if value is not None: + value = value.get_DER() + return super(DERField, self).get_prep_value(value) class CertificateField(DERField): - description = "X.509 certificate" - rpki_type = rpki.x509.X509 + description = "X.509 certificate" + rpki_type = rpki.x509.X509 class RSAPrivateKeyField(DERField): - description = "RSA keypair" - rpki_type = rpki.x509.RSA + description = "RSA keypair" + rpki_type = rpki.x509.RSA KeyField = RSAPrivateKeyField class PublicKeyField(DERField): - description = "RSA keypair" - rpki_type = rpki.x509.PublicKey + description = "RSA keypair" + rpki_type = rpki.x509.PublicKey class CRLField(DERField): - description = "Certificate Revocation List" - rpki_type = rpki.x509.CRL + description = "Certificate Revocation List" + rpki_type = rpki.x509.CRL class PKCS10Field(DERField): - description = "PKCS #10 certificate request" - rpki_type = rpki.x509.PKCS10 + description = "PKCS #10 certificate request" + rpki_type = rpki.x509.PKCS10 class ManifestField(DERField): - description = "RPKI Manifest" - rpki_type = rpki.x509.SignedManifest + description = "RPKI Manifest" + rpki_type = rpki.x509.SignedManifest class ROAField(DERField): - description = "ROA" - rpki_type = rpki.x509.ROA + description = "ROA" + rpki_type = rpki.x509.ROA class GhostbusterField(DERField): - description = "Ghostbuster Record" - rpki_type = rpki.x509.Ghostbuster + description = "Ghostbuster Record" + rpki_type = rpki.x509.Ghostbuster diff --git a/rpki/gui/app/forms.py b/rpki/gui/app/forms.py index 306b8dce..4a95c8da 100644 --- a/rpki/gui/app/forms.py +++ b/rpki/gui/app/forms.py @@ -170,105 +170,105 @@ def ROARequestFormFactory(conf): """ class Cls(forms.Form): - """Form for entering a ROA request. - - Handles both IPv4 and IPv6.""" - - prefix = forms.CharField( - widget=forms.TextInput(attrs={ - 'autofocus': 'true', 'placeholder': 'Prefix', - 'class': 'span4' - }) - ) - max_prefixlen = forms.CharField( - required=False, - widget=forms.TextInput(attrs={ - 'placeholder': 'Max len', - 'class': 'span1' - }) - ) - asn = forms.IntegerField( - widget=forms.TextInput(attrs={ - 'placeholder': 'ASN', - 'class': 'span1' - }) - ) - protect_children = forms.BooleanField(required=False) - - def __init__(self, *args, **kwargs): - kwargs['auto_id'] = False - super(Cls, self).__init__(*args, **kwargs) - self.conf = conf # conf is the arg to ROARequestFormFactory - self.inline = True - self.use_table = False - - def _as_resource_range(self): - """Convert the prefix in the form to a - rpki.resource_set.resource_range_ip object. - - If there is no mask provided, assume the closest classful mask. 
- - """ - prefix = self.cleaned_data.get('prefix') - if '/' not in prefix: - p = IPAddress(prefix) - - # determine the first nonzero bit starting from the lsb and - # subtract from the address size to find the closest classful - # mask that contains this single address - prefixlen = 0 - while (p != 0) and (p & 1) == 0: - prefixlen = prefixlen + 1 - p = p >> 1 - mask = p.bits - (8 * (prefixlen / 8)) - prefix = prefix + '/' + str(mask) - - return resource_range_ip.parse_str(prefix) - - def clean_asn(self): - value = self.cleaned_data.get('asn') - if value < 0: - raise forms.ValidationError('AS must be a positive value or 0') - return value - - def clean_prefix(self): - try: - r = self._as_resource_range() - except: - raise forms.ValidationError('invalid prefix') - - manager = models.ResourceRangeAddressV4 if r.version == 4 else models.ResourceRangeAddressV6 - if not manager.objects.filter(cert__conf=self.conf, - prefix_min__lte=r.min, - prefix_max__gte=r.max).exists(): - raise forms.ValidationError('prefix is not allocated to you') - return str(r) - - def clean_max_prefixlen(self): - v = self.cleaned_data.get('max_prefixlen') - if v: - if v[0] == '/': - v = v[1:] # allow user to specify /24 - try: - if int(v) < 0: - raise forms.ValidationError('max prefix length must be positive or 0') - except ValueError: - raise forms.ValidationError('invalid integer value') - return v - - def clean(self): - if 'prefix' in self.cleaned_data: - r = self._as_resource_range() - max_prefixlen = self.cleaned_data.get('max_prefixlen') - max_prefixlen = int(max_prefixlen) if max_prefixlen else r.prefixlen() - if max_prefixlen < r.prefixlen(): - raise forms.ValidationError( - 'max prefix length must be greater than or equal to the prefix length') - if max_prefixlen > r.min.bits: - raise forms.ValidationError( - 'max prefix length (%d) is out of range for IP version (%d)' % (max_prefixlen, r.min.bits)) - self.cleaned_data['max_prefixlen'] = str(max_prefixlen) - return self.cleaned_data + """Form for entering a ROA request. + + Handles both IPv4 and IPv6.""" + + prefix = forms.CharField( + widget=forms.TextInput(attrs={ + 'autofocus': 'true', 'placeholder': 'Prefix', + 'class': 'span4' + }) + ) + max_prefixlen = forms.CharField( + required=False, + widget=forms.TextInput(attrs={ + 'placeholder': 'Max len', + 'class': 'span1' + }) + ) + asn = forms.IntegerField( + widget=forms.TextInput(attrs={ + 'placeholder': 'ASN', + 'class': 'span1' + }) + ) + protect_children = forms.BooleanField(required=False) + + def __init__(self, *args, **kwargs): + kwargs['auto_id'] = False + super(Cls, self).__init__(*args, **kwargs) + self.conf = conf # conf is the arg to ROARequestFormFactory + self.inline = True + self.use_table = False + + def _as_resource_range(self): + """Convert the prefix in the form to a + rpki.resource_set.resource_range_ip object. + + If there is no mask provided, assume the closest classful mask. 
+ + """ + prefix = self.cleaned_data.get('prefix') + if '/' not in prefix: + p = IPAddress(prefix) + + # determine the first nonzero bit starting from the lsb and + # subtract from the address size to find the closest classful + # mask that contains this single address + prefixlen = 0 + while (p != 0) and (p & 1) == 0: + prefixlen = prefixlen + 1 + p = p >> 1 + mask = p.bits - (8 * (prefixlen / 8)) + prefix = prefix + '/' + str(mask) + + return resource_range_ip.parse_str(prefix) + + def clean_asn(self): + value = self.cleaned_data.get('asn') + if value < 0: + raise forms.ValidationError('AS must be a positive value or 0') + return value + + def clean_prefix(self): + try: + r = self._as_resource_range() + except: + raise forms.ValidationError('invalid prefix') + + manager = models.ResourceRangeAddressV4 if r.version == 4 else models.ResourceRangeAddressV6 + if not manager.objects.filter(cert__conf=self.conf, + prefix_min__lte=r.min, + prefix_max__gte=r.max).exists(): + raise forms.ValidationError('prefix is not allocated to you') + return str(r) + + def clean_max_prefixlen(self): + v = self.cleaned_data.get('max_prefixlen') + if v: + if v[0] == '/': + v = v[1:] # allow user to specify /24 + try: + if int(v) < 0: + raise forms.ValidationError('max prefix length must be positive or 0') + except ValueError: + raise forms.ValidationError('invalid integer value') + return v + + def clean(self): + if 'prefix' in self.cleaned_data: + r = self._as_resource_range() + max_prefixlen = self.cleaned_data.get('max_prefixlen') + max_prefixlen = int(max_prefixlen) if max_prefixlen else r.prefixlen() + if max_prefixlen < r.prefixlen(): + raise forms.ValidationError( + 'max prefix length must be greater than or equal to the prefix length') + if max_prefixlen > r.min.bits: + raise forms.ValidationError( + 'max prefix length (%d) is out of range for IP version (%d)' % (max_prefixlen, r.min.bits)) + self.cleaned_data['max_prefixlen'] = str(max_prefixlen) + return self.cleaned_data return Cls diff --git a/rpki/gui/app/views.py b/rpki/gui/app/views.py index 28b8a498..1d468a07 100644 --- a/rpki/gui/app/views.py +++ b/rpki/gui/app/views.py @@ -148,27 +148,27 @@ def generic_import(request, queryset, configure, form_class=None, if handle == '': handle = None try: - # configure_repository returns None, so can't use tuple expansion - # here. Unpack the tuple below if post_import_redirect is None. - r = configure(z, tmpf.name, handle) + # configure_repository returns None, so can't use tuple expansion + # here. Unpack the tuple below if post_import_redirect is None. 
+ r = configure(z, tmpf.name, handle) except lxml.etree.XMLSyntaxError as e: - logger.exception('caught XMLSyntaxError while parsing uploaded file') + logger.exception('caught XMLSyntaxError while parsing uploaded file') messages.error( request, 'The uploaded file has an invalid XML syntax' ) else: - # force rpkid run now - z.synchronize_ca(poke=True) - if post_import_redirect: - url = post_import_redirect - else: - _, handle = r - url = queryset.get(issuer=conf, - handle=handle).get_absolute_url() - return http.HttpResponseRedirect(url) + # force rpkid run now + z.synchronize_ca(poke=True) + if post_import_redirect: + url = post_import_redirect + else: + _, handle = r + url = queryset.get(issuer=conf, + handle=handle).get_absolute_url() + return http.HttpResponseRedirect(url) finally: - os.remove(tmpf.name) + os.remove(tmpf.name) else: form = form_class() @@ -474,10 +474,10 @@ def child_add_prefix(request, pk): child.address_ranges.create(start_ip=str(r.min), end_ip=str(r.max), version=version) Zookeeper( - handle=conf.handle, - logstream=logstream, - disable_signal_handlers=True - ).run_rpkid_now() + handle=conf.handle, + logstream=logstream, + disable_signal_handlers=True + ).run_rpkid_now() return http.HttpResponseRedirect(child.get_absolute_url()) else: form = forms.AddNetForm(child=child) @@ -497,10 +497,10 @@ def child_add_asn(request, pk): r = resource_range_as.parse_str(asns) child.asns.create(start_as=r.min, end_as=r.max) Zookeeper( - handle=conf.handle, - logstream=logstream, - disable_signal_handlers=True - ).run_rpkid_now() + handle=conf.handle, + logstream=logstream, + disable_signal_handlers=True + ).run_rpkid_now() return http.HttpResponseRedirect(child.get_absolute_url()) else: form = forms.AddASNForm(child=child) @@ -531,10 +531,10 @@ def child_edit(request, pk): models.ChildASN.objects.filter(child=child).exclude(pk__in=form.cleaned_data.get('as_ranges')).delete() models.ChildNet.objects.filter(child=child).exclude(pk__in=form.cleaned_data.get('address_ranges')).delete() Zookeeper( - handle=conf.handle, - logstream=logstream, - disable_signal_handlers=True - ).run_rpkid_now() + handle=conf.handle, + logstream=logstream, + disable_signal_handlers=True + ).run_rpkid_now() return http.HttpResponseRedirect(child.get_absolute_url()) else: form = form_class(initial={ @@ -713,27 +713,27 @@ def roa_create_multi(request): v = [] rng.chop_into_prefixes(v) init.extend([{'asn': asn, 'prefix': str(p)} for p in v]) - extra = 0 if init else 1 + extra = 0 if init else 1 formset = formset_factory(forms.ROARequestFormFactory(conf), extra=extra)(initial=init) elif request.method == 'POST': formset = formset_factory(forms.ROARequestFormFactory(conf), extra=0)(request.POST, request.FILES) - # We need to check .has_changed() because .is_valid() will return true - # if the user clicks the Preview button without filling in the blanks - # in the ROA form, leaving the form invalid from this view's POV. + # We need to check .has_changed() because .is_valid() will return true + # if the user clicks the Preview button without filling in the blanks + # in the ROA form, leaving the form invalid from this view's POV. 
if formset.has_changed() and formset.is_valid(): routes = [] v = [] query = Q() # for matching routes roas = [] for form in formset: - asn = form.cleaned_data['asn'] - rng = resource_range_ip.parse_str(form.cleaned_data['prefix']) - max_prefixlen = int(form.cleaned_data['max_prefixlen']) + asn = form.cleaned_data['asn'] + rng = resource_range_ip.parse_str(form.cleaned_data['prefix']) + max_prefixlen = int(form.cleaned_data['max_prefixlen']) protect_children = form.cleaned_data['protect_children'] roas.append((rng, max_prefixlen, asn, protect_children)) - v.append({'prefix': str(rng), 'max_prefixlen': max_prefixlen, - 'asn': asn}) + v.append({'prefix': str(rng), 'max_prefixlen': max_prefixlen, + 'asn': asn}) query |= Q(prefix_min__gte=rng.min, prefix_max__lte=rng.max) @@ -1451,10 +1451,10 @@ class RouterImportView(FormView): def form_valid(self, form): conf = get_conf(self.request.user, self.request.session['handle']) - tmpf = NamedTemporaryFile(prefix='import', suffix='.xml', - delete=False) - tmpf.write(form.cleaned_data['xml'].read()) - tmpf.close() + tmpf = NamedTemporaryFile(prefix='import', suffix='.xml', + delete=False) + tmpf.write(form.cleaned_data['xml'].read()) + tmpf.close() z = Zookeeper(handle=conf.handle, disable_signal_handlers=True) z.add_router_certificate_request(tmpf.name) z.run_rpkid_now() diff --git a/rpki/gui/cacheview/tests.py b/rpki/gui/cacheview/tests.py index daca07bf..c2958c72 100644 --- a/rpki/gui/cacheview/tests.py +++ b/rpki/gui/cacheview/tests.py @@ -21,4 +21,3 @@ Another way to test that 1 + 1 is equal to 2. >>> 1 + 1 == 2 True """} - diff --git a/rpki/http_simple.py b/rpki/http_simple.py index ee9cac35..6f73def5 100644 --- a/rpki/http_simple.py +++ b/rpki/http_simple.py @@ -31,106 +31,106 @@ default_content_type = "application/x-rpki" class HTTPRequestHandler(BaseHTTPServer.BaseHTTPRequestHandler): - """ - HTTP request handler simple RPKI servers. - """ - - def do_POST(self): - try: - content_type = self.headers.get("Content-Type") - content_length = self.headers.get("Content-Length") - for handler_path, handler, handler_content_type in self.rpki_handlers: - if self.path.startswith(handler_path) and content_type in handler_content_type: - return handler(self, - self.rfile.read() - if content_length is None else - self.rfile.read(int(content_length))) - self.send_error(404, "No handler for path %s" % self.path) - except Exception, e: - logger.exception("Unhandled exception") - self.send_error(501, "Unhandled exception %s" % e) - - def send_cms_response(self, der): - self.send_response(200) - self.send_header("Content-Type", default_content_type) - self.send_header("Content-Length", str(len(der))) - self.end_headers() - self.wfile.write(der) - - def log_message(self, *args): - logger.info(*args, extra = dict(context = "%s:%s" % self.client_address)) - - def send_error(self, code, message = None): - # BaseHTTPRequestHandler.send_error() generates HTML error messages, - # which we don't want, so we override the method to suppress this. - self.send_response(code, message) - self.send_header("Content-Type", default_content_type) - self.send_header("Connection", "close") - self.end_headers() + """ + HTTP request handler simple RPKI servers. 
+ """ + + def do_POST(self): + try: + content_type = self.headers.get("Content-Type") + content_length = self.headers.get("Content-Length") + for handler_path, handler, handler_content_type in self.rpki_handlers: + if self.path.startswith(handler_path) and content_type in handler_content_type: + return handler(self, + self.rfile.read() + if content_length is None else + self.rfile.read(int(content_length))) + self.send_error(404, "No handler for path %s" % self.path) + except Exception, e: + logger.exception("Unhandled exception") + self.send_error(501, "Unhandled exception %s" % e) + + def send_cms_response(self, der): + self.send_response(200) + self.send_header("Content-Type", default_content_type) + self.send_header("Content-Length", str(len(der))) + self.end_headers() + self.wfile.write(der) + + def log_message(self, *args): + logger.info(*args, extra = dict(context = "%s:%s" % self.client_address)) + + def send_error(self, code, message = None): + # BaseHTTPRequestHandler.send_error() generates HTML error messages, + # which we don't want, so we override the method to suppress this. + self.send_response(code, message) + self.send_header("Content-Type", default_content_type) + self.send_header("Connection", "close") + self.end_headers() def server(handlers, port, host = ""): - """ - Run an HTTP server and wait (forever) for connections. - """ + """ + Run an HTTP server and wait (forever) for connections. + """ - if isinstance(handlers, (tuple, list)): - handlers = tuple(h[:3] if len(h) > 2 else (h[0], h[1], default_content_type) - for h in handlers) - else: - handlers = (("/", handlers, default_content_type),) + if isinstance(handlers, (tuple, list)): + handlers = tuple(h[:3] if len(h) > 2 else (h[0], h[1], default_content_type) + for h in handlers) + else: + handlers = (("/", handlers, default_content_type),) - class RequestHandler(HTTPRequestHandler): - rpki_handlers = handlers + class RequestHandler(HTTPRequestHandler): + rpki_handlers = handlers - BaseHTTPServer.HTTPServer((host, port), RequestHandler).serve_forever() + BaseHTTPServer.HTTPServer((host, port), RequestHandler).serve_forever() class BadURL(Exception): - "Bad contact URL" + "Bad contact URL" class RequestFailed(Exception): - "HTTP returned failure" + "HTTP returned failure" class BadContentType(Exception): - "Wrong HTTP Content-Type" + "Wrong HTTP Content-Type" def client(proto_cms_msg, client_key, client_cert, server_ta, server_cert, url, q_msg, debug = False, replay_track = None, client_crl = None, content_type = default_content_type): - """ - Issue single a query and return the response, handling all the CMS and XML goo. - """ + """ + Issue single a query and return the response, handling all the CMS and XML goo. 
+ """ - u = urlparse.urlparse(url) + u = urlparse.urlparse(url) - if u.scheme not in ("", "http") or u.username or u.password or u.params or u.query or u.fragment: - raise BadURL("Unusable URL %s", url) + if u.scheme not in ("", "http") or u.username or u.password or u.params or u.query or u.fragment: + raise BadURL("Unusable URL %s", url) - q_cms = proto_cms_msg() - q_der = q_cms.wrap(q_msg, client_key, client_cert, client_crl) + q_cms = proto_cms_msg() + q_der = q_cms.wrap(q_msg, client_key, client_cert, client_crl) - if debug: - debug.write("\n" + q_cms.pretty_print_content() + "\n") + if debug: + debug.write("\n" + q_cms.pretty_print_content() + "\n") - http = httplib.HTTPConnection(u.hostname, u.port or httplib.HTTP_PORT) - http.request("POST", u.path, q_der, {"Content-Type" : content_type}) - r = http.getresponse() + http = httplib.HTTPConnection(u.hostname, u.port or httplib.HTTP_PORT) + http.request("POST", u.path, q_der, {"Content-Type" : content_type}) + r = http.getresponse() - if r.status != 200: - raise RequestFailed("HTTP request failed with status %r reason %r" % (r.status, r.reason)) + if r.status != 200: + raise RequestFailed("HTTP request failed with status %r reason %r" % (r.status, r.reason)) - if r.getheader("Content-Type") != content_type: - raise BadContentType("HTTP Content-Type %r, expected %r" % (r.getheader("Content-Type"), content_type)) + if r.getheader("Content-Type") != content_type: + raise BadContentType("HTTP Content-Type %r, expected %r" % (r.getheader("Content-Type"), content_type)) - r_der = r.read() - r_cms = proto_cms_msg(DER = r_der) - r_msg = r_cms.unwrap((server_ta, server_cert)) + r_der = r.read() + r_cms = proto_cms_msg(DER = r_der) + r_msg = r_cms.unwrap((server_ta, server_cert)) - if replay_track is not None: - replay_track.cms_timestamp = r_cms.check_replay(replay_track.cms_timestamp, url) + if replay_track is not None: + replay_track.cms_timestamp = r_cms.check_replay(replay_track.cms_timestamp, url) - if debug: - debug.write("\n" + r_cms.pretty_print_content() + "\n") + if debug: + debug.write("\n" + r_cms.pretty_print_content() + "\n") - return r_msg + return r_msg diff --git a/rpki/ipaddrs.py b/rpki/ipaddrs.py index 25eefd0d..5117585c 100644 --- a/rpki/ipaddrs.py +++ b/rpki/ipaddrs.py @@ -48,99 +48,99 @@ once, here, thus avoiding a lot of duplicate code elsewhere. import socket, struct class v4addr(long): - """ - IPv4 address. + """ + IPv4 address. - Derived from long, but supports IPv4 print syntax. - """ + Derived from long, but supports IPv4 print syntax. + """ - bits = 32 - ipversion = 4 + bits = 32 + ipversion = 4 - def __new__(cls, x): - """ - Construct a v4addr object. - """ + def __new__(cls, x): + """ + Construct a v4addr object. + """ - if isinstance(x, unicode): - x = x.encode("ascii") - if isinstance(x, str): - return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split(".")))) - else: - return long.__new__(cls, x) + if isinstance(x, unicode): + x = x.encode("ascii") + if isinstance(x, str): + return cls.from_bytes(socket.inet_pton(socket.AF_INET, ".".join(str(int(i)) for i in x.split(".")))) + else: + return long.__new__(cls, x) - def to_bytes(self): - """ - Convert a v4addr object to a raw byte string. - """ + def to_bytes(self): + """ + Convert a v4addr object to a raw byte string. + """ - return struct.pack("!I", long(self)) + return struct.pack("!I", long(self)) - @classmethod - def from_bytes(cls, x): - """ - Convert from a raw byte string to a v4addr object. 
- """ + @classmethod + def from_bytes(cls, x): + """ + Convert from a raw byte string to a v4addr object. + """ - return cls(struct.unpack("!I", x)[0]) + return cls(struct.unpack("!I", x)[0]) - def __str__(self): - """ - Convert a v4addr object to string format. - """ + def __str__(self): + """ + Convert a v4addr object to string format. + """ - return socket.inet_ntop(socket.AF_INET, self.to_bytes()) + return socket.inet_ntop(socket.AF_INET, self.to_bytes()) class v6addr(long): - """ - IPv6 address. + """ + IPv6 address. - Derived from long, but supports IPv6 print syntax. - """ + Derived from long, but supports IPv6 print syntax. + """ - bits = 128 - ipversion = 6 + bits = 128 + ipversion = 6 - def __new__(cls, x): - """ - Construct a v6addr object. - """ + def __new__(cls, x): + """ + Construct a v6addr object. + """ - if isinstance(x, unicode): - x = x.encode("ascii") - if isinstance(x, str): - return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x)) - else: - return long.__new__(cls, x) + if isinstance(x, unicode): + x = x.encode("ascii") + if isinstance(x, str): + return cls.from_bytes(socket.inet_pton(socket.AF_INET6, x)) + else: + return long.__new__(cls, x) - def to_bytes(self): - """ - Convert a v6addr object to a raw byte string. - """ + def to_bytes(self): + """ + Convert a v6addr object to a raw byte string. + """ - return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF) + return struct.pack("!QQ", long(self) >> 64, long(self) & 0xFFFFFFFFFFFFFFFF) - @classmethod - def from_bytes(cls, x): - """ - Convert from a raw byte string to a v6addr object. - """ + @classmethod + def from_bytes(cls, x): + """ + Convert from a raw byte string to a v6addr object. + """ - x = struct.unpack("!QQ", x) - return cls((x[0] << 64) | x[1]) + x = struct.unpack("!QQ", x) + return cls((x[0] << 64) | x[1]) - def __str__(self): - """ - Convert a v6addr object to string format. - """ + def __str__(self): + """ + Convert a v6addr object to string format. + """ - return socket.inet_ntop(socket.AF_INET6, self.to_bytes()) + return socket.inet_ntop(socket.AF_INET6, self.to_bytes()) def parse(s): - """ - Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class. - """ + """ + Parse a string as either an IPv4 or IPv6 address, and return object of appropriate class. + """ - if isinstance(s, unicode): - s = s.encode("ascii") - return v6addr(s) if ":" in s else v4addr(s) + if isinstance(s, unicode): + s = s.encode("ascii") + return v6addr(s) if ":" in s else v4addr(s) diff --git a/rpki/irdb/models.py b/rpki/irdb/models.py index d2d6256b..4ff5734a 100644 --- a/rpki/irdb/models.py +++ b/rpki/irdb/models.py @@ -65,480 +65,480 @@ ee_certificate_lifetime = rpki.sundial.timedelta(days = 60) # Field classes class HandleField(django.db.models.CharField): - """ - A handle field class. Replace this with SlugField? - """ + """ + A handle field class. Replace this with SlugField? 
+ """ - description = 'A "handle" in one of the RPKI protocols' + description = 'A "handle" in one of the RPKI protocols' - def __init__(self, *args, **kwargs): - kwargs["max_length"] = 120 - django.db.models.CharField.__init__(self, *args, **kwargs) + def __init__(self, *args, **kwargs): + kwargs["max_length"] = 120 + django.db.models.CharField.__init__(self, *args, **kwargs) class SignedReferralField(DERField): - description = "CMS signed object containing XML" - rpki_type = rpki.x509.SignedReferral + description = "CMS signed object containing XML" + rpki_type = rpki.x509.SignedReferral # Custom managers class CertificateManager(django.db.models.Manager): - def get_or_certify(self, **kwargs): - """ - Sort of like .get_or_create(), but for models containing - certificates which need to be generated based on other fields. - - Takes keyword arguments like .get(), checks for existing object. - If none, creates a new one; if found an existing object but some - of the non-key fields don't match, updates the existing object. - Runs certification method for new or updated objects. Returns a - tuple consisting of the object and a boolean indicating whether - anything has changed. - """ + def get_or_certify(self, **kwargs): + """ + Sort of like .get_or_create(), but for models containing + certificates which need to be generated based on other fields. + + Takes keyword arguments like .get(), checks for existing object. + If none, creates a new one; if found an existing object but some + of the non-key fields don't match, updates the existing object. + Runs certification method for new or updated objects. Returns a + tuple consisting of the object and a boolean indicating whether + anything has changed. + """ - changed = False + changed = False - try: - obj = self.get(**self._get_or_certify_keys(kwargs)) + try: + obj = self.get(**self._get_or_certify_keys(kwargs)) - except self.model.DoesNotExist: - obj = self.model(**kwargs) - changed = True + except self.model.DoesNotExist: + obj = self.model(**kwargs) + changed = True - else: - for k in kwargs: - if getattr(obj, k) != kwargs[k]: - setattr(obj, k, kwargs[k]) - changed = True + else: + for k in kwargs: + if getattr(obj, k) != kwargs[k]: + setattr(obj, k, kwargs[k]) + changed = True - if changed: - obj.avow() - obj.save() + if changed: + obj.avow() + obj.save() - return obj, changed + return obj, changed - def _get_or_certify_keys(self, kwargs): - assert len(self.model._meta.unique_together) == 1 - return dict((k, kwargs[k]) for k in self.model._meta.unique_together[0]) + def _get_or_certify_keys(self, kwargs): + assert len(self.model._meta.unique_together) == 1 + return dict((k, kwargs[k]) for k in self.model._meta.unique_together[0]) class ResourceHolderCAManager(CertificateManager): - def _get_or_certify_keys(self, kwargs): - return { "handle" : kwargs["handle"] } + def _get_or_certify_keys(self, kwargs): + return { "handle" : kwargs["handle"] } class ServerCAManager(CertificateManager): - def _get_or_certify_keys(self, kwargs): - return { "pk" : 1 } + def _get_or_certify_keys(self, kwargs): + return { "pk" : 1 } class ResourceHolderEEManager(CertificateManager): - def _get_or_certify_keys(self, kwargs): - return { "issuer" : kwargs["issuer"] } + def _get_or_certify_keys(self, kwargs): + return { "issuer" : kwargs["issuer"] } ### class CA(django.db.models.Model): - certificate = CertificateField() - private_key = RSAPrivateKeyField() - latest_crl = CRLField() - - # Might want to bring these into line with what rpkid does. 
Current - # variables here were chosen to map easily to what OpenSSL command - # line tool was keeping on disk. - - next_serial = django.db.models.BigIntegerField(default = 1) - next_crl_number = django.db.models.BigIntegerField(default = 1) - last_crl_update = SundialField() - next_crl_update = SundialField() - - class Meta: - abstract = True - - def avow(self): - if self.private_key is None: - self.private_key = rpki.x509.RSA.generate(quiet = True) - now = rpki.sundial.now() - notAfter = now + ca_certificate_lifetime - self.certificate = rpki.x509.X509.bpki_self_certify( - keypair = self.private_key, - subject_name = self.subject_name, - serial = self.next_serial, - now = now, - notAfter = notAfter) - self.next_serial += 1 - self.generate_crl() - return self.certificate - - def certify(self, subject_name, subject_key, validity_interval, is_ca, pathLenConstraint = None): - now = rpki.sundial.now() - notAfter = now + validity_interval - result = self.certificate.bpki_certify( - keypair = self.private_key, - subject_name = subject_name, - subject_key = subject_key, - serial = self.next_serial, - now = now, - notAfter = notAfter, - is_ca = is_ca, - pathLenConstraint = pathLenConstraint) - self.next_serial += 1 - return result - - def revoke(self, cert): - Revocation.objects.create( - issuer = self, - revoked = rpki.sundial.now(), - serial = cert.certificate.getSerial(), - expires = cert.certificate.getNotAfter() + crl_interval) - cert.delete() - self.generate_crl() - - def generate_crl(self): - now = rpki.sundial.now() - self.revocations.filter(expires__lt = now).delete() - revoked = [(r.serial, r.revoked) for r in self.revocations.all()] - self.latest_crl = rpki.x509.CRL.generate( - keypair = self.private_key, - issuer = self.certificate, - serial = self.next_crl_number, - thisUpdate = now, - nextUpdate = now + crl_interval, - revokedCertificates = revoked) - self.last_crl_update = now - self.next_crl_update = now + crl_interval - self.next_crl_number += 1 + certificate = CertificateField() + private_key = RSAPrivateKeyField() + latest_crl = CRLField() + + # Might want to bring these into line with what rpkid does. Current + # variables here were chosen to map easily to what OpenSSL command + # line tool was keeping on disk. 
+ + next_serial = django.db.models.BigIntegerField(default = 1) + next_crl_number = django.db.models.BigIntegerField(default = 1) + last_crl_update = SundialField() + next_crl_update = SundialField() + + class Meta: + abstract = True + + def avow(self): + if self.private_key is None: + self.private_key = rpki.x509.RSA.generate(quiet = True) + now = rpki.sundial.now() + notAfter = now + ca_certificate_lifetime + self.certificate = rpki.x509.X509.bpki_self_certify( + keypair = self.private_key, + subject_name = self.subject_name, + serial = self.next_serial, + now = now, + notAfter = notAfter) + self.next_serial += 1 + self.generate_crl() + return self.certificate + + def certify(self, subject_name, subject_key, validity_interval, is_ca, pathLenConstraint = None): + now = rpki.sundial.now() + notAfter = now + validity_interval + result = self.certificate.bpki_certify( + keypair = self.private_key, + subject_name = subject_name, + subject_key = subject_key, + serial = self.next_serial, + now = now, + notAfter = notAfter, + is_ca = is_ca, + pathLenConstraint = pathLenConstraint) + self.next_serial += 1 + return result + + def revoke(self, cert): + Revocation.objects.create( + issuer = self, + revoked = rpki.sundial.now(), + serial = cert.certificate.getSerial(), + expires = cert.certificate.getNotAfter() + crl_interval) + cert.delete() + self.generate_crl() + + def generate_crl(self): + now = rpki.sundial.now() + self.revocations.filter(expires__lt = now).delete() + revoked = [(r.serial, r.revoked) for r in self.revocations.all()] + self.latest_crl = rpki.x509.CRL.generate( + keypair = self.private_key, + issuer = self.certificate, + serial = self.next_crl_number, + thisUpdate = now, + nextUpdate = now + crl_interval, + revokedCertificates = revoked) + self.last_crl_update = now + self.next_crl_update = now + crl_interval + self.next_crl_number += 1 class ServerCA(CA): - objects = ServerCAManager() + objects = ServerCAManager() - def __unicode__(self): - return "" + def __unicode__(self): + return "" - @property - def subject_name(self): - if self.certificate is not None: - return self.certificate.getSubject() - else: - return rpki.x509.X501DN.from_cn("%s BPKI server CA" % socket.gethostname()) + @property + def subject_name(self): + if self.certificate is not None: + return self.certificate.getSubject() + else: + return rpki.x509.X501DN.from_cn("%s BPKI server CA" % socket.gethostname()) class ResourceHolderCA(CA): - handle = HandleField(unique = True) - objects = ResourceHolderCAManager() + handle = HandleField(unique = True) + objects = ResourceHolderCAManager() - def __unicode__(self): - return self.handle + def __unicode__(self): + return self.handle - @property - def subject_name(self): - if self.certificate is not None: - return self.certificate.getSubject() - else: - return rpki.x509.X501DN.from_cn("%s BPKI resource CA" % self.handle) + @property + def subject_name(self): + if self.certificate is not None: + return self.certificate.getSubject() + else: + return rpki.x509.X501DN.from_cn("%s BPKI resource CA" % self.handle) class Certificate(django.db.models.Model): - certificate = CertificateField() - objects = CertificateManager() + certificate = CertificateField() + objects = CertificateManager() - class Meta: - abstract = True - unique_together = ("issuer", "handle") + class Meta: + abstract = True + unique_together = ("issuer", "handle") - def revoke(self): - self.issuer.revoke(self) + def revoke(self): + self.issuer.revoke(self) class CrossCertification(Certificate): - handle = 
HandleField() - ta = CertificateField() + handle = HandleField() + ta = CertificateField() - class Meta: - abstract = True + class Meta: + abstract = True - def avow(self): - self.certificate = self.issuer.certify( - subject_name = self.ta.getSubject(), - subject_key = self.ta.getPublicKey(), - validity_interval = ee_certificate_lifetime, - is_ca = True, - pathLenConstraint = 0) + def avow(self): + self.certificate = self.issuer.certify( + subject_name = self.ta.getSubject(), + subject_key = self.ta.getPublicKey(), + validity_interval = ee_certificate_lifetime, + is_ca = True, + pathLenConstraint = 0) - def __unicode__(self): - return self.handle + def __unicode__(self): + return self.handle class HostedCA(Certificate): - issuer = django.db.models.ForeignKey(ServerCA) - hosted = django.db.models.OneToOneField(ResourceHolderCA, related_name = "hosted_by") + issuer = django.db.models.ForeignKey(ServerCA) + hosted = django.db.models.OneToOneField(ResourceHolderCA, related_name = "hosted_by") - def avow(self): - self.certificate = self.issuer.certify( - subject_name = self.hosted.certificate.getSubject(), - subject_key = self.hosted.certificate.getPublicKey(), - validity_interval = ee_certificate_lifetime, - is_ca = True, - pathLenConstraint = 1) + def avow(self): + self.certificate = self.issuer.certify( + subject_name = self.hosted.certificate.getSubject(), + subject_key = self.hosted.certificate.getPublicKey(), + validity_interval = ee_certificate_lifetime, + is_ca = True, + pathLenConstraint = 1) - class Meta: - unique_together = ("issuer", "hosted") + class Meta: + unique_together = ("issuer", "hosted") - def __unicode__(self): - return self.hosted.handle + def __unicode__(self): + return self.hosted.handle class Revocation(django.db.models.Model): - serial = django.db.models.BigIntegerField() - revoked = SundialField() - expires = SundialField() + serial = django.db.models.BigIntegerField() + revoked = SundialField() + expires = SundialField() - class Meta: - abstract = True - unique_together = ("issuer", "serial") + class Meta: + abstract = True + unique_together = ("issuer", "serial") class ServerRevocation(Revocation): - issuer = django.db.models.ForeignKey(ServerCA, related_name = "revocations") + issuer = django.db.models.ForeignKey(ServerCA, related_name = "revocations") class ResourceHolderRevocation(Revocation): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "revocations") + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "revocations") class EECertificate(Certificate): - private_key = RSAPrivateKeyField() + private_key = RSAPrivateKeyField() - class Meta: - abstract = True + class Meta: + abstract = True - def avow(self): - if self.private_key is None: - self.private_key = rpki.x509.RSA.generate(quiet = True) - self.certificate = self.issuer.certify( - subject_name = self.subject_name, - subject_key = self.private_key.get_public(), - validity_interval = ee_certificate_lifetime, - is_ca = False) + def avow(self): + if self.private_key is None: + self.private_key = rpki.x509.RSA.generate(quiet = True) + self.certificate = self.issuer.certify( + subject_name = self.subject_name, + subject_key = self.private_key.get_public(), + validity_interval = ee_certificate_lifetime, + is_ca = False) class ServerEE(EECertificate): - issuer = django.db.models.ForeignKey(ServerCA, related_name = "ee_certificates") - purpose = EnumField(choices = ("rpkid", "pubd", "irdbd", "irbe")) + issuer = django.db.models.ForeignKey(ServerCA, related_name = 
"ee_certificates") + purpose = EnumField(choices = ("rpkid", "pubd", "irdbd", "irbe")) - class Meta: - unique_together = ("issuer", "purpose") + class Meta: + unique_together = ("issuer", "purpose") - @property - def subject_name(self): - return rpki.x509.X501DN.from_cn("%s BPKI %s EE" % (socket.gethostname(), - self.get_purpose_display())) + @property + def subject_name(self): + return rpki.x509.X501DN.from_cn("%s BPKI %s EE" % (socket.gethostname(), + self.get_purpose_display())) class Referral(EECertificate): - issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "referral_certificate") - objects = ResourceHolderEEManager() + issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "referral_certificate") + objects = ResourceHolderEEManager() - @property - def subject_name(self): - return rpki.x509.X501DN.from_cn("%s BPKI Referral EE" % self.issuer.handle) + @property + def subject_name(self): + return rpki.x509.X501DN.from_cn("%s BPKI Referral EE" % self.issuer.handle) class Turtle(django.db.models.Model): - service_uri = django.db.models.CharField(max_length = 255) + service_uri = django.db.models.CharField(max_length = 255) class Rootd(EECertificate, Turtle): - issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "rootd") - objects = ResourceHolderEEManager() + issuer = django.db.models.OneToOneField(ResourceHolderCA, related_name = "rootd") + objects = ResourceHolderEEManager() - @property - def subject_name(self): - return rpki.x509.X501DN.from_cn("%s BPKI rootd EE" % self.issuer.handle) + @property + def subject_name(self): + return rpki.x509.X501DN.from_cn("%s BPKI rootd EE" % self.issuer.handle) class BSC(Certificate): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "bscs") - handle = HandleField() - pkcs10 = PKCS10Field() + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "bscs") + handle = HandleField() + pkcs10 = PKCS10Field() - def avow(self): - self.certificate = self.issuer.certify( - subject_name = self.pkcs10.getSubject(), - subject_key = self.pkcs10.getPublicKey(), - validity_interval = ee_certificate_lifetime, - is_ca = False) + def avow(self): + self.certificate = self.issuer.certify( + subject_name = self.pkcs10.getSubject(), + subject_key = self.pkcs10.getPublicKey(), + validity_interval = ee_certificate_lifetime, + is_ca = False) - def __unicode__(self): - return self.handle + def __unicode__(self): + return self.handle class ResourceSet(django.db.models.Model): - valid_until = SundialField() + valid_until = SundialField() - class Meta: - abstract = True + class Meta: + abstract = True - @property - def resource_bag(self): - raw_asn, raw_net = self._select_resource_bag() - asns = rpki.resource_set.resource_set_as.from_django( - (a.start_as, a.end_as) for a in raw_asn) - ipv4 = rpki.resource_set.resource_set_ipv4.from_django( - (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv4") - ipv6 = rpki.resource_set.resource_set_ipv6.from_django( - (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv6") - return rpki.resource_set.resource_bag( - valid_until = self.valid_until, asn = asns, v4 = ipv4, v6 = ipv6) + @property + def resource_bag(self): + raw_asn, raw_net = self._select_resource_bag() + asns = rpki.resource_set.resource_set_as.from_django( + (a.start_as, a.end_as) for a in raw_asn) + ipv4 = rpki.resource_set.resource_set_ipv4.from_django( + (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv4") + ipv6 = 
rpki.resource_set.resource_set_ipv6.from_django( + (a.start_ip, a.end_ip) for a in raw_net if a.version == "IPv6") + return rpki.resource_set.resource_bag( + valid_until = self.valid_until, asn = asns, v4 = ipv4, v6 = ipv6) - # Writing of .setter method deferred until something needs it. + # Writing of .setter method deferred until something needs it. class ResourceSetASN(django.db.models.Model): - start_as = django.db.models.BigIntegerField() - end_as = django.db.models.BigIntegerField() + start_as = django.db.models.BigIntegerField() + end_as = django.db.models.BigIntegerField() - class Meta: - abstract = True + class Meta: + abstract = True - def as_resource_range(self): - return rpki.resource_set.resource_range_as(self.start_as, self.end_as) + def as_resource_range(self): + return rpki.resource_set.resource_range_as(self.start_as, self.end_as) class ResourceSetNet(django.db.models.Model): - start_ip = django.db.models.CharField(max_length = 40) - end_ip = django.db.models.CharField(max_length = 40) - version = EnumField(choices = ip_version_choices) + start_ip = django.db.models.CharField(max_length = 40) + end_ip = django.db.models.CharField(max_length = 40) + version = EnumField(choices = ip_version_choices) - class Meta: - abstract = True + class Meta: + abstract = True - def as_resource_range(self): - return rpki.resource_set.resource_range_ip.from_strings(self.start_ip, self.end_ip) + def as_resource_range(self): + return rpki.resource_set.resource_range_ip.from_strings(self.start_ip, self.end_ip) class Child(CrossCertification, ResourceSet): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "children") - name = django.db.models.TextField(null = True, blank = True) - - def _select_resource_bag(self): - child_asn = rpki.irdb.models.ChildASN.objects.raw(""" - SELECT * - FROM irdb_childasn - WHERE child_id = %s - """, [self.id]) - child_net = list(rpki.irdb.models.ChildNet.objects.raw(""" - SELECT * - FROM irdb_childnet - WHERE child_id = %s - """, [self.id])) - return child_asn, child_net - - class Meta: - unique_together = ("issuer", "handle") + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "children") + name = django.db.models.TextField(null = True, blank = True) + + def _select_resource_bag(self): + child_asn = rpki.irdb.models.ChildASN.objects.raw(""" + SELECT * + FROM irdb_childasn + WHERE child_id = %s + """, [self.id]) + child_net = list(rpki.irdb.models.ChildNet.objects.raw(""" + SELECT * + FROM irdb_childnet + WHERE child_id = %s + """, [self.id])) + return child_asn, child_net + + class Meta: + unique_together = ("issuer", "handle") class ChildASN(ResourceSetASN): - child = django.db.models.ForeignKey(Child, related_name = "asns") + child = django.db.models.ForeignKey(Child, related_name = "asns") - class Meta: - unique_together = ("child", "start_as", "end_as") + class Meta: + unique_together = ("child", "start_as", "end_as") class ChildNet(ResourceSetNet): - child = django.db.models.ForeignKey(Child, related_name = "address_ranges") + child = django.db.models.ForeignKey(Child, related_name = "address_ranges") - class Meta: - unique_together = ("child", "start_ip", "end_ip", "version") + class Meta: + unique_together = ("child", "start_ip", "end_ip", "version") class Parent(CrossCertification, Turtle): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "parents") - parent_handle = HandleField() - child_handle = HandleField() - repository_type = EnumField(choices = ("none", "offer", "referral")) - referrer 
= HandleField(null = True, blank = True) - referral_authorization = SignedReferralField(null = True, blank = True) + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "parents") + parent_handle = HandleField() + child_handle = HandleField() + repository_type = EnumField(choices = ("none", "offer", "referral")) + referrer = HandleField(null = True, blank = True) + referral_authorization = SignedReferralField(null = True, blank = True) - # This shouldn't be necessary - class Meta: - unique_together = ("issuer", "handle") + # This shouldn't be necessary + class Meta: + unique_together = ("issuer", "handle") class ROARequest(django.db.models.Model): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "roa_requests") - asn = django.db.models.BigIntegerField() - - @property - def roa_prefix_bag(self): - prefixes = list(rpki.irdb.models.ROARequestPrefix.objects.raw(""" - SELECT * - FROM irdb_roarequestprefix - WHERE roa_request_id = %s - """, [self.id])) - v4 = rpki.resource_set.roa_prefix_set_ipv4.from_django( - (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv4") - v6 = rpki.resource_set.roa_prefix_set_ipv6.from_django( - (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv6") - return rpki.resource_set.roa_prefix_bag(v4 = v4, v6 = v6) - - # Writing of .setter method deferred until something needs it. + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "roa_requests") + asn = django.db.models.BigIntegerField() + + @property + def roa_prefix_bag(self): + prefixes = list(rpki.irdb.models.ROARequestPrefix.objects.raw(""" + SELECT * + FROM irdb_roarequestprefix + WHERE roa_request_id = %s + """, [self.id])) + v4 = rpki.resource_set.roa_prefix_set_ipv4.from_django( + (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv4") + v6 = rpki.resource_set.roa_prefix_set_ipv6.from_django( + (p.prefix, p.prefixlen, p.max_prefixlen) for p in prefixes if p.version == "IPv6") + return rpki.resource_set.roa_prefix_bag(v4 = v4, v6 = v6) + + # Writing of .setter method deferred until something needs it. 
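The ResourceSet mixin and the ROARequest.roa_prefix_bag property above share a template-method shape: a base-class property assembles the resource or prefix bag, and each concrete model only supplies the raw rows it selects. A minimal, Django-free sketch of that shape follows; every name in it (ResourceBagBase, FakeChild, resource_bag) is a hypothetical stand-in for illustration, not part of this patch or of the real rpki.irdb models.

    # Hypothetical sketch of the _select_resource_bag() pattern; plain Python,
    # no Django, and stand-in names rather than the real rpki.irdb classes.

    class ResourceBagBase(object):
        def _select_resource_bag(self):
            # Concrete models return (asn_rows, net_rows); the base class is abstract.
            raise NotImplementedError

        @property
        def resource_bag(self):
            asns, nets = self._select_resource_bag()
            # The real code feeds these rows into rpki.resource_set classes; a plain
            # dictionary is enough here to show the shape of the aggregation.
            return dict(asn = list(asns), net = list(nets))

    class FakeChild(ResourceBagBase):
        def _select_resource_bag(self):
            return [(64496, 64511)], [("10.0.0.0", "10.255.255.255", "IPv4")]

    print(FakeChild().resource_bag)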
class ROARequestPrefix(django.db.models.Model): - roa_request = django.db.models.ForeignKey(ROARequest, related_name = "prefixes") - version = EnumField(choices = ip_version_choices) - prefix = django.db.models.CharField(max_length = 40) - prefixlen = django.db.models.PositiveSmallIntegerField() - max_prefixlen = django.db.models.PositiveSmallIntegerField() + roa_request = django.db.models.ForeignKey(ROARequest, related_name = "prefixes") + version = EnumField(choices = ip_version_choices) + prefix = django.db.models.CharField(max_length = 40) + prefixlen = django.db.models.PositiveSmallIntegerField() + max_prefixlen = django.db.models.PositiveSmallIntegerField() - def as_roa_prefix(self): - if self.version == 'IPv4': - return rpki.resource_set.roa_prefix_ipv4(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen) - else: - return rpki.resource_set.roa_prefix_ipv6(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen) + def as_roa_prefix(self): + if self.version == 'IPv4': + return rpki.resource_set.roa_prefix_ipv4(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen) + else: + return rpki.resource_set.roa_prefix_ipv6(rpki.POW.IPAddress(self.prefix), self.prefixlen, self.max_prefixlen) - def as_resource_range(self): - return self.as_roa_prefix().to_resource_range() + def as_resource_range(self): + return self.as_roa_prefix().to_resource_range() - class Meta: - unique_together = ("roa_request", "version", "prefix", "prefixlen", "max_prefixlen") + class Meta: + unique_together = ("roa_request", "version", "prefix", "prefixlen", "max_prefixlen") class GhostbusterRequest(django.db.models.Model): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ghostbuster_requests") - parent = django.db.models.ForeignKey(Parent, related_name = "ghostbuster_requests", null = True) - vcard = django.db.models.TextField() + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ghostbuster_requests") + parent = django.db.models.ForeignKey(Parent, related_name = "ghostbuster_requests", null = True) + vcard = django.db.models.TextField() class EECertificateRequest(ResourceSet): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ee_certificate_requests") - pkcs10 = PKCS10Field() - gski = django.db.models.CharField(max_length = 27) - cn = django.db.models.CharField(max_length = 64) - sn = django.db.models.CharField(max_length = 64) - eku = django.db.models.TextField(null = True) - - def _select_resource_bag(self): - ee_asn = rpki.irdb.models.EECertificateRequestASN.objects.raw(""" - SELECT * - FROM irdb_eecertificaterequestasn - WHERE ee_certificate_request_id = %s - """, [self.id]) - ee_net = rpki.irdb.models.EECertificateRequestNet.objects.raw(""" - SELECT * - FROM irdb_eecertificaterequestnet - WHERE ee_certificate_request_id = %s - """, [self.id]) - return ee_asn, ee_net - - class Meta: - unique_together = ("issuer", "gski") + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "ee_certificate_requests") + pkcs10 = PKCS10Field() + gski = django.db.models.CharField(max_length = 27) + cn = django.db.models.CharField(max_length = 64) + sn = django.db.models.CharField(max_length = 64) + eku = django.db.models.TextField(null = True) + + def _select_resource_bag(self): + ee_asn = rpki.irdb.models.EECertificateRequestASN.objects.raw(""" + SELECT * + FROM irdb_eecertificaterequestasn + WHERE ee_certificate_request_id = %s + """, [self.id]) + ee_net = 
rpki.irdb.models.EECertificateRequestNet.objects.raw(""" + SELECT * + FROM irdb_eecertificaterequestnet + WHERE ee_certificate_request_id = %s + """, [self.id]) + return ee_asn, ee_net + + class Meta: + unique_together = ("issuer", "gski") class EECertificateRequestASN(ResourceSetASN): - ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "asns") + ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "asns") - class Meta: - unique_together = ("ee_certificate_request", "start_as", "end_as") + class Meta: + unique_together = ("ee_certificate_request", "start_as", "end_as") class EECertificateRequestNet(ResourceSetNet): - ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "address_ranges") + ee_certificate_request = django.db.models.ForeignKey(EECertificateRequest, related_name = "address_ranges") - class Meta: - unique_together = ("ee_certificate_request", "start_ip", "end_ip", "version") + class Meta: + unique_together = ("ee_certificate_request", "start_ip", "end_ip", "version") class Repository(CrossCertification): - issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "repositories") - client_handle = HandleField() - service_uri = django.db.models.CharField(max_length = 255) - sia_base = django.db.models.TextField() - rrdp_notification_uri = django.db.models.TextField(null = True) - turtle = django.db.models.OneToOneField(Turtle, related_name = "repository") + issuer = django.db.models.ForeignKey(ResourceHolderCA, related_name = "repositories") + client_handle = HandleField() + service_uri = django.db.models.CharField(max_length = 255) + sia_base = django.db.models.TextField() + rrdp_notification_uri = django.db.models.TextField(null = True) + turtle = django.db.models.OneToOneField(Turtle, related_name = "repository") - # This shouldn't be necessary - class Meta: - unique_together = ("issuer", "handle") + # This shouldn't be necessary + class Meta: + unique_together = ("issuer", "handle") class Client(CrossCertification): - issuer = django.db.models.ForeignKey(ServerCA, related_name = "clients") - sia_base = django.db.models.TextField() + issuer = django.db.models.ForeignKey(ServerCA, related_name = "clients") + sia_base = django.db.models.TextField() - # This shouldn't be necessary - class Meta: - unique_together = ("issuer", "handle") + # This shouldn't be necessary + class Meta: + unique_together = ("issuer", "handle") diff --git a/rpki/irdb/router.py b/rpki/irdb/router.py index 0aaf53ce..3cbd52f9 100644 --- a/rpki/irdb/router.py +++ b/rpki/irdb/router.py @@ -27,69 +27,69 @@ accomplishes this. """ class DBContextRouter(object): - """ - A Django database router for use with multiple IRDBs. - - This router is designed to work in conjunction with the - rpki.irdb.database context handler (q.v.). 
- """ - - _app = "irdb" - - _database = None - - def db_for_read(self, model, **hints): - if model._meta.app_label == self._app: - return self._database - else: - return None - - def db_for_write(self, model, **hints): - if model._meta.app_label == self._app: - return self._database - else: - return None - - def allow_relation(self, obj1, obj2, **hints): - if self._database is None: - return None - elif obj1._meta.app_label == self._app and obj2._meta.app_label == self._app: - return True - else: - return None - - def allow_migrate(self, db, model): - if db == self._database and model._meta.app_label == self._app: - return True - else: - return None + """ + A Django database router for use with multiple IRDBs. + + This router is designed to work in conjunction with the + rpki.irdb.database context handler (q.v.). + """ + + _app = "irdb" + + _database = None + + def db_for_read(self, model, **hints): + if model._meta.app_label == self._app: + return self._database + else: + return None + + def db_for_write(self, model, **hints): + if model._meta.app_label == self._app: + return self._database + else: + return None + + def allow_relation(self, obj1, obj2, **hints): + if self._database is None: + return None + elif obj1._meta.app_label == self._app and obj2._meta.app_label == self._app: + return True + else: + return None + + def allow_migrate(self, db, model): + if db == self._database and model._meta.app_label == self._app: + return True + else: + return None class database(object): - """ - Context manager for use with DBContextRouter. Use thusly: - - with rpki.irdb.database("blarg"): - do_stuff() - - This binds IRDB operations to database blarg for the duration of - the call to do_stuff(), then restores the prior state. - """ - - def __init__(self, name, on_entry = None, on_exit = None): - if not isinstance(name, str): - raise ValueError("database name must be a string, not %r" % name) - self.name = name - self.on_entry = on_entry - self.on_exit = on_exit - - def __enter__(self): - if self.on_entry is not None: - self.on_entry() - self.former = DBContextRouter._database - DBContextRouter._database = self.name - - def __exit__(self, _type, value, traceback): - assert DBContextRouter._database is self.name - DBContextRouter._database = self.former - if self.on_exit is not None: - self.on_exit() + """ + Context manager for use with DBContextRouter. Use thusly: + + with rpki.irdb.database("blarg"): + do_stuff() + + This binds IRDB operations to database blarg for the duration of + the call to do_stuff(), then restores the prior state. + """ + + def __init__(self, name, on_entry = None, on_exit = None): + if not isinstance(name, str): + raise ValueError("database name must be a string, not %r" % name) + self.name = name + self.on_entry = on_entry + self.on_exit = on_exit + + def __enter__(self): + if self.on_entry is not None: + self.on_entry() + self.former = DBContextRouter._database + DBContextRouter._database = self.name + + def __exit__(self, _type, value, traceback): + assert DBContextRouter._database is self.name + DBContextRouter._database = self.former + if self.on_exit is not None: + self.on_exit() diff --git a/rpki/irdb/zookeeper.py b/rpki/irdb/zookeeper.py index 7202f421..a65f1f5f 100644 --- a/rpki/irdb/zookeeper.py +++ b/rpki/irdb/zookeeper.py @@ -96,1651 +96,1654 @@ class CouldntFindRepoParent(Exception): "Couldn't find repository's parent." def B64Element(e, tag, obj, **kwargs): - """ - Create an XML element containing Base64 encoded data taken from a - DER object. 
- """ - - if e is None: - se = Element(tag, **kwargs) - else: - se = SubElement(e, tag, **kwargs) - if e is not None and e.text is None: - e.text = "\n" - se.text = "\n" + obj.get_Base64() - se.tail = "\n" - return se - -class PEM_writer(object): - """ - Write PEM files to disk, keeping track of which ones we've already - written and setting the file mode appropriately. - - Comparing the old file with what we're about to write serves no real - purpose except to calm users who find repeated messages about - writing the same file confusing. - """ - - def __init__(self, logstream = None): - self.wrote = set() - self.logstream = logstream - - def __call__(self, filename, obj, compare = True): - filename = os.path.realpath(filename) - if filename in self.wrote: - return - tempname = filename - pem = obj.get_PEM() - if not filename.startswith("/dev/"): - try: - if compare and pem == open(filename, "r").read(): - return - except: # pylint: disable=W0702 - pass - tempname += ".%s.tmp" % os.getpid() - mode = 0400 if filename.endswith(".key") else 0444 - if self.logstream is not None: - self.logstream.write("Writing %s\n" % filename) - f = os.fdopen(os.open(tempname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode), "w") - f.write(pem) - f.close() - if tempname != filename: - os.rename(tempname, filename) - self.wrote.add(filename) - - -def etree_read(filename_or_etree_wrapper, schema = rpki.relaxng.oob_setup): - """ - Read an etree from a file, verifying then stripping XML namespace - cruft. As a convenience, we also accept an etree_wrapper object in - place of a filename, in which case we deepcopy the etree directly - from the etree_wrapper and there's no need for a file. - """ - - if isinstance(filename_or_etree_wrapper, etree_wrapper): - e = copy.deepcopy(filename_or_etree_wrapper.etree) - else: - e = ElementTree(file = filename_or_etree_wrapper).getroot() - schema.assertValid(e) - return e - - -class etree_wrapper(object): - """ - Wrapper for ETree objects so we can return them as function results - without requiring the caller to understand much about them. - """ - - def __init__(self, e, msg = None, debug = False, schema = rpki.relaxng.oob_setup): - self.msg = msg - e = copy.deepcopy(e) - if debug: - print ElementToString(e) - schema.assertValid(e) - self.etree = e - - def __str__(self): - return ElementToString(self.etree) - - def save(self, filename, logstream = None): - filename = os.path.realpath(filename) - tempname = filename - if not filename.startswith("/dev/"): - tempname += ".%s.tmp" % os.getpid() - ElementTree(self.etree).write(tempname) - if tempname != filename: - os.rename(tempname, filename) - if logstream is not None: - logstream.write("Wrote %s\n" % filename) - if self.msg is not None: - logstream.write(self.msg + "\n") - - @property - def file(self): - from cStringIO import StringIO - return StringIO(ElementToString(self.etree)) - - -class Zookeeper(object): - - ## @var show_xml - # If not None, a file-like object to which to prettyprint XML, for debugging. 
- - show_xml = None - - def __init__(self, cfg = None, handle = None, logstream = None, disable_signal_handlers = False): - - if cfg is None: - cfg = rpki.config.parser() - - if handle is None: - handle = cfg.get("handle", section = myrpki_section) - - self.cfg = cfg - - self.logstream = logstream - self.disable_signal_handlers = disable_signal_handlers - - self.run_rpkid = cfg.getboolean("run_rpkid", section = myrpki_section) - self.run_pubd = cfg.getboolean("run_pubd", section = myrpki_section) - self.run_rootd = cfg.getboolean("run_rootd", section = myrpki_section) - - if self.run_rootd and (not self.run_pubd or not self.run_rpkid): - raise CantRunRootd("Can't run rootd unless also running rpkid and pubd") - - self.default_repository = cfg.get("default_repository", "", section = myrpki_section) - self.pubd_contact_info = cfg.get("pubd_contact_info", "", section = myrpki_section) - - self.rsync_module = cfg.get("publication_rsync_module", section = myrpki_section) - self.rsync_server = cfg.get("publication_rsync_server", section = myrpki_section) - - self.reset_identity(handle) - - - def reset_identity(self, handle): - """ - Select handle of current resource holding entity. - """ - - if handle is None: - raise MissingHandle - self.handle = handle - - - def set_logstream(self, logstream): - """ - Set log stream for this Zookeeper. The log stream is a file-like - object, or None to suppress all logging. - """ - - self.logstream = logstream - - - def log(self, msg): - """ - Send some text to this Zookeeper's log stream, if one is set. - """ - - if self.logstream is not None: - self.logstream.write(msg) - self.logstream.write("\n") - - - @property - def resource_ca(self): - """ - Get ResourceHolderCA object associated with current handle. - """ - - if self.handle is None: - raise HandleNotSet - return rpki.irdb.models.ResourceHolderCA.objects.get(handle = self.handle) - - - @property - def server_ca(self): - """ - Get ServerCA object. - """ - - return rpki.irdb.models.ServerCA.objects.get() - - - @django.db.transaction.atomic - def initialize_server_bpki(self): """ - Initialize server BPKI portion of an RPKI installation. Reads the - configuration file and generates the initial BPKI server - certificates needed to start daemons. + Create an XML element containing Base64 encoded data taken from a + DER object. """ - if self.run_rpkid or self.run_pubd: - server_ca, created = rpki.irdb.models.ServerCA.objects.get_or_certify() - rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irbe") - - if self.run_rpkid: - rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "rpkid") - rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irdbd") - - if self.run_pubd: - rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "pubd") - - - @django.db.transaction.atomic - def initialize_resource_bpki(self): - """ - Initialize the resource-holding BPKI for an RPKI installation. - Returns XML describing the resource holder. - - This method is present primarily for backwards compatibility with - the old combined initialize() method which initialized both the - server BPKI and the default resource-holding BPKI in a single - method call. In the long run we want to replace this with - something that takes a handle as argument and creates the - resource-holding BPKI idenity if needed. 
- """ - - resource_ca, created = rpki.irdb.models.ResourceHolderCA.objects.get_or_certify(handle = self.handle) - return self.generate_identity() - - - def initialize(self): - """ - Backwards compatibility wrapper: calls initialize_server_bpki() - and initialize_resource_bpki(), returns latter's result. - """ - - self.initialize_server_bpki() - return self.initialize_resource_bpki() - - - def generate_identity(self): - """ - Generate identity XML. Broken out of .initialize() because it's - easier for the GUI this way. - """ - - e = Element(tag_oob_child_request, nsmap = oob_nsmap, version = oob_version, - child_handle = self.handle) - B64Element(e, tag_oob_child_bpki_ta, self.resource_ca.certificate) - return etree_wrapper(e, msg = 'This is the "identity" file you will need to send to your parent') - - - @django.db.transaction.atomic - def delete_tenant(self): - """ - Delete the ResourceHolderCA object corresponding to the current handle. - This corresponds to deleting an rpkid object. - - This code assumes the normal Django cascade-on-delete behavior, - that is, we assume that deleting the ResourceHolderCA object - deletes all the subordinate objects that refer to it via foreign - key relationships. - """ - - resource_ca = self.resource_ca - if resource_ca is not None: - resource_ca.delete() + if e is None: + se = Element(tag, **kwargs) else: - self.log("No such ResourceHolderCA \"%s\"" % self.handle) - - - @django.db.transaction.atomic - def configure_rootd(self): - - assert self.run_rpkid and self.run_pubd and self.run_rootd - - rpki.irdb.models.Rootd.objects.get_or_certify( - issuer = self.resource_ca, - service_uri = "http://localhost:%s/" % self.cfg.get("rootd_server_port", - section = myrpki_section)) - - return self.generate_rootd_repository_offer() - - - def generate_rootd_repository_offer(self): - """ - Generate repository offer for rootd. Split out of - configure_rootd() because that's easier for the GUI. - """ - - try: - self.resource_ca.repositories.get(handle = self.handle) - return None - - except rpki.irdb.models.Repository.DoesNotExist: - e = Element(tag_oob_publisher_request, nsmap = oob_nsmap, version = oob_version, - publisher_handle = self.handle) - B64Element(e, tag_oob_publisher_bpki_ta, self.resource_ca.certificate) - return etree_wrapper(e, msg = 'This is the "repository offer" file for you to use if you want to publish in your own repository') - - - def write_bpki_files(self): - """ - Write out BPKI certificate, key, and CRL files for daemons that - need them. 
- """ + se = SubElement(e, tag, **kwargs) + if e is not None and e.text is None: + e.text = "\n" + se.text = "\n" + obj.get_Base64() + se.tail = "\n" + return se - writer = PEM_writer(self.logstream) - - if self.run_rpkid: - rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid") - writer(self.cfg.get("bpki-ta", section = rpkid_section), self.server_ca.certificate) - writer(self.cfg.get("rpkid-key", section = rpkid_section), rpkid.private_key) - writer(self.cfg.get("rpkid-cert", section = rpkid_section), rpkid.certificate) - writer(self.cfg.get("irdb-cert", section = rpkid_section), - self.server_ca.ee_certificates.get(purpose = "irdbd").certificate) - writer(self.cfg.get("irbe-cert", section = rpkid_section), - self.server_ca.ee_certificates.get(purpose = "irbe").certificate) - - if self.run_pubd: - pubd = self.server_ca.ee_certificates.get(purpose = "pubd") - writer(self.cfg.get("bpki-ta", section = pubd_section), self.server_ca.certificate) - writer(self.cfg.get("pubd-key", section = pubd_section), pubd.private_key) - writer(self.cfg.get("pubd-cert", section = pubd_section), pubd.certificate) - writer(self.cfg.get("irbe-cert", section = pubd_section), - self.server_ca.ee_certificates.get(purpose = "irbe").certificate) - - if self.run_rootd: - try: - rootd = rpki.irdb.models.ResourceHolderCA.objects.get(handle = self.handle).rootd - writer(self.cfg.get("bpki-ta", section = rootd_section), self.server_ca.certificate) - writer(self.cfg.get("rootd-bpki-crl", section = rootd_section), self.server_ca.latest_crl) - writer(self.cfg.get("rootd-bpki-key", section = rootd_section), rootd.private_key) - writer(self.cfg.get("rootd-bpki-cert", section = rootd_section), rootd.certificate) - writer(self.cfg.get("child-bpki-cert", section = rootd_section), rootd.issuer.certificate) - except rpki.irdb.models.ResourceHolderCA.DoesNotExist: - self.log("rootd enabled but resource holding entity not yet configured, skipping rootd setup") - except rpki.irdb.models.Rootd.DoesNotExist: - self.log("rootd enabled but not yet configured, skipping rootd setup") - - - @django.db.transaction.atomic - def update_bpki(self): +class PEM_writer(object): """ - Update BPKI certificates. Assumes an existing RPKI installation. - - Basic plan here is to reissue all BPKI certificates we can, right - now. In the long run we might want to be more clever about only - touching ones that need maintenance, but this will do for a start. + Write PEM files to disk, keeping track of which ones we've already + written and setting the file mode appropriately. + + Comparing the old file with what we're about to write serves no real + purpose except to calm users who find repeated messages about + writing the same file confusing. 
+ """ + + def __init__(self, logstream = None): + self.wrote = set() + self.logstream = logstream + + def __call__(self, filename, obj, compare = True): + filename = os.path.realpath(filename) + if filename in self.wrote: + return + tempname = filename + pem = obj.get_PEM() + if not filename.startswith("/dev/"): + try: + if compare and pem == open(filename, "r").read(): + return + except: # pylint: disable=W0702 + pass + tempname += ".%s.tmp" % os.getpid() + mode = 0400 if filename.endswith(".key") else 0444 + if self.logstream is not None: + self.logstream.write("Writing %s\n" % filename) + f = os.fdopen(os.open(tempname, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, mode), "w") + f.write(pem) + f.close() + if tempname != filename: + os.rename(tempname, filename) + self.wrote.add(filename) - We also reissue CRLs for all CAs. - - Most likely this should be run under cron. - """ - for model in (rpki.irdb.models.ServerCA, - rpki.irdb.models.ResourceHolderCA, - rpki.irdb.models.ServerEE, - rpki.irdb.models.Referral, - rpki.irdb.models.Rootd, - rpki.irdb.models.HostedCA, - rpki.irdb.models.BSC, - rpki.irdb.models.Child, - rpki.irdb.models.Parent, - rpki.irdb.models.Client, - rpki.irdb.models.Repository): - for obj in model.objects.all(): - self.log("Regenerating BPKI certificate %s" % obj.certificate.getSubject()) - obj.avow() - obj.save() - - self.log("Regenerating Server BPKI CRL") - self.server_ca.generate_crl() - self.server_ca.save() - - for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): - self.log("Regenerating BPKI CRL for Resource Holder %s" % ca.handle) - ca.generate_crl() - ca.save() - - - @staticmethod - def _compose_left_right_query(): +def etree_read(filename_or_etree_wrapper, schema = rpki.relaxng.oob_setup): """ - Compose top level element of a left-right query. + Read an etree from a file, verifying then stripping XML namespace + cruft. As a convenience, we also accept an etree_wrapper object in + place of a filename, in which case we deepcopy the etree directly + from the etree_wrapper and there's no need for a file. """ - return Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap, - type = "query", version = rpki.left_right.version) + if isinstance(filename_or_etree_wrapper, etree_wrapper): + e = copy.deepcopy(filename_or_etree_wrapper.etree) + else: + e = ElementTree(file = filename_or_etree_wrapper).getroot() + schema.assertValid(e) + return e - @staticmethod - def _compose_publication_control_query(): +class etree_wrapper(object): """ - Compose top level element of a publication-control query. + Wrapper for ETree objects so we can return them as function results + without requiring the caller to understand much about them. """ - return Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap, - type = "query", version = rpki.publication_control.version) + def __init__(self, e, msg = None, debug = False, schema = rpki.relaxng.oob_setup): + self.msg = msg + e = copy.deepcopy(e) + if debug: + print ElementToString(e) + schema.assertValid(e) + self.etree = e + def __str__(self): + return ElementToString(self.etree) - @django.db.transaction.atomic - def synchronize_bpki(self): - """ - Synchronize BPKI updates. This is separate from .update_bpki() - because this requires rpkid to be running and none of the other - BPKI update stuff does; there may be circumstances under which it - makes sense to do the rest of the BPKI update and allow this to - fail with a warning. 
- """ + def save(self, filename, logstream = None): + filename = os.path.realpath(filename) + tempname = filename + if not filename.startswith("/dev/"): + tempname += ".%s.tmp" % os.getpid() + ElementTree(self.etree).write(tempname) + if tempname != filename: + os.rename(tempname, filename) + if logstream is not None: + logstream.write("Wrote %s\n" % filename) + if self.msg is not None: + logstream.write(self.msg + "\n") - if self.run_rpkid: - q_msg = self._compose_left_right_query() - - for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): - q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, - action = "set", - tag = "%s__tenant" % ca.handle, - tenant_handle = ca.handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64() - - for bsc in rpki.irdb.models.BSC.objects.all(): - q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc, - action = "set", - tag = "%s__bsc__%s" % (bsc.issuer.handle, bsc.handle), - tenant_handle = bsc.issuer.handle, - bsc_handle = bsc.handle) - SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64() - SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = bsc.issuer.latest_crl.get_Base64() - - for repository in rpki.irdb.models.Repository.objects.all(): - q_pdu = SubElement(q_msg, rpki.left_right.tag_repository, - action = "set", - tag = "%s__repository__%s" % (repository.issuer.handle, repository.handle), - tenant_handle = repository.issuer.handle, - repository_handle = repository.handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64() - - for parent in rpki.irdb.models.Parent.objects.all(): - q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, - action = "set", - tag = "%s__parent__%s" % (parent.issuer.handle, parent.handle), - tenant_handle = parent.issuer.handle, - parent_handle = parent.handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64() - - for rootd in rpki.irdb.models.Rootd.objects.all(): - q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, - action = "set", - tag = "%s__rootd" % rootd.issuer.handle, - tenant_handle = rootd.issuer.handle, - parent_handle = rootd.issuer.handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = rootd.certificate.get_Base64() - - for child in rpki.irdb.models.Child.objects.all(): - q_pdu = SubElement(q_msg, rpki.left_right.tag_child, - action = "set", - tag = "%s__child__%s" % (child.issuer.handle, child.handle), - tenant_handle = child.issuer.handle, - child_handle = child.handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64() - - if len(q_msg) > 0: - self.call_rpkid(q_msg) + @property + def file(self): + from cStringIO import StringIO + return StringIO(ElementToString(self.etree)) - if self.run_pubd: - q_msg = self._compose_publication_control_query() - for client in self.server_ca.clients.all(): - q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, action = "set", client_handle = client.handle) - SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64() +class Zookeeper(object): - if len(q_msg) > 0: - self.call_pubd(q_msg) + ## @var show_xml + # If not None, a file-like object to which to prettyprint XML, for debugging. + show_xml = None - @django.db.transaction.atomic - def configure_child(self, filename, child_handle = None, valid_until = None): - """ - Configure a new child of this RPKI entity, given the child's XML - identity file as an input. 
Extracts the child's data from the - XML, cross-certifies the child's resource-holding BPKI - certificate, and generates an XML file describing the relationship - between the child and this parent, including this parent's BPKI - data and up-down protocol service URI. - """ + def __init__(self, cfg = None, handle = None, logstream = None, disable_signal_handlers = False): - x = etree_read(filename) + if cfg is None: + cfg = rpki.config.parser() - if x.tag != tag_oob_child_request: - raise BadXMLMessage("Expected %s, got %s", tag_oob_child_request, x.tag) + if handle is None: + handle = cfg.get("handle", section = myrpki_section) - if child_handle is None: - child_handle = x.get("child_handle") + self.cfg = cfg - if valid_until is None: - valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365) - else: - valid_until = rpki.sundial.datetime.fromXMLtime(valid_until) - if valid_until < rpki.sundial.now(): - raise PastExpiration("Specified new expiration time %s has passed" % valid_until) + self.logstream = logstream + self.disable_signal_handlers = disable_signal_handlers - self.log("Child calls itself %r, we call it %r" % (x.get("child_handle"), child_handle)) + self.run_rpkid = cfg.getboolean("run_rpkid", section = myrpki_section) + self.run_pubd = cfg.getboolean("run_pubd", section = myrpki_section) + self.run_rootd = cfg.getboolean("run_rootd", section = myrpki_section) - child, created = rpki.irdb.models.Child.objects.get_or_certify( - issuer = self.resource_ca, - handle = child_handle, - ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_child_bpki_ta)), - valid_until = valid_until) + if self.run_rootd and (not self.run_pubd or not self.run_rpkid): + raise CantRunRootd("Can't run rootd unless also running rpkid and pubd") - return self.generate_parental_response(child), child_handle + self.default_repository = cfg.get("default_repository", "", section = myrpki_section) + self.pubd_contact_info = cfg.get("pubd_contact_info", "", section = myrpki_section) + self.rsync_module = cfg.get("publication_rsync_module", section = myrpki_section) + self.rsync_server = cfg.get("publication_rsync_server", section = myrpki_section) - @django.db.transaction.atomic - def generate_parental_response(self, child): - """ - Generate parental response XML. Broken out of .configure_child() - for GUI. - """ + self.reset_identity(handle) - service_uri = "http://%s:%s/up-down/%s/%s" % ( - self.cfg.get("rpkid_server_host", section = myrpki_section), - self.cfg.get("rpkid_server_port", section = myrpki_section), - self.handle, child.handle) - e = Element(tag_oob_parent_response, nsmap = oob_nsmap, version = oob_version, - service_uri = service_uri, - child_handle = child.handle, - parent_handle = self.handle) - B64Element(e, tag_oob_parent_bpki_ta, self.resource_ca.certificate) + def reset_identity(self, handle): + """ + Select handle of current resource holding entity. + """ - try: - if self.default_repository: - repo = self.resource_ca.repositories.get(handle = self.default_repository) - else: - repo = self.resource_ca.repositories.get() - except rpki.irdb.models.Repository.DoesNotExist: - repo = None + if handle is None: + raise MissingHandle + self.handle = handle - if repo is None: - self.log("Couldn't find any usable repositories, not giving referral") - elif repo.handle == self.handle: - SubElement(e, tag_oob_offer) + def set_logstream(self, logstream): + """ + Set log stream for this Zookeeper. The log stream is a file-like + object, or None to suppress all logging. 
+ """ - else: - proposed_sia_base = repo.sia_base + child.handle + "/" - referral_cert, created = rpki.irdb.models.Referral.objects.get_or_certify(issuer = self.resource_ca) - auth = rpki.x509.SignedReferral() - auth.set_content(B64Element(None, tag_oob_authorization, child.ta, - nsmap = oob_nsmap, version = oob_version, - authorized_sia_base = proposed_sia_base)) - auth.schema_check() - auth.sign(referral_cert.private_key, referral_cert.certificate, self.resource_ca.latest_crl) - B64Element(e, tag_oob_referral, auth, referrer = repo.client_handle) + self.logstream = logstream - return etree_wrapper(e, msg = "Send this file back to the child you just configured") + def log(self, msg): + """ + Send some text to this Zookeeper's log stream, if one is set. + """ - @django.db.transaction.atomic - def delete_child(self, child_handle): - """ - Delete a child of this RPKI entity. - """ - - self.resource_ca.children.get(handle = child_handle).delete() + if self.logstream is not None: + self.logstream.write(msg) + self.logstream.write("\n") - @django.db.transaction.atomic - def configure_parent(self, filename, parent_handle = None): - """ - Configure a new parent of this RPKI entity, given the output of - the parent's configure_child command as input. Reads the parent's - response XML, extracts the parent's BPKI and service URI - information, cross-certifies the parent's BPKI data into this - entity's BPKI, and checks for offers or referrals of publication - service. If a publication offer or referral is present, we - generate a request-for-service message to that repository, in case - the user wants to avail herself of the referral or offer. - """ + @property + def resource_ca(self): + """ + Get ResourceHolderCA object associated with current handle. + """ - x = etree_read(filename) + if self.handle is None: + raise HandleNotSet + return rpki.irdb.models.ResourceHolderCA.objects.get(handle = self.handle) - if x.tag != tag_oob_parent_response: - raise BadXMLMessage("Expected %s, got %s", tag_oob_parent_response, x.tag) - if parent_handle is None: - parent_handle = x.get("parent_handle") + @property + def server_ca(self): + """ + Get ServerCA object. + """ - offer = x.find(tag_oob_offer) - referral = x.find(tag_oob_referral) + return rpki.irdb.models.ServerCA.objects.get() - if offer is not None: - repository_type = "offer" - referrer = None - referral_authorization = None - elif referral is not None: - repository_type = "referral" - referrer = referral.get("referrer") - referral_authorization = rpki.x509.SignedReferral(Base64 = referral.text) + @django.db.transaction.atomic + def initialize_server_bpki(self): + """ + Initialize server BPKI portion of an RPKI installation. Reads the + configuration file and generates the initial BPKI server + certificates needed to start daemons. 
+ """ - else: - repository_type = "none" - referrer = None - referral_authorization = None + if self.run_rpkid or self.run_pubd: + server_ca, created = rpki.irdb.models.ServerCA.objects.get_or_certify() + rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irbe") - self.log("Parent calls itself %r, we call it %r" % (x.get("parent_handle"), parent_handle)) - self.log("Parent calls us %r" % x.get("child_handle")) + if self.run_rpkid: + rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "rpkid") + rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "irdbd") - parent, created = rpki.irdb.models.Parent.objects.get_or_certify( - issuer = self.resource_ca, - handle = parent_handle, - child_handle = x.get("child_handle"), - parent_handle = x.get("parent_handle"), - service_uri = x.get("service_uri"), - ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_parent_bpki_ta)), - repository_type = repository_type, - referrer = referrer, - referral_authorization = referral_authorization) + if self.run_pubd: + rpki.irdb.models.ServerEE.objects.get_or_certify(issuer = server_ca, purpose = "pubd") - return self.generate_repository_request(parent), parent_handle + @django.db.transaction.atomic + def initialize_resource_bpki(self): + """ + Initialize the resource-holding BPKI for an RPKI installation. + Returns XML describing the resource holder. - def generate_repository_request(self, parent): - """ - Generate repository request for a given parent. - """ + This method is present primarily for backwards compatibility with + the old combined initialize() method which initialized both the + server BPKI and the default resource-holding BPKI in a single + method call. In the long run we want to replace this with + something that takes a handle as argument and creates the + resource-holding BPKI idenity if needed. + """ - e = Element(tag_oob_publisher_request, nsmap = oob_nsmap, version = oob_version, - publisher_handle = self.handle) - B64Element(e, tag_oob_publisher_bpki_ta, self.resource_ca.certificate) - if parent.repository_type == "referral": - B64Element(e, tag_oob_referral, parent.referral_authorization, - referrer = parent.referrer) + resource_ca, created = rpki.irdb.models.ResourceHolderCA.objects.get_or_certify(handle = self.handle) + return self.generate_identity() - return etree_wrapper(e, msg = "This is the file to send to the repository operator") + def initialize(self): + """ + Backwards compatibility wrapper: calls initialize_server_bpki() + and initialize_resource_bpki(), returns latter's result. + """ - @django.db.transaction.atomic - def delete_parent(self, parent_handle): - """ - Delete a parent of this RPKI entity. - """ + self.initialize_server_bpki() + return self.initialize_resource_bpki() - self.resource_ca.parents.get(handle = parent_handle).delete() + def generate_identity(self): + """ + Generate identity XML. Broken out of .initialize() because it's + easier for the GUI this way. + """ - @django.db.transaction.atomic - def delete_rootd(self): - """ - Delete rootd associated with this RPKI entity. 
- """ + e = Element(tag_oob_child_request, nsmap = oob_nsmap, version = oob_version, + child_handle = self.handle) + B64Element(e, tag_oob_child_bpki_ta, self.resource_ca.certificate) + return etree_wrapper(e, msg = 'This is the "identity" file you will need to send to your parent') - self.resource_ca.rootd.delete() + @django.db.transaction.atomic + def delete_tenant(self): + """ + Delete the ResourceHolderCA object corresponding to the current handle. + This corresponds to deleting an rpkid object. - @django.db.transaction.atomic - def configure_publication_client(self, filename, sia_base = None, flat = False): - """ - Configure publication server to know about a new client, given the - client's request-for-service message as input. Reads the client's - request for service, cross-certifies the client's BPKI data, and - generates a response message containing the repository's BPKI data - and service URI. - """ + This code assumes the normal Django cascade-on-delete behavior, + that is, we assume that deleting the ResourceHolderCA object + deletes all the subordinate objects that refer to it via foreign + key relationships. + """ - x = etree_read(filename) - - if x.tag != tag_oob_publisher_request: - raise BadXMLMessage("Expected %s, got %s", tag_oob_publisher_request, x.tag) - - client_ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_publisher_bpki_ta)) - - referral = x.find(tag_oob_referral) - - default_sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{handle}/".format( - self = self, handle = x.get("publisher_handle")) - - if sia_base is None and flat: - self.log("Flat publication structure forced, homing client at top-level") - sia_base = default_sia_base - - if sia_base is None and referral is not None: - self.log("This looks like a referral, checking") - try: - referrer = referral.get("referrer") - referrer = self.server_ca.clients.get(handle = referrer) - referral = rpki.x509.SignedReferral(Base64 = referral.text) - referral = referral.unwrap(ta = (referrer.certificate, self.server_ca.certificate)) - if rpki.x509.X509(Base64 = referral.text) != client_ta: - raise BadXMLMessage("Referral trust anchor does not match") - sia_base = referral.get("authorized_sia_base") - except rpki.irdb.models.Client.DoesNotExist: - self.log("We have no record of the client ({}) alleged to have made this referral".format(referrer)) - - if sia_base is None and referral is None: - self.log("This might be an offer, checking") - try: - parent = rpki.irdb.models.ResourceHolderCA.objects.get(children__ta = client_ta) - if "/" in parent.repositories.get(ta = self.server_ca.certificate).client_handle: - self.log("Client's parent is not top-level, this is not a valid offer") + resource_ca = self.resource_ca + if resource_ca is not None: + resource_ca.delete() else: - self.log("Found client and its parent, nesting") - sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{parent_handle}/{client_handle}/".format( - self = self, parent_handle = parent.handle, client_handle = x.get("publisher_handle")) - except rpki.irdb.models.Repository.DoesNotExist: - self.log("Found client's parent, but repository isn't set, this shouldn't happen!") - except rpki.irdb.models.ResourceHolderCA.DoesNotExist: - try: - rpki.irdb.models.Rootd.objects.get(issuer__certificate = client_ta) - self.log("This client's parent is rootd") - sia_base = default_sia_base - except rpki.irdb.models.Rootd.DoesNotExist: - self.log("We don't host this client's parent, so we didn't make an offer") - - if sia_base is None: - 
self.log("Don't know where else to nest this client, so defaulting to top-level") - sia_base = default_sia_base + self.log("No such ResourceHolderCA \"%s\"" % self.handle) - if not sia_base.startswith("rsync://"): - raise BadXMLMessage("Malformed sia_base parameter %r, should start with 'rsync://'" % sia_base) - client_handle = "/".join(sia_base.rstrip("/").split("/")[4:]) + @django.db.transaction.atomic + def configure_rootd(self): - self.log("Client calls itself %r, we call it %r" % ( - x.get("publisher_handle"), client_handle)) + assert self.run_rpkid and self.run_pubd and self.run_rootd - client, created = rpki.irdb.models.Client.objects.get_or_certify( - issuer = self.server_ca, - handle = client_handle, - ta = client_ta, - sia_base = sia_base) + rpki.irdb.models.Rootd.objects.get_or_certify( + issuer = self.resource_ca, + service_uri = "http://localhost:%s/" % self.cfg.get("rootd_server_port", + section = myrpki_section)) - return self.generate_repository_response(client), client_handle + return self.generate_rootd_repository_offer() - def generate_repository_response(self, client): - """ - Generate repository response XML to a given client. - """ - - service_uri = "http://{host}:{port}/client/{handle}".format( - host = self.cfg.get("pubd_server_host", section = myrpki_section), - port = self.cfg.get("pubd_server_port", section = myrpki_section), - handle = client.handle) - - rrdp_uri = self.cfg.get("publication_rrdp_notification_uri", section = myrpki_section, - default = "") or None - - e = Element(tag_oob_repository_response, nsmap = oob_nsmap, version = oob_version, - service_uri = service_uri, - publisher_handle = client.handle, - sia_base = client.sia_base) - - if rrdp_uri is not None: - e.set("rrdp_notification_uri", rrdp_uri) - - B64Element(e, tag_oob_repository_bpki_ta, self.server_ca.certificate) - return etree_wrapper(e, msg = "Send this file back to the publication client you just configured") + def generate_rootd_repository_offer(self): + """ + Generate repository offer for rootd. Split out of + configure_rootd() because that's easier for the GUI. + """ + try: + self.resource_ca.repositories.get(handle = self.handle) + return None - @django.db.transaction.atomic - def delete_publication_client(self, client_handle): - """ - Delete a publication client of this RPKI entity. - """ + except rpki.irdb.models.Repository.DoesNotExist: + e = Element(tag_oob_publisher_request, nsmap = oob_nsmap, version = oob_version, + publisher_handle = self.handle) + B64Element(e, tag_oob_publisher_bpki_ta, self.resource_ca.certificate) + return etree_wrapper(e, msg = 'This is the "repository offer" file for you to use if you want to publish in your own repository') + + + def write_bpki_files(self): + """ + Write out BPKI certificate, key, and CRL files for daemons that + need them. 
+ """ + + writer = PEM_writer(self.logstream) + + if self.run_rpkid: + rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid") + writer(self.cfg.get("bpki-ta", section = rpkid_section), self.server_ca.certificate) + writer(self.cfg.get("rpkid-key", section = rpkid_section), rpkid.private_key) + writer(self.cfg.get("rpkid-cert", section = rpkid_section), rpkid.certificate) + writer(self.cfg.get("irdb-cert", section = rpkid_section), + self.server_ca.ee_certificates.get(purpose = "irdbd").certificate) + writer(self.cfg.get("irbe-cert", section = rpkid_section), + self.server_ca.ee_certificates.get(purpose = "irbe").certificate) + + if self.run_pubd: + pubd = self.server_ca.ee_certificates.get(purpose = "pubd") + writer(self.cfg.get("bpki-ta", section = pubd_section), self.server_ca.certificate) + writer(self.cfg.get("pubd-key", section = pubd_section), pubd.private_key) + writer(self.cfg.get("pubd-cert", section = pubd_section), pubd.certificate) + writer(self.cfg.get("irbe-cert", section = pubd_section), + self.server_ca.ee_certificates.get(purpose = "irbe").certificate) + + if self.run_rootd: + try: + rootd = rpki.irdb.models.ResourceHolderCA.objects.get(handle = self.handle).rootd + writer(self.cfg.get("bpki-ta", section = rootd_section), self.server_ca.certificate) + writer(self.cfg.get("rootd-bpki-crl", section = rootd_section), self.server_ca.latest_crl) + writer(self.cfg.get("rootd-bpki-key", section = rootd_section), rootd.private_key) + writer(self.cfg.get("rootd-bpki-cert", section = rootd_section), rootd.certificate) + writer(self.cfg.get("child-bpki-cert", section = rootd_section), rootd.issuer.certificate) + except rpki.irdb.models.ResourceHolderCA.DoesNotExist: + self.log("rootd enabled but resource holding entity not yet configured, skipping rootd setup") + except rpki.irdb.models.Rootd.DoesNotExist: + self.log("rootd enabled but not yet configured, skipping rootd setup") + + + @django.db.transaction.atomic + def update_bpki(self): + """ + Update BPKI certificates. Assumes an existing RPKI installation. + + Basic plan here is to reissue all BPKI certificates we can, right + now. In the long run we might want to be more clever about only + touching ones that need maintenance, but this will do for a start. + + We also reissue CRLs for all CAs. + + Most likely this should be run under cron. + """ + + for model in (rpki.irdb.models.ServerCA, + rpki.irdb.models.ResourceHolderCA, + rpki.irdb.models.ServerEE, + rpki.irdb.models.Referral, + rpki.irdb.models.Rootd, + rpki.irdb.models.HostedCA, + rpki.irdb.models.BSC, + rpki.irdb.models.Child, + rpki.irdb.models.Parent, + rpki.irdb.models.Client, + rpki.irdb.models.Repository): + for obj in model.objects.all(): + self.log("Regenerating BPKI certificate %s" % obj.certificate.getSubject()) + obj.avow() + obj.save() + + self.log("Regenerating Server BPKI CRL") + self.server_ca.generate_crl() + self.server_ca.save() + + for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): + self.log("Regenerating BPKI CRL for Resource Holder %s" % ca.handle) + ca.generate_crl() + ca.save() + + + @staticmethod + def _compose_left_right_query(): + """ + Compose top level element of a left-right query. + """ + + return Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap, + type = "query", version = rpki.left_right.version) + + + @staticmethod + def _compose_publication_control_query(): + """ + Compose top level element of a publication-control query. 
+ """ + + return Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap, + type = "query", version = rpki.publication_control.version) + + + @django.db.transaction.atomic + def synchronize_bpki(self): + """ + Synchronize BPKI updates. This is separate from .update_bpki() + because this requires rpkid to be running and none of the other + BPKI update stuff does; there may be circumstances under which it + makes sense to do the rest of the BPKI update and allow this to + fail with a warning. + """ + + if self.run_rpkid: + q_msg = self._compose_left_right_query() + + for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): + q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, + action = "set", + tag = "%s__tenant" % ca.handle, + tenant_handle = ca.handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64() + + for bsc in rpki.irdb.models.BSC.objects.all(): + q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc, + action = "set", + tag = "%s__bsc__%s" % (bsc.issuer.handle, bsc.handle), + tenant_handle = bsc.issuer.handle, + bsc_handle = bsc.handle) + SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64() + SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = bsc.issuer.latest_crl.get_Base64() + + for repository in rpki.irdb.models.Repository.objects.all(): + q_pdu = SubElement(q_msg, rpki.left_right.tag_repository, + action = "set", + tag = "%s__repository__%s" % (repository.issuer.handle, repository.handle), + tenant_handle = repository.issuer.handle, + repository_handle = repository.handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64() + + for parent in rpki.irdb.models.Parent.objects.all(): + q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, + action = "set", + tag = "%s__parent__%s" % (parent.issuer.handle, parent.handle), + tenant_handle = parent.issuer.handle, + parent_handle = parent.handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64() + + for rootd in rpki.irdb.models.Rootd.objects.all(): + q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, + action = "set", + tag = "%s__rootd" % rootd.issuer.handle, + tenant_handle = rootd.issuer.handle, + parent_handle = rootd.issuer.handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = rootd.certificate.get_Base64() + + for child in rpki.irdb.models.Child.objects.all(): + q_pdu = SubElement(q_msg, rpki.left_right.tag_child, + action = "set", + tag = "%s__child__%s" % (child.issuer.handle, child.handle), + tenant_handle = child.issuer.handle, + child_handle = child.handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64() + + if len(q_msg) > 0: + self.call_rpkid(q_msg) + + if self.run_pubd: + q_msg = self._compose_publication_control_query() + + for client in self.server_ca.clients.all(): + q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, action = "set", client_handle = client.handle) + SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64() + + if len(q_msg) > 0: + self.call_pubd(q_msg) + + + @django.db.transaction.atomic + def configure_child(self, filename, child_handle = None, valid_until = None): + """ + Configure a new child of this RPKI entity, given the child's XML + identity file as an input. 
Extracts the child's data from the + XML, cross-certifies the child's resource-holding BPKI + certificate, and generates an XML file describing the relationship + between the child and this parent, including this parent's BPKI + data and up-down protocol service URI. + """ + + x = etree_read(filename) + + if x.tag != tag_oob_child_request: + raise BadXMLMessage("Expected %s, got %s", tag_oob_child_request, x.tag) + + if child_handle is None: + child_handle = x.get("child_handle") + + if valid_until is None: + valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365) + else: + valid_until = rpki.sundial.datetime.fromXMLtime(valid_until) + if valid_until < rpki.sundial.now(): + raise PastExpiration("Specified new expiration time %s has passed" % valid_until) - self.server_ca.clients.get(handle = client_handle).delete() + self.log("Child calls itself %r, we call it %r" % (x.get("child_handle"), child_handle)) + child, created = rpki.irdb.models.Child.objects.get_or_certify( + issuer = self.resource_ca, + handle = child_handle, + ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_child_bpki_ta)), + valid_until = valid_until) - @django.db.transaction.atomic - def configure_repository(self, filename, parent_handle = None): - """ - Configure a publication repository for this RPKI entity, given the - repository's response to our request-for-service message as input. - Reads the repository's response, extracts and cross-certifies the - BPKI data and service URI, and links the repository data with the - corresponding parent data in our local database. - """ + return self.generate_parental_response(child), child_handle - x = etree_read(filename) - if x.tag != tag_oob_repository_response: - raise BadXMLMessage("Expected %s, got %s", tag_oob_repository_response, x.tag) + @django.db.transaction.atomic + def generate_parental_response(self, child): + """ + Generate parental response XML. Broken out of .configure_child() + for GUI. 
+ """ - self.log("Repository calls us %r" % (x.get("publisher_handle"))) + service_uri = "http://%s:%s/up-down/%s/%s" % ( + self.cfg.get("rpkid_server_host", section = myrpki_section), + self.cfg.get("rpkid_server_port", section = myrpki_section), + self.handle, child.handle) - if parent_handle is not None: - self.log("Explicit parent_handle given") - try: - if parent_handle == self.handle: - turtle = self.resource_ca.rootd - else: - turtle = self.resource_ca.parents.get(handle = parent_handle) - except (rpki.irdb.models.Parent.DoesNotExist, rpki.irdb.models.Rootd.DoesNotExist): - self.log("Could not find parent %r in our database" % parent_handle) - raise CouldntFindRepoParent + e = Element(tag_oob_parent_response, nsmap = oob_nsmap, version = oob_version, + service_uri = service_uri, + child_handle = child.handle, + parent_handle = self.handle) + B64Element(e, tag_oob_parent_bpki_ta, self.resource_ca.certificate) - else: - turtles = [] - for parent in self.resource_ca.parents.all(): try: - _ = parent.repository + if self.default_repository: + repo = self.resource_ca.repositories.get(handle = self.default_repository) + else: + repo = self.resource_ca.repositories.get() except rpki.irdb.models.Repository.DoesNotExist: - turtles.append(parent) - try: - _ = self.resource_ca.rootd.repository - except rpki.irdb.models.Repository.DoesNotExist: - turtles.append(self.resource_ca.rootd) - except rpki.irdb.models.Rootd.DoesNotExist: - pass - if len(turtles) != 1: - self.log("No explicit parent_handle given and unable to guess") - raise CouldntFindRepoParent - turtle = turtles[0] - if isinstance(turtle, rpki.irdb.models.Rootd): - parent_handle = self.handle - else: - parent_handle = turtle.handle - self.log("No explicit parent_handle given, guessing parent {}".format(parent_handle)) - - rpki.irdb.models.Repository.objects.get_or_certify( - issuer = self.resource_ca, - handle = parent_handle, - client_handle = x.get("publisher_handle"), - service_uri = x.get("service_uri"), - sia_base = x.get("sia_base"), - rrdp_notification_uri = x.get("rrdp_notification_uri"), - ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_repository_bpki_ta)), - turtle = turtle) - - - @django.db.transaction.atomic - def delete_repository(self, repository_handle): - """ - Delete a repository of this RPKI entity. - """ - - self.resource_ca.repositories.get(handle = repository_handle).delete() - - - @django.db.transaction.atomic - def renew_children(self, child_handle, valid_until = None): - """ - Update validity period for one child entity or, if child_handle is - None, for all child entities. - """ - - if child_handle is None: - children = self.resource_ca.children.all() - else: - children = self.resource_ca.children.filter(handle = child_handle) - - if valid_until is None: - valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365) - else: - valid_until = rpki.sundial.datetime.fromXMLtime(valid_until) - if valid_until < rpki.sundial.now(): - raise PastExpiration("Specified new expiration time %s has passed" % valid_until) - - self.log("New validity date %s" % valid_until) + repo = None - for child in children: - child.valid_until = valid_until - child.save() - - - @django.db.transaction.atomic - def load_prefixes(self, filename, ignore_missing_children = False): - """ - Whack IRDB to match prefixes.csv. 
- """ + if repo is None: + self.log("Couldn't find any usable repositories, not giving referral") - grouped4 = {} - grouped6 = {} + elif repo.handle == self.handle: + SubElement(e, tag_oob_offer) - for handle, prefix in csv_reader(filename, columns = 2): - grouped = grouped6 if ":" in prefix else grouped4 - if handle not in grouped: - grouped[handle] = [] - grouped[handle].append(prefix) - - primary_keys = [] - - for version, grouped, rset in ((4, grouped4, rpki.resource_set.resource_set_ipv4), - (6, grouped6, rpki.resource_set.resource_set_ipv6)): - for handle, prefixes in grouped.iteritems(): - try: - child = self.resource_ca.children.get(handle = handle) - except rpki.irdb.models.Child.DoesNotExist: - if not ignore_missing_children: - raise else: - for prefix in rset(",".join(prefixes)): - obj, created = rpki.irdb.models.ChildNet.objects.get_or_create( - child = child, - start_ip = str(prefix.min), - end_ip = str(prefix.max), - version = version) - primary_keys.append(obj.pk) - - q = rpki.irdb.models.ChildNet.objects - q = q.filter(child__issuer = self.resource_ca) - q = q.exclude(pk__in = primary_keys) - q.delete() - - - @django.db.transaction.atomic - def load_asns(self, filename, ignore_missing_children = False): - """ - Whack IRDB to match asns.csv. - """ - - grouped = {} - - for handle, asn in csv_reader(filename, columns = 2): - if handle not in grouped: - grouped[handle] = [] - grouped[handle].append(asn) - - primary_keys = [] - - for handle, asns in grouped.iteritems(): - try: - child = self.resource_ca.children.get(handle = handle) - except rpki.irdb.models.Child.DoesNotExist: - if not ignore_missing_children: - raise - else: - for asn in rpki.resource_set.resource_set_as(",".join(asns)): - obj, created = rpki.irdb.models.ChildASN.objects.get_or_create( - child = child, - start_as = str(asn.min), - end_as = str(asn.max)) - primary_keys.append(obj.pk) - - q = rpki.irdb.models.ChildASN.objects - q = q.filter(child__issuer = self.resource_ca) - q = q.exclude(pk__in = primary_keys) - q.delete() - - - @django.db.transaction.atomic - def load_roa_requests(self, filename): - """ - Whack IRDB to match roa.csv. - """ - - grouped = {} - - # format: p/n-m asn group - for pnm, asn, group in csv_reader(filename, columns = 3): - key = (asn, group) - if key not in grouped: - grouped[key] = [] - grouped[key].append(pnm) - - # Deleting and recreating all the ROA requests is inefficient, - # but rpkid's current representation of ROA requests is wrong - # (see #32), so it's not worth a lot of effort here as we're - # just going to have to rewrite this soon anyway. + proposed_sia_base = repo.sia_base + child.handle + "/" + referral_cert, created = rpki.irdb.models.Referral.objects.get_or_certify(issuer = self.resource_ca) + auth = rpki.x509.SignedReferral() + auth.set_content(B64Element(None, tag_oob_authorization, child.ta, + nsmap = oob_nsmap, version = oob_version, + authorized_sia_base = proposed_sia_base)) + auth.schema_check() + auth.sign(referral_cert.private_key, referral_cert.certificate, self.resource_ca.latest_crl) + B64Element(e, tag_oob_referral, auth, referrer = repo.client_handle) + + return etree_wrapper(e, msg = "Send this file back to the child you just configured") + + + @django.db.transaction.atomic + def delete_child(self, child_handle): + """ + Delete a child of this RPKI entity. 
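
# For reference, the grouping step shared by load_prefixes() and (with a
# single dictionary) load_asns() above, sketched with plain (handle, prefix)
# tuples standing in for csv_reader() output.

def group_prefixes(rows):
    grouped4, grouped6 = {}, {}
    for handle, prefix in rows:
        grouped = grouped6 if ":" in prefix else grouped4
        grouped.setdefault(handle, []).append(prefix)
    return grouped4, grouped6

# group_prefixes([("alice", "10.0.0.0/8"), ("alice", "2001:db8::/32")])
# -> ({'alice': ['10.0.0.0/8']}, {'alice': ['2001:db8::/32']})
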
+ """ + + self.resource_ca.children.get(handle = child_handle).delete() + + + @django.db.transaction.atomic + def configure_parent(self, filename, parent_handle = None): + """ + Configure a new parent of this RPKI entity, given the output of + the parent's configure_child command as input. Reads the parent's + response XML, extracts the parent's BPKI and service URI + information, cross-certifies the parent's BPKI data into this + entity's BPKI, and checks for offers or referrals of publication + service. If a publication offer or referral is present, we + generate a request-for-service message to that repository, in case + the user wants to avail herself of the referral or offer. + """ + + x = etree_read(filename) + + if x.tag != tag_oob_parent_response: + raise BadXMLMessage("Expected %s, got %s", tag_oob_parent_response, x.tag) + + if parent_handle is None: + parent_handle = x.get("parent_handle") + + offer = x.find(tag_oob_offer) + referral = x.find(tag_oob_referral) + + if offer is not None: + repository_type = "offer" + referrer = None + referral_authorization = None + + elif referral is not None: + repository_type = "referral" + referrer = referral.get("referrer") + referral_authorization = rpki.x509.SignedReferral(Base64 = referral.text) - self.resource_ca.roa_requests.all().delete() - - for key, pnms in grouped.iteritems(): - asn, group = key - - roa_request = self.resource_ca.roa_requests.create(asn = asn) - - for pnm in pnms: - if ":" in pnm: - p = rpki.resource_set.roa_prefix_ipv6.parse_str(pnm) - v = 6 else: - p = rpki.resource_set.roa_prefix_ipv4.parse_str(pnm) - v = 4 - roa_request.prefixes.create( - version = v, - prefix = str(p.prefix), - prefixlen = int(p.prefixlen), - max_prefixlen = int(p.max_prefixlen)) - + repository_type = "none" + referrer = None + referral_authorization = None + + self.log("Parent calls itself %r, we call it %r" % (x.get("parent_handle"), parent_handle)) + self.log("Parent calls us %r" % x.get("child_handle")) + + parent, created = rpki.irdb.models.Parent.objects.get_or_certify( + issuer = self.resource_ca, + handle = parent_handle, + child_handle = x.get("child_handle"), + parent_handle = x.get("parent_handle"), + service_uri = x.get("service_uri"), + ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_parent_bpki_ta)), + repository_type = repository_type, + referrer = referrer, + referral_authorization = referral_authorization) + + return self.generate_repository_request(parent), parent_handle + + + def generate_repository_request(self, parent): + """ + Generate repository request for a given parent. + """ + + e = Element(tag_oob_publisher_request, nsmap = oob_nsmap, version = oob_version, + publisher_handle = self.handle) + B64Element(e, tag_oob_publisher_bpki_ta, self.resource_ca.certificate) + if parent.repository_type == "referral": + B64Element(e, tag_oob_referral, parent.referral_authorization, + referrer = parent.referrer) + + return etree_wrapper(e, msg = "This is the file to send to the repository operator") + + + @django.db.transaction.atomic + def delete_parent(self, parent_handle): + """ + Delete a parent of this RPKI entity. + """ + + self.resource_ca.parents.get(handle = parent_handle).delete() + + + @django.db.transaction.atomic + def delete_rootd(self): + """ + Delete rootd associated with this RPKI entity. 
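
# For reference, the three-way classification configure_parent() performs on
# the parent's response, reduced to a sketch.  "offer" and "referral" are
# assumed to be Element objects or None, mirroring the results of
# x.find(tag_oob_offer) and x.find(tag_oob_referral).

def classify_repository_hint(offer, referral):
    if offer is not None:
        return "offer", None, None
    if referral is not None:
        return "referral", referral.get("referrer"), referral.text
    return "none", None, None
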
+ """ + + self.resource_ca.rootd.delete() + + + @django.db.transaction.atomic + def configure_publication_client(self, filename, sia_base = None, flat = False): + """ + Configure publication server to know about a new client, given the + client's request-for-service message as input. Reads the client's + request for service, cross-certifies the client's BPKI data, and + generates a response message containing the repository's BPKI data + and service URI. + """ + + x = etree_read(filename) + + if x.tag != tag_oob_publisher_request: + raise BadXMLMessage("Expected %s, got %s", tag_oob_publisher_request, x.tag) + + client_ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_publisher_bpki_ta)) + + referral = x.find(tag_oob_referral) + + default_sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{handle}/".format( + self = self, + handle = x.get("publisher_handle")) + + if sia_base is None and flat: + self.log("Flat publication structure forced, homing client at top-level") + sia_base = default_sia_base + + if sia_base is None and referral is not None: + self.log("This looks like a referral, checking") + try: + referrer = referral.get("referrer") + referrer = self.server_ca.clients.get(handle = referrer) + referral = rpki.x509.SignedReferral(Base64 = referral.text) + referral = referral.unwrap(ta = (referrer.certificate, self.server_ca.certificate)) + if rpki.x509.X509(Base64 = referral.text) != client_ta: + raise BadXMLMessage("Referral trust anchor does not match") + sia_base = referral.get("authorized_sia_base") + except rpki.irdb.models.Client.DoesNotExist: + self.log("We have no record of the client ({}) alleged to have made this referral".format(referrer)) + + if sia_base is None and referral is None: + self.log("This might be an offer, checking") + try: + parent = rpki.irdb.models.ResourceHolderCA.objects.get(children__ta = client_ta) + if "/" in parent.repositories.get(ta = self.server_ca.certificate).client_handle: + self.log("Client's parent is not top-level, this is not a valid offer") + else: + self.log("Found client and its parent, nesting") + sia_base = "rsync://{self.rsync_server}/{self.rsync_module}/{parent_handle}/{client_handle}/".format( + self = self, + parent_handle = parent.handle, + client_handle = x.get("publisher_handle")) + except rpki.irdb.models.Repository.DoesNotExist: + self.log("Found client's parent, but repository isn't set, this shouldn't happen!") + except rpki.irdb.models.ResourceHolderCA.DoesNotExist: + try: + rpki.irdb.models.Rootd.objects.get(issuer__certificate = client_ta) + self.log("This client's parent is rootd") + sia_base = default_sia_base + except rpki.irdb.models.Rootd.DoesNotExist: + self.log("We don't host this client's parent, so we didn't make an offer") + + if sia_base is None: + self.log("Don't know where else to nest this client, so defaulting to top-level") + sia_base = default_sia_base + + if not sia_base.startswith("rsync://"): + raise BadXMLMessage("Malformed sia_base parameter %r, should start with 'rsync://'" % sia_base) + + client_handle = "/".join(sia_base.rstrip("/").split("/")[4:]) + + self.log("Client calls itself %r, we call it %r" % ( + x.get("publisher_handle"), client_handle)) + + client, created = rpki.irdb.models.Client.objects.get_or_certify( + issuer = self.server_ca, + handle = client_handle, + ta = client_ta, + sia_base = sia_base) + + return self.generate_repository_response(client), client_handle + + + def generate_repository_response(self, client): + """ + Generate repository response XML to a given client. 
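
# For reference, the client_handle derivation used near the end of
# configure_publication_client() above: everything after the rsync module
# name in sia_base becomes the handle.

def client_handle_from_sia_base(sia_base):
    if not sia_base.startswith("rsync://"):
        raise ValueError("Malformed sia_base %r, should start with 'rsync://'" % sia_base)
    # Components are: "rsync:", "", host, module, then the handle path.
    return "/".join(sia_base.rstrip("/").split("/")[4:])

# client_handle_from_sia_base("rsync://repo.example.org/rpki/alice/bob/")
# -> "alice/bob"
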
+ """ + + service_uri = "http://{host}:{port}/client/{handle}".format( + host = self.cfg.get("pubd_server_host", section = myrpki_section), + port = self.cfg.get("pubd_server_port", section = myrpki_section), + handle = client.handle) + + rrdp_uri = self.cfg.get("publication_rrdp_notification_uri", section = myrpki_section, + default = "") or None + + e = Element(tag_oob_repository_response, nsmap = oob_nsmap, version = oob_version, + service_uri = service_uri, + publisher_handle = client.handle, + sia_base = client.sia_base) + + if rrdp_uri is not None: + e.set("rrdp_notification_uri", rrdp_uri) + + B64Element(e, tag_oob_repository_bpki_ta, self.server_ca.certificate) + return etree_wrapper(e, msg = "Send this file back to the publication client you just configured") + + + @django.db.transaction.atomic + def delete_publication_client(self, client_handle): + """ + Delete a publication client of this RPKI entity. + """ + + self.server_ca.clients.get(handle = client_handle).delete() + + + @django.db.transaction.atomic + def configure_repository(self, filename, parent_handle = None): + """ + Configure a publication repository for this RPKI entity, given the + repository's response to our request-for-service message as input. + Reads the repository's response, extracts and cross-certifies the + BPKI data and service URI, and links the repository data with the + corresponding parent data in our local database. + """ + + x = etree_read(filename) + + if x.tag != tag_oob_repository_response: + raise BadXMLMessage("Expected %s, got %s", tag_oob_repository_response, x.tag) + + self.log("Repository calls us %r" % (x.get("publisher_handle"))) + + if parent_handle is not None: + self.log("Explicit parent_handle given") + try: + if parent_handle == self.handle: + turtle = self.resource_ca.rootd + else: + turtle = self.resource_ca.parents.get(handle = parent_handle) + except (rpki.irdb.models.Parent.DoesNotExist, rpki.irdb.models.Rootd.DoesNotExist): + self.log("Could not find parent %r in our database" % parent_handle) + raise CouldntFindRepoParent - @django.db.transaction.atomic - def load_ghostbuster_requests(self, filename, parent = None): - """ - Whack IRDB to match ghostbusters.vcard. - - This accepts one or more vCards from a file. 
- """ - - self.resource_ca.ghostbuster_requests.filter(parent = parent).delete() + else: + turtles = [] + for parent in self.resource_ca.parents.all(): + try: + _ = parent.repository + except rpki.irdb.models.Repository.DoesNotExist: + turtles.append(parent) + try: + _ = self.resource_ca.rootd.repository + except rpki.irdb.models.Repository.DoesNotExist: + turtles.append(self.resource_ca.rootd) + except rpki.irdb.models.Rootd.DoesNotExist: + pass + if len(turtles) != 1: + self.log("No explicit parent_handle given and unable to guess") + raise CouldntFindRepoParent + turtle = turtles[0] + if isinstance(turtle, rpki.irdb.models.Rootd): + parent_handle = self.handle + else: + parent_handle = turtle.handle + self.log("No explicit parent_handle given, guessing parent {}".format(parent_handle)) + + rpki.irdb.models.Repository.objects.get_or_certify( + issuer = self.resource_ca, + handle = parent_handle, + client_handle = x.get("publisher_handle"), + service_uri = x.get("service_uri"), + sia_base = x.get("sia_base"), + rrdp_notification_uri = x.get("rrdp_notification_uri"), + ta = rpki.x509.X509(Base64 = x.findtext(tag_oob_repository_bpki_ta)), + turtle = turtle) + + + @django.db.transaction.atomic + def delete_repository(self, repository_handle): + """ + Delete a repository of this RPKI entity. + """ + + self.resource_ca.repositories.get(handle = repository_handle).delete() + + + @django.db.transaction.atomic + def renew_children(self, child_handle, valid_until = None): + """ + Update validity period for one child entity or, if child_handle is + None, for all child entities. + """ + + if child_handle is None: + children = self.resource_ca.children.all() + else: + children = self.resource_ca.children.filter(handle = child_handle) - vcard = [] + if valid_until is None: + valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365) + else: + valid_until = rpki.sundial.datetime.fromXMLtime(valid_until) + if valid_until < rpki.sundial.now(): + raise PastExpiration("Specified new expiration time %s has passed" % valid_until) + + self.log("New validity date %s" % valid_until) + + for child in children: + child.valid_until = valid_until + child.save() + + + @django.db.transaction.atomic + def load_prefixes(self, filename, ignore_missing_children = False): + """ + Whack IRDB to match prefixes.csv. + """ + + grouped4 = {} + grouped6 = {} + + for handle, prefix in csv_reader(filename, columns = 2): + grouped = grouped6 if ":" in prefix else grouped4 + if handle not in grouped: + grouped[handle] = [] + grouped[handle].append(prefix) + + primary_keys = [] + + for version, grouped, rset in ((4, grouped4, rpki.resource_set.resource_set_ipv4), + (6, grouped6, rpki.resource_set.resource_set_ipv6)): + for handle, prefixes in grouped.iteritems(): + try: + child = self.resource_ca.children.get(handle = handle) + except rpki.irdb.models.Child.DoesNotExist: + if not ignore_missing_children: + raise + else: + for prefix in rset(",".join(prefixes)): + obj, created = rpki.irdb.models.ChildNet.objects.get_or_create( + child = child, + start_ip = str(prefix.min), + end_ip = str(prefix.max), + version = version) + primary_keys.append(obj.pk) + + q = rpki.irdb.models.ChildNet.objects + q = q.filter(child__issuer = self.resource_ca) + q = q.exclude(pk__in = primary_keys) + q.delete() + + + @django.db.transaction.atomic + def load_asns(self, filename, ignore_missing_children = False): + """ + Whack IRDB to match asns.csv. 
+ """ + + grouped = {} + + for handle, asn in csv_reader(filename, columns = 2): + if handle not in grouped: + grouped[handle] = [] + grouped[handle].append(asn) + + primary_keys = [] + + for handle, asns in grouped.iteritems(): + try: + child = self.resource_ca.children.get(handle = handle) + except rpki.irdb.models.Child.DoesNotExist: + if not ignore_missing_children: + raise + else: + for asn in rpki.resource_set.resource_set_as(",".join(asns)): + obj, created = rpki.irdb.models.ChildASN.objects.get_or_create( + child = child, + start_as = str(asn.min), + end_as = str(asn.max)) + primary_keys.append(obj.pk) + + q = rpki.irdb.models.ChildASN.objects + q = q.filter(child__issuer = self.resource_ca) + q = q.exclude(pk__in = primary_keys) + q.delete() + + + @django.db.transaction.atomic + def load_roa_requests(self, filename): + """ + Whack IRDB to match roa.csv. + """ + + grouped = {} + + # format: p/n-m asn group + for pnm, asn, group in csv_reader(filename, columns = 3): + key = (asn, group) + if key not in grouped: + grouped[key] = [] + grouped[key].append(pnm) + + # Deleting and recreating all the ROA requests is inefficient, + # but rpkid's current representation of ROA requests is wrong + # (see #32), so it's not worth a lot of effort here as we're + # just going to have to rewrite this soon anyway. + + self.resource_ca.roa_requests.all().delete() + + for key, pnms in grouped.iteritems(): + asn, group = key + + roa_request = self.resource_ca.roa_requests.create(asn = asn) + + for pnm in pnms: + if ":" in pnm: + p = rpki.resource_set.roa_prefix_ipv6.parse_str(pnm) + v = 6 + else: + p = rpki.resource_set.roa_prefix_ipv4.parse_str(pnm) + v = 4 + roa_request.prefixes.create( + version = v, + prefix = str(p.prefix), + prefixlen = int(p.prefixlen), + max_prefixlen = int(p.max_prefixlen)) + + + @django.db.transaction.atomic + def load_ghostbuster_requests(self, filename, parent = None): + """ + Whack IRDB to match ghostbusters.vcard. + + This accepts one or more vCards from a file. + """ + + self.resource_ca.ghostbuster_requests.filter(parent = parent).delete() - for line in open(filename, "r"): - if not vcard and not line.upper().startswith("BEGIN:VCARD"): - continue - vcard.append(line) - if line.upper().startswith("END:VCARD"): - self.resource_ca.ghostbuster_requests.create(vcard = "".join(vcard), parent = parent) vcard = [] + for line in open(filename, "r"): + if not vcard and not line.upper().startswith("BEGIN:VCARD"): + continue + vcard.append(line) + if line.upper().startswith("END:VCARD"): + self.resource_ca.ghostbuster_requests.create(vcard = "".join(vcard), parent = parent) + vcard = [] - def call_rpkid(self, q_msg, suppress_error_check = False): - """ - Issue a call to rpkid, return result. - """ - url = "http://%s:%s/left-right" % ( - self.cfg.get("rpkid_server_host", section = myrpki_section), - self.cfg.get("rpkid_server_port", section = myrpki_section)) + def call_rpkid(self, q_msg, suppress_error_check = False): + """ + Issue a call to rpkid, return result. 
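
# For reference, the vCard accumulation loop used by load_ghostbuster_requests()
# above, as a standalone generator: lines outside BEGIN:VCARD/END:VCARD pairs
# are ignored, and each complete card is yielded as a single string.

def split_vcards(lines):
    vcard = []
    for line in lines:
        if not vcard and not line.upper().startswith("BEGIN:VCARD"):
            continue
        vcard.append(line)
        if line.upper().startswith("END:VCARD"):
            yield "".join(vcard)
            vcard = []
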
+ """ - rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid") - irbe = self.server_ca.ee_certificates.get(purpose = "irbe") + url = "http://%s:%s/left-right" % ( + self.cfg.get("rpkid_server_host", section = myrpki_section), + self.cfg.get("rpkid_server_port", section = myrpki_section)) - r_msg = rpki.http_simple.client( - proto_cms_msg = rpki.left_right.cms_msg, - client_key = irbe.private_key, - client_cert = irbe.certificate, - server_ta = self.server_ca.certificate, - server_cert = rpkid.certificate, - url = url, - q_msg = q_msg, - debug = self.show_xml) + rpkid = self.server_ca.ee_certificates.get(purpose = "rpkid") + irbe = self.server_ca.ee_certificates.get(purpose = "irbe") - if not suppress_error_check: - self.check_error_report(r_msg) - return r_msg + r_msg = rpki.http_simple.client( + proto_cms_msg = rpki.left_right.cms_msg, + client_key = irbe.private_key, + client_cert = irbe.certificate, + server_ta = self.server_ca.certificate, + server_cert = rpkid.certificate, + url = url, + q_msg = q_msg, + debug = self.show_xml) + if not suppress_error_check: + self.check_error_report(r_msg) + return r_msg - def _rpkid_tenant_control(self, *bools): - assert all(isinstance(b, str) for b in bools) - q_msg = self._compose_left_right_query() - q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = self.handle) - for b in bools: - q_pdu.set(b, "yes") - return self.call_rpkid(q_msg) + def _rpkid_tenant_control(self, *bools): + assert all(isinstance(b, str) for b in bools) + q_msg = self._compose_left_right_query() + q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = self.handle) + for b in bools: + q_pdu.set(b, "yes") + return self.call_rpkid(q_msg) - def run_rpkid_now(self): - """ - Poke rpkid to immediately run the cron job for the current handle. - This method is used by the GUI when a user has changed something in the - IRDB (ghostbuster, roa) which does not require a full synchronize() call, - to force the object to be immediately issued. - """ + def run_rpkid_now(self): + """ + Poke rpkid to immediately run the cron job for the current handle. - return self._rpkid_tenant_control("run_now") + This method is used by the GUI when a user has changed something in the + IRDB (ghostbuster, roa) which does not require a full synchronize() call, + to force the object to be immediately issued. + """ + return self._rpkid_tenant_control("run_now") - def publish_world_now(self): - """ - Poke rpkid to (re)publish everything for the current handle. - """ - return self._rpkid_tenant_control("publish_world_now") + def publish_world_now(self): + """ + Poke rpkid to (re)publish everything for the current handle. + """ + return self._rpkid_tenant_control("publish_world_now") - def reissue(self): - """ - Poke rpkid to reissue everything for the current handle. - """ - return self._rpkid_tenant_control("reissue") + def reissue(self): + """ + Poke rpkid to reissue everything for the current handle. + """ + return self._rpkid_tenant_control("reissue") - def rekey(self): - """ - Poke rpkid to rekey all RPKI certificates received for the current - handle. - """ - - return self._rpkid_tenant_control("rekey") - - - def revoke(self): - """ - Poke rpkid to revoke old RPKI keys for the current handle. - """ - - return self._rpkid_tenant_control("revoke") + def rekey(self): + """ + Poke rpkid to rekey all RPKI certificates received for the current + handle. 
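
# For reference, a stripped-down sketch of the PDU queued by
# _rpkid_tenant_control() above: a left-right <tenant/> "set" element with
# each requested boolean attribute forced to "yes".  The bare "tenant" tag
# here is an illustrative stand-in for the namespaced constant defined in
# rpki.left_right.

from lxml.etree import Element, SubElement

def tenant_control_pdu(q_msg, tenant_handle, *bools):
    q_pdu = SubElement(q_msg, "tenant", action = "set", tenant_handle = tenant_handle)
    for b in bools:
        q_pdu.set(b, "yes")
    return q_pdu

# tenant_control_pdu(Element("msg"), "alice", "run_now") mirrors run_rpkid_now().
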
+ """ + + return self._rpkid_tenant_control("rekey") - def revoke_forgotten(self): - """ - Poke rpkid to revoke old forgotten RPKI keys for the current handle. - """ - return self._rpkid_tenant_control("revoke_forgotten") + def revoke(self): + """ + Poke rpkid to revoke old RPKI keys for the current handle. + """ + return self._rpkid_tenant_control("revoke") - def clear_all_sql_cms_replay_protection(self): - """ - Tell rpkid and pubd to clear replay protection for all SQL-based - entities. This is a fairly blunt instrument, but as we don't - expect this to be necessary except in the case of gross - misconfiguration, it should suffice. - """ - if self.run_rpkid: - q_msg = self._compose_left_right_query() - for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): - SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", - tenant_handle = ca.handle, clear_replay_protection = "yes") - self.call_rpkid(q_msg) + def revoke_forgotten(self): + """ + Poke rpkid to revoke old forgotten RPKI keys for the current handle. + """ + + return self._rpkid_tenant_control("revoke_forgotten") - if self.run_pubd: - q_msg = self._compose_publication_control_query() - for client in self.server_ca.clients.all(): - SubElement(q_msg, rpki.publication_control.tag_client, action = "set", - client_handle = client.handle, clear_reply_protection = "yes") - self.call_pubd(q_msg) + def clear_all_sql_cms_replay_protection(self): + """ + Tell rpkid and pubd to clear replay protection for all SQL-based + entities. This is a fairly blunt instrument, but as we don't + expect this to be necessary except in the case of gross + misconfiguration, it should suffice. + """ - def call_pubd(self, q_msg): - """ - Issue a call to pubd, return result. - """ + if self.run_rpkid: + q_msg = self._compose_left_right_query() + for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): + SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", + tenant_handle = ca.handle, clear_replay_protection = "yes") + self.call_rpkid(q_msg) - url = "http://%s:%s/control" % ( - self.cfg.get("pubd_server_host", section = myrpki_section), - self.cfg.get("pubd_server_port", section = myrpki_section)) + if self.run_pubd: + q_msg = self._compose_publication_control_query() + for client in self.server_ca.clients.all(): + SubElement(q_msg, rpki.publication_control.tag_client, action = "set", + client_handle = client.handle, clear_reply_protection = "yes") + self.call_pubd(q_msg) - pubd = self.server_ca.ee_certificates.get(purpose = "pubd") - irbe = self.server_ca.ee_certificates.get(purpose = "irbe") - r_msg = rpki.http_simple.client( - proto_cms_msg = rpki.publication_control.cms_msg, - client_key = irbe.private_key, - client_cert = irbe.certificate, - server_ta = self.server_ca.certificate, - server_cert = pubd.certificate, - url = url, - q_msg = q_msg, - debug = self.show_xml) + def call_pubd(self, q_msg): + """ + Issue a call to pubd, return result. + """ - self.check_error_report(r_msg) - return r_msg + url = "http://%s:%s/control" % ( + self.cfg.get("pubd_server_host", section = myrpki_section), + self.cfg.get("pubd_server_port", section = myrpki_section)) + pubd = self.server_ca.ee_certificates.get(purpose = "pubd") + irbe = self.server_ca.ee_certificates.get(purpose = "irbe") - def check_error_report(self, r_msg): - """ - Check a response from rpkid or pubd for error_report PDUs, log and - throw exceptions as needed. 
- """ + r_msg = rpki.http_simple.client( + proto_cms_msg = rpki.publication_control.cms_msg, + client_key = irbe.private_key, + client_cert = irbe.certificate, + server_ta = self.server_ca.certificate, + server_cert = pubd.certificate, + url = url, + q_msg = q_msg, + debug = self.show_xml) - failed = False - for r_pdu in r_msg.getiterator(rpki.left_right.tag_report_error): - failed = True - self.log("rpkid reported failure: %s" % r_pdu.get("error_code")) - if r_pdu.text: - self.log(r_pdu.text) - for r_pdu in r_msg.getiterator(rpki.publication_control.tag_report_error): - failed = True - self.log("pubd reported failure: %s" % r_pdu.get("error_code")) - if r_pdu.text: - self.log(r_pdu.text) - if failed: - raise CouldntTalkToDaemon - - - @django.db.transaction.atomic - def synchronize(self, *handles_to_poke): - """ - Configure RPKI daemons with the data built up by the other - commands in this program. Commands which modify the IRDB and want - to whack everything into sync should call this when they're done, - but be warned that this can be slow with a lot of CAs. + self.check_error_report(r_msg) + return r_msg - Any arguments given are handles of CAs which should be poked with a - operation. - """ - for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): - self.synchronize_rpkid_one_ca_core(ca, ca.handle in handles_to_poke) - self.synchronize_pubd_core() - self.synchronize_rpkid_deleted_core() + def check_error_report(self, r_msg): + """ + Check a response from rpkid or pubd for error_report PDUs, log and + throw exceptions as needed. + """ + failed = False + for r_pdu in r_msg.getiterator(rpki.left_right.tag_report_error): + failed = True + self.log("rpkid reported failure: %s" % r_pdu.get("error_code")) + if r_pdu.text: + self.log(r_pdu.text) + for r_pdu in r_msg.getiterator(rpki.publication_control.tag_report_error): + failed = True + self.log("pubd reported failure: %s" % r_pdu.get("error_code")) + if r_pdu.text: + self.log(r_pdu.text) + if failed: + raise CouldntTalkToDaemon - @django.db.transaction.atomic - def synchronize_ca(self, ca = None, poke = False): - """ - Synchronize one CA. Most commands which modify a CA should call - this. CA to synchronize defaults to the current resource CA. - """ - if ca is None: - ca = self.resource_ca - self.synchronize_rpkid_one_ca_core(ca, poke) + @django.db.transaction.atomic + def synchronize(self, *handles_to_poke): + """ + Configure RPKI daemons with the data built up by the other + commands in this program. Commands which modify the IRDB and want + to whack everything into sync should call this when they're done, + but be warned that this can be slow with a lot of CAs. + + Any arguments given are handles of CAs which should be poked with a + operation. + """ + + for ca in rpki.irdb.models.ResourceHolderCA.objects.all(): + self.synchronize_rpkid_one_ca_core(ca, ca.handle in handles_to_poke) + self.synchronize_pubd_core() + self.synchronize_rpkid_deleted_core() - @django.db.transaction.atomic - def synchronize_deleted_ca(self): - """ - Delete CAs which are present in rpkid's database but not in the - IRDB. - """ + @django.db.transaction.atomic + def synchronize_ca(self, ca = None, poke = False): + """ + Synchronize one CA. Most commands which modify a CA should call + this. CA to synchronize defaults to the current resource CA. 
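
# For reference, the scan performed by check_error_report() above, reduced to
# a generator over already-parsed <report_error/> elements; the real code logs
# each failure and raises CouldntTalkToDaemon if any were found.

def report_errors(r_msg, error_tags):
    for tag in error_tags:
        for r_pdu in r_msg.iter(tag):
            yield r_pdu.get("error_code"), r_pdu.text

# list(report_errors(r_msg, ["report_error"])) collects (error_code, text) pairs.
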
+ """ + + if ca is None: + ca = self.resource_ca + self.synchronize_rpkid_one_ca_core(ca, poke) + + + @django.db.transaction.atomic + def synchronize_deleted_ca(self): + """ + Delete CAs which are present in rpkid's database but not in the + IRDB. + """ + + self.synchronize_rpkid_deleted_core() + + + @django.db.transaction.atomic + def synchronize_pubd(self): + """ + Synchronize pubd. Most commands which modify pubd should call this. + """ + + self.synchronize_pubd_core() + + + def synchronize_rpkid_one_ca_core(self, ca, poke = False): + """ + Synchronize one CA. This is the core synchronization code. Don't + call this directly, instead call one of the methods that calls + this inside a Django commit wrapper. + + This method configures rpkid with data built up by the other + commands in this program. Most commands which modify IRDB values + related to rpkid should call this when they're done. + + If poke is True, we append a left-right run_now operation for this + CA to the end of whatever other commands this method generates. + """ + + # We can use a single BSC for everything -- except BSC key + # rollovers. Drive off that bridge when we get to it. + + bsc_handle = "bsc" + + # A default RPKI CRL cycle time of six hours seems sane. One + # might make a case for a day instead, but we've been running with + # six hours for a while now and haven't seen a lot of whining. + + tenant_crl_interval = self.cfg.getint("tenant_crl_interval", 6 * 60 * 60, section = myrpki_section) + + # regen_margin now just controls how long before RPKI certificate + # expiration we should regenerate; it used to control the interval + # before RPKI CRL staleness at which to regenerate the CRL, but + # using the same timer value for both of these is hopeless. + # + # A default regeneration margin of two weeks gives enough time for + # humans to react. We add a two hour fudge factor in the hope + # that this will regenerate certificates just *before* the + # companion cron job warns of impending doom. + + tenant_regen_margin = self.cfg.getint("tenant_regen_margin", 14 * 24 * 60 * 60 + 2 * 60, section = myrpki_section) + + # See what rpkid already has on file for this entity. 
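
# For reference, the two timer defaults above spelled out in seconds.  Note
# that the literal expression adds 2 * 60 seconds (two minutes), although the
# accompanying comment speaks of a two hour fudge factor.

tenant_crl_interval_default = 6 * 60 * 60                  # 21600 s   = 6 hours
tenant_regen_margin_default = 14 * 24 * 60 * 60 + 2 * 60   # 1209720 s = 14 days + 2 minutes
assert tenant_crl_interval_default == 21600
assert tenant_regen_margin_default == 1209720
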
+ + q_msg = self._compose_left_right_query() + SubElement(q_msg, rpki.left_right.tag_tenant, action = "get", tenant_handle = ca.handle) + SubElement(q_msg, rpki.left_right.tag_bsc, action = "list", tenant_handle = ca.handle) + SubElement(q_msg, rpki.left_right.tag_repository, action = "list", tenant_handle = ca.handle) + SubElement(q_msg, rpki.left_right.tag_parent, action = "list", tenant_handle = ca.handle) + SubElement(q_msg, rpki.left_right.tag_child, action = "list", tenant_handle = ca.handle) + + r_msg = self.call_rpkid(q_msg, suppress_error_check = True) + + self.check_error_report(r_msg) + + tenant_pdu = r_msg.find(rpki.left_right.tag_tenant) + + bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu) + for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc)) + repository_pdus = dict((r_pdu.get("repository_handle"), r_pdu) + for r_pdu in r_msg.getiterator(rpki.left_right.tag_repository)) + parent_pdus = dict((r_pdu.get("parent_handle"), r_pdu) + for r_pdu in r_msg.getiterator(rpki.left_right.tag_parent)) + child_pdus = dict((r_pdu.get("child_handle"), r_pdu) + for r_pdu in r_msg.getiterator(rpki.left_right.tag_child)) + + q_msg = self._compose_left_right_query() + + tenant_cert, created = rpki.irdb.models.HostedCA.objects.get_or_certify( + issuer = self.server_ca, + hosted = ca) + + # There should be exactly one object per hosted entity, by definition + + if (tenant_pdu is None or + tenant_pdu.get("crl_interval") != str(tenant_crl_interval) or + tenant_pdu.get("regen_margin") != str(tenant_regen_margin) or + tenant_pdu.findtext(rpki.left_right.tag_bpki_cert, "").decode("base64") != tenant_cert.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, + action = "create" if tenant_pdu is None else "set", + tag = "tenant", + tenant_handle = ca.handle, + crl_interval = str(tenant_crl_interval), + regen_margin = str(tenant_regen_margin)) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64() + + # In general we only need one per . BSC objects + # are a little unusual in that the keypair and PKCS #10 + # subelement are generated by rpkid, so complete setup requires + # two round trips. + + bsc_pdu = bsc_pdus.pop(bsc_handle, None) + + if bsc_pdu is None or bsc_pdu.find(rpki.left_right.tag_pkcs10_request) is None: + SubElement(q_msg, rpki.left_right.tag_bsc, + action = "create" if bsc_pdu is None else "set", + tag = "bsc", + tenant_handle = ca.handle, + bsc_handle = bsc_handle, + generate_keypair = "yes") + + for bsc_handle in bsc_pdus: + SubElement(q_msg, rpki.left_right.tag_bsc, + action = "destroy", tenant_handle = ca.handle, bsc_handle = bsc_handle) + + # If we've already got actions queued up, run them now, so we + # can finish setting up the BSC before anything tries to use it. 
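
# For reference, the create-versus-set pattern used above for the <tenant/>,
# <bsc/>, <repository/>, <parent/> and <child/> objects follows one template:
# look up what rpkid reported for a handle, compare the attributes the IRDB
# wants, and queue an action only when something differs.  A schematic sketch
# with plain dictionaries standing in for the PDU elements:

def plan_action(existing, wanted):
    if existing is None:
        return "create"
    if any(existing.get(k) != v for k, v in wanted.items()):
        return "set"
    return None          # already in sync, queue nothing

# plan_action(None, {"crl_interval": "21600"})                      -> "create"
# plan_action({"crl_interval": "3600"}, {"crl_interval": "21600"})  -> "set"
# plan_action({"crl_interval": "21600"}, {"crl_interval": "21600"}) -> None
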
+ + if len(q_msg) > 0: + SubElement(q_msg, rpki.left_right.tag_bsc, action = "list", tag = "bsc", tenant_handle = ca.handle) + r_msg = self.call_rpkid(q_msg) + bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu) + for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc) + if r_pdu.get("action") == "list") + bsc_pdu = bsc_pdus.pop(bsc_handle, None) + + q_msg = self._compose_left_right_query() + + bsc_pkcs10 = bsc_pdu.find(rpki.left_right.tag_pkcs10_request) + assert bsc_pkcs10 is not None + + bsc, created = rpki.irdb.models.BSC.objects.get_or_certify( + issuer = ca, + handle = bsc_handle, + pkcs10 = rpki.x509.PKCS10(Base64 = bsc_pkcs10.text)) + + if (bsc_pdu.findtext(rpki.left_right.tag_signing_cert, "").decode("base64") != bsc.certificate.get_DER() or + bsc_pdu.findtext(rpki.left_right.tag_signing_cert_crl, "").decode("base64") != ca.latest_crl.get_DER()): + q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc, + action = "set", + tag = "bsc", + tenant_handle = ca.handle, + bsc_handle = bsc_handle) + SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64() + SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = ca.latest_crl.get_Base64() + + # At present we need one per , not because + # rpkid requires that, but because pubd does. pubd probably should + # be fixed to support a single client allowed to update multiple + # trees, but for the moment the easiest way forward is just to + # enforce a 1:1 mapping between and objects + + for repository in ca.repositories.all(): + + repository_pdu = repository_pdus.pop(repository.handle, None) + + if (repository_pdu is None or + repository_pdu.get("bsc_handle") != bsc_handle or + repository_pdu.get("peer_contact_uri") != repository.service_uri or + repository_pdu.get("rrdp_notification_uri") != repository.rrdp_notification_uri or + repository_pdu.findtext(rpki.left_right.tag_bpki_cert, "").decode("base64") != repository.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.left_right.tag_repository, + action = "create" if repository_pdu is None else "set", + tag = repository.handle, + tenant_handle = ca.handle, + repository_handle = repository.handle, + bsc_handle = bsc_handle, + peer_contact_uri = repository.service_uri) + if repository.rrdp_notification_uri: + q_pdu.set("rrdp_notification_uri", repository.rrdp_notification_uri) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64() + + for repository_handle in repository_pdus: + SubElement(q_msg, rpki.left_right.tag_repository, action = "destroy", + tenant_handle = ca.handle, repository_handle = repository_handle) + + # setup code currently assumes 1:1 mapping between + # and , and further assumes that the handles + # for an associated pair are the identical (that is: + # parent.repository_handle == parent.parent_handle). + # + # If no such repository exists, our choices are to ignore the + # parent entry or throw an error. For now, we ignore the parent. 
+ + for parent in ca.parents.all(): + + try: + parent_pdu = parent_pdus.pop(parent.handle, None) + + if (parent_pdu is None or + parent_pdu.get("bsc_handle") != bsc_handle or + parent_pdu.get("repository_handle") != parent.handle or + parent_pdu.get("peer_contact_uri") != parent.service_uri or + parent_pdu.get("sia_base") != parent.repository.sia_base or + parent_pdu.get("sender_name") != parent.child_handle or + parent_pdu.get("recipient_name") != parent.parent_handle or + parent_pdu.findtext(rpki.left_right.tag_bpki_cert, "").decode("base64") != parent.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, + action = "create" if parent_pdu is None else "set", + tag = parent.handle, + tenant_handle = ca.handle, + parent_handle = parent.handle, + bsc_handle = bsc_handle, + repository_handle = parent.handle, + peer_contact_uri = parent.service_uri, + sia_base = parent.repository.sia_base, + sender_name = parent.child_handle, + recipient_name = parent.parent_handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64() + + except rpki.irdb.models.Repository.DoesNotExist: + pass - self.synchronize_rpkid_deleted_core() - - - @django.db.transaction.atomic - def synchronize_pubd(self): - """ - Synchronize pubd. Most commands which modify pubd should call this. - """ - - self.synchronize_pubd_core() - - - def synchronize_rpkid_one_ca_core(self, ca, poke = False): - """ - Synchronize one CA. This is the core synchronization code. Don't - call this directly, instead call one of the methods that calls - this inside a Django commit wrapper. - - This method configures rpkid with data built up by the other - commands in this program. Most commands which modify IRDB values - related to rpkid should call this when they're done. - - If poke is True, we append a left-right run_now operation for this - CA to the end of whatever other commands this method generates. - """ - - # We can use a single BSC for everything -- except BSC key - # rollovers. Drive off that bridge when we get to it. - - bsc_handle = "bsc" - - # A default RPKI CRL cycle time of six hours seems sane. One - # might make a case for a day instead, but we've been running with - # six hours for a while now and haven't seen a lot of whining. - - tenant_crl_interval = self.cfg.getint("tenant_crl_interval", 6 * 60 * 60, section = myrpki_section) - - # regen_margin now just controls how long before RPKI certificate - # expiration we should regenerate; it used to control the interval - # before RPKI CRL staleness at which to regenerate the CRL, but - # using the same timer value for both of these is hopeless. - # - # A default regeneration margin of two weeks gives enough time for - # humans to react. We add a two hour fudge factor in the hope - # that this will regenerate certificates just *before* the - # companion cron job warns of impending doom. - - tenant_regen_margin = self.cfg.getint("tenant_regen_margin", 14 * 24 * 60 * 60 + 2 * 60, section = myrpki_section) - - # See what rpkid already has on file for this entity. 
- - q_msg = self._compose_left_right_query() - SubElement(q_msg, rpki.left_right.tag_tenant, action = "get", tenant_handle = ca.handle) - SubElement(q_msg, rpki.left_right.tag_bsc, action = "list", tenant_handle = ca.handle) - SubElement(q_msg, rpki.left_right.tag_repository, action = "list", tenant_handle = ca.handle) - SubElement(q_msg, rpki.left_right.tag_parent, action = "list", tenant_handle = ca.handle) - SubElement(q_msg, rpki.left_right.tag_child, action = "list", tenant_handle = ca.handle) - - r_msg = self.call_rpkid(q_msg, suppress_error_check = True) - - self.check_error_report(r_msg) - - tenant_pdu = r_msg.find(rpki.left_right.tag_tenant) - - bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu) - for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc)) - repository_pdus = dict((r_pdu.get("repository_handle"), r_pdu) - for r_pdu in r_msg.getiterator(rpki.left_right.tag_repository)) - parent_pdus = dict((r_pdu.get("parent_handle"), r_pdu) - for r_pdu in r_msg.getiterator(rpki.left_right.tag_parent)) - child_pdus = dict((r_pdu.get("child_handle"), r_pdu) - for r_pdu in r_msg.getiterator(rpki.left_right.tag_child)) - - q_msg = self._compose_left_right_query() - - tenant_cert, created = rpki.irdb.models.HostedCA.objects.get_or_certify( - issuer = self.server_ca, - hosted = ca) - - # There should be exactly one object per hosted entity, by definition - - if (tenant_pdu is None or - tenant_pdu.get("crl_interval") != str(tenant_crl_interval) or - tenant_pdu.get("regen_margin") != str(tenant_regen_margin) or - tenant_pdu.findtext(rpki.left_right.tag_bpki_cert, "").decode("base64") != tenant_cert.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.left_right.tag_tenant, - action = "create" if tenant_pdu is None else "set", - tag = "tenant", - tenant_handle = ca.handle, - crl_interval = str(tenant_crl_interval), - regen_margin = str(tenant_regen_margin)) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.certificate.get_Base64() - - # In general we only need one per . BSC objects - # are a little unusual in that the keypair and PKCS #10 - # subelement are generated by rpkid, so complete setup requires - # two round trips. - - bsc_pdu = bsc_pdus.pop(bsc_handle, None) - - if bsc_pdu is None or bsc_pdu.find(rpki.left_right.tag_pkcs10_request) is None: - SubElement(q_msg, rpki.left_right.tag_bsc, - action = "create" if bsc_pdu is None else "set", - tag = "bsc", - tenant_handle = ca.handle, - bsc_handle = bsc_handle, - generate_keypair = "yes") - - for bsc_handle in bsc_pdus: - SubElement(q_msg, rpki.left_right.tag_bsc, - action = "destroy", tenant_handle = ca.handle, bsc_handle = bsc_handle) - - # If we've already got actions queued up, run them now, so we - # can finish setting up the BSC before anything tries to use it. 
- - if len(q_msg) > 0: - SubElement(q_msg, rpki.left_right.tag_bsc, action = "list", tag = "bsc", tenant_handle = ca.handle) - r_msg = self.call_rpkid(q_msg) - bsc_pdus = dict((r_pdu.get("bsc_handle"), r_pdu) - for r_pdu in r_msg.getiterator(rpki.left_right.tag_bsc) - if r_pdu.get("action") == "list") - bsc_pdu = bsc_pdus.pop(bsc_handle, None) - - q_msg = self._compose_left_right_query() - - bsc_pkcs10 = bsc_pdu.find(rpki.left_right.tag_pkcs10_request) - assert bsc_pkcs10 is not None - - bsc, created = rpki.irdb.models.BSC.objects.get_or_certify( - issuer = ca, - handle = bsc_handle, - pkcs10 = rpki.x509.PKCS10(Base64 = bsc_pkcs10.text)) - - if (bsc_pdu.findtext(rpki.left_right.tag_signing_cert, "").decode("base64") != bsc.certificate.get_DER() or - bsc_pdu.findtext(rpki.left_right.tag_signing_cert_crl, "").decode("base64") != ca.latest_crl.get_DER()): - q_pdu = SubElement(q_msg, rpki.left_right.tag_bsc, - action = "set", - tag = "bsc", - tenant_handle = ca.handle, - bsc_handle = bsc_handle) - SubElement(q_pdu, rpki.left_right.tag_signing_cert).text = bsc.certificate.get_Base64() - SubElement(q_pdu, rpki.left_right.tag_signing_cert_crl).text = ca.latest_crl.get_Base64() - - # At present we need one per , not because - # rpkid requires that, but because pubd does. pubd probably should - # be fixed to support a single client allowed to update multiple - # trees, but for the moment the easiest way forward is just to - # enforce a 1:1 mapping between and objects - - for repository in ca.repositories.all(): - - repository_pdu = repository_pdus.pop(repository.handle, None) - - if (repository_pdu is None or - repository_pdu.get("bsc_handle") != bsc_handle or - repository_pdu.get("peer_contact_uri") != repository.service_uri or - repository_pdu.get("rrdp_notification_uri") != repository.rrdp_notification_uri or - repository_pdu.findtext(rpki.left_right.tag_bpki_cert, "").decode("base64") != repository.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.left_right.tag_repository, - action = "create" if repository_pdu is None else "set", - tag = repository.handle, - tenant_handle = ca.handle, - repository_handle = repository.handle, - bsc_handle = bsc_handle, - peer_contact_uri = repository.service_uri) - if repository.rrdp_notification_uri: - q_pdu.set("rrdp_notification_uri", repository.rrdp_notification_uri) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = repository.certificate.get_Base64() - - for repository_handle in repository_pdus: - SubElement(q_msg, rpki.left_right.tag_repository, action = "destroy", - tenant_handle = ca.handle, repository_handle = repository_handle) - - # setup code currently assumes 1:1 mapping between - # and , and further assumes that the handles - # for an associated pair are the identical (that is: - # parent.repository_handle == parent.parent_handle). - # - # If no such repository exists, our choices are to ignore the - # parent entry or throw an error. For now, we ignore the parent. 
- - for parent in ca.parents.all(): - - try: - parent_pdu = parent_pdus.pop(parent.handle, None) - - if (parent_pdu is None or - parent_pdu.get("bsc_handle") != bsc_handle or - parent_pdu.get("repository_handle") != parent.handle or - parent_pdu.get("peer_contact_uri") != parent.service_uri or - parent_pdu.get("sia_base") != parent.repository.sia_base or - parent_pdu.get("sender_name") != parent.child_handle or - parent_pdu.get("recipient_name") != parent.parent_handle or - parent_pdu.findtext(rpki.left_right.tag_bpki_cert, "").decode("base64") != parent.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, - action = "create" if parent_pdu is None else "set", - tag = parent.handle, - tenant_handle = ca.handle, - parent_handle = parent.handle, - bsc_handle = bsc_handle, - repository_handle = parent.handle, - peer_contact_uri = parent.service_uri, - sia_base = parent.repository.sia_base, - sender_name = parent.child_handle, - recipient_name = parent.parent_handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = parent.certificate.get_Base64() - - except rpki.irdb.models.Repository.DoesNotExist: - pass - - try: - - parent_pdu = parent_pdus.pop(ca.handle, None) - - if (parent_pdu is None or - parent_pdu.get("bsc_handle") != bsc_handle or - parent_pdu.get("repository_handle") != ca.handle or - parent_pdu.get("peer_contact_uri") != ca.rootd.service_uri or - parent_pdu.get("sia_base") != ca.rootd.repository.sia_base or - parent_pdu.get("sender_name") != ca.handle or - parent_pdu.get("recipient_name") != ca.handle or - parent_pdu.findtext(rpki.left_right.tag_bpki_cert).decode("base64") != ca.rootd.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, - action = "create" if parent_pdu is None else "set", - tag = ca.handle, - tenant_handle = ca.handle, - parent_handle = ca.handle, - bsc_handle = bsc_handle, - repository_handle = ca.handle, - peer_contact_uri = ca.rootd.service_uri, - sia_base = ca.rootd.repository.sia_base, - sender_name = ca.handle, - recipient_name = ca.handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.rootd.certificate.get_Base64() - - except rpki.irdb.models.Rootd.DoesNotExist: - pass - - for parent_handle in parent_pdus: - SubElement(q_msg, rpki.left_right.tag_parent, action = "destroy", - tenant_handle = ca.handle, parent_handle = parent_handle) - - # Children are simpler than parents, because they call us, so no URL - # to construct and figuring out what certificate to use is their - # problem, not ours. - - for child in ca.children.all(): - - child_pdu = child_pdus.pop(child.handle, None) - - if (child_pdu is None or - child_pdu.get("bsc_handle") != bsc_handle or - child_pdu.findtext(rpki.left_right.tag_bpki_cert).decode("base64") != child.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.left_right.tag_child, - action = "create" if child_pdu is None else "set", - tag = child.handle, - tenant_handle = ca.handle, - child_handle = child.handle, - bsc_handle = bsc_handle) - SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64() - - for child_handle in child_pdus: - SubElement(q_msg, rpki.left_right.tag_child, action = "destroy", - tenant_handle = ca.handle, child_handle = child_handle) - - # If caller wants us to poke rpkid, add that to the very end of the message - - if poke: - SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = ca.handle, run_now = "yes") - - # If we changed anything, ship updates off to rpkid. 
- - if len(q_msg) > 0: - self.call_rpkid(q_msg) - - - def synchronize_pubd_core(self): - """ - Configure pubd with data built up by the other commands in this - program. This is the core synchronization code. Don't call this - directly, instead call a methods that calls this inside a Django - commit wrapper. - - This method configures pubd with data built up by the other - commands in this program. Commands which modify IRDB fields - related to pubd should call this when they're done. - """ - - # If we're not running pubd, the rest of this is a waste of time - - if not self.run_pubd: - return - - # See what pubd already has on file - - q_msg = self._compose_publication_control_query() - SubElement(q_msg, rpki.publication_control.tag_client, action = "list") - r_msg = self.call_pubd(q_msg) - client_pdus = dict((r_pdu.get("client_handle"), r_pdu) - for r_pdu in r_msg) + try: - # Check all clients + parent_pdu = parent_pdus.pop(ca.handle, None) + + if (parent_pdu is None or + parent_pdu.get("bsc_handle") != bsc_handle or + parent_pdu.get("repository_handle") != ca.handle or + parent_pdu.get("peer_contact_uri") != ca.rootd.service_uri or + parent_pdu.get("sia_base") != ca.rootd.repository.sia_base or + parent_pdu.get("sender_name") != ca.handle or + parent_pdu.get("recipient_name") != ca.handle or + parent_pdu.findtext(rpki.left_right.tag_bpki_cert).decode("base64") != ca.rootd.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.left_right.tag_parent, + action = "create" if parent_pdu is None else "set", + tag = ca.handle, + tenant_handle = ca.handle, + parent_handle = ca.handle, + bsc_handle = bsc_handle, + repository_handle = ca.handle, + peer_contact_uri = ca.rootd.service_uri, + sia_base = ca.rootd.repository.sia_base, + sender_name = ca.handle, + recipient_name = ca.handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = ca.rootd.certificate.get_Base64() - q_msg = self._compose_publication_control_query() + except rpki.irdb.models.Rootd.DoesNotExist: + pass - for client in self.server_ca.clients.all(): + for parent_handle in parent_pdus: + SubElement(q_msg, rpki.left_right.tag_parent, action = "destroy", + tenant_handle = ca.handle, parent_handle = parent_handle) - client_pdu = client_pdus.pop(client.handle, None) + # Children are simpler than parents, because they call us, so no URL + # to construct and figuring out what certificate to use is their + # problem, not ours. 
- if (client_pdu is None or - client_pdu.get("base_uri") != client.sia_base or - client_pdu.findtext(rpki.publication_control.tag_bpki_cert, "").decode("base64") != client.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, - action = "create" if client_pdu is None else "set", - client_handle = client.handle, - base_uri = client.sia_base) - SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64() + for child in ca.children.all(): - # rootd instances are also a weird sort of client + child_pdu = child_pdus.pop(child.handle, None) - for rootd in rpki.irdb.models.Rootd.objects.all(): + if (child_pdu is None or + child_pdu.get("bsc_handle") != bsc_handle or + child_pdu.findtext(rpki.left_right.tag_bpki_cert).decode("base64") != child.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.left_right.tag_child, + action = "create" if child_pdu is None else "set", + tag = child.handle, + tenant_handle = ca.handle, + child_handle = child.handle, + bsc_handle = bsc_handle) + SubElement(q_pdu, rpki.left_right.tag_bpki_cert).text = child.certificate.get_Base64() - client_handle = rootd.issuer.handle + "-root" - client_pdu = client_pdus.pop(client_handle, None) - sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client_handle) + for child_handle in child_pdus: + SubElement(q_msg, rpki.left_right.tag_child, action = "destroy", + tenant_handle = ca.handle, child_handle = child_handle) - if (client_pdu is None or - client_pdu.get("base_uri") != sia_base or - client_pdu.findtext(rpki.publication_control.tag_bpki_cert, "").decode("base64") != rootd.issuer.certificate.get_DER()): - q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, - action = "create" if client_pdu is None else "set", - client_handle = client_handle, - base_uri = sia_base) - SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = rootd.issuer.certificate.get_Base64() + # If caller wants us to poke rpkid, add that to the very end of the message - # Delete any unknown clients + if poke: + SubElement(q_msg, rpki.left_right.tag_tenant, action = "set", tenant_handle = ca.handle, run_now = "yes") - for client_handle in client_pdus: - SubElement(q_msg, rpki.publication_control.tag_client, action = "destroy", client_handle = client_handle) + # If we changed anything, ship updates off to rpkid. - # If we changed anything, ship updates off to pubd + if len(q_msg) > 0: + self.call_rpkid(q_msg) - if len(q_msg) > 0: - self.call_pubd(q_msg) + def synchronize_pubd_core(self): + """ + Configure pubd with data built up by the other commands in this + program. This is the core synchronization code. Don't call this + directly, instead call a methods that calls this inside a Django + commit wrapper. - def synchronize_rpkid_deleted_core(self): - """ - Remove any objects present in rpkid's database but not - present in the IRDB. This is the core synchronization code. - Don't call this directly, instead call a methods that calls this - inside a Django commit wrapper. - """ + This method configures pubd with data built up by the other + commands in this program. Commands which modify IRDB fields + related to pubd should call this when they're done. 
+ """ - q_msg = self._compose_left_right_query() - SubElement(q_msg, rpki.left_right.tag_tenant, action = "list") - self.call_rpkid(q_msg) + # If we're not running pubd, the rest of this is a waste of time - tenant_handles = set(s.get("tenant_handle") for s in q_msg) - ca_handles = set(ca.handle for ca in rpki.irdb.models.ResourceHolderCA.objects.all()) - assert ca_handles <= tenant_handles + if not self.run_pubd: + return - q_msg = self._compose_left_right_query() - for handle in (tenant_handles - ca_handles): - SubElement(q_msg, rpki.left_right.tag_tenant, action = "destroy", tenant_handle = handle) + # See what pubd already has on file - if len(q_msg) > 0: - self.call_rpkid(q_msg) + q_msg = self._compose_publication_control_query() + SubElement(q_msg, rpki.publication_control.tag_client, action = "list") + r_msg = self.call_pubd(q_msg) + client_pdus = dict((r_pdu.get("client_handle"), r_pdu) + for r_pdu in r_msg) + # Check all clients - @django.db.transaction.atomic - def add_ee_certificate_request(self, pkcs10, resources): - """ - Check a PKCS #10 request to see if it complies with the - specification for a RPKI EE certificate; if it does, add an - EECertificateRequest for it to the IRDB. + q_msg = self._compose_publication_control_query() - Not yet sure what we want for update and delete semantics here, so - for the moment this is straight addition. See methods like - .load_asns() and .load_prefixes() for other strategies. - """ + for client in self.server_ca.clients.all(): - pkcs10.check_valid_request_ee() - ee_request = self.resource_ca.ee_certificate_requests.create( - pkcs10 = pkcs10, - gski = pkcs10.gSKI(), - valid_until = resources.valid_until) - for r in resources.asn: - ee_request.asns.create(start_as = str(r.min), end_as = str(r.max)) - for r in resources.v4: - ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 4) - for r in resources.v6: - ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 6) - - - @django.db.transaction.atomic - def add_router_certificate_request(self, router_certificate_request_xml, valid_until = None): - """ - Read XML file containing one or more router certificate requests, - attempt to add request(s) to IRDB. + client_pdu = client_pdus.pop(client.handle, None) - Check each PKCS #10 request to see if it complies with the - specification for a router certificate; if it does, create an EE - certificate request for it along with the ASN resources and - router-ID supplied in the XML. 
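
# For reference, the synthesized publication client identity used for rootd
# instances in synchronize_pubd_core() above; rsync_server and rsync_module
# are the values this Zookeeper instance was configured with.

def rootd_client_identity(issuer_handle, rsync_server, rsync_module):
    client_handle = issuer_handle + "-root"
    sia_base = "rsync://%s/%s/%s/" % (rsync_server, rsync_module, client_handle)
    return client_handle, sia_base

# rootd_client_identity("alice", "repo.example.org", "rpki")
# -> ("alice-root", "rsync://repo.example.org/rpki/alice-root/")
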
- """ + if (client_pdu is None or + client_pdu.get("base_uri") != client.sia_base or + client_pdu.findtext(rpki.publication_control.tag_bpki_cert, "").decode("base64") != client.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, + action = "create" if client_pdu is None else "set", + client_handle = client.handle, + base_uri = client.sia_base) + SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = client.certificate.get_Base64() - x = etree_read(router_certificate_request_xml, schema = rpki.relaxng.router_certificate) + # rootd instances are also a weird sort of client - for x in x.getiterator(tag_router_certificate_request): + for rootd in rpki.irdb.models.Rootd.objects.all(): - pkcs10 = rpki.x509.PKCS10(Base64 = x.text) - router_id = long(x.get("router_id")) - asns = rpki.resource_set.resource_set_as(x.get("asn")) - if not valid_until: - valid_until = x.get("valid_until") + client_handle = rootd.issuer.handle + "-root" + client_pdu = client_pdus.pop(client_handle, None) + sia_base = "rsync://%s/%s/%s/" % (self.rsync_server, self.rsync_module, client_handle) - if valid_until and isinstance(valid_until, (str, unicode)): - valid_until = rpki.sundial.datetime.fromXMLtime(valid_until) + if (client_pdu is None or + client_pdu.get("base_uri") != sia_base or + client_pdu.findtext(rpki.publication_control.tag_bpki_cert, "").decode("base64") != rootd.issuer.certificate.get_DER()): + q_pdu = SubElement(q_msg, rpki.publication_control.tag_client, + action = "create" if client_pdu is None else "set", + client_handle = client_handle, + base_uri = sia_base) + SubElement(q_pdu, rpki.publication_control.tag_bpki_cert).text = rootd.issuer.certificate.get_Base64() - if not valid_until: - valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365) - elif valid_until < rpki.sundial.now(): - raise PastExpiration("Specified expiration date %s has already passed" % valid_until) + # Delete any unknown clients - pkcs10.check_valid_request_router() + for client_handle in client_pdus: + SubElement(q_msg, rpki.publication_control.tag_client, action = "destroy", client_handle = client_handle) - cn = "ROUTER-%08x" % asns[0].min - sn = "%08x" % router_id + # If we changed anything, ship updates off to pubd - ee_request = self.resource_ca.ee_certificate_requests.create( - pkcs10 = pkcs10, - gski = pkcs10.gSKI(), - valid_until = valid_until, - cn = cn, - sn = sn, - eku = rpki.oids.id_kp_bgpsec_router) + if len(q_msg) > 0: + self.call_pubd(q_msg) - for r in asns: - ee_request.asns.create(start_as = str(r.min), end_as = str(r.max)) + def synchronize_rpkid_deleted_core(self): + """ + Remove any objects present in rpkid's database but not + present in the IRDB. This is the core synchronization code. + Don't call this directly, instead call a methods that calls this + inside a Django commit wrapper. + """ - @django.db.transaction.atomic - def delete_router_certificate_request(self, gski): - """ - Delete a router certificate request from this RPKI entity. 
- """ + q_msg = self._compose_left_right_query() + SubElement(q_msg, rpki.left_right.tag_tenant, action = "list") + self.call_rpkid(q_msg) - self.resource_ca.ee_certificate_requests.get(gski = gski).delete() + tenant_handles = set(s.get("tenant_handle") for s in q_msg) + ca_handles = set(ca.handle for ca in rpki.irdb.models.ResourceHolderCA.objects.all()) + assert ca_handles <= tenant_handles + + q_msg = self._compose_left_right_query() + for handle in (tenant_handles - ca_handles): + SubElement(q_msg, rpki.left_right.tag_tenant, action = "destroy", tenant_handle = handle) + + if len(q_msg) > 0: + self.call_rpkid(q_msg) + + + @django.db.transaction.atomic + def add_ee_certificate_request(self, pkcs10, resources): + """ + Check a PKCS #10 request to see if it complies with the + specification for a RPKI EE certificate; if it does, add an + EECertificateRequest for it to the IRDB. + + Not yet sure what we want for update and delete semantics here, so + for the moment this is straight addition. See methods like + .load_asns() and .load_prefixes() for other strategies. + """ + + pkcs10.check_valid_request_ee() + ee_request = self.resource_ca.ee_certificate_requests.create( + pkcs10 = pkcs10, + gski = pkcs10.gSKI(), + valid_until = resources.valid_until) + for r in resources.asn: + ee_request.asns.create(start_as = str(r.min), end_as = str(r.max)) + for r in resources.v4: + ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 4) + for r in resources.v6: + ee_request.address_ranges.create(start_ip = str(r.min), end_ip = str(r.max), version = 6) + + + @django.db.transaction.atomic + def add_router_certificate_request(self, router_certificate_request_xml, valid_until = None): + """ + Read XML file containing one or more router certificate requests, + attempt to add request(s) to IRDB. + + Check each PKCS #10 request to see if it complies with the + specification for a router certificate; if it does, create an EE + certificate request for it along with the ASN resources and + router-ID supplied in the XML. + """ + + x = etree_read(router_certificate_request_xml, schema = rpki.relaxng.router_certificate) + + for x in x.getiterator(tag_router_certificate_request): + + pkcs10 = rpki.x509.PKCS10(Base64 = x.text) + router_id = long(x.get("router_id")) + asns = rpki.resource_set.resource_set_as(x.get("asn")) + if not valid_until: + valid_until = x.get("valid_until") + + if valid_until and isinstance(valid_until, (str, unicode)): + valid_until = rpki.sundial.datetime.fromXMLtime(valid_until) + + if not valid_until: + valid_until = rpki.sundial.now() + rpki.sundial.timedelta(days = 365) + elif valid_until < rpki.sundial.now(): + raise PastExpiration("Specified expiration date %s has already passed" % valid_until) + + pkcs10.check_valid_request_router() + + cn = "ROUTER-%08x" % asns[0].min + sn = "%08x" % router_id + + ee_request = self.resource_ca.ee_certificate_requests.create( + pkcs10 = pkcs10, + gski = pkcs10.gSKI(), + valid_until = valid_until, + cn = cn, + sn = sn, + eku = rpki.oids.id_kp_bgpsec_router) + + for r in asns: + ee_request.asns.create(start_as = str(r.min), end_as = str(r.max)) + + + @django.db.transaction.atomic + def delete_router_certificate_request(self, gski): + """ + Delete a router certificate request from this RPKI entity. 
+ """ + + self.resource_ca.ee_certificate_requests.get(gski = gski).delete() diff --git a/rpki/irdbd.py b/rpki/irdbd.py index 96757477..91859f5d 100644 --- a/rpki/irdbd.py +++ b/rpki/irdbd.py @@ -41,183 +41,183 @@ logger = logging.getLogger(__name__) class main(object): - def handle_list_resources(self, q_pdu, r_msg): - tenant_handle = q_pdu.get("tenant_handle") - child_handle = q_pdu.get("child_handle") - child = rpki.irdb.models.Child.objects.get(issuer__handle = tenant_handle, handle = child_handle) - resources = child.resource_bag - r_pdu = SubElement(r_msg, rpki.left_right.tag_list_resources, tenant_handle = tenant_handle, child_handle = child_handle, - valid_until = child.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")) - for k, v in (("asn", resources.asn), - ("ipv4", resources.v4), - ("ipv6", resources.v6), - ("tag", q_pdu.get("tag"))): - if v: - r_pdu.set(k, str(v)) - - def handle_list_roa_requests(self, q_pdu, r_msg): - tenant_handle = q_pdu.get("tenant_handle") - for request in rpki.irdb.models.ROARequest.objects.raw(""" - SELECT irdb_roarequest.* - FROM irdb_roarequest, irdb_resourceholderca - WHERE irdb_roarequest.issuer_id = irdb_resourceholderca.id - AND irdb_resourceholderca.handle = %s - """, [tenant_handle]): - prefix_bag = request.roa_prefix_bag - r_pdu = SubElement(r_msg, rpki.left_right.tag_list_roa_requests, tenant_handle = tenant_handle, asn = str(request.asn)) - for k, v in (("ipv4", prefix_bag.v4), - ("ipv6", prefix_bag.v6), - ("tag", q_pdu.get("tag"))): - if v: - r_pdu.set(k, str(v)) - - def handle_list_ghostbuster_requests(self, q_pdu, r_msg): - tenant_handle = q_pdu.get("tenant_handle") - parent_handle = q_pdu.get("parent_handle") - ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(issuer__handle = tenant_handle, parent__handle = parent_handle) - if ghostbusters.count() == 0: - ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(issuer__handle = tenant_handle, parent = None) - for ghostbuster in ghostbusters: - r_pdu = SubElement(r_msg, q_pdu.tag, tenant_handle = tenant_handle, parent_handle = parent_handle) - if q_pdu.get("tag"): - r_pdu.set("tag", q_pdu.get("tag")) - r_pdu.text = ghostbuster.vcard - - def handle_list_ee_certificate_requests(self, q_pdu, r_msg): - tenant_handle = q_pdu.get("tenant_handle") - for ee_req in rpki.irdb.models.EECertificateRequest.objects.filter(issuer__handle = tenant_handle): - resources = ee_req.resource_bag - r_pdu = SubElement(r_msg, q_pdu.tag, tenant_handle = tenant_handle, gski = ee_req.gski, - valid_until = ee_req.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ"), - cn = ee_req.cn, sn = ee_req.sn) - for k, v in (("asn", resources.asn), - ("ipv4", resources.v4), - ("ipv6", resources.v6), - ("eku", ee_req.eku), - ("tag", q_pdu.get("tag"))): - if v: - r_pdu.set(k, str(v)) - SubElement(r_pdu, rpki.left_right.tag_pkcs10).text = ee_req.pkcs10.get_Base64() - - def handler(self, request, q_der): - try: - from django.db import connection - connection.cursor() # Reconnect to mysqld if necessary - self.start_new_transaction() - serverCA = rpki.irdb.models.ServerCA.objects.get() - rpkid = serverCA.ee_certificates.get(purpose = "rpkid") - irdbd = serverCA.ee_certificates.get(purpose = "irdbd") - q_cms = rpki.left_right.cms_msg(DER = q_der) - q_msg = q_cms.unwrap((serverCA.certificate, rpkid.certificate)) - self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, request.path) - if q_msg.get("type") != "query": - raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type")) - r_msg = 
Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap, - type = "reply", version = rpki.left_right.version) - try: - for q_pdu in q_msg: - getattr(self, "handle_" + q_pdu.tag[len(rpki.left_right.xmlns):])(q_pdu, r_msg) - - except Exception, e: - logger.exception("Exception processing PDU %r", q_pdu) - r_pdu = SubElement(r_msg, rpki.left_right.tag_report_error, error_code = e.__class__.__name__) - r_pdu.text = str(e) - if q_pdu.get("tag") is not None: - r_pdu.set("tag", q_pdu.get("tag")) - - request.send_cms_response(rpki.left_right.cms_msg().wrap(r_msg, irdbd.private_key, irdbd.certificate)) - - except Exception, e: - logger.exception("Unhandled exception while processing HTTP request") - request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e)) - - def __init__(self, **kwargs): - - global rpki # pylint: disable=W0602 - - os.environ.update(TZ = "UTC", - DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb") - time.tzset() - - parser = argparse.ArgumentParser(description = __doc__) - parser.add_argument("-c", "--config", - help = "override default location of configuration file") - parser.add_argument("-f", "--foreground", action = "store_true", - help = "do not daemonize") - parser.add_argument("--pidfile", - help = "override default location of pid file") - parser.add_argument("--profile", - help = "enable profiling, saving data to PROFILE") - rpki.log.argparse_setup(parser) - args = parser.parse_args() - - rpki.log.init("irdbd", args) - - self.cfg = rpki.config.parser(set_filename = args.config, section = "irdbd") - self.cfg.set_global_flags() - - if not args.foreground: - rpki.daemonize.daemon(pidfile = args.pidfile) - - if args.profile: - import cProfile - prof = cProfile.Profile() - try: - prof.runcall(self.main) - finally: - prof.dump_stats(args.profile) - logger.info("Dumped profile data to %s", args.profile) - else: - self.main() - - def main(self): - - startup_msg = self.cfg.get("startup-message", "") - if startup_msg: - logger.info(startup_msg) - - # Now that we know which configuration file to use, it's OK to - # load modules that require Django's settings module. - - import django - django.setup() - - global rpki # pylint: disable=W0602 - import rpki.irdb # pylint: disable=W0621 - - self.http_server_host = self.cfg.get("server-host", "") - self.http_server_port = self.cfg.getint("server-port") - - self.cms_timestamp = None - - rpki.http_simple.server( - host = self.http_server_host, - port = self.http_server_port, - handlers = self.handler) - - def start_new_transaction(self): - - # Entirely too much fun with read-only access to transactional databases. - # - # http://stackoverflow.com/questions/3346124/how-do-i-force-django-to-ignore-any-caches-and-reload-data - # http://devblog.resolversystems.com/?p=439 - # http://groups.google.com/group/django-users/browse_thread/thread/e25cec400598c06d - # http://stackoverflow.com/questions/1028671/python-mysqldb-update-query-fails - # http://dev.mysql.com/doc/refman/5.0/en/set-transaction.html - # - # It turns out that MySQL is doing us a favor with this weird - # transactional behavior on read, because without it there's a - # race condition if multiple updates are committed to the IRDB - # while we're in the middle of processing a query. Note that - # proper transaction management by the committers doesn't protect - # us, this is a transactional problem on read. So we need to use - # explicit transaction management. 
Since irdbd is a read-only - # consumer of IRDB data, this means we need to commit an empty - # transaction at the beginning of processing each query, to reset - # the transaction isolation snapshot. - - import django.db.transaction - - with django.db.transaction.atomic(): - #django.db.transaction.commit() - pass + def handle_list_resources(self, q_pdu, r_msg): + tenant_handle = q_pdu.get("tenant_handle") + child_handle = q_pdu.get("child_handle") + child = rpki.irdb.models.Child.objects.get(issuer__handle = tenant_handle, handle = child_handle) + resources = child.resource_bag + r_pdu = SubElement(r_msg, rpki.left_right.tag_list_resources, tenant_handle = tenant_handle, child_handle = child_handle, + valid_until = child.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ")) + for k, v in (("asn", resources.asn), + ("ipv4", resources.v4), + ("ipv6", resources.v6), + ("tag", q_pdu.get("tag"))): + if v: + r_pdu.set(k, str(v)) + + def handle_list_roa_requests(self, q_pdu, r_msg): + tenant_handle = q_pdu.get("tenant_handle") + for request in rpki.irdb.models.ROARequest.objects.raw(""" + SELECT irdb_roarequest.* + FROM irdb_roarequest, irdb_resourceholderca + WHERE irdb_roarequest.issuer_id = irdb_resourceholderca.id + AND irdb_resourceholderca.handle = %s + """, [tenant_handle]): + prefix_bag = request.roa_prefix_bag + r_pdu = SubElement(r_msg, rpki.left_right.tag_list_roa_requests, tenant_handle = tenant_handle, asn = str(request.asn)) + for k, v in (("ipv4", prefix_bag.v4), + ("ipv6", prefix_bag.v6), + ("tag", q_pdu.get("tag"))): + if v: + r_pdu.set(k, str(v)) + + def handle_list_ghostbuster_requests(self, q_pdu, r_msg): + tenant_handle = q_pdu.get("tenant_handle") + parent_handle = q_pdu.get("parent_handle") + ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(issuer__handle = tenant_handle, parent__handle = parent_handle) + if ghostbusters.count() == 0: + ghostbusters = rpki.irdb.models.GhostbusterRequest.objects.filter(issuer__handle = tenant_handle, parent = None) + for ghostbuster in ghostbusters: + r_pdu = SubElement(r_msg, q_pdu.tag, tenant_handle = tenant_handle, parent_handle = parent_handle) + if q_pdu.get("tag"): + r_pdu.set("tag", q_pdu.get("tag")) + r_pdu.text = ghostbuster.vcard + + def handle_list_ee_certificate_requests(self, q_pdu, r_msg): + tenant_handle = q_pdu.get("tenant_handle") + for ee_req in rpki.irdb.models.EECertificateRequest.objects.filter(issuer__handle = tenant_handle): + resources = ee_req.resource_bag + r_pdu = SubElement(r_msg, q_pdu.tag, tenant_handle = tenant_handle, gski = ee_req.gski, + valid_until = ee_req.valid_until.strftime("%Y-%m-%dT%H:%M:%SZ"), + cn = ee_req.cn, sn = ee_req.sn) + for k, v in (("asn", resources.asn), + ("ipv4", resources.v4), + ("ipv6", resources.v6), + ("eku", ee_req.eku), + ("tag", q_pdu.get("tag"))): + if v: + r_pdu.set(k, str(v)) + SubElement(r_pdu, rpki.left_right.tag_pkcs10).text = ee_req.pkcs10.get_Base64() + + def handler(self, request, q_der): + try: + from django.db import connection + connection.cursor() # Reconnect to mysqld if necessary + self.start_new_transaction() + serverCA = rpki.irdb.models.ServerCA.objects.get() + rpkid = serverCA.ee_certificates.get(purpose = "rpkid") + irdbd = serverCA.ee_certificates.get(purpose = "irdbd") + q_cms = rpki.left_right.cms_msg(DER = q_der) + q_msg = q_cms.unwrap((serverCA.certificate, rpkid.certificate)) + self.cms_timestamp = q_cms.check_replay(self.cms_timestamp, request.path) + if q_msg.get("type") != "query": + raise rpki.exceptions.BadQuery("Message type is %s, expected 
query" % q_msg.get("type")) + r_msg = Element(rpki.left_right.tag_msg, nsmap = rpki.left_right.nsmap, + type = "reply", version = rpki.left_right.version) + try: + for q_pdu in q_msg: + getattr(self, "handle_" + q_pdu.tag[len(rpki.left_right.xmlns):])(q_pdu, r_msg) + + except Exception, e: + logger.exception("Exception processing PDU %r", q_pdu) + r_pdu = SubElement(r_msg, rpki.left_right.tag_report_error, error_code = e.__class__.__name__) + r_pdu.text = str(e) + if q_pdu.get("tag") is not None: + r_pdu.set("tag", q_pdu.get("tag")) + + request.send_cms_response(rpki.left_right.cms_msg().wrap(r_msg, irdbd.private_key, irdbd.certificate)) + + except Exception, e: + logger.exception("Unhandled exception while processing HTTP request") + request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e)) + + def __init__(self, **kwargs): + + global rpki # pylint: disable=W0602 + + os.environ.update(TZ = "UTC", + DJANGO_SETTINGS_MODULE = "rpki.django_settings.irdb") + time.tzset() + + parser = argparse.ArgumentParser(description = __doc__) + parser.add_argument("-c", "--config", + help = "override default location of configuration file") + parser.add_argument("-f", "--foreground", action = "store_true", + help = "do not daemonize") + parser.add_argument("--pidfile", + help = "override default location of pid file") + parser.add_argument("--profile", + help = "enable profiling, saving data to PROFILE") + rpki.log.argparse_setup(parser) + args = parser.parse_args() + + rpki.log.init("irdbd", args) + + self.cfg = rpki.config.parser(set_filename = args.config, section = "irdbd") + self.cfg.set_global_flags() + + if not args.foreground: + rpki.daemonize.daemon(pidfile = args.pidfile) + + if args.profile: + import cProfile + prof = cProfile.Profile() + try: + prof.runcall(self.main) + finally: + prof.dump_stats(args.profile) + logger.info("Dumped profile data to %s", args.profile) + else: + self.main() + + def main(self): + + startup_msg = self.cfg.get("startup-message", "") + if startup_msg: + logger.info(startup_msg) + + # Now that we know which configuration file to use, it's OK to + # load modules that require Django's settings module. + + import django + django.setup() + + global rpki # pylint: disable=W0602 + import rpki.irdb # pylint: disable=W0621 + + self.http_server_host = self.cfg.get("server-host", "") + self.http_server_port = self.cfg.getint("server-port") + + self.cms_timestamp = None + + rpki.http_simple.server( + host = self.http_server_host, + port = self.http_server_port, + handlers = self.handler) + + def start_new_transaction(self): + + # Entirely too much fun with read-only access to transactional databases. + # + # http://stackoverflow.com/questions/3346124/how-do-i-force-django-to-ignore-any-caches-and-reload-data + # http://devblog.resolversystems.com/?p=439 + # http://groups.google.com/group/django-users/browse_thread/thread/e25cec400598c06d + # http://stackoverflow.com/questions/1028671/python-mysqldb-update-query-fails + # http://dev.mysql.com/doc/refman/5.0/en/set-transaction.html + # + # It turns out that MySQL is doing us a favor with this weird + # transactional behavior on read, because without it there's a + # race condition if multiple updates are committed to the IRDB + # while we're in the middle of processing a query. Note that + # proper transaction management by the committers doesn't protect + # us, this is a transactional problem on read. So we need to use + # explicit transaction management. 
Since irdbd is a read-only + # consumer of IRDB data, this means we need to commit an empty + # transaction at the beginning of processing each query, to reset + # the transaction isolation snapshot. + + import django.db.transaction + + with django.db.transaction.atomic(): + #django.db.transaction.commit() + pass diff --git a/rpki/left_right.py b/rpki/left_right.py index 387e908f..3572ee98 100644 --- a/rpki/left_right.py +++ b/rpki/left_right.py @@ -71,9 +71,9 @@ allowed_content_types = (content_type,) class cms_msg(rpki.x509.XML_CMS_object): - """ - CMS-signed left-right PDU. - """ + """ + CMS-signed left-right PDU. + """ - encoding = "us-ascii" - schema = rpki.relaxng.left_right + encoding = "us-ascii" + schema = rpki.relaxng.left_right diff --git a/rpki/log.py b/rpki/log.py index 828982da..8afee4ba 100644 --- a/rpki/log.py +++ b/rpki/log.py @@ -30,12 +30,12 @@ import argparse import traceback as tb try: - have_setproctitle = False - if os.getenv("DISABLE_SETPROCTITLE") is None: - import setproctitle # pylint: disable=F0401 - have_setproctitle = True + have_setproctitle = False + if os.getenv("DISABLE_SETPROCTITLE") is None: + import setproctitle # pylint: disable=F0401 + have_setproctitle = True except ImportError: - pass + pass logger = logging.getLogger(__name__) @@ -67,234 +67,234 @@ proctitle_extra = os.path.basename(os.getcwd()) class Formatter(object): - """ - Reimplementation (easier than subclassing in this case) of - logging.Formatter. - - It turns out that the logging code only cares about this class's - .format(record) method, everything else is internal; so long as - .format() converts a record into a properly formatted string, the - logging code is happy. - - So, rather than mess around with dynamically constructing and - deconstructing and tweaking format strings and ten zillion options - we don't use, we just provide our own implementation that supports - what we do need. - """ - - converter = time.gmtime - - def __init__(self, ident, handler): - self.ident = ident - self.is_syslog = isinstance(handler, logging.handlers.SysLogHandler) - - def format(self, record): - return "".join(self.coformat(record)).rstrip("\n") - - def coformat(self, record): - - try: - if not self.is_syslog: - yield time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime(record.created)) - except: # pylint: disable=W0702 - yield "[$!$Time format failed]" - - try: - yield "%s[%d]: " % (self.ident, record.process) - except: # pylint: disable=W0702 - yield "[$!$ident format failed]" - - try: - if isinstance(record.context, (str, unicode)): - yield record.context + " " - else: - yield repr(record.context) + " " - except AttributeError: - pass - except: # pylint: disable=W0702 - yield "[$!$context format failed]" - - try: - yield record.getMessage() - except: # pylint: disable=W0702 - yield "[$!$record.getMessage() failed]" - - try: - if record.exc_info: - if self.is_syslog or not enable_tracebacks: - lines = tb.format_exception_only(record.exc_info[0], record.exc_info[1]) - lines.insert(0, ": ") - else: - lines = tb.format_exception(record.exc_info[0], record.exc_info[1], record.exc_info[2]) - lines.insert(0, "\n") - for line in lines: - yield line - except: # pylint: disable=W0702 - yield "[$!$exception formatting failed]" + """ + Reimplementation (easier than subclassing in this case) of + logging.Formatter. 
+ + It turns out that the logging code only cares about this class's + .format(record) method, everything else is internal; so long as + .format() converts a record into a properly formatted string, the + logging code is happy. + + So, rather than mess around with dynamically constructing and + deconstructing and tweaking format strings and ten zillion options + we don't use, we just provide our own implementation that supports + what we do need. + """ + + converter = time.gmtime + + def __init__(self, ident, handler): + self.ident = ident + self.is_syslog = isinstance(handler, logging.handlers.SysLogHandler) + + def format(self, record): + return "".join(self.coformat(record)).rstrip("\n") + + def coformat(self, record): + + try: + if not self.is_syslog: + yield time.strftime("%Y-%m-%d %H:%M:%S ", time.gmtime(record.created)) + except: # pylint: disable=W0702 + yield "[$!$Time format failed]" + + try: + yield "%s[%d]: " % (self.ident, record.process) + except: # pylint: disable=W0702 + yield "[$!$ident format failed]" + + try: + if isinstance(record.context, (str, unicode)): + yield record.context + " " + else: + yield repr(record.context) + " " + except AttributeError: + pass + except: # pylint: disable=W0702 + yield "[$!$context format failed]" + + try: + yield record.getMessage() + except: # pylint: disable=W0702 + yield "[$!$record.getMessage() failed]" + + try: + if record.exc_info: + if self.is_syslog or not enable_tracebacks: + lines = tb.format_exception_only(record.exc_info[0], record.exc_info[1]) + lines.insert(0, ": ") + else: + lines = tb.format_exception(record.exc_info[0], record.exc_info[1], record.exc_info[2]) + lines.insert(0, "\n") + for line in lines: + yield line + except: # pylint: disable=W0702 + yield "[$!$exception formatting failed]" def argparse_setup(parser, default_thunk = None): - """ - Set up argparse stuff for functionality in this module. + """ + Set up argparse stuff for functionality in this module. - Default logging destination is syslog, but you can change this - by setting default_thunk to a callable which takes no arguments - and which returns a instance of a logging.Handler subclass. + Default logging destination is syslog, but you can change this + by setting default_thunk to a callable which takes no arguments + and which returns a instance of a logging.Handler subclass. - Also see rpki.log.init(). - """ + Also see rpki.log.init(). 
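A daemon wires this up the same way irdbd and pubd do: build its own ArgumentParser, let argparse_setup() add the --log-* options, then hand the parsed namespace to init(). A minimal sketch (the "mydaemon" ident is illustrative):

    import argparse
    import logging
    import rpki.log

    parser = argparse.ArgumentParser(description = __doc__)
    # ... daemon-specific arguments go here ...
    rpki.log.argparse_setup(parser)
    args = parser.parse_args()

    # After this, everything logged goes wherever the --log-* options said,
    # filtered at the level given by --log-level.
    rpki.log.init("mydaemon", args)
    logging.getLogger(__name__).info("starting up")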
+ """ - class LogLevelAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string = None): - setattr(namespace, self.dest, getattr(logging, values.upper())) + class LogLevelAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string = None): + setattr(namespace, self.dest, getattr(logging, values.upper())) - parser.add_argument("--log-level", default = logging.WARNING, action = LogLevelAction, - choices = ("debug", "info", "warning", "error", "critical"), - help = "how verbosely to log") + parser.add_argument("--log-level", default = logging.WARNING, action = LogLevelAction, + choices = ("debug", "info", "warning", "error", "critical"), + help = "how verbosely to log") - group = parser.add_mutually_exclusive_group() + group = parser.add_mutually_exclusive_group() - syslog_address = "/dev/log" if os.path.exists("/dev/log") else ("localhost", logging.handlers.SYSLOG_UDP_PORT) + syslog_address = "/dev/log" if os.path.exists("/dev/log") else ("localhost", logging.handlers.SYSLOG_UDP_PORT) - class SyslogAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string = None): - namespace.log_handler = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = values) + class SyslogAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string = None): + namespace.log_handler = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = values) - group.add_argument("--log-syslog", nargs = "?", const = "daemon", action = SyslogAction, - choices = sorted(logging.handlers.SysLogHandler.facility_names.keys()), - help = "send logging to syslog") + group.add_argument("--log-syslog", nargs = "?", const = "daemon", action = SyslogAction, + choices = sorted(logging.handlers.SysLogHandler.facility_names.keys()), + help = "send logging to syslog") - class StreamAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string = None): - namespace.log_handler = lambda: logging.StreamHandler(stream = self.const) + class StreamAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string = None): + namespace.log_handler = lambda: logging.StreamHandler(stream = self.const) - group.add_argument("--log-stderr", nargs = 0, action = StreamAction, const = sys.stderr, - help = "send logging to standard error") + group.add_argument("--log-stderr", nargs = 0, action = StreamAction, const = sys.stderr, + help = "send logging to standard error") - group.add_argument("--log-stdout", nargs = 0, action = StreamAction, const = sys.stdout, - help = "send logging to standard output") + group.add_argument("--log-stdout", nargs = 0, action = StreamAction, const = sys.stdout, + help = "send logging to standard output") - class WatchedFileAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string = None): - namespace.log_handler = lambda: logging.handlers.WatchedFileHandler(filename = values) + class WatchedFileAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string = None): + namespace.log_handler = lambda: logging.handlers.WatchedFileHandler(filename = values) - group.add_argument("--log-file", action = WatchedFileAction, - help = "send logging to a file, reopening if rotated away") + group.add_argument("--log-file", action = WatchedFileAction, + help = "send logging to a file, reopening if rotated away") - class RotatingFileAction(argparse.Action): - def __call__(self, parser, namespace, 
values, option_string = None): - namespace.log_handler = lambda: logging.handlers.RotatingFileHandler( - filename = values[0], - maxBytes = int(values[1]) * 1024, - backupCount = int(values[2])) + class RotatingFileAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string = None): + namespace.log_handler = lambda: logging.handlers.RotatingFileHandler( + filename = values[0], + maxBytes = int(values[1]) * 1024, + backupCount = int(values[2])) - group.add_argument("--log-rotating-file", action = RotatingFileAction, - nargs = 3, metavar = ("FILENAME", "KBYTES", "COUNT"), - help = "send logging to rotating file") + group.add_argument("--log-rotating-file", action = RotatingFileAction, + nargs = 3, metavar = ("FILENAME", "KBYTES", "COUNT"), + help = "send logging to rotating file") - class TimedRotatingFileAction(argparse.Action): - def __call__(self, parser, namespace, values, option_string = None): - namespace.log_handler = lambda: logging.handlers.TimedRotatingFileHandler( - filename = values[0], - interval = int(values[1]), - backupCount = int(values[2]), - when = "H", - utc = True) + class TimedRotatingFileAction(argparse.Action): + def __call__(self, parser, namespace, values, option_string = None): + namespace.log_handler = lambda: logging.handlers.TimedRotatingFileHandler( + filename = values[0], + interval = int(values[1]), + backupCount = int(values[2]), + when = "H", + utc = True) - group.add_argument("--log-timed-rotating-file", action = TimedRotatingFileAction, - nargs = 3, metavar = ("FILENAME", "HOURS", "COUNT"), - help = "send logging to timed rotating file") + group.add_argument("--log-timed-rotating-file", action = TimedRotatingFileAction, + nargs = 3, metavar = ("FILENAME", "HOURS", "COUNT"), + help = "send logging to timed rotating file") - if default_thunk is None: - default_thunk = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = "daemon") + if default_thunk is None: + default_thunk = lambda: logging.handlers.SysLogHandler(address = syslog_address, facility = "daemon") - parser.set_defaults(log_handler = default_thunk) + parser.set_defaults(log_handler = default_thunk) def init(ident = None, args = None): - """ - Initialize logging system. + """ + Initialize logging system. - Default logging destination is stderr if "args" is not specified. - """ + Default logging destination is stderr if "args" is not specified. 
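So a one-off script or test that skips argparse entirely can call init() with no namespace and get stderr logging at WARNING, for example:

    import logging
    import rpki.log

    rpki.log.init("quick-test")      # no args: StreamHandler on stderr, level WARNING
    logging.getLogger(__name__).warning("this goes to stderr")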
+ """ - # pylint: disable=E1103 + # pylint: disable=E1103 - if ident is None: - ident = os.path.basename(sys.argv[0]) + if ident is None: + ident = os.path.basename(sys.argv[0]) - if args is None: - args = argparse.Namespace(log_level = logging.WARNING, - log_handler = logging.StreamHandler) + if args is None: + args = argparse.Namespace(log_level = logging.WARNING, + log_handler = logging.StreamHandler) - handler = args.log_handler() - handler.setFormatter(Formatter(ident, handler)) + handler = args.log_handler() + handler.setFormatter(Formatter(ident, handler)) - root_logger = logging.getLogger() - root_logger.addHandler(handler) - root_logger.setLevel(args.log_level) + root_logger = logging.getLogger() + root_logger.addHandler(handler) + root_logger.setLevel(args.log_level) - if ident and have_setproctitle and use_setproctitle: - if proctitle_extra: - setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra)) - else: - setproctitle.setproctitle(ident) + if ident and have_setproctitle and use_setproctitle: + if proctitle_extra: + setproctitle.setproctitle("%s (%s)" % (ident, proctitle_extra)) + else: + setproctitle.setproctitle(ident) def class_logger(module_logger, attribute = "logger"): - """ - Class decorator to add a class-level Logger object as a class - attribute. This allows control of debugging messages at the class - level rather than just the module level. + """ + Class decorator to add a class-level Logger object as a class + attribute. This allows control of debugging messages at the class + level rather than just the module level. - This decorator takes the module logger as an argument. - """ + This decorator takes the module logger as an argument. + """ - def decorator(cls): - setattr(cls, attribute, module_logger.getChild(cls.__name__)) - return cls - return decorator + def decorator(cls): + setattr(cls, attribute, module_logger.getChild(cls.__name__)) + return cls + return decorator def log_repr(obj, *tokens): - """ - Constructor for __repr__() strings, handles suppression of Python - IDs as needed, includes tenant_handle when available. - """ + """ + Constructor for __repr__() strings, handles suppression of Python + IDs as needed, includes tenant_handle when available. + """ - # pylint: disable=W0702 + # pylint: disable=W0702 - words = ["%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)] - try: - words.append("{%s}" % obj.tenant.tenant_handle) - except: - pass + words = ["%s.%s" % (obj.__class__.__module__, obj.__class__.__name__)] + try: + words.append("{%s}" % obj.tenant.tenant_handle) + except: + pass - for token in tokens: - if token is not None: - try: - s = str(token) - except: - s = "???" - logger.exception("Failed to generate repr() string for object of type %r", type(token)) - if s: - words.append(s) + for token in tokens: + if token is not None: + try: + s = str(token) + except: + s = "???" + logger.exception("Failed to generate repr() string for object of type %r", type(token)) + if s: + words.append(s) - if show_python_ids: - words.append(" at %#x" % id(obj)) + if show_python_ids: + words.append(" at %#x" % id(obj)) - return "<" + " ".join(words) + ">" + return "<" + " ".join(words) + ">" def show_stack(stack_logger = None): - """ - Log a stack trace. - """ + """ + Log a stack trace. 
+ """ - if stack_logger is None: - stack_logger = logger + if stack_logger is None: + stack_logger = logger - for frame in tb.format_stack(): - for line in frame.split("\n"): - if line: - stack_logger.debug("%s", line.rstrip()) + for frame in tb.format_stack(): + for line in frame.split("\n"): + if line: + stack_logger.debug("%s", line.rstrip()) diff --git a/rpki/myrpki.py b/rpki/myrpki.py index 2ae912f0..929c2a70 100644 --- a/rpki/myrpki.py +++ b/rpki/myrpki.py @@ -19,5 +19,5 @@ This is a tombstone for a program that no longer exists. """ if __name__ != "__main__": # sic -- don't break regression tests - import sys - sys.exit('"myrpki" is obsolete. Please use "rpkic" instead.') + import sys + sys.exit('"myrpki" is obsolete. Please use "rpkic" instead.') diff --git a/rpki/mysql_import.py b/rpki/mysql_import.py index 538e1916..bbb7ac22 100644 --- a/rpki/mysql_import.py +++ b/rpki/mysql_import.py @@ -52,11 +52,11 @@ from __future__ import with_statement import warnings if hasattr(warnings, "catch_warnings"): - with warnings.catch_warnings(): - warnings.simplefilter("ignore", DeprecationWarning) - import MySQLdb + with warnings.catch_warnings(): + warnings.simplefilter("ignore", DeprecationWarning) + import MySQLdb else: - import MySQLdb + import MySQLdb import _mysql_exceptions diff --git a/rpki/oids.py b/rpki/oids.py index afb95020..abc928bc 100644 --- a/rpki/oids.py +++ b/rpki/oids.py @@ -82,22 +82,22 @@ id_sha256 = "2.16.840.1.101.3.4.2.1" _oid2name = {} for _sym in dir(): - if not _sym.startswith("_"): - _val = globals()[_sym] - if not isinstance(_val, str) or not all(_v.isdigit() for _v in _val.split(".")): - raise ValueError("Bad OID definition: %s = %r" % (_sym, _val)) - _oid2name[_val] = _sym.replace("_", "-") + if not _sym.startswith("_"): + _val = globals()[_sym] + if not isinstance(_val, str) or not all(_v.isdigit() for _v in _val.split(".")): + raise ValueError("Bad OID definition: %s = %r" % (_sym, _val)) + _oid2name[_val] = _sym.replace("_", "-") # pylint: disable=W0631 del _sym del _val def oid2name(oid): - """ - Translate an OID into a string suitable for printing. - """ + """ + Translate an OID into a string suitable for printing. 
+ """ - if not isinstance(oid, (str, unicode)) or not all(o.isdigit() for o in oid.split(".")): - raise ValueError("Parameter does not look like an OID string: " + repr(oid)) + if not isinstance(oid, (str, unicode)) or not all(o.isdigit() for o in oid.split(".")): + raise ValueError("Parameter does not look like an OID string: " + repr(oid)) - return _oid2name.get(oid, oid) + return _oid2name.get(oid, oid) diff --git a/rpki/old_irdbd.py b/rpki/old_irdbd.py index 9294ee84..fca1f1d9 100644 --- a/rpki/old_irdbd.py +++ b/rpki/old_irdbd.py @@ -46,270 +46,270 @@ logger = logging.getLogger(__name__) class main(object): - def handle_list_resources(self, q_pdu, r_msg): - - r_pdu = rpki.left_right.list_resources_elt() - r_pdu.tag = q_pdu.tag - r_pdu.self_handle = q_pdu.self_handle - r_pdu.child_handle = q_pdu.child_handle - - self.cur.execute( - """ - SELECT registrant_id, valid_until - FROM registrant - WHERE registry_handle = %s AND registrant_handle = %s - """, - (q_pdu.self_handle, q_pdu.child_handle)) - - if self.cur.rowcount != 1: - raise rpki.exceptions.NotInDatabase( - "This query should have produced a single exact match, something's messed up" - " (rowcount = %d, self_handle = %s, child_handle = %s)" - % (self.cur.rowcount, q_pdu.self_handle, q_pdu.child_handle)) - - registrant_id, valid_until = self.cur.fetchone() - - r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ") - - r_pdu.asn = rpki.resource_set.resource_set_as.from_sql( - self.cur, - """ - SELECT start_as, end_as - FROM registrant_asn - WHERE registrant_id = %s - """, - (registrant_id,)) - - r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql( - self.cur, - """ - SELECT start_ip, end_ip - FROM registrant_net - WHERE registrant_id = %s AND version = 4 - """, - (registrant_id,)) - - r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql( - self.cur, - """ - SELECT start_ip, end_ip - FROM registrant_net - WHERE registrant_id = %s AND version = 6 - """, - (registrant_id,)) - - r_msg.append(r_pdu) - - - def handle_list_roa_requests(self, q_pdu, r_msg): - - self.cur.execute( - "SELECT roa_request_id, asn FROM roa_request WHERE self_handle = %s", - (q_pdu.self_handle,)) - - for roa_request_id, asn in self.cur.fetchall(): - - r_pdu = rpki.left_right.list_roa_requests_elt() - r_pdu.tag = q_pdu.tag - r_pdu.self_handle = q_pdu.self_handle - r_pdu.asn = asn - - r_pdu.ipv4 = rpki.resource_set.roa_prefix_set_ipv4.from_sql( - self.cur, - """ - SELECT prefix, prefixlen, max_prefixlen - FROM roa_request_prefix - WHERE roa_request_id = %s AND version = 4 - """, - (roa_request_id,)) - - r_pdu.ipv6 = rpki.resource_set.roa_prefix_set_ipv6.from_sql( - self.cur, - """ - SELECT prefix, prefixlen, max_prefixlen - FROM roa_request_prefix - WHERE roa_request_id = %s AND version = 6 - """, - (roa_request_id,)) - - r_msg.append(r_pdu) - - - def handle_list_ghostbuster_requests(self, q_pdu, r_msg): - - self.cur.execute( - """ - SELECT vcard - FROM ghostbuster_request - WHERE self_handle = %s AND parent_handle = %s - """, - (q_pdu.self_handle, q_pdu.parent_handle)) - - vcards = [result[0] for result in self.cur.fetchall()] - - if not vcards: - - self.cur.execute( - """ - SELECT vcard - FROM ghostbuster_request - WHERE self_handle = %s AND parent_handle IS NULL - """, - (q_pdu.self_handle,)) - - vcards = [result[0] for result in self.cur.fetchall()] - - for vcard in vcards: - r_pdu = rpki.left_right.list_ghostbuster_requests_elt() - r_pdu.tag = q_pdu.tag - r_pdu.self_handle = q_pdu.self_handle - r_pdu.parent_handle = q_pdu.parent_handle - 
r_pdu.vcard = vcard - r_msg.append(r_pdu) - - - def handle_list_ee_certificate_requests(self, q_pdu, r_msg): - - self.cur.execute( - """ - SELECT ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until - FROM ee_certificate - WHERE self_handle = %s - """, - (q_pdu.self_handle,)) - - for ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until in self.cur.fetchall(): - - r_pdu = rpki.left_right.list_ee_certificate_requests_elt() - r_pdu.tag = q_pdu.tag - r_pdu.self_handle = q_pdu.self_handle - r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ") - r_pdu.pkcs10 = rpki.x509.PKCS10(DER = pkcs10) - r_pdu.gski = gski - r_pdu.cn = cn - r_pdu.sn = sn - r_pdu.eku = eku - - r_pdu.asn = rpki.resource_set.resource_set_as.from_sql( - self.cur, - """ - SELECT start_as, end_as - FROM ee_certificate_asn - WHERE ee_certificate_id = %s - """, - (ee_certificate_id,)) + def handle_list_resources(self, q_pdu, r_msg): + + r_pdu = rpki.left_right.list_resources_elt() + r_pdu.tag = q_pdu.tag + r_pdu.self_handle = q_pdu.self_handle + r_pdu.child_handle = q_pdu.child_handle + + self.cur.execute( + """ + SELECT registrant_id, valid_until + FROM registrant + WHERE registry_handle = %s AND registrant_handle = %s + """, + (q_pdu.self_handle, q_pdu.child_handle)) + + if self.cur.rowcount != 1: + raise rpki.exceptions.NotInDatabase( + "This query should have produced a single exact match, something's messed up" + " (rowcount = %d, self_handle = %s, child_handle = %s)" + % (self.cur.rowcount, q_pdu.self_handle, q_pdu.child_handle)) + + registrant_id, valid_until = self.cur.fetchone() + + r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ") + + r_pdu.asn = rpki.resource_set.resource_set_as.from_sql( + self.cur, + """ + SELECT start_as, end_as + FROM registrant_asn + WHERE registrant_id = %s + """, + (registrant_id,)) + + r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql( + self.cur, + """ + SELECT start_ip, end_ip + FROM registrant_net + WHERE registrant_id = %s AND version = 4 + """, + (registrant_id,)) + + r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql( + self.cur, + """ + SELECT start_ip, end_ip + FROM registrant_net + WHERE registrant_id = %s AND version = 6 + """, + (registrant_id,)) + + r_msg.append(r_pdu) + + + def handle_list_roa_requests(self, q_pdu, r_msg): + + self.cur.execute( + "SELECT roa_request_id, asn FROM roa_request WHERE self_handle = %s", + (q_pdu.self_handle,)) + + for roa_request_id, asn in self.cur.fetchall(): + + r_pdu = rpki.left_right.list_roa_requests_elt() + r_pdu.tag = q_pdu.tag + r_pdu.self_handle = q_pdu.self_handle + r_pdu.asn = asn + + r_pdu.ipv4 = rpki.resource_set.roa_prefix_set_ipv4.from_sql( + self.cur, + """ + SELECT prefix, prefixlen, max_prefixlen + FROM roa_request_prefix + WHERE roa_request_id = %s AND version = 4 + """, + (roa_request_id,)) + + r_pdu.ipv6 = rpki.resource_set.roa_prefix_set_ipv6.from_sql( + self.cur, + """ + SELECT prefix, prefixlen, max_prefixlen + FROM roa_request_prefix + WHERE roa_request_id = %s AND version = 6 + """, + (roa_request_id,)) + + r_msg.append(r_pdu) + + + def handle_list_ghostbuster_requests(self, q_pdu, r_msg): + + self.cur.execute( + """ + SELECT vcard + FROM ghostbuster_request + WHERE self_handle = %s AND parent_handle = %s + """, + (q_pdu.self_handle, q_pdu.parent_handle)) + + vcards = [result[0] for result in self.cur.fetchall()] + + if not vcards: + + self.cur.execute( + """ + SELECT vcard + FROM ghostbuster_request + WHERE self_handle = %s AND parent_handle IS NULL + """, + (q_pdu.self_handle,)) + 
+ vcards = [result[0] for result in self.cur.fetchall()] + + for vcard in vcards: + r_pdu = rpki.left_right.list_ghostbuster_requests_elt() + r_pdu.tag = q_pdu.tag + r_pdu.self_handle = q_pdu.self_handle + r_pdu.parent_handle = q_pdu.parent_handle + r_pdu.vcard = vcard + r_msg.append(r_pdu) + + + def handle_list_ee_certificate_requests(self, q_pdu, r_msg): + + self.cur.execute( + """ + SELECT ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until + FROM ee_certificate + WHERE self_handle = %s + """, + (q_pdu.self_handle,)) + + for ee_certificate_id, pkcs10, gski, cn, sn, eku, valid_until in self.cur.fetchall(): + + r_pdu = rpki.left_right.list_ee_certificate_requests_elt() + r_pdu.tag = q_pdu.tag + r_pdu.self_handle = q_pdu.self_handle + r_pdu.valid_until = valid_until.strftime("%Y-%m-%dT%H:%M:%SZ") + r_pdu.pkcs10 = rpki.x509.PKCS10(DER = pkcs10) + r_pdu.gski = gski + r_pdu.cn = cn + r_pdu.sn = sn + r_pdu.eku = eku + + r_pdu.asn = rpki.resource_set.resource_set_as.from_sql( + self.cur, + """ + SELECT start_as, end_as + FROM ee_certificate_asn + WHERE ee_certificate_id = %s + """, + (ee_certificate_id,)) + + r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql( + self.cur, + """ + SELECT start_ip, end_ip + FROM ee_certificate_net + WHERE ee_certificate_id = %s AND version = 4 + """, + (ee_certificate_id,)) + + r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql( + self.cur, + """ + SELECT start_ip, end_ip + FROM ee_certificate_net + WHERE ee_certificate_id = %s AND version = 6 + """, + (ee_certificate_id,)) + + r_msg.append(r_pdu) + + + handle_dispatch = { + rpki.left_right.list_resources_elt : handle_list_resources, + rpki.left_right.list_roa_requests_elt : handle_list_roa_requests, + rpki.left_right.list_ghostbuster_requests_elt : handle_list_ghostbuster_requests, + rpki.left_right.list_ee_certificate_requests_elt : handle_list_ee_certificate_requests } + + def handler(self, request, q_der): + try: + + self.db.ping(True) + + r_msg = rpki.left_right.msg.reply() - r_pdu.ipv4 = rpki.resource_set.resource_set_ipv4.from_sql( - self.cur, - """ - SELECT start_ip, end_ip - FROM ee_certificate_net - WHERE ee_certificate_id = %s AND version = 4 - """, - (ee_certificate_id,)) - - r_pdu.ipv6 = rpki.resource_set.resource_set_ipv6.from_sql( - self.cur, - """ - SELECT start_ip, end_ip - FROM ee_certificate_net - WHERE ee_certificate_id = %s AND version = 6 - """, - (ee_certificate_id,)) - - r_msg.append(r_pdu) - - - handle_dispatch = { - rpki.left_right.list_resources_elt : handle_list_resources, - rpki.left_right.list_roa_requests_elt : handle_list_roa_requests, - rpki.left_right.list_ghostbuster_requests_elt : handle_list_ghostbuster_requests, - rpki.left_right.list_ee_certificate_requests_elt : handle_list_ee_certificate_requests } - - def handler(self, request, q_der): - try: - - self.db.ping(True) + try: - r_msg = rpki.left_right.msg.reply() + q_msg = rpki.left_right.cms_msg_saxify(DER = q_der).unwrap((self.bpki_ta, self.rpkid_cert)) - try: + if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query(): + raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg) - q_msg = rpki.left_right.cms_msg_saxify(DER = q_der).unwrap((self.bpki_ta, self.rpkid_cert)) - - if not isinstance(q_msg, rpki.left_right.msg) or not q_msg.is_query(): - raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_msg) - - for q_pdu in q_msg: + for q_pdu in q_msg: - try: + try: - try: - h = self.handle_dispatch[type(q_pdu)] - except KeyError: - raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu) 
- else: - h(self, q_pdu, r_msg) + try: + h = self.handle_dispatch[type(q_pdu)] + except KeyError: + raise rpki.exceptions.BadQuery("Unexpected %r PDU" % q_pdu) + else: + h(self, q_pdu, r_msg) - except Exception, e: - logger.exception("Exception serving PDU %r", q_pdu) - r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag)) + except Exception, e: + logger.exception("Exception serving PDU %r", q_pdu) + r_msg.append(rpki.left_right.report_error_elt.from_exception(e, q_pdu.self_handle, q_pdu.tag)) - except Exception, e: - logger.exception("Exception decoding query") - r_msg.append(rpki.left_right.report_error_elt.from_exception(e)) + except Exception, e: + logger.exception("Exception decoding query") + r_msg.append(rpki.left_right.report_error_elt.from_exception(e)) - request.send_cms_response(rpki.left_right.cms_msg_saxify().wrap(r_msg, self.irdbd_key, self.irdbd_cert)) + request.send_cms_response(rpki.left_right.cms_msg_saxify().wrap(r_msg, self.irdbd_key, self.irdbd_cert)) - except Exception, e: - logger.exception("Unhandled exception, returning HTTP failure") - request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e)) + except Exception, e: + logger.exception("Unhandled exception, returning HTTP failure") + request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e)) - def __init__(self): + def __init__(self): - os.environ["TZ"] = "UTC" - time.tzset() + os.environ["TZ"] = "UTC" + time.tzset() - parser = argparse.ArgumentParser(description = __doc__) - parser.add_argument("-c", "--config", - help = "override default location of configuration file") - parser.add_argument("-f", "--foreground", action = "store_true", - help = "do not daemonize (ignored, old_irdbd never daemonizes)") - rpki.log.argparse_setup(parser) - args = parser.parse_args() + parser = argparse.ArgumentParser(description = __doc__) + parser.add_argument("-c", "--config", + help = "override default location of configuration file") + parser.add_argument("-f", "--foreground", action = "store_true", + help = "do not daemonize (ignored, old_irdbd never daemonizes)") + rpki.log.argparse_setup(parser) + args = parser.parse_args() - rpki.log.init("irdbd", args) + rpki.log.init("irdbd", args) - self.cfg = rpki.config.parser(set_filename = args.config, section = "irdbd") + self.cfg = rpki.config.parser(set_filename = args.config, section = "irdbd") - startup_msg = self.cfg.get("startup-message", "") - if startup_msg: - logger.info(startup_msg) + startup_msg = self.cfg.get("startup-message", "") + if startup_msg: + logger.info(startup_msg) - self.cfg.set_global_flags() + self.cfg.set_global_flags() - self.db = MySQLdb.connect(user = self.cfg.get("sql-username"), - db = self.cfg.get("sql-database"), - passwd = self.cfg.get("sql-password")) + self.db = MySQLdb.connect(user = self.cfg.get("sql-username"), + db = self.cfg.get("sql-database"), + passwd = self.cfg.get("sql-password")) - self.cur = self.db.cursor() - self.db.autocommit(True) + self.cur = self.db.cursor() + self.db.autocommit(True) - self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta")) - self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert")) - self.irdbd_cert = rpki.x509.X509(Auto_update = self.cfg.get("irdbd-cert")) - self.irdbd_key = rpki.x509.RSA( Auto_update = self.cfg.get("irdbd-key")) + self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta")) + self.rpkid_cert = rpki.x509.X509(Auto_update = self.cfg.get("rpkid-cert")) + self.irdbd_cert = 
rpki.x509.X509(Auto_update = self.cfg.get("irdbd-cert")) + self.irdbd_key = rpki.x509.RSA( Auto_update = self.cfg.get("irdbd-key")) - u = urlparse.urlparse(self.cfg.get("http-url")) + u = urlparse.urlparse(self.cfg.get("http-url")) - assert u.scheme in ("", "http") and \ - u.username is None and \ - u.password is None and \ - u.params == "" and \ - u.query == "" and \ - u.fragment == "" + assert u.scheme in ("", "http") and \ + u.username is None and \ + u.password is None and \ + u.params == "" and \ + u.query == "" and \ + u.fragment == "" - rpki.http_simple.server(host = u.hostname or "localhost", - port = u.port or 443, - handlers = ((u.path, self.handler),)) + rpki.http_simple.server(host = u.hostname or "localhost", + port = u.port or 443, + handlers = ((u.path, self.handler),)) diff --git a/rpki/pubd.py b/rpki/pubd.py index f917c18d..ee258f26 100644 --- a/rpki/pubd.py +++ b/rpki/pubd.py @@ -45,252 +45,252 @@ logger = logging.getLogger(__name__) class main(object): - """ - Main program for pubd. - """ - - def __init__(self): - - os.environ.update(TZ = "UTC", - DJANGO_SETTINGS_MODULE = "rpki.django_settings.pubd") - time.tzset() - - self.irbe_cms_timestamp = None - - parser = argparse.ArgumentParser(description = __doc__) - parser.add_argument("-c", "--config", - help = "override default location of configuration file") - parser.add_argument("-f", "--foreground", action = "store_true", - help = "do not daemonize") - parser.add_argument("--pidfile", - help = "override default location of pid file") - parser.add_argument("--profile", - help = "enable profiling, saving data to PROFILE") - rpki.log.argparse_setup(parser) - args = parser.parse_args() - - self.profile = args.profile - - rpki.log.init("pubd", args) - - self.cfg = rpki.config.parser(set_filename = args.config, section = "pubd") - self.cfg.set_global_flags() - - if not args.foreground: - rpki.daemonize.daemon(pidfile = args.pidfile) - - if self.profile: - import cProfile - prof = cProfile.Profile() - try: - prof.runcall(self.main) - finally: - prof.dump_stats(self.profile) - logger.info("Dumped profile data to %s", self.profile) - else: - self.main() - - def main(self): - - if self.profile: - logger.info("Running in profile mode with output to %s", self.profile) - - import django - django.setup() - - global rpki # pylint: disable=W0602 - import rpki.pubdb # pylint: disable=W0621 - - self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta")) - self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert")) - self.pubd_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-cert")) - self.pubd_key = rpki.x509.RSA( Auto_update = self.cfg.get("pubd-key")) - self.pubd_crl = rpki.x509.CRL( Auto_update = self.cfg.get("pubd-crl")) - - self.http_server_host = self.cfg.get("server-host", "") - self.http_server_port = self.cfg.getint("server-port") - - self.publication_base = self.cfg.get("publication-base", "publication/") - - self.rrdp_uri_base = self.cfg.get("rrdp-uri-base", - "http://%s/rrdp/" % socket.getfqdn()) - self.rrdp_expiration_interval = rpki.sundial.timedelta.parse(self.cfg.get("rrdp-expiration-interval", "6h")) - self.rrdp_publication_base = self.cfg.get("rrdp-publication-base", - "rrdp-publication/") - - try: - self.session = rpki.pubdb.models.Session.objects.get() - except rpki.pubdb.models.Session.DoesNotExist: - self.session = rpki.pubdb.models.Session.objects.create(uuid = str(uuid.uuid4()), serial = 0) - - rpki.http_simple.server( - host = self.http_server_host, - port = self.http_server_port, - 
handlers = (("/control", self.control_handler), - ("/client/", self.client_handler))) - - - def control_handler(self, request, q_der): - """ - Process one PDU from the IRBE. """ - - from django.db import transaction, connection - - try: - connection.cursor() # Reconnect to mysqld if necessary - q_cms = rpki.publication_control.cms_msg(DER = q_der) - q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert)) - self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, "control") - if q_msg.get("type") != "query": - raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type")) - r_msg = Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap, - type = "reply", version = rpki.publication_control.version) - - try: - q_pdu = None - with transaction.atomic(): - - for q_pdu in q_msg: - if q_pdu.tag != rpki.publication_control.tag_client: - raise rpki.exceptions.BadQuery("PDU is %s, expected client" % q_pdu.tag) - client_handle = q_pdu.get("client_handle") - action = q_pdu.get("action") - if client_handle is None: - logger.info("Control %s request", action) - else: - logger.info("Control %s request for %s", action, client_handle) - - if action in ("get", "list"): - if action == "get": - clients = rpki.pubdb.models.Client.objects.get(client_handle = client_handle), - else: - clients = rpki.pubdb.models.Client.objects.all() - for client in clients: - r_pdu = SubElement(r_msg, q_pdu.tag, action = action, - client_handle = client.client_handle, base_uri = client.base_uri) - if q_pdu.get("tag"): - r_pdu.set("tag", q_pdu.get("tag")) - SubElement(r_pdu, rpki.publication_control.tag_bpki_cert).text = client.bpki_cert.get_Base64() - if client.bpki_glue is not None: - SubElement(r_pdu, rpki.publication_control.tag_bpki_glue).text = client.bpki_glue.get_Base64() - - if action in ("create", "set"): - if action == "create": - client = rpki.pubdb.models.Client(client_handle = client_handle) - else: - client = rpki.pubdb.models.Client.objects.get(client_handle = client_handle) - if q_pdu.get("base_uri"): - client.base_uri = q_pdu.get("base_uri") - bpki_cert = q_pdu.find(rpki.publication_control.tag_bpki_cert) - if bpki_cert is not None: - client.bpki_cert = rpki.x509.X509(Base64 = bpki_cert.text) - bpki_glue = q_pdu.find(rpki.publication_control.tag_bpki_glue) - if bpki_glue is not None: - client.bpki_glue = rpki.x509.X509(Base64 = bpki_glue.text) - if q_pdu.get("clear_replay_protection") == "yes": - client.last_cms_timestamp = None - client.save() - logger.debug("Stored client_handle %s, base_uri %s, bpki_cert %r, bpki_glue %r, last_cms_timestamp %s", - client.client_handle, client.base_uri, client.bpki_cert, client.bpki_glue, - client.last_cms_timestamp) - r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle) - if q_pdu.get("tag"): - r_pdu.set("tag", q_pdu.get("tag")) - - if action == "destroy": - rpki.pubdb.models.Client.objects.filter(client_handle = client_handle).delete() - r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle) - if q_pdu.get("tag"): - r_pdu.set("tag", q_pdu.get("tag")) - - except Exception, e: - logger.exception("Exception processing PDU %r", q_pdu) - r_pdu = SubElement(r_msg, rpki.publication_control.tag_report_error, error_code = e.__class__.__name__) - r_pdu.text = str(e) - if q_pdu.get("tag") is not None: - r_pdu.set("tag", q_pdu.get("tag")) - - request.send_cms_response(rpki.publication_control.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert)) - - except 
Exception, e: - logger.exception("Unhandled exception processing control query, path %r", request.path) - request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e)) - - - client_url_regexp = re.compile("/client/([-A-Z0-9_/]+)$", re.I) - - def client_handler(self, request, q_der): - """ - Process one PDU from a client. + Main program for pubd. """ - from django.db import transaction, connection - - try: - connection.cursor() # Reconnect to mysqld if necessary - match = self.client_url_regexp.search(request.path) - if match is None: - raise rpki.exceptions.BadContactURL("Bad path: %s" % request.path) - client = rpki.pubdb.models.Client.objects.get(client_handle = match.group(1)) - q_cms = rpki.publication.cms_msg(DER = q_der) - q_msg = q_cms.unwrap((self.bpki_ta, client.bpki_cert, client.bpki_glue)) - client.last_cms_timestamp = q_cms.check_replay(client.last_cms_timestamp, client.client_handle) - client.save() - if q_msg.get("type") != "query": - raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type")) - r_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap, - type = "reply", version = rpki.publication.version) - delta = None - try: - with transaction.atomic(): - for q_pdu in q_msg: - if q_pdu.get("uri"): - logger.info("Client %s request for %s", q_pdu.tag, q_pdu.get("uri")) - else: - logger.info("Client %s request", q_pdu.tag) + def __init__(self): - if q_pdu.tag == rpki.publication.tag_list: - for obj in client.publishedobject_set.all(): - r_pdu = SubElement(r_msg, q_pdu.tag, uri = obj.uri, hash = obj.hash) + os.environ.update(TZ = "UTC", + DJANGO_SETTINGS_MODULE = "rpki.django_settings.pubd") + time.tzset() + + self.irbe_cms_timestamp = None + + parser = argparse.ArgumentParser(description = __doc__) + parser.add_argument("-c", "--config", + help = "override default location of configuration file") + parser.add_argument("-f", "--foreground", action = "store_true", + help = "do not daemonize") + parser.add_argument("--pidfile", + help = "override default location of pid file") + parser.add_argument("--profile", + help = "enable profiling, saving data to PROFILE") + rpki.log.argparse_setup(parser) + args = parser.parse_args() + + self.profile = args.profile + + rpki.log.init("pubd", args) + + self.cfg = rpki.config.parser(set_filename = args.config, section = "pubd") + self.cfg.set_global_flags() + + if not args.foreground: + rpki.daemonize.daemon(pidfile = args.pidfile) + + if self.profile: + import cProfile + prof = cProfile.Profile() + try: + prof.runcall(self.main) + finally: + prof.dump_stats(self.profile) + logger.info("Dumped profile data to %s", self.profile) + else: + self.main() + + def main(self): + + if self.profile: + logger.info("Running in profile mode with output to %s", self.profile) + + import django + django.setup() + + global rpki # pylint: disable=W0602 + import rpki.pubdb # pylint: disable=W0621 + + self.bpki_ta = rpki.x509.X509(Auto_update = self.cfg.get("bpki-ta")) + self.irbe_cert = rpki.x509.X509(Auto_update = self.cfg.get("irbe-cert")) + self.pubd_cert = rpki.x509.X509(Auto_update = self.cfg.get("pubd-cert")) + self.pubd_key = rpki.x509.RSA( Auto_update = self.cfg.get("pubd-key")) + self.pubd_crl = rpki.x509.CRL( Auto_update = self.cfg.get("pubd-crl")) + + self.http_server_host = self.cfg.get("server-host", "") + self.http_server_port = self.cfg.getint("server-port") + + self.publication_base = self.cfg.get("publication-base", "publication/") + + self.rrdp_uri_base = 
self.cfg.get("rrdp-uri-base", + "http://%s/rrdp/" % socket.getfqdn()) + self.rrdp_expiration_interval = rpki.sundial.timedelta.parse(self.cfg.get("rrdp-expiration-interval", "6h")) + self.rrdp_publication_base = self.cfg.get("rrdp-publication-base", + "rrdp-publication/") + + try: + self.session = rpki.pubdb.models.Session.objects.get() + except rpki.pubdb.models.Session.DoesNotExist: + self.session = rpki.pubdb.models.Session.objects.create(uuid = str(uuid.uuid4()), serial = 0) + + rpki.http_simple.server( + host = self.http_server_host, + port = self.http_server_port, + handlers = (("/control", self.control_handler), + ("/client/", self.client_handler))) + + + def control_handler(self, request, q_der): + """ + Process one PDU from the IRBE. + """ + + from django.db import transaction, connection + + try: + connection.cursor() # Reconnect to mysqld if necessary + q_cms = rpki.publication_control.cms_msg(DER = q_der) + q_msg = q_cms.unwrap((self.bpki_ta, self.irbe_cert)) + self.irbe_cms_timestamp = q_cms.check_replay(self.irbe_cms_timestamp, "control") + if q_msg.get("type") != "query": + raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type")) + r_msg = Element(rpki.publication_control.tag_msg, nsmap = rpki.publication_control.nsmap, + type = "reply", version = rpki.publication_control.version) + + try: + q_pdu = None + with transaction.atomic(): + + for q_pdu in q_msg: + if q_pdu.tag != rpki.publication_control.tag_client: + raise rpki.exceptions.BadQuery("PDU is %s, expected client" % q_pdu.tag) + client_handle = q_pdu.get("client_handle") + action = q_pdu.get("action") + if client_handle is None: + logger.info("Control %s request", action) + else: + logger.info("Control %s request for %s", action, client_handle) + + if action in ("get", "list"): + if action == "get": + clients = rpki.pubdb.models.Client.objects.get(client_handle = client_handle), + else: + clients = rpki.pubdb.models.Client.objects.all() + for client in clients: + r_pdu = SubElement(r_msg, q_pdu.tag, action = action, + client_handle = client.client_handle, base_uri = client.base_uri) + if q_pdu.get("tag"): + r_pdu.set("tag", q_pdu.get("tag")) + SubElement(r_pdu, rpki.publication_control.tag_bpki_cert).text = client.bpki_cert.get_Base64() + if client.bpki_glue is not None: + SubElement(r_pdu, rpki.publication_control.tag_bpki_glue).text = client.bpki_glue.get_Base64() + + if action in ("create", "set"): + if action == "create": + client = rpki.pubdb.models.Client(client_handle = client_handle) + else: + client = rpki.pubdb.models.Client.objects.get(client_handle = client_handle) + if q_pdu.get("base_uri"): + client.base_uri = q_pdu.get("base_uri") + bpki_cert = q_pdu.find(rpki.publication_control.tag_bpki_cert) + if bpki_cert is not None: + client.bpki_cert = rpki.x509.X509(Base64 = bpki_cert.text) + bpki_glue = q_pdu.find(rpki.publication_control.tag_bpki_glue) + if bpki_glue is not None: + client.bpki_glue = rpki.x509.X509(Base64 = bpki_glue.text) + if q_pdu.get("clear_replay_protection") == "yes": + client.last_cms_timestamp = None + client.save() + logger.debug("Stored client_handle %s, base_uri %s, bpki_cert %r, bpki_glue %r, last_cms_timestamp %s", + client.client_handle, client.base_uri, client.bpki_cert, client.bpki_glue, + client.last_cms_timestamp) + r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle) + if q_pdu.get("tag"): + r_pdu.set("tag", q_pdu.get("tag")) + + if action == "destroy": + rpki.pubdb.models.Client.objects.filter(client_handle = 
client_handle).delete() + r_pdu = SubElement(r_msg, q_pdu.tag, action = action, client_handle = client_handle) + if q_pdu.get("tag"): + r_pdu.set("tag", q_pdu.get("tag")) + + except Exception, e: + logger.exception("Exception processing PDU %r", q_pdu) + r_pdu = SubElement(r_msg, rpki.publication_control.tag_report_error, error_code = e.__class__.__name__) + r_pdu.text = str(e) if q_pdu.get("tag") is not None: - r_pdu.set("tag", q_pdu.get("tag")) + r_pdu.set("tag", q_pdu.get("tag")) + + request.send_cms_response(rpki.publication_control.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert)) + + except Exception, e: + logger.exception("Unhandled exception processing control query, path %r", request.path) + request.send_error(500, "Unhandled exception %s: %s" % (e.__class__.__name__, e)) + + + client_url_regexp = re.compile("/client/([-A-Z0-9_/]+)$", re.I) + + def client_handler(self, request, q_der): + """ + Process one PDU from a client. + """ + + from django.db import transaction, connection + + try: + connection.cursor() # Reconnect to mysqld if necessary + match = self.client_url_regexp.search(request.path) + if match is None: + raise rpki.exceptions.BadContactURL("Bad path: %s" % request.path) + client = rpki.pubdb.models.Client.objects.get(client_handle = match.group(1)) + q_cms = rpki.publication.cms_msg(DER = q_der) + q_msg = q_cms.unwrap((self.bpki_ta, client.bpki_cert, client.bpki_glue)) + client.last_cms_timestamp = q_cms.check_replay(client.last_cms_timestamp, client.client_handle) + client.save() + if q_msg.get("type") != "query": + raise rpki.exceptions.BadQuery("Message type is %s, expected query" % q_msg.get("type")) + r_msg = Element(rpki.publication.tag_msg, nsmap = rpki.publication.nsmap, + type = "reply", version = rpki.publication.version) + delta = None + try: + with transaction.atomic(): + for q_pdu in q_msg: + if q_pdu.get("uri"): + logger.info("Client %s request for %s", q_pdu.tag, q_pdu.get("uri")) + else: + logger.info("Client %s request", q_pdu.tag) + + if q_pdu.tag == rpki.publication.tag_list: + for obj in client.publishedobject_set.all(): + r_pdu = SubElement(r_msg, q_pdu.tag, uri = obj.uri, hash = obj.hash) + if q_pdu.get("tag") is not None: + r_pdu.set("tag", q_pdu.get("tag")) + + else: + assert q_pdu.tag in (rpki.publication.tag_publish, rpki.publication.tag_withdraw) + if delta is None: + delta = self.session.new_delta(rpki.sundial.now() + self.rrdp_expiration_interval) + client.check_allowed_uri(q_pdu.get("uri")) + if q_pdu.tag == rpki.publication.tag_publish: + der = q_pdu.text.decode("base64") + logger.info("Publishing %s", rpki.x509.uri_dispatch(q_pdu.get("uri"))(DER = der).tracking_data(q_pdu.get("uri"))) + delta.publish(client, der, q_pdu.get("uri"), q_pdu.get("hash")) + else: + logger.info("Withdrawing %s", q_pdu.get("uri")) + delta.withdraw(client, q_pdu.get("uri"), q_pdu.get("hash")) + r_pdu = SubElement(r_msg, q_pdu.tag, uri = q_pdu.get("uri")) + if q_pdu.get("tag") is not None: + r_pdu.set("tag", q_pdu.get("tag")) + + if delta is not None: + delta.activate() + self.session.generate_snapshot() + self.session.expire_deltas() + + except Exception, e: + logger.exception("Exception processing PDU %r", q_pdu) + r_pdu = SubElement(r_msg, rpki.publication.tag_report_error, error_code = e.__class__.__name__) + r_pdu.text = str(e) + if q_pdu.get("tag") is not None: + r_pdu.set("tag", q_pdu.get("tag")) else: - assert q_pdu.tag in (rpki.publication.tag_publish, rpki.publication.tag_withdraw) - if delta is None: - delta = 
self.session.new_delta(rpki.sundial.now() + self.rrdp_expiration_interval) - client.check_allowed_uri(q_pdu.get("uri")) - if q_pdu.tag == rpki.publication.tag_publish: - der = q_pdu.text.decode("base64") - logger.info("Publishing %s", rpki.x509.uri_dispatch(q_pdu.get("uri"))(DER = der).tracking_data(q_pdu.get("uri"))) - delta.publish(client, der, q_pdu.get("uri"), q_pdu.get("hash")) - else: - logger.info("Withdrawing %s", q_pdu.get("uri")) - delta.withdraw(client, q_pdu.get("uri"), q_pdu.get("hash")) - r_pdu = SubElement(r_msg, q_pdu.tag, uri = q_pdu.get("uri")) - if q_pdu.get("tag") is not None: - r_pdu.set("tag", q_pdu.get("tag")) - - if delta is not None: - delta.activate() - self.session.generate_snapshot() - self.session.expire_deltas() - - except Exception, e: - logger.exception("Exception processing PDU %r", q_pdu) - r_pdu = SubElement(r_msg, rpki.publication.tag_report_error, error_code = e.__class__.__name__) - r_pdu.text = str(e) - if q_pdu.get("tag") is not None: - r_pdu.set("tag", q_pdu.get("tag")) - - else: - if delta is not None: - self.session.synchronize_rrdp_files(self.rrdp_publication_base, self.rrdp_uri_base) - delta.update_rsync_files(self.publication_base) - - request.send_cms_response(rpki.publication.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert, self.pubd_crl)) - - except Exception, e: - logger.exception("Unhandled exception processing client query, path %r", request.path) - request.send_error(500, "Could not process PDU: %s" % e) + if delta is not None: + self.session.synchronize_rrdp_files(self.rrdp_publication_base, self.rrdp_uri_base) + delta.update_rsync_files(self.publication_base) + + request.send_cms_response(rpki.publication.cms_msg().wrap(r_msg, self.pubd_key, self.pubd_cert, self.pubd_crl)) + + except Exception, e: + logger.exception("Unhandled exception processing client query, path %r", request.path) + request.send_error(500, "Could not process PDU: %s" % e) diff --git a/rpki/pubdb/models.py b/rpki/pubdb/models.py index 2b6d67e4..46dcf493 100644 --- a/rpki/pubdb/models.py +++ b/rpki/pubdb/models.py @@ -48,266 +48,266 @@ rrdp_tag_withdraw = rrdp_xmlns + "withdraw" # sure quite where to put it at the moment. def DERSubElement(elt, name, der, attrib = None, **kwargs): - """ - Convenience wrapper around SubElement for use with Base64 text. - """ + """ + Convenience wrapper around SubElement for use with Base64 text. + """ - se = SubElement(elt, name, attrib, **kwargs) - se.text = rpki.x509.base64_with_linebreaks(der) - se.tail = "\n" - return se + se = SubElement(elt, name, attrib, **kwargs) + se.text = rpki.x509.base64_with_linebreaks(der) + se.tail = "\n" + return se class Client(models.Model): - client_handle = models.CharField(unique = True, max_length = 255) - base_uri = models.TextField() - bpki_cert = CertificateField() - bpki_glue = CertificateField(null = True) - last_cms_timestamp = SundialField(blank = True, null = True) + client_handle = models.CharField(unique = True, max_length = 255) + base_uri = models.TextField() + bpki_cert = CertificateField() + bpki_glue = CertificateField(null = True) + last_cms_timestamp = SundialField(blank = True, null = True) - def check_allowed_uri(self, uri): - """ - Make sure that a target URI is within this client's allowed URI space. - """ + def check_allowed_uri(self, uri): + """ + Make sure that a target URI is within this client's allowed URI space. 
+ """ - if not uri.startswith(self.base_uri): - raise rpki.exceptions.ForbiddenURI + if not uri.startswith(self.base_uri): + raise rpki.exceptions.ForbiddenURI class Session(models.Model): - uuid = models.CharField(unique = True, max_length=36) - serial = models.BigIntegerField() - snapshot = models.TextField(blank = True) - hash = models.CharField(max_length = 64, blank = True) - - ## @var keep_all_rrdp_files - # Debugging flag to prevent expiration of old RRDP files. - # This simplifies debugging delta code. Need for this - # may go away once RRDP is fully integrated into rcynic. - keep_all_rrdp_files = False - - def new_delta(self, expires): - """ - Construct a new delta associated with this session. - """ + uuid = models.CharField(unique = True, max_length=36) + serial = models.BigIntegerField() + snapshot = models.TextField(blank = True) + hash = models.CharField(max_length = 64, blank = True) + + ## @var keep_all_rrdp_files + # Debugging flag to prevent expiration of old RRDP files. + # This simplifies debugging delta code. Need for this + # may go away once RRDP is fully integrated into rcynic. + keep_all_rrdp_files = False + + def new_delta(self, expires): + """ + Construct a new delta associated with this session. + """ + + delta = Delta(session = self, + serial = self.serial + 1, + expires = expires) + delta.elt = Element(rrdp_tag_delta, + nsmap = rrdp_nsmap, + version = rrdp_version, + session_id = self.uuid, + serial = str(delta.serial)) + return delta + + + def expire_deltas(self): + """ + Delete deltas whose expiration date has passed. + """ + + self.delta_set.filter(expires__lt = rpki.sundial.now()).delete() + + + def generate_snapshot(self): + """ + Generate an XML snapshot of this session. + """ + + xml = Element(rrdp_tag_snapshot, nsmap = rrdp_nsmap, + version = rrdp_version, + session_id = self.uuid, + serial = str(self.serial)) + xml.text = "\n" + for obj in self.publishedobject_set.all(): + DERSubElement(xml, rrdp_tag_publish, + der = obj.der, + uri = obj.uri) + rpki.relaxng.rrdp.assertValid(xml) + self.snapshot = ElementToString(xml, pretty_print = True) + self.hash = rpki.x509.sha256(self.snapshot).encode("hex") + self.save() + + + @property + def snapshot_fn(self): + return "%s/snapshot/%s.xml" % (self.uuid, self.serial) + + + @property + def notification_fn(self): + return "notify.xml" + + + @staticmethod + def _write_rrdp_file(fn, text, rrdp_publication_base, overwrite = False): + if overwrite or not os.path.exists(os.path.join(rrdp_publication_base, fn)): + tn = os.path.join(rrdp_publication_base, fn + ".%s.tmp" % os.getpid()) + if not os.path.isdir(os.path.dirname(tn)): + os.makedirs(os.path.dirname(tn)) + with open(tn, "w") as f: + f.write(text) + os.rename(tn, os.path.join(rrdp_publication_base, fn)) + + + @staticmethod + def _rrdp_filename_to_uri(fn, rrdp_uri_base): + return "%s/%s" % (rrdp_uri_base.rstrip("/"), fn) + + + def _generate_update_xml(self, rrdp_uri_base): + xml = Element(rrdp_tag_notification, nsmap = rrdp_nsmap, + version = rrdp_version, + session_id = self.uuid, + serial = str(self.serial)) + SubElement(xml, rrdp_tag_snapshot, + uri = self._rrdp_filename_to_uri(self.snapshot_fn, rrdp_uri_base), + hash = self.hash) + for delta in self.delta_set.all(): + SubElement(xml, rrdp_tag_delta, + uri = self._rrdp_filename_to_uri(delta.fn, rrdp_uri_base), + hash = delta.hash, + serial = str(delta.serial)) + rpki.relaxng.rrdp.assertValid(xml) + return ElementToString(xml, pretty_print = True) + + + def synchronize_rrdp_files(self, rrdp_publication_base, 
rrdp_uri_base): + """ + Write current RRDP files to disk, clean up old files and directories. + """ + + current_filenames = set() + + for delta in self.delta_set.all(): + self._write_rrdp_file(delta.fn, delta.xml, rrdp_publication_base) + current_filenames.add(delta.fn) + + self._write_rrdp_file(self.snapshot_fn, self.snapshot, rrdp_publication_base) + current_filenames.add(self.snapshot_fn) + + self._write_rrdp_file(self.notification_fn, self._generate_update_xml(rrdp_uri_base), + rrdp_publication_base, overwrite = True) + current_filenames.add(self.notification_fn) + + if not self.keep_all_rrdp_files: + for root, dirs, files in os.walk(rrdp_publication_base, topdown = False): + for fn in files: + fn = os.path.join(root, fn) + if fn[len(rrdp_publication_base):].lstrip("/") not in current_filenames: + os.remove(fn) + for dn in dirs: + try: + os.rmdir(os.path.join(root, dn)) + except OSError: + pass - delta = Delta(session = self, - serial = self.serial + 1, - expires = expires) - delta.elt = Element(rrdp_tag_delta, - nsmap = rrdp_nsmap, - version = rrdp_version, - session_id = self.uuid, - serial = str(delta.serial)) - return delta +class Delta(models.Model): + serial = models.BigIntegerField() + xml = models.TextField() + hash = models.CharField(max_length = 64) + expires = SundialField() + session = models.ForeignKey(Session) - def expire_deltas(self): - """ - Delete deltas whose expiration date has passed. - """ - self.delta_set.filter(expires__lt = rpki.sundial.now()).delete() + @staticmethod + def _uri_to_filename(uri, publication_base): + if not uri.startswith("rsync://"): + raise rpki.exceptions.BadURISyntax(uri) + path = uri.split("/")[4:] + path.insert(0, publication_base.rstrip("/")) + filename = "/".join(path) + if "/../" in filename or filename.endswith("/.."): + raise rpki.exceptions.BadURISyntax(filename) + return filename - def generate_snapshot(self): - """ - Generate an XML snapshot of this session. 
- """ + @property + def fn(self): + return "%s/deltas/%s.xml" % (self.session.uuid, self.serial) - xml = Element(rrdp_tag_snapshot, nsmap = rrdp_nsmap, - version = rrdp_version, - session_id = self.uuid, - serial = str(self.serial)) - xml.text = "\n" - for obj in self.publishedobject_set.all(): - DERSubElement(xml, rrdp_tag_publish, - der = obj.der, - uri = obj.uri) - rpki.relaxng.rrdp.assertValid(xml) - self.snapshot = ElementToString(xml, pretty_print = True) - self.hash = rpki.x509.sha256(self.snapshot).encode("hex") - self.save() - - - @property - def snapshot_fn(self): - return "%s/snapshot/%s.xml" % (self.uuid, self.serial) - - - @property - def notification_fn(self): - return "notify.xml" - - - @staticmethod - def _write_rrdp_file(fn, text, rrdp_publication_base, overwrite = False): - if overwrite or not os.path.exists(os.path.join(rrdp_publication_base, fn)): - tn = os.path.join(rrdp_publication_base, fn + ".%s.tmp" % os.getpid()) - if not os.path.isdir(os.path.dirname(tn)): - os.makedirs(os.path.dirname(tn)) - with open(tn, "w") as f: - f.write(text) - os.rename(tn, os.path.join(rrdp_publication_base, fn)) - - - @staticmethod - def _rrdp_filename_to_uri(fn, rrdp_uri_base): - return "%s/%s" % (rrdp_uri_base.rstrip("/"), fn) - - - def _generate_update_xml(self, rrdp_uri_base): - xml = Element(rrdp_tag_notification, nsmap = rrdp_nsmap, - version = rrdp_version, - session_id = self.uuid, - serial = str(self.serial)) - SubElement(xml, rrdp_tag_snapshot, - uri = self._rrdp_filename_to_uri(self.snapshot_fn, rrdp_uri_base), - hash = self.hash) - for delta in self.delta_set.all(): - SubElement(xml, rrdp_tag_delta, - uri = self._rrdp_filename_to_uri(delta.fn, rrdp_uri_base), - hash = delta.hash, - serial = str(delta.serial)) - rpki.relaxng.rrdp.assertValid(xml) - return ElementToString(xml, pretty_print = True) - - - def synchronize_rrdp_files(self, rrdp_publication_base, rrdp_uri_base): - """ - Write current RRDP files to disk, clean up old files and directories. 
- """ - current_filenames = set() - - for delta in self.delta_set.all(): - self._write_rrdp_file(delta.fn, delta.xml, rrdp_publication_base) - current_filenames.add(delta.fn) - - self._write_rrdp_file(self.snapshot_fn, self.snapshot, rrdp_publication_base) - current_filenames.add(self.snapshot_fn) - - self._write_rrdp_file(self.notification_fn, self._generate_update_xml(rrdp_uri_base), - rrdp_publication_base, overwrite = True) - current_filenames.add(self.notification_fn) - - if not self.keep_all_rrdp_files: - for root, dirs, files in os.walk(rrdp_publication_base, topdown = False): - for fn in files: - fn = os.path.join(root, fn) - if fn[len(rrdp_publication_base):].lstrip("/") not in current_filenames: - os.remove(fn) - for dn in dirs: - try: - os.rmdir(os.path.join(root, dn)) - except OSError: - pass + def activate(self): + rpki.relaxng.rrdp.assertValid(self.elt) + self.xml = ElementToString(self.elt, pretty_print = True) + self.hash = rpki.x509.sha256(self.xml).encode("hex") + self.save() + self.session.serial += 1 + self.session.save() -class Delta(models.Model): - serial = models.BigIntegerField() - xml = models.TextField() - hash = models.CharField(max_length = 64) - expires = SundialField() - session = models.ForeignKey(Session) - - - @staticmethod - def _uri_to_filename(uri, publication_base): - if not uri.startswith("rsync://"): - raise rpki.exceptions.BadURISyntax(uri) - path = uri.split("/")[4:] - path.insert(0, publication_base.rstrip("/")) - filename = "/".join(path) - if "/../" in filename or filename.endswith("/.."): - raise rpki.exceptions.BadURISyntax(filename) - return filename - - - @property - def fn(self): - return "%s/deltas/%s.xml" % (self.session.uuid, self.serial) - - - def activate(self): - rpki.relaxng.rrdp.assertValid(self.elt) - self.xml = ElementToString(self.elt, pretty_print = True) - self.hash = rpki.x509.sha256(self.xml).encode("hex") - self.save() - self.session.serial += 1 - self.session.save() - - - def publish(self, client, der, uri, obj_hash): - try: - obj = client.publishedobject_set.get(session = self.session, uri = uri) - if obj.hash == obj_hash: - obj.delete() - elif obj_hash is None: - raise rpki.exceptions.ExistingObjectAtURI("Object already published at %s" % uri) - else: - raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash)) - except rpki.pubdb.models.PublishedObject.DoesNotExist: - pass - logger.debug("Publishing %s", uri) - PublishedObject.objects.create(session = self.session, client = client, der = der, uri = uri, - hash = rpki.x509.sha256(der).encode("hex")) - se = DERSubElement(self.elt, rrdp_tag_publish, der = der, uri = uri) - if obj_hash is not None: - se.set("hash", obj_hash) - rpki.relaxng.rrdp.assertValid(self.elt) - - - def withdraw(self, client, uri, obj_hash): - obj = client.publishedobject_set.get(session = self.session, uri = uri) - if obj.hash != obj_hash: - raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash)) - logger.debug("Withdrawing %s", uri) - obj.delete() - SubElement(self.elt, rrdp_tag_withdraw, uri = uri, hash = obj_hash).tail = "\n" - rpki.relaxng.rrdp.assertValid(self.elt) - - - def update_rsync_files(self, publication_base): - from errno import ENOENT - min_path_len = len(publication_base.rstrip("/")) - for pdu in self.elt: - assert pdu.tag in (rrdp_tag_publish, rrdp_tag_withdraw) - fn = self._uri_to_filename(pdu.get("uri"), publication_base) - if pdu.tag == rrdp_tag_publish: - tn 
= fn + ".tmp" - dn = os.path.dirname(fn) - if not os.path.isdir(dn): - os.makedirs(dn) - with open(tn, "wb") as f: - f.write(pdu.text.decode("base64")) - os.rename(tn, fn) - else: + def publish(self, client, der, uri, obj_hash): try: - os.remove(fn) - except OSError, e: - if e.errno != ENOENT: - raise - dn = os.path.dirname(fn) - while len(dn) > min_path_len: - try: - os.rmdir(dn) - except OSError: - break - else: - dn = os.path.dirname(dn) - del self.elt + obj = client.publishedobject_set.get(session = self.session, uri = uri) + if obj.hash == obj_hash: + obj.delete() + elif obj_hash is None: + raise rpki.exceptions.ExistingObjectAtURI("Object already published at %s" % uri) + else: + raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash)) + except rpki.pubdb.models.PublishedObject.DoesNotExist: + pass + logger.debug("Publishing %s", uri) + PublishedObject.objects.create(session = self.session, client = client, der = der, uri = uri, + hash = rpki.x509.sha256(der).encode("hex")) + se = DERSubElement(self.elt, rrdp_tag_publish, der = der, uri = uri) + if obj_hash is not None: + se.set("hash", obj_hash) + rpki.relaxng.rrdp.assertValid(self.elt) + + + def withdraw(self, client, uri, obj_hash): + obj = client.publishedobject_set.get(session = self.session, uri = uri) + if obj.hash != obj_hash: + raise rpki.exceptions.DifferentObjectAtURI("Found different object at %s (old %s, new %s)" % (uri, obj.hash, obj_hash)) + logger.debug("Withdrawing %s", uri) + obj.delete() + SubElement(self.elt, rrdp_tag_withdraw, uri = uri, hash = obj_hash).tail = "\n" + rpki.relaxng.rrdp.assertValid(self.elt) + + + def update_rsync_files(self, publication_base): + from errno import ENOENT + min_path_len = len(publication_base.rstrip("/")) + for pdu in self.elt: + assert pdu.tag in (rrdp_tag_publish, rrdp_tag_withdraw) + fn = self._uri_to_filename(pdu.get("uri"), publication_base) + if pdu.tag == rrdp_tag_publish: + tn = fn + ".tmp" + dn = os.path.dirname(fn) + if not os.path.isdir(dn): + os.makedirs(dn) + with open(tn, "wb") as f: + f.write(pdu.text.decode("base64")) + os.rename(tn, fn) + else: + try: + os.remove(fn) + except OSError, e: + if e.errno != ENOENT: + raise + dn = os.path.dirname(fn) + while len(dn) > min_path_len: + try: + os.rmdir(dn) + except OSError: + break + else: + dn = os.path.dirname(dn) + del self.elt class PublishedObject(models.Model): - uri = models.CharField(max_length = 255) - der = models.BinaryField() - hash = models.CharField(max_length = 64) - client = models.ForeignKey(Client) - session = models.ForeignKey(Session) - - class Meta: # pylint: disable=C1001,W0232 - unique_together = (("session", "hash"), - ("session", "uri")) + uri = models.CharField(max_length = 255) + der = models.BinaryField() + hash = models.CharField(max_length = 64) + client = models.ForeignKey(Client) + session = models.ForeignKey(Session) + + class Meta: # pylint: disable=C1001,W0232 + unique_together = (("session", "hash"), + ("session", "uri")) diff --git a/rpki/publication.py b/rpki/publication.py index 16824d05..393e078e 100644 --- a/rpki/publication.py +++ b/rpki/publication.py @@ -51,34 +51,34 @@ allowed_content_types = (content_type,) def raise_if_error(pdu): - """ - Raise an appropriate error if this is a PDU. - - As a convenience, this will also accept a PDU and raise an - appropriate error if it contains any PDUs or if - the is not a reply. 
- """ - - if pdu.tag == tag_report_error: - code = pdu.get("error_code") - logger.debug(" code %r", code) - e = getattr(rpki.exceptions, code, None) - if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception): - raise e(pdu.text) - else: - raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu)) - - if pdu.tag == tag_msg: - if pdu.get("type") != "reply": - raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: expected reply, got %r" % pdu.get("type")) - for p in pdu: - raise_if_error(p) + """ + Raise an appropriate error if this is a PDU. + + As a convenience, this will also accept a PDU and raise an + appropriate error if it contains any PDUs or if + the is not a reply. + """ + + if pdu.tag == tag_report_error: + code = pdu.get("error_code") + logger.debug(" code %r", code) + e = getattr(rpki.exceptions, code, None) + if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception): + raise e(pdu.text) + else: + raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu)) + + if pdu.tag == tag_msg: + if pdu.get("type") != "reply": + raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: expected reply, got %r" % pdu.get("type")) + for p in pdu: + raise_if_error(p) class cms_msg(rpki.x509.XML_CMS_object): - """ - CMS-signed publication PDU. - """ + """ + CMS-signed publication PDU. + """ - encoding = "us-ascii" - schema = rpki.relaxng.publication + encoding = "us-ascii" + schema = rpki.relaxng.publication diff --git a/rpki/publication_control.py b/rpki/publication_control.py index ddb9d417..b0668eef 100644 --- a/rpki/publication_control.py +++ b/rpki/publication_control.py @@ -44,31 +44,31 @@ tag_report_error = rpki.relaxng.publication_control.xmlns + "report_error" def raise_if_error(pdu): - """ - Raise an appropriate error if this is a PDU. + """ + Raise an appropriate error if this is a PDU. - As a convience, this will also accept a PDU and raise an - appropriate error if it contains any PDUs. - """ + As a convience, this will also accept a PDU and raise an + appropriate error if it contains any PDUs. + """ - if pdu.tag == tag_report_error: - code = pdu.get("error_code") - logger.debug(" code %r", code) - e = getattr(rpki.exceptions, code, None) - if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception): - raise e(pdu.text) - else: - raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu)) + if pdu.tag == tag_report_error: + code = pdu.get("error_code") + logger.debug(" code %r", code) + e = getattr(rpki.exceptions, code, None) + if e is not None and issubclass(e, rpki.exceptions.RPKI_Exception): + raise e(pdu.text) + else: + raise rpki.exceptions.BadPublicationReply("Unexpected response from pubd: %r, %r" % (code, pdu)) - if pdu.tag == tag_msg: - for p in pdu: - raise_if_error(p) + if pdu.tag == tag_msg: + for p in pdu: + raise_if_error(p) class cms_msg(rpki.x509.XML_CMS_object): - """ - CMS-signed publication control PDU. - """ + """ + CMS-signed publication control PDU. 
+ """ - encoding = "us-ascii" - schema = rpki.relaxng.publication_control + encoding = "us-ascii" + schema = rpki.relaxng.publication_control diff --git a/rpki/rcynic.py b/rpki/rcynic.py index a36e4a4e..3307e926 100644 --- a/rpki/rcynic.py +++ b/rpki/rcynic.py @@ -25,142 +25,142 @@ import rpki.resource_set from xml.etree.ElementTree import ElementTree class UnknownObject(rpki.exceptions.RPKI_Exception): - """ - Unrecognized object in rcynic result cache. - """ + """ + Unrecognized object in rcynic result cache. + """ class NotRsyncURI(rpki.exceptions.RPKI_Exception): - """ - URI is not an rsync URI. - """ + """ + URI is not an rsync URI. + """ class rcynic_object(object): - """ - An object read from rcynic cache. - """ + """ + An object read from rcynic cache. + """ - def __init__(self, filename, **kwargs): - self.filename = filename - for k, v in kwargs.iteritems(): - setattr(self, k, v) - self.obj = self.obj_class(DER_file = filename) + def __init__(self, filename, **kwargs): + self.filename = filename + for k, v in kwargs.iteritems(): + setattr(self, k, v) + self.obj = self.obj_class(DER_file = filename) - def __repr__(self): - return "<%s %s %s at 0x%x>" % (self.__class__.__name__, self.uri, self.resources, id(self)) + def __repr__(self): + return "<%s %s %s at 0x%x>" % (self.__class__.__name__, self.uri, self.resources, id(self)) - def show_attrs(self, *attrs): - """ - Print a bunch of object attributes, quietly ignoring any that - might be missing. - """ + def show_attrs(self, *attrs): + """ + Print a bunch of object attributes, quietly ignoring any that + might be missing. + """ - for a in attrs: - try: - print "%s: %s" % (a.capitalize(), getattr(self, a)) - except AttributeError: - pass + for a in attrs: + try: + print "%s: %s" % (a.capitalize(), getattr(self, a)) + except AttributeError: + pass - def show(self): - """ - Print common object attributes. - """ + def show(self): + """ + Print common object attributes. + """ - self.show_attrs("filename", "uri", "status", "timestamp") + self.show_attrs("filename", "uri", "status", "timestamp") class rcynic_certificate(rcynic_object): - """ - A certificate from rcynic cache. - """ - - obj_class = rpki.x509.X509 - - def __init__(self, filename, **kwargs): - rcynic_object.__init__(self, filename, **kwargs) - self.notBefore = self.obj.getNotBefore() - self.notAfter = self.obj.getNotAfter() - self.aia_uri = self.obj.get_aia_uri() - self.sia_directory_uri = self.obj.get_sia_directory_uri() - self.manifest_uri = self.obj.get_sia_manifest_uri() - self.resources = self.obj.get_3779resources() - self.is_ca = self.obj.is_CA() - self.serial = self.obj.getSerial() - self.issuer = self.obj.getIssuer() - self.subject = self.obj.getSubject() - self.ski = self.obj.hSKI() - self.aki = self.obj.hAKI() - - def show(self): """ - Print certificate attributes. + A certificate from rcynic cache. 
""" - rcynic_object.show(self) - self.show_attrs("notBefore", "notAfter", "aia_uri", "sia_directory_uri", "resources") + obj_class = rpki.x509.X509 + + def __init__(self, filename, **kwargs): + rcynic_object.__init__(self, filename, **kwargs) + self.notBefore = self.obj.getNotBefore() + self.notAfter = self.obj.getNotAfter() + self.aia_uri = self.obj.get_aia_uri() + self.sia_directory_uri = self.obj.get_sia_directory_uri() + self.manifest_uri = self.obj.get_sia_manifest_uri() + self.resources = self.obj.get_3779resources() + self.is_ca = self.obj.is_CA() + self.serial = self.obj.getSerial() + self.issuer = self.obj.getIssuer() + self.subject = self.obj.getSubject() + self.ski = self.obj.hSKI() + self.aki = self.obj.hAKI() + + def show(self): + """ + Print certificate attributes. + """ + + rcynic_object.show(self) + self.show_attrs("notBefore", "notAfter", "aia_uri", "sia_directory_uri", "resources") class rcynic_roa(rcynic_object): - """ - A ROA from rcynic cache. - """ - - obj_class = rpki.x509.ROA - - def __init__(self, filename, **kwargs): - rcynic_object.__init__(self, filename, **kwargs) - self.obj.extract() - self.asID = self.obj.get_POW().getASID() - self.prefix_sets = [] - v4, v6 = self.obj.get_POW().getPrefixes() - if v4: - self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv4([ - rpki.resource_set.roa_prefix_ipv4(p[0], p[1], p[2]) for p in v4])) - if v6: - self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv6([ - rpki.resource_set.roa_prefix_ipv6(p[0], p[1], p[2]) for p in v6])) - self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0]) - self.notBefore = self.ee.getNotBefore() - self.notAfter = self.ee.getNotAfter() - self.aia_uri = self.ee.get_aia_uri() - self.resources = self.ee.get_3779resources() - self.issuer = self.ee.getIssuer() - self.serial = self.ee.getSerial() - self.subject = self.ee.getSubject() - self.aki = self.ee.hAKI() - self.ski = self.ee.hSKI() - - def show(self): """ - Print ROA attributes. + A ROA from rcynic cache. """ - rcynic_object.show(self) - self.show_attrs("notBefore", "notAfter", "aia_uri", "resources", "asID") - if self.prefix_sets: - print "Prefixes:", ",".join(str(i) for i in self.prefix_sets) + obj_class = rpki.x509.ROA + + def __init__(self, filename, **kwargs): + rcynic_object.__init__(self, filename, **kwargs) + self.obj.extract() + self.asID = self.obj.get_POW().getASID() + self.prefix_sets = [] + v4, v6 = self.obj.get_POW().getPrefixes() + if v4: + self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv4([ + rpki.resource_set.roa_prefix_ipv4(p[0], p[1], p[2]) for p in v4])) + if v6: + self.prefix_sets.append(rpki.resource_set.roa_prefix_set_ipv6([ + rpki.resource_set.roa_prefix_ipv6(p[0], p[1], p[2]) for p in v6])) + self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0]) + self.notBefore = self.ee.getNotBefore() + self.notAfter = self.ee.getNotAfter() + self.aia_uri = self.ee.get_aia_uri() + self.resources = self.ee.get_3779resources() + self.issuer = self.ee.getIssuer() + self.serial = self.ee.getSerial() + self.subject = self.ee.getSubject() + self.aki = self.ee.hAKI() + self.ski = self.ee.hSKI() + + def show(self): + """ + Print ROA attributes. + """ + + rcynic_object.show(self) + self.show_attrs("notBefore", "notAfter", "aia_uri", "resources", "asID") + if self.prefix_sets: + print "Prefixes:", ",".join(str(i) for i in self.prefix_sets) class rcynic_ghostbuster(rcynic_object): - """ - Ghostbuster record from the rcynic cache. 
- """ - - obj_class = rpki.x509.Ghostbuster - - def __init__(self, *args, **kwargs): - rcynic_object.__init__(self, *args, **kwargs) - self.obj.extract() - self.vcard = self.obj.get_content() - self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0]) - self.notBefore = self.ee.getNotBefore() - self.notAfter = self.ee.getNotAfter() - self.aia_uri = self.ee.get_aia_uri() - self.issuer = self.ee.getIssuer() - self.serial = self.ee.getSerial() - self.subject = self.ee.getSubject() - self.aki = self.ee.hAKI() - self.ski = self.ee.hSKI() - - def show(self): - rcynic_object.show(self) - self.show_attrs("notBefore", "notAfter", "vcard") + """ + Ghostbuster record from the rcynic cache. + """ + + obj_class = rpki.x509.Ghostbuster + + def __init__(self, *args, **kwargs): + rcynic_object.__init__(self, *args, **kwargs) + self.obj.extract() + self.vcard = self.obj.get_content() + self.ee = rpki.x509.X509(POW = self.obj.get_POW().certs()[0]) + self.notBefore = self.ee.getNotBefore() + self.notAfter = self.ee.getNotAfter() + self.aia_uri = self.ee.get_aia_uri() + self.issuer = self.ee.getIssuer() + self.serial = self.ee.getSerial() + self.subject = self.ee.getSubject() + self.aki = self.ee.hAKI() + self.ski = self.ee.hSKI() + + def show(self): + rcynic_object.show(self) + self.show_attrs("notBefore", "notAfter", "vcard") file_name_classes = { ".cer" : rcynic_certificate, @@ -168,112 +168,112 @@ file_name_classes = { ".roa" : rcynic_roa } class rcynic_file_iterator(object): - """ - Iterate over files in an rcynic output tree, yielding a Python - representation of each object found. - """ - - def __init__(self, rcynic_root, - authenticated_subdir = "authenticated"): - self.rcynic_dir = os.path.join(rcynic_root, authenticated_subdir) - - def __iter__(self): - for root, dirs, files in os.walk(self.rcynic_dir): # pylint: disable=W0612 - for filename in files: - filename = os.path.join(root, filename) - ext = os.path.splitext(filename)[1] - if ext in file_name_classes: - yield file_name_classes[ext](filename) + """ + Iterate over files in an rcynic output tree, yielding a Python + representation of each object found. 
+ """ + + def __init__(self, rcynic_root, + authenticated_subdir = "authenticated"): + self.rcynic_dir = os.path.join(rcynic_root, authenticated_subdir) + + def __iter__(self): + for root, dirs, files in os.walk(self.rcynic_dir): # pylint: disable=W0612 + for filename in files: + filename = os.path.join(root, filename) + ext = os.path.splitext(filename)[1] + if ext in file_name_classes: + yield file_name_classes[ext](filename) class validation_status_element(object): - def __init__(self, *args, **kwargs): - self.attrs = [] - for k, v in kwargs.iteritems(): - setattr(self, k, v) - # attribute names are saved so that the __repr__ method can - # display the subset of attributes the user specified - self.attrs.append(k) - self._obj = None - - def get_obj(self): - if not self._obj: - self._obj = self.file_class(filename=self.filename, uri=self.uri) - return self._obj - - def __repr__(self): - v = [self.__class__.__name__, 'id=%s' % str(id(self))] - v.extend(['%s=%s' % (x, getattr(self, x)) for x in self.attrs]) - return '<%s>' % (' '.join(v),) - - obj = property(get_obj) + def __init__(self, *args, **kwargs): + self.attrs = [] + for k, v in kwargs.iteritems(): + setattr(self, k, v) + # attribute names are saved so that the __repr__ method can + # display the subset of attributes the user specified + self.attrs.append(k) + self._obj = None + + def get_obj(self): + if not self._obj: + self._obj = self.file_class(filename=self.filename, uri=self.uri) + return self._obj + + def __repr__(self): + v = [self.__class__.__name__, 'id=%s' % str(id(self))] + v.extend(['%s=%s' % (x, getattr(self, x)) for x in self.attrs]) + return '<%s>' % (' '.join(v),) + + obj = property(get_obj) class rcynic_xml_iterator(object): - """ - Iterate over validation_status entries in the XML output from an - rcynic run. Yields a tuple for each entry: - - timestamp, generation, status, object - - where URI, status, and timestamp are the corresponding values from - the XML element, OK is a boolean indicating whether validation was - considered succesful, and object is a Python representation of the - object in question. If OK is True, object will be from rcynic's - authenticated output tree; otherwise, object will be from rcynic's - unauthenticated output tree. - - Note that it is possible for the same URI to appear in more than one - validation_status element; in such cases, the succesful case (OK - True) should be the last entry (as rcynic will stop trying once it - gets a good copy), but there may be multiple failures, which might - or might not have different status codes. 
- """ - - def __init__(self, rcynic_root, xml_file, - authenticated_old_subdir = "authenticated.old", - unauthenticated_subdir = "unauthenticated"): - self.rcynic_root = rcynic_root - self.xml_file = xml_file - self.authenticated_subdir = os.path.join(rcynic_root, 'authenticated') - self.authenticated_old_subdir = os.path.join(rcynic_root, authenticated_old_subdir) - self.unauthenticated_subdir = os.path.join(rcynic_root, unauthenticated_subdir) - - base_uri = "rsync://" - - def uri_to_filename(self, uri): - if uri.startswith(self.base_uri): - return uri[len(self.base_uri):] - else: - raise NotRsyncURI("Not an rsync URI %r" % uri) - - def __iter__(self): - for validation_status in ElementTree(file=self.xml_file).getroot().getiterator("validation_status"): - timestamp = validation_status.get("timestamp") - status = validation_status.get("status") - uri = validation_status.text.strip() - generation = validation_status.get("generation") - - # determine the path to this object - if status == 'object_accepted': - d = self.authenticated_subdir - elif generation == 'backup': - d = self.authenticated_old_subdir - else: - d = self.unauthenticated_subdir - - filename = os.path.join(d, self.uri_to_filename(uri)) - - ext = os.path.splitext(filename)[1] - if ext in file_name_classes: - yield validation_status_element(timestamp = timestamp, generation = generation, - uri=uri, status = status, filename = filename, - file_class = file_name_classes[ext]) + """ + Iterate over validation_status entries in the XML output from an + rcynic run. Yields a tuple for each entry: + + timestamp, generation, status, object + + where URI, status, and timestamp are the corresponding values from + the XML element, OK is a boolean indicating whether validation was + considered succesful, and object is a Python representation of the + object in question. If OK is True, object will be from rcynic's + authenticated output tree; otherwise, object will be from rcynic's + unauthenticated output tree. + + Note that it is possible for the same URI to appear in more than one + validation_status element; in such cases, the succesful case (OK + True) should be the last entry (as rcynic will stop trying once it + gets a good copy), but there may be multiple failures, which might + or might not have different status codes. 
+ """ + + def __init__(self, rcynic_root, xml_file, + authenticated_old_subdir = "authenticated.old", + unauthenticated_subdir = "unauthenticated"): + self.rcynic_root = rcynic_root + self.xml_file = xml_file + self.authenticated_subdir = os.path.join(rcynic_root, 'authenticated') + self.authenticated_old_subdir = os.path.join(rcynic_root, authenticated_old_subdir) + self.unauthenticated_subdir = os.path.join(rcynic_root, unauthenticated_subdir) + + base_uri = "rsync://" + + def uri_to_filename(self, uri): + if uri.startswith(self.base_uri): + return uri[len(self.base_uri):] + else: + raise NotRsyncURI("Not an rsync URI %r" % uri) + + def __iter__(self): + for validation_status in ElementTree(file=self.xml_file).getroot().getiterator("validation_status"): + timestamp = validation_status.get("timestamp") + status = validation_status.get("status") + uri = validation_status.text.strip() + generation = validation_status.get("generation") + + # determine the path to this object + if status == 'object_accepted': + d = self.authenticated_subdir + elif generation == 'backup': + d = self.authenticated_old_subdir + else: + d = self.unauthenticated_subdir + + filename = os.path.join(d, self.uri_to_filename(uri)) + + ext = os.path.splitext(filename)[1] + if ext in file_name_classes: + yield validation_status_element(timestamp = timestamp, generation = generation, + uri=uri, status = status, filename = filename, + file_class = file_name_classes[ext]) def label_iterator(xml_file): - """ - Returns an iterator which contains all defined labels from an rcynic XML - output file. Each item is a tuple of the form - (label, kind, description). - """ - - for label in ElementTree(file=xml_file).find("labels"): - yield label.tag, label.get("kind"), label.text.strip() + """ + Returns an iterator which contains all defined labels from an rcynic XML + output file. Each item is a tuple of the form + (label, kind, description). + """ + + for label in ElementTree(file=xml_file).find("labels"): + yield label.tag, label.get("kind"), label.text.strip() diff --git a/rpki/relaxng.py b/rpki/relaxng.py index 566be90f..49ea88d8 100644 --- a/rpki/relaxng.py +++ b/rpki/relaxng.py @@ -7,17 +7,17 @@ from rpki.relaxng_parser import RelaxNGParser left_right = RelaxNGParser(r''' @@ -1945,29 +1945,29 @@ publication_control = RelaxNGParser(r''' publication = RelaxNGParser(r'''