[ipython/f13/master] update to 0.10.1 and unbundle a bit differently

tomspur tomspur at fedoraproject.org
Wed Oct 13 08:23:17 UTC 2010


commit dd958701eb092008c0cfb486619d86c8c255d160
Author: Thomas Spura <tomspur at fedoraproject.org>
Date:   Wed Oct 13 10:12:44 2010 +0200

    update to 0.10.1 and unbundle a bit differently
    
    Signed-off-by: Thomas Spura <tomspur at fedoraproject.org>
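
The patch removed below used to carry a complete copy of argparse as
IPython/external/argparse/_argparse.py inside a wrapper package. With that
layout, the usual way to unbundle is a thin __init__.py that prefers the
system-wide module and keeps the bundled file only as a fallback; a minimal
sketch of such a wrapper (illustrative only, not the verbatim file from
IPython or from the Fedora patch) looks like this:

    # IPython/external/argparse/__init__.py  (sketch, assumed layout)
    try:
        # Prefer the system copy, e.g. the separate python-argparse
        # package on Python 2.6, where argparse is not yet part of the
        # standard library.
        from argparse import *
    except ImportError:
        # Fall back to the bundled copy created by the old patch below.
        from _argparse import *

With a wrapper like this, unbundling only needs to drop the _argparse.py
fallback (or let the import fall through to the system module) instead of
rewriting every import site, which is one way the replacement patch can end
up so much smaller than the 17336-line one it replaces.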

 .gitignore                             |    1 +
 ipython-unbundle-external-module.patch |17336 --------------------------------
 ipython.spec                           |   30 +-
 sources                                |    2 +-
 4 files changed, 30 insertions(+), 17339 deletions(-)
---
diff --git a/.gitignore b/.gitignore
index 07a356f..0a3cb4a 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,2 @@
 ipython-0.10.tar.gz
+/ipython-0.10.1.tar.gz
diff --git a/ipython-unbundle-external-module.patch b/ipython-unbundle-external-module.patch
index 028a7d0..6fc8011 100644
--- a/ipython-unbundle-external-module.patch
+++ b/ipython-unbundle-external-module.patch
@@ -1,2224 +1,3 @@
-Index: ipython-0.10/IPython/external/argparse/_argparse.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/argparse/_argparse.py
-@@ -0,0 +1,2216 @@
-+# -*- coding: utf-8 -*-
-+
-+# Copyright © 2006-2009 Steven J. Bethard <steven.bethard at gmail.com>.
-+#
-+# Licensed under the Apache License, Version 2.0 (the "License"); you may not
-+# use this file except in compliance with the License. You may obtain a copy
-+# of the License at
-+#
-+#     http://www.apache.org/licenses/LICENSE-2.0
-+#
-+# Unless required by applicable law or agreed to in writing, software
-+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-+# License for the specific language governing permissions and limitations
-+# under the License.
-+
-+"""Command-line parsing library
-+
-+This module is an optparse-inspired command-line parsing library that:
-+
-+    - handles both optional and positional arguments
-+    - produces highly informative usage messages
-+    - supports parsers that dispatch to sub-parsers
-+
-+The following is a simple usage example that sums integers from the
-+command-line and writes the result to a file::
-+
-+    parser = argparse.ArgumentParser(
-+        description='sum the integers at the command line')
-+    parser.add_argument(
-+        'integers', metavar='int', nargs='+', type=int,
-+        help='an integer to be summed')
-+    parser.add_argument(
-+        '--log', default=sys.stdout, type=argparse.FileType('w'),
-+        help='the file where the sum should be written')
-+    args = parser.parse_args()
-+    args.log.write('%s' % sum(args.integers))
-+    args.log.close()
-+
-+The module contains the following public classes:
-+
-+    - ArgumentParser -- The main entry point for command-line parsing. As the
-+        example above shows, the add_argument() method is used to populate
-+        the parser with actions for optional and positional arguments. Then
-+        the parse_args() method is invoked to convert the args at the
-+        command-line into an object with attributes.
-+
-+    - ArgumentError -- The exception raised by ArgumentParser objects when
-+        there are errors with the parser's actions. Errors raised while
-+        parsing the command-line are caught by ArgumentParser and emitted
-+        as command-line messages.
-+
-+    - FileType -- A factory for defining types of files to be created. As the
-+        example above shows, instances of FileType are typically passed as
-+        the type= argument of add_argument() calls.
-+
-+    - Action -- The base class for parser actions. Typically actions are
-+        selected by passing strings like 'store_true' or 'append_const' to
-+        the action= argument of add_argument(). However, for greater
-+        customization of ArgumentParser actions, subclasses of Action may
-+        be defined and passed as the action= argument.
-+
-+    - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
-+        ArgumentDefaultsHelpFormatter -- Formatter classes which
-+        may be passed as the formatter_class= argument to the
-+        ArgumentParser constructor. HelpFormatter is the default,
-+        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
-+        not to change the formatting for help text, and
-+        ArgumentDefaultsHelpFormatter adds information about argument defaults
-+        to the help.
-+
-+All other classes in this module are considered implementation details.
-+(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
-+considered public as object names -- the API of the formatter objects is
-+still considered an implementation detail.)
-+"""
-+
-+__version__ = '1.0'
-+__all__ = [
-+    'ArgumentParser',
-+    'ArgumentError',
-+    'Namespace',
-+    'Action',
-+    'FileType',
-+    'HelpFormatter',
-+    'RawDescriptionHelpFormatter',
-+    'RawTextHelpFormatter',
-+    'ArgumentDefaultsHelpFormatter',
-+]
-+
-+
-+import copy as _copy
-+import os as _os
-+import re as _re
-+import sys as _sys
-+import textwrap as _textwrap
-+
-+from gettext import gettext as _
-+
-+try:
-+    _set = set
-+except NameError:
-+    from sets import Set as _set
-+
-+try:
-+    _basestring = basestring
-+except NameError:
-+    _basestring = str
-+
-+try:
-+    _sorted = sorted
-+except NameError:
-+
-+    def _sorted(iterable, reverse=False):
-+        result = list(iterable)
-+        result.sort()
-+        if reverse:
-+            result.reverse()
-+        return result
-+
-+
-+SUPPRESS = '==SUPPRESS=='
-+
-+OPTIONAL = '?'
-+ZERO_OR_MORE = '*'
-+ONE_OR_MORE = '+'
-+PARSER = '==PARSER=='
-+
-+# =============================
-+# Utility functions and classes
-+# =============================
-+
-+class _AttributeHolder(object):
-+    """Abstract base class that provides __repr__.
-+
-+    The __repr__ method returns a string in the format::
-+        ClassName(attr=name, attr=name, ...)
-+    The attributes are determined either by a class-level attribute,
-+    '_kwarg_names', or by inspecting the instance __dict__.
-+    """
-+
-+    def __repr__(self):
-+        type_name = type(self).__name__
-+        arg_strings = []
-+        for arg in self._get_args():
-+            arg_strings.append(repr(arg))
-+        for name, value in self._get_kwargs():
-+            arg_strings.append('%s=%r' % (name, value))
-+        return '%s(%s)' % (type_name, ', '.join(arg_strings))
-+
-+    def _get_kwargs(self):
-+        return _sorted(self.__dict__.items())
-+
-+    def _get_args(self):
-+        return []
-+
-+
-+def _ensure_value(namespace, name, value):
-+    if getattr(namespace, name, None) is None:
-+        setattr(namespace, name, value)
-+    return getattr(namespace, name)
-+
-+
-+# ===============
-+# Formatting Help
-+# ===============
-+
-+class HelpFormatter(object):
-+    """Formatter for generating usage messages and argument help strings.
-+
-+    Only the name of this class is considered a public API. All the methods
-+    provided by the class are considered an implementation detail.
-+    """
-+
-+    def __init__(self,
-+                 prog,
-+                 indent_increment=2,
-+                 max_help_position=24,
-+                 width=None):
-+
-+        # default setting for width
-+        if width is None:
-+            try:
-+                width = int(_os.environ['COLUMNS'])
-+            except (KeyError, ValueError):
-+                width = 80
-+            width -= 2
-+
-+        self._prog = prog
-+        self._indent_increment = indent_increment
-+        self._max_help_position = max_help_position
-+        self._width = width
-+
-+        self._current_indent = 0
-+        self._level = 0
-+        self._action_max_length = 0
-+
-+        self._root_section = self._Section(self, None)
-+        self._current_section = self._root_section
-+
-+        self._whitespace_matcher = _re.compile(r'\s+')
-+        self._long_break_matcher = _re.compile(r'\n\n\n+')
-+
-+    # ===============================
-+    # Section and indentation methods
-+    # ===============================
-+    def _indent(self):
-+        self._current_indent += self._indent_increment
-+        self._level += 1
-+
-+    def _dedent(self):
-+        self._current_indent -= self._indent_increment
-+        assert self._current_indent >= 0, 'Indent decreased below 0.'
-+        self._level -= 1
-+
-+    class _Section(object):
-+
-+        def __init__(self, formatter, parent, heading=None):
-+            self.formatter = formatter
-+            self.parent = parent
-+            self.heading = heading
-+            self.items = []
-+
-+        def format_help(self):
-+            # format the indented section
-+            if self.parent is not None:
-+                self.formatter._indent()
-+            join = self.formatter._join_parts
-+            for func, args in self.items:
-+                func(*args)
-+            item_help = join([func(*args) for func, args in self.items])
-+            if self.parent is not None:
-+                self.formatter._dedent()
-+
-+            # return nothing if the section was empty
-+            if not item_help:
-+                return ''
-+
-+            # add the heading if the section was non-empty
-+            if self.heading is not SUPPRESS and self.heading is not None:
-+                current_indent = self.formatter._current_indent
-+                heading = '%*s%s:\n' % (current_indent, '', self.heading)
-+            else:
-+                heading = ''
-+
-+            # join the section-initial newline, the heading and the help
-+            return join(['\n', heading, item_help, '\n'])
-+
-+    def _add_item(self, func, args):
-+        self._current_section.items.append((func, args))
-+
-+    # ========================
-+    # Message building methods
-+    # ========================
-+    def start_section(self, heading):
-+        self._indent()
-+        section = self._Section(self, self._current_section, heading)
-+        self._add_item(section.format_help, [])
-+        self._current_section = section
-+
-+    def end_section(self):
-+        self._current_section = self._current_section.parent
-+        self._dedent()
-+
-+    def add_text(self, text):
-+        if text is not SUPPRESS and text is not None:
-+            self._add_item(self._format_text, [text])
-+
-+    def add_usage(self, usage, actions, groups, prefix=None):
-+        if usage is not SUPPRESS:
-+            args = usage, actions, groups, prefix
-+            self._add_item(self._format_usage, args)
-+
-+    def add_argument(self, action):
-+        if action.help is not SUPPRESS:
-+
-+            # find all invocations
-+            get_invocation = self._format_action_invocation
-+            invocations = [get_invocation(action)]
-+            for subaction in self._iter_indented_subactions(action):
-+                invocations.append(get_invocation(subaction))
-+
-+            # update the maximum item length
-+            invocation_length = max([len(s) for s in invocations])
-+            action_length = invocation_length + self._current_indent
-+            self._action_max_length = max(self._action_max_length,
-+                                          action_length)
-+
-+            # add the item to the list
-+            self._add_item(self._format_action, [action])
-+
-+    def add_arguments(self, actions):
-+        for action in actions:
-+            self.add_argument(action)
-+
-+    # =======================
-+    # Help-formatting methods
-+    # =======================
-+    def format_help(self):
-+        help = self._root_section.format_help() % dict(prog=self._prog)
-+        if help:
-+            help = self._long_break_matcher.sub('\n\n', help)
-+            help = help.strip('\n') + '\n'
-+        return help
-+
-+    def _join_parts(self, part_strings):
-+        return ''.join([part
-+                        for part in part_strings
-+                        if part and part is not SUPPRESS])
-+
-+    def _format_usage(self, usage, actions, groups, prefix):
-+        if prefix is None:
-+            prefix = _('usage: ')
-+
-+        # if no optionals or positionals are available, usage is just prog
-+        if usage is None and not actions:
-+            usage = '%(prog)s'
-+
-+        # if optionals and positionals are available, calculate usage
-+        elif usage is None:
-+            usage = '%(prog)s' % dict(prog=self._prog)
-+
-+            # split optionals from positionals
-+            optionals = []
-+            positionals = []
-+            for action in actions:
-+                if action.option_strings:
-+                    optionals.append(action)
-+                else:
-+                    positionals.append(action)
-+
-+            # determine width of "usage: PROG" and width of text
-+            prefix_width = len(prefix) + len(usage) + 1
-+            prefix_indent = self._current_indent + prefix_width
-+            text_width = self._width - self._current_indent
-+
-+            # put them on one line if they're short enough
-+            format = self._format_actions_usage
-+            action_usage = format(optionals + positionals, groups)
-+            if prefix_width + len(action_usage) + 1 < text_width:
-+                usage = '%s %s' % (usage, action_usage)
-+
-+            # if they're long, wrap optionals and positionals individually
-+            else:
-+                optional_usage = format(optionals, groups)
-+                positional_usage = format(positionals, groups)
-+                indent = ' ' * prefix_indent
-+
-+                # usage is made of PROG, optionals and positionals
-+                parts = [usage, ' ']
-+
-+                # options always get added right after PROG
-+                if optional_usage:
-+                    parts.append(_textwrap.fill(
-+                        optional_usage, text_width,
-+                        initial_indent=indent,
-+                        subsequent_indent=indent).lstrip())
-+
-+                # if there were options, put arguments on the next line
-+                # otherwise, start them right after PROG
-+                if positional_usage:
-+                    part = _textwrap.fill(
-+                        positional_usage, text_width,
-+                        initial_indent=indent,
-+                        subsequent_indent=indent).lstrip()
-+                    if optional_usage:
-+                        part = '\n' + indent + part
-+                    parts.append(part)
-+                usage = ''.join(parts)
-+
-+        # prefix with 'usage:'
-+        return '%s%s\n\n' % (prefix, usage)
-+
-+    def _format_actions_usage(self, actions, groups):
-+        # find group indices and identify actions in groups
-+        group_actions = _set()
-+        inserts = {}
-+        for group in groups:
-+            try:
-+                start = actions.index(group._group_actions[0])
-+            except ValueError:
-+                continue
-+            else:
-+                end = start + len(group._group_actions)
-+                if actions[start:end] == group._group_actions:
-+                    for action in group._group_actions:
-+                        group_actions.add(action)
-+                    if not group.required:
-+                        inserts[start] = '['
-+                        inserts[end] = ']'
-+                    else:
-+                        inserts[start] = '('
-+                        inserts[end] = ')'
-+                    for i in range(start + 1, end):
-+                        inserts[i] = '|'
-+
-+        # collect all actions format strings
-+        parts = []
-+        for i, action in enumerate(actions):
-+
-+            # suppressed arguments are marked with None
-+            # remove | separators for suppressed arguments
-+            if action.help is SUPPRESS:
-+                parts.append(None)
-+                if inserts.get(i) == '|':
-+                    inserts.pop(i)
-+                elif inserts.get(i + 1) == '|':
-+                    inserts.pop(i + 1)
-+
-+            # produce all arg strings
-+            elif not action.option_strings:
-+                part = self._format_args(action, action.dest)
-+
-+                # if it's in a group, strip the outer []
-+                if action in group_actions:
-+                    if part[0] == '[' and part[-1] == ']':
-+                        part = part[1:-1]
-+
-+                # add the action string to the list
-+                parts.append(part)
-+
-+            # produce the first way to invoke the option in brackets
-+            else:
-+                option_string = action.option_strings[0]
-+
-+                # if the Optional doesn't take a value, format is:
-+                #    -s or --long
-+                if action.nargs == 0:
-+                    part = '%s' % option_string
-+
-+                # if the Optional takes a value, format is:
-+                #    -s ARGS or --long ARGS
-+                else:
-+                    default = action.dest.upper()
-+                    args_string = self._format_args(action, default)
-+                    part = '%s %s' % (option_string, args_string)
-+
-+                # make it look optional if it's not required or in a group
-+                if not action.required and action not in group_actions:
-+                    part = '[%s]' % part
-+
-+                # add the action string to the list
-+                parts.append(part)
-+
-+        # insert things at the necessary indices
-+        for i in _sorted(inserts, reverse=True):
-+            parts[i:i] = [inserts[i]]
-+
-+        # join all the action items with spaces
-+        text = ' '.join([item for item in parts if item is not None])
-+
-+        # clean up separators for mutually exclusive groups
-+        open = r'[\[(]'
-+        close = r'[\])]'
-+        text = _re.sub(r'(%s) ' % open, r'\1', text)
-+        text = _re.sub(r' (%s)' % close, r'\1', text)
-+        text = _re.sub(r'%s *%s' % (open, close), r'', text)
-+        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
-+        text = text.strip()
-+
-+        # return the text
-+        return text
-+
-+    def _format_text(self, text):
-+        text_width = self._width - self._current_indent
-+        indent = ' ' * self._current_indent
-+        return self._fill_text(text, text_width, indent) + '\n\n'
-+
-+    def _format_action(self, action):
-+        # determine the required width and the entry label
-+        help_position = min(self._action_max_length + 2,
-+                            self._max_help_position)
-+        help_width = self._width - help_position
-+        action_width = help_position - self._current_indent - 2
-+        action_header = self._format_action_invocation(action)
-+
-+        # no help; start on same line and add a final newline
-+        if not action.help:
-+            tup = self._current_indent, '', action_header
-+            action_header = '%*s%s\n' % tup
-+
-+        # short action name; start on the same line and pad two spaces
-+        elif len(action_header) <= action_width:
-+            tup = self._current_indent, '', action_width, action_header
-+            action_header = '%*s%-*s  ' % tup
-+            indent_first = 0
-+
-+        # long action name; start on the next line
-+        else:
-+            tup = self._current_indent, '', action_header
-+            action_header = '%*s%s\n' % tup
-+            indent_first = help_position
-+
-+        # collect the pieces of the action help
-+        parts = [action_header]
-+
-+        # if there was help for the action, add lines of help text
-+        if action.help:
-+            help_text = self._expand_help(action)
-+            help_lines = self._split_lines(help_text, help_width)
-+            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
-+            for line in help_lines[1:]:
-+                parts.append('%*s%s\n' % (help_position, '', line))
-+
-+        # or add a newline if the description doesn't end with one
-+        elif not action_header.endswith('\n'):
-+            parts.append('\n')
-+
-+        # if there are any sub-actions, add their help as well
-+        for subaction in self._iter_indented_subactions(action):
-+            parts.append(self._format_action(subaction))
-+
-+        # return a single string
-+        return self._join_parts(parts)
-+
-+    def _format_action_invocation(self, action):
-+        if not action.option_strings:
-+            metavar, = self._metavar_formatter(action, action.dest)(1)
-+            return metavar
-+
-+        else:
-+            parts = []
-+
-+            # if the Optional doesn't take a value, format is:
-+            #    -s, --long
-+            if action.nargs == 0:
-+                parts.extend(action.option_strings)
-+
-+            # if the Optional takes a value, format is:
-+            #    -s ARGS, --long ARGS
-+            else:
-+                default = action.dest.upper()
-+                args_string = self._format_args(action, default)
-+                for option_string in action.option_strings:
-+                    parts.append('%s %s' % (option_string, args_string))
-+
-+            return ', '.join(parts)
-+
-+    def _metavar_formatter(self, action, default_metavar):
-+        if action.metavar is not None:
-+            result = action.metavar
-+        elif action.choices is not None:
-+            choice_strs = [str(choice) for choice in action.choices]
-+            result = '{%s}' % ','.join(choice_strs)
-+        else:
-+            result = default_metavar
-+
-+        def format(tuple_size):
-+            if isinstance(result, tuple):
-+                return result
-+            else:
-+                return (result, ) * tuple_size
-+        return format
-+
-+    def _format_args(self, action, default_metavar):
-+        get_metavar = self._metavar_formatter(action, default_metavar)
-+        if action.nargs is None:
-+            result = '%s' % get_metavar(1)
-+        elif action.nargs == OPTIONAL:
-+            result = '[%s]' % get_metavar(1)
-+        elif action.nargs == ZERO_OR_MORE:
-+            result = '[%s [%s ...]]' % get_metavar(2)
-+        elif action.nargs == ONE_OR_MORE:
-+            result = '%s [%s ...]' % get_metavar(2)
-+        elif action.nargs is PARSER:
-+            result = '%s ...' % get_metavar(1)
-+        else:
-+            formats = ['%s' for _ in range(action.nargs)]
-+            result = ' '.join(formats) % get_metavar(action.nargs)
-+        return result
-+
-+    def _expand_help(self, action):
-+        params = dict(vars(action), prog=self._prog)
-+        for name in list(params):
-+            if params[name] is SUPPRESS:
-+                del params[name]
-+        if params.get('choices') is not None:
-+            choices_str = ', '.join([str(c) for c in params['choices']])
-+            params['choices'] = choices_str
-+        return self._get_help_string(action) % params
-+
-+    def _iter_indented_subactions(self, action):
-+        try:
-+            get_subactions = action._get_subactions
-+        except AttributeError:
-+            pass
-+        else:
-+            self._indent()
-+            for subaction in get_subactions():
-+                yield subaction
-+            self._dedent()
-+
-+    def _split_lines(self, text, width):
-+        text = self._whitespace_matcher.sub(' ', text).strip()
-+        return _textwrap.wrap(text, width)
-+
-+    def _fill_text(self, text, width, indent):
-+        text = self._whitespace_matcher.sub(' ', text).strip()
-+        return _textwrap.fill(text, width, initial_indent=indent,
-+                                           subsequent_indent=indent)
-+
-+    def _get_help_string(self, action):
-+        return action.help
-+
-+
-+class RawDescriptionHelpFormatter(HelpFormatter):
-+    """Help message formatter which retains any formatting in descriptions.
-+
-+    Only the name of this class is considered a public API. All the methods
-+    provided by the class are considered an implementation detail.
-+    """
-+
-+    def _fill_text(self, text, width, indent):
-+        return ''.join([indent + line for line in text.splitlines(True)])
-+
-+
-+class RawTextHelpFormatter(RawDescriptionHelpFormatter):
-+    """Help message formatter which retains formatting of all help text.
-+
-+    Only the name of this class is considered a public API. All the methods
-+    provided by the class are considered an implementation detail.
-+    """
-+
-+    def _split_lines(self, text, width):
-+        return text.splitlines()
-+
-+
-+class ArgumentDefaultsHelpFormatter(HelpFormatter):
-+    """Help message formatter which adds default values to argument help.
-+
-+    Only the name of this class is considered a public API. All the methods
-+    provided by the class are considered an implementation detail.
-+    """
-+
-+    def _get_help_string(self, action):
-+        help = action.help
-+        if '%(default)' not in action.help:
-+            if action.default is not SUPPRESS:
-+                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
-+                if action.option_strings or action.nargs in defaulting_nargs:
-+                    help += ' (default: %(default)s)'
-+        return help
-+
-+
-+# =====================
-+# Options and Arguments
-+# =====================
-+
-+def _get_action_name(argument):
-+    if argument is None:
-+        return None
-+    elif argument.option_strings:
-+        return  '/'.join(argument.option_strings)
-+    elif argument.metavar not in (None, SUPPRESS):
-+        return argument.metavar
-+    elif argument.dest not in (None, SUPPRESS):
-+        return argument.dest
-+    else:
-+        return None
-+
-+
-+class ArgumentError(Exception):
-+    """An error from creating or using an argument (optional or positional).
-+
-+    The string value of this exception is the message, augmented with
-+    information about the argument that caused it.
-+    """
-+
-+    def __init__(self, argument, message):
-+        self.argument_name = _get_action_name(argument)
-+        self.message = message
-+
-+    def __str__(self):
-+        if self.argument_name is None:
-+            format = '%(message)s'
-+        else:
-+            format = 'argument %(argument_name)s: %(message)s'
-+        return format % dict(message=self.message,
-+                             argument_name=self.argument_name)
-+
-+# ==============
-+# Action classes
-+# ==============
-+
-+class Action(_AttributeHolder):
-+    """Information about how to convert command line strings to Python objects.
-+
-+    Action objects are used by an ArgumentParser to represent the information
-+    needed to parse a single argument from one or more strings from the
-+    command line. The keyword arguments to the Action constructor are also
-+    all attributes of Action instances.
-+
-+    Keyword Arguments:
-+
-+        - option_strings -- A list of command-line option strings which
-+            should be associated with this action.
-+
-+        - dest -- The name of the attribute to hold the created object(s)
-+
-+        - nargs -- The number of command-line arguments that should be
-+            consumed. By default, one argument will be consumed and a single
-+            value will be produced.  Other values include:
-+                - N (an integer) consumes N arguments (and produces a list)
-+                - '?' consumes zero or one arguments
-+                - '*' consumes zero or more arguments (and produces a list)
-+                - '+' consumes one or more arguments (and produces a list)
-+            Note that the difference between the default and nargs=1 is that
-+            with the default, a single value will be produced, while with
-+            nargs=1, a list containing a single value will be produced.
-+
-+        - const -- The value to be produced if the option is specified and the
-+            option uses an action that takes no values.
-+
-+        - default -- The value to be produced if the option is not specified.
-+
-+        - type -- The type which the command-line arguments should be converted
-+            to, should be one of 'string', 'int', 'float', 'complex' or a
-+            callable object that accepts a single string argument. If None,
-+            'string' is assumed.
-+
-+        - choices -- A container of values that should be allowed. If not None,
-+            after a command-line argument has been converted to the appropriate
-+            type, an exception will be raised if it is not a member of this
-+            collection.
-+
-+        - required -- True if the action must always be specified at the
-+            command line. This is only meaningful for optional command-line
-+            arguments.
-+
-+        - help -- The help string describing the argument.
-+
-+        - metavar -- The name to be used for the option's argument with the
-+            help string. If None, the 'dest' value will be used as the name.
-+    """
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 nargs=None,
-+                 const=None,
-+                 default=None,
-+                 type=None,
-+                 choices=None,
-+                 required=False,
-+                 help=None,
-+                 metavar=None):
-+        self.option_strings = option_strings
-+        self.dest = dest
-+        self.nargs = nargs
-+        self.const = const
-+        self.default = default
-+        self.type = type
-+        self.choices = choices
-+        self.required = required
-+        self.help = help
-+        self.metavar = metavar
-+
-+    def _get_kwargs(self):
-+        names = [
-+            'option_strings',
-+            'dest',
-+            'nargs',
-+            'const',
-+            'default',
-+            'type',
-+            'choices',
-+            'help',
-+            'metavar',
-+        ]
-+        return [(name, getattr(self, name)) for name in names]
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        raise NotImplementedError(_('.__call__() not defined'))
-+
-+
-+class _StoreAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 nargs=None,
-+                 const=None,
-+                 default=None,
-+                 type=None,
-+                 choices=None,
-+                 required=False,
-+                 help=None,
-+                 metavar=None):
-+        if nargs == 0:
-+            raise ValueError('nargs must be > 0')
-+        if const is not None and nargs != OPTIONAL:
-+            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
-+        super(_StoreAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            nargs=nargs,
-+            const=const,
-+            default=default,
-+            type=type,
-+            choices=choices,
-+            required=required,
-+            help=help,
-+            metavar=metavar)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        setattr(namespace, self.dest, values)
-+
-+
-+class _StoreConstAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 const,
-+                 default=None,
-+                 required=False,
-+                 help=None,
-+                 metavar=None):
-+        super(_StoreConstAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            nargs=0,
-+            const=const,
-+            default=default,
-+            required=required,
-+            help=help)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        setattr(namespace, self.dest, self.const)
-+
-+
-+class _StoreTrueAction(_StoreConstAction):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 default=False,
-+                 required=False,
-+                 help=None):
-+        super(_StoreTrueAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            const=True,
-+            default=default,
-+            required=required,
-+            help=help)
-+
-+
-+class _StoreFalseAction(_StoreConstAction):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 default=True,
-+                 required=False,
-+                 help=None):
-+        super(_StoreFalseAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            const=False,
-+            default=default,
-+            required=required,
-+            help=help)
-+
-+
-+class _AppendAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 nargs=None,
-+                 const=None,
-+                 default=None,
-+                 type=None,
-+                 choices=None,
-+                 required=False,
-+                 help=None,
-+                 metavar=None):
-+        if nargs == 0:
-+            raise ValueError('nargs must be > 0')
-+        if const is not None and nargs != OPTIONAL:
-+            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
-+        super(_AppendAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            nargs=nargs,
-+            const=const,
-+            default=default,
-+            type=type,
-+            choices=choices,
-+            required=required,
-+            help=help,
-+            metavar=metavar)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        items = _copy.copy(_ensure_value(namespace, self.dest, []))
-+        items.append(values)
-+        setattr(namespace, self.dest, items)
-+
-+
-+class _AppendConstAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 const,
-+                 default=None,
-+                 required=False,
-+                 help=None,
-+                 metavar=None):
-+        super(_AppendConstAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            nargs=0,
-+            const=const,
-+            default=default,
-+            required=required,
-+            help=help,
-+            metavar=metavar)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        items = _copy.copy(_ensure_value(namespace, self.dest, []))
-+        items.append(self.const)
-+        setattr(namespace, self.dest, items)
-+
-+
-+class _CountAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest,
-+                 default=None,
-+                 required=False,
-+                 help=None):
-+        super(_CountAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            nargs=0,
-+            default=default,
-+            required=required,
-+            help=help)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        new_count = _ensure_value(namespace, self.dest, 0) + 1
-+        setattr(namespace, self.dest, new_count)
-+
-+
-+class _HelpAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest=SUPPRESS,
-+                 default=SUPPRESS,
-+                 help=None):
-+        super(_HelpAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            default=default,
-+            nargs=0,
-+            help=help)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        parser.print_help()
-+        parser.exit()
-+
-+
-+class _VersionAction(Action):
-+
-+    def __init__(self,
-+                 option_strings,
-+                 dest=SUPPRESS,
-+                 default=SUPPRESS,
-+                 help=None):
-+        super(_VersionAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            default=default,
-+            nargs=0,
-+            help=help)
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        parser.print_version()
-+        parser.exit()
-+
-+
-+class _SubParsersAction(Action):
-+
-+    class _ChoicesPseudoAction(Action):
-+
-+        def __init__(self, name, help):
-+            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
-+            sup.__init__(option_strings=[], dest=name, help=help)
-+
-+    def __init__(self,
-+                 option_strings,
-+                 prog,
-+                 parser_class,
-+                 dest=SUPPRESS,
-+                 help=None,
-+                 metavar=None):
-+
-+        self._prog_prefix = prog
-+        self._parser_class = parser_class
-+        self._name_parser_map = {}
-+        self._choices_actions = []
-+
-+        super(_SubParsersAction, self).__init__(
-+            option_strings=option_strings,
-+            dest=dest,
-+            nargs=PARSER,
-+            choices=self._name_parser_map,
-+            help=help,
-+            metavar=metavar)
-+
-+    def add_parser(self, name, **kwargs):
-+        # set prog from the existing prefix
-+        if kwargs.get('prog') is None:
-+            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
-+
-+        # create a pseudo-action to hold the choice help
-+        if 'help' in kwargs:
-+            help = kwargs.pop('help')
-+            choice_action = self._ChoicesPseudoAction(name, help)
-+            self._choices_actions.append(choice_action)
-+
-+        # create the parser and add it to the map
-+        parser = self._parser_class(**kwargs)
-+        self._name_parser_map[name] = parser
-+        return parser
-+
-+    def _get_subactions(self):
-+        return self._choices_actions
-+
-+    def __call__(self, parser, namespace, values, option_string=None):
-+        parser_name = values[0]
-+        arg_strings = values[1:]
-+
-+        # set the parser name if requested
-+        if self.dest is not SUPPRESS:
-+            setattr(namespace, self.dest, parser_name)
-+
-+        # select the parser
-+        try:
-+            parser = self._name_parser_map[parser_name]
-+        except KeyError:
-+            tup = parser_name, ', '.join(self._name_parser_map)
-+            msg = _('unknown parser %r (choices: %s)' % tup)
-+            raise ArgumentError(self, msg)
-+
-+        # parse all the remaining options into the namespace
-+        parser.parse_args(arg_strings, namespace)
-+
-+
-+# ==============
-+# Type classes
-+# ==============
-+
-+class FileType(object):
-+    """Factory for creating file object types
-+
-+    Instances of FileType are typically passed as type= arguments to the
-+    ArgumentParser add_argument() method.
-+
-+    Keyword Arguments:
-+        - mode -- A string indicating how the file is to be opened. Accepts the
-+            same values as the builtin open() function.
-+        - bufsize -- The file's desired buffer size. Accepts the same values as
-+            the builtin open() function.
-+    """
-+
-+    def __init__(self, mode='r', bufsize=None):
-+        self._mode = mode
-+        self._bufsize = bufsize
-+
-+    def __call__(self, string):
-+        # the special argument "-" means sys.std{in,out}
-+        if string == '-':
-+            if 'r' in self._mode:
-+                return _sys.stdin
-+            elif 'w' in self._mode:
-+                return _sys.stdout
-+            else:
-+                msg = _('argument "-" with mode %r' % self._mode)
-+                raise ValueError(msg)
-+
-+        # all other arguments are used as file names
-+        if self._bufsize:
-+            return open(string, self._mode, self._bufsize)
-+        else:
-+            return open(string, self._mode)
-+
-+    def __repr__(self):
-+        args = [self._mode, self._bufsize]
-+        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
-+        return '%s(%s)' % (type(self).__name__, args_str)
-+
-+# ===========================
-+# Optional and Positional Parsing
-+# ===========================
-+
-+class Namespace(_AttributeHolder):
-+    """Simple object for storing attributes.
-+
-+    Implements equality by attribute names and values, and provides a simple
-+    string representation.
-+    """
-+
-+    def __init__(self, **kwargs):
-+        for name in kwargs:
-+            setattr(self, name, kwargs[name])
-+
-+    def __eq__(self, other):
-+        return vars(self) == vars(other)
-+
-+    def __ne__(self, other):
-+        return not (self == other)
-+
-+
-+class _ActionsContainer(object):
-+
-+    def __init__(self,
-+                 description,
-+                 prefix_chars,
-+                 argument_default,
-+                 conflict_handler):
-+        super(_ActionsContainer, self).__init__()
-+
-+        self.description = description
-+        self.argument_default = argument_default
-+        self.prefix_chars = prefix_chars
-+        self.conflict_handler = conflict_handler
-+
-+        # set up registries
-+        self._registries = {}
-+
-+        # register actions
-+        self.register('action', None, _StoreAction)
-+        self.register('action', 'store', _StoreAction)
-+        self.register('action', 'store_const', _StoreConstAction)
-+        self.register('action', 'store_true', _StoreTrueAction)
-+        self.register('action', 'store_false', _StoreFalseAction)
-+        self.register('action', 'append', _AppendAction)
-+        self.register('action', 'append_const', _AppendConstAction)
-+        self.register('action', 'count', _CountAction)
-+        self.register('action', 'help', _HelpAction)
-+        self.register('action', 'version', _VersionAction)
-+        self.register('action', 'parsers', _SubParsersAction)
-+
-+        # raise an exception if the conflict handler is invalid
-+        self._get_handler()
-+
-+        # action storage
-+        self._actions = []
-+        self._option_string_actions = {}
-+
-+        # groups
-+        self._action_groups = []
-+        self._mutually_exclusive_groups = []
-+
-+        # defaults storage
-+        self._defaults = {}
-+
-+        # determines whether an "option" looks like a negative number
-+        self._negative_number_matcher = _re.compile(r'^-\d+|-\d*.\d+$')
-+
-+        # whether or not there are any optionals that look like negative
-+        # numbers -- uses a list so it can be shared and edited
-+        self._has_negative_number_optionals = []
-+
-+    # ====================
-+    # Registration methods
-+    # ====================
-+    def register(self, registry_name, value, object):
-+        registry = self._registries.setdefault(registry_name, {})
-+        registry[value] = object
-+
-+    def _registry_get(self, registry_name, value, default=None):
-+        return self._registries[registry_name].get(value, default)
-+
-+    # ==================================
-+    # Namespace default settings methods
-+    # ==================================
-+    def set_defaults(self, **kwargs):
-+        self._defaults.update(kwargs)
-+
-+        # if these defaults match any existing arguments, replace
-+        # the previous default on the object with the new one
-+        for action in self._actions:
-+            if action.dest in kwargs:
-+                action.default = kwargs[action.dest]
-+
-+    # =======================
-+    # Adding argument actions
-+    # =======================
-+    def add_argument(self, *args, **kwargs):
-+        """
-+        add_argument(dest, ..., name=value, ...)
-+        add_argument(option_string, option_string, ..., name=value, ...)
-+        """
-+
-+        # if no positional args are supplied or only one is supplied and
-+        # it doesn't look like an option string, parse a positional
-+        # argument
-+        chars = self.prefix_chars
-+        if not args or len(args) == 1 and args[0][0] not in chars:
-+            kwargs = self._get_positional_kwargs(*args, **kwargs)
-+
-+        # otherwise, we're adding an optional argument
-+        else:
-+            kwargs = self._get_optional_kwargs(*args, **kwargs)
-+
-+        # if no default was supplied, use the parser-level default
-+        if 'default' not in kwargs:
-+            dest = kwargs['dest']
-+            if dest in self._defaults:
-+                kwargs['default'] = self._defaults[dest]
-+            elif self.argument_default is not None:
-+                kwargs['default'] = self.argument_default
-+
-+        # create the action object, and add it to the parser
-+        action_class = self._pop_action_class(kwargs)
-+        action = action_class(**kwargs)
-+        return self._add_action(action)
-+
-+    def add_argument_group(self, *args, **kwargs):
-+        group = _ArgumentGroup(self, *args, **kwargs)
-+        self._action_groups.append(group)
-+        return group
-+
-+    def add_mutually_exclusive_group(self, **kwargs):
-+        group = _MutuallyExclusiveGroup(self, **kwargs)
-+        self._mutually_exclusive_groups.append(group)
-+        return group
-+
-+    def _add_action(self, action):
-+        # resolve any conflicts
-+        self._check_conflict(action)
-+
-+        # add to actions list
-+        self._actions.append(action)
-+        action.container = self
-+
-+        # index the action by any option strings it has
-+        for option_string in action.option_strings:
-+            self._option_string_actions[option_string] = action
-+
-+        # set the flag if any option strings look like negative numbers
-+        for option_string in action.option_strings:
-+            if self._negative_number_matcher.match(option_string):
-+                if not self._has_negative_number_optionals:
-+                    self._has_negative_number_optionals.append(True)
-+
-+        # return the created action
-+        return action
-+
-+    def _remove_action(self, action):
-+        self._actions.remove(action)
-+
-+    def _add_container_actions(self, container):
-+        # collect groups by titles
-+        title_group_map = {}
-+        for group in self._action_groups:
-+            if group.title in title_group_map:
-+                msg = _('cannot merge actions - two groups are named %r')
-+                raise ValueError(msg % (group.title))
-+            title_group_map[group.title] = group
-+
-+        # map each action to its group
-+        group_map = {}
-+        for group in container._action_groups:
-+
-+            # if a group with the title exists, use that, otherwise
-+            # create a new group matching the container's group
-+            if group.title not in title_group_map:
-+                title_group_map[group.title] = self.add_argument_group(
-+                    title=group.title,
-+                    description=group.description,
-+                    conflict_handler=group.conflict_handler)
-+
-+            # map the actions to their new group
-+            for action in group._group_actions:
-+                group_map[action] = title_group_map[group.title]
-+
-+        # add all actions to this container or their group
-+        for action in container._actions:
-+            group_map.get(action, self)._add_action(action)
-+
-+    def _get_positional_kwargs(self, dest, **kwargs):
-+        # make sure required is not specified
-+        if 'required' in kwargs:
-+            msg = _("'required' is an invalid argument for positionals")
-+            raise TypeError(msg)
-+
-+        # mark positional arguments as required if at least one is
-+        # always required
-+        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
-+            kwargs['required'] = True
-+        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
-+            kwargs['required'] = True
-+
-+        # return the keyword arguments with no option strings
-+        return dict(kwargs, dest=dest, option_strings=[])
-+
-+    def _get_optional_kwargs(self, *args, **kwargs):
-+        # determine short and long option strings
-+        option_strings = []
-+        long_option_strings = []
-+        for option_string in args:
-+            # error on one-or-fewer-character option strings
-+            if len(option_string) < 2:
-+                msg = _('invalid option string %r: '
-+                        'must be at least two characters long')
-+                raise ValueError(msg % option_string)
-+
-+            # error on strings that don't start with an appropriate prefix
-+            if not option_string[0] in self.prefix_chars:
-+                msg = _('invalid option string %r: '
-+                        'must start with a character %r')
-+                tup = option_string, self.prefix_chars
-+                raise ValueError(msg % tup)
-+
-+            # error on strings that are all prefix characters
-+            if not (_set(option_string) - _set(self.prefix_chars)):
-+                msg = _('invalid option string %r: '
-+                        'must contain characters other than %r')
-+                tup = option_string, self.prefix_chars
-+                raise ValueError(msg % tup)
-+
-+            # strings starting with two prefix characters are long options
-+            option_strings.append(option_string)
-+            if option_string[0] in self.prefix_chars:
-+                if option_string[1] in self.prefix_chars:
-+                    long_option_strings.append(option_string)
-+
-+        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
-+        dest = kwargs.pop('dest', None)
-+        if dest is None:
-+            if long_option_strings:
-+                dest_option_string = long_option_strings[0]
-+            else:
-+                dest_option_string = option_strings[0]
-+            dest = dest_option_string.lstrip(self.prefix_chars)
-+            dest = dest.replace('-', '_')
-+
-+        # return the updated keyword arguments
-+        return dict(kwargs, dest=dest, option_strings=option_strings)
-+
-+    def _pop_action_class(self, kwargs, default=None):
-+        action = kwargs.pop('action', default)
-+        return self._registry_get('action', action, action)
-+
-+    def _get_handler(self):
-+        # determine function from conflict handler string
-+        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
-+        try:
-+            return getattr(self, handler_func_name)
-+        except AttributeError:
-+            msg = _('invalid conflict_resolution value: %r')
-+            raise ValueError(msg % self.conflict_handler)
-+
-+    def _check_conflict(self, action):
-+
-+        # find all options that conflict with this option
-+        confl_optionals = []
-+        for option_string in action.option_strings:
-+            if option_string in self._option_string_actions:
-+                confl_optional = self._option_string_actions[option_string]
-+                confl_optionals.append((option_string, confl_optional))
-+
-+        # resolve any conflicts
-+        if confl_optionals:
-+            conflict_handler = self._get_handler()
-+            conflict_handler(action, confl_optionals)
-+
-+    def _handle_conflict_error(self, action, conflicting_actions):
-+        message = _('conflicting option string(s): %s')
-+        conflict_string = ', '.join([option_string
-+                                     for option_string, action
-+                                     in conflicting_actions])
-+        raise ArgumentError(action, message % conflict_string)
-+
-+    def _handle_conflict_resolve(self, action, conflicting_actions):
-+
-+        # remove all conflicting options
-+        for option_string, action in conflicting_actions:
-+
-+            # remove the conflicting option
-+            action.option_strings.remove(option_string)
-+            self._option_string_actions.pop(option_string, None)
-+
-+            # if the option now has no option string, remove it from the
-+            # container holding it
-+            if not action.option_strings:
-+                action.container._remove_action(action)
-+
-+
-+class _ArgumentGroup(_ActionsContainer):
-+
-+    def __init__(self, container, title=None, description=None, **kwargs):
-+        # add any missing keyword arguments by checking the container
-+        update = kwargs.setdefault
-+        update('conflict_handler', container.conflict_handler)
-+        update('prefix_chars', container.prefix_chars)
-+        update('argument_default', container.argument_default)
-+        super_init = super(_ArgumentGroup, self).__init__
-+        super_init(description=description, **kwargs)
-+
-+        # group attributes
-+        self.title = title
-+        self._group_actions = []
-+
-+        # share most attributes with the container
-+        self._registries = container._registries
-+        self._actions = container._actions
-+        self._option_string_actions = container._option_string_actions
-+        self._defaults = container._defaults
-+        self._has_negative_number_optionals = \
-+            container._has_negative_number_optionals
-+
-+    def _add_action(self, action):
-+        action = super(_ArgumentGroup, self)._add_action(action)
-+        self._group_actions.append(action)
-+        return action
-+
-+    def _remove_action(self, action):
-+        super(_ArgumentGroup, self)._remove_action(action)
-+        self._group_actions.remove(action)
-+
-+
-+class _MutuallyExclusiveGroup(_ArgumentGroup):
-+
-+    def __init__(self, container, required=False):
-+        super(_MutuallyExclusiveGroup, self).__init__(container)
-+        self.required = required
-+        self._container = container
-+
-+    def _add_action(self, action):
-+        if action.required:
-+            msg = _('mutually exclusive arguments must be optional')
-+            raise ValueError(msg)
-+        action = self._container._add_action(action)
-+        self._group_actions.append(action)
-+        return action
-+
-+    def _remove_action(self, action):
-+        self._container._remove_action(action)
-+        self._group_actions.remove(action)
-+
-+
-+class ArgumentParser(_AttributeHolder, _ActionsContainer):
-+    """Object for parsing command line strings into Python objects.
-+
-+    Keyword Arguments:
-+        - prog -- The name of the program (default: sys.argv[0])
-+        - usage -- A usage message (default: auto-generated from arguments)
-+        - description -- A description of what the program does
-+        - epilog -- Text following the argument descriptions
-+        - version -- Add a -v/--version option with the given version string
-+        - parents -- Parsers whose arguments should be copied into this one
-+        - formatter_class -- HelpFormatter class for printing help messages
-+        - prefix_chars -- Characters that prefix optional arguments
-+        - fromfile_prefix_chars -- Characters that prefix files containing
-+            additional arguments
-+        - argument_default -- The default value for all arguments
-+        - conflict_handler -- String indicating how to handle conflicts
-+        - add_help -- Add a -h/--help option
-+    """
-+
-+    def __init__(self,
-+                 prog=None,
-+                 usage=None,
-+                 description=None,
-+                 epilog=None,
-+                 version=None,
-+                 parents=[],
-+                 formatter_class=HelpFormatter,
-+                 prefix_chars='-',
-+                 fromfile_prefix_chars=None,
-+                 argument_default=None,
-+                 conflict_handler='error',
-+                 add_help=True):
-+
-+        superinit = super(ArgumentParser, self).__init__
-+        superinit(description=description,
-+                  prefix_chars=prefix_chars,
-+                  argument_default=argument_default,
-+                  conflict_handler=conflict_handler)
-+
-+        # default setting for prog
-+        if prog is None:
-+            prog = _os.path.basename(_sys.argv[0])
-+
-+        self.prog = prog
-+        self.usage = usage
-+        self.epilog = epilog
-+        self.version = version
-+        self.formatter_class = formatter_class
-+        self.fromfile_prefix_chars = fromfile_prefix_chars
-+        self.add_help = add_help
-+
-+        add_group = self.add_argument_group
-+        self._positionals = add_group(_('positional arguments'))
-+        self._optionals = add_group(_('optional arguments'))
-+        self._subparsers = None
-+
-+        # register types
-+        def identity(string):
-+            return string
-+        self.register('type', None, identity)
-+
-+        # add help and version arguments if necessary
-+        # (using explicit default to override global argument_default)
-+        if self.add_help:
-+            self.add_argument(
-+                '-h', '--help', action='help', default=SUPPRESS,
-+                help=_('show this help message and exit'))
-+        if self.version:
-+            self.add_argument(
-+                '-v', '--version', action='version', default=SUPPRESS,
-+                help=_("show program's version number and exit"))
-+
-+        # add parent arguments and defaults
-+        for parent in parents:
-+            self._add_container_actions(parent)
-+            try:
-+                defaults = parent._defaults
-+            except AttributeError:
-+                pass
-+            else:
-+                self._defaults.update(defaults)
-+
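A hedged sketch of how the constructor arguments handled above combine, including the parents= copying done in the final loop (program and option names are made up):

    import argparse

    base = argparse.ArgumentParser(add_help=False)   # avoid a duplicate -h/--help
    base.add_argument('--config')

    parser = argparse.ArgumentParser(
        prog='frobnicate',                 # default would be basename(sys.argv[0])
        description='Frobnicate things.',
        epilog='Report bugs upstream.',
        parents=[base])                    # --config is copied into this parser

    parser.parse_args(['--config', 'site.cfg'])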
-+    # =======================
-+    # Pretty __repr__ methods
-+    # =======================
-+    def _get_kwargs(self):
-+        names = [
-+            'prog',
-+            'usage',
-+            'description',
-+            'version',
-+            'formatter_class',
-+            'conflict_handler',
-+            'add_help',
-+        ]
-+        return [(name, getattr(self, name)) for name in names]
-+
-+    # ==================================
-+    # Optional/Positional adding methods
-+    # ==================================
-+    def add_subparsers(self, **kwargs):
-+        if self._subparsers is not None:
-+            self.error(_('cannot have multiple subparser arguments'))
-+
-+        # add the parser class to the arguments if it's not present
-+        kwargs.setdefault('parser_class', type(self))
-+
-+        if 'title' in kwargs or 'description' in kwargs:
-+            title = _(kwargs.pop('title', 'subcommands'))
-+            description = _(kwargs.pop('description', None))
-+            self._subparsers = self.add_argument_group(title, description)
-+        else:
-+            self._subparsers = self._positionals
-+
-+        # prog defaults to the usage message of this parser, skipping
-+        # optional arguments and with no "usage:" prefix
-+        if kwargs.get('prog') is None:
-+            formatter = self._get_formatter()
-+            positionals = self._get_positional_actions()
-+            groups = self._mutually_exclusive_groups
-+            formatter.add_usage(self.usage, positionals, groups, '')
-+            kwargs['prog'] = formatter.format_help().strip()
-+
-+        # create the parsers action and add it to the positionals list
-+        parsers_class = self._pop_action_class(kwargs, 'parsers')
-+        action = parsers_class(option_strings=[], **kwargs)
-+        self._subparsers._add_action(action)
-+
-+        # return the created parsers action
-+        return action
-+
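Sticking with the public API, a small sketch of what add_subparsers() wires up (subcommand names are illustrative):

    import argparse

    parser = argparse.ArgumentParser(prog='tool')
    sub = parser.add_subparsers(title='subcommands')   # gets its own argument group
    init = sub.add_parser('init')                      # a full child ArgumentParser
    init.add_argument('--bare', action='store_true')

    args = parser.parse_args(['init', '--bare'])       # args.bare is True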
-+    def _add_action(self, action):
-+        if action.option_strings:
-+            self._optionals._add_action(action)
-+        else:
-+            self._positionals._add_action(action)
-+        return action
-+
-+    def _get_optional_actions(self):
-+        return [action
-+                for action in self._actions
-+                if action.option_strings]
-+
-+    def _get_positional_actions(self):
-+        return [action
-+                for action in self._actions
-+                if not action.option_strings]
-+
-+    # =====================================
-+    # Command line argument parsing methods
-+    # =====================================
-+    def parse_args(self, args=None, namespace=None):
-+        args, argv = self.parse_known_args(args, namespace)
-+        if argv:
-+            msg = _('unrecognized arguments: %s')
-+            self.error(msg % ' '.join(argv))
-+        return args
-+
-+    def parse_known_args(self, args=None, namespace=None):
-+        # args default to the system args
-+        if args is None:
-+            args = _sys.argv[1:]
-+
-+        # default Namespace built from parser defaults
-+        if namespace is None:
-+            namespace = Namespace()
-+
-+        # add any action defaults that aren't present
-+        for action in self._actions:
-+            if action.dest is not SUPPRESS:
-+                if not hasattr(namespace, action.dest):
-+                    if action.default is not SUPPRESS:
-+                        default = action.default
-+                        if isinstance(action.default, _basestring):
-+                            default = self._get_value(action, default)
-+                        setattr(namespace, action.dest, default)
-+
-+        # add any parser defaults that aren't present
-+        for dest in self._defaults:
-+            if not hasattr(namespace, dest):
-+                setattr(namespace, dest, self._defaults[dest])
-+
-+        # parse the arguments and exit if there are any errors
-+        try:
-+            return self._parse_known_args(args, namespace)
-+        except ArgumentError:
-+            err = _sys.exc_info()[1]
-+            self.error(str(err))
-+
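The difference between the two entry points above, as a sketch (names illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--known')

    ns, extras = parser.parse_known_args(['--known', '1', '--other', 'x'])
    # ns.known == '1' and extras == ['--other', 'x'];
    # parse_args() on the same input would instead call error()
    # with "unrecognized arguments: --other x"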
-+    def _parse_known_args(self, arg_strings, namespace):
-+        # replace arg strings that are file references
-+        if self.fromfile_prefix_chars is not None:
-+            arg_strings = self._read_args_from_files(arg_strings)
-+
-+        # map all mutually exclusive arguments to the other arguments
-+        # they can't occur with
-+        action_conflicts = {}
-+        for mutex_group in self._mutually_exclusive_groups:
-+            group_actions = mutex_group._group_actions
-+            for i, mutex_action in enumerate(mutex_group._group_actions):
-+                conflicts = action_conflicts.setdefault(mutex_action, [])
-+                conflicts.extend(group_actions[:i])
-+                conflicts.extend(group_actions[i + 1:])
-+
-+        # find all option indices, and determine the arg_string_pattern
-+        # which has an 'O' if there is an option at an index,
-+        # an 'A' if there is an argument, or a '-' if there is a '--'
-+        option_string_indices = {}
-+        arg_string_pattern_parts = []
-+        arg_strings_iter = iter(arg_strings)
-+        for i, arg_string in enumerate(arg_strings_iter):
-+
-+            # all args after -- are non-options
-+            if arg_string == '--':
-+                arg_string_pattern_parts.append('-')
-+                for arg_string in arg_strings_iter:
-+                    arg_string_pattern_parts.append('A')
-+
-+            # otherwise, add the arg to the arg strings
-+            # and note the index if it was an option
-+            else:
-+                option_tuple = self._parse_optional(arg_string)
-+                if option_tuple is None:
-+                    pattern = 'A'
-+                else:
-+                    option_string_indices[i] = option_tuple
-+                    pattern = 'O'
-+                arg_string_pattern_parts.append(pattern)
-+
-+        # join the pieces together to form the pattern
-+        arg_strings_pattern = ''.join(arg_string_pattern_parts)
-+
-+        # converts arg strings to the appropriate type and then takes the action
-+        seen_actions = _set()
-+        seen_non_default_actions = _set()
-+
-+        def take_action(action, argument_strings, option_string=None):
-+            seen_actions.add(action)
-+            argument_values = self._get_values(action, argument_strings)
-+
-+            # error if this argument is not allowed with other previously
-+            # seen arguments, assuming that actions that use the default
-+            # value don't really count as "present"
-+            if argument_values is not action.default:
-+                seen_non_default_actions.add(action)
-+                for conflict_action in action_conflicts.get(action, []):
-+                    if conflict_action in seen_non_default_actions:
-+                        msg = _('not allowed with argument %s')
-+                        action_name = _get_action_name(conflict_action)
-+                        raise ArgumentError(action, msg % action_name)
-+
-+            # take the action if we didn't receive a SUPPRESS value
-+            # (e.g. from a default)
-+            if argument_values is not SUPPRESS:
-+                action(self, namespace, argument_values, option_string)
-+
-+        # function to convert arg_strings into an optional action
-+        def consume_optional(start_index):
-+
-+            # get the optional identified at this index
-+            option_tuple = option_string_indices[start_index]
-+            action, option_string, explicit_arg = option_tuple
-+
-+            # identify additional optionals in the same arg string
-+            # (e.g. -xyz is the same as -x -y -z if no args are required)
-+            match_argument = self._match_argument
-+            action_tuples = []
-+            while True:
-+
-+                # if we found no optional action, skip it
-+                if action is None:
-+                    extras.append(arg_strings[start_index])
-+                    return start_index + 1
-+
-+                # if there is an explicit argument, try to match the
-+                # optional's string arguments to only this
-+                if explicit_arg is not None:
-+                    arg_count = match_argument(action, 'A')
-+
-+                    # if the action is a single-dash option and takes no
-+                    # arguments, try to parse more single-dash options out
-+                    # of the tail of the option string
-+                    chars = self.prefix_chars
-+                    if arg_count == 0 and option_string[1] not in chars:
-+                        action_tuples.append((action, [], option_string))
-+                        for char in self.prefix_chars:
-+                            option_string = char + explicit_arg[0]
-+                            explicit_arg = explicit_arg[1:] or None
-+                            optionals_map = self._option_string_actions
-+                            if option_string in optionals_map:
-+                                action = optionals_map[option_string]
-+                                break
-+                        else:
-+                            msg = _('ignored explicit argument %r')
-+                            raise ArgumentError(action, msg % explicit_arg)
-+
-+                    # if the action expects exactly one argument, we've
-+                    # successfully matched the option; exit the loop
-+                    elif arg_count == 1:
-+                        stop = start_index + 1
-+                        args = [explicit_arg]
-+                        action_tuples.append((action, args, option_string))
-+                        break
-+
-+                    # error if a double-dash option did not use the
-+                    # explicit argument
-+                    else:
-+                        msg = _('ignored explicit argument %r')
-+                        raise ArgumentError(action, msg % explicit_arg)
-+
-+                # if there is no explicit argument, try to match the
-+                # optional's string arguments with the following strings
-+                # if successful, exit the loop
-+                else:
-+                    start = start_index + 1
-+                    selected_patterns = arg_strings_pattern[start:]
-+                    arg_count = match_argument(action, selected_patterns)
-+                    stop = start + arg_count
-+                    args = arg_strings[start:stop]
-+                    action_tuples.append((action, args, option_string))
-+                    break
-+
-+            # add the Optional to the list and return the index at which
-+            # the Optional's string args stopped
-+            assert action_tuples
-+            for action, args, option_string in action_tuples:
-+                take_action(action, args, option_string)
-+            return stop
-+
-+        # the list of Positionals left to be parsed; this is modified
-+        # by consume_positionals()
-+        positionals = self._get_positional_actions()
-+
-+        # function to convert arg_strings into positional actions
-+        def consume_positionals(start_index):
-+            # match as many Positionals as possible
-+            match_partial = self._match_arguments_partial
-+            selected_pattern = arg_strings_pattern[start_index:]
-+            arg_counts = match_partial(positionals, selected_pattern)
-+
-+            # slice off the appropriate arg strings for each Positional
-+            # and add the Positional and its args to the list
-+            for action, arg_count in zip(positionals, arg_counts):
-+                args = arg_strings[start_index: start_index + arg_count]
-+                start_index += arg_count
-+                take_action(action, args)
-+
-+            # slice off the Positionals that we just parsed and return the
-+            # index at which the Positionals' string args stopped
-+            positionals[:] = positionals[len(arg_counts):]
-+            return start_index
-+
-+        # consume Positionals and Optionals alternately, until we have
-+        # passed the last option string
-+        extras = []
-+        start_index = 0
-+        if option_string_indices:
-+            max_option_string_index = max(option_string_indices)
-+        else:
-+            max_option_string_index = -1
-+        while start_index <= max_option_string_index:
-+
-+            # consume any Positionals preceding the next option
-+            next_option_string_index = min([
-+                index
-+                for index in option_string_indices
-+                if index >= start_index])
-+            if start_index != next_option_string_index:
-+                positionals_end_index = consume_positionals(start_index)
-+
-+                # only try to parse the next optional if we didn't consume
-+                # the option string during the positionals parsing
-+                if positionals_end_index > start_index:
-+                    start_index = positionals_end_index
-+                    continue
-+                else:
-+                    start_index = positionals_end_index
-+
-+            # if we consumed all the positionals we could and we're not
-+            # at the index of an option string, there were extra arguments
-+            if start_index not in option_string_indices:
-+                strings = arg_strings[start_index:next_option_string_index]
-+                extras.extend(strings)
-+                start_index = next_option_string_index
-+
-+            # consume the next optional and any arguments for it
-+            start_index = consume_optional(start_index)
-+
-+        # consume any positionals following the last Optional
-+        stop_index = consume_positionals(start_index)
-+
-+        # if we didn't consume all the argument strings, there were extras
-+        extras.extend(arg_strings[stop_index:])
-+
-+        # if we didn't use all the Positional objects, there were too few
-+        # arg strings supplied.
-+        if positionals:
-+            self.error(_('too few arguments'))
-+
-+        # make sure all required actions were present
-+        for action in self._actions:
-+            if action.required:
-+                if action not in seen_actions:
-+                    name = _get_action_name(action)
-+                    self.error(_('argument %s is required') % name)
-+
-+        # make sure all required groups had one option present
-+        for group in self._mutually_exclusive_groups:
-+            if group.required:
-+                for action in group._group_actions:
-+                    if action in seen_non_default_actions:
-+                        break
-+
-+                # if no actions were used, report the error
-+                else:
-+                    names = [_get_action_name(action)
-+                             for action in group._group_actions
-+                             if action.help is not SUPPRESS]
-+                    msg = _('one of the arguments %s is required')
-+                    self.error(msg % ' '.join(names))
-+
-+        # return the updated namespace and the extra arguments
-+        return namespace, extras
-+
-+    def _read_args_from_files(self, arg_strings):
-+        # expand arguments referencing files
-+        new_arg_strings = []
-+        for arg_string in arg_strings:
-+
-+            # for regular arguments, just add them back into the list
-+            if arg_string[0] not in self.fromfile_prefix_chars:
-+                new_arg_strings.append(arg_string)
-+
-+            # replace arguments referencing files with the file content
-+            else:
-+                try:
-+                    args_file = open(arg_string[1:])
-+                    try:
-+                        arg_strings = args_file.read().splitlines()
-+                        arg_strings = self._read_args_from_files(arg_strings)
-+                        new_arg_strings.extend(arg_strings)
-+                    finally:
-+                        args_file.close()
-+                except IOError:
-+                    err = _sys.exc_info()[1]
-+                    self.error(str(err))
-+
-+        # return the modified argument list
-+        return new_arg_strings
-+
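A sketch of the fromfile_prefix_chars feature that _read_args_from_files implements, assuming the one-argument-per-line convention this code uses (the file name is illustrative):

    import argparse

    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument('--name')

    with open('args.txt', 'w') as f:
        f.write('--name\nIPython\n')          # one argument per line

    ns = parser.parse_args(['@args.txt'])     # '@args.txt' is expanded in place
    # ns.name == 'IPython'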
-+    def _match_argument(self, action, arg_strings_pattern):
-+        # match the pattern for this action to the arg strings
-+        nargs_pattern = self._get_nargs_pattern(action)
-+        match = _re.match(nargs_pattern, arg_strings_pattern)
-+
-+        # raise an exception if we weren't able to find a match
-+        if match is None:
-+            nargs_errors = {
-+                None: _('expected one argument'),
-+                OPTIONAL: _('expected at most one argument'),
-+                ONE_OR_MORE: _('expected at least one argument'),
-+            }
-+            default = _('expected %s argument(s)') % action.nargs
-+            msg = nargs_errors.get(action.nargs, default)
-+            raise ArgumentError(action, msg)
-+
-+        # return the number of arguments matched
-+        return len(match.group(1))
-+
-+    def _match_arguments_partial(self, actions, arg_strings_pattern):
-+        # progressively shorten the actions list by slicing off the
-+        # final actions until we find a match
-+        result = []
-+        for i in range(len(actions), 0, -1):
-+            actions_slice = actions[:i]
-+            pattern = ''.join([self._get_nargs_pattern(action)
-+                               for action in actions_slice])
-+            match = _re.match(pattern, arg_strings_pattern)
-+            if match is not None:
-+                result.extend([len(string) for string in match.groups()])
-+                break
-+
-+        # return the list of arg string counts
-+        return result
-+
-+    def _parse_optional(self, arg_string):
-+        # if it's an empty string, it was meant to be a positional
-+        if not arg_string:
-+            return None
-+
-+        # if it doesn't start with a prefix, it was meant to be positional
-+        if not arg_string[0] in self.prefix_chars:
-+            return None
-+
-+        # if it's just dashes, it was meant to be positional
-+        if not arg_string.strip('-'):
-+            return None
-+
-+        # if the option string is present in the parser, return the action
-+        if arg_string in self._option_string_actions:
-+            action = self._option_string_actions[arg_string]
-+            return action, arg_string, None
-+
-+        # search through all possible prefixes of the option string
-+        # and all actions in the parser for possible interpretations
-+        option_tuples = self._get_option_tuples(arg_string)
-+
-+        # if multiple actions match, the option string was ambiguous
-+        if len(option_tuples) > 1:
-+            options = ', '.join([option_string
-+                for action, option_string, explicit_arg in option_tuples])
-+            tup = arg_string, options
-+            self.error(_('ambiguous option: %s could match %s') % tup)
-+
-+        # if exactly one action matched, this segmentation is good,
-+        # so return the parsed action
-+        elif len(option_tuples) == 1:
-+            option_tuple, = option_tuples
-+            return option_tuple
-+
-+        # if it was not found as an option, but it looks like a negative
-+        # number, it was meant to be positional
-+        # unless there are negative-number-like options
-+        if self._negative_number_matcher.match(arg_string):
-+            if not self._has_negative_number_optionals:
-+                return None
-+
-+        # if it contains a space, it was meant to be a positional
-+        if ' ' in arg_string:
-+            return None
-+
-+        # it was meant to be an optional but there is no such option
-+        # in this parser (though it might be a valid option in a subparser)
-+        return None, arg_string, None
-+
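The negative-number and empty-string rules above are easiest to see from the outside; a sketch (argument names illustrative):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('delta', type=int)
    parser.add_argument('--scale', type=int)

    parser.parse_args(['-1'])        # delta == -1: '-1' looks like a negative
                                     # number and no option string does, so it
                                     # is treated as a positional
    # If the parser also defined an option literally named '-1', the same
    # string would be parsed as that option instead.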
-+    def _get_option_tuples(self, option_string):
-+        result = []
-+
-+        # option strings starting with two prefix characters are only
-+        # split at the '='
-+        chars = self.prefix_chars
-+        if option_string[0] in chars and option_string[1] in chars:
-+            if '=' in option_string:
-+                option_prefix, explicit_arg = option_string.split('=', 1)
-+            else:
-+                option_prefix = option_string
-+                explicit_arg = None
-+            for option_string in self._option_string_actions:
-+                if option_string.startswith(option_prefix):
-+                    action = self._option_string_actions[option_string]
-+                    tup = action, option_string, explicit_arg
-+                    result.append(tup)
-+
-+        # single character options can be concatenated with their arguments
-+        # but multiple character options always have to have their argument
-+        # separate
-+        elif option_string[0] in chars and option_string[1] not in chars:
-+            option_prefix = option_string
-+            explicit_arg = None
-+            short_option_prefix = option_string[:2]
-+            short_explicit_arg = option_string[2:]
-+
-+            for option_string in self._option_string_actions:
-+                if option_string == short_option_prefix:
-+                    action = self._option_string_actions[option_string]
-+                    tup = action, option_string, short_explicit_arg
-+                    result.append(tup)
-+                elif option_string.startswith(option_prefix):
-+                    action = self._option_string_actions[option_string]
-+                    tup = action, option_string, explicit_arg
-+                    result.append(tup)
-+
-+        # shouldn't ever get here
-+        else:
-+            self.error(_('unexpected option string: %s') % option_string)
-+
-+        # return the collected option tuples
-+        return result
-+
-+    def _get_nargs_pattern(self, action):
-+        # in all examples below, we have to allow for '--' args
-+        # which are represented as '-' in the pattern
-+        nargs = action.nargs
-+
-+        # the default (None) is assumed to be a single argument
-+        if nargs is None:
-+            nargs_pattern = '(-*A-*)'
-+
-+        # allow zero or one arguments
-+        elif nargs == OPTIONAL:
-+            nargs_pattern = '(-*A?-*)'
-+
-+        # allow zero or more arguments
-+        elif nargs == ZERO_OR_MORE:
-+            nargs_pattern = '(-*[A-]*)'
-+
-+        # allow one or more arguments
-+        elif nargs == ONE_OR_MORE:
-+            nargs_pattern = '(-*A[A-]*)'
-+
-+        # allow one argument followed by any number of options or arguments
-+        elif nargs is PARSER:
-+            nargs_pattern = '(-*A[-AO]*)'
-+
-+        # all others should be integers
-+        else:
-+            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
-+
-+        # if this is an optional action, -- is not allowed
-+        if action.option_strings:
-+            nargs_pattern = nargs_pattern.replace('-*', '')
-+            nargs_pattern = nargs_pattern.replace('-', '')
-+
-+        # return the pattern
-+        return nargs_pattern
-+
-+    # ========================
-+    # Value conversion methods
-+    # ========================
-+    def _get_values(self, action, arg_strings):
-+        # for everything but PARSER args, strip out '--'
-+        if action.nargs is not PARSER:
-+            arg_strings = [s for s in arg_strings if s != '--']
-+
-+        # optional argument produces a default when not present
-+        if not arg_strings and action.nargs == OPTIONAL:
-+            if action.option_strings:
-+                value = action.const
-+            else:
-+                value = action.default
-+            if isinstance(value, _basestring):
-+                value = self._get_value(action, value)
-+                self._check_value(action, value)
-+
-+        # when nargs='*' on a positional, if there were no command-line
-+        # args, use the default if it is anything other than None
-+        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
-+              not action.option_strings):
-+            if action.default is not None:
-+                value = action.default
-+            else:
-+                value = arg_strings
-+            self._check_value(action, value)
-+
-+        # single argument or optional argument produces a single value
-+        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
-+            arg_string, = arg_strings
-+            value = self._get_value(action, arg_string)
-+            self._check_value(action, value)
-+
-+        # PARSER arguments convert all values, but check only the first
-+        elif action.nargs is PARSER:
-+            value = [self._get_value(action, v) for v in arg_strings]
-+            self._check_value(action, value[0])
-+
-+        # all other types of nargs produce a list
-+        else:
-+            value = [self._get_value(action, v) for v in arg_strings]
-+            for v in value:
-+                self._check_value(action, v)
-+
-+        # return the converted value
-+        return value
-+
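The nargs='?' branch above (const when the option appears bare, default when it is absent) in user-visible terms, as a sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--log', nargs='?', const='debug', default='warn')

    parser.parse_args([]).log               # 'warn'  (option absent  -> default)
    parser.parse_args(['--log']).log        # 'debug' (option bare    -> const)
    parser.parse_args(['--log', 'io']).log  # 'io'    (explicit value wins)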
-+    def _get_value(self, action, arg_string):
-+        type_func = self._registry_get('type', action.type, action.type)
-+        if not hasattr(type_func, '__call__'):
-+            msg = _('%r is not callable')
-+            raise ArgumentError(action, msg % type_func)
-+
-+        # convert the value to the appropriate type
-+        try:
-+            result = type_func(arg_string)
-+
-+        # TypeErrors or ValueErrors indicate errors
-+        except (TypeError, ValueError):
-+            name = getattr(action.type, '__name__', repr(action.type))
-+            msg = _('invalid %s value: %r')
-+            raise ArgumentError(action, msg % (name, arg_string))
-+
-+        # return the converted value
-+        return result
-+
-+    def _check_value(self, action, value):
-+        # converted value must be one of the choices (if specified)
-+        if action.choices is not None and value not in action.choices:
-+            tup = value, ', '.join(map(repr, action.choices))
-+            msg = _('invalid choice: %r (choose from %s)') % tup
-+            raise ArgumentError(action, msg)
-+
-+    # =======================
-+    # Help-formatting methods
-+    # =======================
-+    def format_usage(self):
-+        formatter = self._get_formatter()
-+        formatter.add_usage(self.usage, self._actions,
-+                            self._mutually_exclusive_groups)
-+        return formatter.format_help()
-+
-+    def format_help(self):
-+        formatter = self._get_formatter()
-+
-+        # usage
-+        formatter.add_usage(self.usage, self._actions,
-+                            self._mutually_exclusive_groups)
-+
-+        # description
-+        formatter.add_text(self.description)
-+
-+        # positionals, optionals and user-defined groups
-+        for action_group in self._action_groups:
-+            formatter.start_section(action_group.title)
-+            formatter.add_text(action_group.description)
-+            formatter.add_arguments(action_group._group_actions)
-+            formatter.end_section()
-+
-+        # epilog
-+        formatter.add_text(self.epilog)
-+
-+        # determine help from format above
-+        return formatter.format_help()
-+
-+    def format_version(self):
-+        formatter = self._get_formatter()
-+        formatter.add_text(self.version)
-+        return formatter.format_help()
-+
-+    def _get_formatter(self):
-+        return self.formatter_class(prog=self.prog)
-+
-+    # =====================
-+    # Help-printing methods
-+    # =====================
-+    def print_usage(self, file=None):
-+        self._print_message(self.format_usage(), file)
-+
-+    def print_help(self, file=None):
-+        self._print_message(self.format_help(), file)
-+
-+    def print_version(self, file=None):
-+        self._print_message(self.format_version(), file)
-+
-+    def _print_message(self, message, file=None):
-+        if message:
-+            if file is None:
-+                file = _sys.stderr
-+            file.write(message)
-+
-+    # ===============
-+    # Exiting methods
-+    # ===============
-+    def exit(self, status=0, message=None):
-+        if message:
-+            _sys.stderr.write(message)
-+        _sys.exit(status)
-+
-+    def error(self, message):
-+        """error(message: string)
-+
-+        Prints a usage message incorporating the message to stderr and
-+        exits.
-+
-+        If you override this in a subclass, it should not return -- it
-+        should either exit or raise an exception.
-+        """
-+        self.print_usage(_sys.stderr)
-+        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
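Because error() is documented as an override point that must not return, a common pattern is to subclass and raise instead of exiting; a sketch (class and option names illustrative):

    import argparse

    class RaisingParser(argparse.ArgumentParser):
        def error(self, message):
            # never returns, as required above; useful when embedding a parser
            raise ValueError(message)

    parser = RaisingParser()
    parser.add_argument('--n', type=int)
    try:
        parser.parse_args(['--n', 'oops'])
    except ValueError as exc:
        pass   # str(exc) == "argument --n: invalid int value: 'oops'"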
 Index: ipython-0.10/IPython/external/argparse/__init__.py
 ===================================================================
 --- /dev/null
@@ -2235,4733 +14,6 @@ Index: ipython-0.10/IPython/external/argparse/__init__.py
 +    from argparse import *
 +except ImportError:
 +    from _argparse import *
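Those three lines are the core of the new unbundling scheme: IPython/external/argparse/__init__.py re-exports the system argparse when it can be imported and falls back to the bundled _argparse otherwise. What callers see, sketched with an illustrative program name:

    # Existing code keeps importing the same dotted path; which implementation
    # backs it is decided once, at import time.
    from IPython.external.argparse import ArgumentParser

    parser = ArgumentParser(prog='example-tool')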
-Index: ipython-0.10/IPython/external/argparse.py
-===================================================================
---- ipython-0.10.orig/IPython/external/argparse.py
-+++ /dev/null
-@@ -1,2216 +0,0 @@
--# -*- coding: utf-8 -*-
--
--# Copyright © 2006-2009 Steven J. Bethard <steven.bethard at gmail.com>.
--#
--# Licensed under the Apache License, Version 2.0 (the "License"); you may not
--# use this file except in compliance with the License. You may obtain a copy
--# of the License at
--#
--#     http://www.apache.org/licenses/LICENSE-2.0
--#
--# Unless required by applicable law or agreed to in writing, software
--# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
--# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
--# License for the specific language governing permissions and limitations
--# under the License.
--
--"""Command-line parsing library
--
--This module is an optparse-inspired command-line parsing library that:
--
--    - handles both optional and positional arguments
--    - produces highly informative usage messages
--    - supports parsers that dispatch to sub-parsers
--
--The following is a simple usage example that sums integers from the
--command-line and writes the result to a file::
--
--    parser = argparse.ArgumentParser(
--        description='sum the integers at the command line')
--    parser.add_argument(
--        'integers', metavar='int', nargs='+', type=int,
--        help='an integer to be summed')
--    parser.add_argument(
--        '--log', default=sys.stdout, type=argparse.FileType('w'),
--        help='the file where the sum should be written')
--    args = parser.parse_args()
--    args.log.write('%s' % sum(args.integers))
--    args.log.close()
--
--The module contains the following public classes:
--
--    - ArgumentParser -- The main entry point for command-line parsing. As the
--        example above shows, the add_argument() method is used to populate
--        the parser with actions for optional and positional arguments. Then
--        the parse_args() method is invoked to convert the args at the
--        command-line into an object with attributes.
--
--    - ArgumentError -- The exception raised by ArgumentParser objects when
--        there are errors with the parser's actions. Errors raised while
--        parsing the command-line are caught by ArgumentParser and emitted
--        as command-line messages.
--
--    - FileType -- A factory for defining types of files to be created. As the
--        example above shows, instances of FileType are typically passed as
--        the type= argument of add_argument() calls.
--
--    - Action -- The base class for parser actions. Typically actions are
--        selected by passing strings like 'store_true' or 'append_const' to
--        the action= argument of add_argument(). However, for greater
--        customization of ArgumentParser actions, subclasses of Action may
--        be defined and passed as the action= argument.
--
--    - HelpFormatter, RawDescriptionHelpFormatter, RawTextHelpFormatter,
--        ArgumentDefaultsHelpFormatter -- Formatter classes which
--        may be passed as the formatter_class= argument to the
--        ArgumentParser constructor. HelpFormatter is the default,
--        RawDescriptionHelpFormatter and RawTextHelpFormatter tell the parser
--        not to change the formatting for help text, and
--        ArgumentDefaultsHelpFormatter adds information about argument defaults
--        to the help.
--
--All other classes in this module are considered implementation details.
--(Also note that HelpFormatter and RawDescriptionHelpFormatter are only
--considered public as object names -- the API of the formatter objects is
--still considered an implementation detail.)
--"""
--
--__version__ = '1.0'
--__all__ = [
--    'ArgumentParser',
--    'ArgumentError',
--    'Namespace',
--    'Action',
--    'FileType',
--    'HelpFormatter',
--    'RawDescriptionHelpFormatter',
--    'RawTextHelpFormatter',
--    'ArgumentDefaultsHelpFormatter',
--]
--
--
--import copy as _copy
--import os as _os
--import re as _re
--import sys as _sys
--import textwrap as _textwrap
--
--from gettext import gettext as _
--
--try:
--    _set = set
--except NameError:
--    from sets import Set as _set
--
--try:
--    _basestring = basestring
--except NameError:
--    _basestring = str
--
--try:
--    _sorted = sorted
--except NameError:
--
--    def _sorted(iterable, reverse=False):
--        result = list(iterable)
--        result.sort()
--        if reverse:
--            result.reverse()
--        return result
--
--
--SUPPRESS = '==SUPPRESS=='
--
--OPTIONAL = '?'
--ZERO_OR_MORE = '*'
--ONE_OR_MORE = '+'
--PARSER = '==PARSER=='
--
--# =============================
--# Utility functions and classes
--# =============================
--
--class _AttributeHolder(object):
--    """Abstract base class that provides __repr__.
--
--    The __repr__ method returns a string in the format::
--        ClassName(attr=name, attr=name, ...)
--    The attributes are determined either by a class-level attribute,
--    '_kwarg_names', or by inspecting the instance __dict__.
--    """
--
--    def __repr__(self):
--        type_name = type(self).__name__
--        arg_strings = []
--        for arg in self._get_args():
--            arg_strings.append(repr(arg))
--        for name, value in self._get_kwargs():
--            arg_strings.append('%s=%r' % (name, value))
--        return '%s(%s)' % (type_name, ', '.join(arg_strings))
--
--    def _get_kwargs(self):
--        return _sorted(self.__dict__.items())
--
--    def _get_args(self):
--        return []
--
--
--def _ensure_value(namespace, name, value):
--    if getattr(namespace, name, None) is None:
--        setattr(namespace, name, value)
--    return getattr(namespace, name)
--
--
--# ===============
--# Formatting Help
--# ===============
--
--class HelpFormatter(object):
--    """Formatter for generating usage messages and argument help strings.
--
--    Only the name of this class is considered a public API. All the methods
--    provided by the class are considered an implementation detail.
--    """
--
--    def __init__(self,
--                 prog,
--                 indent_increment=2,
--                 max_help_position=24,
--                 width=None):
--
--        # default setting for width
--        if width is None:
--            try:
--                width = int(_os.environ['COLUMNS'])
--            except (KeyError, ValueError):
--                width = 80
--            width -= 2
--
--        self._prog = prog
--        self._indent_increment = indent_increment
--        self._max_help_position = max_help_position
--        self._width = width
--
--        self._current_indent = 0
--        self._level = 0
--        self._action_max_length = 0
--
--        self._root_section = self._Section(self, None)
--        self._current_section = self._root_section
--
--        self._whitespace_matcher = _re.compile(r'\s+')
--        self._long_break_matcher = _re.compile(r'\n\n\n+')
--
--    # ===============================
--    # Section and indentation methods
--    # ===============================
--    def _indent(self):
--        self._current_indent += self._indent_increment
--        self._level += 1
--
--    def _dedent(self):
--        self._current_indent -= self._indent_increment
--        assert self._current_indent >= 0, 'Indent decreased below 0.'
--        self._level -= 1
--
--    class _Section(object):
--
--        def __init__(self, formatter, parent, heading=None):
--            self.formatter = formatter
--            self.parent = parent
--            self.heading = heading
--            self.items = []
--
--        def format_help(self):
--            # format the indented section
--            if self.parent is not None:
--                self.formatter._indent()
--            join = self.formatter._join_parts
--            for func, args in self.items:
--                func(*args)
--            item_help = join([func(*args) for func, args in self.items])
--            if self.parent is not None:
--                self.formatter._dedent()
--
--            # return nothing if the section was empty
--            if not item_help:
--                return ''
--
--            # add the heading if the section was non-empty
--            if self.heading is not SUPPRESS and self.heading is not None:
--                current_indent = self.formatter._current_indent
--                heading = '%*s%s:\n' % (current_indent, '', self.heading)
--            else:
--                heading = ''
--
--            # join the section-initial newline, the heading and the help
--            return join(['\n', heading, item_help, '\n'])
--
--    def _add_item(self, func, args):
--        self._current_section.items.append((func, args))
--
--    # ========================
--    # Message building methods
--    # ========================
--    def start_section(self, heading):
--        self._indent()
--        section = self._Section(self, self._current_section, heading)
--        self._add_item(section.format_help, [])
--        self._current_section = section
--
--    def end_section(self):
--        self._current_section = self._current_section.parent
--        self._dedent()
--
--    def add_text(self, text):
--        if text is not SUPPRESS and text is not None:
--            self._add_item(self._format_text, [text])
--
--    def add_usage(self, usage, actions, groups, prefix=None):
--        if usage is not SUPPRESS:
--            args = usage, actions, groups, prefix
--            self._add_item(self._format_usage, args)
--
--    def add_argument(self, action):
--        if action.help is not SUPPRESS:
--
--            # find all invocations
--            get_invocation = self._format_action_invocation
--            invocations = [get_invocation(action)]
--            for subaction in self._iter_indented_subactions(action):
--                invocations.append(get_invocation(subaction))
--
--            # update the maximum item length
--            invocation_length = max([len(s) for s in invocations])
--            action_length = invocation_length + self._current_indent
--            self._action_max_length = max(self._action_max_length,
--                                          action_length)
--
--            # add the item to the list
--            self._add_item(self._format_action, [action])
--
--    def add_arguments(self, actions):
--        for action in actions:
--            self.add_argument(action)
--
--    # =======================
--    # Help-formatting methods
--    # =======================
--    def format_help(self):
--        help = self._root_section.format_help() % dict(prog=self._prog)
--        if help:
--            help = self._long_break_matcher.sub('\n\n', help)
--            help = help.strip('\n') + '\n'
--        return help
--
--    def _join_parts(self, part_strings):
--        return ''.join([part
--                        for part in part_strings
--                        if part and part is not SUPPRESS])
--
--    def _format_usage(self, usage, actions, groups, prefix):
--        if prefix is None:
--            prefix = _('usage: ')
--
--        # if no optionals or positionals are available, usage is just prog
--        if usage is None and not actions:
--            usage = '%(prog)s'
--
--        # if optionals and positionals are available, calculate usage
--        elif usage is None:
--            usage = '%(prog)s' % dict(prog=self._prog)
--
--            # split optionals from positionals
--            optionals = []
--            positionals = []
--            for action in actions:
--                if action.option_strings:
--                    optionals.append(action)
--                else:
--                    positionals.append(action)
--
--            # determine width of "usage: PROG" and width of text
--            prefix_width = len(prefix) + len(usage) + 1
--            prefix_indent = self._current_indent + prefix_width
--            text_width = self._width - self._current_indent
--
--            # put them on one line if they're short enough
--            format = self._format_actions_usage
--            action_usage = format(optionals + positionals, groups)
--            if prefix_width + len(action_usage) + 1 < text_width:
--                usage = '%s %s' % (usage, action_usage)
--
--            # if they're long, wrap optionals and positionals individually
--            else:
--                optional_usage = format(optionals, groups)
--                positional_usage = format(positionals, groups)
--                indent = ' ' * prefix_indent
--
--                # usage is made of PROG, optionals and positionals
--                parts = [usage, ' ']
--
--                # options always get added right after PROG
--                if optional_usage:
--                    parts.append(_textwrap.fill(
--                        optional_usage, text_width,
--                        initial_indent=indent,
--                        subsequent_indent=indent).lstrip())
--
--                # if there were options, put arguments on the next line
--                # otherwise, start them right after PROG
--                if positional_usage:
--                    part = _textwrap.fill(
--                        positional_usage, text_width,
--                        initial_indent=indent,
--                        subsequent_indent=indent).lstrip()
--                    if optional_usage:
--                        part = '\n' + indent + part
--                    parts.append(part)
--                usage = ''.join(parts)
--
--        # prefix with 'usage:'
--        return '%s%s\n\n' % (prefix, usage)
--
--    def _format_actions_usage(self, actions, groups):
--        # find group indices and identify actions in groups
--        group_actions = _set()
--        inserts = {}
--        for group in groups:
--            try:
--                start = actions.index(group._group_actions[0])
--            except ValueError:
--                continue
--            else:
--                end = start + len(group._group_actions)
--                if actions[start:end] == group._group_actions:
--                    for action in group._group_actions:
--                        group_actions.add(action)
--                    if not group.required:
--                        inserts[start] = '['
--                        inserts[end] = ']'
--                    else:
--                        inserts[start] = '('
--                        inserts[end] = ')'
--                    for i in range(start + 1, end):
--                        inserts[i] = '|'
--
--        # collect all actions format strings
--        parts = []
--        for i, action in enumerate(actions):
--
--            # suppressed arguments are marked with None
--            # remove | separators for suppressed arguments
--            if action.help is SUPPRESS:
--                parts.append(None)
--                if inserts.get(i) == '|':
--                    inserts.pop(i)
--                elif inserts.get(i + 1) == '|':
--                    inserts.pop(i + 1)
--
--            # produce all arg strings
--            elif not action.option_strings:
--                part = self._format_args(action, action.dest)
--
--                # if it's in a group, strip the outer []
--                if action in group_actions:
--                    if part[0] == '[' and part[-1] == ']':
--                        part = part[1:-1]
--
--                # add the action string to the list
--                parts.append(part)
--
--            # produce the first way to invoke the option in brackets
--            else:
--                option_string = action.option_strings[0]
--
--                # if the Optional doesn't take a value, format is:
--                #    -s or --long
--                if action.nargs == 0:
--                    part = '%s' % option_string
--
--                # if the Optional takes a value, format is:
--                #    -s ARGS or --long ARGS
--                else:
--                    default = action.dest.upper()
--                    args_string = self._format_args(action, default)
--                    part = '%s %s' % (option_string, args_string)
--
--                # make it look optional if it's not required or in a group
--                if not action.required and action not in group_actions:
--                    part = '[%s]' % part
--
--                # add the action string to the list
--                parts.append(part)
--
--        # insert things at the necessary indices
--        for i in _sorted(inserts, reverse=True):
--            parts[i:i] = [inserts[i]]
--
--        # join all the action items with spaces
--        text = ' '.join([item for item in parts if item is not None])
--
--        # clean up separators for mutually exclusive groups
--        open = r'[\[(]'
--        close = r'[\])]'
--        text = _re.sub(r'(%s) ' % open, r'\1', text)
--        text = _re.sub(r' (%s)' % close, r'\1', text)
--        text = _re.sub(r'%s *%s' % (open, close), r'', text)
--        text = _re.sub(r'\(([^|]*)\)', r'\1', text)
--        text = text.strip()
--
--        # return the text
--        return text
--
--    def _format_text(self, text):
--        text_width = self._width - self._current_indent
--        indent = ' ' * self._current_indent
--        return self._fill_text(text, text_width, indent) + '\n\n'
--
--    def _format_action(self, action):
--        # determine the required width and the entry label
--        help_position = min(self._action_max_length + 2,
--                            self._max_help_position)
--        help_width = self._width - help_position
--        action_width = help_position - self._current_indent - 2
--        action_header = self._format_action_invocation(action)
--
--        # no help; start on same line and add a final newline
--        if not action.help:
--            tup = self._current_indent, '', action_header
--            action_header = '%*s%s\n' % tup
--
--        # short action name; start on the same line and pad two spaces
--        elif len(action_header) <= action_width:
--            tup = self._current_indent, '', action_width, action_header
--            action_header = '%*s%-*s  ' % tup
--            indent_first = 0
--
--        # long action name; start on the next line
--        else:
--            tup = self._current_indent, '', action_header
--            action_header = '%*s%s\n' % tup
--            indent_first = help_position
--
--        # collect the pieces of the action help
--        parts = [action_header]
--
--        # if there was help for the action, add lines of help text
--        if action.help:
--            help_text = self._expand_help(action)
--            help_lines = self._split_lines(help_text, help_width)
--            parts.append('%*s%s\n' % (indent_first, '', help_lines[0]))
--            for line in help_lines[1:]:
--                parts.append('%*s%s\n' % (help_position, '', line))
--
--        # or add a newline if the description doesn't end with one
--        elif not action_header.endswith('\n'):
--            parts.append('\n')
--
--        # if there are any sub-actions, add their help as well
--        for subaction in self._iter_indented_subactions(action):
--            parts.append(self._format_action(subaction))
--
--        # return a single string
--        return self._join_parts(parts)
--
--    def _format_action_invocation(self, action):
--        if not action.option_strings:
--            metavar, = self._metavar_formatter(action, action.dest)(1)
--            return metavar
--
--        else:
--            parts = []
--
--            # if the Optional doesn't take a value, format is:
--            #    -s, --long
--            if action.nargs == 0:
--                parts.extend(action.option_strings)
--
--            # if the Optional takes a value, format is:
--            #    -s ARGS, --long ARGS
--            else:
--                default = action.dest.upper()
--                args_string = self._format_args(action, default)
--                for option_string in action.option_strings:
--                    parts.append('%s %s' % (option_string, args_string))
--
--            return ', '.join(parts)
--
--    def _metavar_formatter(self, action, default_metavar):
--        if action.metavar is not None:
--            result = action.metavar
--        elif action.choices is not None:
--            choice_strs = [str(choice) for choice in action.choices]
--            result = '{%s}' % ','.join(choice_strs)
--        else:
--            result = default_metavar
--
--        def format(tuple_size):
--            if isinstance(result, tuple):
--                return result
--            else:
--                return (result, ) * tuple_size
--        return format
--
--    def _format_args(self, action, default_metavar):
--        get_metavar = self._metavar_formatter(action, default_metavar)
--        if action.nargs is None:
--            result = '%s' % get_metavar(1)
--        elif action.nargs == OPTIONAL:
--            result = '[%s]' % get_metavar(1)
--        elif action.nargs == ZERO_OR_MORE:
--            result = '[%s [%s ...]]' % get_metavar(2)
--        elif action.nargs == ONE_OR_MORE:
--            result = '%s [%s ...]' % get_metavar(2)
--        elif action.nargs is PARSER:
--            result = '%s ...' % get_metavar(1)
--        else:
--            formats = ['%s' for _ in range(action.nargs)]
--            result = ' '.join(formats) % get_metavar(action.nargs)
--        return result
--
--    def _expand_help(self, action):
--        params = dict(vars(action), prog=self._prog)
--        for name in list(params):
--            if params[name] is SUPPRESS:
--                del params[name]
--        if params.get('choices') is not None:
--            choices_str = ', '.join([str(c) for c in params['choices']])
--            params['choices'] = choices_str
--        return self._get_help_string(action) % params
--
--    def _iter_indented_subactions(self, action):
--        try:
--            get_subactions = action._get_subactions
--        except AttributeError:
--            pass
--        else:
--            self._indent()
--            for subaction in get_subactions():
--                yield subaction
--            self._dedent()
--
--    def _split_lines(self, text, width):
--        text = self._whitespace_matcher.sub(' ', text).strip()
--        return _textwrap.wrap(text, width)
--
--    def _fill_text(self, text, width, indent):
--        text = self._whitespace_matcher.sub(' ', text).strip()
--        return _textwrap.fill(text, width, initial_indent=indent,
--                                           subsequent_indent=indent)
--
--    def _get_help_string(self, action):
--        return action.help
--
--
--class RawDescriptionHelpFormatter(HelpFormatter):
--    """Help message formatter which retains any formatting in descriptions.
--
--    Only the name of this class is considered a public API. All the methods
--    provided by the class are considered an implementation detail.
--    """
--
--    def _fill_text(self, text, width, indent):
--        return ''.join([indent + line for line in text.splitlines(True)])
--
--
--class RawTextHelpFormatter(RawDescriptionHelpFormatter):
--    """Help message formatter which retains formatting of all help text.
--
--    Only the name of this class is considered a public API. All the methods
--    provided by the class are considered an implementation detail.
--    """
--
--    def _split_lines(self, text, width):
--        return text.splitlines()
--
--
--class ArgumentDefaultsHelpFormatter(HelpFormatter):
--    """Help message formatter which adds default values to argument help.
--
--    Only the name of this class is considered a public API. All the methods
--    provided by the class are considered an implementation detail.
--    """
--
--    def _get_help_string(self, action):
--        help = action.help
--        if '%(default)' not in action.help:
--            if action.default is not SUPPRESS:
--                defaulting_nargs = [OPTIONAL, ZERO_OR_MORE]
--                if action.option_strings or action.nargs in defaulting_nargs:
--                    help += ' (default: %(default)s)'
--        return help
--
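A sketch of the formatter above in use; the '(default: ...)' suffix is appended only when the help string does not already mention %(default)s:

    import argparse

    parser = argparse.ArgumentParser(
        prog='demo',
        formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    parser.add_argument('--retries', type=int, default=3,
                        help='how many times to retry')

    parser.print_help()   # the --retries line ends with "(default: 3)"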
--
--# =====================
--# Options and Arguments
--# =====================
--
--def _get_action_name(argument):
--    if argument is None:
--        return None
--    elif argument.option_strings:
--        return  '/'.join(argument.option_strings)
--    elif argument.metavar not in (None, SUPPRESS):
--        return argument.metavar
--    elif argument.dest not in (None, SUPPRESS):
--        return argument.dest
--    else:
--        return None
--
--
--class ArgumentError(Exception):
--    """An error from creating or using an argument (optional or positional).
--
--    The string value of this exception is the message, augmented with
--    information about the argument that caused it.
--    """
--
--    def __init__(self, argument, message):
--        self.argument_name = _get_action_name(argument)
--        self.message = message
--
--    def __str__(self):
--        if self.argument_name is None:
--            format = '%(message)s'
--        else:
--            format = 'argument %(argument_name)s: %(message)s'
--        return format % dict(message=self.message,
--                             argument_name=self.argument_name)
--
--# ==============
--# Action classes
--# ==============
--
--class Action(_AttributeHolder):
--    """Information about how to convert command line strings to Python objects.
--
--    Action objects are used by an ArgumentParser to represent the information
--    needed to parse a single argument from one or more strings from the
--    command line. The keyword arguments to the Action constructor are also
--    all attributes of Action instances.
--
--    Keyword Arguments:
--
--        - option_strings -- A list of command-line option strings which
--            should be associated with this action.
--
--        - dest -- The name of the attribute to hold the created object(s)
--
--        - nargs -- The number of command-line arguments that should be
--            consumed. By default, one argument will be consumed and a single
--            value will be produced.  Other values include:
--                - N (an integer) consumes N arguments (and produces a list)
--                - '?' consumes zero or one arguments
--                - '*' consumes zero or more arguments (and produces a list)
--                - '+' consumes one or more arguments (and produces a list)
--            Note that the difference between the default and nargs=1 is that
--            with the default, a single value will be produced, while with
--            nargs=1, a list containing a single value will be produced.
--
--        - const -- The value to be produced if the option is specified and the
--            option uses an action that takes no values.
--
--        - default -- The value to be produced if the option is not specified.
--
--        - type -- The type which the command-line arguments should be converted
--            to, should be one of 'string', 'int', 'float', 'complex' or a
--            callable object that accepts a single string argument. If None,
--            'string' is assumed.
--
--        - choices -- A container of values that should be allowed. If not None,
--            after a command-line argument has been converted to the appropriate
--            type, an exception will be raised if it is not a member of this
--            collection.
--
--        - required -- True if the action must always be specified at the
--            command line. This is only meaningful for optional command-line
--            arguments.
--
--        - help -- The help string describing the argument.
--
--        - metavar -- The name to be used for the option's argument with the
--            help string. If None, the 'dest' value will be used as the name.
--    """
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 nargs=None,
--                 const=None,
--                 default=None,
--                 type=None,
--                 choices=None,
--                 required=False,
--                 help=None,
--                 metavar=None):
--        self.option_strings = option_strings
--        self.dest = dest
--        self.nargs = nargs
--        self.const = const
--        self.default = default
--        self.type = type
--        self.choices = choices
--        self.required = required
--        self.help = help
--        self.metavar = metavar
--
--    def _get_kwargs(self):
--        names = [
--            'option_strings',
--            'dest',
--            'nargs',
--            'const',
--            'default',
--            'type',
--            'choices',
--            'help',
--            'metavar',
--        ]
--        return [(name, getattr(self, name)) for name in names]
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        raise NotImplementedError(_('.__call__() not defined'))
--
--
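Action itself is abstract (its __call__ raises NotImplementedError); concrete behaviour comes from the subclasses that follow. A hypothetical custom subclass, shown only to illustrate the __call__ contract of (parser, namespace, values, option_string)::

    import argparse

    class UpperAction(argparse.Action):
        # store the converted value upper-cased on the namespace
        def __call__(self, parser, namespace, values, option_string=None):
            setattr(namespace, self.dest, values.upper())

    parser = argparse.ArgumentParser()
    parser.add_argument('--name', action=UpperAction)
    print(parser.parse_args(['--name', 'ipython']))   # Namespace(name='IPYTHON')
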
--class _StoreAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 nargs=None,
--                 const=None,
--                 default=None,
--                 type=None,
--                 choices=None,
--                 required=False,
--                 help=None,
--                 metavar=None):
--        if nargs == 0:
--            raise ValueError('nargs must be > 0')
--        if const is not None and nargs != OPTIONAL:
--            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
--        super(_StoreAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            nargs=nargs,
--            const=const,
--            default=default,
--            type=type,
--            choices=choices,
--            required=required,
--            help=help,
--            metavar=metavar)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        setattr(namespace, self.dest, values)
--
--
--class _StoreConstAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 const,
--                 default=None,
--                 required=False,
--                 help=None,
--                 metavar=None):
--        super(_StoreConstAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            nargs=0,
--            const=const,
--            default=default,
--            required=required,
--            help=help)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        setattr(namespace, self.dest, self.const)
--
--
--class _StoreTrueAction(_StoreConstAction):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 default=False,
--                 required=False,
--                 help=None):
--        super(_StoreTrueAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            const=True,
--            default=default,
--            required=required,
--            help=help)
--
--
--class _StoreFalseAction(_StoreConstAction):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 default=True,
--                 required=False,
--                 help=None):
--        super(_StoreFalseAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            const=False,
--            default=default,
--            required=required,
--            help=help)
--
--
--class _AppendAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 nargs=None,
--                 const=None,
--                 default=None,
--                 type=None,
--                 choices=None,
--                 required=False,
--                 help=None,
--                 metavar=None):
--        if nargs == 0:
--            raise ValueError('nargs must be > 0')
--        if const is not None and nargs != OPTIONAL:
--            raise ValueError('nargs must be %r to supply const' % OPTIONAL)
--        super(_AppendAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            nargs=nargs,
--            const=const,
--            default=default,
--            type=type,
--            choices=choices,
--            required=required,
--            help=help,
--            metavar=metavar)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        items = _copy.copy(_ensure_value(namespace, self.dest, []))
--        items.append(values)
--        setattr(namespace, self.dest, items)
--
--
--class _AppendConstAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 const,
--                 default=None,
--                 required=False,
--                 help=None,
--                 metavar=None):
--        super(_AppendConstAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            nargs=0,
--            const=const,
--            default=default,
--            required=required,
--            help=help,
--            metavar=metavar)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        items = _copy.copy(_ensure_value(namespace, self.dest, []))
--        items.append(self.const)
--        setattr(namespace, self.dest, items)
--
--
--class _CountAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest,
--                 default=None,
--                 required=False,
--                 help=None):
--        super(_CountAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            nargs=0,
--            default=default,
--            required=required,
--            help=help)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        new_count = _ensure_value(namespace, self.dest, 0) + 1
--        setattr(namespace, self.dest, new_count)
--
--
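The store/store_const/store_true/store_false/append/append_const/count subclasses are normally reached through the string names registered on _ActionsContainer further down. A short sketch of the corresponding add_argument() calls (flag names are made up for the example)::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--verbose', action='store_true')   # _StoreTrueAction
    parser.add_argument('--include', action='append')       # _AppendAction
    parser.add_argument('-v', action='count', default=0)    # _CountAction

    args = parser.parse_args(
        ['--verbose', '--include', 'a', '--include', 'b', '-vv'])
    # args.verbose == True, args.include == ['a', 'b'], args.v == 2
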
--class _HelpAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest=SUPPRESS,
--                 default=SUPPRESS,
--                 help=None):
--        super(_HelpAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            default=default,
--            nargs=0,
--            help=help)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        parser.print_help()
--        parser.exit()
--
--
--class _VersionAction(Action):
--
--    def __init__(self,
--                 option_strings,
--                 dest=SUPPRESS,
--                 default=SUPPRESS,
--                 help=None):
--        super(_VersionAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            default=default,
--            nargs=0,
--            help=help)
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        parser.print_version()
--        parser.exit()
--
--
--class _SubParsersAction(Action):
--
--    class _ChoicesPseudoAction(Action):
--
--        def __init__(self, name, help):
--            sup = super(_SubParsersAction._ChoicesPseudoAction, self)
--            sup.__init__(option_strings=[], dest=name, help=help)
--
--    def __init__(self,
--                 option_strings,
--                 prog,
--                 parser_class,
--                 dest=SUPPRESS,
--                 help=None,
--                 metavar=None):
--
--        self._prog_prefix = prog
--        self._parser_class = parser_class
--        self._name_parser_map = {}
--        self._choices_actions = []
--
--        super(_SubParsersAction, self).__init__(
--            option_strings=option_strings,
--            dest=dest,
--            nargs=PARSER,
--            choices=self._name_parser_map,
--            help=help,
--            metavar=metavar)
--
--    def add_parser(self, name, **kwargs):
--        # set prog from the existing prefix
--        if kwargs.get('prog') is None:
--            kwargs['prog'] = '%s %s' % (self._prog_prefix, name)
--
--        # create a pseudo-action to hold the choice help
--        if 'help' in kwargs:
--            help = kwargs.pop('help')
--            choice_action = self._ChoicesPseudoAction(name, help)
--            self._choices_actions.append(choice_action)
--
--        # create the parser and add it to the map
--        parser = self._parser_class(**kwargs)
--        self._name_parser_map[name] = parser
--        return parser
--
--    def _get_subactions(self):
--        return self._choices_actions
--
--    def __call__(self, parser, namespace, values, option_string=None):
--        parser_name = values[0]
--        arg_strings = values[1:]
--
--        # set the parser name if requested
--        if self.dest is not SUPPRESS:
--            setattr(namespace, self.dest, parser_name)
--
--        # select the parser
--        try:
--            parser = self._name_parser_map[parser_name]
--        except KeyError:
--            tup = parser_name, ', '.join(self._name_parser_map)
--            msg = _('unknown parser %r (choices: %s)' % tup)
--            raise ArgumentError(self, msg)
--
--        # parse all the remaining options into the namespace
--        parser.parse_args(arg_strings, namespace)
--
--
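_SubParsersAction is what backs ArgumentParser.add_subparsers(); each add_parser() call creates a child parser that receives the remaining argument strings. A compact illustration of the intended use (the command name is invented)::

    import argparse

    parser = argparse.ArgumentParser(prog='tool')
    subparsers = parser.add_subparsers(dest='command')

    fetch = subparsers.add_parser('fetch', help='download something')
    fetch.add_argument('url')

    args = parser.parse_args(['fetch', 'http://example.org'])
    # args.command == 'fetch', args.url == 'http://example.org'
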
--# ==============
--# Type classes
--# ==============
--
--class FileType(object):
--    """Factory for creating file object types
--
--    Instances of FileType are typically passed as type= arguments to the
--    ArgumentParser add_argument() method.
--
--    Keyword Arguments:
--        - mode -- A string indicating how the file is to be opened. Accepts the
--            same values as the builtin open() function.
--        - bufsize -- The file's desired buffer size. Accepts the same values as
--            the builtin open() function.
--    """
--
--    def __init__(self, mode='r', bufsize=None):
--        self._mode = mode
--        self._bufsize = bufsize
--
--    def __call__(self, string):
--        # the special argument "-" means sys.std{in,out}
--        if string == '-':
--            if 'r' in self._mode:
--                return _sys.stdin
--            elif 'w' in self._mode:
--                return _sys.stdout
--            else:
--                msg = _('argument "-" with mode %r' % self._mode)
--                raise ValueError(msg)
--
--        # all other arguments are used as file names
--        if self._bufsize:
--            return open(string, self._mode, self._bufsize)
--        else:
--            return open(string, self._mode)
--
--    def __repr__(self):
--        args = [self._mode, self._bufsize]
--        args_str = ', '.join([repr(arg) for arg in args if arg is not None])
--        return '%s(%s)' % (type(self).__name__, args_str)
--
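FileType instances are meant to be passed as the type= argument of add_argument(), with '-' mapped to stdin or stdout in __call__() above. A minimal sketch::

    import argparse
    import sys

    parser = argparse.ArgumentParser()
    parser.add_argument('--log', type=argparse.FileType('w'),
                        default=sys.stdout)
    args = parser.parse_args(['--log', '-'])   # '-' is mapped to sys.stdout
    args.log.write('hello\n')
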
--# ===========================
--# Optional and Positional Parsing
--# ===========================
--
--class Namespace(_AttributeHolder):
--    """Simple object for storing attributes.
--
--    Implements equality by attribute names and values, and provides a simple
--    string representation.
--    """
--
--    def __init__(self, **kwargs):
--        for name in kwargs:
--            setattr(self, name, kwargs[name])
--
--    def __eq__(self, other):
--        return vars(self) == vars(other)
--
--    def __ne__(self, other):
--        return not (self == other)
--
--
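Namespace is the plain attribute bag returned by parse_args(); an existing instance can also be passed in, in which case parsed values and defaults are only added for attributes that are not already set. Illustrative only::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--colour', default='blue')

    ns = argparse.Namespace(colour='red', debug=True)
    parser.parse_args([], namespace=ns)
    # ns.colour stays 'red' (already set); ns.debug is untouched
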
--class _ActionsContainer(object):
--
--    def __init__(self,
--                 description,
--                 prefix_chars,
--                 argument_default,
--                 conflict_handler):
--        super(_ActionsContainer, self).__init__()
--
--        self.description = description
--        self.argument_default = argument_default
--        self.prefix_chars = prefix_chars
--        self.conflict_handler = conflict_handler
--
--        # set up registries
--        self._registries = {}
--
--        # register actions
--        self.register('action', None, _StoreAction)
--        self.register('action', 'store', _StoreAction)
--        self.register('action', 'store_const', _StoreConstAction)
--        self.register('action', 'store_true', _StoreTrueAction)
--        self.register('action', 'store_false', _StoreFalseAction)
--        self.register('action', 'append', _AppendAction)
--        self.register('action', 'append_const', _AppendConstAction)
--        self.register('action', 'count', _CountAction)
--        self.register('action', 'help', _HelpAction)
--        self.register('action', 'version', _VersionAction)
--        self.register('action', 'parsers', _SubParsersAction)
--
--        # raise an exception if the conflict handler is invalid
--        self._get_handler()
--
--        # action storage
--        self._actions = []
--        self._option_string_actions = {}
--
--        # groups
--        self._action_groups = []
--        self._mutually_exclusive_groups = []
--
--        # defaults storage
--        self._defaults = {}
--
--        # determines whether an "option" looks like a negative number
--        self._negative_number_matcher = _re.compile(r'^-\d+|-\d*.\d+$')
--
--        # whether or not there are any optionals that look like negative
--        # numbers -- uses a list so it can be shared and edited
--        self._has_negative_number_optionals = []
--
--    # ====================
--    # Registration methods
--    # ====================
--    def register(self, registry_name, value, object):
--        registry = self._registries.setdefault(registry_name, {})
--        registry[value] = object
--
--    def _registry_get(self, registry_name, value, default=None):
--        return self._registries[registry_name].get(value, default)
--
--    # ==================================
--    # Namespace default settings methods
--    # ==================================
--    def set_defaults(self, **kwargs):
--        self._defaults.update(kwargs)
--
--        # if these defaults match any existing arguments, replace
--        # the previous default on the object with the new one
--        for action in self._actions:
--            if action.dest in kwargs:
--                action.default = kwargs[action.dest]
--
--    # =======================
--    # Adding argument actions
--    # =======================
--    def add_argument(self, *args, **kwargs):
--        """
--        add_argument(dest, ..., name=value, ...)
--        add_argument(option_string, option_string, ..., name=value, ...)
--        """
--
--        # if no positional args are supplied or only one is supplied and
--        # it doesn't look like an option string, parse a positional
--        # argument
--        chars = self.prefix_chars
--        if not args or len(args) == 1 and args[0][0] not in chars:
--            kwargs = self._get_positional_kwargs(*args, **kwargs)
--
--        # otherwise, we're adding an optional argument
--        else:
--            kwargs = self._get_optional_kwargs(*args, **kwargs)
--
--        # if no default was supplied, use the parser-level default
--        if 'default' not in kwargs:
--            dest = kwargs['dest']
--            if dest in self._defaults:
--                kwargs['default'] = self._defaults[dest]
--            elif self.argument_default is not None:
--                kwargs['default'] = self.argument_default
--
--        # create the action object, and add it to the parser
--        action_class = self._pop_action_class(kwargs)
--        action = action_class(**kwargs)
--        return self._add_action(action)
--
--    def add_argument_group(self, *args, **kwargs):
--        group = _ArgumentGroup(self, *args, **kwargs)
--        self._action_groups.append(group)
--        return group
--
--    def add_mutually_exclusive_group(self, **kwargs):
--        group = _MutuallyExclusiveGroup(self, **kwargs)
--        self._mutually_exclusive_groups.append(group)
--        return group
--
--    def _add_action(self, action):
--        # resolve any conflicts
--        self._check_conflict(action)
--
--        # add to actions list
--        self._actions.append(action)
--        action.container = self
--
--        # index the action by any option strings it has
--        for option_string in action.option_strings:
--            self._option_string_actions[option_string] = action
--
--        # set the flag if any option strings look like negative numbers
--        for option_string in action.option_strings:
--            if self._negative_number_matcher.match(option_string):
--                if not self._has_negative_number_optionals:
--                    self._has_negative_number_optionals.append(True)
--
--        # return the created action
--        return action
--
--    def _remove_action(self, action):
--        self._actions.remove(action)
--
--    def _add_container_actions(self, container):
--        # collect groups by titles
--        title_group_map = {}
--        for group in self._action_groups:
--            if group.title in title_group_map:
--                msg = _('cannot merge actions - two groups are named %r')
--                raise ValueError(msg % (group.title))
--            title_group_map[group.title] = group
--
--        # map each action to its group
--        group_map = {}
--        for group in container._action_groups:
--
--            # if a group with the title exists, use that, otherwise
--            # create a new group matching the container's group
--            if group.title not in title_group_map:
--                title_group_map[group.title] = self.add_argument_group(
--                    title=group.title,
--                    description=group.description,
--                    conflict_handler=group.conflict_handler)
--
--            # map the actions to their new group
--            for action in group._group_actions:
--                group_map[action] = title_group_map[group.title]
--
--        # add all actions to this container or their group
--        for action in container._actions:
--            group_map.get(action, self)._add_action(action)
--
--    def _get_positional_kwargs(self, dest, **kwargs):
--        # make sure required is not specified
--        if 'required' in kwargs:
--            msg = _("'required' is an invalid argument for positionals")
--            raise TypeError(msg)
--
--        # mark positional arguments as required if at least one is
--        # always required
--        if kwargs.get('nargs') not in [OPTIONAL, ZERO_OR_MORE]:
--            kwargs['required'] = True
--        if kwargs.get('nargs') == ZERO_OR_MORE and 'default' not in kwargs:
--            kwargs['required'] = True
--
--        # return the keyword arguments with no option strings
--        return dict(kwargs, dest=dest, option_strings=[])
--
--    def _get_optional_kwargs(self, *args, **kwargs):
--        # determine short and long option strings
--        option_strings = []
--        long_option_strings = []
--        for option_string in args:
--            # error on one-or-fewer-character option strings
--            if len(option_string) < 2:
--                msg = _('invalid option string %r: '
--                        'must be at least two characters long')
--                raise ValueError(msg % option_string)
--
--            # error on strings that don't start with an appropriate prefix
--            if not option_string[0] in self.prefix_chars:
--                msg = _('invalid option string %r: '
--                        'must start with a character %r')
--                tup = option_string, self.prefix_chars
--                raise ValueError(msg % tup)
--
--            # error on strings that are all prefix characters
--            if not (_set(option_string) - _set(self.prefix_chars)):
--                msg = _('invalid option string %r: '
--                        'must contain characters other than %r')
--                tup = option_string, self.prefix_chars
--                raise ValueError(msg % tup)
--
--            # strings starting with two prefix characters are long options
--            option_strings.append(option_string)
--            if option_string[0] in self.prefix_chars:
--                if option_string[1] in self.prefix_chars:
--                    long_option_strings.append(option_string)
--
--        # infer destination, '--foo-bar' -> 'foo_bar' and '-x' -> 'x'
--        dest = kwargs.pop('dest', None)
--        if dest is None:
--            if long_option_strings:
--                dest_option_string = long_option_strings[0]
--            else:
--                dest_option_string = option_strings[0]
--            dest = dest_option_string.lstrip(self.prefix_chars)
--            dest = dest.replace('-', '_')
--
--        # return the updated keyword arguments
--        return dict(kwargs, dest=dest, option_strings=option_strings)
--
--    def _pop_action_class(self, kwargs, default=None):
--        action = kwargs.pop('action', default)
--        return self._registry_get('action', action, action)
--
--    def _get_handler(self):
--        # determine function from conflict handler string
--        handler_func_name = '_handle_conflict_%s' % self.conflict_handler
--        try:
--            return getattr(self, handler_func_name)
--        except AttributeError:
--            msg = _('invalid conflict_resolution value: %r')
--            raise ValueError(msg % self.conflict_handler)
--
--    def _check_conflict(self, action):
--
--        # find all options that conflict with this option
--        confl_optionals = []
--        for option_string in action.option_strings:
--            if option_string in self._option_string_actions:
--                confl_optional = self._option_string_actions[option_string]
--                confl_optionals.append((option_string, confl_optional))
--
--        # resolve any conflicts
--        if confl_optionals:
--            conflict_handler = self._get_handler()
--            conflict_handler(action, confl_optionals)
--
--    def _handle_conflict_error(self, action, conflicting_actions):
--        message = _('conflicting option string(s): %s')
--        conflict_string = ', '.join([option_string
--                                     for option_string, action
--                                     in conflicting_actions])
--        raise ArgumentError(action, message % conflict_string)
--
--    def _handle_conflict_resolve(self, action, conflicting_actions):
--
--        # remove all conflicting options
--        for option_string, action in conflicting_actions:
--
--            # remove the conflicting option
--            action.option_strings.remove(option_string)
--            self._option_string_actions.pop(option_string, None)
--
--            # if the option now has no option string, remove it from the
--            # container holding it
--            if not action.option_strings:
--                action.container._remove_action(action)
--
--
--class _ArgumentGroup(_ActionsContainer):
--
--    def __init__(self, container, title=None, description=None, **kwargs):
--        # add any missing keyword arguments by checking the container
--        update = kwargs.setdefault
--        update('conflict_handler', container.conflict_handler)
--        update('prefix_chars', container.prefix_chars)
--        update('argument_default', container.argument_default)
--        super_init = super(_ArgumentGroup, self).__init__
--        super_init(description=description, **kwargs)
--
--        # group attributes
--        self.title = title
--        self._group_actions = []
--
--        # share most attributes with the container
--        self._registries = container._registries
--        self._actions = container._actions
--        self._option_string_actions = container._option_string_actions
--        self._defaults = container._defaults
--        self._has_negative_number_optionals = \
--            container._has_negative_number_optionals
--
--    def _add_action(self, action):
--        action = super(_ArgumentGroup, self)._add_action(action)
--        self._group_actions.append(action)
--        return action
--
--    def _remove_action(self, action):
--        super(_ArgumentGroup, self)._remove_action(action)
--        self._group_actions.remove(action)
--
--
--class _MutuallyExclusiveGroup(_ArgumentGroup):
--
--    def __init__(self, container, required=False):
--        super(_MutuallyExclusiveGroup, self).__init__(container)
--        self.required = required
--        self._container = container
--
--    def _add_action(self, action):
--        if action.required:
--            msg = _('mutually exclusive arguments must be optional')
--            raise ValueError(msg)
--        action = self._container._add_action(action)
--        self._group_actions.append(action)
--        return action
--
--    def _remove_action(self, action):
--        self._container._remove_action(action)
--        self._group_actions.remove(action)
--
--
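_ArgumentGroup and _MutuallyExclusiveGroup are created through the public add_argument_group() and add_mutually_exclusive_group() methods; as the constructors above show, a group shares its container's registries and action list. A short sketch of the public side of that API::

    import argparse

    parser = argparse.ArgumentParser()
    output = parser.add_argument_group('output options')
    output.add_argument('--outfile')

    mode = parser.add_mutually_exclusive_group()
    mode.add_argument('--quiet', action='store_true')
    mode.add_argument('--verbose', action='store_true')

    # passing both --quiet and --verbose makes parse_args() report an error
    parser.parse_args(['--verbose'])
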
--class ArgumentParser(_AttributeHolder, _ActionsContainer):
--    """Object for parsing command line strings into Python objects.
--
--    Keyword Arguments:
--        - prog -- The name of the program (default: sys.argv[0])
--        - usage -- A usage message (default: auto-generated from arguments)
--        - description -- A description of what the program does
--        - epilog -- Text following the argument descriptions
--        - version -- Add a -v/--version option with the given version string
--        - parents -- Parsers whose arguments should be copied into this one
--        - formatter_class -- HelpFormatter class for printing help messages
--        - prefix_chars -- Characters that prefix optional arguments
--        - fromfile_prefix_chars -- Characters that prefix files containing
--            additional arguments
--        - argument_default -- The default value for all arguments
--        - conflict_handler -- String indicating how to handle conflicts
--        - add_help -- Add a -h/--help option
--    """
--
--    def __init__(self,
--                 prog=None,
--                 usage=None,
--                 description=None,
--                 epilog=None,
--                 version=None,
--                 parents=[],
--                 formatter_class=HelpFormatter,
--                 prefix_chars='-',
--                 fromfile_prefix_chars=None,
--                 argument_default=None,
--                 conflict_handler='error',
--                 add_help=True):
--
--        superinit = super(ArgumentParser, self).__init__
--        superinit(description=description,
--                  prefix_chars=prefix_chars,
--                  argument_default=argument_default,
--                  conflict_handler=conflict_handler)
--
--        # default setting for prog
--        if prog is None:
--            prog = _os.path.basename(_sys.argv[0])
--
--        self.prog = prog
--        self.usage = usage
--        self.epilog = epilog
--        self.version = version
--        self.formatter_class = formatter_class
--        self.fromfile_prefix_chars = fromfile_prefix_chars
--        self.add_help = add_help
--
--        add_group = self.add_argument_group
--        self._positionals = add_group(_('positional arguments'))
--        self._optionals = add_group(_('optional arguments'))
--        self._subparsers = None
--
--        # register types
--        def identity(string):
--            return string
--        self.register('type', None, identity)
--
--        # add help and version arguments if necessary
--        # (using explicit default to override global argument_default)
--        if self.add_help:
--            self.add_argument(
--                '-h', '--help', action='help', default=SUPPRESS,
--                help=_('show this help message and exit'))
--        if self.version:
--            self.add_argument(
--                '-v', '--version', action='version', default=SUPPRESS,
--                help=_("show program's version number and exit"))
--
--        # add parent arguments and defaults
--        for parent in parents:
--            self._add_container_actions(parent)
--            try:
--                defaults = parent._defaults
--            except AttributeError:
--                pass
--            else:
--                self._defaults.update(defaults)
--
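The constructor wiring above is what makes the parents= keyword work: each parent's actions and defaults are copied into the new parser via _add_container_actions(). A typical pattern, with the parent built using add_help=False so -h is not defined twice (the file name is made up)::

    import argparse

    common = argparse.ArgumentParser(add_help=False)
    common.add_argument('--config', default='site.cfg')

    parser = argparse.ArgumentParser(parents=[common],
                                     description='child parser')
    args = parser.parse_args([])
    # args.config == 'site.cfg', inherited from the parent parser
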
--    # =======================
--    # Pretty __repr__ methods
--    # =======================
--    def _get_kwargs(self):
--        names = [
--            'prog',
--            'usage',
--            'description',
--            'version',
--            'formatter_class',
--            'conflict_handler',
--            'add_help',
--        ]
--        return [(name, getattr(self, name)) for name in names]
--
--    # ==================================
--    # Optional/Positional adding methods
--    # ==================================
--    def add_subparsers(self, **kwargs):
--        if self._subparsers is not None:
--            self.error(_('cannot have multiple subparser arguments'))
--
--        # add the parser class to the arguments if it's not present
--        kwargs.setdefault('parser_class', type(self))
--
--        if 'title' in kwargs or 'description' in kwargs:
--            title = _(kwargs.pop('title', 'subcommands'))
--            description = _(kwargs.pop('description', None))
--            self._subparsers = self.add_argument_group(title, description)
--        else:
--            self._subparsers = self._positionals
--
--        # prog defaults to the usage message of this parser, skipping
--        # optional arguments and with no "usage:" prefix
--        if kwargs.get('prog') is None:
--            formatter = self._get_formatter()
--            positionals = self._get_positional_actions()
--            groups = self._mutually_exclusive_groups
--            formatter.add_usage(self.usage, positionals, groups, '')
--            kwargs['prog'] = formatter.format_help().strip()
--
--        # create the parsers action and add it to the positionals list
--        parsers_class = self._pop_action_class(kwargs, 'parsers')
--        action = parsers_class(option_strings=[], **kwargs)
--        self._subparsers._add_action(action)
--
--        # return the created parsers action
--        return action
--
--    def _add_action(self, action):
--        if action.option_strings:
--            self._optionals._add_action(action)
--        else:
--            self._positionals._add_action(action)
--        return action
--
--    def _get_optional_actions(self):
--        return [action
--                for action in self._actions
--                if action.option_strings]
--
--    def _get_positional_actions(self):
--        return [action
--                for action in self._actions
--                if not action.option_strings]
--
--    # =====================================
--    # Command line argument parsing methods
--    # =====================================
--    def parse_args(self, args=None, namespace=None):
--        args, argv = self.parse_known_args(args, namespace)
--        if argv:
--            msg = _('unrecognized arguments: %s')
--            self.error(msg % ' '.join(argv))
--        return args
--
--    def parse_known_args(self, args=None, namespace=None):
--        # args default to the system args
--        if args is None:
--            args = _sys.argv[1:]
--
--        # default Namespace built from parser defaults
--        if namespace is None:
--            namespace = Namespace()
--
--        # add any action defaults that aren't present
--        for action in self._actions:
--            if action.dest is not SUPPRESS:
--                if not hasattr(namespace, action.dest):
--                    if action.default is not SUPPRESS:
--                        default = action.default
--                        if isinstance(action.default, _basestring):
--                            default = self._get_value(action, default)
--                        setattr(namespace, action.dest, default)
--
--        # add any parser defaults that aren't present
--        for dest in self._defaults:
--            if not hasattr(namespace, dest):
--                setattr(namespace, dest, self._defaults[dest])
--
--        # parse the arguments and exit if there are any errors
--        try:
--            return self._parse_known_args(args, namespace)
--        except ArgumentError:
--            err = _sys.exc_info()[1]
--            self.error(str(err))
--
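parse_known_args() is the forgiving variant of parse_args(): instead of treating unrecognised strings as an error it returns them, which is useful when another tool will consume the leftovers. Sketch::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--debug', action='store_true')

    args, extras = parser.parse_known_args(['--debug', '--unknown', 'x'])
    # args.debug == True, extras == ['--unknown', 'x']
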
--    def _parse_known_args(self, arg_strings, namespace):
--        # replace arg strings that are file references
--        if self.fromfile_prefix_chars is not None:
--            arg_strings = self._read_args_from_files(arg_strings)
--
--        # map all mutually exclusive arguments to the other arguments
--        # they can't occur with
--        action_conflicts = {}
--        for mutex_group in self._mutually_exclusive_groups:
--            group_actions = mutex_group._group_actions
--            for i, mutex_action in enumerate(mutex_group._group_actions):
--                conflicts = action_conflicts.setdefault(mutex_action, [])
--                conflicts.extend(group_actions[:i])
--                conflicts.extend(group_actions[i + 1:])
--
--        # find all option indices, and determine the arg_string_pattern
--        # which has an 'O' if there is an option at an index,
--        # an 'A' if there is an argument, or a '-' if there is a '--'
--        option_string_indices = {}
--        arg_string_pattern_parts = []
--        arg_strings_iter = iter(arg_strings)
--        for i, arg_string in enumerate(arg_strings_iter):
--
--            # all args after -- are non-options
--            if arg_string == '--':
--                arg_string_pattern_parts.append('-')
--                for arg_string in arg_strings_iter:
--                    arg_string_pattern_parts.append('A')
--
--            # otherwise, add the arg to the arg strings
--            # and note the index if it was an option
--            else:
--                option_tuple = self._parse_optional(arg_string)
--                if option_tuple is None:
--                    pattern = 'A'
--                else:
--                    option_string_indices[i] = option_tuple
--                    pattern = 'O'
--                arg_string_pattern_parts.append(pattern)
--
--        # join the pieces together to form the pattern
--        arg_strings_pattern = ''.join(arg_string_pattern_parts)
--
--        # converts arg strings to the appropriate type and then takes the action
--        seen_actions = _set()
--        seen_non_default_actions = _set()
--
--        def take_action(action, argument_strings, option_string=None):
--            seen_actions.add(action)
--            argument_values = self._get_values(action, argument_strings)
--
--            # error if this argument is not allowed with other previously
--            # seen arguments, assuming that actions that use the default
--            # value don't really count as "present"
--            if argument_values is not action.default:
--                seen_non_default_actions.add(action)
--                for conflict_action in action_conflicts.get(action, []):
--                    if conflict_action in seen_non_default_actions:
--                        msg = _('not allowed with argument %s')
--                        action_name = _get_action_name(conflict_action)
--                        raise ArgumentError(action, msg % action_name)
--
--            # take the action if we didn't receive a SUPPRESS value
--            # (e.g. from a default)
--            if argument_values is not SUPPRESS:
--                action(self, namespace, argument_values, option_string)
--
--        # function to convert arg_strings into an optional action
--        def consume_optional(start_index):
--
--            # get the optional identified at this index
--            option_tuple = option_string_indices[start_index]
--            action, option_string, explicit_arg = option_tuple
--
--            # identify additional optionals in the same arg string
--            # (e.g. -xyz is the same as -x -y -z if no args are required)
--            match_argument = self._match_argument
--            action_tuples = []
--            while True:
--
--                # if we found no optional action, skip it
--                if action is None:
--                    extras.append(arg_strings[start_index])
--                    return start_index + 1
--
--                # if there is an explicit argument, try to match the
--                # optional's string arguments to only this
--                if explicit_arg is not None:
--                    arg_count = match_argument(action, 'A')
--
--                    # if the action is a single-dash option and takes no
--                    # arguments, try to parse more single-dash options out
--                    # of the tail of the option string
--                    chars = self.prefix_chars
--                    if arg_count == 0 and option_string[1] not in chars:
--                        action_tuples.append((action, [], option_string))
--                        for char in self.prefix_chars:
--                            option_string = char + explicit_arg[0]
--                            explicit_arg = explicit_arg[1:] or None
--                            optionals_map = self._option_string_actions
--                            if option_string in optionals_map:
--                                action = optionals_map[option_string]
--                                break
--                        else:
--                            msg = _('ignored explicit argument %r')
--                            raise ArgumentError(action, msg % explicit_arg)
--
--                    # if the action expects exactly one argument, we've
--                    # successfully matched the option; exit the loop
--                    elif arg_count == 1:
--                        stop = start_index + 1
--                        args = [explicit_arg]
--                        action_tuples.append((action, args, option_string))
--                        break
--
--                    # error if a double-dash option did not use the
--                    # explicit argument
--                    else:
--                        msg = _('ignored explicit argument %r')
--                        raise ArgumentError(action, msg % explicit_arg)
--
--                # if there is no explicit argument, try to match the
--                # optional's string arguments with the following strings
--                # if successful, exit the loop
--                else:
--                    start = start_index + 1
--                    selected_patterns = arg_strings_pattern[start:]
--                    arg_count = match_argument(action, selected_patterns)
--                    stop = start + arg_count
--                    args = arg_strings[start:stop]
--                    action_tuples.append((action, args, option_string))
--                    break
--
--            # add the Optional to the list and return the index at which
--            # the Optional's string args stopped
--            assert action_tuples
--            for action, args, option_string in action_tuples:
--                take_action(action, args, option_string)
--            return stop
--
--        # the list of Positionals left to be parsed; this is modified
--        # by consume_positionals()
--        positionals = self._get_positional_actions()
--
--        # function to convert arg_strings into positional actions
--        def consume_positionals(start_index):
--            # match as many Positionals as possible
--            match_partial = self._match_arguments_partial
--            selected_pattern = arg_strings_pattern[start_index:]
--            arg_counts = match_partial(positionals, selected_pattern)
--
--            # slice off the appropriate arg strings for each Positional
--            # and add the Positional and its args to the list
--            for action, arg_count in zip(positionals, arg_counts):
--                args = arg_strings[start_index: start_index + arg_count]
--                start_index += arg_count
--                take_action(action, args)
--
--            # slice off the Positionals that we just parsed and return the
--            # index at which the Positionals' string args stopped
--            positionals[:] = positionals[len(arg_counts):]
--            return start_index
--
--        # consume Positionals and Optionals alternately, until we have
--        # passed the last option string
--        extras = []
--        start_index = 0
--        if option_string_indices:
--            max_option_string_index = max(option_string_indices)
--        else:
--            max_option_string_index = -1
--        while start_index <= max_option_string_index:
--
--            # consume any Positionals preceding the next option
--            next_option_string_index = min([
--                index
--                for index in option_string_indices
--                if index >= start_index])
--            if start_index != next_option_string_index:
--                positionals_end_index = consume_positionals(start_index)
--
--                # only try to parse the next optional if we didn't consume
--                # the option string during the positionals parsing
--                if positionals_end_index > start_index:
--                    start_index = positionals_end_index
--                    continue
--                else:
--                    start_index = positionals_end_index
--
--            # if we consumed all the positionals we could and we're not
--            # at the index of an option string, there were extra arguments
--            if start_index not in option_string_indices:
--                strings = arg_strings[start_index:next_option_string_index]
--                extras.extend(strings)
--                start_index = next_option_string_index
--
--            # consume the next optional and any arguments for it
--            start_index = consume_optional(start_index)
--
--        # consume any positionals following the last Optional
--        stop_index = consume_positionals(start_index)
--
--        # if we didn't consume all the argument strings, there were extras
--        extras.extend(arg_strings[stop_index:])
--
--        # if we didn't use all the Positional objects, there were too few
--        # arg strings supplied.
--        if positionals:
--            self.error(_('too few arguments'))
--
--        # make sure all required actions were present
--        for action in self._actions:
--            if action.required:
--                if action not in seen_actions:
--                    name = _get_action_name(action)
--                    self.error(_('argument %s is required') % name)
--
--        # make sure all required groups had one option present
--        for group in self._mutually_exclusive_groups:
--            if group.required:
--                for action in group._group_actions:
--                    if action in seen_non_default_actions:
--                        break
--
--                # if no actions were used, report the error
--                else:
--                    names = [_get_action_name(action)
--                             for action in group._group_actions
--                             if action.help is not SUPPRESS]
--                    msg = _('one of the arguments %s is required')
--                    self.error(msg % ' '.join(names))
--
--        # return the updated namespace and the extra arguments
--        return namespace, extras
--
--    def _read_args_from_files(self, arg_strings):
--        # expand arguments referencing files
--        new_arg_strings = []
--        for arg_string in arg_strings:
--
--            # for regular arguments, just add them back into the list
--            if arg_string[0] not in self.fromfile_prefix_chars:
--                new_arg_strings.append(arg_string)
--
--            # replace arguments referencing files with the file content
--            else:
--                try:
--                    args_file = open(arg_string[1:])
--                    try:
--                        arg_strings = args_file.read().splitlines()
--                        arg_strings = self._read_args_from_files(arg_strings)
--                        new_arg_strings.extend(arg_strings)
--                    finally:
--                        args_file.close()
--                except IOError:
--                    err = _sys.exc_info()[1]
--                    self.error(str(err))
--
--        # return the modified argument list
--        return new_arg_strings
--
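_read_args_from_files() is driven by the fromfile_prefix_chars constructor argument: any argument starting with one of those characters names a file whose lines are spliced into the command line, recursively. For example, assuming an args file that the snippet writes itself (file name is arbitrary)::

    import argparse

    with open('extra.args', 'w') as f:
        f.write('--retries\n5\n')          # one argument per line

    parser = argparse.ArgumentParser(fromfile_prefix_chars='@')
    parser.add_argument('--retries', type=int, default=1)
    args = parser.parse_args(['@extra.args'])
    # args.retries == 5
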
--    def _match_argument(self, action, arg_strings_pattern):
--        # match the pattern for this action to the arg strings
--        nargs_pattern = self._get_nargs_pattern(action)
--        match = _re.match(nargs_pattern, arg_strings_pattern)
--
--        # raise an exception if we weren't able to find a match
--        if match is None:
--            nargs_errors = {
--                None: _('expected one argument'),
--                OPTIONAL: _('expected at most one argument'),
--                ONE_OR_MORE: _('expected at least one argument'),
--            }
--            default = _('expected %s argument(s)') % action.nargs
--            msg = nargs_errors.get(action.nargs, default)
--            raise ArgumentError(action, msg)
--
--        # return the number of arguments matched
--        return len(match.group(1))
--
--    def _match_arguments_partial(self, actions, arg_strings_pattern):
--        # progressively shorten the actions list by slicing off the
--        # final actions until we find a match
--        result = []
--        for i in range(len(actions), 0, -1):
--            actions_slice = actions[:i]
--            pattern = ''.join([self._get_nargs_pattern(action)
--                               for action in actions_slice])
--            match = _re.match(pattern, arg_strings_pattern)
--            if match is not None:
--                result.extend([len(string) for string in match.groups()])
--                break
--
--        # return the list of arg string counts
--        return result
--
--    def _parse_optional(self, arg_string):
--        # if it's an empty string, it was meant to be a positional
--        if not arg_string:
--            return None
--
--        # if it doesn't start with a prefix, it was meant to be positional
--        if not arg_string[0] in self.prefix_chars:
--            return None
--
--        # if it's just dashes, it was meant to be positional
--        if not arg_string.strip('-'):
--            return None
--
--        # if the option string is present in the parser, return the action
--        if arg_string in self._option_string_actions:
--            action = self._option_string_actions[arg_string]
--            return action, arg_string, None
--
--        # search through all possible prefixes of the option string
--        # and all actions in the parser for possible interpretations
--        option_tuples = self._get_option_tuples(arg_string)
--
--        # if multiple actions match, the option string was ambiguous
--        if len(option_tuples) > 1:
--            options = ', '.join([option_string
--                for action, option_string, explicit_arg in option_tuples])
--            tup = arg_string, options
--            self.error(_('ambiguous option: %s could match %s') % tup)
--
--        # if exactly one action matched, this segmentation is good,
--        # so return the parsed action
--        elif len(option_tuples) == 1:
--            option_tuple, = option_tuples
--            return option_tuple
--
--        # if it was not found as an option, but it looks like a negative
--        # number, it was meant to be positional
--        # unless there are negative-number-like options
--        if self._negative_number_matcher.match(arg_string):
--            if not self._has_negative_number_optionals:
--                return None
--
--        # if it contains a space, it was meant to be a positional
--        if ' ' in arg_string:
--            return None
--
--        # it was meant to be an optional but there is no such option
--        # in this parser (though it might be a valid option in a subparser)
--        return None, arg_string, None
--
--    def _get_option_tuples(self, option_string):
--        result = []
--
--        # option strings starting with two prefix characters are only
--        # split at the '='
--        chars = self.prefix_chars
--        if option_string[0] in chars and option_string[1] in chars:
--            if '=' in option_string:
--                option_prefix, explicit_arg = option_string.split('=', 1)
--            else:
--                option_prefix = option_string
--                explicit_arg = None
--            for option_string in self._option_string_actions:
--                if option_string.startswith(option_prefix):
--                    action = self._option_string_actions[option_string]
--                    tup = action, option_string, explicit_arg
--                    result.append(tup)
--
--        # single character options can be concatenated with their arguments
--        # but multiple character options always have to have their argument
--        # separate
--        elif option_string[0] in chars and option_string[1] not in chars:
--            option_prefix = option_string
--            explicit_arg = None
--            short_option_prefix = option_string[:2]
--            short_explicit_arg = option_string[2:]
--
--            for option_string in self._option_string_actions:
--                if option_string == short_option_prefix:
--                    action = self._option_string_actions[option_string]
--                    tup = action, option_string, short_explicit_arg
--                    result.append(tup)
--                elif option_string.startswith(option_prefix):
--                    action = self._option_string_actions[option_string]
--                    tup = action, option_string, explicit_arg
--                    result.append(tup)
--
--        # shouldn't ever get here
--        else:
--            self.error(_('unexpected option string: %s') % option_string)
--
--        # return the collected option tuples
--        return result
--
--    def _get_nargs_pattern(self, action):
--        # in all examples below, we have to allow for '--' args
--        # which are represented as '-' in the pattern
--        nargs = action.nargs
--
--        # the default (None) is assumed to be a single argument
--        if nargs is None:
--            nargs_pattern = '(-*A-*)'
--
--        # allow zero or one arguments
--        elif nargs == OPTIONAL:
--            nargs_pattern = '(-*A?-*)'
--
--        # allow zero or more arguments
--        elif nargs == ZERO_OR_MORE:
--            nargs_pattern = '(-*[A-]*)'
--
--        # allow one or more arguments
--        elif nargs == ONE_OR_MORE:
--            nargs_pattern = '(-*A[A-]*)'
--
--        # allow one argument followed by any number of options or arguments
--        elif nargs is PARSER:
--            nargs_pattern = '(-*A[-AO]*)'
--
--        # all others should be integers
--        else:
--            nargs_pattern = '(-*%s-*)' % '-*'.join('A' * nargs)
--
--        # if this is an optional action, -- is not allowed
--        if action.option_strings:
--            nargs_pattern = nargs_pattern.replace('-*', '')
--            nargs_pattern = nargs_pattern.replace('-', '')
--
--        # return the pattern
--        return nargs_pattern
--
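The regular-expression fragments returned by _get_nargs_pattern() correspond directly to the nargs values accepted by add_argument(); matching them against the 'A'/'O' pattern string is how the parser decides how many strings each action consumes. The user-visible side of those patterns, with example argument names::

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument('--point', nargs=2, type=float)   # exactly two values
    parser.add_argument('files', nargs='+')               # one or more values

    args = parser.parse_args(['--point', '1.5', '2.5', 'a.txt', 'b.txt'])
    # args.point == [1.5, 2.5], args.files == ['a.txt', 'b.txt']
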
--    # ========================
--    # Value conversion methods
--    # ========================
--    def _get_values(self, action, arg_strings):
--        # for everything but PARSER args, strip out '--'
--        if action.nargs is not PARSER:
--            arg_strings = [s for s in arg_strings if s != '--']
--
--        # optional argument produces a default when not present
--        if not arg_strings and action.nargs == OPTIONAL:
--            if action.option_strings:
--                value = action.const
--            else:
--                value = action.default
--            if isinstance(value, _basestring):
--                value = self._get_value(action, value)
--                self._check_value(action, value)
--
--        # when nargs='*' on a positional, if there were no command-line
--        # args, use the default if it is anything other than None
--        elif (not arg_strings and action.nargs == ZERO_OR_MORE and
--              not action.option_strings):
--            if action.default is not None:
--                value = action.default
--            else:
--                value = arg_strings
--            self._check_value(action, value)
--
--        # single argument or optional argument produces a single value
--        elif len(arg_strings) == 1 and action.nargs in [None, OPTIONAL]:
--            arg_string, = arg_strings
--            value = self._get_value(action, arg_string)
--            self._check_value(action, value)
--
--        # PARSER arguments convert all values, but check only the first
--        elif action.nargs is PARSER:
--            value = [self._get_value(action, v) for v in arg_strings]
--            self._check_value(action, value[0])
--
--        # all other types of nargs produce a list
--        else:
--            value = [self._get_value(action, v) for v in arg_strings]
--            for v in value:
--                self._check_value(action, v)
--
--        # return the converted value
--        return value
--
--    def _get_value(self, action, arg_string):
--        type_func = self._registry_get('type', action.type, action.type)
--        if not hasattr(type_func, '__call__'):
--            msg = _('%r is not callable')
--            raise ArgumentError(action, msg % type_func)
--
--        # convert the value to the appropriate type
--        try:
--            result = type_func(arg_string)
--
--        # TypeErrors or ValueErrors indicate errors
--        except (TypeError, ValueError):
--            name = getattr(action.type, '__name__', repr(action.type))
--            msg = _('invalid %s value: %r')
--            raise ArgumentError(action, msg % (name, arg_string))
--
--        # return the converted value
--        return result
--
--    def _check_value(self, action, value):
--        # converted value must be one of the choices (if specified)
--        if action.choices is not None and value not in action.choices:
--            tup = value, ', '.join(map(repr, action.choices))
--            msg = _('invalid choice: %r (choose from %s)') % tup
--            raise ArgumentError(action, msg)
--
--    # =======================
--    # Help-formatting methods
--    # =======================
--    def format_usage(self):
--        formatter = self._get_formatter()
--        formatter.add_usage(self.usage, self._actions,
--                            self._mutually_exclusive_groups)
--        return formatter.format_help()
--
--    def format_help(self):
--        formatter = self._get_formatter()
--
--        # usage
--        formatter.add_usage(self.usage, self._actions,
--                            self._mutually_exclusive_groups)
--
--        # description
--        formatter.add_text(self.description)
--
--        # positionals, optionals and user-defined groups
--        for action_group in self._action_groups:
--            formatter.start_section(action_group.title)
--            formatter.add_text(action_group.description)
--            formatter.add_arguments(action_group._group_actions)
--            formatter.end_section()
--
--        # epilog
--        formatter.add_text(self.epilog)
--
--        # determine help from format above
--        return formatter.format_help()
--
--    def format_version(self):
--        formatter = self._get_formatter()
--        formatter.add_text(self.version)
--        return formatter.format_help()
--
--    def _get_formatter(self):
--        return self.formatter_class(prog=self.prog)
--
--    # =====================
--    # Help-printing methods
--    # =====================
--    def print_usage(self, file=None):
--        self._print_message(self.format_usage(), file)
--
--    def print_help(self, file=None):
--        self._print_message(self.format_help(), file)
--
--    def print_version(self, file=None):
--        self._print_message(self.format_version(), file)
--
--    def _print_message(self, message, file=None):
--        if message:
--            if file is None:
--                file = _sys.stderr
--            file.write(message)
--
--    # ===============
--    # Exiting methods
--    # ===============
--    def exit(self, status=0, message=None):
--        if message:
--            _sys.stderr.write(message)
--        _sys.exit(status)
--
--    def error(self, message):
--        """error(message: string)
--
--        Prints a usage message incorporating the message to stderr and
--        exits.
--
--        If you override this in a subclass, it should not return -- it
--        should either exit or raise an exception.
--        """
--        self.print_usage(_sys.stderr)
--        self.exit(2, _('%s: error: %s\n') % (self.prog, message))
-Index: ipython-0.10/IPython/external/configobj/_configobj.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/configobj/_configobj.py
-@@ -0,0 +1,2501 @@
-+# configobj.py
-+# A config file reader/writer that supports nested sections in config files.
-+# Copyright (C) 2005-2008 Michael Foord, Nicola Larosa
-+# E-mail: fuzzyman AT voidspace DOT org DOT uk
-+#         nico AT tekNico DOT net
-+
-+# ConfigObj 4
-+# http://www.voidspace.org.uk/python/configobj.html
-+
-+# Released subject to the BSD License
-+# Please see http://www.voidspace.org.uk/python/license.shtml
-+
-+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
-+# For information about bugfixes, updates and support, please join the
-+# ConfigObj mailing list:
-+# http://lists.sourceforge.net/lists/listinfo/configobj-develop
-+# Comments, suggestions and bug reports welcome.
-+
-+from __future__ import generators
-+
-+import sys
-+INTP_VER = sys.version_info[:2]
-+if INTP_VER < (2, 2):
-+    raise RuntimeError("Python v.2.2 or later needed")
-+
-+import os, re
-+compiler = None
-+try:
-+    import compiler
-+except ImportError:
-+    # for IronPython
-+    pass
-+from types import StringTypes
-+from warnings import warn
-+try:
-+    from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
-+except ImportError:
-+    # Python 2.2 does not have these
-+    # UTF-8
-+    BOM_UTF8 = '\xef\xbb\xbf'
-+    # UTF-16, little endian
-+    BOM_UTF16_LE = '\xff\xfe'
-+    # UTF-16, big endian
-+    BOM_UTF16_BE = '\xfe\xff'
-+    if sys.byteorder == 'little':
-+        # UTF-16, native endianness
-+        BOM_UTF16 = BOM_UTF16_LE
-+    else:
-+        # UTF-16, native endianness
-+        BOM_UTF16 = BOM_UTF16_BE
-+
-+# A dictionary mapping BOM to
-+# the encoding to decode with, and what to set the
-+# encoding attribute to.
-+BOMS = {
-+    BOM_UTF8: ('utf_8', None),
-+    BOM_UTF16_BE: ('utf16_be', 'utf_16'),
-+    BOM_UTF16_LE: ('utf16_le', 'utf_16'),
-+    BOM_UTF16: ('utf_16', 'utf_16'),
-+    }
-+# All legal variants of the BOM codecs.
-+# TODO: the list of aliases is not meant to be exhaustive, is there a
-+#   better way ?
-+BOM_LIST = {
-+    'utf_16': 'utf_16',
-+    'u16': 'utf_16',
-+    'utf16': 'utf_16',
-+    'utf-16': 'utf_16',
-+    'utf16_be': 'utf16_be',
-+    'utf_16_be': 'utf16_be',
-+    'utf-16be': 'utf16_be',
-+    'utf16_le': 'utf16_le',
-+    'utf_16_le': 'utf16_le',
-+    'utf-16le': 'utf16_le',
-+    'utf_8': 'utf_8',
-+    'u8': 'utf_8',
-+    'utf': 'utf_8',
-+    'utf8': 'utf_8',
-+    'utf-8': 'utf_8',
-+    }
-+
-+# Map of encodings to the BOM to write.
-+BOM_SET = {
-+    'utf_8': BOM_UTF8,
-+    'utf_16': BOM_UTF16,
-+    'utf16_be': BOM_UTF16_BE,
-+    'utf16_le': BOM_UTF16_LE,
-+    None: BOM_UTF8
-+    }
-+
-+
-+def match_utf8(encoding):
-+    return BOM_LIST.get(encoding.lower()) == 'utf_8'
-+
-+
-+# Quote strings used for writing values
-+squot = "'%s'"
-+dquot = '"%s"'
-+noquot = "%s"
-+wspace_plus = ' \r\t\n\v\t\'"'
-+tsquot = '"""%s"""'
-+tdquot = "'''%s'''"
-+
-+try:
-+    enumerate
-+except NameError:
-+    def enumerate(obj):
-+        """enumerate for Python 2.2."""
-+        i = -1
-+        for item in obj:
-+            i += 1
-+            yield i, item
-+
-+try:
-+    True, False
-+except NameError:
-+    True, False = 1, 0
-+
-+
-+__version__ = '4.5.2'
-+
-+__revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $'
-+
-+__docformat__ = "restructuredtext en"
-+
-+__all__ = (
-+    '__version__',
-+    'DEFAULT_INDENT_TYPE',
-+    'DEFAULT_INTERPOLATION',
-+    'ConfigObjError',
-+    'NestingError',
-+    'ParseError',
-+    'DuplicateError',
-+    'ConfigspecError',
-+    'ConfigObj',
-+    'SimpleVal',
-+    'InterpolationError',
-+    'InterpolationLoopError',
-+    'MissingInterpolationOption',
-+    'RepeatSectionError',
-+    'ReloadError',
-+    'UnreprError',
-+    'UnknownType',
-+    '__docformat__',
-+    'flatten_errors',
-+)
-+
-+DEFAULT_INTERPOLATION = 'configparser'
-+DEFAULT_INDENT_TYPE = '    '
-+MAX_INTERPOL_DEPTH = 10
-+
-+OPTION_DEFAULTS = {
-+    'interpolation': True,
-+    'raise_errors': False,
-+    'list_values': True,
-+    'create_empty': False,
-+    'file_error': False,
-+    'configspec': None,
-+    'stringify': True,
-+    # option may be set to one of ('', ' ', '\t')
-+    'indent_type': None,
-+    'encoding': None,
-+    'default_encoding': None,
-+    'unrepr': False,
-+    'write_empty_values': False,
-+}
-+
-+
-+
-+def getObj(s):
-+    s = "a=" + s
-+    if compiler is None:
-+        raise ImportError('compiler module not available')
-+    p = compiler.parse(s)
-+    return p.getChildren()[1].getChildren()[0].getChildren()[1]
-+
-+
-+class UnknownType(Exception):
-+    pass
-+
-+
-+class Builder(object):
-+    
-+    def build(self, o):
-+        m = getattr(self, 'build_' + o.__class__.__name__, None)
-+        if m is None:
-+            raise UnknownType(o.__class__.__name__)
-+        return m(o)
-+    
-+    def build_List(self, o):
-+        return map(self.build, o.getChildren())
-+    
-+    def build_Const(self, o):
-+        return o.value
-+    
-+    def build_Dict(self, o):
-+        d = {}
-+        i = iter(map(self.build, o.getChildren()))
-+        for el in i:
-+            d[el] = i.next()
-+        return d
-+    
-+    def build_Tuple(self, o):
-+        return tuple(self.build_List(o))
-+    
-+    def build_Name(self, o):
-+        if o.name == 'None':
-+            return None
-+        if o.name == 'True':
-+            return True
-+        if o.name == 'False':
-+            return False
-+        
-+        # An undefined Name
-+        raise UnknownType('Undefined Name')
-+    
-+    def build_Add(self, o):
-+        real, imag = map(self.build_Const, o.getChildren())
-+        try:
-+            real = float(real)
-+        except TypeError:
-+            raise UnknownType('Add')
-+        if not isinstance(imag, complex) or imag.real != 0.0:
-+            raise UnknownType('Add')
-+        return real+imag
-+    
-+    def build_Getattr(self, o):
-+        parent = self.build(o.expr)
-+        return getattr(parent, o.attrname)
-+    
-+    def build_UnarySub(self, o):
-+        return -self.build_Const(o.getChildren()[0])
-+    
-+    def build_UnaryAdd(self, o):
-+        return self.build_Const(o.getChildren()[0])
-+
-+
-+_builder = Builder()
-+
-+
-+def unrepr(s):
-+    if not s:
-+        return s
-+    return _builder.build(getObj(s))
-+
-+
-+
-+class ConfigObjError(SyntaxError):
-+    """
-+    This is the base class for all errors that ConfigObj raises.
-+    It is a subclass of SyntaxError.
-+    """
-+    def __init__(self, message='', line_number=None, line=''):
-+        self.line = line
-+        self.line_number = line_number
-+        self.message = message
-+        SyntaxError.__init__(self, message)
-+
-+
-+class NestingError(ConfigObjError):
-+    """
-+    This error indicates a level of nesting that doesn't match.
-+    """
-+
-+
-+class ParseError(ConfigObjError):
-+    """
-+    This error indicates that a line is badly written.
-+    It is neither a valid ``key = value`` line,
-+    nor a valid section marker line.
-+    """
-+
-+
-+class ReloadError(IOError):
-+    """
-+    A 'reload' operation failed.
-+    This exception is a subclass of ``IOError``.
-+    """
-+    def __init__(self):
-+        IOError.__init__(self, 'reload failed, filename is not set.')
-+
-+
-+class DuplicateError(ConfigObjError):
-+    """
-+    The keyword or section specified already exists.
-+    """
-+
-+
-+class ConfigspecError(ConfigObjError):
-+    """
-+    An error occured whilst parsing a configspec.
-+    """
-+
-+
-+class InterpolationError(ConfigObjError):
-+    """Base class for the two interpolation errors."""
-+
-+
-+class InterpolationLoopError(InterpolationError):
-+    """Maximum interpolation depth exceeded in string interpolation."""
-+
-+    def __init__(self, option):
-+        InterpolationError.__init__(
-+            self,
-+            'interpolation loop detected in value "%s".' % option)
-+
-+
-+class RepeatSectionError(ConfigObjError):
-+    """
-+    This error indicates additional sections in a section with a
-+    ``__many__`` (repeated) section.
-+    """
-+
-+
-+class MissingInterpolationOption(InterpolationError):
-+    """A value specified for interpolation was missing."""
-+
-+    def __init__(self, option):
-+        InterpolationError.__init__(
-+            self,
-+            'missing option "%s" in interpolation.' % option)
-+
-+
-+class UnreprError(ConfigObjError):
-+    """An error parsing in unrepr mode."""
-+
-+
-+
-+class InterpolationEngine(object):
-+    """
-+    A helper class to help perform string interpolation.
-+
-+    This class is an abstract base class; its descendants perform
-+    the actual work.
-+    """
-+
-+    # compiled regexp to use in self.interpolate()
-+    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
-+
-+    def __init__(self, section):
-+        # the Section instance that "owns" this engine
-+        self.section = section
-+
-+
-+    def interpolate(self, key, value):
-+        def recursive_interpolate(key, value, section, backtrail):
-+            """The function that does the actual work.
-+
-+            ``value``: the string we're trying to interpolate.
-+            ``section``: the section in which that string was found
-+            ``backtrail``: a dict to keep track of where we've been,
-+            to detect and prevent infinite recursion loops
-+
-+            This is similar to a depth-first-search algorithm.
-+            """
-+            # Have we been here already?
-+            if backtrail.has_key((key, section.name)):
-+                # Yes - infinite loop detected
-+                raise InterpolationLoopError(key)
-+            # Place a marker on our backtrail so we won't come back here again
-+            backtrail[(key, section.name)] = 1
-+
-+            # Now start the actual work
-+            match = self._KEYCRE.search(value)
-+            while match:
-+                # The actual parsing of the match is implementation-dependent,
-+                # so delegate to our helper function
-+                k, v, s = self._parse_match(match)
-+                if k is None:
-+                    # That's the signal that no further interpolation is needed
-+                    replacement = v
-+                else:
-+                    # Further interpolation may be needed to obtain final value
-+                    replacement = recursive_interpolate(k, v, s, backtrail)
-+                # Replace the matched string with its final value
-+                start, end = match.span()
-+                value = ''.join((value[:start], replacement, value[end:]))
-+                new_search_start = start + len(replacement)
-+                # Pick up the next interpolation key, if any, for next time
-+                # through the while loop
-+                match = self._KEYCRE.search(value, new_search_start)
-+
-+            # Now safe to come back here again; remove marker from backtrail
-+            del backtrail[(key, section.name)]
-+
-+            return value
-+
-+        # Back in interpolate(), all we have to do is kick off the recursive
-+        # function with appropriate starting values
-+        value = recursive_interpolate(key, value, self.section, {})
-+        return value
-+
-+
-+    def _fetch(self, key):
-+        """Helper function to fetch values from owning section.
-+
-+        Returns a 2-tuple: the value, and the section where it was found.
-+        """
-+        # switch off interpolation before we try and fetch anything !
-+        save_interp = self.section.main.interpolation
-+        self.section.main.interpolation = False
-+
-+        # Start at section that "owns" this InterpolationEngine
-+        current_section = self.section
-+        while True:
-+            # try the current section first
-+            val = current_section.get(key)
-+            if val is not None:
-+                break
-+            # try "DEFAULT" next
-+            val = current_section.get('DEFAULT', {}).get(key)
-+            if val is not None:
-+                break
-+            # move up to parent and try again
-+            # top-level's parent is itself
-+            if current_section.parent is current_section:
-+                # reached top level, time to give up
-+                break
-+            current_section = current_section.parent
-+
-+        # restore interpolation to previous value before returning
-+        self.section.main.interpolation = save_interp
-+        if val is None:
-+            raise MissingInterpolationOption(key)
-+        return val, current_section
-+
-+
-+    def _parse_match(self, match):
-+        """Implementation-dependent helper function.
-+
-+        Will be passed a match object corresponding to the interpolation
-+        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
-+        key in the appropriate config file section (using the ``_fetch()``
-+        helper function) and return a 3-tuple: (key, value, section)
-+
-+        ``key`` is the name of the key we're looking for
-+        ``value`` is the value found for that key
-+        ``section`` is a reference to the section where it was found
-+
-+        ``key`` and ``section`` should be None if no further
-+        interpolation should be performed on the resulting value
-+        (e.g., if we interpolated "$$" and returned "$").
-+        """
-+        raise NotImplementedError()
-+    
-+
-+
-+class ConfigParserInterpolation(InterpolationEngine):
-+    """Behaves like ConfigParser."""
-+    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
-+
-+    def _parse_match(self, match):
-+        key = match.group(1)
-+        value, section = self._fetch(key)
-+        return key, value, section
-+
-+
-+
-+class TemplateInterpolation(InterpolationEngine):
-+    """Behaves like string.Template."""
-+    _delimiter = '$'
-+    _KEYCRE = re.compile(r"""
-+        \$(?:
-+          (?P<escaped>\$)              |   # Two $ signs
-+          (?P<named>[_a-z][_a-z0-9]*)  |   # $name format
-+          {(?P<braced>[^}]*)}              # ${name} format
-+        )
-+        """, re.IGNORECASE | re.VERBOSE)
-+
-+    def _parse_match(self, match):
-+        # Valid name (in or out of braces): fetch value from section
-+        key = match.group('named') or match.group('braced')
-+        if key is not None:
-+            value, section = self._fetch(key)
-+            return key, value, section
-+        # Escaped delimiter (e.g., $$): return single delimiter
-+        if match.group('escaped') is not None:
-+            # Return None for key and section to indicate it's time to stop
-+            return None, self._delimiter, None
-+        # Anything else: ignore completely, just return it unchanged
-+        return None, match.group(), None
-+
-+
-+interpolation_engines = {
-+    'configparser': ConfigParserInterpolation,
-+    'template': TemplateInterpolation,
-+}
-+
-+
-+
-+class Section(dict):
-+    """
-+    A dictionary-like object that represents a section in a config file.
-+    
-+    It does string interpolation if the 'interpolation' attribute
-+    of the 'main' object is set to True.
-+    
-+    Interpolation is tried first from this object, then from the 'DEFAULT'
-+    section of this object, next from the parent and its 'DEFAULT' section,
-+    and so on until the main object is reached.
-+    
-+    A Section will behave like an ordered dictionary - following the
-+    order of the ``scalars`` and ``sections`` attributes.
-+    You can use this to change the order of members.
-+    
-+    Iteration follows the order: scalars, then sections.
-+    """
-+
-+    def __init__(self, parent, depth, main, indict=None, name=None):
-+        """
-+        * parent is the section above
-+        * depth is the depth level of this section
-+        * main is the main ConfigObj
-+        * indict is a dictionary to initialise the section with
-+        """
-+        if indict is None:
-+            indict = {}
-+        dict.__init__(self)
-+        # used for nesting level *and* interpolation
-+        self.parent = parent
-+        # used for the interpolation attribute
-+        self.main = main
-+        # level of nesting depth of this Section
-+        self.depth = depth
-+        # purely for information
-+        self.name = name
-+        #
-+        self._initialise()
-+        # we do this explicitly so that __setitem__ is used properly
-+        # (rather than just passing to ``dict.__init__``)
-+        for entry, value in indict.iteritems():
-+            self[entry] = value
-+            
-+            
-+    def _initialise(self):
-+        # the sequence of scalar values in this Section
-+        self.scalars = []
-+        # the sequence of sections in this Section
-+        self.sections = []
-+        # for comments :-)
-+        self.comments = {}
-+        self.inline_comments = {}
-+        # for the configspec
-+        self.configspec = {}
-+        self._order = []
-+        self._configspec_comments = {}
-+        self._configspec_inline_comments = {}
-+        self._cs_section_comments = {}
-+        self._cs_section_inline_comments = {}
-+        # for defaults
-+        self.defaults = []
-+        self.default_values = {}
-+
-+
-+    def _interpolate(self, key, value):
-+        try:
-+            # do we already have an interpolation engine?
-+            engine = self._interpolation_engine
-+        except AttributeError:
-+            # not yet: first time running _interpolate(), so pick the engine
-+            name = self.main.interpolation
-+            if name == True:  # note that "if name:" would be incorrect here
-+                # backwards-compatibility: interpolation=True means use default
-+                name = DEFAULT_INTERPOLATION
-+            name = name.lower()  # so that "Template", "template", etc. all work
-+            class_ = interpolation_engines.get(name, None)
-+            if class_ is None:
-+                # invalid value for self.main.interpolation
-+                self.main.interpolation = False
-+                return value
-+            else:
-+                # save reference to engine so we don't have to do this again
-+                engine = self._interpolation_engine = class_(self)
-+        # let the engine do the actual work
-+        return engine.interpolate(key, value)
-+
-+
-+    def __getitem__(self, key):
-+        """Fetch the item and do string interpolation."""
-+        val = dict.__getitem__(self, key)
-+        if self.main.interpolation and isinstance(val, StringTypes):
-+            return self._interpolate(key, val)
-+        return val
-+
-+
-+    def __setitem__(self, key, value, unrepr=False):
-+        """
-+        Correctly set a value.
-+        
-+        Making dictionary values Section instances.
-+        (We have to special case 'Section' instances - which are also dicts)
-+        
-+        Keys must be strings.
-+        Values need only be strings (or lists of strings) if
-+        ``main.stringify`` is set.
-+        
-+        `unrepr`` must be set when setting a value to a dictionary, without
-+        creating a new sub-section.
-+        """
-+        if not isinstance(key, StringTypes):
-+            raise ValueError('The key "%s" is not a string.' % key)
-+        
-+        # add the comment
-+        if not self.comments.has_key(key):
-+            self.comments[key] = []
-+            self.inline_comments[key] = ''
-+        # remove the entry from defaults
-+        if key in self.defaults:
-+            self.defaults.remove(key)
-+        #
-+        if isinstance(value, Section):
-+            if not self.has_key(key):
-+                self.sections.append(key)
-+            dict.__setitem__(self, key, value)
-+        elif isinstance(value, dict) and not unrepr:
-+            # First create the new depth level,
-+            # then create the section
-+            if not self.has_key(key):
-+                self.sections.append(key)
-+            new_depth = self.depth + 1
-+            dict.__setitem__(
-+                self,
-+                key,
-+                Section(
-+                    self,
-+                    new_depth,
-+                    self.main,
-+                    indict=value,
-+                    name=key))
-+        else:
-+            if not self.has_key(key):
-+                self.scalars.append(key)
-+            if not self.main.stringify:
-+                if isinstance(value, StringTypes):
-+                    pass
-+                elif isinstance(value, (list, tuple)):
-+                    for entry in value:
-+                        if not isinstance(entry, StringTypes):
-+                            raise TypeError('Value is not a string "%s".' % entry)
-+                else:
-+                    raise TypeError('Value is not a string "%s".' % value)
-+            dict.__setitem__(self, key, value)
-+
-+
-+    def __delitem__(self, key):
-+        """Remove items from the sequence when deleting."""
-+        dict. __delitem__(self, key)
-+        if key in self.scalars:
-+            self.scalars.remove(key)
-+        else:
-+            self.sections.remove(key)
-+        del self.comments[key]
-+        del self.inline_comments[key]
-+
-+
-+    def get(self, key, default=None):
-+        """A version of ``get`` that doesn't bypass string interpolation."""
-+        try:
-+            return self[key]
-+        except KeyError:
-+            return default
-+
-+
-+    def update(self, indict):
-+        """
-+        A version of update that uses our ``__setitem__``.
-+        """
-+        for entry in indict:
-+            self[entry] = indict[entry]
-+
-+
-+    def pop(self, key, *args):
-+        """
-+        'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
-+        If key is not found, d is returned if given, otherwise KeyError is raised'
-+        """
-+        val = dict.pop(self, key, *args)
-+        if key in self.scalars:
-+            del self.comments[key]
-+            del self.inline_comments[key]
-+            self.scalars.remove(key)
-+        elif key in self.sections:
-+            del self.comments[key]
-+            del self.inline_comments[key]
-+            self.sections.remove(key)
-+        if self.main.interpolation and isinstance(val, StringTypes):
-+            return self._interpolate(key, val)
-+        return val
-+
-+
-+    def popitem(self):
-+        """Pops the first (key,val)"""
-+        sequence = (self.scalars + self.sections)
-+        if not sequence:
-+            raise KeyError(": 'popitem(): dictionary is empty'")
-+        key = sequence[0]
-+        val =  self[key]
-+        del self[key]
-+        return key, val
-+
-+
-+    def clear(self):
-+        """
-+        A version of clear that also affects scalars/sections
-+        Also clears comments and configspec.
-+        
-+        Leaves other attributes alone :
-+            depth/main/parent are not affected
-+        """
-+        dict.clear(self)
-+        self.scalars = []
-+        self.sections = []
-+        self.comments = {}
-+        self.inline_comments = {}
-+        self.configspec = {}
-+
-+
-+    def setdefault(self, key, default=None):
-+        """A version of setdefault that sets sequence if appropriate."""
-+        try:
-+            return self[key]
-+        except KeyError:
-+            self[key] = default
-+            return self[key]
-+
-+
-+    def items(self):
-+        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
-+        return zip((self.scalars + self.sections), self.values())
-+
-+
-+    def keys(self):
-+        """D.keys() -> list of D's keys"""
-+        return (self.scalars + self.sections)
-+
-+
-+    def values(self):
-+        """D.values() -> list of D's values"""
-+        return [self[key] for key in (self.scalars + self.sections)]
-+
-+
-+    def iteritems(self):
-+        """D.iteritems() -> an iterator over the (key, value) items of D"""
-+        return iter(self.items())
-+
-+
-+    def iterkeys(self):
-+        """D.iterkeys() -> an iterator over the keys of D"""
-+        return iter((self.scalars + self.sections))
-+
-+    __iter__ = iterkeys
-+
-+
-+    def itervalues(self):
-+        """D.itervalues() -> an iterator over the values of D"""
-+        return iter(self.values())
-+
-+
-+    def __repr__(self):
-+        """x.__repr__() <==> repr(x)"""
-+        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
-+            for key in (self.scalars + self.sections)])
-+
-+    __str__ = __repr__
-+    __str__.__doc__ = "x.__str__() <==> str(x)"
-+
-+
-+    # Extra methods - not in a normal dictionary
-+
-+    def dict(self):
-+        """
-+        Return a deepcopy of self as a dictionary.
-+        
-+        All members that are ``Section`` instances are recursively turned to
-+        ordinary dictionaries - by calling their ``dict`` method.
-+        
-+        >>> n = a.dict()
-+        >>> n == a
-+        1
-+        >>> n is a
-+        0
-+        """
-+        newdict = {}
-+        for entry in self:
-+            this_entry = self[entry]
-+            if isinstance(this_entry, Section):
-+                this_entry = this_entry.dict()
-+            elif isinstance(this_entry, list):
-+                # create a copy rather than a reference
-+                this_entry = list(this_entry)
-+            elif isinstance(this_entry, tuple):
-+                # create a copy rather than a reference
-+                this_entry = tuple(this_entry)
-+            newdict[entry] = this_entry
-+        return newdict
-+
-+
-+    def merge(self, indict):
-+        """
-+        A recursive update - useful for merging config files.
-+        
-+        >>> a = '''[section1]
-+        ...     option1 = True
-+        ...     [[subsection]]
-+        ...     more_options = False
-+        ...     # end of file'''.splitlines()
-+        >>> b = '''# File is user.ini
-+        ...     [section1]
-+        ...     option1 = False
-+        ...     # end of file'''.splitlines()
-+        >>> c1 = ConfigObj(b)
-+        >>> c2 = ConfigObj(a)
-+        >>> c2.merge(c1)
-+        >>> c2
-+        {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
-+        """
-+        for key, val in indict.items():
-+            if (key in self and isinstance(self[key], dict) and
-+                                isinstance(val, dict)):
-+                self[key].merge(val)
-+            else:   
-+                self[key] = val
-+
-+
-+    def rename(self, oldkey, newkey):
-+        """
-+        Change a keyname to another, without changing position in sequence.
-+        
-+        Implemented so that transformations can be made on keys,
-+        as well as on values. (used by encode and decode)
-+        
-+        Also renames comments.
-+        """
-+        if oldkey in self.scalars:
-+            the_list = self.scalars
-+        elif oldkey in self.sections:
-+            the_list = self.sections
-+        else:
-+            raise KeyError('Key "%s" not found.' % oldkey)
-+        pos = the_list.index(oldkey)
-+        #
-+        val = self[oldkey]
-+        dict.__delitem__(self, oldkey)
-+        dict.__setitem__(self, newkey, val)
-+        the_list.remove(oldkey)
-+        the_list.insert(pos, newkey)
-+        comm = self.comments[oldkey]
-+        inline_comment = self.inline_comments[oldkey]
-+        del self.comments[oldkey]
-+        del self.inline_comments[oldkey]
-+        self.comments[newkey] = comm
-+        self.inline_comments[newkey] = inline_comment
-+
-+
-+    def walk(self, function, raise_errors=True,
-+            call_on_sections=False, **keywargs):
-+        """
-+        Walk every member and call a function on the keyword and value.
-+        
-+        Return a dictionary of the return values
-+        
-+        If the function raises an exception, raise the errror
-+        unless ``raise_errors=False``, in which case set the return value to
-+        ``False``.
-+        
-+        Any unrecognised keyword arguments you pass to walk, will be pased on
-+        to the function you pass in.
-+        
-+        Note: if ``call_on_sections`` is ``True`` then - on encountering a
-+        subsection, *first* the function is called for the *whole* subsection,
-+        and then recurses into it's members. This means your function must be
-+        able to handle strings, dictionaries and lists. This allows you
-+        to change the key of subsections as well as for ordinary members. The
-+        return value when called on the whole subsection has to be discarded.
-+        
-+        See  the encode and decode methods for examples, including functions.
-+        
-+        .. caution::
-+        
-+            You can use ``walk`` to transform the names of members of a section
-+            but you mustn't add or delete members.
-+        
-+        >>> config = '''[XXXXsection]
-+        ... XXXXkey = XXXXvalue'''.splitlines()
-+        >>> cfg = ConfigObj(config)
-+        >>> cfg
-+        {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
-+        >>> def transform(section, key):
-+        ...     val = section[key]
-+        ...     newkey = key.replace('XXXX', 'CLIENT1')
-+        ...     section.rename(key, newkey)
-+        ...     if isinstance(val, (tuple, list, dict)):
-+        ...         pass
-+        ...     else:
-+        ...         val = val.replace('XXXX', 'CLIENT1')
-+        ...         section[newkey] = val
-+        >>> cfg.walk(transform, call_on_sections=True)
-+        {'CLIENT1section': {'CLIENT1key': None}}
-+        >>> cfg
-+        {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}
-+        """
-+        out = {}
-+        # scalars first
-+        for i in range(len(self.scalars)):
-+            entry = self.scalars[i]
-+            try:
-+                val = function(self, entry, **keywargs)
-+                # bound again in case name has changed
-+                entry = self.scalars[i]
-+                out[entry] = val
-+            except Exception:
-+                if raise_errors:
-+                    raise
-+                else:
-+                    entry = self.scalars[i]
-+                    out[entry] = False
-+        # then sections
-+        for i in range(len(self.sections)):
-+            entry = self.sections[i]
-+            if call_on_sections:
-+                try:
-+                    function(self, entry, **keywargs)
-+                except Exception:
-+                    if raise_errors:
-+                        raise
-+                    else:
-+                        entry = self.sections[i]
-+                        out[entry] = False
-+                # bound again in case name has changed
-+                entry = self.sections[i]
-+            # previous result is discarded
-+            out[entry] = self[entry].walk(
-+                function,
-+                raise_errors=raise_errors,
-+                call_on_sections=call_on_sections,
-+                **keywargs)
-+        return out
-+
-+
-+    def decode(self, encoding):
-+        """
-+        Decode all strings and values to unicode, using the specified encoding.
-+        
-+        Works with subsections and list values.
-+        
-+        Uses the ``walk`` method.
-+        
-+        Testing ``encode`` and ``decode``.
-+        >>> m = ConfigObj(a)
-+        >>> m.decode('ascii')
-+        >>> def testuni(val):
-+        ...     for entry in val:
-+        ...         if not isinstance(entry, unicode):
-+        ...             print >> sys.stderr, type(entry)
-+        ...             raise AssertionError, 'decode failed.'
-+        ...         if isinstance(val[entry], dict):
-+        ...             testuni(val[entry])
-+        ...         elif not isinstance(val[entry], unicode):
-+        ...             raise AssertionError, 'decode failed.'
-+        >>> testuni(m)
-+        >>> m.encode('ascii')
-+        >>> a == m
-+        1
-+        """
-+        warn('use of ``decode`` is deprecated.', DeprecationWarning)
-+        def decode(section, key, encoding=encoding, warn=True):
-+            """ """
-+            val = section[key]
-+            if isinstance(val, (list, tuple)):
-+                newval = []
-+                for entry in val:
-+                    newval.append(entry.decode(encoding))
-+            elif isinstance(val, dict):
-+                newval = val
-+            else:
-+                newval = val.decode(encoding)
-+            newkey = key.decode(encoding)
-+            section.rename(key, newkey)
-+            section[newkey] = newval
-+        # using ``call_on_sections`` allows us to modify section names
-+        self.walk(decode, call_on_sections=True)
-+
-+
-+    def encode(self, encoding):
-+        """
-+        Encode all strings and values from unicode,
-+        using the specified encoding.
-+        
-+        Works with subsections and list values.
-+        Uses the ``walk`` method.
-+        """
-+        warn('use of ``encode`` is deprecated.', DeprecationWarning)
-+        def encode(section, key, encoding=encoding):
-+            """ """
-+            val = section[key]
-+            if isinstance(val, (list, tuple)):
-+                newval = []
-+                for entry in val:
-+                    newval.append(entry.encode(encoding))
-+            elif isinstance(val, dict):
-+                newval = val
-+            else:
-+                newval = val.encode(encoding)
-+            newkey = key.encode(encoding)
-+            section.rename(key, newkey)
-+            section[newkey] = newval
-+        self.walk(encode, call_on_sections=True)
-+
-+
-+    def istrue(self, key):
-+        """A deprecated version of ``as_bool``."""
-+        warn('use of ``istrue`` is deprecated. Use ``as_bool`` method '
-+                'instead.', DeprecationWarning)
-+        return self.as_bool(key)
-+
-+
-+    def as_bool(self, key):
-+        """
-+        Accepts a key as input. The corresponding value must be a string or
-+        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
-+        retain compatibility with Python 2.2.
-+        
-+        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns 
-+        ``True``.
-+        
-+        If the string is one of  ``False``, ``Off``, ``No``, or ``0`` it returns 
-+        ``False``.
-+        
-+        ``as_bool`` is not case sensitive.
-+        
-+        Any other input will raise a ``ValueError``.
-+        
-+        >>> a = ConfigObj()
-+        >>> a['a'] = 'fish'
-+        >>> a.as_bool('a')
-+        Traceback (most recent call last):
-+        ValueError: Value "fish" is neither True nor False
-+        >>> a['b'] = 'True'
-+        >>> a.as_bool('b')
-+        1
-+        >>> a['b'] = 'off'
-+        >>> a.as_bool('b')
-+        0
-+        """
-+        val = self[key]
-+        if val == True:
-+            return True
-+        elif val == False:
-+            return False
-+        else:
-+            try:
-+                if not isinstance(val, StringTypes):
-+                    # TODO: Why do we raise a KeyError here?
-+                    raise KeyError()
-+                else:
-+                    return self.main._bools[val.lower()]
-+            except KeyError:
-+                raise ValueError('Value "%s" is neither True nor False' % val)
-+
-+
-+    def as_int(self, key):
-+        """
-+        A convenience method which coerces the specified value to an integer.
-+        
-+        If the value is an invalid literal for ``int``, a ``ValueError`` will
-+        be raised.
-+        
-+        >>> a = ConfigObj()
-+        >>> a['a'] = 'fish'
-+        >>> a.as_int('a')
-+        Traceback (most recent call last):
-+        ValueError: invalid literal for int(): fish
-+        >>> a['b'] = '1'
-+        >>> a.as_int('b')
-+        1
-+        >>> a['b'] = '3.2'
-+        >>> a.as_int('b')
-+        Traceback (most recent call last):
-+        ValueError: invalid literal for int(): 3.2
-+        """
-+        return int(self[key])
-+
-+
-+    def as_float(self, key):
-+        """
-+        A convenience method which coerces the specified value to a float.
-+        
-+        If the value is an invalid literal for ``float``, a ``ValueError`` will
-+        be raised.
-+        
-+        >>> a = ConfigObj()
-+        >>> a['a'] = 'fish'
-+        >>> a.as_float('a')
-+        Traceback (most recent call last):
-+        ValueError: invalid literal for float(): fish
-+        >>> a['b'] = '1'
-+        >>> a.as_float('b')
-+        1.0
-+        >>> a['b'] = '3.2'
-+        >>> a.as_float('b')
-+        3.2000000000000002
-+        """
-+        return float(self[key])
-+
-+
-+    def restore_default(self, key):
-+        """
-+        Restore (and return) default value for the specified key.
-+        
-+        This method will only work for a ConfigObj that was created
-+        with a configspec and has been validated.
-+        
-+        If there is no default value for this key, ``KeyError`` is raised.
-+        """
-+        default = self.default_values[key]
-+        dict.__setitem__(self, key, default)
-+        if key not in self.defaults:
-+            self.defaults.append(key)
-+        return default
-+
-+    
-+    def restore_defaults(self):
-+        """
-+        Recursively restore default values to all members
-+        that have them.
-+        
-+        This method will only work for a ConfigObj that was created
-+        with a configspec and has been validated.
-+        
-+        It doesn't delete or modify entries without default values.
-+        """
-+        for key in self.default_values:
-+            self.restore_default(key)
-+            
-+        for section in self.sections:
-+            self[section].restore_defaults()
-+
-+
-+class ConfigObj(Section):
-+    """An object to read, create, and write config files."""
-+
-+    _keyword = re.compile(r'''^ # line start
-+        (\s*)                   # indentation
-+        (                       # keyword
-+            (?:".*?")|          # double quotes
-+            (?:'.*?')|          # single quotes
-+            (?:[^'"=].*?)       # no quotes
-+        )
-+        \s*=\s*                 # divider
-+        (.*)                    # value (including list values and comments)
-+        $   # line end
-+        ''',
-+        re.VERBOSE)
-+
-+    _sectionmarker = re.compile(r'''^
-+        (\s*)                     # 1: indentation
-+        ((?:\[\s*)+)              # 2: section marker open
-+        (                         # 3: section name open
-+            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
-+            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
-+            (?:[^'"\s].*?)        # at least one non-space unquoted
-+        )                         # section name close
-+        ((?:\s*\])+)              # 4: section marker close
-+        \s*(\#.*)?                # 5: optional comment
-+        $''',
-+        re.VERBOSE)
-+
-+    # this regexp pulls list values out as a single string
-+    # or single values and comments
-+    # FIXME: this regex adds a '' to the end of comma terminated lists
-+    #   workaround in ``_handle_value``
-+    _valueexp = re.compile(r'''^
-+        (?:
-+            (?:
-+                (
-+                    (?:
-+                        (?:
-+                            (?:".*?")|              # double quotes
-+                            (?:'.*?')|              # single quotes
-+                            (?:[^'",\#][^,\#]*?)    # unquoted
-+                        )
-+                        \s*,\s*                     # comma
-+                    )*      # match all list items ending in a comma (if any)
-+                )
-+                (
-+                    (?:".*?")|                      # double quotes
-+                    (?:'.*?')|                      # single quotes
-+                    (?:[^'",\#\s][^,]*?)|           # unquoted
-+                    (?:(?<!,))                      # Empty value
-+                )?          # last item in a list - or string value
-+            )|
-+            (,)             # alternatively a single comma - empty list
-+        )
-+        \s*(\#.*)?          # optional comment
-+        $''',
-+        re.VERBOSE)
-+
-+    # use findall to get the members of a list value
-+    _listvalueexp = re.compile(r'''
-+        (
-+            (?:".*?")|          # double quotes
-+            (?:'.*?')|          # single quotes
-+            (?:[^'",\#].*?)       # unquoted
-+        )
-+        \s*,\s*                 # comma
-+        ''',
-+        re.VERBOSE)
-+
-+    # this regexp is used for the value
-+    # when lists are switched off
-+    _nolistvalue = re.compile(r'''^
-+        (
-+            (?:".*?")|          # double quotes
-+            (?:'.*?')|          # single quotes
-+            (?:[^'"\#].*?)|     # unquoted
-+            (?:)                # Empty value
-+        )
-+        \s*(\#.*)?              # optional comment
-+        $''',
-+        re.VERBOSE)
-+
-+    # regexes for finding triple quoted values on one line
-+    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
-+    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
-+    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
-+    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
-+
-+    _triple_quote = {
-+        "'''": (_single_line_single, _multi_line_single),
-+        '"""': (_single_line_double, _multi_line_double),
-+    }
-+
-+    # Used by the ``istrue`` Section method
-+    _bools = {
-+        'yes': True, 'no': False,
-+        'on': True, 'off': False,
-+        '1': True, '0': False,
-+        'true': True, 'false': False,
-+        }
-+
-+
-+    def __init__(self, infile=None, options=None, **kwargs):
-+        """
-+        Parse a config file or create a config file object.
-+        
-+        ``ConfigObj(infile=None, options=None, **kwargs)``
-+        """
-+        # init the superclass
-+        Section.__init__(self, self, 0, self)
-+        
-+        if infile is None:
-+            infile = []
-+        if options is None:
-+            options = {}
-+        else:
-+            options = dict(options)
-+            
-+        # keyword arguments take precedence over an options dictionary
-+        options.update(kwargs)
-+        
-+        defaults = OPTION_DEFAULTS.copy()
-+        # TODO: check the values too.
-+        for entry in options:
-+            if entry not in defaults:
-+                raise TypeError('Unrecognised option "%s".' % entry)
-+        
-+        # Add any explicit options to the defaults
-+        defaults.update(options)
-+        self._initialise(defaults)
-+        configspec = defaults['configspec']
-+        self._original_configspec = configspec
-+        self._load(infile, configspec)
-+        
-+        
-+    def _load(self, infile, configspec):
-+        if isinstance(infile, StringTypes):
-+            self.filename = infile
-+            if os.path.isfile(infile):
-+                h = open(infile, 'rb')
-+                infile = h.read() or []
-+                h.close()
-+            elif self.file_error:
-+                # raise an error if the file doesn't exist
-+                raise IOError('Config file not found: "%s".' % self.filename)
-+            else:
-+                # file doesn't already exist
-+                if self.create_empty:
-+                    # this is a good test that the filename specified
-+                    # isn't impossible - like on a non-existent device
-+                    h = open(infile, 'w')
-+                    h.write('')
-+                    h.close()
-+                infile = []
-+                
-+        elif isinstance(infile, (list, tuple)):
-+            infile = list(infile)
-+            
-+        elif isinstance(infile, dict):
-+            # initialise self
-+            # the Section class handles creating subsections
-+            if isinstance(infile, ConfigObj):
-+                # get a copy of our ConfigObj
-+                infile = infile.dict()
-+                
-+            for entry in infile:
-+                self[entry] = infile[entry]
-+            del self._errors
-+            
-+            if configspec is not None:
-+                self._handle_configspec(configspec)
-+            else:
-+                self.configspec = None
-+            return
-+        
-+        elif hasattr(infile, 'read'):
-+            # This supports file like objects
-+            infile = infile.read() or []
-+            # needs splitting into lines - but needs doing *after* decoding
-+            # in case it's not an 8 bit encoding
-+        else:
-+            raise TypeError('infile must be a filename, file like object, or list of lines.')
-+        
-+        if infile:
-+            # don't do it for the empty ConfigObj
-+            infile = self._handle_bom(infile)
-+            # infile is now *always* a list
-+            #
-+            # Set the newlines attribute (first line ending it finds)
-+            # and strip trailing '\n' or '\r' from lines
-+            for line in infile:
-+                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
-+                    continue
-+                for end in ('\r\n', '\n', '\r'):
-+                    if line.endswith(end):
-+                        self.newlines = end
-+                        break
-+                break
-+
-+            infile = [line.rstrip('\r\n') for line in infile]
-+            
-+        self._parse(infile)
-+        # if we had any errors, now is the time to raise them
-+        if self._errors:
-+            info = "at line %s." % self._errors[0].line_number
-+            if len(self._errors) > 1:
-+                msg = "Parsing failed with several errors.\nFirst error %s" % info
-+                error = ConfigObjError(msg)
-+            else:
-+                error = self._errors[0]
-+            # set the errors attribute; it's a list of tuples:
-+            # (error_type, message, line_number)
-+            error.errors = self._errors
-+            # set the config attribute
-+            error.config = self
-+            raise error
-+        # delete private attributes
-+        del self._errors
-+        
-+        if configspec is None:
-+            self.configspec = None
-+        else:
-+            self._handle_configspec(configspec)
-+    
-+    
-+    def _initialise(self, options=None):
-+        if options is None:
-+            options = OPTION_DEFAULTS
-+            
-+        # initialise a few variables
-+        self.filename = None
-+        self._errors = []
-+        self.raise_errors = options['raise_errors']
-+        self.interpolation = options['interpolation']
-+        self.list_values = options['list_values']
-+        self.create_empty = options['create_empty']
-+        self.file_error = options['file_error']
-+        self.stringify = options['stringify']
-+        self.indent_type = options['indent_type']
-+        self.encoding = options['encoding']
-+        self.default_encoding = options['default_encoding']
-+        self.BOM = False
-+        self.newlines = None
-+        self.write_empty_values = options['write_empty_values']
-+        self.unrepr = options['unrepr']
-+        
-+        self.initial_comment = []
-+        self.final_comment = []
-+        self.configspec = {}
-+        
-+        # Clear section attributes as well
-+        Section._initialise(self)
-+        
-+        
-+    def __repr__(self):
-+        return ('ConfigObj({%s})' % 
-+                ', '.join([('%s: %s' % (repr(key), repr(self[key]))) 
-+                for key in (self.scalars + self.sections)]))
-+    
-+    
-+    def _handle_bom(self, infile):
-+        """
-+        Handle any BOM, and decode if necessary.
-+        
-+        If an encoding is specified, that *must* be used - but the BOM should
-+        still be removed (and the BOM attribute set).
-+        
-+        (If the encoding is wrongly specified, then a BOM for an alternative
-+        encoding won't be discovered or removed.)
-+        
-+        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
-+        removed. The BOM attribute will be set. UTF16 will be decoded to
-+        unicode.
-+        
-+        NOTE: This method must not be called with an empty ``infile``.
-+        
-+        Specifying the *wrong* encoding is likely to cause a
-+        ``UnicodeDecodeError``.
-+        
-+        ``infile`` must always be returned as a list of lines, but may be
-+        passed in as a single string.
-+        """
-+        if ((self.encoding is not None) and
-+            (self.encoding.lower() not in BOM_LIST)):
-+            # No need to check for a BOM
-+            # the encoding specified doesn't have one
-+            # just decode
-+            return self._decode(infile, self.encoding)
-+        
-+        if isinstance(infile, (list, tuple)):
-+            line = infile[0]
-+        else:
-+            line = infile
-+        if self.encoding is not None:
-+            # encoding explicitly supplied
-+            # And it could have an associated BOM
-+            # TODO: if encoding is just UTF16 - we ought to check for both
-+            # TODO: big endian and little endian versions.
-+            enc = BOM_LIST[self.encoding.lower()]
-+            if enc == 'utf_16':
-+                # For UTF16 we try big endian and little endian
-+                for BOM, (encoding, final_encoding) in BOMS.items():
-+                    if not final_encoding:
-+                        # skip UTF8
-+                        continue
-+                    if infile.startswith(BOM):
-+                        ### BOM discovered
-+                        ##self.BOM = True
-+                        # Don't need to remove BOM
-+                        return self._decode(infile, encoding)
-+                    
-+                # If we get this far, will *probably* raise a DecodeError
-+                # As it doesn't appear to start with a BOM
-+                return self._decode(infile, self.encoding)
-+            
-+            # Must be UTF8
-+            BOM = BOM_SET[enc]
-+            if not line.startswith(BOM):
-+                return self._decode(infile, self.encoding)
-+            
-+            newline = line[len(BOM):]
-+            
-+            # BOM removed
-+            if isinstance(infile, (list, tuple)):
-+                infile[0] = newline
-+            else:
-+                infile = newline
-+            self.BOM = True
-+            return self._decode(infile, self.encoding)
-+        
-+        # No encoding specified - so we need to check for UTF8/UTF16
-+        for BOM, (encoding, final_encoding) in BOMS.items():
-+            if not line.startswith(BOM):
-+                continue
-+            else:
-+                # BOM discovered
-+                self.encoding = final_encoding
-+                if not final_encoding:
-+                    self.BOM = True
-+                    # UTF8
-+                    # remove BOM
-+                    newline = line[len(BOM):]
-+                    if isinstance(infile, (list, tuple)):
-+                        infile[0] = newline
-+                    else:
-+                        infile = newline
-+                    # UTF8 - don't decode
-+                    if isinstance(infile, StringTypes):
-+                        return infile.splitlines(True)
-+                    else:
-+                        return infile
-+                # UTF16 - have to decode
-+                return self._decode(infile, encoding)
-+            
-+        # No BOM discovered and no encoding specified, just return
-+        if isinstance(infile, StringTypes):
-+            # infile read from a file will be a single string
-+            return infile.splitlines(True)
-+        return infile
-+
-+
-+    def _a_to_u(self, aString):
-+        """Decode ASCII strings to unicode if a self.encoding is specified."""
-+        if self.encoding:
-+            return aString.decode('ascii')
-+        else:
-+            return aString
-+
-+
-+    def _decode(self, infile, encoding):
-+        """
-+        Decode infile to unicode. Using the specified encoding.
-+        
-+        if is a string, it also needs converting to a list.
-+        """
-+        if isinstance(infile, StringTypes):
-+            # can't be unicode
-+            # NOTE: Could raise a ``UnicodeDecodeError``
-+            return infile.decode(encoding).splitlines(True)
-+        for i, line in enumerate(infile):
-+            if not isinstance(line, unicode):
-+                # NOTE: The isinstance test here handles mixed lists of unicode/string
-+                # NOTE: But the decode will break on any non-string values
-+                # NOTE: Or could raise a ``UnicodeDecodeError``
-+                infile[i] = line.decode(encoding)
-+        return infile
-+
-+
-+    def _decode_element(self, line):
-+        """Decode element to unicode if necessary."""
-+        if not self.encoding:
-+            return line
-+        if isinstance(line, str) and self.default_encoding:
-+            return line.decode(self.default_encoding)
-+        return line
-+
-+
-+    def _str(self, value):
-+        """
-+        Used by ``stringify`` within validate, to turn non-string values
-+        into strings.
-+        """
-+        if not isinstance(value, StringTypes):
-+            return str(value)
-+        else:
-+            return value
-+
-+
-+    def _parse(self, infile):
-+        """Actually parse the config file."""
-+        temp_list_values = self.list_values
-+        if self.unrepr:
-+            self.list_values = False
-+            
-+        comment_list = []
-+        done_start = False
-+        this_section = self
-+        maxline = len(infile) - 1
-+        cur_index = -1
-+        reset_comment = False
-+        
-+        while cur_index < maxline:
-+            if reset_comment:
-+                comment_list = []
-+            cur_index += 1
-+            line = infile[cur_index]
-+            sline = line.strip()
-+            # do we have anything on the line ?
-+            if not sline or sline.startswith('#'):
-+                reset_comment = False
-+                comment_list.append(line)
-+                continue
-+            
-+            if not done_start:
-+                # preserve initial comment
-+                self.initial_comment = comment_list
-+                comment_list = []
-+                done_start = True
-+                
-+            reset_comment = True
-+            # first we check if it's a section marker
-+            mat = self._sectionmarker.match(line)
-+            if mat is not None:
-+                # is a section line
-+                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
-+                if indent and (self.indent_type is None):
-+                    self.indent_type = indent
-+                cur_depth = sect_open.count('[')
-+                if cur_depth != sect_close.count(']'):
-+                    self._handle_error("Cannot compute the section depth at line %s.",
-+                                       NestingError, infile, cur_index)
-+                    continue
-+                
-+                if cur_depth < this_section.depth:
-+                    # the new section is dropping back to a previous level
-+                    try:
-+                        parent = self._match_depth(this_section,
-+                                                   cur_depth).parent
-+                    except SyntaxError:
-+                        self._handle_error("Cannot compute nesting level at line %s.",
-+                                           NestingError, infile, cur_index)
-+                        continue
-+                elif cur_depth == this_section.depth:
-+                    # the new section is a sibling of the current section
-+                    parent = this_section.parent
-+                elif cur_depth == this_section.depth + 1:
-+                    # the new section is a child the current section
-+                    parent = this_section
-+                else:
-+                    self._handle_error("Section too nested at line %s.",
-+                                       NestingError, infile, cur_index)
-+                    
-+                sect_name = self._unquote(sect_name)
-+                if parent.has_key(sect_name):
-+                    self._handle_error('Duplicate section name at line %s.',
-+                                       DuplicateError, infile, cur_index)
-+                    continue
-+                
-+                # create the new section
-+                this_section = Section(
-+                    parent,
-+                    cur_depth,
-+                    self,
-+                    name=sect_name)
-+                parent[sect_name] = this_section
-+                parent.inline_comments[sect_name] = comment
-+                parent.comments[sect_name] = comment_list
-+                continue
-+            #
-+            # it's not a section marker,
-+            # so it should be a valid ``key = value`` line
-+            mat = self._keyword.match(line)
-+            if mat is None:
-+                # it neither matched as a keyword
-+                # or a section marker
-+                self._handle_error(
-+                    'Invalid line at line "%s".',
-+                    ParseError, infile, cur_index)
-+            else:
-+                # is a keyword value
-+                # value will include any inline comment
-+                (indent, key, value) = mat.groups()
-+                if indent and (self.indent_type is None):
-+                    self.indent_type = indent
-+                # check for a multiline value
-+                if value[:3] in ['"""', "'''"]:
-+                    try:
-+                        (value, comment, cur_index) = self._multiline(
-+                            value, infile, cur_index, maxline)
-+                    except SyntaxError:
-+                        self._handle_error(
-+                            'Parse error in value at line %s.',
-+                            ParseError, infile, cur_index)
-+                        continue
-+                    else:
-+                        if self.unrepr:
-+                            comment = ''
-+                            try:
-+                                value = unrepr(value)
-+                            except Exception, e:
-+                                if type(e) == UnknownType:
-+                                    msg = 'Unknown name or type in value at line %s.'
-+                                else:
-+                                    msg = 'Parse error in value at line %s.'
-+                                self._handle_error(msg, UnreprError, infile,
-+                                    cur_index)
-+                                continue
-+                else:
-+                    if self.unrepr:
-+                        comment = ''
-+                        try:
-+                            value = unrepr(value)
-+                        except Exception, e:
-+                            if isinstance(e, UnknownType):
-+                                msg = 'Unknown name or type in value at line %s.'
-+                            else:
-+                                msg = 'Parse error in value at line %s.'
-+                            self._handle_error(msg, UnreprError, infile,
-+                                cur_index)
-+                            continue
-+                    else:
-+                        # extract comment and lists
-+                        try:
-+                            (value, comment) = self._handle_value(value)
-+                        except SyntaxError:
-+                            self._handle_error(
-+                                'Parse error in value at line %s.',
-+                                ParseError, infile, cur_index)
-+                            continue
-+                #
-+                key = self._unquote(key)
-+                if this_section.has_key(key):
-+                    self._handle_error(
-+                        'Duplicate keyword name at line %s.',
-+                        DuplicateError, infile, cur_index)
-+                    continue
-+                # add the key.
-+                # we set unrepr because if we have got this far we will never
-+                # be creating a new section
-+                this_section.__setitem__(key, value, unrepr=True)
-+                this_section.inline_comments[key] = comment
-+                this_section.comments[key] = comment_list
-+                continue
-+        #
-+        if self.indent_type is None:
-+            # no indentation used, set the type accordingly
-+            self.indent_type = ''
-+
-+        # preserve the final comment
-+        if not self and not self.initial_comment:
-+            self.initial_comment = comment_list
-+        elif not reset_comment:
-+            self.final_comment = comment_list
-+        self.list_values = temp_list_values
-+
-+
-+    def _match_depth(self, sect, depth):
-+        """
-+        Given a section and a depth level, walk back through the sections
-+        parents to see if the depth level matches a previous section.
-+        
-+        Return a reference to the right section,
-+        or raise a SyntaxError.
-+        """
-+        while depth < sect.depth:
-+            if sect is sect.parent:
-+                # we've reached the top level already
-+                raise SyntaxError()
-+            sect = sect.parent
-+        if sect.depth == depth:
-+            return sect
-+        # shouldn't get here
-+        raise SyntaxError()
-+
-+
-+    def _handle_error(self, text, ErrorClass, infile, cur_index):
-+        """
-+        Handle an error according to the error settings.
-+        
-+        Either raise the error or store it.
-+        The error will have occured at ``cur_index``
-+        """
-+        line = infile[cur_index]
-+        cur_index += 1
-+        message = text % cur_index
-+        error = ErrorClass(message, cur_index, line)
-+        if self.raise_errors:
-+            # raise the error - parsing stops here
-+            raise error
-+        # store the error
-+        # reraise when parsing has finished
-+        self._errors.append(error)
-+
-+
-+    def _unquote(self, value):
-+        """Return an unquoted version of a value"""
-+        if (value[0] == value[-1]) and (value[0] in ('"', "'")):
-+            value = value[1:-1]
-+        return value
-+
-+
-+    def _quote(self, value, multiline=True):
-+        """
-+        Return a safely quoted version of a value.
-+        
-+        Raise a ConfigObjError if the value cannot be safely quoted.
-+        If multiline is ``True`` (default) then use triple quotes
-+        if necessary.
-+        
-+        Don't quote values that don't need it.
-+        Recursively quote members of a list and return a comma joined list.
-+        Multiline is ``False`` for lists.
-+        Obey list syntax for empty and single member lists.
-+        
-+        If ``list_values=False`` then the value is only quoted if it contains
-+        a ``\n`` (is multiline) or '#'.
-+        
-+        If ``write_empty_values`` is set, and the value is an empty string, it
-+        won't be quoted.
-+        """
-+        if multiline and self.write_empty_values and value == '':
-+            # Only if multiline is set, so that it is used for values not
-+            # keys, and not values that are part of a list
-+            return ''
-+        
-+        if multiline and isinstance(value, (list, tuple)):
-+            if not value:
-+                return ','
-+            elif len(value) == 1:
-+                return self._quote(value[0], multiline=False) + ','
-+            return ', '.join([self._quote(val, multiline=False)
-+                for val in value])
-+        if not isinstance(value, StringTypes):
-+            if self.stringify:
-+                value = str(value)
-+            else:
-+                raise TypeError('Value "%s" is not a string.' % value)
-+
-+        if not value:
-+            return '""'
-+        
-+        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
-+        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
-+        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
-+        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
-+        
-+        if check_for_single:
-+            if not self.list_values:
-+                # we don't quote if ``list_values=False``
-+                quot = noquot
-+            # for normal values either single or double quotes will do
-+            elif '\n' in value:
-+                # will only happen if multiline is off - e.g. '\n' in key
-+                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
-+            elif ((value[0] not in wspace_plus) and
-+                    (value[-1] not in wspace_plus) and
-+                    (',' not in value)):
-+                quot = noquot
-+            else:
-+                quot = self._get_single_quote(value)
-+        else:
-+            # if value has '\n' or "'" *and* '"', it will need triple quotes
-+            quot = self._get_triple_quote(value)
-+        
-+        if quot == noquot and '#' in value and self.list_values:
-+            quot = self._get_single_quote(value)
-+                
-+        return quot % value
-+    
-+    
-+    def _get_single_quote(self, value):
-+        if ("'" in value) and ('"' in value):
-+            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
-+        elif '"' in value:
-+            quot = squot
-+        else:
-+            quot = dquot
-+        return quot
-+    
-+    
-+    def _get_triple_quote(self, value):
-+        if (value.find('"""') != -1) and (value.find("'''") != -1):
-+            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
-+        if value.find('"""') == -1:
-+            quot = tdquot
-+        else:
-+            quot = tsquot 
-+        return quot
-+
-+
-+    def _handle_value(self, value):
-+        """
-+        Given a value string, unquote, remove comment,
-+        handle lists. (including empty and single member lists)
-+        """
-+        # do we look for lists in values ?
-+        if not self.list_values:
-+            mat = self._nolistvalue.match(value)
-+            if mat is None:
-+                raise SyntaxError()
-+            # NOTE: we don't unquote here
-+            return mat.groups()
-+        #
-+        mat = self._valueexp.match(value)
-+        if mat is None:
-+            # the value is badly constructed, probably badly quoted,
-+            # or an invalid list
-+            raise SyntaxError()
-+        (list_values, single, empty_list, comment) = mat.groups()
-+        if (list_values == '') and (single is None):
-+            # change this if you want to accept empty values
-+            raise SyntaxError()
-+        # NOTE: note there is no error handling from here if the regex
-+        # is wrong: then incorrect values will slip through
-+        if empty_list is not None:
-+            # the single comma - meaning an empty list
-+            return ([], comment)
-+        if single is not None:
-+            # handle empty values
-+            if list_values and not single:
-+                # FIXME: the '' is a workaround because our regex now matches
-+                #   '' at the end of a list if it has a trailing comma
-+                single = None
-+            else:
-+                single = single or '""'
-+                single = self._unquote(single)
-+        if list_values == '':
-+            # not a list value
-+            return (single, comment)
-+        the_list = self._listvalueexp.findall(list_values)
-+        the_list = [self._unquote(val) for val in the_list]
-+        if single is not None:
-+            the_list += [single]
-+        return (the_list, comment)
-+
-+
-+    def _multiline(self, value, infile, cur_index, maxline):
-+        """Extract the value, where we are in a multiline situation."""
-+        quot = value[:3]
-+        newvalue = value[3:]
-+        single_line = self._triple_quote[quot][0]
-+        multi_line = self._triple_quote[quot][1]
-+        mat = single_line.match(value)
-+        if mat is not None:
-+            retval = list(mat.groups())
-+            retval.append(cur_index)
-+            return retval
-+        elif newvalue.find(quot) != -1:
-+            # somehow the triple quote is missing
-+            raise SyntaxError()
-+        #
-+        while cur_index < maxline:
-+            cur_index += 1
-+            newvalue += '\n'
-+            line = infile[cur_index]
-+            if line.find(quot) == -1:
-+                newvalue += line
-+            else:
-+                # end of multiline, process it
-+                break
-+        else:
-+            # we've got to the end of the config, oops...
-+            raise SyntaxError()
-+        mat = multi_line.match(line)
-+        if mat is None:
-+            # a badly formed line
-+            raise SyntaxError()
-+        (value, comment) = mat.groups()
-+        return (newvalue + value, comment, cur_index)
-+
-+
-+    def _handle_configspec(self, configspec):
-+        """Parse the configspec."""
-+        # FIXME: Should we check that the configspec was created with the 
-+        #        correct settings ? (i.e. ``list_values=False``)
-+        if not isinstance(configspec, ConfigObj):
-+            try:
-+                configspec = ConfigObj(configspec,
-+                                       raise_errors=True,
-+                                       file_error=True,
-+                                       list_values=False)
-+            except ConfigObjError, e:
-+                # FIXME: Should these errors have a reference
-+                #        to the already parsed ConfigObj ?
-+                raise ConfigspecError('Parsing configspec failed: %s' % e)
-+            except IOError, e:
-+                raise IOError('Reading configspec failed: %s' % e)
-+        
-+        self._set_configspec_value(configspec, self)
-+
-+
-+    def _set_configspec_value(self, configspec, section):
-+        """Used to recursively set configspec values."""
-+        if '__many__' in configspec.sections:
-+            section.configspec['__many__'] = configspec['__many__']
-+            if len(configspec.sections) > 1:
-+                # FIXME: can we supply any useful information here ?
-+                raise RepeatSectionError()
-+            
-+        if hasattr(configspec, 'initial_comment'):
-+            section._configspec_initial_comment = configspec.initial_comment
-+            section._configspec_final_comment = configspec.final_comment
-+            section._configspec_encoding = configspec.encoding
-+            section._configspec_BOM = configspec.BOM
-+            section._configspec_newlines = configspec.newlines
-+            section._configspec_indent_type = configspec.indent_type
-+            
-+        for entry in configspec.scalars:
-+            section._configspec_comments[entry] = configspec.comments[entry]
-+            section._configspec_inline_comments[entry] = configspec.inline_comments[entry]
-+            section.configspec[entry] = configspec[entry]
-+            section._order.append(entry)
-+            
-+        for entry in configspec.sections:
-+            if entry == '__many__':
-+                continue
-+            
-+            section._cs_section_comments[entry] = configspec.comments[entry]
-+            section._cs_section_inline_comments[entry] = configspec.inline_comments[entry]
-+            if not section.has_key(entry):
-+                section[entry] = {}
-+            self._set_configspec_value(configspec[entry], section[entry])
-+
-+
-+    def _handle_repeat(self, section, configspec):
-+        """Dynamically assign configspec for repeated section."""
-+        try:
-+            section_keys = configspec.sections
-+            scalar_keys = configspec.scalars
-+        except AttributeError:
-+            section_keys = [entry for entry in configspec 
-+                                if isinstance(configspec[entry], dict)]
-+            scalar_keys = [entry for entry in configspec 
-+                                if not isinstance(configspec[entry], dict)]
-+            
-+        if '__many__' in section_keys and len(section_keys) > 1:
-+            # FIXME: can we supply any useful information here ?
-+            raise RepeatSectionError()
-+        
-+        scalars = {}
-+        sections = {}
-+        for entry in scalar_keys:
-+            val = configspec[entry]
-+            scalars[entry] = val
-+        for entry in section_keys:
-+            val = configspec[entry]
-+            if entry == '__many__':
-+                scalars[entry] = val
-+                continue
-+            sections[entry] = val
-+            
-+        section.configspec = scalars
-+        for entry in sections:
-+            if not section.has_key(entry):
-+                section[entry] = {}
-+            self._handle_repeat(section[entry], sections[entry])
-+
-+
-+    def _write_line(self, indent_string, entry, this_entry, comment):
-+        """Write an individual line, for the write method"""
-+        # NOTE: the calls to self._quote here handles non-StringType values.
-+        if not self.unrepr:
-+            val = self._decode_element(self._quote(this_entry))
-+        else:
-+            val = repr(this_entry)
-+        return '%s%s%s%s%s' % (indent_string,
-+                               self._decode_element(self._quote(entry, multiline=False)),
-+                               self._a_to_u(' = '),
-+                               val,
-+                               self._decode_element(comment))
-+
-+
-+    def _write_marker(self, indent_string, depth, entry, comment):
-+        """Write a section marker line"""
-+        return '%s%s%s%s%s' % (indent_string,
-+                               self._a_to_u('[' * depth),
-+                               self._quote(self._decode_element(entry), multiline=False),
-+                               self._a_to_u(']' * depth),
-+                               self._decode_element(comment))
-+
-+
-+    def _handle_comment(self, comment):
-+        """Deal with a comment."""
-+        if not comment:
-+            return ''
-+        start = self.indent_type
-+        if not comment.startswith('#'):
-+            start += self._a_to_u(' # ')
-+        return (start + comment)
-+
-+
-+    # Public methods
-+
-+    def write(self, outfile=None, section=None):
-+        """
-+        Write the current ConfigObj as a file
-+        
-+        tekNico: FIXME: use StringIO instead of real files
-+        
-+        >>> filename = a.filename
-+        >>> a.filename = 'test.ini'
-+        >>> a.write()
-+        >>> a.filename = filename
-+        >>> a == ConfigObj('test.ini', raise_errors=True)
-+        1
-+        """
-+        if self.indent_type is None:
-+            # this can be true if initialised from a dictionary
-+            self.indent_type = DEFAULT_INDENT_TYPE
-+            
-+        out = []
-+        cs = self._a_to_u('#')
-+        csp = self._a_to_u('# ')
-+        if section is None:
-+            int_val = self.interpolation
-+            self.interpolation = False
-+            section = self
-+            for line in self.initial_comment:
-+                line = self._decode_element(line)
-+                stripped_line = line.strip()
-+                if stripped_line and not stripped_line.startswith(cs):
-+                    line = csp + line
-+                out.append(line)
-+                
-+        indent_string = self.indent_type * section.depth
-+        for entry in (section.scalars + section.sections):
-+            if entry in section.defaults:
-+                # don't write out default values
-+                continue
-+            for comment_line in section.comments[entry]:
-+                comment_line = self._decode_element(comment_line.lstrip())
-+                if comment_line and not comment_line.startswith(cs):
-+                    comment_line = csp + comment_line
-+                out.append(indent_string + comment_line)
-+            this_entry = section[entry]
-+            comment = self._handle_comment(section.inline_comments[entry])
-+            
-+            if isinstance(this_entry, dict):
-+                # a section
-+                out.append(self._write_marker(
-+                    indent_string,
-+                    this_entry.depth,
-+                    entry,
-+                    comment))
-+                out.extend(self.write(section=this_entry))
-+            else:
-+                out.append(self._write_line(
-+                    indent_string,
-+                    entry,
-+                    this_entry,
-+                    comment))
-+                
-+        if section is self:
-+            for line in self.final_comment:
-+                line = self._decode_element(line)
-+                stripped_line = line.strip()
-+                if stripped_line and not stripped_line.startswith(cs):
-+                    line = csp + line
-+                out.append(line)
-+            self.interpolation = int_val
-+            
-+        if section is not self:
-+            return out
-+        
-+        if (self.filename is None) and (outfile is None):
-+            # output a list of lines
-+            # might need to encode
-+            # NOTE: This will *screw* UTF16, each line will start with the BOM
-+            if self.encoding:
-+                out = [l.encode(self.encoding) for l in out]
-+            if (self.BOM and ((self.encoding is None) or
-+                (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
-+                # Add the UTF8 BOM
-+                if not out:
-+                    out.append('')
-+                out[0] = BOM_UTF8 + out[0]
-+            return out
-+        
-+        # Turn the list to a string, joined with correct newlines
-+        newline = self.newlines or os.linesep
-+        output = self._a_to_u(newline).join(out)
-+        if self.encoding:
-+            output = output.encode(self.encoding)
-+        if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
-+            # Add the UTF8 BOM
-+            output = BOM_UTF8 + output
-+            
-+        if not output.endswith(newline):
-+            output += newline
-+        if outfile is not None:
-+            outfile.write(output)
-+        else:
-+            h = open(self.filename, 'wb')
-+            h.write(output)
-+            h.close()
-+
-+
-+    def validate(self, validator, preserve_errors=False, copy=False,
-+                 section=None):
-+        """
-+        Test the ConfigObj against a configspec.
-+        
-+        It uses the ``validator`` object from *validate.py*.
-+        
-+        To run ``validate`` on the current ConfigObj, call: ::
-+        
-+            test = config.validate(validator)
-+        
-+        (Normally having previously passed in the configspec when the ConfigObj
-+        was created - you can dynamically assign a dictionary of checks to the
-+        ``configspec`` attribute of a section though).
-+        
-+        It returns ``True`` if everything passes, or a dictionary of
-+        pass/fails (True/False). If every member of a subsection passes, it
-+        will just have the value ``True``. (It also returns ``False`` if all
-+        members fail).
-+        
-+        In addition, it converts the values from strings to their native
-+        types if their checks pass (and ``stringify`` is set).
-+        
-+        If ``preserve_errors`` is ``True`` (``False`` is default) then instead
-+        of a marking a fail with a ``False``, it will preserve the actual
-+        exception object. This can contain info about the reason for failure.
-+        For example the ``VdtValueTooSmallError`` indicates that the value
-+        supplied was too small. If a value (or section) is missing it will
-+        still be marked as ``False``.
-+        
-+        You must have the validate module to use ``preserve_errors=True``.
-+        
-+        You can then use the ``flatten_errors`` function to turn your nested
-+        results dictionary into a flattened list of failures - useful for
-+        displaying meaningful error messages.
-+        """
-+        if section is None:
-+            if self.configspec is None:
-+                raise ValueError('No configspec supplied.')
-+            if preserve_errors:
-+                # We do this once to remove a top level dependency on the validate module
-+                # Which makes importing configobj faster
-+                from validate import VdtMissingValue
-+                self._vdtMissingValue = VdtMissingValue
-+            section = self
-+        #
-+        spec_section = section.configspec
-+        if copy and hasattr(section, '_configspec_initial_comment'):
-+            section.initial_comment = section._configspec_initial_comment
-+            section.final_comment = section._configspec_final_comment
-+            section.encoding = section._configspec_encoding
-+            section.BOM = section._configspec_BOM
-+            section.newlines = section._configspec_newlines
-+            section.indent_type = section._configspec_indent_type
-+            
-+        if '__many__' in section.configspec:
-+            many = spec_section['__many__']
-+            # dynamically assign the configspecs
-+            # for the sections below
-+            for entry in section.sections:
-+                self._handle_repeat(section[entry], many)
-+        #
-+        out = {}
-+        ret_true = True
-+        ret_false = True
-+        order = [k for k in section._order if k in spec_section]
-+        order += [k for k in spec_section if k not in order]
-+        for entry in order:
-+            if entry == '__many__':
-+                continue
-+            if (not entry in section.scalars) or (entry in section.defaults):
-+                # missing entries
-+                # or entries from defaults
-+                missing = True
-+                val = None
-+                if copy and not entry in section.scalars:
-+                    # copy comments
-+                    section.comments[entry] = (
-+                        section._configspec_comments.get(entry, []))
-+                    section.inline_comments[entry] = (
-+                        section._configspec_inline_comments.get(entry, ''))
-+                #
-+            else:
-+                missing = False
-+                val = section[entry]
-+            try:
-+                check = validator.check(spec_section[entry],
-+                                        val,
-+                                        missing=missing
-+                                        )
-+            except validator.baseErrorClass, e:
-+                if not preserve_errors or isinstance(e, self._vdtMissingValue):
-+                    out[entry] = False
-+                else:
-+                    # preserve the error
-+                    out[entry] = e
-+                    ret_false = False
-+                ret_true = False
-+            else:
-+                try: 
-+                    section.default_values.pop(entry, None)
-+                except AttributeError: 
-+                    # For Python 2.2 compatibility
-+                    try:
-+                        del section.default_values[entry]
-+                    except KeyError:
-+                        pass
-+                    
-+                if hasattr(validator, 'get_default_value'):
-+                    try: 
-+                        section.default_values[entry] = validator.get_default_value(spec_section[entry])
-+                    except KeyError:
-+                        # No default
-+                        pass
-+                    
-+                ret_false = False
-+                out[entry] = True
-+                if self.stringify or missing:
-+                    # if we are doing type conversion
-+                    # or the value is a supplied default
-+                    if not self.stringify:
-+                        if isinstance(check, (list, tuple)):
-+                            # preserve lists
-+                            check = [self._str(item) for item in check]
-+                        elif missing and check is None:
-+                            # convert the None from a default to a ''
-+                            check = ''
-+                        else:
-+                            check = self._str(check)
-+                    if (check != val) or missing:
-+                        section[entry] = check
-+                if not copy and missing and entry not in section.defaults:
-+                    section.defaults.append(entry)
-+        # Missing sections will have been created as empty ones when the
-+        # configspec was read.
-+        for entry in section.sections:
-+            # FIXME: this means DEFAULT is not copied in copy mode
-+            if section is self and entry == 'DEFAULT':
-+                continue
-+            if copy:
-+                section.comments[entry] = section._cs_section_comments[entry]
-+                section.inline_comments[entry] = (
-+                    section._cs_section_inline_comments[entry])
-+            check = self.validate(validator, preserve_errors=preserve_errors,
-+                copy=copy, section=section[entry])
-+            out[entry] = check
-+            if check == False:
-+                ret_true = False
-+            elif check == True:
-+                ret_false = False
-+            else:
-+                ret_true = False
-+                ret_false = False
-+        #
-+        if ret_true:
-+            return True
-+        elif ret_false:
-+            return False
-+        return out
-+
-+
-+    def reset(self):
-+        """Clear ConfigObj instance and restore to 'freshly created' state."""
-+        self.clear()
-+        self._initialise()
-+        # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
-+        #        requires an empty dictionary
-+        self.configspec = None
-+        # Just to be sure ;-)
-+        self._original_configspec = None
-+        
-+        
-+    def reload(self):
-+        """
-+        Reload a ConfigObj from file.
-+        
-+        This method raises a ``ReloadError`` if the ConfigObj doesn't have
-+        a filename attribute pointing to a file.
-+        """
-+        if not isinstance(self.filename, StringTypes):
-+            raise ReloadError()
-+
-+        filename = self.filename
-+        current_options = {}
-+        for entry in OPTION_DEFAULTS:
-+            if entry == 'configspec':
-+                continue
-+            current_options[entry] = getattr(self, entry)
-+            
-+        configspec = self._original_configspec
-+        current_options['configspec'] = configspec
-+            
-+        self.clear()
-+        self._initialise(current_options)
-+        self._load(filename, configspec)
-+        
-+
-+
-+class SimpleVal(object):
-+    """
-+    A simple validator.
-+    Can be used to check that all members expected are present.
-+    
-+    To use it, provide a configspec with all your members in (the value given
-+    will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
-+    method of your ``ConfigObj``. ``validate`` will return ``True`` if all
-+    members are present, or a dictionary with True/False meaning
-+    present/missing. (Whole missing sections will be replaced with ``False``)
-+    """
-+    
-+    def __init__(self):
-+        self.baseErrorClass = ConfigObjError
-+    
-+    def check(self, check, member, missing=False):
-+        """A dummy check method, always returns the value unchanged."""
-+        if missing:
-+            raise self.baseErrorClass()
-+        return member
-+
-+
-+# Check / processing functions for options
-+def flatten_errors(cfg, res, levels=None, results=None):
-+    """
-+    An example function that will turn a nested dictionary of results
-+    (as returned by ``ConfigObj.validate``) into a flat list.
-+    
-+    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
-+    dictionary returned by ``validate``.
-+    
-+    (This is a recursive function, so you shouldn't use the ``levels`` or
-+    ``results`` arguments - they are used by the function.
-+    
-+    Returns a list of keys that failed. Each member of the list is a tuple :
-+    ::
-+    
-+        ([list of sections...], key, result)
-+    
-+    If ``validate`` was called with ``preserve_errors=False`` (the default)
-+    then ``result`` will always be ``False``.
-+
-+    *list of sections* is a flattened list of sections that the key was found
-+    in.
-+    
-+    If the section was missing then key will be ``None``.
-+    
-+    If the value (or section) was missing then ``result`` will be ``False``.
-+    
-+    If ``validate`` was called with ``preserve_errors=True`` and a value
-+    was present, but failed the check, then ``result`` will be the exception
-+    object returned. You can use this as a string that describes the failure.
-+    
-+    For example *The value "3" is of the wrong type*.
-+    
-+    >>> import validate
-+    >>> vtor = validate.Validator()
-+    >>> my_ini = '''
-+    ...     option1 = True
-+    ...     [section1]
-+    ...     option1 = True
-+    ...     [section2]
-+    ...     another_option = Probably
-+    ...     [section3]
-+    ...     another_option = True
-+    ...     [[section3b]]
-+    ...     value = 3
-+    ...     value2 = a
-+    ...     value3 = 11
-+    ...     '''
-+    >>> my_cfg = '''
-+    ...     option1 = boolean()
-+    ...     option2 = boolean()
-+    ...     option3 = boolean(default=Bad_value)
-+    ...     [section1]
-+    ...     option1 = boolean()
-+    ...     option2 = boolean()
-+    ...     option3 = boolean(default=Bad_value)
-+    ...     [section2]
-+    ...     another_option = boolean()
-+    ...     [section3]
-+    ...     another_option = boolean()
-+    ...     [[section3b]]
-+    ...     value = integer
-+    ...     value2 = integer
-+    ...     value3 = integer(0, 10)
-+    ...         [[[section3b-sub]]]
-+    ...         value = string
-+    ...     [section4]
-+    ...     another_option = boolean()
-+    ...     '''
-+    >>> cs = my_cfg.split('\\n')
-+    >>> ini = my_ini.split('\\n')
-+    >>> cfg = ConfigObj(ini, configspec=cs)
-+    >>> res = cfg.validate(vtor, preserve_errors=True)
-+    >>> errors = []
-+    >>> for entry in flatten_errors(cfg, res):
-+    ...     section_list, key, error = entry
-+    ...     section_list.insert(0, '[root]')
-+    ...     if key is not None:
-+    ...        section_list.append(key)
-+    ...     else:
-+    ...         section_list.append('[missing]')
-+    ...     section_string = ', '.join(section_list)
-+    ...     errors.append((section_string, ' = ', error))
-+    >>> errors.sort()
-+    >>> for entry in errors:
-+    ...     print entry[0], entry[1], (entry[2] or 0)
-+    [root], option2  =  0
-+    [root], option3  =  the value "Bad_value" is of the wrong type.
-+    [root], section1, option2  =  0
-+    [root], section1, option3  =  the value "Bad_value" is of the wrong type.
-+    [root], section2, another_option  =  the value "Probably" is of the wrong type.
-+    [root], section3, section3b, section3b-sub, [missing]  =  0
-+    [root], section3, section3b, value2  =  the value "a" is of the wrong type.
-+    [root], section3, section3b, value3  =  the value "11" is too big.
-+    [root], section4, [missing]  =  0
-+    """
-+    if levels is None:
-+        # first time called
-+        levels = []
-+        results = []
-+    if res is True:
-+        return results
-+    if res is False:
-+        results.append((levels[:], None, False))
-+        if levels:
-+            levels.pop()
-+        return results
-+    for (key, val) in res.items():
-+        if val == True:
-+            continue
-+        if isinstance(cfg.get(key), dict):
-+            # Go down one level
-+            levels.append(key)
-+            flatten_errors(cfg[key], val, levels, results)
-+            continue
-+        results.append((levels[:], key, val))
-+    #
-+    # Go up one level
-+    if levels:
-+        levels.pop()
-+    #
-+    return results
-+
-+
-+"""*A programming language is a medium of expression.* - Paul Graham"""
 Index: ipython-0.10/IPython/external/configobj/__init__.py
 ===================================================================
 --- /dev/null
@@ -6971,2687 +23,6 @@ Index: ipython-0.10/IPython/external/configobj/__init__.py
 +    from configobj import *
 +except ImportError:
 +    from _configobj import *
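(Editor's note, not part of the patch: the replacement ``__init__.py`` kept above is the whole unbundling trick — prefer the system-wide ``configobj`` and fall back to the renamed bundled copy. Callers keep importing the module by its old dotted path either way; a sketch, assuming the IPython 0.10.x package layout:)

    # Sketch of how callers are unaffected by the shim above (assumes the
    # IPython 0.10.x layout): the name resolves to either the system
    # configobj or the bundled _configobj fallback.
    from IPython.external.configobj import ConfigObj

    cfg = ConfigObj()              # behaves the same with either backend
    cfg['greeting'] = 'hello'
    print cfg.write()              # prints ['greeting = hello']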
-Index: ipython-0.10/IPython/external/configobj.py
-===================================================================
---- ipython-0.10.orig/IPython/external/configobj.py
-+++ /dev/null
-@@ -1,2501 +0,0 @@
--# configobj.py
--# A config file reader/writer that supports nested sections in config files.
--# Copyright (C) 2005-2008 Michael Foord, Nicola Larosa
--# E-mail: fuzzyman AT voidspace DOT org DOT uk
--#         nico AT tekNico DOT net
--
--# ConfigObj 4
--# http://www.voidspace.org.uk/python/configobj.html
--
--# Released subject to the BSD License
--# Please see http://www.voidspace.org.uk/python/license.shtml
--
--# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
--# For information about bugfixes, updates and support, please join the
--# ConfigObj mailing list:
--# http://lists.sourceforge.net/lists/listinfo/configobj-develop
--# Comments, suggestions and bug reports welcome.
--
--from __future__ import generators
--
--import sys
--INTP_VER = sys.version_info[:2]
--if INTP_VER < (2, 2):
--    raise RuntimeError("Python v.2.2 or later needed")
--
--import os, re
--compiler = None
--try:
--    import compiler
--except ImportError:
--    # for IronPython
--    pass
--from types import StringTypes
--from warnings import warn
--try:
--    from codecs import BOM_UTF8, BOM_UTF16, BOM_UTF16_BE, BOM_UTF16_LE
--except ImportError:
--    # Python 2.2 does not have these
--    # UTF-8
--    BOM_UTF8 = '\xef\xbb\xbf'
--    # UTF-16, little endian
--    BOM_UTF16_LE = '\xff\xfe'
--    # UTF-16, big endian
--    BOM_UTF16_BE = '\xfe\xff'
--    if sys.byteorder == 'little':
--        # UTF-16, native endianness
--        BOM_UTF16 = BOM_UTF16_LE
--    else:
--        # UTF-16, native endianness
--        BOM_UTF16 = BOM_UTF16_BE
--
--# A dictionary mapping BOM to
--# the encoding to decode with, and what to set the
--# encoding attribute to.
--BOMS = {
--    BOM_UTF8: ('utf_8', None),
--    BOM_UTF16_BE: ('utf16_be', 'utf_16'),
--    BOM_UTF16_LE: ('utf16_le', 'utf_16'),
--    BOM_UTF16: ('utf_16', 'utf_16'),
--    }
--# All legal variants of the BOM codecs.
--# TODO: the list of aliases is not meant to be exhaustive, is there a
--#   better way ?
--BOM_LIST = {
--    'utf_16': 'utf_16',
--    'u16': 'utf_16',
--    'utf16': 'utf_16',
--    'utf-16': 'utf_16',
--    'utf16_be': 'utf16_be',
--    'utf_16_be': 'utf16_be',
--    'utf-16be': 'utf16_be',
--    'utf16_le': 'utf16_le',
--    'utf_16_le': 'utf16_le',
--    'utf-16le': 'utf16_le',
--    'utf_8': 'utf_8',
--    'u8': 'utf_8',
--    'utf': 'utf_8',
--    'utf8': 'utf_8',
--    'utf-8': 'utf_8',
--    }
--
--# Map of encodings to the BOM to write.
--BOM_SET = {
--    'utf_8': BOM_UTF8,
--    'utf_16': BOM_UTF16,
--    'utf16_be': BOM_UTF16_BE,
--    'utf16_le': BOM_UTF16_LE,
--    None: BOM_UTF8
--    }
--
--
--def match_utf8(encoding):
--    return BOM_LIST.get(encoding.lower()) == 'utf_8'
--
--
--# Quote strings used for writing values
--squot = "'%s'"
--dquot = '"%s"'
--noquot = "%s"
--wspace_plus = ' \r\t\n\v\t\'"'
--tsquot = '"""%s"""'
--tdquot = "'''%s'''"
--
--try:
--    enumerate
--except NameError:
--    def enumerate(obj):
--        """enumerate for Python 2.2."""
--        i = -1
--        for item in obj:
--            i += 1
--            yield i, item
--
--try:
--    True, False
--except NameError:
--    True, False = 1, 0
--
--
--__version__ = '4.5.2'
--
--__revision__ = '$Id: configobj.py 156 2006-01-31 14:57:08Z fuzzyman $'
--
--__docformat__ = "restructuredtext en"
--
--__all__ = (
--    '__version__',
--    'DEFAULT_INDENT_TYPE',
--    'DEFAULT_INTERPOLATION',
--    'ConfigObjError',
--    'NestingError',
--    'ParseError',
--    'DuplicateError',
--    'ConfigspecError',
--    'ConfigObj',
--    'SimpleVal',
--    'InterpolationError',
--    'InterpolationLoopError',
--    'MissingInterpolationOption',
--    'RepeatSectionError',
--    'ReloadError',
--    'UnreprError',
--    'UnknownType',
--    '__docformat__',
--    'flatten_errors',
--)
--
--DEFAULT_INTERPOLATION = 'configparser'
--DEFAULT_INDENT_TYPE = '    '
--MAX_INTERPOL_DEPTH = 10
--
--OPTION_DEFAULTS = {
--    'interpolation': True,
--    'raise_errors': False,
--    'list_values': True,
--    'create_empty': False,
--    'file_error': False,
--    'configspec': None,
--    'stringify': True,
--    # option may be set to one of ('', ' ', '\t')
--    'indent_type': None,
--    'encoding': None,
--    'default_encoding': None,
--    'unrepr': False,
--    'write_empty_values': False,
--}
--
--
--
--def getObj(s):
--    s = "a=" + s
--    if compiler is None:
--        raise ImportError('compiler module not available')
--    p = compiler.parse(s)
--    return p.getChildren()[1].getChildren()[0].getChildren()[1]
--
--
--class UnknownType(Exception):
--    pass
--
--
--class Builder(object):
--    
--    def build(self, o):
--        m = getattr(self, 'build_' + o.__class__.__name__, None)
--        if m is None:
--            raise UnknownType(o.__class__.__name__)
--        return m(o)
--    
--    def build_List(self, o):
--        return map(self.build, o.getChildren())
--    
--    def build_Const(self, o):
--        return o.value
--    
--    def build_Dict(self, o):
--        d = {}
--        i = iter(map(self.build, o.getChildren()))
--        for el in i:
--            d[el] = i.next()
--        return d
--    
--    def build_Tuple(self, o):
--        return tuple(self.build_List(o))
--    
--    def build_Name(self, o):
--        if o.name == 'None':
--            return None
--        if o.name == 'True':
--            return True
--        if o.name == 'False':
--            return False
--        
--        # An undefined Name
--        raise UnknownType('Undefined Name')
--    
--    def build_Add(self, o):
--        real, imag = map(self.build_Const, o.getChildren())
--        try:
--            real = float(real)
--        except TypeError:
--            raise UnknownType('Add')
--        if not isinstance(imag, complex) or imag.real != 0.0:
--            raise UnknownType('Add')
--        return real+imag
--    
--    def build_Getattr(self, o):
--        parent = self.build(o.expr)
--        return getattr(parent, o.attrname)
--    
--    def build_UnarySub(self, o):
--        return -self.build_Const(o.getChildren()[0])
--    
--    def build_UnaryAdd(self, o):
--        return self.build_Const(o.getChildren()[0])
--
--
--_builder = Builder()
--
--
--def unrepr(s):
--    if not s:
--        return s
--    return _builder.build(getObj(s))
--
--
--
--class ConfigObjError(SyntaxError):
--    """
--    This is the base class for all errors that ConfigObj raises.
--    It is a subclass of SyntaxError.
--    """
--    def __init__(self, message='', line_number=None, line=''):
--        self.line = line
--        self.line_number = line_number
--        self.message = message
--        SyntaxError.__init__(self, message)
--
--
--class NestingError(ConfigObjError):
--    """
--    This error indicates a level of nesting that doesn't match.
--    """
--
--
--class ParseError(ConfigObjError):
--    """
--    This error indicates that a line is badly written.
--    It is neither a valid ``key = value`` line,
--    nor a valid section marker line.
--    """
--
--
--class ReloadError(IOError):
--    """
--    A 'reload' operation failed.
--    This exception is a subclass of ``IOError``.
--    """
--    def __init__(self):
--        IOError.__init__(self, 'reload failed, filename is not set.')
--
--
--class DuplicateError(ConfigObjError):
--    """
--    The keyword or section specified already exists.
--    """
--
--
--class ConfigspecError(ConfigObjError):
--    """
--    An error occured whilst parsing a configspec.
--    """
--
--
--class InterpolationError(ConfigObjError):
--    """Base class for the two interpolation errors."""
--
--
--class InterpolationLoopError(InterpolationError):
--    """Maximum interpolation depth exceeded in string interpolation."""
--
--    def __init__(self, option):
--        InterpolationError.__init__(
--            self,
--            'interpolation loop detected in value "%s".' % option)
--
--
--class RepeatSectionError(ConfigObjError):
--    """
--    This error indicates additional sections in a section with a
--    ``__many__`` (repeated) section.
--    """
--
--
--class MissingInterpolationOption(InterpolationError):
--    """A value specified for interpolation was missing."""
--
--    def __init__(self, option):
--        InterpolationError.__init__(
--            self,
--            'missing option "%s" in interpolation.' % option)
--
--
--class UnreprError(ConfigObjError):
--    """An error parsing in unrepr mode."""
--
--
--
--class InterpolationEngine(object):
--    """
--    A helper class to help perform string interpolation.
--
--    This class is an abstract base class; its descendants perform
--    the actual work.
--    """
--
--    # compiled regexp to use in self.interpolate()
--    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
--
--    def __init__(self, section):
--        # the Section instance that "owns" this engine
--        self.section = section
--
--
--    def interpolate(self, key, value):
--        def recursive_interpolate(key, value, section, backtrail):
--            """The function that does the actual work.
--
--            ``value``: the string we're trying to interpolate.
--            ``section``: the section in which that string was found
--            ``backtrail``: a dict to keep track of where we've been,
--            to detect and prevent infinite recursion loops
--
--            This is similar to a depth-first-search algorithm.
--            """
--            # Have we been here already?
--            if backtrail.has_key((key, section.name)):
--                # Yes - infinite loop detected
--                raise InterpolationLoopError(key)
--            # Place a marker on our backtrail so we won't come back here again
--            backtrail[(key, section.name)] = 1
--
--            # Now start the actual work
--            match = self._KEYCRE.search(value)
--            while match:
--                # The actual parsing of the match is implementation-dependent,
--                # so delegate to our helper function
--                k, v, s = self._parse_match(match)
--                if k is None:
--                    # That's the signal that no further interpolation is needed
--                    replacement = v
--                else:
--                    # Further interpolation may be needed to obtain final value
--                    replacement = recursive_interpolate(k, v, s, backtrail)
--                # Replace the matched string with its final value
--                start, end = match.span()
--                value = ''.join((value[:start], replacement, value[end:]))
--                new_search_start = start + len(replacement)
--                # Pick up the next interpolation key, if any, for next time
--                # through the while loop
--                match = self._KEYCRE.search(value, new_search_start)
--
--            # Now safe to come back here again; remove marker from backtrail
--            del backtrail[(key, section.name)]
--
--            return value
--
--        # Back in interpolate(), all we have to do is kick off the recursive
--        # function with appropriate starting values
--        value = recursive_interpolate(key, value, self.section, {})
--        return value
--
--
--    def _fetch(self, key):
--        """Helper function to fetch values from owning section.
--
--        Returns a 2-tuple: the value, and the section where it was found.
--        """
--        # switch off interpolation before we try and fetch anything !
--        save_interp = self.section.main.interpolation
--        self.section.main.interpolation = False
--
--        # Start at section that "owns" this InterpolationEngine
--        current_section = self.section
--        while True:
--            # try the current section first
--            val = current_section.get(key)
--            if val is not None:
--                break
--            # try "DEFAULT" next
--            val = current_section.get('DEFAULT', {}).get(key)
--            if val is not None:
--                break
--            # move up to parent and try again
--            # top-level's parent is itself
--            if current_section.parent is current_section:
--                # reached top level, time to give up
--                break
--            current_section = current_section.parent
--
--        # restore interpolation to previous value before returning
--        self.section.main.interpolation = save_interp
--        if val is None:
--            raise MissingInterpolationOption(key)
--        return val, current_section
--
--
--    def _parse_match(self, match):
--        """Implementation-dependent helper function.
--
--        Will be passed a match object corresponding to the interpolation
--        key we just found (e.g., "%(foo)s" or "$foo"). Should look up that
--        key in the appropriate config file section (using the ``_fetch()``
--        helper function) and return a 3-tuple: (key, value, section)
--
--        ``key`` is the name of the key we're looking for
--        ``value`` is the value found for that key
--        ``section`` is a reference to the section where it was found
--
--        ``key`` and ``section`` should be None if no further
--        interpolation should be performed on the resulting value
--        (e.g., if we interpolated "$$" and returned "$").
--        """
--        raise NotImplementedError()
--    
--
--
--class ConfigParserInterpolation(InterpolationEngine):
--    """Behaves like ConfigParser."""
--    _KEYCRE = re.compile(r"%\(([^)]*)\)s")
--
--    def _parse_match(self, match):
--        key = match.group(1)
--        value, section = self._fetch(key)
--        return key, value, section
--
--
--
--class TemplateInterpolation(InterpolationEngine):
--    """Behaves like string.Template."""
--    _delimiter = '$'
--    _KEYCRE = re.compile(r"""
--        \$(?:
--          (?P<escaped>\$)              |   # Two $ signs
--          (?P<named>[_a-z][_a-z0-9]*)  |   # $name format
--          {(?P<braced>[^}]*)}              # ${name} format
--        )
--        """, re.IGNORECASE | re.VERBOSE)
--
--    def _parse_match(self, match):
--        # Valid name (in or out of braces): fetch value from section
--        key = match.group('named') or match.group('braced')
--        if key is not None:
--            value, section = self._fetch(key)
--            return key, value, section
--        # Escaped delimiter (e.g., $$): return single delimiter
--        if match.group('escaped') is not None:
--            # Return None for key and section to indicate it's time to stop
--            return None, self._delimiter, None
--        # Anything else: ignore completely, just return it unchanged
--        return None, match.group(), None
--
--
--interpolation_engines = {
--    'configparser': ConfigParserInterpolation,
--    'template': TemplateInterpolation,
--}
--
--
--
--class Section(dict):
--    """
--    A dictionary-like object that represents a section in a config file.
--    
--    It does string interpolation if the 'interpolation' attribute
--    of the 'main' object is set to True.
--    
--    Interpolation is tried first from this object, then from the 'DEFAULT'
--    section of this object, next from the parent and its 'DEFAULT' section,
--    and so on until the main object is reached.
--    
--    A Section will behave like an ordered dictionary - following the
--    order of the ``scalars`` and ``sections`` attributes.
--    You can use this to change the order of members.
--    
--    Iteration follows the order: scalars, then sections.
--    """
--
--    def __init__(self, parent, depth, main, indict=None, name=None):
--        """
--        * parent is the section above
--        * depth is the depth level of this section
--        * main is the main ConfigObj
--        * indict is a dictionary to initialise the section with
--        """
--        if indict is None:
--            indict = {}
--        dict.__init__(self)
--        # used for nesting level *and* interpolation
--        self.parent = parent
--        # used for the interpolation attribute
--        self.main = main
--        # level of nesting depth of this Section
--        self.depth = depth
--        # purely for information
--        self.name = name
--        #
--        self._initialise()
--        # we do this explicitly so that __setitem__ is used properly
--        # (rather than just passing to ``dict.__init__``)
--        for entry, value in indict.iteritems():
--            self[entry] = value
--            
--            
--    def _initialise(self):
--        # the sequence of scalar values in this Section
--        self.scalars = []
--        # the sequence of sections in this Section
--        self.sections = []
--        # for comments :-)
--        self.comments = {}
--        self.inline_comments = {}
--        # for the configspec
--        self.configspec = {}
--        self._order = []
--        self._configspec_comments = {}
--        self._configspec_inline_comments = {}
--        self._cs_section_comments = {}
--        self._cs_section_inline_comments = {}
--        # for defaults
--        self.defaults = []
--        self.default_values = {}
--
--
--    def _interpolate(self, key, value):
--        try:
--            # do we already have an interpolation engine?
--            engine = self._interpolation_engine
--        except AttributeError:
--            # not yet: first time running _interpolate(), so pick the engine
--            name = self.main.interpolation
--            if name == True:  # note that "if name:" would be incorrect here
--                # backwards-compatibility: interpolation=True means use default
--                name = DEFAULT_INTERPOLATION
--            name = name.lower()  # so that "Template", "template", etc. all work
--            class_ = interpolation_engines.get(name, None)
--            if class_ is None:
--                # invalid value for self.main.interpolation
--                self.main.interpolation = False
--                return value
--            else:
--                # save reference to engine so we don't have to do this again
--                engine = self._interpolation_engine = class_(self)
--        # let the engine do the actual work
--        return engine.interpolate(key, value)
--
--
--    def __getitem__(self, key):
--        """Fetch the item and do string interpolation."""
--        val = dict.__getitem__(self, key)
--        if self.main.interpolation and isinstance(val, StringTypes):
--            return self._interpolate(key, val)
--        return val
--
--
--    def __setitem__(self, key, value, unrepr=False):
--        """
--        Correctly set a value, making dictionary values Section instances.
--        
--        (We have to special case 'Section' instances - which are also dicts.)
--        
--        Keys must be strings.
--        Values need only be strings (or lists of strings) if
--        ``main.stringify`` is set.
--        
--        ``unrepr`` must be set when setting a value to a dictionary, without
--        creating a new sub-section.
--        """
--        if not isinstance(key, StringTypes):
--            raise ValueError('The key "%s" is not a string.' % key)
--        
--        # add the comment
--        if not self.comments.has_key(key):
--            self.comments[key] = []
--            self.inline_comments[key] = ''
--        # remove the entry from defaults
--        if key in self.defaults:
--            self.defaults.remove(key)
--        #
--        if isinstance(value, Section):
--            if not self.has_key(key):
--                self.sections.append(key)
--            dict.__setitem__(self, key, value)
--        elif isinstance(value, dict) and not unrepr:
--            # First create the new depth level,
--            # then create the section
--            if not self.has_key(key):
--                self.sections.append(key)
--            new_depth = self.depth + 1
--            dict.__setitem__(
--                self,
--                key,
--                Section(
--                    self,
--                    new_depth,
--                    self.main,
--                    indict=value,
--                    name=key))
--        else:
--            if not self.has_key(key):
--                self.scalars.append(key)
--            if not self.main.stringify:
--                if isinstance(value, StringTypes):
--                    pass
--                elif isinstance(value, (list, tuple)):
--                    for entry in value:
--                        if not isinstance(entry, StringTypes):
--                            raise TypeError('Value is not a string "%s".' % entry)
--                else:
--                    raise TypeError('Value is not a string "%s".' % value)
--            dict.__setitem__(self, key, value)
--
--
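A small sketch of the ``stringify`` behaviour described above, assuming ``ConfigObj`` is importable as below: with ``stringify`` switched off, only strings (or lists of strings) are accepted as values.

    from configobj import ConfigObj  # import path is an assumption

    cfg = ConfigObj()
    cfg['name'] = 'value'       # fine: a plain string
    cfg.stringify = False
    try:
        cfg['port'] = 8080      # not a string, so it is rejected
    except TypeError:
        pass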
--    def __delitem__(self, key):
--        """Remove items from the sequence when deleting."""
--        dict.__delitem__(self, key)
--        if key in self.scalars:
--            self.scalars.remove(key)
--        else:
--            self.sections.remove(key)
--        del self.comments[key]
--        del self.inline_comments[key]
--
--
--    def get(self, key, default=None):
--        """A version of ``get`` that doesn't bypass string interpolation."""
--        try:
--            return self[key]
--        except KeyError:
--            return default
--
--
--    def update(self, indict):
--        """
--        A version of update that uses our ``__setitem__``.
--        """
--        for entry in indict:
--            self[entry] = indict[entry]
--
--
--    def pop(self, key, *args):
--        """
--        'D.pop(k[,d]) -> v, remove specified key and return the corresponding value.
--        If key is not found, d is returned if given, otherwise KeyError is raised'
--        """
--        val = dict.pop(self, key, *args)
--        if key in self.scalars:
--            del self.comments[key]
--            del self.inline_comments[key]
--            self.scalars.remove(key)
--        elif key in self.sections:
--            del self.comments[key]
--            del self.inline_comments[key]
--            self.sections.remove(key)
--        if self.main.interpolation and isinstance(val, StringTypes):
--            return self._interpolate(key, val)
--        return val
--
--
--    def popitem(self):
--        """Pops the first (key,val)"""
--        sequence = (self.scalars + self.sections)
--        if not sequence:
--            raise KeyError(": 'popitem(): dictionary is empty'")
--        key = sequence[0]
--        val =  self[key]
--        del self[key]
--        return key, val
--
--
--    def clear(self):
--        """
--        A version of clear that also affects scalars/sections.
--        It also clears comments and configspec.
--        
--        Leaves other attributes alone:
--            depth/main/parent are not affected
--        """
--        dict.clear(self)
--        self.scalars = []
--        self.sections = []
--        self.comments = {}
--        self.inline_comments = {}
--        self.configspec = {}
--
--
--    def setdefault(self, key, default=None):
--        """A version of setdefault that sets sequence if appropriate."""
--        try:
--            return self[key]
--        except KeyError:
--            self[key] = default
--            return self[key]
--
--
--    def items(self):
--        """D.items() -> list of D's (key, value) pairs, as 2-tuples"""
--        return zip((self.scalars + self.sections), self.values())
--
--
--    def keys(self):
--        """D.keys() -> list of D's keys"""
--        return (self.scalars + self.sections)
--
--
--    def values(self):
--        """D.values() -> list of D's values"""
--        return [self[key] for key in (self.scalars + self.sections)]
--
--
--    def iteritems(self):
--        """D.iteritems() -> an iterator over the (key, value) items of D"""
--        return iter(self.items())
--
--
--    def iterkeys(self):
--        """D.iterkeys() -> an iterator over the keys of D"""
--        return iter((self.scalars + self.sections))
--
--    __iter__ = iterkeys
--
--
--    def itervalues(self):
--        """D.itervalues() -> an iterator over the values of D"""
--        return iter(self.values())
--
--
--    def __repr__(self):
--        """x.__repr__() <==> repr(x)"""
--        return '{%s}' % ', '.join([('%s: %s' % (repr(key), repr(self[key])))
--            for key in (self.scalars + self.sections)])
--
--    __str__ = __repr__
--    __str__.__doc__ = "x.__str__() <==> str(x)"
--
--
--    # Extra methods - not in a normal dictionary
--
--    def dict(self):
--        """
--        Return a deepcopy of self as a dictionary.
--        
--        All members that are ``Section`` instances are recursively turned to
--        ordinary dictionaries - by calling their ``dict`` method.
--        
--        >>> n = a.dict()
--        >>> n == a
--        1
--        >>> n is a
--        0
--        """
--        newdict = {}
--        for entry in self:
--            this_entry = self[entry]
--            if isinstance(this_entry, Section):
--                this_entry = this_entry.dict()
--            elif isinstance(this_entry, list):
--                # create a copy rather than a reference
--                this_entry = list(this_entry)
--            elif isinstance(this_entry, tuple):
--                # create a copy rather than a reference
--                this_entry = tuple(this_entry)
--            newdict[entry] = this_entry
--        return newdict
--
--
--    def merge(self, indict):
--        """
--        A recursive update - useful for merging config files.
--        
--        >>> a = '''[section1]
--        ...     option1 = True
--        ...     [[subsection]]
--        ...     more_options = False
--        ...     # end of file'''.splitlines()
--        >>> b = '''# File is user.ini
--        ...     [section1]
--        ...     option1 = False
--        ...     # end of file'''.splitlines()
--        >>> c1 = ConfigObj(b)
--        >>> c2 = ConfigObj(a)
--        >>> c2.merge(c1)
--        >>> c2
--        {'section1': {'option1': 'False', 'subsection': {'more_options': 'False'}}}
--        """
--        for key, val in indict.items():
--            if (key in self and isinstance(self[key], dict) and
--                                isinstance(val, dict)):
--                self[key].merge(val)
--            else:   
--                self[key] = val
--
--
--    def rename(self, oldkey, newkey):
--        """
--        Change a keyname to another, without changing position in sequence.
--        
--        Implemented so that transformations can be made on keys,
--        as well as on values. (used by encode and decode)
--        
--        Also renames comments.
--        """
--        if oldkey in self.scalars:
--            the_list = self.scalars
--        elif oldkey in self.sections:
--            the_list = self.sections
--        else:
--            raise KeyError('Key "%s" not found.' % oldkey)
--        pos = the_list.index(oldkey)
--        #
--        val = self[oldkey]
--        dict.__delitem__(self, oldkey)
--        dict.__setitem__(self, newkey, val)
--        the_list.remove(oldkey)
--        the_list.insert(pos, newkey)
--        comm = self.comments[oldkey]
--        inline_comment = self.inline_comments[oldkey]
--        del self.comments[oldkey]
--        del self.inline_comments[oldkey]
--        self.comments[newkey] = comm
--        self.inline_comments[newkey] = inline_comment
--
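For illustration (same import assumption as above), ``rename`` keeps the member's position in the sequence and carries its comments over to the new key:

    from configobj import ConfigObj  # import path is an assumption

    cfg = ConfigObj(['first = 1', 'second = 2'])
    cfg.rename('first', 'primary')
    assert cfg.scalars == ['primary', 'second']   # order preserved
    assert cfg['primary'] == '1'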
--
--    def walk(self, function, raise_errors=True,
--            call_on_sections=False, **keywargs):
--        """
--        Walk every member and call a function on the keyword and value.
--        
--        Return a dictionary of the return values
--        
--        If the function raises an exception, raise the error
--        unless ``raise_errors=False``, in which case set the return value to
--        ``False``.
--        
--        Any unrecognised keyword arguments you pass to walk will be passed on
--        to the function you pass in.
--        
--        Note: if ``call_on_sections`` is ``True`` then - on encountering a
--        subsection, *first* the function is called for the *whole* subsection,
--        and then it recurses into its members. This means your function must be
--        able to handle strings, dictionaries and lists. This allows you
--        to change the keys of subsections as well as of ordinary members. The
--        return value when called on the whole subsection has to be discarded.
--        
--        See  the encode and decode methods for examples, including functions.
--        
--        .. caution::
--        
--            You can use ``walk`` to transform the names of members of a section
--            but you mustn't add or delete members.
--        
--        >>> config = '''[XXXXsection]
--        ... XXXXkey = XXXXvalue'''.splitlines()
--        >>> cfg = ConfigObj(config)
--        >>> cfg
--        {'XXXXsection': {'XXXXkey': 'XXXXvalue'}}
--        >>> def transform(section, key):
--        ...     val = section[key]
--        ...     newkey = key.replace('XXXX', 'CLIENT1')
--        ...     section.rename(key, newkey)
--        ...     if isinstance(val, (tuple, list, dict)):
--        ...         pass
--        ...     else:
--        ...         val = val.replace('XXXX', 'CLIENT1')
--        ...         section[newkey] = val
--        >>> cfg.walk(transform, call_on_sections=True)
--        {'CLIENT1section': {'CLIENT1key': None}}
--        >>> cfg
--        {'CLIENT1section': {'CLIENT1key': 'CLIENT1value'}}
--        """
--        out = {}
--        # scalars first
--        for i in range(len(self.scalars)):
--            entry = self.scalars[i]
--            try:
--                val = function(self, entry, **keywargs)
--                # bound again in case name has changed
--                entry = self.scalars[i]
--                out[entry] = val
--            except Exception:
--                if raise_errors:
--                    raise
--                else:
--                    entry = self.scalars[i]
--                    out[entry] = False
--        # then sections
--        for i in range(len(self.sections)):
--            entry = self.sections[i]
--            if call_on_sections:
--                try:
--                    function(self, entry, **keywargs)
--                except Exception:
--                    if raise_errors:
--                        raise
--                    else:
--                        entry = self.sections[i]
--                        out[entry] = False
--                # bound again in case name has changed
--                entry = self.sections[i]
--            # previous result is discarded
--            out[entry] = self[entry].walk(
--                function,
--                raise_errors=raise_errors,
--                call_on_sections=call_on_sections,
--                **keywargs)
--        return out
--
--
--    def decode(self, encoding):
--        """
--        Decode all strings and values to unicode, using the specified encoding.
--        
--        Works with subsections and list values.
--        
--        Uses the ``walk`` method.
--        
--        Testing ``encode`` and ``decode``.
--        >>> m = ConfigObj(a)
--        >>> m.decode('ascii')
--        >>> def testuni(val):
--        ...     for entry in val:
--        ...         if not isinstance(entry, unicode):
--        ...             print >> sys.stderr, type(entry)
--        ...             raise AssertionError, 'decode failed.'
--        ...         if isinstance(val[entry], dict):
--        ...             testuni(val[entry])
--        ...         elif not isinstance(val[entry], unicode):
--        ...             raise AssertionError, 'decode failed.'
--        >>> testuni(m)
--        >>> m.encode('ascii')
--        >>> a == m
--        1
--        """
--        warn('use of ``decode`` is deprecated.', DeprecationWarning)
--        def decode(section, key, encoding=encoding, warn=True):
--            """ """
--            val = section[key]
--            if isinstance(val, (list, tuple)):
--                newval = []
--                for entry in val:
--                    newval.append(entry.decode(encoding))
--            elif isinstance(val, dict):
--                newval = val
--            else:
--                newval = val.decode(encoding)
--            newkey = key.decode(encoding)
--            section.rename(key, newkey)
--            section[newkey] = newval
--        # using ``call_on_sections`` allows us to modify section names
--        self.walk(decode, call_on_sections=True)
--
--
--    def encode(self, encoding):
--        """
--        Encode all strings and values from unicode,
--        using the specified encoding.
--        
--        Works with subsections and list values.
--        Uses the ``walk`` method.
--        """
--        warn('use of ``encode`` is deprecated.', DeprecationWarning)
--        def encode(section, key, encoding=encoding):
--            """ """
--            val = section[key]
--            if isinstance(val, (list, tuple)):
--                newval = []
--                for entry in val:
--                    newval.append(entry.encode(encoding))
--            elif isinstance(val, dict):
--                newval = val
--            else:
--                newval = val.encode(encoding)
--            newkey = key.encode(encoding)
--            section.rename(key, newkey)
--            section[newkey] = newval
--        self.walk(encode, call_on_sections=True)
--
--
--    def istrue(self, key):
--        """A deprecated version of ``as_bool``."""
--        warn('use of ``istrue`` is deprecated. Use ``as_bool`` method '
--                'instead.', DeprecationWarning)
--        return self.as_bool(key)
--
--
--    def as_bool(self, key):
--        """
--        Accepts a key as input. The corresponding value must be a string or
--        the objects (``True`` or 1) or (``False`` or 0). We allow 0 and 1 to
--        retain compatibility with Python 2.2.
--        
--        If the string is one of  ``True``, ``On``, ``Yes``, or ``1`` it returns 
--        ``True``.
--        
--        If the string is one of  ``False``, ``Off``, ``No``, or ``0`` it returns 
--        ``False``.
--        
--        ``as_bool`` is not case sensitive.
--        
--        Any other input will raise a ``ValueError``.
--        
--        >>> a = ConfigObj()
--        >>> a['a'] = 'fish'
--        >>> a.as_bool('a')
--        Traceback (most recent call last):
--        ValueError: Value "fish" is neither True nor False
--        >>> a['b'] = 'True'
--        >>> a.as_bool('b')
--        1
--        >>> a['b'] = 'off'
--        >>> a.as_bool('b')
--        0
--        """
--        val = self[key]
--        if val == True:
--            return True
--        elif val == False:
--            return False
--        else:
--            try:
--                if not isinstance(val, StringTypes):
--                    # TODO: Why do we raise a KeyError here?
--                    raise KeyError()
--                else:
--                    return self.main._bools[val.lower()]
--            except KeyError:
--                raise ValueError('Value "%s" is neither True nor False' % val)
--
--
--    def as_int(self, key):
--        """
--        A convenience method which coerces the specified value to an integer.
--        
--        If the value is an invalid literal for ``int``, a ``ValueError`` will
--        be raised.
--        
--        >>> a = ConfigObj()
--        >>> a['a'] = 'fish'
--        >>> a.as_int('a')
--        Traceback (most recent call last):
--        ValueError: invalid literal for int(): fish
--        >>> a['b'] = '1'
--        >>> a.as_int('b')
--        1
--        >>> a['b'] = '3.2'
--        >>> a.as_int('b')
--        Traceback (most recent call last):
--        ValueError: invalid literal for int(): 3.2
--        """
--        return int(self[key])
--
--
--    def as_float(self, key):
--        """
--        A convenience method which coerces the specified value to a float.
--        
--        If the value is an invalid literal for ``float``, a ``ValueError`` will
--        be raised.
--        
--        >>> a = ConfigObj()
--        >>> a['a'] = 'fish'
--        >>> a.as_float('a')
--        Traceback (most recent call last):
--        ValueError: invalid literal for float(): fish
--        >>> a['b'] = '1'
--        >>> a.as_float('b')
--        1.0
--        >>> a['b'] = '3.2'
--        >>> a.as_float('b')
--        3.2000000000000002
--        """
--        return float(self[key])
--
--
--    def restore_default(self, key):
--        """
--        Restore (and return) default value for the specified key.
--        
--        This method will only work for a ConfigObj that was created
--        with a configspec and has been validated.
--        
--        If there is no default value for this key, ``KeyError`` is raised.
--        """
--        default = self.default_values[key]
--        dict.__setitem__(self, key, default)
--        if key not in self.defaults:
--            self.defaults.append(key)
--        return default
--
--    
--    def restore_defaults(self):
--        """
--        Recursively restore default values to all members
--        that have them.
--        
--        This method will only work for a ConfigObj that was created
--        with a configspec and has been validated.
--        
--        It doesn't delete or modify entries without default values.
--        """
--        for key in self.default_values:
--            self.restore_default(key)
--            
--        for section in self.sections:
--            self[section].restore_defaults()
--
--
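A hedged sketch of ``restore_default``: it only works after validating against a configspec, which relies on the companion ``validate`` module (both import paths are assumptions here):

    from configobj import ConfigObj   # import paths are assumptions
    from validate import Validator

    spec = ['port = integer(default=8080)']
    cfg = ConfigObj(['port = 9000'], configspec=spec)
    cfg.validate(Validator())          # validation fills in default_values
    assert cfg['port'] == 9000
    assert cfg.restore_default('port') == 8080
    assert cfg['port'] == 8080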
--class ConfigObj(Section):
--    """An object to read, create, and write config files."""
--
--    _keyword = re.compile(r'''^ # line start
--        (\s*)                   # indentation
--        (                       # keyword
--            (?:".*?")|          # double quotes
--            (?:'.*?')|          # single quotes
--            (?:[^'"=].*?)       # no quotes
--        )
--        \s*=\s*                 # divider
--        (.*)                    # value (including list values and comments)
--        $   # line end
--        ''',
--        re.VERBOSE)
--
--    _sectionmarker = re.compile(r'''^
--        (\s*)                     # 1: indentation
--        ((?:\[\s*)+)              # 2: section marker open
--        (                         # 3: section name open
--            (?:"\s*\S.*?\s*")|    # at least one non-space with double quotes
--            (?:'\s*\S.*?\s*')|    # at least one non-space with single quotes
--            (?:[^'"\s].*?)        # at least one non-space unquoted
--        )                         # section name close
--        ((?:\s*\])+)              # 4: section marker close
--        \s*(\#.*)?                # 5: optional comment
--        $''',
--        re.VERBOSE)
--
--    # this regexp pulls list values out as a single string
--    # or single values and comments
--    # FIXME: this regex adds a '' to the end of comma terminated lists
--    #   workaround in ``_handle_value``
--    _valueexp = re.compile(r'''^
--        (?:
--            (?:
--                (
--                    (?:
--                        (?:
--                            (?:".*?")|              # double quotes
--                            (?:'.*?')|              # single quotes
--                            (?:[^'",\#][^,\#]*?)    # unquoted
--                        )
--                        \s*,\s*                     # comma
--                    )*      # match all list items ending in a comma (if any)
--                )
--                (
--                    (?:".*?")|                      # double quotes
--                    (?:'.*?')|                      # single quotes
--                    (?:[^'",\#\s][^,]*?)|           # unquoted
--                    (?:(?<!,))                      # Empty value
--                )?          # last item in a list - or string value
--            )|
--            (,)             # alternatively a single comma - empty list
--        )
--        \s*(\#.*)?          # optional comment
--        $''',
--        re.VERBOSE)
--
--    # use findall to get the members of a list value
--    _listvalueexp = re.compile(r'''
--        (
--            (?:".*?")|          # double quotes
--            (?:'.*?')|          # single quotes
--            (?:[^'",\#].*?)       # unquoted
--        )
--        \s*,\s*                 # comma
--        ''',
--        re.VERBOSE)
--
--    # this regexp is used for the value
--    # when lists are switched off
--    _nolistvalue = re.compile(r'''^
--        (
--            (?:".*?")|          # double quotes
--            (?:'.*?')|          # single quotes
--            (?:[^'"\#].*?)|     # unquoted
--            (?:)                # Empty value
--        )
--        \s*(\#.*)?              # optional comment
--        $''',
--        re.VERBOSE)
--
--    # regexes for finding triple quoted values on one line
--    _single_line_single = re.compile(r"^'''(.*?)'''\s*(#.*)?$")
--    _single_line_double = re.compile(r'^"""(.*?)"""\s*(#.*)?$')
--    _multi_line_single = re.compile(r"^(.*?)'''\s*(#.*)?$")
--    _multi_line_double = re.compile(r'^(.*?)"""\s*(#.*)?$')
--
--    _triple_quote = {
--        "'''": (_single_line_single, _multi_line_single),
--        '"""': (_single_line_double, _multi_line_double),
--    }
--
--    # Used by the ``istrue`` Section method
--    _bools = {
--        'yes': True, 'no': False,
--        'on': True, 'off': False,
--        '1': True, '0': False,
--        'true': True, 'false': False,
--        }
--
--
--    def __init__(self, infile=None, options=None, **kwargs):
--        """
--        Parse a config file or create a config file object.
--        
--        ``ConfigObj(infile=None, options=None, **kwargs)``
--        """
--        # init the superclass
--        Section.__init__(self, self, 0, self)
--        
--        if infile is None:
--            infile = []
--        if options is None:
--            options = {}
--        else:
--            options = dict(options)
--            
--        # keyword arguments take precedence over an options dictionary
--        options.update(kwargs)
--        
--        defaults = OPTION_DEFAULTS.copy()
--        # TODO: check the values too.
--        for entry in options:
--            if entry not in defaults:
--                raise TypeError('Unrecognised option "%s".' % entry)
--        
--        # Add any explicit options to the defaults
--        defaults.update(options)
--        self._initialise(defaults)
--        configspec = defaults['configspec']
--        self._original_configspec = configspec
--        self._load(infile, configspec)
--        
--        
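Because the constructor above merges ``options`` with ``**kwargs`` and lets the keyword arguments win, a sketch like the following (file name is an assumption) ends up with ``raise_errors=True``:

    from configobj import ConfigObj  # import path is an assumption

    opts = {'raise_errors': False, 'list_values': True}
    cfg = ConfigObj('settings.ini', options=opts, raise_errors=True)
    assert cfg.raise_errors is True   # kwargs take precedence over options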
--    def _load(self, infile, configspec):
--        if isinstance(infile, StringTypes):
--            self.filename = infile
--            if os.path.isfile(infile):
--                h = open(infile, 'rb')
--                infile = h.read() or []
--                h.close()
--            elif self.file_error:
--                # raise an error if the file doesn't exist
--                raise IOError('Config file not found: "%s".' % self.filename)
--            else:
--                # file doesn't already exist
--                if self.create_empty:
--                    # this is a good test that the filename specified
--                    # isn't impossible - like on a non-existent device
--                    h = open(infile, 'w')
--                    h.write('')
--                    h.close()
--                infile = []
--                
--        elif isinstance(infile, (list, tuple)):
--            infile = list(infile)
--            
--        elif isinstance(infile, dict):
--            # initialise self
--            # the Section class handles creating subsections
--            if isinstance(infile, ConfigObj):
--                # get a copy of our ConfigObj
--                infile = infile.dict()
--                
--            for entry in infile:
--                self[entry] = infile[entry]
--            del self._errors
--            
--            if configspec is not None:
--                self._handle_configspec(configspec)
--            else:
--                self.configspec = None
--            return
--        
--        elif hasattr(infile, 'read'):
--            # This supports file like objects
--            infile = infile.read() or []
--            # needs splitting into lines - but needs doing *after* decoding
--            # in case it's not an 8 bit encoding
--        else:
--            raise TypeError('infile must be a filename, file like object, or list of lines.')
--        
--        if infile:
--            # don't do it for the empty ConfigObj
--            infile = self._handle_bom(infile)
--            # infile is now *always* a list
--            #
--            # Set the newlines attribute (first line ending it finds)
--            # and strip trailing '\n' or '\r' from lines
--            for line in infile:
--                if (not line) or (line[-1] not in ('\r', '\n', '\r\n')):
--                    continue
--                for end in ('\r\n', '\n', '\r'):
--                    if line.endswith(end):
--                        self.newlines = end
--                        break
--                break
--
--            infile = [line.rstrip('\r\n') for line in infile]
--            
--        self._parse(infile)
--        # if we had any errors, now is the time to raise them
--        if self._errors:
--            info = "at line %s." % self._errors[0].line_number
--            if len(self._errors) > 1:
--                msg = "Parsing failed with several errors.\nFirst error %s" % info
--                error = ConfigObjError(msg)
--            else:
--                error = self._errors[0]
--            # set the errors attribute; it's a list of tuples:
--            # (error_type, message, line_number)
--            error.errors = self._errors
--            # set the config attribute
--            error.config = self
--            raise error
--        # delete private attributes
--        del self._errors
--        
--        if configspec is None:
--            self.configspec = None
--        else:
--            self._handle_configspec(configspec)
--    
--    
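As ``_load`` above shows, the constructor accepts several forms of ``infile``; a short sketch (the file names are assumptions):

    from configobj import ConfigObj  # import path is an assumption

    c1 = ConfigObj('app.ini')                        # filename on disk
    c2 = ConfigObj(['a = 1', '[sec]', 'b = 2'])      # list of lines
    c3 = ConfigObj({'a': '1', 'sec': {'b': '2'}})    # plain dict
    c4 = ConfigObj(open('app.ini', 'rb'))            # file-like object (assumes app.ini exists)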
--    def _initialise(self, options=None):
--        if options is None:
--            options = OPTION_DEFAULTS
--            
--        # initialise a few variables
--        self.filename = None
--        self._errors = []
--        self.raise_errors = options['raise_errors']
--        self.interpolation = options['interpolation']
--        self.list_values = options['list_values']
--        self.create_empty = options['create_empty']
--        self.file_error = options['file_error']
--        self.stringify = options['stringify']
--        self.indent_type = options['indent_type']
--        self.encoding = options['encoding']
--        self.default_encoding = options['default_encoding']
--        self.BOM = False
--        self.newlines = None
--        self.write_empty_values = options['write_empty_values']
--        self.unrepr = options['unrepr']
--        
--        self.initial_comment = []
--        self.final_comment = []
--        self.configspec = {}
--        
--        # Clear section attributes as well
--        Section._initialise(self)
--        
--        
--    def __repr__(self):
--        return ('ConfigObj({%s})' % 
--                ', '.join([('%s: %s' % (repr(key), repr(self[key]))) 
--                for key in (self.scalars + self.sections)]))
--    
--    
--    def _handle_bom(self, infile):
--        """
--        Handle any BOM, and decode if necessary.
--        
--        If an encoding is specified, that *must* be used - but the BOM should
--        still be removed (and the BOM attribute set).
--        
--        (If the encoding is wrongly specified, then a BOM for an alternative
--        encoding won't be discovered or removed.)
--        
--        If an encoding is not specified, UTF8 or UTF16 BOM will be detected and
--        removed. The BOM attribute will be set. UTF16 will be decoded to
--        unicode.
--        
--        NOTE: This method must not be called with an empty ``infile``.
--        
--        Specifying the *wrong* encoding is likely to cause a
--        ``UnicodeDecodeError``.
--        
--        ``infile`` must always be returned as a list of lines, but may be
--        passed in as a single string.
--        """
--        if ((self.encoding is not None) and
--            (self.encoding.lower() not in BOM_LIST)):
--            # No need to check for a BOM
--            # the encoding specified doesn't have one
--            # just decode
--            return self._decode(infile, self.encoding)
--        
--        if isinstance(infile, (list, tuple)):
--            line = infile[0]
--        else:
--            line = infile
--        if self.encoding is not None:
--            # encoding explicitly supplied
--            # And it could have an associated BOM
--            # TODO: if encoding is just UTF16 - we ought to check for both
--            # TODO: big endian and little endian versions.
--            enc = BOM_LIST[self.encoding.lower()]
--            if enc == 'utf_16':
--                # For UTF16 we try big endian and little endian
--                for BOM, (encoding, final_encoding) in BOMS.items():
--                    if not final_encoding:
--                        # skip UTF8
--                        continue
--                    if infile.startswith(BOM):
--                        ### BOM discovered
--                        ##self.BOM = True
--                        # Don't need to remove BOM
--                        return self._decode(infile, encoding)
--                    
--                # If we get this far, will *probably* raise a DecodeError
--                # As it doesn't appear to start with a BOM
--                return self._decode(infile, self.encoding)
--            
--            # Must be UTF8
--            BOM = BOM_SET[enc]
--            if not line.startswith(BOM):
--                return self._decode(infile, self.encoding)
--            
--            newline = line[len(BOM):]
--            
--            # BOM removed
--            if isinstance(infile, (list, tuple)):
--                infile[0] = newline
--            else:
--                infile = newline
--            self.BOM = True
--            return self._decode(infile, self.encoding)
--        
--        # No encoding specified - so we need to check for UTF8/UTF16
--        for BOM, (encoding, final_encoding) in BOMS.items():
--            if not line.startswith(BOM):
--                continue
--            else:
--                # BOM discovered
--                self.encoding = final_encoding
--                if not final_encoding:
--                    self.BOM = True
--                    # UTF8
--                    # remove BOM
--                    newline = line[len(BOM):]
--                    if isinstance(infile, (list, tuple)):
--                        infile[0] = newline
--                    else:
--                        infile = newline
--                    # UTF8 - don't decode
--                    if isinstance(infile, StringTypes):
--                        return infile.splitlines(True)
--                    else:
--                        return infile
--                # UTF16 - have to decode
--                return self._decode(infile, encoding)
--            
--        # No BOM discovered and no encoding specified, just return
--        if isinstance(infile, StringTypes):
--            # infile read from a file will be a single string
--            return infile.splitlines(True)
--        return infile
--
--
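The practical upshot of the BOM handling above, as a sketch (file name is an assumption): passing ``encoding`` makes ConfigObj decode the raw bytes and strip a matching BOM, recording it on the ``BOM`` attribute.

    from configobj import ConfigObj  # import path is an assumption

    cfg = ConfigObj('intl.ini', encoding='utf-8')
    # cfg.BOM is True if a UTF-8 BOM was found and removed;
    # after decoding, values are unicode strings.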
--    def _a_to_u(self, aString):
--        """Decode ASCII strings to unicode if a self.encoding is specified."""
--        if self.encoding:
--            return aString.decode('ascii')
--        else:
--            return aString
--
--
--    def _decode(self, infile, encoding):
--        """
--        Decode infile to unicode, using the specified encoding.
--        
--        If it is a string, it also needs converting to a list.
--        """
--        if isinstance(infile, StringTypes):
--            # can't be unicode
--            # NOTE: Could raise a ``UnicodeDecodeError``
--            return infile.decode(encoding).splitlines(True)
--        for i, line in enumerate(infile):
--            if not isinstance(line, unicode):
--                # NOTE: The isinstance test here handles mixed lists of unicode/string
--                # NOTE: But the decode will break on any non-string values
--                # NOTE: Or could raise a ``UnicodeDecodeError``
--                infile[i] = line.decode(encoding)
--        return infile
--
--
--    def _decode_element(self, line):
--        """Decode element to unicode if necessary."""
--        if not self.encoding:
--            return line
--        if isinstance(line, str) and self.default_encoding:
--            return line.decode(self.default_encoding)
--        return line
--
--
--    def _str(self, value):
--        """
--        Used by ``stringify`` within validate, to turn non-string values
--        into strings.
--        """
--        if not isinstance(value, StringTypes):
--            return str(value)
--        else:
--            return value
--
--
--    def _parse(self, infile):
--        """Actually parse the config file."""
--        temp_list_values = self.list_values
--        if self.unrepr:
--            self.list_values = False
--            
--        comment_list = []
--        done_start = False
--        this_section = self
--        maxline = len(infile) - 1
--        cur_index = -1
--        reset_comment = False
--        
--        while cur_index < maxline:
--            if reset_comment:
--                comment_list = []
--            cur_index += 1
--            line = infile[cur_index]
--            sline = line.strip()
--            # do we have anything on the line ?
--            if not sline or sline.startswith('#'):
--                reset_comment = False
--                comment_list.append(line)
--                continue
--            
--            if not done_start:
--                # preserve initial comment
--                self.initial_comment = comment_list
--                comment_list = []
--                done_start = True
--                
--            reset_comment = True
--            # first we check if it's a section marker
--            mat = self._sectionmarker.match(line)
--            if mat is not None:
--                # is a section line
--                (indent, sect_open, sect_name, sect_close, comment) = mat.groups()
--                if indent and (self.indent_type is None):
--                    self.indent_type = indent
--                cur_depth = sect_open.count('[')
--                if cur_depth != sect_close.count(']'):
--                    self._handle_error("Cannot compute the section depth at line %s.",
--                                       NestingError, infile, cur_index)
--                    continue
--                
--                if cur_depth < this_section.depth:
--                    # the new section is dropping back to a previous level
--                    try:
--                        parent = self._match_depth(this_section,
--                                                   cur_depth).parent
--                    except SyntaxError:
--                        self._handle_error("Cannot compute nesting level at line %s.",
--                                           NestingError, infile, cur_index)
--                        continue
--                elif cur_depth == this_section.depth:
--                    # the new section is a sibling of the current section
--                    parent = this_section.parent
--                elif cur_depth == this_section.depth + 1:
--                    # the new section is a child of the current section
--                    parent = this_section
--                else:
--                    self._handle_error("Section too nested at line %s.",
--                                       NestingError, infile, cur_index)
--                    
--                sect_name = self._unquote(sect_name)
--                if parent.has_key(sect_name):
--                    self._handle_error('Duplicate section name at line %s.',
--                                       DuplicateError, infile, cur_index)
--                    continue
--                
--                # create the new section
--                this_section = Section(
--                    parent,
--                    cur_depth,
--                    self,
--                    name=sect_name)
--                parent[sect_name] = this_section
--                parent.inline_comments[sect_name] = comment
--                parent.comments[sect_name] = comment_list
--                continue
--            #
--            # it's not a section marker,
--            # so it should be a valid ``key = value`` line
--            mat = self._keyword.match(line)
--            if mat is None:
--                # it matched neither as a keyword
--                # nor as a section marker
--                self._handle_error(
--                    'Invalid line at line "%s".',
--                    ParseError, infile, cur_index)
--            else:
--                # is a keyword value
--                # value will include any inline comment
--                (indent, key, value) = mat.groups()
--                if indent and (self.indent_type is None):
--                    self.indent_type = indent
--                # check for a multiline value
--                if value[:3] in ['"""', "'''"]:
--                    try:
--                        (value, comment, cur_index) = self._multiline(
--                            value, infile, cur_index, maxline)
--                    except SyntaxError:
--                        self._handle_error(
--                            'Parse error in value at line %s.',
--                            ParseError, infile, cur_index)
--                        continue
--                    else:
--                        if self.unrepr:
--                            comment = ''
--                            try:
--                                value = unrepr(value)
--                            except Exception, e:
--                                if type(e) == UnknownType:
--                                    msg = 'Unknown name or type in value at line %s.'
--                                else:
--                                    msg = 'Parse error in value at line %s.'
--                                self._handle_error(msg, UnreprError, infile,
--                                    cur_index)
--                                continue
--                else:
--                    if self.unrepr:
--                        comment = ''
--                        try:
--                            value = unrepr(value)
--                        except Exception, e:
--                            if isinstance(e, UnknownType):
--                                msg = 'Unknown name or type in value at line %s.'
--                            else:
--                                msg = 'Parse error in value at line %s.'
--                            self._handle_error(msg, UnreprError, infile,
--                                cur_index)
--                            continue
--                    else:
--                        # extract comment and lists
--                        try:
--                            (value, comment) = self._handle_value(value)
--                        except SyntaxError:
--                            self._handle_error(
--                                'Parse error in value at line %s.',
--                                ParseError, infile, cur_index)
--                            continue
--                #
--                key = self._unquote(key)
--                if this_section.has_key(key):
--                    self._handle_error(
--                        'Duplicate keyword name at line %s.',
--                        DuplicateError, infile, cur_index)
--                    continue
--                # add the key.
--                # we set unrepr because if we have got this far we will never
--                # be creating a new section
--                this_section.__setitem__(key, value, unrepr=True)
--                this_section.inline_comments[key] = comment
--                this_section.comments[key] = comment_list
--                continue
--        #
--        if self.indent_type is None:
--            # no indentation used, set the type accordingly
--            self.indent_type = ''
--
--        # preserve the final comment
--        if not self and not self.initial_comment:
--            self.initial_comment = comment_list
--        elif not reset_comment:
--            self.final_comment = comment_list
--        self.list_values = temp_list_values
--
--
--    def _match_depth(self, sect, depth):
--        """
--        Given a section and a depth level, walk back through the section's
--        parents to see if the depth level matches a previous section.
--        
--        Return a reference to the right section,
--        or raise a SyntaxError.
--        """
--        while depth < sect.depth:
--            if sect is sect.parent:
--                # we've reached the top level already
--                raise SyntaxError()
--            sect = sect.parent
--        if sect.depth == depth:
--            return sect
--        # shouldn't get here
--        raise SyntaxError()
--
--
--    def _handle_error(self, text, ErrorClass, infile, cur_index):
--        """
--        Handle an error according to the error settings.
--        
--        Either raise the error or store it.
--        The error will have occurred at ``cur_index``.
--        """
--        line = infile[cur_index]
--        cur_index += 1
--        message = text % cur_index
--        error = ErrorClass(message, cur_index, line)
--        if self.raise_errors:
--            # raise the error - parsing stops here
--            raise error
--        # store the error
--        # reraise when parsing has finished
--        self._errors.append(error)
--
--
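Seen from the outside, ``_handle_error`` with the default ``raise_errors=False`` collects every problem and re-raises once parsing has finished, attaching the full list to the exception; a sketch (import path is an assumption):

    from configobj import ConfigObj, ConfigObjError

    try:
        ConfigObj(['good = 1', 'this line has no divider ['])
    except ConfigObjError as e:
        for err in e.errors:
            details = (err.line_number, err.line)   # per-error information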
--    def _unquote(self, value):
--        """Return an unquoted version of a value"""
--        if (value[0] == value[-1]) and (value[0] in ('"', "'")):
--            value = value[1:-1]
--        return value
--
--
--    def _quote(self, value, multiline=True):
--        """
--        Return a safely quoted version of a value.
--        
--        Raise a ConfigObjError if the value cannot be safely quoted.
--        If multiline is ``True`` (default) then use triple quotes
--        if necessary.
--        
--        Don't quote values that don't need it.
--        Recursively quote members of a list and return a comma joined list.
--        Multiline is ``False`` for lists.
--        Obey list syntax for empty and single member lists.
--        
--        If ``list_values=False`` then the value is only quoted if it contains
--        a ``\n`` (is multiline) or '#'.
--        
--        If ``write_empty_values`` is set, and the value is an empty string, it
--        won't be quoted.
--        """
--        if multiline and self.write_empty_values and value == '':
--            # Only if multiline is set, so that it is used for values not
--            # keys, and not values that are part of a list
--            return ''
--        
--        if multiline and isinstance(value, (list, tuple)):
--            if not value:
--                return ','
--            elif len(value) == 1:
--                return self._quote(value[0], multiline=False) + ','
--            return ', '.join([self._quote(val, multiline=False)
--                for val in value])
--        if not isinstance(value, StringTypes):
--            if self.stringify:
--                value = str(value)
--            else:
--                raise TypeError('Value "%s" is not a string.' % value)
--
--        if not value:
--            return '""'
--        
--        no_lists_no_quotes = not self.list_values and '\n' not in value and '#' not in value
--        need_triple = multiline and ((("'" in value) and ('"' in value)) or ('\n' in value ))
--        hash_triple_quote = multiline and not need_triple and ("'" in value) and ('"' in value) and ('#' in value)
--        check_for_single = (no_lists_no_quotes or not need_triple) and not hash_triple_quote
--        
--        if check_for_single:
--            if not self.list_values:
--                # we don't quote if ``list_values=False``
--                quot = noquot
--            # for normal values either single or double quotes will do
--            elif '\n' in value:
--                # will only happen if multiline is off - e.g. '\n' in key
--                raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
--            elif ((value[0] not in wspace_plus) and
--                    (value[-1] not in wspace_plus) and
--                    (',' not in value)):
--                quot = noquot
--            else:
--                quot = self._get_single_quote(value)
--        else:
--            # if value has '\n' or "'" *and* '"', it will need triple quotes
--            quot = self._get_triple_quote(value)
--        
--        if quot == noquot and '#' in value and self.list_values:
--            quot = self._get_single_quote(value)
--                
--        return quot % value
--    
--    
--    def _get_single_quote(self, value):
--        if ("'" in value) and ('"' in value):
--            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
--        elif '"' in value:
--            quot = squot
--        else:
--            quot = dquot
--        return quot
--    
--    
--    def _get_triple_quote(self, value):
--        if (value.find('"""') != -1) and (value.find("'''") != -1):
--            raise ConfigObjError('Value "%s" cannot be safely quoted.' % value)
--        if value.find('"""') == -1:
--            quot = tdquot
--        else:
--            quot = tsquot 
--        return quot
--
--
--    def _handle_value(self, value):
--        """
--        Given a value string, unquote, remove comment,
--        handle lists. (including empty and single member lists)
--        """
--        # do we look for lists in values ?
--        if not self.list_values:
--            mat = self._nolistvalue.match(value)
--            if mat is None:
--                raise SyntaxError()
--            # NOTE: we don't unquote here
--            return mat.groups()
--        #
--        mat = self._valueexp.match(value)
--        if mat is None:
--            # the value is badly constructed, probably badly quoted,
--            # or an invalid list
--            raise SyntaxError()
--        (list_values, single, empty_list, comment) = mat.groups()
--        if (list_values == '') and (single is None):
--            # change this if you want to accept empty values
--            raise SyntaxError()
--        # NOTE: there is no error handling from here if the regex
--        # is wrong: then incorrect values will slip through
--        if empty_list is not None:
--            # the single comma - meaning an empty list
--            return ([], comment)
--        if single is not None:
--            # handle empty values
--            if list_values and not single:
--                # FIXME: the '' is a workaround because our regex now matches
--                #   '' at the end of a list if it has a trailing comma
--                single = None
--            else:
--                single = single or '""'
--                single = self._unquote(single)
--        if list_values == '':
--            # not a list value
--            return (single, comment)
--        the_list = self._listvalueexp.findall(list_values)
--        the_list = [self._unquote(val) for val in the_list]
--        if single is not None:
--            the_list += [single]
--        return (the_list, comment)
--
--
--    def _multiline(self, value, infile, cur_index, maxline):
--        """Extract the value, where we are in a multiline situation."""
--        quot = value[:3]
--        newvalue = value[3:]
--        single_line = self._triple_quote[quot][0]
--        multi_line = self._triple_quote[quot][1]
--        mat = single_line.match(value)
--        if mat is not None:
--            retval = list(mat.groups())
--            retval.append(cur_index)
--            return retval
--        elif newvalue.find(quot) != -1:
--            # somehow the triple quote is missing
--            raise SyntaxError()
--        #
--        while cur_index < maxline:
--            cur_index += 1
--            newvalue += '\n'
--            line = infile[cur_index]
--            if line.find(quot) == -1:
--                newvalue += line
--            else:
--                # end of multiline, process it
--                break
--        else:
--            # we've got to the end of the config, oops...
--            raise SyntaxError()
--        mat = multi_line.match(line)
--        if mat is None:
--            # a badly formed line
--            raise SyntaxError()
--        (value, comment) = mat.groups()
--        return (newvalue + value, comment, cur_index)
--
--
--    def _handle_configspec(self, configspec):
--        """Parse the configspec."""
--        # FIXME: Should we check that the configspec was created with the 
--        #        correct settings ? (i.e. ``list_values=False``)
--        if not isinstance(configspec, ConfigObj):
--            try:
--                configspec = ConfigObj(configspec,
--                                       raise_errors=True,
--                                       file_error=True,
--                                       list_values=False)
--            except ConfigObjError, e:
--                # FIXME: Should these errors have a reference
--                #        to the already parsed ConfigObj ?
--                raise ConfigspecError('Parsing configspec failed: %s' % e)
--            except IOError, e:
--                raise IOError('Reading configspec failed: %s' % e)
--        
--        self._set_configspec_value(configspec, self)
--
--
--    def _set_configspec_value(self, configspec, section):
--        """Used to recursively set configspec values."""
--        if '__many__' in configspec.sections:
--            section.configspec['__many__'] = configspec['__many__']
--            if len(configspec.sections) > 1:
--                # FIXME: can we supply any useful information here ?
--                raise RepeatSectionError()
--            
--        if hasattr(configspec, 'initial_comment'):
--            section._configspec_initial_comment = configspec.initial_comment
--            section._configspec_final_comment = configspec.final_comment
--            section._configspec_encoding = configspec.encoding
--            section._configspec_BOM = configspec.BOM
--            section._configspec_newlines = configspec.newlines
--            section._configspec_indent_type = configspec.indent_type
--            
--        for entry in configspec.scalars:
--            section._configspec_comments[entry] = configspec.comments[entry]
--            section._configspec_inline_comments[entry] = configspec.inline_comments[entry]
--            section.configspec[entry] = configspec[entry]
--            section._order.append(entry)
--            
--        for entry in configspec.sections:
--            if entry == '__many__':
--                continue
--            
--            section._cs_section_comments[entry] = configspec.comments[entry]
--            section._cs_section_inline_comments[entry] = configspec.inline_comments[entry]
--            if not section.has_key(entry):
--                section[entry] = {}
--            self._set_configspec_value(configspec[entry], section[entry])
--
--
--    def _handle_repeat(self, section, configspec):
--        """Dynamically assign configspec for repeated section."""
--        try:
--            section_keys = configspec.sections
--            scalar_keys = configspec.scalars
--        except AttributeError:
--            section_keys = [entry for entry in configspec 
--                                if isinstance(configspec[entry], dict)]
--            scalar_keys = [entry for entry in configspec 
--                                if not isinstance(configspec[entry], dict)]
--            
--        if '__many__' in section_keys and len(section_keys) > 1:
--            # FIXME: can we supply any useful information here ?
--            raise RepeatSectionError()
--        
--        scalars = {}
--        sections = {}
--        for entry in scalar_keys:
--            val = configspec[entry]
--            scalars[entry] = val
--        for entry in section_keys:
--            val = configspec[entry]
--            if entry == '__many__':
--                scalars[entry] = val
--                continue
--            sections[entry] = val
--            
--        section.configspec = scalars
--        for entry in sections:
--            if not section.has_key(entry):
--                section[entry] = {}
--            self._handle_repeat(section[entry], sections[entry])
--
--
--    def _write_line(self, indent_string, entry, this_entry, comment):
--        """Write an individual line, for the write method"""
--        # NOTE: the calls to self._quote here handles non-StringType values.
--        if not self.unrepr:
--            val = self._decode_element(self._quote(this_entry))
--        else:
--            val = repr(this_entry)
--        return '%s%s%s%s%s' % (indent_string,
--                               self._decode_element(self._quote(entry, multiline=False)),
--                               self._a_to_u(' = '),
--                               val,
--                               self._decode_element(comment))
--
--
--    def _write_marker(self, indent_string, depth, entry, comment):
--        """Write a section marker line"""
--        return '%s%s%s%s%s' % (indent_string,
--                               self._a_to_u('[' * depth),
--                               self._quote(self._decode_element(entry), multiline=False),
--                               self._a_to_u(']' * depth),
--                               self._decode_element(comment))
--
--
--    def _handle_comment(self, comment):
--        """Deal with a comment."""
--        if not comment:
--            return ''
--        start = self.indent_type
--        if not comment.startswith('#'):
--            start += self._a_to_u(' # ')
--        return (start + comment)
--
--
--    # Public methods
--
--    def write(self, outfile=None, section=None):
--        """
--        Write the current ConfigObj as a file
--        
--        tekNico: FIXME: use StringIO instead of real files
--        
--        >>> filename = a.filename
--        >>> a.filename = 'test.ini'
--        >>> a.write()
--        >>> a.filename = filename
--        >>> a == ConfigObj('test.ini', raise_errors=True)
--        1
--        """
--        if self.indent_type is None:
--            # this can be true if initialised from a dictionary
--            self.indent_type = DEFAULT_INDENT_TYPE
--            
--        out = []
--        cs = self._a_to_u('#')
--        csp = self._a_to_u('# ')
--        if section is None:
--            int_val = self.interpolation
--            self.interpolation = False
--            section = self
--            for line in self.initial_comment:
--                line = self._decode_element(line)
--                stripped_line = line.strip()
--                if stripped_line and not stripped_line.startswith(cs):
--                    line = csp + line
--                out.append(line)
--                
--        indent_string = self.indent_type * section.depth
--        for entry in (section.scalars + section.sections):
--            if entry in section.defaults:
--                # don't write out default values
--                continue
--            for comment_line in section.comments[entry]:
--                comment_line = self._decode_element(comment_line.lstrip())
--                if comment_line and not comment_line.startswith(cs):
--                    comment_line = csp + comment_line
--                out.append(indent_string + comment_line)
--            this_entry = section[entry]
--            comment = self._handle_comment(section.inline_comments[entry])
--            
--            if isinstance(this_entry, dict):
--                # a section
--                out.append(self._write_marker(
--                    indent_string,
--                    this_entry.depth,
--                    entry,
--                    comment))
--                out.extend(self.write(section=this_entry))
--            else:
--                out.append(self._write_line(
--                    indent_string,
--                    entry,
--                    this_entry,
--                    comment))
--                
--        if section is self:
--            for line in self.final_comment:
--                line = self._decode_element(line)
--                stripped_line = line.strip()
--                if stripped_line and not stripped_line.startswith(cs):
--                    line = csp + line
--                out.append(line)
--            self.interpolation = int_val
--            
--        if section is not self:
--            return out
--        
--        if (self.filename is None) and (outfile is None):
--            # output a list of lines
--            # might need to encode
--            # NOTE: This will *screw* UTF16, each line will start with the BOM
--            if self.encoding:
--                out = [l.encode(self.encoding) for l in out]
--            if (self.BOM and ((self.encoding is None) or
--                (BOM_LIST.get(self.encoding.lower()) == 'utf_8'))):
--                # Add the UTF8 BOM
--                if not out:
--                    out.append('')
--                out[0] = BOM_UTF8 + out[0]
--            return out
--        
--        # Turn the list to a string, joined with correct newlines
--        newline = self.newlines or os.linesep
--        output = self._a_to_u(newline).join(out)
--        if self.encoding:
--            output = output.encode(self.encoding)
--        if self.BOM and ((self.encoding is None) or match_utf8(self.encoding)):
--            # Add the UTF8 BOM
--            output = BOM_UTF8 + output
--            
--        if not output.endswith(newline):
--            output += newline
--        if outfile is not None:
--            outfile.write(output)
--        else:
--            h = open(self.filename, 'wb')
--            h.write(output)
--            h.close()
--
--
--    def validate(self, validator, preserve_errors=False, copy=False,
--                 section=None):
--        """
--        Test the ConfigObj against a configspec.
--        
--        It uses the ``validator`` object from *validate.py*.
--        
--        To run ``validate`` on the current ConfigObj, call: ::
--        
--            test = config.validate(validator)
--        
--        (Normally having previously passed in the configspec when the ConfigObj
--        was created - you can dynamically assign a dictionary of checks to the
--        ``configspec`` attribute of a section though).
--        
--        It returns ``True`` if everything passes, or a dictionary of
--        pass/fails (True/False). If every member of a subsection passes, it
--        will just have the value ``True``. (It also returns ``False`` if all
--        members fail).
--        
--        In addition, it converts the values from strings to their native
--        types if their checks pass (and ``stringify`` is set).
--        
--        If ``preserve_errors`` is ``True`` (``False`` is default) then instead
--        of a marking a fail with a ``False``, it will preserve the actual
--        exception object. This can contain info about the reason for failure.
--        For example the ``VdtValueTooSmallError`` indicates that the value
--        supplied was too small. If a value (or section) is missing it will
--        still be marked as ``False``.
--        
--        You must have the validate module to use ``preserve_errors=True``.
--        
--        You can then use the ``flatten_errors`` function to turn your nested
--        results dictionary into a flattened list of failures - useful for
--        displaying meaningful error messages.
--        """
--        if section is None:
--            if self.configspec is None:
--                raise ValueError('No configspec supplied.')
--            if preserve_errors:
--                # We do this once to remove a top level dependency on the validate module
--                # Which makes importing configobj faster
--                from validate import VdtMissingValue
--                self._vdtMissingValue = VdtMissingValue
--            section = self
--        #
--        spec_section = section.configspec
--        if copy and hasattr(section, '_configspec_initial_comment'):
--            section.initial_comment = section._configspec_initial_comment
--            section.final_comment = section._configspec_final_comment
--            section.encoding = section._configspec_encoding
--            section.BOM = section._configspec_BOM
--            section.newlines = section._configspec_newlines
--            section.indent_type = section._configspec_indent_type
--            
--        if '__many__' in section.configspec:
--            many = spec_section['__many__']
--            # dynamically assign the configspecs
--            # for the sections below
--            for entry in section.sections:
--                self._handle_repeat(section[entry], many)
--        #
--        out = {}
--        ret_true = True
--        ret_false = True
--        order = [k for k in section._order if k in spec_section]
--        order += [k for k in spec_section if k not in order]
--        for entry in order:
--            if entry == '__many__':
--                continue
--            if (not entry in section.scalars) or (entry in section.defaults):
--                # missing entries
--                # or entries from defaults
--                missing = True
--                val = None
--                if copy and not entry in section.scalars:
--                    # copy comments
--                    section.comments[entry] = (
--                        section._configspec_comments.get(entry, []))
--                    section.inline_comments[entry] = (
--                        section._configspec_inline_comments.get(entry, ''))
--                #
--            else:
--                missing = False
--                val = section[entry]
--            try:
--                check = validator.check(spec_section[entry],
--                                        val,
--                                        missing=missing
--                                        )
--            except validator.baseErrorClass, e:
--                if not preserve_errors or isinstance(e, self._vdtMissingValue):
--                    out[entry] = False
--                else:
--                    # preserve the error
--                    out[entry] = e
--                    ret_false = False
--                ret_true = False
--            else:
--                try: 
--                    section.default_values.pop(entry, None)
--                except AttributeError: 
--                    # For Python 2.2 compatibility
--                    try:
--                        del section.default_values[entry]
--                    except KeyError:
--                        pass
--                    
--                if hasattr(validator, 'get_default_value'):
--                    try: 
--                        section.default_values[entry] = validator.get_default_value(spec_section[entry])
--                    except KeyError:
--                        # No default
--                        pass
--                    
--                ret_false = False
--                out[entry] = True
--                if self.stringify or missing:
--                    # if we are doing type conversion
--                    # or the value is a supplied default
--                    if not self.stringify:
--                        if isinstance(check, (list, tuple)):
--                            # preserve lists
--                            check = [self._str(item) for item in check]
--                        elif missing and check is None:
--                            # convert the None from a default to a ''
--                            check = ''
--                        else:
--                            check = self._str(check)
--                    if (check != val) or missing:
--                        section[entry] = check
--                if not copy and missing and entry not in section.defaults:
--                    section.defaults.append(entry)
--        # Missing sections will have been created as empty ones when the
--        # configspec was read.
--        for entry in section.sections:
--            # FIXME: this means DEFAULT is not copied in copy mode
--            if section is self and entry == 'DEFAULT':
--                continue
--            if copy:
--                section.comments[entry] = section._cs_section_comments[entry]
--                section.inline_comments[entry] = (
--                    section._cs_section_inline_comments[entry])
--            check = self.validate(validator, preserve_errors=preserve_errors,
--                copy=copy, section=section[entry])
--            out[entry] = check
--            if check == False:
--                ret_true = False
--            elif check == True:
--                ret_false = False
--            else:
--                ret_true = False
--                ret_false = False
--        #
--        if ret_true:
--            return True
--        elif ret_false:
--            return False
--        return out
--
--
--    def reset(self):
--        """Clear ConfigObj instance and restore to 'freshly created' state."""
--        self.clear()
--        self._initialise()
--        # FIXME: Should be done by '_initialise', but ConfigObj constructor (and reload)
--        #        requires an empty dictionary
--        self.configspec = None
--        # Just to be sure ;-)
--        self._original_configspec = None
--        
--        
--    def reload(self):
--        """
--        Reload a ConfigObj from file.
--        
--        This method raises a ``ReloadError`` if the ConfigObj doesn't have
--        a filename attribute pointing to a file.
--        """
--        if not isinstance(self.filename, StringTypes):
--            raise ReloadError()
--
--        filename = self.filename
--        current_options = {}
--        for entry in OPTION_DEFAULTS:
--            if entry == 'configspec':
--                continue
--            current_options[entry] = getattr(self, entry)
--            
--        configspec = self._original_configspec
--        current_options['configspec'] = configspec
--            
--        self.clear()
--        self._initialise(current_options)
--        self._load(filename, configspec)
--        
--
--
--class SimpleVal(object):
--    """
--    A simple validator.
--    Can be used to check that all members expected are present.
--    
--    To use it, provide a configspec with all your members in (the value given
--    will be ignored). Pass an instance of ``SimpleVal`` to the ``validate``
--    method of your ``ConfigObj``. ``validate`` will return ``True`` if all
--    members are present, or a dictionary with True/False meaning
--    present/missing. (Whole missing sections will be replaced with ``False``)
--    """
--    
--    def __init__(self):
--        self.baseErrorClass = ConfigObjError
--    
--    def check(self, check, member, missing=False):
--        """A dummy check method, always returns the value unchanged."""
--        if missing:
--            raise self.baseErrorClass()
--        return member
--
--
--# Check / processing functions for options
--def flatten_errors(cfg, res, levels=None, results=None):
--    """
--    An example function that will turn a nested dictionary of results
--    (as returned by ``ConfigObj.validate``) into a flat list.
--    
--    ``cfg`` is the ConfigObj instance being checked, ``res`` is the results
--    dictionary returned by ``validate``.
--    
--    (This is a recursive function, so you shouldn't use the ``levels`` or
--    ``results`` arguments - they are used by the function.
--    
--    Returns a list of keys that failed. Each member of the list is a tuple :
--    ::
--    
--        ([list of sections...], key, result)
--    
--    If ``validate`` was called with ``preserve_errors=False`` (the default)
--    then ``result`` will always be ``False``.
--
--    *list of sections* is a flattened list of sections that the key was found
--    in.
--    
--    If the section was missing then key will be ``None``.
--    
--    If the value (or section) was missing then ``result`` will be ``False``.
--    
--    If ``validate`` was called with ``preserve_errors=True`` and a value
--    was present, but failed the check, then ``result`` will be the exception
--    object returned. You can use this as a string that describes the failure.
--    
--    For example *The value "3" is of the wrong type*.
--    
--    >>> import validate
--    >>> vtor = validate.Validator()
--    >>> my_ini = '''
--    ...     option1 = True
--    ...     [section1]
--    ...     option1 = True
--    ...     [section2]
--    ...     another_option = Probably
--    ...     [section3]
--    ...     another_option = True
--    ...     [[section3b]]
--    ...     value = 3
--    ...     value2 = a
--    ...     value3 = 11
--    ...     '''
--    >>> my_cfg = '''
--    ...     option1 = boolean()
--    ...     option2 = boolean()
--    ...     option3 = boolean(default=Bad_value)
--    ...     [section1]
--    ...     option1 = boolean()
--    ...     option2 = boolean()
--    ...     option3 = boolean(default=Bad_value)
--    ...     [section2]
--    ...     another_option = boolean()
--    ...     [section3]
--    ...     another_option = boolean()
--    ...     [[section3b]]
--    ...     value = integer
--    ...     value2 = integer
--    ...     value3 = integer(0, 10)
--    ...         [[[section3b-sub]]]
--    ...         value = string
--    ...     [section4]
--    ...     another_option = boolean()
--    ...     '''
--    >>> cs = my_cfg.split('\\n')
--    >>> ini = my_ini.split('\\n')
--    >>> cfg = ConfigObj(ini, configspec=cs)
--    >>> res = cfg.validate(vtor, preserve_errors=True)
--    >>> errors = []
--    >>> for entry in flatten_errors(cfg, res):
--    ...     section_list, key, error = entry
--    ...     section_list.insert(0, '[root]')
--    ...     if key is not None:
--    ...        section_list.append(key)
--    ...     else:
--    ...         section_list.append('[missing]')
--    ...     section_string = ', '.join(section_list)
--    ...     errors.append((section_string, ' = ', error))
--    >>> errors.sort()
--    >>> for entry in errors:
--    ...     print entry[0], entry[1], (entry[2] or 0)
--    [root], option2  =  0
--    [root], option3  =  the value "Bad_value" is of the wrong type.
--    [root], section1, option2  =  0
--    [root], section1, option3  =  the value "Bad_value" is of the wrong type.
--    [root], section2, another_option  =  the value "Probably" is of the wrong type.
--    [root], section3, section3b, section3b-sub, [missing]  =  0
--    [root], section3, section3b, value2  =  the value "a" is of the wrong type.
--    [root], section3, section3b, value3  =  the value "11" is too big.
--    [root], section4, [missing]  =  0
--    """
--    if levels is None:
--        # first time called
--        levels = []
--        results = []
--    if res is True:
--        return results
--    if res is False:
--        results.append((levels[:], None, False))
--        if levels:
--            levels.pop()
--        return results
--    for (key, val) in res.items():
--        if val == True:
--            continue
--        if isinstance(cfg.get(key), dict):
--            # Go down one level
--            levels.append(key)
--            flatten_errors(cfg[key], val, levels, results)
--            continue
--        results.append((levels[:], key, val))
--    #
--    # Go up one level
--    if levels:
--        levels.pop()
--    #
--    return results
--
--
--"""*A programming language is a medium of expression.* - Paul Graham"""
-Index: ipython-0.10/IPython/external/guid/_guid.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/guid/_guid.py
-@@ -0,0 +1,170 @@
-+#!/usr/bin/env python
-+# encoding: utf-8
-+
-+# GUID.py
-+# Version 2.6
-+#
-+# Copyright (c) 2006 Conan C. Albrecht
-+#
-+# Permission is hereby granted, free of charge, to any person obtaining a copy 
-+# of this software and associated documentation files (the "Software"), to deal 
-+# in the Software without restriction, including without limitation the rights 
-+# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
-+# copies of the Software, and to permit persons to whom the Software is furnished 
-+# to do so, subject to the following conditions:
-+#
-+# The above copyright notice and this permission notice shall be included in all 
-+# copies or substantial portions of the Software.
-+#
-+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 
-+# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 
-+# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE 
-+# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
-+# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
-+# DEALINGS IN THE SOFTWARE.
-+
-+
-+
-+##################################################################################################
-+###   A globally-unique identifier made up of time and ip and 8 digits for a counter: 
-+###   each GUID is 40 characters wide
-+###
-+###   A globally unique identifier that combines ip, time, and a counter.  Since the 
-+###   time is listed first, you can sort records by guid.  You can also extract the time 
-+###   and ip if needed.  
-+###
-+###   Since the counter has eight hex characters, you can create up to 
-+###   0xffffffff (4294967295) GUIDs every millisecond.  If your processor
-+###   is somehow fast enough to create more than that in a millisecond (looking
-+###   toward the future, of course), the function will wait until the next
-+###   millisecond to return.
-+###     
-+###   GUIDs make wonderful database keys.  They require no access to the 
-+###   database (to get the max index number), they are extremely unique, and they sort 
-+###   automatically by time.   GUIDs prevent key clashes when merging
-+###   two databases together, combining data, or generating keys in distributed
-+###   systems.
-+###
-+###   There is an Internet Draft for UUIDs, but this module does not implement it.
-+###   If the draft catches on, perhaps I'll conform the module to it.
-+###
-+
-+
-+# Changelog
-+# Sometime, 1997     Created the Java version of GUID
-+#                    Went through many versions in Java
-+# Sometime, 2002     Created the Python version of GUID, mirroring the Java version
-+# November 24, 2003  Changed Python version to be more pythonic, took out object and made just a module
-+# December 2, 2003   Fixed duplicating GUIDs.  Sometimes they duplicate if multiples are created
-+#                    in the same millisecond (it checks the last 100 GUIDs now and has a larger random part)
-+# December 9, 2003   Fixed MAX_RANDOM, which was going over sys.maxint
-+# June 12, 2004      Allowed a custom IP address to be sent in rather than always using the 
-+#                    local IP address.  
-+# November 4, 2005   Changed the random part to a counter variable.  Now GUIDs are totally 
-+#                    unique and more efficient, as long as they are created by only
-+#                    on runtime on a given machine.  The counter part is after the time
-+#                    part so it sorts correctly.
-+# November 8, 2005   The counter variable now starts at a random long now and cycles
-+#                    around.  This is in case two guids are created on the same
-+#                    machine at the same millisecond (by different processes).  Even though
-+#                    it is possible the GUID can be created, this makes it highly unlikely
-+#                    since the counter will likely be different.
-+# November 11, 2005  Fixed a bug in the new IP getting algorithm.  Also, use IPv6 range
-+#                    for IP when we make it up (when it's no accessible)
-+# November 21, 2005  Added better IP-finding code.  It finds IP address better now.
-+# January 5, 2006    Fixed a small bug caused in old versions of python (random module use)
-+
-+import math
-+import socket
-+import random
-+import sys
-+import time
-+import threading
-+
-+
-+
-+#############################
-+###   global module variables
-+
-+#Makes a hex IP from a decimal dot-separated ip (eg: 127.0.0.1)
-+make_hexip = lambda ip: ''.join(["%04x" % long(i) for i in ip.split('.')]) # leave space for ip v6 (65K in each sub)
-+  
-+MAX_COUNTER = 0xfffffffe
-+counter = 0L
-+firstcounter = MAX_COUNTER
-+lasttime = 0
-+ip = ''
-+lock = threading.RLock()
-+try:  # only need to get the IP addresss once
-+  ip = socket.getaddrinfo(socket.gethostname(),0)[-1][-1][0]
-+  hexip = make_hexip(ip)
-+except: # if we don't have an ip, default to someting in the 10.x.x.x private range
-+  ip = '10'
-+  rand = random.Random()
-+  for i in range(3):
-+    ip += '.' + str(rand.randrange(1, 0xffff))  # might as well use IPv6 range if we're making it up
-+  hexip = make_hexip(ip)
-+
-+  
-+#################################
-+###   Public module functions
-+
-+def generate(ip=None):
-+  '''Generates a new guid.  A guid is unique in space and time because it combines
-+     the machine IP with the current time in milliseconds.  Be careful about sending in
-+     a specified IP address because the ip makes it unique in space.  You could send in
-+     the same IP address that is created on another machine.
-+  '''
-+  global counter, firstcounter, lasttime
-+  lock.acquire() # can't generate two guids at the same time
-+  try:
-+    parts = []
-+
-+    # do we need to wait for the next millisecond (are we out of counters?)
-+    now = long(time.time() * 1000)
-+    while lasttime == now and counter == firstcounter: 
-+      time.sleep(.01)
-+      now = long(time.time() * 1000)
-+
-+    # time part
-+    parts.append("%016x" % now)
-+
-+    # counter part
-+    if lasttime != now:  # time to start counter over since we have a different millisecond
-+      firstcounter = long(random.uniform(1, MAX_COUNTER))  # start at random position
-+      counter = firstcounter
-+    counter += 1
-+    if counter > MAX_COUNTER:
-+      counter = 0
-+    lasttime = now
-+    parts.append("%08x" % (counter)) 
-+
-+    # ip part
-+    parts.append(hexip)
-+
-+    # put them all together
-+    return ''.join(parts)
-+  finally:
-+    lock.release()
-+    
-+
-+def extract_time(guid):
-+  '''Extracts the time portion out of the guid and returns the 
-+     number of seconds since the epoch as a float'''
-+  return float(long(guid[0:16], 16)) / 1000.0
-+
-+
-+def extract_counter(guid):
-+  '''Extracts the counter from the guid (returns the bits in decimal)'''
-+  return int(guid[16:24], 16)
-+
-+
-+def extract_ip(guid):
-+  '''Extracts the ip portion out of the guid and returns it
-+     as a string like 10.10.10.10'''
-+  # there's probably a more elegant way to do this
-+  thisip = []
-+  for index in range(24, 40, 4):
-+    thisip.append(str(int(guid[index: index + 4], 16)))
-+  return '.'.join(thisip)
-+
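
The comments above document the GUID layout: sixteen hex digits of millisecond timestamp, eight hex digits of counter, and sixteen hex digits of address, forty characters in all, which the ``extract_*`` helpers slice apart. A standalone sketch of the same decomposition, using a made-up sample value so it does not depend on importing the bundled module::

    # Decompose a 40-character GUID along the documented field boundaries:
    # chars 0-15 time in ms, 16-23 counter, 24-39 hex-encoded IP.
    # The sample value below is invented for illustration.
    import time

    guid = "0000011a2b3c4d5e0000002a000a000a000a000a"

    millis  = int(guid[0:16], 16)        # same slice as extract_time()
    counter = int(guid[16:24], 16)       # same slice as extract_counter()
    ip = '.'.join(str(int(guid[i:i + 4], 16))   # same loop as extract_ip()
                  for i in range(24, 40, 4))

    print("%s  counter=%d  ip=%s" % (time.ctime(millis / 1000.0), counter, ip))
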
 Index: ipython-0.10/IPython/external/guid/__init__.py
 ===================================================================
 --- /dev/null
@@ -9661,181 +32,6 @@ Index: ipython-0.10/IPython/external/guid/__init__.py
 +    from guid import *
 +except ImportError:
 +    from _guid import *
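
This new ``__init__.py`` is the unbundling shim applied to the sub-packages touched in this patch: it prefers a system-wide copy of the module and only falls back to the bundled copy when that import fails. The generic form, with ``somelib`` as a placeholder name::

    # Use the system-wide module when installed, otherwise the bundled
    # copy shipped next to this __init__.py ("somelib" is a placeholder).
    try:
        from somelib import *        # unbundled, system-wide copy
    except ImportError:
        from _somelib import *       # bundled fallback

Under Python 2 the fallback resolves to the sibling ``_somelib`` module via implicit relative imports, which is why the bundled copies are renamed with a leading underscore.
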
-Index: ipython-0.10/IPython/external/guid.py
-===================================================================
---- ipython-0.10.orig/IPython/external/guid.py
-+++ /dev/null
-@@ -1,170 +0,0 @@
--#!/usr/bin/env python
--# encoding: utf-8
--
--# GUID.py
--# Version 2.6
--#
--# Copyright (c) 2006 Conan C. Albrecht
--#
--# Permission is hereby granted, free of charge, to any person obtaining a copy 
--# of this software and associated documentation files (the "Software"), to deal 
--# in the Software without restriction, including without limitation the rights 
--# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell 
--# copies of the Software, and to permit persons to whom the Software is furnished 
--# to do so, subject to the following conditions:
--#
--# The above copyright notice and this permission notice shall be included in all 
--# copies or substantial portions of the Software.
--#
--# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, 
--# INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR 
--# PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE 
--# FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR 
--# OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER 
--# DEALINGS IN THE SOFTWARE.
--
--
--
--##################################################################################################
--###   A globally-unique identifier made up of time and ip and 8 digits for a counter: 
--###   each GUID is 40 characters wide
--###
--###   A globally unique identifier that combines ip, time, and a counter.  Since the 
--###   time is listed first, you can sort records by guid.  You can also extract the time 
--###   and ip if needed.  
--###
--###   Since the counter has eight hex characters, you can create up to 
--###   0xffffffff (4294967295) GUIDs every millisecond.  If your processor
--###   is somehow fast enough to create more than that in a millisecond (looking
--###   toward the future, of course), the function will wait until the next
--###   millisecond to return.
--###     
--###   GUIDs make wonderful database keys.  They require no access to the 
--###   database (to get the max index number), they are extremely unique, and they sort 
--###   automatically by time.   GUIDs prevent key clashes when merging
--###   two databases together, combining data, or generating keys in distributed
--###   systems.
--###
--###   There is an Internet Draft for UUIDs, but this module does not implement it.
--###   If the draft catches on, perhaps I'll conform the module to it.
--###
--
--
--# Changelog
--# Sometime, 1997     Created the Java version of GUID
--#                    Went through many versions in Java
--# Sometime, 2002     Created the Python version of GUID, mirroring the Java version
--# November 24, 2003  Changed Python version to be more pythonic, took out object and made just a module
--# December 2, 2003   Fixed duplicating GUIDs.  Sometimes they duplicate if multiples are created
--#                    in the same millisecond (it checks the last 100 GUIDs now and has a larger random part)
--# December 9, 2003   Fixed MAX_RANDOM, which was going over sys.maxint
--# June 12, 2004      Allowed a custom IP address to be sent in rather than always using the 
--#                    local IP address.  
--# November 4, 2005   Changed the random part to a counter variable.  Now GUIDs are totally 
--#                    unique and more efficient, as long as they are created by only
--#                    on runtime on a given machine.  The counter part is after the time
--#                    part so it sorts correctly.
--# November 8, 2005   The counter variable now starts at a random long now and cycles
--#                    around.  This is in case two guids are created on the same
--#                    machine at the same millisecond (by different processes).  Even though
--#                    it is possible the GUID can be created, this makes it highly unlikely
--#                    since the counter will likely be different.
--# November 11, 2005  Fixed a bug in the new IP getting algorithm.  Also, use IPv6 range
--#                    for IP when we make it up (when it's no accessible)
--# November 21, 2005  Added better IP-finding code.  It finds IP address better now.
--# January 5, 2006    Fixed a small bug caused in old versions of python (random module use)
--
--import math
--import socket
--import random
--import sys
--import time
--import threading
--
--
--
--#############################
--###   global module variables
--
--#Makes a hex IP from a decimal dot-separated ip (eg: 127.0.0.1)
--make_hexip = lambda ip: ''.join(["%04x" % long(i) for i in ip.split('.')]) # leave space for ip v6 (65K in each sub)
--  
--MAX_COUNTER = 0xfffffffe
--counter = 0L
--firstcounter = MAX_COUNTER
--lasttime = 0
--ip = ''
--lock = threading.RLock()
--try:  # only need to get the IP addresss once
--  ip = socket.getaddrinfo(socket.gethostname(),0)[-1][-1][0]
--  hexip = make_hexip(ip)
--except: # if we don't have an ip, default to someting in the 10.x.x.x private range
--  ip = '10'
--  rand = random.Random()
--  for i in range(3):
--    ip += '.' + str(rand.randrange(1, 0xffff))  # might as well use IPv6 range if we're making it up
--  hexip = make_hexip(ip)
--
--  
--#################################
--###   Public module functions
--
--def generate(ip=None):
--  '''Generates a new guid.  A guid is unique in space and time because it combines
--     the machine IP with the current time in milliseconds.  Be careful about sending in
--     a specified IP address because the ip makes it unique in space.  You could send in
--     the same IP address that is created on another machine.
--  '''
--  global counter, firstcounter, lasttime
--  lock.acquire() # can't generate two guids at the same time
--  try:
--    parts = []
--
--    # do we need to wait for the next millisecond (are we out of counters?)
--    now = long(time.time() * 1000)
--    while lasttime == now and counter == firstcounter: 
--      time.sleep(.01)
--      now = long(time.time() * 1000)
--
--    # time part
--    parts.append("%016x" % now)
--
--    # counter part
--    if lasttime != now:  # time to start counter over since we have a different millisecond
--      firstcounter = long(random.uniform(1, MAX_COUNTER))  # start at random position
--      counter = firstcounter
--    counter += 1
--    if counter > MAX_COUNTER:
--      counter = 0
--    lasttime = now
--    parts.append("%08x" % (counter)) 
--
--    # ip part
--    parts.append(hexip)
--
--    # put them all together
--    return ''.join(parts)
--  finally:
--    lock.release()
--    
--
--def extract_time(guid):
--  '''Extracts the time portion out of the guid and returns the 
--     number of seconds since the epoch as a float'''
--  return float(long(guid[0:16], 16)) / 1000.0
--
--
--def extract_counter(guid):
--  '''Extracts the counter from the guid (returns the bits in decimal)'''
--  return int(guid[16:24], 16)
--
--
--def extract_ip(guid):
--  '''Extracts the ip portion out of the guid and returns it
--     as a string like 10.10.10.10'''
--  # there's probably a more elegant way to do this
--  thisip = []
--  for index in range(24, 40, 4):
--    thisip.append(str(int(guid[index: index + 4], 16)))
--  return '.'.join(thisip)
--
 Index: ipython-0.10/IPython/external/Itpl/__init__.py
 ===================================================================
 --- /dev/null
@@ -9845,568 +41,6 @@ Index: ipython-0.10/IPython/external/Itpl/__init__.py
 +    from Itpl import *
 +except ImportError:
 +    from _Itpl import *
-Index: ipython-0.10/IPython/external/Itpl/_Itpl.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/Itpl/_Itpl.py
-@@ -0,0 +1,276 @@
-+# -*- coding: utf-8 -*-
-+"""String interpolation for Python (by Ka-Ping Yee, 14 Feb 2000).
-+
-+This module lets you quickly and conveniently interpolate values into
-+strings (in the flavour of Perl or Tcl, but with less extraneous
-+punctuation).  You get a bit more power than in the other languages,
-+because this module allows subscripting, slicing, function calls,
-+attribute lookup, or arbitrary expressions.  Variables and expressions
-+are evaluated in the namespace of the caller.
-+
-+The itpl() function returns the result of interpolating a string, and
-+printpl() prints out an interpolated string.  Here are some examples:
-+
-+    from Itpl import printpl
-+    printpl("Here is a $string.")
-+    printpl("Here is a $module.member.")
-+    printpl("Here is an $object.member.")
-+    printpl("Here is a $functioncall(with, arguments).")
-+    printpl("Here is an ${arbitrary + expression}.")
-+    printpl("Here is an $array[3] member.")
-+    printpl("Here is a $dictionary['member'].")
-+
-+The filter() function filters a file object so that output through it
-+is interpolated.  This lets you produce the illusion that Python knows
-+how to do interpolation:
-+
-+    import Itpl
-+    sys.stdout = Itpl.filter()
-+    f = "fancy"
-+    print "Is this not $f?"
-+    print "Standard output has been replaced with a $sys.stdout object."
-+    sys.stdout = Itpl.unfilter()
-+    print "Okay, back $to $normal."
-+
-+Under the hood, the Itpl class represents a string that knows how to
-+interpolate values.  An instance of the class parses the string once
-+upon initialization; the evaluation and substitution can then be done
-+each time the instance is evaluated with str(instance).  For example:
-+
-+    from Itpl import Itpl
-+    s = Itpl("Here is $foo.")
-+    foo = 5
-+    print str(s)
-+    foo = "bar"
-+    print str(s)
-+"""
-+
-+#*****************************************************************************
-+#
-+# Copyright (c) 2001 Ka-Ping Yee <ping at lfw.org>
-+#
-+#
-+# Published under the terms of the MIT license, hereby reproduced:
-+#
-+# Permission is hereby granted, free of charge, to any person obtaining a copy
-+# of this software and associated documentation files (the "Software"), to
-+# deal in the Software without restriction, including without limitation the
-+# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
-+# sell copies of the Software, and to permit persons to whom the Software is
-+# furnished to do so, subject to the following conditions:
-+#
-+# The above copyright notice and this permission notice shall be included in
-+# all copies or substantial portions of the Software.
-+#
-+# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-+# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-+# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
-+# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-+# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
-+# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
-+# IN THE SOFTWARE.
-+#
-+#*****************************************************************************
-+
-+__author__  = 'Ka-Ping Yee <ping at lfw.org>'
-+__license__ = 'MIT'
-+
-+import string
-+import sys
-+from tokenize import tokenprog
-+from types import StringType
-+
-+class ItplError(ValueError):
-+    def __init__(self, text, pos):
-+        self.text = text
-+        self.pos = pos
-+    def __str__(self):
-+        return "unfinished expression in %s at char %d" % (
-+            repr(self.text), self.pos)
-+
-+def matchorfail(text, pos):
-+    match = tokenprog.match(text, pos)
-+    if match is None:
-+        raise ItplError(text, pos)
-+    return match, match.end()
-+
-+class Itpl:
-+    """Class representing a string with interpolation abilities.
-+    
-+    Upon creation, an instance works out what parts of the format
-+    string are literal and what parts need to be evaluated.  The
-+    evaluation and substitution happens in the namespace of the
-+    caller when str(instance) is called."""
-+
-+    def __init__(self, format,codec='utf_8',encoding_errors='backslashreplace'):
-+        """The single mandatory argument to this constructor is a format
-+        string.
-+
-+        The format string is parsed according to the following rules:
-+
-+        1.  A dollar sign and a name, possibly followed by any of: 
-+              - an open-paren, and anything up to the matching paren 
-+              - an open-bracket, and anything up to the matching bracket 
-+              - a period and a name 
-+            any number of times, is evaluated as a Python expression.
-+
-+        2.  A dollar sign immediately followed by an open-brace, and
-+            anything up to the matching close-brace, is evaluated as
-+            a Python expression.
-+
-+        3.  Outside of the expressions described in the above two rules,
-+            two dollar signs in a row give you one literal dollar sign.
-+
-+        Optional arguments:
-+
-+        - codec('utf_8'): a string containing the name of a valid Python
-+        codec.
-+
-+        - encoding_errors('backslashreplace'): a string with a valid error handling
-+        policy.  See the codecs module documentation for details.
-+
-+        These are used to encode the format string if a call to str() fails on
-+        the expanded result."""
-+
-+        if not isinstance(format,basestring):
-+            raise TypeError, "needs string initializer"
-+        self.format = format
-+        self.codec = codec
-+        self.encoding_errors = encoding_errors
-+        
-+        namechars = "abcdefghijklmnopqrstuvwxyz" \
-+            "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
-+        chunks = []
-+        pos = 0
-+
-+        while 1:
-+            dollar = string.find(format, "$", pos)
-+            if dollar < 0: break
-+            nextchar = format[dollar+1]
-+
-+            if nextchar == "{":
-+                chunks.append((0, format[pos:dollar]))
-+                pos, level = dollar+2, 1
-+                while level:
-+                    match, pos = matchorfail(format, pos)
-+                    tstart, tend = match.regs[3]
-+                    token = format[tstart:tend]
-+                    if token == "{": level = level+1
-+                    elif token == "}": level = level-1
-+                chunks.append((1, format[dollar+2:pos-1]))
-+
-+            elif nextchar in namechars:
-+                chunks.append((0, format[pos:dollar]))
-+                match, pos = matchorfail(format, dollar+1)
-+                while pos < len(format):
-+                    if format[pos] == "." and \
-+                        pos+1 < len(format) and format[pos+1] in namechars:
-+                        match, pos = matchorfail(format, pos+1)
-+                    elif format[pos] in "([":
-+                        pos, level = pos+1, 1
-+                        while level:
-+                            match, pos = matchorfail(format, pos)
-+                            tstart, tend = match.regs[3]
-+                            token = format[tstart:tend]
-+                            if token[0] in "([": level = level+1
-+                            elif token[0] in ")]": level = level-1
-+                    else: break
-+                chunks.append((1, format[dollar+1:pos]))
-+
-+            else:
-+                chunks.append((0, format[pos:dollar+1]))
-+                pos = dollar + 1 + (nextchar == "$")
-+
-+        if pos < len(format): chunks.append((0, format[pos:]))
-+        self.chunks = chunks
-+
-+    def __repr__(self):
-+        return "<Itpl %s >" % repr(self.format)
-+
-+    def _str(self,glob,loc):
-+        """Evaluate to a string in the given globals/locals.
-+
-+        The final output is built by calling str(), but if this fails, the
-+        result is encoded with the instance's codec and error handling policy,
-+        via a call to out.encode(self.codec,self.encoding_errors)"""
-+        result = []
-+        app = result.append
-+        for live, chunk in self.chunks:
-+            if live: app(str(eval(chunk,glob,loc)))
-+            else: app(chunk)
-+        out = ''.join(result)
-+        try:
-+            return str(out)
-+        except UnicodeError:
-+            return out.encode(self.codec,self.encoding_errors)
-+
-+    def __str__(self):
-+        """Evaluate and substitute the appropriate parts of the string."""
-+
-+        # We need to skip enough frames to get to the actual caller outside of
-+        # Itpl.
-+        frame = sys._getframe(1)
-+        while frame.f_globals["__name__"] == __name__: frame = frame.f_back
-+        loc, glob = frame.f_locals, frame.f_globals
-+
-+        return self._str(glob,loc)
-+    
-+class ItplNS(Itpl):
-+    """Class representing a string with interpolation abilities.
-+
-+    This inherits from Itpl, but at creation time a namespace is provided
-+    where the evaluation will occur.  The interpolation becomes a bit more
-+    efficient, as no traceback needs to be extracte.  It also allows the
-+    caller to supply a different namespace for the interpolation to occur than
-+    its own."""
-+    
-+    def __init__(self, format,globals,locals=None,
-+                 codec='utf_8',encoding_errors='backslashreplace'):
-+        """ItplNS(format,globals[,locals]) -> interpolating string instance.
-+
-+        This constructor, besides a format string, takes a globals dictionary
-+        and optionally a locals (which defaults to globals if not provided).
-+
-+        For further details, see the Itpl constructor."""
-+
-+        if locals is None:
-+            locals = globals
-+        self.globals = globals
-+        self.locals = locals
-+        Itpl.__init__(self,format,codec,encoding_errors)
-+        
-+    def __str__(self):
-+        """Evaluate and substitute the appropriate parts of the string."""
-+        return self._str(self.globals,self.locals)
-+
-+    def __repr__(self):
-+        return "<ItplNS %s >" % repr(self.format)
-+
-+# utilities for fast printing
-+def itpl(text): return str(Itpl(text))
-+def printpl(text): print itpl(text)
-+# versions with namespace
-+def itplns(text,globals,locals=None): return str(ItplNS(text,globals,locals))
-+def printplns(text,globals,locals=None): print itplns(text,globals,locals)
-+
-+class ItplFile:
-+    """A file object that filters each write() through an interpolator."""
-+    def __init__(self, file): self.file = file
-+    def __repr__(self): return "<interpolated " + repr(self.file) + ">"
-+    def __getattr__(self, attr): return getattr(self.file, attr)
-+    def write(self, text): self.file.write(str(Itpl(text)))
-+
-+def filter(file=sys.stdout):
-+    """Return an ItplFile that filters writes to the given file object.
-+    
-+    'file = filter(file)' replaces 'file' with a filtered object that
-+    has a write() method.  When called with no argument, this creates
-+    a filter to sys.stdout."""
-+    return ItplFile(file)
-+
-+def unfilter(ifile=None):
-+    """Return the original file that corresponds to the given ItplFile.
-+    
-+    'file = unfilter(file)' undoes the effect of 'file = filter(file)'.
-+    'sys.stdout = unfilter()' undoes the effect of 'sys.stdout = filter()'."""
-+    return ifile and ifile.file or sys.stdout.file
-Index: ipython-0.10/IPython/external/Itpl.py
-===================================================================
---- ipython-0.10.orig/IPython/external/Itpl.py
-+++ /dev/null
-@@ -1,276 +0,0 @@
--# -*- coding: utf-8 -*-
--"""String interpolation for Python (by Ka-Ping Yee, 14 Feb 2000).
--
--This module lets you quickly and conveniently interpolate values into
--strings (in the flavour of Perl or Tcl, but with less extraneous
--punctuation).  You get a bit more power than in the other languages,
--because this module allows subscripting, slicing, function calls,
--attribute lookup, or arbitrary expressions.  Variables and expressions
--are evaluated in the namespace of the caller.
--
--The itpl() function returns the result of interpolating a string, and
--printpl() prints out an interpolated string.  Here are some examples:
--
--    from Itpl import printpl
--    printpl("Here is a $string.")
--    printpl("Here is a $module.member.")
--    printpl("Here is an $object.member.")
--    printpl("Here is a $functioncall(with, arguments).")
--    printpl("Here is an ${arbitrary + expression}.")
--    printpl("Here is an $array[3] member.")
--    printpl("Here is a $dictionary['member'].")
--
--The filter() function filters a file object so that output through it
--is interpolated.  This lets you produce the illusion that Python knows
--how to do interpolation:
--
--    import Itpl
--    sys.stdout = Itpl.filter()
--    f = "fancy"
--    print "Is this not $f?"
--    print "Standard output has been replaced with a $sys.stdout object."
--    sys.stdout = Itpl.unfilter()
--    print "Okay, back $to $normal."
--
--Under the hood, the Itpl class represents a string that knows how to
--interpolate values.  An instance of the class parses the string once
--upon initialization; the evaluation and substitution can then be done
--each time the instance is evaluated with str(instance).  For example:
--
--    from Itpl import Itpl
--    s = Itpl("Here is $foo.")
--    foo = 5
--    print str(s)
--    foo = "bar"
--    print str(s)
--"""
--
--#*****************************************************************************
--#
--# Copyright (c) 2001 Ka-Ping Yee <ping at lfw.org>
--#
--#
--# Published under the terms of the MIT license, hereby reproduced:
--#
--# Permission is hereby granted, free of charge, to any person obtaining a copy
--# of this software and associated documentation files (the "Software"), to
--# deal in the Software without restriction, including without limitation the
--# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
--# sell copies of the Software, and to permit persons to whom the Software is
--# furnished to do so, subject to the following conditions:
--#
--# The above copyright notice and this permission notice shall be included in
--# all copies or substantial portions of the Software.
--#
--# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
--# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
--# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL THE
--# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
--# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
--# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
--# IN THE SOFTWARE.
--#
--#*****************************************************************************
--
--__author__  = 'Ka-Ping Yee <ping at lfw.org>'
--__license__ = 'MIT'
--
--import string
--import sys
--from tokenize import tokenprog
--from types import StringType
--
--class ItplError(ValueError):
--    def __init__(self, text, pos):
--        self.text = text
--        self.pos = pos
--    def __str__(self):
--        return "unfinished expression in %s at char %d" % (
--            repr(self.text), self.pos)
--
--def matchorfail(text, pos):
--    match = tokenprog.match(text, pos)
--    if match is None:
--        raise ItplError(text, pos)
--    return match, match.end()
--
--class Itpl:
--    """Class representing a string with interpolation abilities.
--    
--    Upon creation, an instance works out what parts of the format
--    string are literal and what parts need to be evaluated.  The
--    evaluation and substitution happens in the namespace of the
--    caller when str(instance) is called."""
--
--    def __init__(self, format,codec='utf_8',encoding_errors='backslashreplace'):
--        """The single mandatory argument to this constructor is a format
--        string.
--
--        The format string is parsed according to the following rules:
--
--        1.  A dollar sign and a name, possibly followed by any of: 
--              - an open-paren, and anything up to the matching paren 
--              - an open-bracket, and anything up to the matching bracket 
--              - a period and a name 
--            any number of times, is evaluated as a Python expression.
--
--        2.  A dollar sign immediately followed by an open-brace, and
--            anything up to the matching close-brace, is evaluated as
--            a Python expression.
--
--        3.  Outside of the expressions described in the above two rules,
--            two dollar signs in a row give you one literal dollar sign.
--
--        Optional arguments:
--
--        - codec('utf_8'): a string containing the name of a valid Python
--        codec.
--
--        - encoding_errors('backslashreplace'): a string with a valid error handling
--        policy.  See the codecs module documentation for details.
--
--        These are used to encode the format string if a call to str() fails on
--        the expanded result."""
--
--        if not isinstance(format,basestring):
--            raise TypeError, "needs string initializer"
--        self.format = format
--        self.codec = codec
--        self.encoding_errors = encoding_errors
--        
--        namechars = "abcdefghijklmnopqrstuvwxyz" \
--            "ABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_";
--        chunks = []
--        pos = 0
--
--        while 1:
--            dollar = string.find(format, "$", pos)
--            if dollar < 0: break
--            nextchar = format[dollar+1]
--
--            if nextchar == "{":
--                chunks.append((0, format[pos:dollar]))
--                pos, level = dollar+2, 1
--                while level:
--                    match, pos = matchorfail(format, pos)
--                    tstart, tend = match.regs[3]
--                    token = format[tstart:tend]
--                    if token == "{": level = level+1
--                    elif token == "}": level = level-1
--                chunks.append((1, format[dollar+2:pos-1]))
--
--            elif nextchar in namechars:
--                chunks.append((0, format[pos:dollar]))
--                match, pos = matchorfail(format, dollar+1)
--                while pos < len(format):
--                    if format[pos] == "." and \
--                        pos+1 < len(format) and format[pos+1] in namechars:
--                        match, pos = matchorfail(format, pos+1)
--                    elif format[pos] in "([":
--                        pos, level = pos+1, 1
--                        while level:
--                            match, pos = matchorfail(format, pos)
--                            tstart, tend = match.regs[3]
--                            token = format[tstart:tend]
--                            if token[0] in "([": level = level+1
--                            elif token[0] in ")]": level = level-1
--                    else: break
--                chunks.append((1, format[dollar+1:pos]))
--
--            else:
--                chunks.append((0, format[pos:dollar+1]))
--                pos = dollar + 1 + (nextchar == "$")
--
--        if pos < len(format): chunks.append((0, format[pos:]))
--        self.chunks = chunks
--
--    def __repr__(self):
--        return "<Itpl %s >" % repr(self.format)
--
--    def _str(self,glob,loc):
--        """Evaluate to a string in the given globals/locals.
--
--        The final output is built by calling str(), but if this fails, the
--        result is encoded with the instance's codec and error handling policy,
--        via a call to out.encode(self.codec,self.encoding_errors)"""
--        result = []
--        app = result.append
--        for live, chunk in self.chunks:
--            if live: app(str(eval(chunk,glob,loc)))
--            else: app(chunk)
--        out = ''.join(result)
--        try:
--            return str(out)
--        except UnicodeError:
--            return out.encode(self.codec,self.encoding_errors)
--
--    def __str__(self):
--        """Evaluate and substitute the appropriate parts of the string."""
--
--        # We need to skip enough frames to get to the actual caller outside of
--        # Itpl.
--        frame = sys._getframe(1)
--        while frame.f_globals["__name__"] == __name__: frame = frame.f_back
--        loc, glob = frame.f_locals, frame.f_globals
--
--        return self._str(glob,loc)
--    
--class ItplNS(Itpl):
--    """Class representing a string with interpolation abilities.
--
--    This inherits from Itpl, but at creation time a namespace is provided
--    where the evaluation will occur.  The interpolation becomes a bit more
--        efficient, as no traceback needs to be extracted.  It also allows the
--    caller to supply a different namespace for the interpolation to occur than
--    its own."""
--    
--    def __init__(self, format,globals,locals=None,
--                 codec='utf_8',encoding_errors='backslashreplace'):
--        """ItplNS(format,globals[,locals]) -> interpolating string instance.
--
--        This constructor, besides a format string, takes a globals dictionary
--        and optionally a locals (which defaults to globals if not provided).
--
--        For further details, see the Itpl constructor."""
--
--        if locals is None:
--            locals = globals
--        self.globals = globals
--        self.locals = locals
--        Itpl.__init__(self,format,codec,encoding_errors)
--        
--    def __str__(self):
--        """Evaluate and substitute the appropriate parts of the string."""
--        return self._str(self.globals,self.locals)
--
--    def __repr__(self):
--        return "<ItplNS %s >" % repr(self.format)
--
--# utilities for fast printing
--def itpl(text): return str(Itpl(text))
--def printpl(text): print itpl(text)
--# versions with namespace
--def itplns(text,globals,locals=None): return str(ItplNS(text,globals,locals))
--def printplns(text,globals,locals=None): print itplns(text,globals,locals)
--
--class ItplFile:
--    """A file object that filters each write() through an interpolator."""
--    def __init__(self, file): self.file = file
--    def __repr__(self): return "<interpolated " + repr(self.file) + ">"
--    def __getattr__(self, attr): return getattr(self.file, attr)
--    def write(self, text): self.file.write(str(Itpl(text)))
--
--def filter(file=sys.stdout):
--    """Return an ItplFile that filters writes to the given file object.
--    
--    'file = filter(file)' replaces 'file' with a filtered object that
--    has a write() method.  When called with no argument, this creates
--    a filter to sys.stdout."""
--    return ItplFile(file)
--
--def unfilter(ifile=None):
--    """Return the original file that corresponds to the given ItplFile.
--    
--    'file = unfilter(file)' undoes the effect of 'file = filter(file)'.
--    'sys.stdout = unfilter()' undoes the effect of 'sys.stdout = filter()'."""
--    return ifile and ifile.file or sys.stdout.file
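The Itpl code dropped above splits a format string into literal and "live" chunks and evaluates the live ones with eval() in the caller's frame (or, for ItplNS, in an explicit namespace). A minimal usage sketch, assuming the module is importable as IPython.external.Itpl as in a stock IPython 0.10 tree, written in Python 2 to match the code above::

    from IPython.external.Itpl import itpl, ItplNS

    user = "guido"
    files = ["a.py", "b.py"]

    # $name and ${expression} are picked up from the caller's namespace
    print itpl("hello $user, you have ${len(files)} files")

    # ItplNS evaluates against an explicit namespace instead of walking
    # the stack with sys._getframe, so the lookup context is predictable
    ns = {"user": user, "files": files}
    print str(ItplNS("hello $user, you have ${len(files)} files", ns))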
 Index: ipython-0.10/IPython/external/mglob/__init__.py
 ===================================================================
 --- /dev/null
@@ -10416,474 +50,6 @@ Index: ipython-0.10/IPython/external/mglob/__init__.py
 +    from mglob import *
 +except ImportError:
 +    from _mglob import *
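The three-line __init__.py just above is the whole unbundling scheme this patch applies to every external module: try the system-wide copy first and fall back to the bundled source, which is renamed with a leading underscore. A generic sketch of the same shim, using a deliberately hypothetical module name::

    # IPython/external/somelib/__init__.py  ("somelib" is a placeholder)
    try:
        from somelib import *      # system-packaged copy, if installed
    except ImportError:
        from _somelib import *     # bundled fallback shipped alongside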
-Index: ipython-0.10/IPython/external/mglob/_mglob.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/mglob/_mglob.py
-@@ -0,0 +1,229 @@
-+#!/usr/bin/env python
-+
-+r""" mglob - enhanced file list expansion module
-+
-+Use as stand-alone utility (for xargs, `backticks` etc.), 
-+or a globbing library for own python programs. Globbing the sys.argv is something 
-+that almost every Windows script has to perform manually, and this module is here
-+to help with that task. Also Unix users will benefit from enhanced modes 
-+such as recursion, exclusion, directory omission...
-+
-+Unlike glob.glob, directories are not included in the glob unless specified 
-+with 'dir:'
-+
-+'expand' is the function to use in python programs. Typical use
-+to expand argv (esp. in windows)::
-+
-+    try:
-+        import mglob 
-+        files = mglob.expand(sys.argv[1:])
-+    except ImportError:
-+        print "mglob not found; try 'easy_install mglob' for extra features"
-+        files = sys.argv[1:] 
-+
-+Note that for unix, shell expands *normal* wildcards (*.cpp, etc.) in argv.
-+Therefore, you might want to use quotes with normal wildcards to prevent this 
-+expansion, in order for mglob to see the wildcards and get the wanted behaviour.
-+Not quoting the wildcards is harmless and typically has equivalent results, though.
-+
-+Author: Ville Vainio <vivainio at gmail.com>
-+License: MIT Open Source license
-+
-+"""
-+
-+#Assigned in variable for "usage" printing convenience"
-+
-+globsyntax = """\
-+    This program allows specifying filenames with "mglob" mechanism.
-+    Supported syntax in globs (wildcard matching patterns)::
-+    
-+     *.cpp ?ellowo*                
-+         - obvious. Differs from normal glob in that dirs are not included.
-+           Unix users might want to write this as: "*.cpp" "?ellowo*"
-+     rec:/usr/share=*.txt,*.doc    
-+         - get all *.txt and *.doc under /usr/share, 
-+           recursively
-+     rec:/usr/share
-+         - All files under /usr/share, recursively
-+     rec:*.py
-+         - All .py files under current working dir, recursively
-+     foo                           
-+         - File or dir foo
-+     !*.bak readme*                   
-+         - readme*, exclude files ending with .bak
-+     !.svn/ !.hg/ !*_Data/ rec:.
-+         - Skip .svn, .hg, foo_Data dirs (and their subdirs) in recurse.
-+           Trailing / is the key, \ does not work! Use !.*/ for all hidden.
-+     dir:foo                       
-+         - the directory foo if it exists (not files in foo)
-+     dir:*                         
-+         - all directories in current folder
-+     foo.py bar.* !h* rec:*.py
-+         - Obvious. !h* exclusion only applies for rec:*.py.
-+           foo.py is *not* included twice.
-+     @filelist.txt
-+         - All files listed in 'filelist.txt' file, on separate lines.
-+     "cont:class \wak:" rec:*.py
-+         - Match files containing regexp. Applies to subsequent files.
-+           note quotes because of whitespace.
-+ """
-+
-+
-+__version__ = "0.2"
-+
-+
-+import os,glob,fnmatch,sys,re
-+                
-+def expand(flist,exp_dirs = False):
-+    """ Expand the glob(s) in flist.
-+    
-+    flist may be either a whitespace-separated list of globs/files
-+    or an array of globs/files.
-+    
-+    if exp_dirs is true, directory names in glob are expanded to the files
-+    contained in them - otherwise, directory names are returned as is.
-+    
-+    """
-+    if isinstance(flist, basestring):
-+        import shlex
-+        flist = shlex.split(flist)
-+    done_set = set()
-+    denied_set = set()
-+    cont_set = set()
-+    cur_rejected_dirs = set()
-+    
-+    def recfind(p, pats = ["*"]):
-+        denied_dirs = [os.path.dirname(d) for d in denied_set if d.endswith("/")]
-+        for (dp,dnames,fnames) in os.walk(p):
-+            # see if we should ignore the whole directory
-+            dp_norm = dp.replace("\\","/") + "/"
-+            deny = False
-+            # do not traverse under already rejected dirs
-+            for d in cur_rejected_dirs:
-+                if dp.startswith(d):
-+                    deny = True
-+                    break
-+            if deny:
-+                continue
-+            
-+
-+            #print "dp",dp
-+            bname = os.path.basename(dp)
-+            for deny_pat in denied_dirs:
-+                if fnmatch.fnmatch( bname, deny_pat):
-+                    deny = True
-+                    cur_rejected_dirs.add(dp)
-+                    break
-+            if deny:
-+                continue
-+
-+                    
-+            for f in fnames:
-+                matched = False
-+                for p in pats:
-+                    if fnmatch.fnmatch(f,p):
-+                        matched = True
-+                        break
-+                if matched:
-+                    yield os.path.join(dp,f)            
-+
-+    def once_filter(seq):
-+        for it in seq:
-+            p = os.path.abspath(it)
-+            if p in done_set:
-+                continue
-+            done_set.add(p)
-+            deny = False
-+            for deny_pat in denied_set:
-+                if fnmatch.fnmatch(os.path.basename(p), deny_pat):
-+                    deny = True
-+                    break
-+            if cont_set:
-+                try:
-+                    cont = open(p).read()
-+                except IOError:
-+                    # deny
-+                    continue
-+                for pat in cont_set:
-+                    if not re.search(pat,cont, re.IGNORECASE):
-+                        deny = True
-+                        break
-+                    
-+            if not deny:
-+                yield it
-+        return
-+            
-+    res = []
-+
-+    for ent in flist:
-+        ent = os.path.expanduser(os.path.expandvars(ent))
-+        if ent.lower().startswith('rec:'):
-+            fields = ent[4:].split('=')            
-+            if len(fields) == 2:
-+                pth, patlist = fields
-+            elif len(fields) == 1:
-+                if os.path.isdir(fields[0]):
-+                    # single arg is dir
-+                    pth, patlist = fields[0], '*'
-+                else: 
-+                    # single arg is pattern
-+                    pth, patlist = '.', fields[0]
-+                    
-+            elif len(fields) == 0:
-+                pth, patlist = '.','*'
-+                
-+            pats = patlist.split(',')
-+            res.extend(once_filter(recfind(pth, pats)))
-+        # filelist
-+        elif ent.startswith('@') and os.path.isfile(ent[1:]):
-+            res.extend(once_filter(open(ent[1:]).read().splitlines()))
-+        # exclusion
-+        elif ent.startswith('!'):
-+            denied_set.add(ent[1:])
-+        # glob only dirs
-+        elif ent.lower().startswith('dir:'):
-+            res.extend(once_filter(filter(os.path.isdir,glob.glob(ent[4:]))))
-+        elif ent.lower().startswith('cont:'):
-+            cont_set.add(ent[5:])
-+        # get all files in the specified dir
-+        elif os.path.isdir(ent) and exp_dirs:
-+            res.extend(once_filter(filter(os.path.isfile,glob.glob(ent + os.sep+"*"))))
-+            
-+        # glob only files
-+
-+        elif '*' in ent or '?' in ent:
-+            res.extend(once_filter(filter(os.path.isfile,glob.glob(ent))))
-+
-+        else:
-+            res.extend(once_filter([ent]))
-+    return res
-+            
-+            
-+def test():
-+    assert (
-+        expand("*.py ~/.ipython/*.py rec:/usr/share/doc-base") == 
-+        expand( ['*.py', '~/.ipython/*.py', 'rec:/usr/share/doc-base'] ) 
-+        )
-+    
-+def main():
-+    if len(sys.argv) < 2:
-+        print globsyntax
-+        return
-+    
-+    print "\n".join(expand(sys.argv[1:])),
-+
-+def mglob_f(self, arg):
-+    from IPython.genutils import SList
-+    if arg.strip():
-+        return SList(expand(arg))
-+    print "Please specify pattern!"
-+    print globsyntax
-+
-+def init_ipython(ip):
-+    """ register %mglob for IPython """
-+    mglob_f.__doc__ = globsyntax
-+    ip.expose_magic("mglob",mglob_f)  
-+    
-+# test()
-+if __name__ == "__main__":
-+    main()
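For reference, a short usage sketch of the mglob module added above, exercising the rec: and exclusion syntax from its globsyntax help text (the import path assumes the post-patch package layout; Python 2 to match)::

    from IPython.external.mglob import expand

    # every .py file under the current directory, recursively,
    # skipping .svn directories; exclusions apply to later entries
    for f in expand("!.svn/ rec:*.py"):
        print f

    # the list form is what a Windows script would feed sys.argv through
    files = expand(["!.svn/", "rec:*.py"])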
-Index: ipython-0.10/IPython/external/mglob.py
-===================================================================
---- ipython-0.10.orig/IPython/external/mglob.py
-+++ /dev/null
-@@ -1,229 +0,0 @@
--#!/usr/bin/env python
--
--r""" mglob - enhanced file list expansion module
--
--Use as stand-alone utility (for xargs, `backticks` etc.), 
--or a globbing library for own python programs. Globbing the sys.argv is something 
--that almost every Windows script has to perform manually, and this module is here
--to help with that task. Also Unix users will benefit from enhanced modes 
--such as recursion, exclusion, directory omission...
--
--Unlike glob.glob, directories are not included in the glob unless specified 
--with 'dir:'
--
--'expand' is the function to use in python programs. Typical use
--to expand argv (esp. in windows)::
--
--    try:
--        import mglob 
--        files = mglob.expand(sys.argv[1:])
--    except ImportError:
--        print "mglob not found; try 'easy_install mglob' for extra features"
--        files = sys.argv[1:] 
--
--Note that for unix, shell expands *normal* wildcards (*.cpp, etc.) in argv.
--Therefore, you might want to use quotes with normal wildcards to prevent this 
--expansion, in order for mglob to see the wildcards and get the wanted behaviour.
--Not quoting the wildcards is harmless and typically has equivalent results, though.
--
--Author: Ville Vainio <vivainio at gmail.com>
--License: MIT Open Source license
--
--"""
--
--#Assigned in variable for "usage" printing convenience"
--
--globsyntax = """\
--    This program allows specifying filenames with "mglob" mechanism.
--    Supported syntax in globs (wildcard matching patterns)::
--    
--     *.cpp ?ellowo*                
--         - obvious. Differs from normal glob in that dirs are not included.
--           Unix users might want to write this as: "*.cpp" "?ellowo*"
--     rec:/usr/share=*.txt,*.doc    
--         - get all *.txt and *.doc under /usr/share, 
--           recursively
--     rec:/usr/share
--         - All files under /usr/share, recursively
--     rec:*.py
--         - All .py files under current working dir, recursively
--     foo                           
--         - File or dir foo
--     !*.bak readme*                   
--         - readme*, exclude files ending with .bak
--     !.svn/ !.hg/ !*_Data/ rec:.
--         - Skip .svn, .hg, foo_Data dirs (and their subdirs) in recurse.
--           Trailing / is the key, \ does not work! Use !.*/ for all hidden.
--     dir:foo                       
--         - the directory foo if it exists (not files in foo)
--     dir:*                         
--         - all directories in current folder
--     foo.py bar.* !h* rec:*.py
--         - Obvious. !h* exclusion only applies for rec:*.py.
--           foo.py is *not* included twice.
--     @filelist.txt
--         - All files listed in 'filelist.txt' file, on separate lines.
--     "cont:class \wak:" rec:*.py
--         - Match files containing regexp. Applies to subsequent files.
--           note quotes because of whitespace.
-- """
--
--
--__version__ = "0.2"
--
--
--import os,glob,fnmatch,sys,re
--                
--def expand(flist,exp_dirs = False):
--    """ Expand the glob(s) in flist.
--    
--    flist may be either a whitespace-separated list of globs/files
--    or an array of globs/files.
--    
--    if exp_dirs is true, directory names in glob are expanded to the files
--    contained in them - otherwise, directory names are returned as is.
--    
--    """
--    if isinstance(flist, basestring):
--        import shlex
--        flist = shlex.split(flist)
--    done_set = set()
--    denied_set = set()
--    cont_set = set()
--    cur_rejected_dirs = set()
--    
--    def recfind(p, pats = ["*"]):
--        denied_dirs = [os.path.dirname(d) for d in denied_set if d.endswith("/")]
--        for (dp,dnames,fnames) in os.walk(p):
--            # see if we should ignore the whole directory
--            dp_norm = dp.replace("\\","/") + "/"
--            deny = False
--            # do not traverse under already rejected dirs
--            for d in cur_rejected_dirs:
--                if dp.startswith(d):
--                    deny = True
--                    break
--            if deny:
--                continue
--            
--
--            #print "dp",dp
--            bname = os.path.basename(dp)
--            for deny_pat in denied_dirs:
--                if fnmatch.fnmatch( bname, deny_pat):
--                    deny = True
--                    cur_rejected_dirs.add(dp)
--                    break
--            if deny:
--                continue
--
--                    
--            for f in fnames:
--                matched = False
--                for p in pats:
--                    if fnmatch.fnmatch(f,p):
--                        matched = True
--                        break
--                if matched:
--                    yield os.path.join(dp,f)            
--
--    def once_filter(seq):
--        for it in seq:
--            p = os.path.abspath(it)
--            if p in done_set:
--                continue
--            done_set.add(p)
--            deny = False
--            for deny_pat in denied_set:
--                if fnmatch.fnmatch(os.path.basename(p), deny_pat):
--                    deny = True
--                    break
--            if cont_set:
--                try:
--                    cont = open(p).read()
--                except IOError:
--                    # deny
--                    continue
--                for pat in cont_set:
--                    if not re.search(pat,cont, re.IGNORECASE):
--                        deny = True
--                        break
--                    
--            if not deny:
--                yield it
--        return
--            
--    res = []
--
--    for ent in flist:
--        ent = os.path.expanduser(os.path.expandvars(ent))
--        if ent.lower().startswith('rec:'):
--            fields = ent[4:].split('=')            
--            if len(fields) == 2:
--                pth, patlist = fields
--            elif len(fields) == 1:
--                if os.path.isdir(fields[0]):
--                    # single arg is dir
--                    pth, patlist = fields[0], '*'
--                else: 
--                    # single arg is pattern
--                    pth, patlist = '.', fields[0]
--                    
--            elif len(fields) == 0:
--                pth, patlist = '.','*'
--                
--            pats = patlist.split(',')
--            res.extend(once_filter(recfind(pth, pats)))
--        # filelist
--        elif ent.startswith('@') and os.path.isfile(ent[1:]):
--            res.extend(once_filter(open(ent[1:]).read().splitlines()))
--        # exclusion
--        elif ent.startswith('!'):
--            denied_set.add(ent[1:])
--        # glob only dirs
--        elif ent.lower().startswith('dir:'):
--            res.extend(once_filter(filter(os.path.isdir,glob.glob(ent[4:]))))
--        elif ent.lower().startswith('cont:'):
--            cont_set.add(ent[5:])
--        # get all files in the specified dir
--        elif os.path.isdir(ent) and exp_dirs:
--            res.extend(once_filter(filter(os.path.isfile,glob.glob(ent + os.sep+"*"))))
--            
--        # glob only files
--
--        elif '*' in ent or '?' in ent:
--            res.extend(once_filter(filter(os.path.isfile,glob.glob(ent))))
--
--        else:
--            res.extend(once_filter([ent]))
--    return res
--            
--            
--def test():
--    assert (
--        expand("*.py ~/.ipython/*.py rec:/usr/share/doc-base") == 
--        expand( ['*.py', '~/.ipython/*.py', 'rec:/usr/share/doc-base'] ) 
--        )
--    
--def main():
--    if len(sys.argv) < 2:
--        print globsyntax
--        return
--    
--    print "\n".join(expand(sys.argv[1:])),
--
--def mglob_f(self, arg):
--    from IPython.genutils import SList
--    if arg.strip():
--        return SList(expand(arg))
--    print "Please specify pattern!"
--    print globsyntax
--
--def init_ipython(ip):
--    """ register %mglob for IPython """
--    mglob_f.__doc__ = globsyntax
--    ip.expose_magic("mglob",mglob_f)  
--    
--# test()
--if __name__ == "__main__":
--    main()
 Index: ipython-0.10/IPython/external/path/__init__.py
 ===================================================================
 --- /dev/null
@@ -10893,1962 +59,6 @@ Index: ipython-0.10/IPython/external/path/__init__.py
 +    from path import *
 +except ImportError:
 +    from _path import *
-Index: ipython-0.10/IPython/external/path/_path.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/path/_path.py
-@@ -0,0 +1,973 @@
-+""" path.py - An object representing a path to a file or directory.
-+
-+Example:
-+
-+from IPython.external.path import path
-+d = path('/home/guido/bin')
-+for f in d.files('*.py'):
-+    f.chmod(0755)
-+
-+This module requires Python 2.2 or later.
-+
-+
-+URL:     http://www.jorendorff.com/articles/python/path
-+Author:  Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
-+Date:    9 Mar 2007
-+"""
-+
-+
-+# TODO
-+#   - Tree-walking functions don't avoid symlink loops.  Matt Harrison
-+#     sent me a patch for this.
-+#   - Bug in write_text().  It doesn't support Universal newline mode.
-+#   - Better error message in listdir() when self isn't a
-+#     directory. (On Windows, the error message really sucks.)
-+#   - Make sure everything has a good docstring.
-+#   - Add methods for regex find and replace.
-+#   - guess_content_type() method?
-+#   - Perhaps support arguments to touch().
-+
-+from __future__ import generators
-+
-+import sys, warnings, os, fnmatch, glob, shutil, codecs
-+# deprecated in python 2.6
-+warnings.filterwarnings('ignore', r'.*md5.*')
-+import md5
-+
-+__version__ = '2.2'
-+__all__ = ['path']
-+
-+# Platform-specific support for path.owner
-+if os.name == 'nt':
-+    try:
-+        import win32security
-+    except ImportError:
-+        win32security = None
-+else:
-+    try:
-+        import pwd
-+    except ImportError:
-+        pwd = None
-+
-+# Pre-2.3 support.  Are unicode filenames supported?
-+_base = str
-+_getcwd = os.getcwd
-+try:
-+    if os.path.supports_unicode_filenames:
-+        _base = unicode
-+        _getcwd = os.getcwdu
-+except AttributeError:
-+    pass
-+
-+# Pre-2.3 workaround for booleans
-+try:
-+    True, False
-+except NameError:
-+    True, False = 1, 0
-+
-+# Pre-2.3 workaround for basestring.
-+try:
-+    basestring
-+except NameError:
-+    basestring = (str, unicode)
-+
-+# Universal newline support
-+_textmode = 'r'
-+if hasattr(file, 'newlines'):
-+    _textmode = 'U'
-+
-+
-+class TreeWalkWarning(Warning):
-+    pass
-+
-+class path(_base):
-+    """ Represents a filesystem path.
-+
-+    For documentation on individual methods, consult their
-+    counterparts in os.path.
-+    """
-+
-+    # --- Special Python methods.
-+
-+    def __repr__(self):
-+        return 'path(%s)' % _base.__repr__(self)
-+
-+    # Adding a path and a string yields a path.
-+    def __add__(self, more):
-+        try:
-+            resultStr = _base.__add__(self, more)
-+        except TypeError:  #Python bug
-+            resultStr = NotImplemented
-+        if resultStr is NotImplemented:
-+            return resultStr
-+        return self.__class__(resultStr)
-+
-+    def __radd__(self, other):
-+        if isinstance(other, basestring):
-+            return self.__class__(other.__add__(self))
-+        else:
-+            return NotImplemented
-+
-+    # The / operator joins paths.
-+    def __div__(self, rel):
-+        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
-+
-+        Join two path components, adding a separator character if
-+        needed.
-+        """
-+        return self.__class__(os.path.join(self, rel))
-+
-+    # Make the / operator work even when true division is enabled.
-+    __truediv__ = __div__
-+
-+    def getcwd(cls):
-+        """ Return the current working directory as a path object. """
-+        return cls(_getcwd())
-+    getcwd = classmethod(getcwd)
-+
-+
-+    # --- Operations on path strings.
-+
-+    isabs = os.path.isabs
-+    def abspath(self):       return self.__class__(os.path.abspath(self))
-+    def normcase(self):      return self.__class__(os.path.normcase(self))
-+    def normpath(self):      return self.__class__(os.path.normpath(self))
-+    def realpath(self):      return self.__class__(os.path.realpath(self))
-+    def expanduser(self):    return self.__class__(os.path.expanduser(self))
-+    def expandvars(self):    return self.__class__(os.path.expandvars(self))
-+    def dirname(self):       return self.__class__(os.path.dirname(self))
-+    basename = os.path.basename
-+
-+    def expand(self):
-+        """ Clean up a filename by calling expandvars(),
-+        expanduser(), and normpath() on it.
-+
-+        This is commonly everything needed to clean up a filename
-+        read from a configuration file, for example.
-+        """
-+        return self.expandvars().expanduser().normpath()
-+
-+    def _get_namebase(self):
-+        base, ext = os.path.splitext(self.name)
-+        return base
-+
-+    def _get_ext(self):
-+        f, ext = os.path.splitext(_base(self))
-+        return ext
-+
-+    def _get_drive(self):
-+        drive, r = os.path.splitdrive(self)
-+        return self.__class__(drive)
-+
-+    parent = property(
-+        dirname, None, None,
-+        """ This path's parent directory, as a new path object.
-+
-+        For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
-+        """)
-+
-+    name = property(
-+        basename, None, None,
-+        """ The name of this file or directory without the full path.
-+
-+        For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
-+        """)
-+
-+    namebase = property(
-+        _get_namebase, None, None,
-+        """ The same as path.name, but with one file extension stripped off.
-+
-+        For example, path('/home/guido/python.tar.gz').name     == 'python.tar.gz',
-+        but          path('/home/guido/python.tar.gz').namebase == 'python.tar'
-+        """)
-+
-+    ext = property(
-+        _get_ext, None, None,
-+        """ The file extension, for example '.py'. """)
-+
-+    drive = property(
-+        _get_drive, None, None,
-+        """ The drive specifier, for example 'C:'.
-+        This is always empty on systems that don't use drive specifiers.
-+        """)
-+
-+    def splitpath(self):
-+        """ p.splitpath() -> Return (p.parent, p.name). """
-+        parent, child = os.path.split(self)
-+        return self.__class__(parent), child
-+
-+    def splitdrive(self):
-+        """ p.splitdrive() -> Return (p.drive, <the rest of p>).
-+
-+        Split the drive specifier from this path.  If there is
-+        no drive specifier, p.drive is empty, so the return value
-+        is simply (path(''), p).  This is always the case on Unix.
-+        """
-+        drive, rel = os.path.splitdrive(self)
-+        return self.__class__(drive), rel
-+
-+    def splitext(self):
-+        """ p.splitext() -> Return (p.stripext(), p.ext).
-+
-+        Split the filename extension from this path and return
-+        the two parts.  Either part may be empty.
-+
-+        The extension is everything from '.' to the end of the
-+        last path segment.  This has the property that if
-+        (a, b) == p.splitext(), then a + b == p.
-+        """
-+        filename, ext = os.path.splitext(self)
-+        return self.__class__(filename), ext
-+
-+    def stripext(self):
-+        """ p.stripext() -> Remove one file extension from the path.
-+
-+        For example, path('/home/guido/python.tar.gz').stripext()
-+        returns path('/home/guido/python.tar').
-+        """
-+        return self.splitext()[0]
-+
-+    if hasattr(os.path, 'splitunc'):
-+        def splitunc(self):
-+            unc, rest = os.path.splitunc(self)
-+            return self.__class__(unc), rest
-+
-+        def _get_uncshare(self):
-+            unc, r = os.path.splitunc(self)
-+            return self.__class__(unc)
-+
-+        uncshare = property(
-+            _get_uncshare, None, None,
-+            """ The UNC mount point for this path.
-+            This is empty for paths on local drives. """)
-+
-+    def joinpath(self, *args):
-+        """ Join two or more path components, adding a separator
-+        character (os.sep) if needed.  Returns a new path
-+        object.
-+        """
-+        return self.__class__(os.path.join(self, *args))
-+
-+    def splitall(self):
-+        r""" Return a list of the path components in this path.
-+
-+        The first item in the list will be a path.  Its value will be
-+        either os.curdir, os.pardir, empty, or the root directory of
-+        this path (for example, '/' or 'C:\\').  The other items in
-+        the list will be strings.
-+
-+        path.path.joinpath(*result) will yield the original path.
-+        """
-+        parts = []
-+        loc = self
-+        while loc != os.curdir and loc != os.pardir:
-+            prev = loc
-+            loc, child = prev.splitpath()
-+            if loc == prev:
-+                break
-+            parts.append(child)
-+        parts.append(loc)
-+        parts.reverse()
-+        return parts
-+
-+    def relpath(self):
-+        """ Return this path as a relative path,
-+        based from the current working directory.
-+        """
-+        cwd = self.__class__(os.getcwd())
-+        return cwd.relpathto(self)
-+
-+    def relpathto(self, dest):
-+        """ Return a relative path from self to dest.
-+
-+        If there is no relative path from self to dest, for example if
-+        they reside on different drives in Windows, then this returns
-+        dest.abspath().
-+        """
-+        origin = self.abspath()
-+        dest = self.__class__(dest).abspath()
-+
-+        orig_list = origin.normcase().splitall()
-+        # Don't normcase dest!  We want to preserve the case.
-+        dest_list = dest.splitall()
-+
-+        if orig_list[0] != os.path.normcase(dest_list[0]):
-+            # Can't get here from there.
-+            return dest
-+
-+        # Find the location where the two paths start to differ.
-+        i = 0
-+        for start_seg, dest_seg in zip(orig_list, dest_list):
-+            if start_seg != os.path.normcase(dest_seg):
-+                break
-+            i += 1
-+
-+        # Now i is the point where the two paths diverge.
-+        # Need a certain number of "os.pardir"s to work up
-+        # from the origin to the point of divergence.
-+        segments = [os.pardir] * (len(orig_list) - i)
-+        # Need to add the diverging part of dest_list.
-+        segments += dest_list[i:]
-+        if len(segments) == 0:
-+            # If they happen to be identical, use os.curdir.
-+            relpath = os.curdir
-+        else:
-+            relpath = os.path.join(*segments)
-+        return self.__class__(relpath)
-+
-+    # --- Listing, searching, walking, and matching
-+
-+    def listdir(self, pattern=None):
-+        """ D.listdir() -> List of items in this directory.
-+
-+        Use D.files() or D.dirs() instead if you want a listing
-+        of just files or just subdirectories.
-+
-+        The elements of the list are path objects.
-+
-+        With the optional 'pattern' argument, this only lists
-+        items whose names match the given pattern.
-+        """
-+        names = os.listdir(self)
-+        if pattern is not None:
-+            names = fnmatch.filter(names, pattern)
-+        return [self / child for child in names]
-+
-+    def dirs(self, pattern=None):
-+        """ D.dirs() -> List of this directory's subdirectories.
-+
-+        The elements of the list are path objects.
-+        This does not walk recursively into subdirectories
-+        (but see path.walkdirs).
-+
-+        With the optional 'pattern' argument, this only lists
-+        directories whose names match the given pattern.  For
-+        example, d.dirs('build-*').
-+        """
-+        return [p for p in self.listdir(pattern) if p.isdir()]
-+
-+    def files(self, pattern=None):
-+        """ D.files() -> List of the files in this directory.
-+
-+        The elements of the list are path objects.
-+        This does not walk into subdirectories (see path.walkfiles).
-+
-+        With the optional 'pattern' argument, this only lists files
-+        whose names match the given pattern.  For example,
-+        d.files('*.pyc').
-+        """
-+        
-+        return [p for p in self.listdir(pattern) if p.isfile()]
-+
-+    def walk(self, pattern=None, errors='strict'):
-+        """ D.walk() -> iterator over files and subdirs, recursively.
-+
-+        The iterator yields path objects naming each child item of
-+        this directory and its descendants.  This requires that
-+        D.isdir().
-+
-+        This performs a depth-first traversal of the directory tree.
-+        Each directory is returned just before all its children.
-+
-+        The errors= keyword argument controls behavior when an
-+        error occurs.  The default is 'strict', which causes an
-+        exception.  The other allowed values are 'warn', which
-+        reports the error via warnings.warn(), and 'ignore'.
-+        """
-+        if errors not in ('strict', 'warn', 'ignore'):
-+            raise ValueError("invalid errors parameter")
-+
-+        try:
-+            childList = self.listdir()
-+        except Exception:
-+            if errors == 'ignore':
-+                return
-+            elif errors == 'warn':
-+                warnings.warn(
-+                    "Unable to list directory '%s': %s"
-+                    % (self, sys.exc_info()[1]),
-+                    TreeWalkWarning)
-+                return
-+            else:
-+                raise
-+
-+        for child in childList:
-+            if pattern is None or child.fnmatch(pattern):
-+                yield child
-+            try:
-+                isdir = child.isdir()
-+            except Exception:
-+                if errors == 'ignore':
-+                    isdir = False
-+                elif errors == 'warn':
-+                    warnings.warn(
-+                        "Unable to access '%s': %s"
-+                        % (child, sys.exc_info()[1]),
-+                        TreeWalkWarning)
-+                    isdir = False
-+                else:
-+                    raise
-+
-+            if isdir:
-+                for item in child.walk(pattern, errors):
-+                    yield item
-+
-+    def walkdirs(self, pattern=None, errors='strict'):
-+        """ D.walkdirs() -> iterator over subdirs, recursively.
-+
-+        With the optional 'pattern' argument, this yields only
-+        directories whose names match the given pattern.  For
-+        example, mydir.walkdirs('*test') yields only directories
-+        with names ending in 'test'.
-+
-+        The errors= keyword argument controls behavior when an
-+        error occurs.  The default is 'strict', which causes an
-+        exception.  The other allowed values are 'warn', which
-+        reports the error via warnings.warn(), and 'ignore'.
-+        """
-+        if errors not in ('strict', 'warn', 'ignore'):
-+            raise ValueError("invalid errors parameter")
-+
-+        try:
-+            dirs = self.dirs()
-+        except Exception:
-+            if errors == 'ignore':
-+                return
-+            elif errors == 'warn':
-+                warnings.warn(
-+                    "Unable to list directory '%s': %s"
-+                    % (self, sys.exc_info()[1]),
-+                    TreeWalkWarning)
-+                return
-+            else:
-+                raise
-+
-+        for child in dirs:
-+            if pattern is None or child.fnmatch(pattern):
-+                yield child
-+            for subsubdir in child.walkdirs(pattern, errors):
-+                yield subsubdir
-+
-+    def walkfiles(self, pattern=None, errors='strict'):
-+        """ D.walkfiles() -> iterator over files in D, recursively.
-+
-+        The optional argument, pattern, limits the results to files
-+        with names that match the pattern.  For example,
-+        mydir.walkfiles('*.tmp') yields only files with the .tmp
-+        extension.
-+        """
-+        if errors not in ('strict', 'warn', 'ignore'):
-+            raise ValueError("invalid errors parameter")
-+
-+        try:
-+            childList = self.listdir()
-+        except Exception:
-+            if errors == 'ignore':
-+                return
-+            elif errors == 'warn':
-+                warnings.warn(
-+                    "Unable to list directory '%s': %s"
-+                    % (self, sys.exc_info()[1]),
-+                    TreeWalkWarning)
-+                return
-+            else:
-+                raise
-+
-+        for child in childList:
-+            try:
-+                isfile = child.isfile()
-+                isdir = not isfile and child.isdir()
-+            except:
-+                if errors == 'ignore':
-+                    continue
-+                elif errors == 'warn':
-+                    warnings.warn(
-+                        "Unable to access '%s': %s"
-+                        % (self, sys.exc_info()[1]),
-+                        TreeWalkWarning)
-+                    continue
-+                else:
-+                    raise
-+
-+            if isfile:
-+                if pattern is None or child.fnmatch(pattern):
-+                    yield child
-+            elif isdir:
-+                for f in child.walkfiles(pattern, errors):
-+                    yield f
-+
-+    def fnmatch(self, pattern):
-+        """ Return True if self.name matches the given pattern.
-+
-+        pattern - A filename pattern with wildcards,
-+            for example '*.py'.
-+        """
-+        return fnmatch.fnmatch(self.name, pattern)
-+
-+    def glob(self, pattern):
-+        """ Return a list of path objects that match the pattern.
-+
-+        pattern - a path relative to this directory, with wildcards.
-+
-+        For example, path('/users').glob('*/bin/*') returns a list
-+        of all the files users have in their bin directories.
-+        """
-+        cls = self.__class__
-+        return [cls(s) for s in glob.glob(_base(self / pattern))]
-+
-+
-+    # --- Reading or writing an entire file at once.
-+
-+    def open(self, mode='r'):
-+        """ Open this file.  Return a file object. """
-+        return file(self, mode)
-+
-+    def bytes(self):
-+        """ Open this file, read all bytes, return them as a string. """
-+        f = self.open('rb')
-+        try:
-+            return f.read()
-+        finally:
-+            f.close()
-+
-+    def write_bytes(self, bytes, append=False):
-+        """ Open this file and write the given bytes to it.
-+
-+        Default behavior is to overwrite any existing file.
-+        Call p.write_bytes(bytes, append=True) to append instead.
-+        """
-+        if append:
-+            mode = 'ab'
-+        else:
-+            mode = 'wb'
-+        f = self.open(mode)
-+        try:
-+            f.write(bytes)
-+        finally:
-+            f.close()
-+
-+    def text(self, encoding=None, errors='strict'):
-+        r""" Open this file, read it in, return the content as a string.
-+
-+        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
-+        are automatically translated to '\n'.
-+
-+        Optional arguments:
-+
-+        encoding - The Unicode encoding (or character set) of
-+            the file.  If present, the content of the file is
-+            decoded and returned as a unicode object; otherwise
-+            it is returned as an 8-bit str.
-+        errors - How to handle Unicode errors; see help(str.decode)
-+            for the options.  Default is 'strict'.
-+        """
-+        if encoding is None:
-+            # 8-bit
-+            f = self.open(_textmode)
-+            try:
-+                return f.read()
-+            finally:
-+                f.close()
-+        else:
-+            # Unicode
-+            f = codecs.open(self, 'r', encoding, errors)
-+            # (Note - Can't use 'U' mode here, since codecs.open
-+            # doesn't support 'U' mode, even in Python 2.3.)
-+            try:
-+                t = f.read()
-+            finally:
-+                f.close()
-+            return (t.replace(u'\r\n', u'\n')
-+                     .replace(u'\r\x85', u'\n')
-+                     .replace(u'\r', u'\n')
-+                     .replace(u'\x85', u'\n')
-+                     .replace(u'\u2028', u'\n'))
-+
-+    def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
-+        r""" Write the given text to this file.
-+
-+        The default behavior is to overwrite any existing file;
-+        to append instead, use the 'append=True' keyword argument.
-+
-+        There are two differences between path.write_text() and
-+        path.write_bytes(): newline handling and Unicode handling.
-+        See below.
-+
-+        Parameters:
-+
-+          - text - str/unicode - The text to be written.
-+
-+          - encoding - str - The Unicode encoding that will be used.
-+            This is ignored if 'text' isn't a Unicode string.
-+
-+          - errors - str - How to handle Unicode encoding errors.
-+            Default is 'strict'.  See help(unicode.encode) for the
-+            options.  This is ignored if 'text' isn't a Unicode
-+            string.
-+
-+          - linesep - keyword argument - str/unicode - The sequence of
-+            characters to be used to mark end-of-line.  The default is
-+            os.linesep.  You can also specify None; this means to
-+            leave all newlines as they are in 'text'.
-+
-+          - append - keyword argument - bool - Specifies what to do if
-+            the file already exists (True: append to the end of it;
-+            False: overwrite it.)  The default is False.
-+
-+
-+        --- Newline handling.
-+
-+        write_text() converts all standard end-of-line sequences
-+        ('\n', '\r', and '\r\n') to your platform's default end-of-line
-+        sequence (see os.linesep; on Windows, for example, the
-+        end-of-line marker is '\r\n').
-+
-+        If you don't like your platform's default, you can override it
-+        using the 'linesep=' keyword argument.  If you specifically want
-+        write_text() to preserve the newlines as-is, use 'linesep=None'.
-+
-+        This applies to Unicode text the same as to 8-bit text, except
-+        there are three additional standard Unicode end-of-line sequences:
-+        u'\x85', u'\r\x85', and u'\u2028'.
-+
-+        (This is slightly different from when you open a file for
-+        writing with fopen(filename, "w") in C or file(filename, 'w')
-+        in Python.)
-+
-+
-+        --- Unicode
-+
-+        If 'text' isn't Unicode, then apart from newline handling, the
-+        bytes are written verbatim to the file.  The 'encoding' and
-+        'errors' arguments are not used and must be omitted.
-+
-+        If 'text' is Unicode, it is first converted to bytes using the
-+        specified 'encoding' (or the default encoding if 'encoding'
-+        isn't specified).  The 'errors' argument applies only to this
-+        conversion.
-+
-+        """
-+        if isinstance(text, unicode):
-+            if linesep is not None:
-+                # Convert all standard end-of-line sequences to
-+                # ordinary newline characters.
-+                text = (text.replace(u'\r\n', u'\n')
-+                            .replace(u'\r\x85', u'\n')
-+                            .replace(u'\r', u'\n')
-+                            .replace(u'\x85', u'\n')
-+                            .replace(u'\u2028', u'\n'))
-+                text = text.replace(u'\n', linesep)
-+            if encoding is None:
-+                encoding = sys.getdefaultencoding()
-+            bytes = text.encode(encoding, errors)
-+        else:
-+            # It is an error to specify an encoding if 'text' is
-+            # an 8-bit string.
-+            assert encoding is None
-+
-+            if linesep is not None:
-+                text = (text.replace('\r\n', '\n')
-+                            .replace('\r', '\n'))
-+                bytes = text.replace('\n', linesep)
-+
-+        self.write_bytes(bytes, append)
-+
-+    def lines(self, encoding=None, errors='strict', retain=True):
-+        r""" Open this file, read all lines, return them in a list.
-+
-+        Optional arguments:
-+            encoding - The Unicode encoding (or character set) of
-+                the file.  The default is None, meaning the content
-+                of the file is read as 8-bit characters and returned
-+                as a list of (non-Unicode) str objects.
-+            errors - How to handle Unicode errors; see help(str.decode)
-+                for the options.  Default is 'strict'
-+            retain - If true, retain newline characters; but all newline
-+                character combinations ('\r', '\n', '\r\n') are
-+                translated to '\n'.  If false, newline characters are
-+                stripped off.  Default is True.
-+
-+        This uses 'U' mode in Python 2.3 and later.
-+        """
-+        if encoding is None and retain:
-+            f = self.open(_textmode)
-+            try:
-+                return f.readlines()
-+            finally:
-+                f.close()
-+        else:
-+            return self.text(encoding, errors).splitlines(retain)
-+
-+    def write_lines(self, lines, encoding=None, errors='strict',
-+                    linesep=os.linesep, append=False):
-+        r""" Write the given lines of text to this file.
-+
-+        By default this overwrites any existing file at this path.
-+
-+        This puts a platform-specific newline sequence on every line.
-+        See 'linesep' below.
-+
-+        lines - A list of strings.
-+
-+        encoding - A Unicode encoding to use.  This applies only if
-+            'lines' contains any Unicode strings.
-+
-+        errors - How to handle errors in Unicode encoding.  This
-+            also applies only to Unicode strings.
-+
-+        linesep - The desired line-ending.  This line-ending is
-+            applied to every line.  If a line already has any
-+            standard line ending ('\r', '\n', '\r\n', u'\x85',
-+            u'\r\x85', u'\u2028'), that will be stripped off and
-+            this will be used instead.  The default is os.linesep,
-+            which is platform-dependent ('\r\n' on Windows, '\n' on
-+            Unix, etc.)  Specify None to write the lines as-is,
-+            like file.writelines().
-+
-+        Use the keyword argument append=True to append lines to the
-+        file.  The default is to overwrite the file.  Warning:
-+        When you use this with Unicode data, if the encoding of the
-+        existing data in the file is different from the encoding
-+        you specify with the encoding= parameter, the result is
-+        mixed-encoding data, which can really confuse someone trying
-+        to read the file later.
-+        """
-+        if append:
-+            mode = 'ab'
-+        else:
-+            mode = 'wb'
-+        f = self.open(mode)
-+        try:
-+            for line in lines:
-+                isUnicode = isinstance(line, unicode)
-+                if linesep is not None:
-+                    # Strip off any existing line-end and add the
-+                    # specified linesep string.
-+                    if isUnicode:
-+                        if line[-2:] in (u'\r\n', u'\x0d\x85'):
-+                            line = line[:-2]
-+                        elif line[-1:] in (u'\r', u'\n',
-+                                           u'\x85', u'\u2028'):
-+                            line = line[:-1]
-+                    else:
-+                        if line[-2:] == '\r\n':
-+                            line = line[:-2]
-+                        elif line[-1:] in ('\r', '\n'):
-+                            line = line[:-1]
-+                    line += linesep
-+                if isUnicode:
-+                    if encoding is None:
-+                        encoding = sys.getdefaultencoding()
-+                    line = line.encode(encoding, errors)
-+                f.write(line)
-+        finally:
-+            f.close()
-+
-+    def read_md5(self):
-+        """ Calculate the md5 hash for this file.
-+
-+        This reads through the entire file.
-+        """
-+        f = self.open('rb')
-+        try:
-+            m = md5.new()
-+            while True:
-+                d = f.read(8192)
-+                if not d:
-+                    break
-+                m.update(d)
-+        finally:
-+            f.close()
-+        return m.digest()
-+
-+    # --- Methods for querying the filesystem.
-+
-+    exists = os.path.exists
-+    isdir = os.path.isdir
-+    isfile = os.path.isfile
-+    islink = os.path.islink
-+    ismount = os.path.ismount
-+
-+    if hasattr(os.path, 'samefile'):
-+        samefile = os.path.samefile
-+
-+    getatime = os.path.getatime
-+    atime = property(
-+        getatime, None, None,
-+        """ Last access time of the file. """)
-+
-+    getmtime = os.path.getmtime
-+    mtime = property(
-+        getmtime, None, None,
-+        """ Last-modified time of the file. """)
-+
-+    if hasattr(os.path, 'getctime'):
-+        getctime = os.path.getctime
-+        ctime = property(
-+            getctime, None, None,
-+            """ Creation time of the file. """)
-+
-+    getsize = os.path.getsize
-+    size = property(
-+        getsize, None, None,
-+        """ Size of the file, in bytes. """)
-+
-+    if hasattr(os, 'access'):
-+        def access(self, mode):
-+            """ Return true if current user has access to this path.
-+
-+            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
-+            """
-+            return os.access(self, mode)
-+
-+    def stat(self):
-+        """ Perform a stat() system call on this path. """
-+        return os.stat(self)
-+
-+    def lstat(self):
-+        """ Like path.stat(), but do not follow symbolic links. """
-+        return os.lstat(self)
-+
-+    def get_owner(self):
-+        r""" Return the name of the owner of this file or directory.
-+
-+        This follows symbolic links.
-+
-+        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
-+        On Windows, a group can own a file or directory.
-+        """
-+        if os.name == 'nt':
-+            if win32security is None:
-+                raise Exception("path.owner requires win32all to be installed")
-+            desc = win32security.GetFileSecurity(
-+                self, win32security.OWNER_SECURITY_INFORMATION)
-+            sid = desc.GetSecurityDescriptorOwner()
-+            account, domain, typecode = win32security.LookupAccountSid(None, sid)
-+            return domain + u'\\' + account
-+        else:
-+            if pwd is None:
-+                raise NotImplementedError("path.owner is not implemented on this platform.")
-+            st = self.stat()
-+            return pwd.getpwuid(st.st_uid).pw_name
-+
-+    owner = property(
-+        get_owner, None, None,
-+        """ Name of the owner of this file or directory. """)
-+
-+    if hasattr(os, 'statvfs'):
-+        def statvfs(self):
-+            """ Perform a statvfs() system call on this path. """
-+            return os.statvfs(self)
-+
-+    if hasattr(os, 'pathconf'):
-+        def pathconf(self, name):
-+            return os.pathconf(self, name)
-+
-+
-+    # --- Modifying operations on files and directories
-+
-+    def utime(self, times):
-+        """ Set the access and modified times of this file. """
-+        os.utime(self, times)
-+
-+    def chmod(self, mode):
-+        os.chmod(self, mode)
-+
-+    if hasattr(os, 'chown'):
-+        def chown(self, uid, gid):
-+            os.chown(self, uid, gid)
-+
-+    def rename(self, new):
-+        os.rename(self, new)
-+
-+    def renames(self, new):
-+        os.renames(self, new)
-+
-+
-+    # --- Create/delete operations on directories
-+
-+    def mkdir(self, mode=0777):
-+        os.mkdir(self, mode)
-+
-+    def makedirs(self, mode=0777):
-+        os.makedirs(self, mode)
-+
-+    def rmdir(self):
-+        os.rmdir(self)
-+
-+    def removedirs(self):
-+        os.removedirs(self)
-+
-+
-+    # --- Modifying operations on files
-+
-+    def touch(self):
-+        """ Set the access/modified times of this file to the current time.
-+        Create the file if it does not exist.
-+        """
-+        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
-+        os.close(fd)
-+        os.utime(self, None)
-+
-+    def remove(self):
-+        os.remove(self)
-+
-+    def unlink(self):
-+        os.unlink(self)
-+
-+
-+    # --- Links
-+
-+    if hasattr(os, 'link'):
-+        def link(self, newpath):
-+            """ Create a hard link at 'newpath', pointing to this file. """
-+            os.link(self, newpath)
-+
-+    if hasattr(os, 'symlink'):
-+        def symlink(self, newlink):
-+            """ Create a symbolic link at 'newlink', pointing here. """
-+            os.symlink(self, newlink)
-+
-+    if hasattr(os, 'readlink'):
-+        def readlink(self):
-+            """ Return the path to which this symbolic link points.
-+
-+            The result may be an absolute or a relative path.
-+            """
-+            return self.__class__(os.readlink(self))
-+
-+        def readlinkabs(self):
-+            """ Return the path to which this symbolic link points.
-+
-+            The result is always an absolute path.
-+            """
-+            p = self.readlink()
-+            if p.isabs():
-+                return p
-+            else:
-+                return (self.parent / p).abspath()
-+
-+
-+    # --- High-level functions from shutil
-+
-+    copyfile = shutil.copyfile
-+    copymode = shutil.copymode
-+    copystat = shutil.copystat
-+    copy = shutil.copy
-+    copy2 = shutil.copy2
-+    copytree = shutil.copytree
-+    if hasattr(shutil, 'move'):
-+        move = shutil.move
-+    rmtree = shutil.rmtree
-+
-+
-+    # --- Special stuff from os
-+
-+    if hasattr(os, 'chroot'):
-+        def chroot(self):
-+            os.chroot(self)
-+
-+    if hasattr(os, 'startfile'):
-+        def startfile(self):
-+            os.startfile(self)
-+
-Index: ipython-0.10/IPython/external/path.py
-===================================================================
---- ipython-0.10.orig/IPython/external/path.py
-+++ /dev/null
-@@ -1,973 +0,0 @@
--""" path.py - An object representing a path to a file or directory.
--
--Example:
--
--from IPython.external.path import path
--d = path('/home/guido/bin')
--for f in d.files('*.py'):
--    f.chmod(0755)
--
--This module requires Python 2.2 or later.
--
--
--URL:     http://www.jorendorff.com/articles/python/path
--Author:  Jason Orendorff <jason.orendorff\x40gmail\x2ecom> (and others - see the url!)
--Date:    9 Mar 2007
--"""
--
--
--# TODO
--#   - Tree-walking functions don't avoid symlink loops.  Matt Harrison
--#     sent me a patch for this.
--#   - Bug in write_text().  It doesn't support Universal newline mode.
--#   - Better error message in listdir() when self isn't a
--#     directory. (On Windows, the error message really sucks.)
--#   - Make sure everything has a good docstring.
--#   - Add methods for regex find and replace.
--#   - guess_content_type() method?
--#   - Perhaps support arguments to touch().
--
--from __future__ import generators
--
--import sys, warnings, os, fnmatch, glob, shutil, codecs
--# deprecated in python 2.6
--warnings.filterwarnings('ignore', r'.*md5.*')
--import md5
--
--__version__ = '2.2'
--__all__ = ['path']
--
--# Platform-specific support for path.owner
--if os.name == 'nt':
--    try:
--        import win32security
--    except ImportError:
--        win32security = None
--else:
--    try:
--        import pwd
--    except ImportError:
--        pwd = None
--
--# Pre-2.3 support.  Are unicode filenames supported?
--_base = str
--_getcwd = os.getcwd
--try:
--    if os.path.supports_unicode_filenames:
--        _base = unicode
--        _getcwd = os.getcwdu
--except AttributeError:
--    pass
--
--# Pre-2.3 workaround for booleans
--try:
--    True, False
--except NameError:
--    True, False = 1, 0
--
--# Pre-2.3 workaround for basestring.
--try:
--    basestring
--except NameError:
--    basestring = (str, unicode)
--
--# Universal newline support
--_textmode = 'r'
--if hasattr(file, 'newlines'):
--    _textmode = 'U'
--
--
--class TreeWalkWarning(Warning):
--    pass
--
--class path(_base):
--    """ Represents a filesystem path.
--
--    For documentation on individual methods, consult their
--    counterparts in os.path.
--    """
--
--    # --- Special Python methods.
--
--    def __repr__(self):
--        return 'path(%s)' % _base.__repr__(self)
--
--    # Adding a path and a string yields a path.
--    def __add__(self, more):
--        try:
--            resultStr = _base.__add__(self, more)
--        except TypeError:  #Python bug
--            resultStr = NotImplemented
--        if resultStr is NotImplemented:
--            return resultStr
--        return self.__class__(resultStr)
--
--    def __radd__(self, other):
--        if isinstance(other, basestring):
--            return self.__class__(other.__add__(self))
--        else:
--            return NotImplemented
--
--    # The / operator joins paths.
--    def __div__(self, rel):
--        """ fp.__div__(rel) == fp / rel == fp.joinpath(rel)
--
--        Join two path components, adding a separator character if
--        needed.
--        """
--        return self.__class__(os.path.join(self, rel))
--
--    # Make the / operator work even when true division is enabled.
--    __truediv__ = __div__
--
--    def getcwd(cls):
--        """ Return the current working directory as a path object. """
--        return cls(_getcwd())
--    getcwd = classmethod(getcwd)
--
--
--    # --- Operations on path strings.
--
--    isabs = os.path.isabs
--    def abspath(self):       return self.__class__(os.path.abspath(self))
--    def normcase(self):      return self.__class__(os.path.normcase(self))
--    def normpath(self):      return self.__class__(os.path.normpath(self))
--    def realpath(self):      return self.__class__(os.path.realpath(self))
--    def expanduser(self):    return self.__class__(os.path.expanduser(self))
--    def expandvars(self):    return self.__class__(os.path.expandvars(self))
--    def dirname(self):       return self.__class__(os.path.dirname(self))
--    basename = os.path.basename
--
--    def expand(self):
--        """ Clean up a filename by calling expandvars(),
--        expanduser(), and normpath() on it.
--
--        This is commonly everything needed to clean up a filename
--        read from a configuration file, for example.
--        """
--        return self.expandvars().expanduser().normpath()
--
--    def _get_namebase(self):
--        base, ext = os.path.splitext(self.name)
--        return base
--
--    def _get_ext(self):
--        f, ext = os.path.splitext(_base(self))
--        return ext
--
--    def _get_drive(self):
--        drive, r = os.path.splitdrive(self)
--        return self.__class__(drive)
--
--    parent = property(
--        dirname, None, None,
--        """ This path's parent directory, as a new path object.
--
--        For example, path('/usr/local/lib/libpython.so').parent == path('/usr/local/lib')
--        """)
--
--    name = property(
--        basename, None, None,
--        """ The name of this file or directory without the full path.
--
--        For example, path('/usr/local/lib/libpython.so').name == 'libpython.so'
--        """)
--
--    namebase = property(
--        _get_namebase, None, None,
--        """ The same as path.name, but with one file extension stripped off.
--
--        For example, path('/home/guido/python.tar.gz').name     == 'python.tar.gz',
--        but          path('/home/guido/python.tar.gz').namebase == 'python.tar'
--        """)
--
--    ext = property(
--        _get_ext, None, None,
--        """ The file extension, for example '.py'. """)
--
--    drive = property(
--        _get_drive, None, None,
--        """ The drive specifier, for example 'C:'.
--        This is always empty on systems that don't use drive specifiers.
--        """)
--
--    def splitpath(self):
--        """ p.splitpath() -> Return (p.parent, p.name). """
--        parent, child = os.path.split(self)
--        return self.__class__(parent), child
--
--    def splitdrive(self):
--        """ p.splitdrive() -> Return (p.drive, <the rest of p>).
--
--        Split the drive specifier from this path.  If there is
--        no drive specifier, p.drive is empty, so the return value
--        is simply (path(''), p).  This is always the case on Unix.
--        """
--        drive, rel = os.path.splitdrive(self)
--        return self.__class__(drive), rel
--
--    def splitext(self):
--        """ p.splitext() -> Return (p.stripext(), p.ext).
--
--        Split the filename extension from this path and return
--        the two parts.  Either part may be empty.
--
--        The extension is everything from '.' to the end of the
--        last path segment.  This has the property that if
--        (a, b) == p.splitext(), then a + b == p.
--        """
--        filename, ext = os.path.splitext(self)
--        return self.__class__(filename), ext
--
--    def stripext(self):
--        """ p.stripext() -> Remove one file extension from the path.
--
--        For example, path('/home/guido/python.tar.gz').stripext()
--        returns path('/home/guido/python.tar').
--        """
--        return self.splitext()[0]
--
--    if hasattr(os.path, 'splitunc'):
--        def splitunc(self):
--            unc, rest = os.path.splitunc(self)
--            return self.__class__(unc), rest
--
--        def _get_uncshare(self):
--            unc, r = os.path.splitunc(self)
--            return self.__class__(unc)
--
--        uncshare = property(
--            _get_uncshare, None, None,
--            """ The UNC mount point for this path.
--            This is empty for paths on local drives. """)
--
--    def joinpath(self, *args):
--        """ Join two or more path components, adding a separator
--        character (os.sep) if needed.  Returns a new path
--        object.
--        """
--        return self.__class__(os.path.join(self, *args))
--
--    def splitall(self):
--        r""" Return a list of the path components in this path.
--
--        The first item in the list will be a path.  Its value will be
--        either os.curdir, os.pardir, empty, or the root directory of
--        this path (for example, '/' or 'C:\\').  The other items in
--        the list will be strings.
--
--        path.path.joinpath(*result) will yield the original path.
--        """
--        parts = []
--        loc = self
--        while loc != os.curdir and loc != os.pardir:
--            prev = loc
--            loc, child = prev.splitpath()
--            if loc == prev:
--                break
--            parts.append(child)
--        parts.append(loc)
--        parts.reverse()
--        return parts
--
--    def relpath(self):
--        """ Return this path as a relative path,
--        based from the current working directory.
--        """
--        cwd = self.__class__(os.getcwd())
--        return cwd.relpathto(self)
--
--    def relpathto(self, dest):
--        """ Return a relative path from self to dest.
--
--        If there is no relative path from self to dest, for example if
--        they reside on different drives in Windows, then this returns
--        dest.abspath().
--        """
--        origin = self.abspath()
--        dest = self.__class__(dest).abspath()
--
--        orig_list = origin.normcase().splitall()
--        # Don't normcase dest!  We want to preserve the case.
--        dest_list = dest.splitall()
--
--        if orig_list[0] != os.path.normcase(dest_list[0]):
--            # Can't get here from there.
--            return dest
--
--        # Find the location where the two paths start to differ.
--        i = 0
--        for start_seg, dest_seg in zip(orig_list, dest_list):
--            if start_seg != os.path.normcase(dest_seg):
--                break
--            i += 1
--
--        # Now i is the point where the two paths diverge.
--        # Need a certain number of "os.pardir"s to work up
--        # from the origin to the point of divergence.
--        segments = [os.pardir] * (len(orig_list) - i)
--        # Need to add the diverging part of dest_list.
--        segments += dest_list[i:]
--        if len(segments) == 0:
--            # If they happen to be identical, use os.curdir.
--            relpath = os.curdir
--        else:
--            relpath = os.path.join(*segments)
--        return self.__class__(relpath)
--
--    # --- Listing, searching, walking, and matching
--
--    def listdir(self, pattern=None):
--        """ D.listdir() -> List of items in this directory.
--
--        Use D.files() or D.dirs() instead if you want a listing
--        of just files or just subdirectories.
--
--        The elements of the list are path objects.
--
--        With the optional 'pattern' argument, this only lists
--        items whose names match the given pattern.
--        """
--        names = os.listdir(self)
--        if pattern is not None:
--            names = fnmatch.filter(names, pattern)
--        return [self / child for child in names]
--
--    def dirs(self, pattern=None):
--        """ D.dirs() -> List of this directory's subdirectories.
--
--        The elements of the list are path objects.
--        This does not walk recursively into subdirectories
--        (but see path.walkdirs).
--
--        With the optional 'pattern' argument, this only lists
--        directories whose names match the given pattern.  For
--        example, d.dirs('build-*').
--        """
--        return [p for p in self.listdir(pattern) if p.isdir()]
--
--    def files(self, pattern=None):
--        """ D.files() -> List of the files in this directory.
--
--        The elements of the list are path objects.
--        This does not walk into subdirectories (see path.walkfiles).
--
--        With the optional 'pattern' argument, this only lists files
--        whose names match the given pattern.  For example,
--        d.files('*.pyc').
--        """
--        
--        return [p for p in self.listdir(pattern) if p.isfile()]
--
--    def walk(self, pattern=None, errors='strict'):
--        """ D.walk() -> iterator over files and subdirs, recursively.
--
--        The iterator yields path objects naming each child item of
--        this directory and its descendants.  This requires that
--        D.isdir().
--
--        This performs a depth-first traversal of the directory tree.
--        Each directory is returned just before all its children.
--
--        The errors= keyword argument controls behavior when an
--        error occurs.  The default is 'strict', which causes an
--        exception.  The other allowed values are 'warn', which
--        reports the error via warnings.warn(), and 'ignore'.
--        """
--        if errors not in ('strict', 'warn', 'ignore'):
--            raise ValueError("invalid errors parameter")
--
--        try:
--            childList = self.listdir()
--        except Exception:
--            if errors == 'ignore':
--                return
--            elif errors == 'warn':
--                warnings.warn(
--                    "Unable to list directory '%s': %s"
--                    % (self, sys.exc_info()[1]),
--                    TreeWalkWarning)
--                return
--            else:
--                raise
--
--        for child in childList:
--            if pattern is None or child.fnmatch(pattern):
--                yield child
--            try:
--                isdir = child.isdir()
--            except Exception:
--                if errors == 'ignore':
--                    isdir = False
--                elif errors == 'warn':
--                    warnings.warn(
--                        "Unable to access '%s': %s"
--                        % (child, sys.exc_info()[1]),
--                        TreeWalkWarning)
--                    isdir = False
--                else:
--                    raise
--
--            if isdir:
--                for item in child.walk(pattern, errors):
--                    yield item
--
--    def walkdirs(self, pattern=None, errors='strict'):
--        """ D.walkdirs() -> iterator over subdirs, recursively.
--
--        With the optional 'pattern' argument, this yields only
--        directories whose names match the given pattern.  For
--        example, mydir.walkdirs('*test') yields only directories
--        with names ending in 'test'.
--
--        The errors= keyword argument controls behavior when an
--        error occurs.  The default is 'strict', which causes an
--        exception.  The other allowed values are 'warn', which
--        reports the error via warnings.warn(), and 'ignore'.
--        """
--        if errors not in ('strict', 'warn', 'ignore'):
--            raise ValueError("invalid errors parameter")
--
--        try:
--            dirs = self.dirs()
--        except Exception:
--            if errors == 'ignore':
--                return
--            elif errors == 'warn':
--                warnings.warn(
--                    "Unable to list directory '%s': %s"
--                    % (self, sys.exc_info()[1]),
--                    TreeWalkWarning)
--                return
--            else:
--                raise
--
--        for child in dirs:
--            if pattern is None or child.fnmatch(pattern):
--                yield child
--            for subsubdir in child.walkdirs(pattern, errors):
--                yield subsubdir
--
--    def walkfiles(self, pattern=None, errors='strict'):
--        """ D.walkfiles() -> iterator over files in D, recursively.
--
--        The optional argument, pattern, limits the results to files
--        with names that match the pattern.  For example,
--        mydir.walkfiles('*.tmp') yields only files with the .tmp
--        extension.
--        """
--        if errors not in ('strict', 'warn', 'ignore'):
--            raise ValueError("invalid errors parameter")
--
--        try:
--            childList = self.listdir()
--        except Exception:
--            if errors == 'ignore':
--                return
--            elif errors == 'warn':
--                warnings.warn(
--                    "Unable to list directory '%s': %s"
--                    % (self, sys.exc_info()[1]),
--                    TreeWalkWarning)
--                return
--            else:
--                raise
--
--        for child in childList:
--            try:
--                isfile = child.isfile()
--                isdir = not isfile and child.isdir()
--            except:
--                if errors == 'ignore':
--                    continue
--                elif errors == 'warn':
--                    warnings.warn(
--                        "Unable to access '%s': %s"
--                        % (self, sys.exc_info()[1]),
--                        TreeWalkWarning)
--                    continue
--                else:
--                    raise
--
--            if isfile:
--                if pattern is None or child.fnmatch(pattern):
--                    yield child
--            elif isdir:
--                for f in child.walkfiles(pattern, errors):
--                    yield f
--
--    def fnmatch(self, pattern):
--        """ Return True if self.name matches the given pattern.
--
--        pattern - A filename pattern with wildcards,
--            for example '*.py'.
--        """
--        return fnmatch.fnmatch(self.name, pattern)
--
--    def glob(self, pattern):
--        """ Return a list of path objects that match the pattern.
--
--        pattern - a path relative to this directory, with wildcards.
--
--        For example, path('/users').glob('*/bin/*') returns a list
--        of all the files users have in their bin directories.
--        """
--        cls = self.__class__
--        return [cls(s) for s in glob.glob(_base(self / pattern))]
--
--
--    # --- Reading or writing an entire file at once.
--
--    def open(self, mode='r'):
--        """ Open this file.  Return a file object. """
--        return file(self, mode)
--
--    def bytes(self):
--        """ Open this file, read all bytes, return them as a string. """
--        f = self.open('rb')
--        try:
--            return f.read()
--        finally:
--            f.close()
--
--    def write_bytes(self, bytes, append=False):
--        """ Open this file and write the given bytes to it.
--
--        Default behavior is to overwrite any existing file.
--        Call p.write_bytes(bytes, append=True) to append instead.
--        """
--        if append:
--            mode = 'ab'
--        else:
--            mode = 'wb'
--        f = self.open(mode)
--        try:
--            f.write(bytes)
--        finally:
--            f.close()
--
--    def text(self, encoding=None, errors='strict'):
--        r""" Open this file, read it in, return the content as a string.
--
--        This uses 'U' mode in Python 2.3 and later, so '\r\n' and '\r'
--        are automatically translated to '\n'.
--
--        Optional arguments:
--
--        encoding - The Unicode encoding (or character set) of
--            the file.  If present, the content of the file is
--            decoded and returned as a unicode object; otherwise
--            it is returned as an 8-bit str.
--        errors - How to handle Unicode errors; see help(str.decode)
--            for the options.  Default is 'strict'.
--        """
--        if encoding is None:
--            # 8-bit
--            f = self.open(_textmode)
--            try:
--                return f.read()
--            finally:
--                f.close()
--        else:
--            # Unicode
--            f = codecs.open(self, 'r', encoding, errors)
--            # (Note - Can't use 'U' mode here, since codecs.open
--            # doesn't support 'U' mode, even in Python 2.3.)
--            try:
--                t = f.read()
--            finally:
--                f.close()
--            return (t.replace(u'\r\n', u'\n')
--                     .replace(u'\r\x85', u'\n')
--                     .replace(u'\r', u'\n')
--                     .replace(u'\x85', u'\n')
--                     .replace(u'\u2028', u'\n'))
--
--    def write_text(self, text, encoding=None, errors='strict', linesep=os.linesep, append=False):
--        r""" Write the given text to this file.
--
--        The default behavior is to overwrite any existing file;
--        to append instead, use the 'append=True' keyword argument.
--
--        There are two differences between path.write_text() and
--        path.write_bytes(): newline handling and Unicode handling.
--        See below.
--
--        Parameters:
--
--          - text - str/unicode - The text to be written.
--
--          - encoding - str - The Unicode encoding that will be used.
--            This is ignored if 'text' isn't a Unicode string.
--
--          - errors - str - How to handle Unicode encoding errors.
--            Default is 'strict'.  See help(unicode.encode) for the
--            options.  This is ignored if 'text' isn't a Unicode
--            string.
--
--          - linesep - keyword argument - str/unicode - The sequence of
--            characters to be used to mark end-of-line.  The default is
--            os.linesep.  You can also specify None; this means to
--            leave all newlines as they are in 'text'.
--
--          - append - keyword argument - bool - Specifies what to do if
--            the file already exists (True: append to the end of it;
--            False: overwrite it.)  The default is False.
--
--
--        --- Newline handling.
--
--        write_text() converts all standard end-of-line sequences
--        ('\n', '\r', and '\r\n') to your platform's default end-of-line
--        sequence (see os.linesep; on Windows, for example, the
--        end-of-line marker is '\r\n').
--
--        If you don't like your platform's default, you can override it
--        using the 'linesep=' keyword argument.  If you specifically want
--        write_text() to preserve the newlines as-is, use 'linesep=None'.
--
--        This applies to Unicode text the same as to 8-bit text, except
--        there are three additional standard Unicode end-of-line sequences:
--        u'\x85', u'\r\x85', and u'\u2028'.
--
--        (This is slightly different from when you open a file for
--        writing with fopen(filename, "w") in C or file(filename, 'w')
--        in Python.)
--
--
--        --- Unicode
--
--        If 'text' isn't Unicode, then apart from newline handling, the
--        bytes are written verbatim to the file.  The 'encoding' and
--        'errors' arguments are not used and must be omitted.
--
--        If 'text' is Unicode, it is first converted to bytes using the
--        specified 'encoding' (or the default encoding if 'encoding'
--        isn't specified).  The 'errors' argument applies only to this
--        conversion.
--
--        """
--        if isinstance(text, unicode):
--            if linesep is not None:
--                # Convert all standard end-of-line sequences to
--                # ordinary newline characters.
--                text = (text.replace(u'\r\n', u'\n')
--                            .replace(u'\r\x85', u'\n')
--                            .replace(u'\r', u'\n')
--                            .replace(u'\x85', u'\n')
--                            .replace(u'\u2028', u'\n'))
--                text = text.replace(u'\n', linesep)
--            if encoding is None:
--                encoding = sys.getdefaultencoding()
--            bytes = text.encode(encoding, errors)
--        else:
--            # It is an error to specify an encoding if 'text' is
--            # an 8-bit string.
--            assert encoding is None
--
--            if linesep is not None:
--                text = (text.replace('\r\n', '\n')
--                            .replace('\r', '\n'))
--                bytes = text.replace('\n', linesep)
--
--        self.write_bytes(bytes, append)
--
--    def lines(self, encoding=None, errors='strict', retain=True):
--        r""" Open this file, read all lines, return them in a list.
--
--        Optional arguments:
--            encoding - The Unicode encoding (or character set) of
--                the file.  The default is None, meaning the content
--                of the file is read as 8-bit characters and returned
--                as a list of (non-Unicode) str objects.
--            errors - How to handle Unicode errors; see help(str.decode)
--                for the options.  Default is 'strict'
--            retain - If true, retain newline characters; but all newline
--                character combinations ('\r', '\n', '\r\n') are
--                translated to '\n'.  If false, newline characters are
--                stripped off.  Default is True.
--
--        This uses 'U' mode in Python 2.3 and later.
--        """
--        if encoding is None and retain:
--            f = self.open(_textmode)
--            try:
--                return f.readlines()
--            finally:
--                f.close()
--        else:
--            return self.text(encoding, errors).splitlines(retain)
--
--    def write_lines(self, lines, encoding=None, errors='strict',
--                    linesep=os.linesep, append=False):
--        r""" Write the given lines of text to this file.
--
--        By default this overwrites any existing file at this path.
--
--        This puts a platform-specific newline sequence on every line.
--        See 'linesep' below.
--
--        lines - A list of strings.
--
--        encoding - A Unicode encoding to use.  This applies only if
--            'lines' contains any Unicode strings.
--
--        errors - How to handle errors in Unicode encoding.  This
--            also applies only to Unicode strings.
--
--        linesep - The desired line-ending.  This line-ending is
--            applied to every line.  If a line already has any
--            standard line ending ('\r', '\n', '\r\n', u'\x85',
--            u'\r\x85', u'\u2028'), that will be stripped off and
--            this will be used instead.  The default is os.linesep,
--            which is platform-dependent ('\r\n' on Windows, '\n' on
--            Unix, etc.)  Specify None to write the lines as-is,
--            like file.writelines().
--
--        Use the keyword argument append=True to append lines to the
--        file.  The default is to overwrite the file.  Warning:
--        When you use this with Unicode data, if the encoding of the
--        existing data in the file is different from the encoding
--        you specify with the encoding= parameter, the result is
--        mixed-encoding data, which can really confuse someone trying
--        to read the file later.
--        """
--        if append:
--            mode = 'ab'
--        else:
--            mode = 'wb'
--        f = self.open(mode)
--        try:
--            for line in lines:
--                isUnicode = isinstance(line, unicode)
--                if linesep is not None:
--                    # Strip off any existing line-end and add the
--                    # specified linesep string.
--                    if isUnicode:
--                        if line[-2:] in (u'\r\n', u'\x0d\x85'):
--                            line = line[:-2]
--                        elif line[-1:] in (u'\r', u'\n',
--                                           u'\x85', u'\u2028'):
--                            line = line[:-1]
--                    else:
--                        if line[-2:] == '\r\n':
--                            line = line[:-2]
--                        elif line[-1:] in ('\r', '\n'):
--                            line = line[:-1]
--                    line += linesep
--                if isUnicode:
--                    if encoding is None:
--                        encoding = sys.getdefaultencoding()
--                    line = line.encode(encoding, errors)
--                f.write(line)
--        finally:
--            f.close()
--
--    def read_md5(self):
--        """ Calculate the md5 hash for this file.
--
--        This reads through the entire file.
--        """
--        f = self.open('rb')
--        try:
--            m = md5.new()
--            while True:
--                d = f.read(8192)
--                if not d:
--                    break
--                m.update(d)
--        finally:
--            f.close()
--        return m.digest()
--
--    # --- Methods for querying the filesystem.
--
--    exists = os.path.exists
--    isdir = os.path.isdir
--    isfile = os.path.isfile
--    islink = os.path.islink
--    ismount = os.path.ismount
--
--    if hasattr(os.path, 'samefile'):
--        samefile = os.path.samefile
--
--    getatime = os.path.getatime
--    atime = property(
--        getatime, None, None,
--        """ Last access time of the file. """)
--
--    getmtime = os.path.getmtime
--    mtime = property(
--        getmtime, None, None,
--        """ Last-modified time of the file. """)
--
--    if hasattr(os.path, 'getctime'):
--        getctime = os.path.getctime
--        ctime = property(
--            getctime, None, None,
--            """ Creation time of the file. """)
--
--    getsize = os.path.getsize
--    size = property(
--        getsize, None, None,
--        """ Size of the file, in bytes. """)
--
--    if hasattr(os, 'access'):
--        def access(self, mode):
--            """ Return true if current user has access to this path.
--
--            mode - One of the constants os.F_OK, os.R_OK, os.W_OK, os.X_OK
--            """
--            return os.access(self, mode)
--
--    def stat(self):
--        """ Perform a stat() system call on this path. """
--        return os.stat(self)
--
--    def lstat(self):
--        """ Like path.stat(), but do not follow symbolic links. """
--        return os.lstat(self)
--
--    def get_owner(self):
--        r""" Return the name of the owner of this file or directory.
--
--        This follows symbolic links.
--
--        On Windows, this returns a name of the form ur'DOMAIN\User Name'.
--        On Windows, a group can own a file or directory.
--        """
--        if os.name == 'nt':
--            if win32security is None:
--                raise Exception("path.owner requires win32all to be installed")
--            desc = win32security.GetFileSecurity(
--                self, win32security.OWNER_SECURITY_INFORMATION)
--            sid = desc.GetSecurityDescriptorOwner()
--            account, domain, typecode = win32security.LookupAccountSid(None, sid)
--            return domain + u'\\' + account
--        else:
--            if pwd is None:
--                raise NotImplementedError("path.owner is not implemented on this platform.")
--            st = self.stat()
--            return pwd.getpwuid(st.st_uid).pw_name
--
--    owner = property(
--        get_owner, None, None,
--        """ Name of the owner of this file or directory. """)
--
--    if hasattr(os, 'statvfs'):
--        def statvfs(self):
--            """ Perform a statvfs() system call on this path. """
--            return os.statvfs(self)
--
--    if hasattr(os, 'pathconf'):
--        def pathconf(self, name):
--            return os.pathconf(self, name)
--
--
--    # --- Modifying operations on files and directories
--
--    def utime(self, times):
--        """ Set the access and modified times of this file. """
--        os.utime(self, times)
--
--    def chmod(self, mode):
--        os.chmod(self, mode)
--
--    if hasattr(os, 'chown'):
--        def chown(self, uid, gid):
--            os.chown(self, uid, gid)
--
--    def rename(self, new):
--        os.rename(self, new)
--
--    def renames(self, new):
--        os.renames(self, new)
--
--
--    # --- Create/delete operations on directories
--
--    def mkdir(self, mode=0777):
--        os.mkdir(self, mode)
--
--    def makedirs(self, mode=0777):
--        os.makedirs(self, mode)
--
--    def rmdir(self):
--        os.rmdir(self)
--
--    def removedirs(self):
--        os.removedirs(self)
--
--
--    # --- Modifying operations on files
--
--    def touch(self):
--        """ Set the access/modified times of this file to the current time.
--        Create the file if it does not exist.
--        """
--        fd = os.open(self, os.O_WRONLY | os.O_CREAT, 0666)
--        os.close(fd)
--        os.utime(self, None)
--
--    def remove(self):
--        os.remove(self)
--
--    def unlink(self):
--        os.unlink(self)
--
--
--    # --- Links
--
--    if hasattr(os, 'link'):
--        def link(self, newpath):
--            """ Create a hard link at 'newpath', pointing to this file. """
--            os.link(self, newpath)
--
--    if hasattr(os, 'symlink'):
--        def symlink(self, newlink):
--            """ Create a symbolic link at 'newlink', pointing here. """
--            os.symlink(self, newlink)
--
--    if hasattr(os, 'readlink'):
--        def readlink(self):
--            """ Return the path to which this symbolic link points.
--
--            The result may be an absolute or a relative path.
--            """
--            return self.__class__(os.readlink(self))
--
--        def readlinkabs(self):
--            """ Return the path to which this symbolic link points.
--
--            The result is always an absolute path.
--            """
--            p = self.readlink()
--            if p.isabs():
--                return p
--            else:
--                return (self.parent / p).abspath()
--
--
--    # --- High-level functions from shutil
--
--    copyfile = shutil.copyfile
--    copymode = shutil.copymode
--    copystat = shutil.copystat
--    copy = shutil.copy
--    copy2 = shutil.copy2
--    copytree = shutil.copytree
--    if hasattr(shutil, 'move'):
--        move = shutil.move
--    rmtree = shutil.rmtree
--
--
--    # --- Special stuff from os
--
--    if hasattr(os, 'chroot'):
--        def chroot(self):
--            os.chroot(self)
--
--    if hasattr(os, 'startfile'):
--        def startfile(self):
--            os.startfile(self)
--
 Index: ipython-0.10/IPython/external/pretty/__init__.py
 ===================================================================
 --- /dev/null
@@ -12858,1426 +68,6 @@ Index: ipython-0.10/IPython/external/pretty/__init__.py
 +    from pretty import *
 +except ImportError:
 +    from _pretty import *
-Index: ipython-0.10/IPython/external/pretty/_pretty.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/pretty/_pretty.py
-@@ -0,0 +1,705 @@
-+# -*- coding: utf-8 -*-
-+"""
-+    pretty
-+    ~~
-+
-+    Python advanced pretty printer.  This pretty printer is intended to
-+    replace the old `pprint` python module which does not allow developers
-+    to provide their own pretty print callbacks.
-+
-+    This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
-+
-+
-+    Example Usage
-+    =============
-+
-+    To directly print the representation of an object use `pprint`::
-+
-+        from pretty import pprint
-+        pprint(complex_object)
-+
-+    To get a string of the output use `pretty`::
-+
-+        from pretty import pretty
-+        string = pretty(complex_object)
-+
-+
-+    Extending
-+    =========
-+
-+    The pretty library allows developers to add pretty printing rules for their
-+    own objects.  This process is straightforward.  All you have to do is to
-+    add a `__pretty__` method to your object and call the methods on the
-+    pretty printer passed::
-+
-+        class MyObject(object):
-+
-+            def __pretty__(self, p, cycle):
-+                ...
-+
-+    Depending on the python version you want to support you have two
-+    possibilities.  The following list shows the python 2.5 version and the
-+    compatibility one.
-+
-+
-+    Here is an example implementation of a `__pretty__` method for a list
-+    subclass for python 2.5 and higher (python 2.5 requires the with statement
-+    __future__ import)::
-+
-+        class MyList(list):
-+
-+            def __pretty__(self, p, cycle):
-+                if cycle:
-+                    p.text('MyList(...)')
-+                else:
-+                    with p.group(8, 'MyList([', '])'):
-+                        for idx, item in enumerate(self):
-+                            if idx:
-+                                p.text(',')
-+                                p.breakable()
-+                            p.pretty(item)
-+
-+    The `cycle` parameter is `True` if pretty detected a cycle.  You *have* to
-+    react to that or the result is an infinite loop.  `p.text()` just adds
-+    non breaking text to the output, `p.breakable()` either adds a whitespace
-+    or breaks here.  If you pass it an argument it's used instead of the
-+    default space.  `p.pretty` prettyprints another object using the pretty print
-+    method.
-+
-+    The first parameter to the `group` function specifies the extra indentation
-+    of the next line.  In this example the next item will either not be
-+    broken (if the items are short enough) or aligned with the right edge of
-+    the opening bracket of `MyList`.
-+
-+    If you want to support python 2.4 and lower you can use this code::
-+
-+        class MyList(list):
-+
-+            def __pretty__(self, p, cycle):
-+                if cycle:
-+                    p.text('MyList(...)')
-+                else:
-+                    p.begin_group(8, 'MyList([')
-+                    for idx, item in enumerate(self):
-+                        if idx:
-+                            p.text(',')
-+                            p.breakable()
-+                        p.pretty(item)
-+                    p.end_group(8, '])')
-+
-+    If you just want to indent something you can use the group function
-+    without open / close parameters.  Under python 2.5 you can also use this
-+    code::
-+
-+        with p.indent(2):
-+            ...
-+
-+    Or under python2.4 you might want to modify ``p.indentation`` by hand but
-+    this is rather ugly.
-+
-+    :copyright: 2007 by Armin Ronacher.
-+                Portions (c) 2009 by Robert Kern.
-+    :license: BSD License.
-+"""
-+import __future__
-+import sys
-+import types
-+import re
-+import datetime
-+from StringIO import StringIO
-+from collections import deque
-+
-+
-+__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
-+    'for_type', 'for_type_by_name']
-+
-+
-+_re_pattern_type = type(re.compile(''))
-+
-+
-+def pretty(obj, verbose=False, max_width=79, newline='\n'):
-+    """
-+    Pretty print the object's representation.
-+    """
-+    stream = StringIO()
-+    printer = RepresentationPrinter(stream, verbose, max_width, newline)
-+    printer.pretty(obj)
-+    printer.flush()
-+    return stream.getvalue()
-+
-+
-+def pprint(obj, verbose=False, max_width=79, newline='\n'):
-+    """
-+    Like `pretty` but print to stdout.
-+    """
-+    printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline)
-+    printer.pretty(obj)
-+    printer.flush()
-+    sys.stdout.write(newline)
-+    sys.stdout.flush()
-+
-+
-+# add python2.5 context managers if we have the with statement feature
-+if hasattr(__future__, 'with_statement'): exec '''
-+from __future__ import with_statement
-+from contextlib import contextmanager
-+
-+class _PrettyPrinterBase(object):
-+
-+    @contextmanager
-+    def indent(self, indent):
-+        """with statement support for indenting/dedenting."""
-+        self.indentation += indent
-+        try:
-+            yield
-+        finally:
-+            self.indentation -= indent
-+
-+    @contextmanager
-+    def group(self, indent=0, open='', close=''):
-+        """like begin_group / end_group but for the with statement."""
-+        self.begin_group(indent, open)
-+        try:
-+            with self.indent(indent):
-+                yield
-+        finally:
-+            self.end_group(indent, close)
-+'''
-+else:
-+    class _PrettyPrinterBase(object):
-+
-+        def _unsupported(self, *a, **kw):
-+            """unsupported operation"""
-+            raise RuntimeError('not available in this python version')
-+        group = indent = _unsupported
-+        del _unsupported
-+
-+
-+class PrettyPrinter(_PrettyPrinterBase):
-+    """
-+    Baseclass for the `RepresentationPrinter` prettyprinter that is used to
-+    generate pretty reprs of objects.  Contrary to the `RepresentationPrinter`
-+    this printer knows nothing about the default pprinters or the `__pretty__`
-+    callback method.
-+    """
-+
-+    def __init__(self, output, max_width=79, newline='\n'):
-+        self.output = output
-+        self.max_width = max_width
-+        self.newline = newline
-+        self.output_width = 0
-+        self.buffer_width = 0
-+        self.buffer = deque()
-+
-+        root_group = Group(0)
-+        self.group_stack = [root_group]
-+        self.group_queue = GroupQueue(root_group)
-+        self.indentation = 0
-+
-+    def _break_outer_groups(self):
-+        while self.max_width < self.output_width + self.buffer_width:
-+            group = self.group_queue.deq()
-+            if not group:
-+                return
-+            while group.breakables:
-+                x = self.buffer.popleft()
-+                self.output_width = x.output(self.output, self.output_width)
-+                self.buffer_width -= x.width
-+            while self.buffer and isinstance(self.buffer[0], Text):
-+                x = self.buffer.popleft()
-+                self.output_width = x.output(self.output, self.output_width)
-+                self.buffer_width -= x.width
-+
-+    def text(self, obj):
-+        """Add literal text to the output."""
-+        width = len(obj)
-+        if self.buffer:
-+            text = self.buffer[-1]
-+            if not isinstance(text, Text):
-+                text = Text()
-+                self.buffer.append(text)
-+            text.add(obj, width)
-+            self.buffer_width += width
-+            self._break_outer_groups()
-+        else:
-+            self.output.write(obj)
-+            self.output_width += width
-+
-+    def breakable(self, sep=' '):
-+        """
-+        Add a breakable separator to the output.  This does not mean that it
-+        will automatically break here.  If no break takes place at this
-+        position, the `sep` is inserted, which defaults to one space.
-+        """
-+        width = len(sep)
-+        group = self.group_stack[-1]
-+        if group.want_break:
-+            self.flush()
-+            self.output.write(self.newline)
-+            self.output.write(' ' * self.indentation)
-+            self.output_width = self.indentation
-+            self.buffer_width = 0
-+        else:
-+            self.buffer.append(Breakable(sep, width, self))
-+            self.buffer_width += width
-+            self._break_outer_groups()
-+
-+
-+    def begin_group(self, indent=0, open=''):
-+        """
-+        Begin a group.  If you want support for python < 2.5, which doesn't have
-+        the with statement, this is the preferred way:
-+
-+            p.begin_group(1, '{')
-+            ...
-+            p.end_group(1, '}')
-+
-+        The python 2.5 expression would be this:
-+
-+            with p.group(1, '{', '}'):
-+                ...
-+
-+        The first parameter specifies the indentation for the next line (usually
-+        the width of the opening text), the second the opening text.  All
-+        parameters are optional.
-+        """
-+        if open:
-+            self.text(open)
-+        group = Group(self.group_stack[-1].depth + 1)
-+        self.group_stack.append(group)
-+        self.group_queue.enq(group)
-+        self.indentation += indent
-+
-+    def end_group(self, dedent=0, close=''):
-+        """End a group. See `begin_group` for more details."""
-+        self.indentation -= dedent
-+        group = self.group_stack.pop()
-+        if not group.breakables:
-+            self.group_queue.remove(group)
-+        if close:
-+            self.text(close)
-+
-+    def flush(self):
-+        """Flush data that is left in the buffer."""
-+        for data in self.buffer:
-+            self.output_width += data.output(self.output, self.output_width)
-+        self.buffer.clear()
-+        self.buffer_width = 0
-+
-+
-+def _get_mro(obj_class):
-+    """ Get a reasonable method resolution order of a class and its superclasses
-+    for both old-style and new-style classes.
-+    """
-+    if not hasattr(obj_class, '__mro__'):
-+        # Old-style class. Mix in object to make a fake new-style class.
-+        try:
-+            obj_class = type(obj_class.__name__, (obj_class, object), {})
-+        except TypeError:
-+            # Old-style extension type that does not descend from object.
-+            # FIXME: try to construct a more thorough MRO.
-+            mro = [obj_class]
-+        else:
-+            mro = obj_class.__mro__[1:-1]
-+    else:
-+        mro = obj_class.__mro__
-+    return mro
-+
-+
-+class RepresentationPrinter(PrettyPrinter):
-+    """
-+    Special pretty printer that has a `pretty` method that calls the pretty
-+    printer for a python object.
-+
-+    This class stores processing data on `self` so you must *never* use
-+    this class in a threaded environment.  Always lock it or reinstantiate
-+    it.
-+
-+    Instances also have a verbose flag callbacks can access to control their
-+    output.  For example the default instance repr prints all attributes and
-+    methods that are not prefixed by an underscore if the printer is in
-+    verbose mode.
-+    """
-+
-+    def __init__(self, output, verbose=False, max_width=79, newline='\n'):
-+        PrettyPrinter.__init__(self, output, max_width, newline)
-+        self.verbose = verbose
-+        self.stack = []
-+
-+    def pretty(self, obj):
-+        """Pretty print the given object."""
-+        obj_id = id(obj)
-+        cycle = obj_id in self.stack
-+        self.stack.append(obj_id)
-+        self.begin_group()
-+        try:
-+            obj_class = getattr(obj, '__class__', None) or type(obj)
-+            if hasattr(obj_class, '__pretty__'):
-+                return obj_class.__pretty__(obj, self, cycle)
-+            try:
-+                printer = _singleton_pprinters[obj_id]
-+            except (TypeError, KeyError):
-+                pass
-+            else:
-+                return printer(obj, self, cycle)
-+            for cls in _get_mro(obj_class):
-+                if cls in _type_pprinters:
-+                    return _type_pprinters[cls](obj, self, cycle)
-+                else:
-+                    printer = self._in_deferred_types(cls)
-+                    if printer is not None:
-+                        return printer(obj, self, cycle)
-+            return _default_pprint(obj, self, cycle)
-+        finally:
-+            self.end_group()
-+            self.stack.pop()
-+
-+    def _in_deferred_types(self, cls):
-+        """
-+        Check if the given class is specified in the deferred type registry.
-+
-+        Returns the printer from the registry if it exists, and None if the
-+        class is not in the registry. Successful matches will be moved to the
-+        regular type registry for future use.
-+        """
-+        mod = getattr(cls, '__module__', None)
-+        name = getattr(cls, '__name__', None)
-+        key = (mod, name)
-+        printer = None
-+        if key in _deferred_type_pprinters:
-+            # Move the printer over to the regular registry.
-+            printer = _deferred_type_pprinters.pop(key)
-+            _type_pprinters[cls] = printer
-+        return printer
-+
-+
-+
-+class Printable(object):
-+
-+    def output(self, stream, output_width):
-+        return output_width
-+
-+
-+class Text(Printable):
-+
-+    def __init__(self):
-+        self.objs = []
-+        self.width = 0
-+
-+    def output(self, stream, output_width):
-+        for obj in self.objs:
-+            stream.write(obj)
-+        return output_width + self.width
-+
-+    def add(self, obj, width):
-+        self.objs.append(obj)
-+        self.width += width
-+
-+
-+class Breakable(Printable):
-+
-+    def __init__(self, seq, width, pretty):
-+        self.obj = seq
-+        self.width = width
-+        self.pretty = pretty
-+        self.indentation = pretty.indentation
-+        self.group = pretty.group_stack[-1]
-+        self.group.breakables.append(self)
-+
-+    def output(self, stream, output_width):
-+        self.group.breakables.popleft()
-+        if self.group.want_break:
-+            stream.write(self.pretty.newline)
-+            stream.write(' ' * self.indentation)
-+            return self.indentation
-+        if not self.group.breakables:
-+            self.pretty.group_queue.remove(self.group)
-+        stream.write(self.obj)
-+        return output_width + self.width
-+
-+
-+class Group(Printable):
-+
-+    def __init__(self, depth):
-+        self.depth = depth
-+        self.breakables = deque()
-+        self.want_break = False
-+
-+
-+class GroupQueue(object):
-+
-+    def __init__(self, *groups):
-+        self.queue = []
-+        for group in groups:
-+            self.enq(group)
-+
-+    def enq(self, group):
-+        depth = group.depth
-+        while depth > len(self.queue) - 1:
-+            self.queue.append([])
-+        self.queue[depth].append(group)
-+
-+    def deq(self):
-+        for stack in self.queue:
-+            for idx, group in enumerate(reversed(stack)):
-+                if group.breakables:
-+                    del stack[idx]
-+                    group.want_break = True
-+                    return group
-+            for group in stack:
-+                group.want_break = True
-+            del stack[:]
-+
-+    def remove(self, group):
-+        try:
-+            self.queue[group.depth].remove(group)
-+        except ValueError:
-+            pass
-+
-+
-+_baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
-+
-+
-+def _default_pprint(obj, p, cycle):
-+    """
-+    The default print function.  Used if an object does not provide one and
-+    it's none of the builtin objects.
-+    """
-+    klass = getattr(obj, '__class__', None) or type(obj)
-+    if getattr(klass, '__repr__', None) not in _baseclass_reprs:
-+        # A user-provided repr.
-+        p.text(repr(obj))
-+        return
-+    p.begin_group(1, '<')
-+    p.pretty(klass)
-+    p.text(' at 0x%x' % id(obj))
-+    if cycle:
-+        p.text(' ...')
-+    elif p.verbose:
-+        first = True
-+        for key in dir(obj):
-+            if not key.startswith('_'):
-+                try:
-+                    value = getattr(obj, key)
-+                except AttributeError:
-+                    continue
-+                if isinstance(value, types.MethodType):
-+                    continue
-+                if not first:
-+                    p.text(',')
-+                p.breakable()
-+                p.text(key)
-+                p.text('=')
-+                step = len(key) + 1
-+                p.indentation += step
-+                p.pretty(value)
-+                p.indentation -= step
-+                first = False
-+    p.end_group(1, '>')
-+
-+
-+def _seq_pprinter_factory(start, end):
-+    """
-+    Factory that returns a pprint function useful for sequences.  Used by
-+    the default pprint for tuples, dicts, lists, sets and frozensets.
-+    """
-+    def inner(obj, p, cycle):
-+        if cycle:
-+            return p.text(start + '...' + end)
-+        step = len(start)
-+        p.begin_group(step, start)
-+        for idx, x in enumerate(obj):
-+            if idx:
-+                p.text(',')
-+                p.breakable()
-+            p.pretty(x)
-+        if len(obj) == 1 and type(obj) is tuple:
-+            # Special case for 1-item tuples.
-+            p.text(',')
-+        p.end_group(step, end)
-+    return inner
-+
-+
-+def _dict_pprinter_factory(start, end):
-+    """
-+    Factory that returns a pprint function used by the default pprint of
-+    dicts and dict proxies.
-+    """
-+    def inner(obj, p, cycle):
-+        if cycle:
-+            return p.text('{...}')
-+        p.begin_group(1, start)
-+        keys = obj.keys()
-+        try:
-+            keys.sort()
-+        except Exception, e:
-+            # Sometimes the keys don't sort.
-+            pass
-+        for idx, key in enumerate(keys):
-+            if idx:
-+                p.text(',')
-+                p.breakable()
-+            p.pretty(key)
-+            p.text(': ')
-+            p.pretty(obj[key])
-+        p.end_group(1, end)
-+    return inner
-+
-+
-+def _super_pprint(obj, p, cycle):
-+    """The pprint for the super type."""
-+    p.begin_group(8, '<super: ')
-+    p.pretty(obj.__self_class__)
-+    p.text(',')
-+    p.breakable()
-+    p.pretty(obj.__self__)
-+    p.end_group(8, '>')
-+
-+
-+def _re_pattern_pprint(obj, p, cycle):
-+    """The pprint function for regular expression patterns."""
-+    p.text('re.compile(')
-+    pattern = repr(obj.pattern)
-+    if pattern[:1] in 'uU':
-+        pattern = pattern[1:]
-+        prefix = 'ur'
-+    else:
-+        prefix = 'r'
-+    pattern = prefix + pattern.replace('\\\\', '\\')
-+    p.text(pattern)
-+    if obj.flags:
-+        p.text(',')
-+        p.breakable()
-+        done_one = False
-+        for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
-+            'UNICODE', 'VERBOSE', 'DEBUG'):
-+            if obj.flags & getattr(re, flag):
-+                if done_one:
-+                    p.text('|')
-+                p.text('re.' + flag)
-+                done_one = True
-+    p.text(')')
-+
-+
-+def _type_pprint(obj, p, cycle):
-+    """The pprint for classes and types."""
-+    if obj.__module__ in ('__builtin__', 'exceptions'):
-+        name = obj.__name__
-+    else:
-+        name = obj.__module__ + '.' + obj.__name__
-+    p.text(name)
-+
-+
-+def _repr_pprint(obj, p, cycle):
-+    """A pprint that just redirects to the normal repr function."""
-+    p.text(repr(obj))
-+
-+
-+def _function_pprint(obj, p, cycle):
-+    """Base pprint for all functions and builtin functions."""
-+    if obj.__module__ in ('__builtin__', 'exceptions') or not obj.__module__:
-+        name = obj.__name__
-+    else:
-+        name = obj.__module__ + '.' + obj.__name__
-+    p.text('<function %s>' % name)
-+
-+
-+def _exception_pprint(obj, p, cycle):
-+    """Base pprint for all exceptions."""
-+    if obj.__class__.__module__ == 'exceptions':
-+        name = obj.__class__.__name__
-+    else:
-+        name = '%s.%s' % (
-+            obj.__class__.__module__,
-+            obj.__class__.__name__
-+        )
-+    step = len(name) + 1
-+    p.begin_group(step, '(')
-+    for idx, arg in enumerate(getattr(obj, 'args', ())):
-+        if idx:
-+            p.text(',')
-+            p.breakable()
-+        p.pretty(arg)
-+    p.end_group(step, ')')
-+
-+
-+#: the exception base
-+try:
-+    _exception_base = BaseException
-+except NameError:
-+    _exception_base = Exception
-+
-+
-+#: printers for builtin types
-+_type_pprinters = {
-+    int:                        _repr_pprint,
-+    long:                       _repr_pprint,
-+    float:                      _repr_pprint,
-+    str:                        _repr_pprint,
-+    unicode:                    _repr_pprint,
-+    tuple:                      _seq_pprinter_factory('(', ')'),
-+    list:                       _seq_pprinter_factory('[', ']'),
-+    dict:                       _dict_pprinter_factory('{', '}'),
-+    types.DictProxyType:        _dict_pprinter_factory('<dictproxy {', '}>'),
-+    set:                        _seq_pprinter_factory('set([', '])'),
-+    frozenset:                  _seq_pprinter_factory('frozenset([', '])'),
-+    super:                      _super_pprint,
-+    _re_pattern_type:           _re_pattern_pprint,
-+    type:                       _type_pprint,
-+    types.ClassType:            _type_pprint,
-+    types.FunctionType:         _function_pprint,
-+    types.BuiltinFunctionType:  _function_pprint,
-+    types.SliceType:            _repr_pprint,
-+    types.MethodType:           _repr_pprint,
-+    xrange:                     _repr_pprint,
-+    datetime.datetime:          _repr_pprint,
-+    datetime.timedelta:         _repr_pprint,
-+    _exception_base:            _exception_pprint
-+}
-+
-+#: printers for types specified by name
-+_deferred_type_pprinters = {
-+}
-+
-+def for_type(typ, func):
-+    """
-+    Add a pretty printer for a given type.
-+    """
-+    oldfunc = _type_pprinters.get(typ, None)
-+    if func is not None:
-+        # To support easy restoration of old pprinters, we need to ignore Nones.
-+        _type_pprinters[typ] = func
-+    return oldfunc
-+
-+def for_type_by_name(type_module, type_name, func):
-+    """
-+    Add a pretty printer for a type specified by the module and name of a type
-+    rather than the type object itself.
-+    """
-+    key = (type_module, type_name)
-+    oldfunc = _deferred_type_pprinters.get(key, None)
-+    if func is not None:
-+        # To support easy restoration of old pprinters, we need to ignore Nones.
-+        _deferred_type_pprinters[key] = func
-+    return oldfunc
-+
-+
-+#: printers for the default singletons
-+_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
-+                                      NotImplemented]), _repr_pprint)
-+
-+
-+if __name__ == '__main__':
-+    from random import randrange
-+    class Foo(object):
-+        def __init__(self):
-+            self.foo = 1
-+            self.bar = re.compile(r'\s+')
-+            self.blub = dict.fromkeys(range(30), randrange(1, 40))
-+            self.hehe = 23424.234234
-+            self.list = ["blub", "blah", self]
-+
-+        def get_foo(self):
-+            print "foo"
-+
-+    pprint(Foo(), verbose=True)
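
Besides the __pretty__ protocol demonstrated in the docstring above, the module also exposes for_type() for attaching a printer to a class you cannot or do not want to modify. A small sketch of that usage follows, not part of the patch: Point and point_printer are made-up names, and the import assumes the IPython.external.pretty wrapper import shown in the __init__.py hunk above.

    # illustrative sketch, not part of the patch
    from IPython.external.pretty import pretty, for_type

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def point_printer(obj, p, cycle):
        # cycle is True when a reference cycle back to obj is detected
        if cycle:
            p.text('Point(...)')
        else:
            p.text('Point(%r, %r)' % (obj.x, obj.y))

    for_type(Point, point_printer)
    print pretty(Point(1, 2))                # -> Point(1, 2)
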
-Index: ipython-0.10/IPython/external/pretty.py
-===================================================================
---- ipython-0.10.orig/IPython/external/pretty.py
-+++ /dev/null
-@@ -1,705 +0,0 @@
--# -*- coding: utf-8 -*-
--"""
--    pretty
--    ~~
--
--    Python advanced pretty printer.  This pretty printer is intended to
--    replace the old `pprint` python module which does not allow developers
--    to provide their own pretty print callbacks.
--
--    This module is based on ruby's `prettyprint.rb` library by `Tanaka Akira`.
--
--
--    Example Usage
--    =============
--
--    To directly print the representation of an object use `pprint`::
--
--        from pretty import pprint
--        pprint(complex_object)
--
--    To get a string of the output use `pretty`::
--
--        from pretty import pretty
--        string = pretty(complex_object)
--
--
--    Extending
--    =========
--
--    The pretty library allows developers to add pretty printing rules for their
--    own objects.  This process is straightforward.  All you have to do is to
--    add a `__pretty__` method to your object and call the methods on the
--    pretty printer passed::
--
--        class MyObject(object):
--
--            def __pretty__(self, p, cycle):
--                ...
--
--    Depending on the python version you want to support you have two
--    possibilities.  The following list shows the python 2.5 version and the
--    compatibility one.
--
--
--    Here the example implementation of a `__pretty__` method for a list
--    subclass for python 2.5 and higher (python 2.5 requires the with statement
--    __future__ import)::
--
--        class MyList(list):
--
--            def __pretty__(self, p, cycle):
--                if cycle:
--                    p.text('MyList(...)')
--                else:
--                    with p.group(8, 'MyList([', '])'):
--                        for idx, item in enumerate(self):
--                            if idx:
--                                p.text(',')
--                                p.breakable()
--                            p.pretty(item)
--
--    The `cycle` parameter is `True` if pretty detected a cycle.  You *have* to
--    react to that or the result is an infinite loop.  `p.text()` just adds
--    non breaking text to the output, `p.breakable()` either adds a whitespace
--    or breaks here.  If you pass it an argument it's used instead of the
--    default space.  `p.pretty` prettyprints another object using the pretty print
--    method.
--
--    The first parameter to the `group` function specifies the extra indentation
--    of the next line.  In this example the next item will either be not
--    breaked (if the items are short enough) or aligned with the right edge of
--    the opening bracked of `MyList`.
--
--    If you want to support python 2.4 and lower you can use this code::
--
--        class MyList(list):
--
--            def __pretty__(self, p, cycle):
--                if cycle:
--                    p.text('MyList(...)')
--                else:
--                    p.begin_group(8, 'MyList([')
--                    for idx, item in enumerate(self):
--                        if idx:
--                            p.text(',')
--                            p.breakable()
--                        p.pretty(item)
--                    p.end_group(8, '])')
--
--    If you just want to indent something you can use the group function
--    without open / close parameters.  Under python 2.5 you can also use this
--    code::
--
--        with p.indent(2):
--            ...
--
--    Or under python2.4 you might want to modify ``p.indentation`` by hand but
--    this is rather ugly.
--
--    :copyright: 2007 by Armin Ronacher.
--                Portions (c) 2009 by Robert Kern.
--    :license: BSD License.
--"""
--import __future__
--import sys
--import types
--import re
--import datetime
--from StringIO import StringIO
--from collections import deque
--
--
--__all__ = ['pretty', 'pprint', 'PrettyPrinter', 'RepresentationPrinter',
--    'for_type', 'for_type_by_name']
--
--
--_re_pattern_type = type(re.compile(''))
--
--
--def pretty(obj, verbose=False, max_width=79, newline='\n'):
--    """
--    Pretty print the object's representation.
--    """
--    stream = StringIO()
--    printer = RepresentationPrinter(stream, verbose, max_width, newline)
--    printer.pretty(obj)
--    printer.flush()
--    return stream.getvalue()
--
--
--def pprint(obj, verbose=False, max_width=79, newline='\n'):
--    """
--    Like `pretty` but print to stdout.
--    """
--    printer = RepresentationPrinter(sys.stdout, verbose, max_width, newline)
--    printer.pretty(obj)
--    printer.flush()
--    sys.stdout.write(newline)
--    sys.stdout.flush()
--
--
--# add python2.5 context managers if we have the with statement feature
--if hasattr(__future__, 'with_statement'): exec '''
--from __future__ import with_statement
--from contextlib import contextmanager
--
--class _PrettyPrinterBase(object):
--
--    @contextmanager
--    def indent(self, indent):
--        """with statement support for indenting/dedenting."""
--        self.indentation += indent
--        try:
--            yield
--        finally:
--            self.indentation -= indent
--
--    @contextmanager
--    def group(self, indent=0, open='', close=''):
--        """like begin_group / end_group but for the with statement."""
--        self.begin_group(indent, open)
--        try:
--            with self.indent(indent):
--                yield
--        finally:
--            self.end_group(indent, close)
--'''
--else:
--    class _PrettyPrinterBase(object):
--
--        def _unsupported(self, *a, **kw):
--            """unsupported operation"""
--            raise RuntimeError('not available in this python version')
--        group = indent = _unsupported
--        del _unsupported
--
--
--class PrettyPrinter(_PrettyPrinterBase):
--    """
--    Baseclass for the `RepresentationPrinter` prettyprinter that is used to
--    generate pretty reprs of objects.  Contrary to the `RepresentationPrinter`
--    this printer knows nothing about the default pprinters or the `__pretty__`
--    callback method.
--    """
--
--    def __init__(self, output, max_width=79, newline='\n'):
--        self.output = output
--        self.max_width = max_width
--        self.newline = newline
--        self.output_width = 0
--        self.buffer_width = 0
--        self.buffer = deque()
--
--        root_group = Group(0)
--        self.group_stack = [root_group]
--        self.group_queue = GroupQueue(root_group)
--        self.indentation = 0
--
--    def _break_outer_groups(self):
--        while self.max_width < self.output_width + self.buffer_width:
--            group = self.group_queue.deq()
--            if not group:
--                return
--            while group.breakables:
--                x = self.buffer.popleft()
--                self.output_width = x.output(self.output, self.output_width)
--                self.buffer_width -= x.width
--            while self.buffer and isinstance(self.buffer[0], Text):
--                x = self.buffer.popleft()
--                self.output_width = x.output(self.output, self.output_width)
--                self.buffer_width -= x.width
--
--    def text(self, obj):
--        """Add literal text to the output."""
--        width = len(obj)
--        if self.buffer:
--            text = self.buffer[-1]
--            if not isinstance(text, Text):
--                text = Text()
--                self.buffer.append(text)
--            text.add(obj, width)
--            self.buffer_width += width
--            self._break_outer_groups()
--        else:
--            self.output.write(obj)
--            self.output_width += width
--
--    def breakable(self, sep=' '):
--        """
--        Add a breakable separator to the output.  This does not mean that it
--        will automatically break here.  If no breaking on this position takes
--        place the `sep` is inserted which default to one space.
--        """
--        width = len(sep)
--        group = self.group_stack[-1]
--        if group.want_break:
--            self.flush()
--            self.output.write(self.newline)
--            self.output.write(' ' * self.indentation)
--            self.output_width = self.indentation
--            self.buffer_width = 0
--        else:
--            self.buffer.append(Breakable(sep, width, self))
--            self.buffer_width += width
--            self._break_outer_groups()
--
--
--    def begin_group(self, indent=0, open=''):
--        """
--        Begin a group.  If you want support for python < 2.5 which doesn't has
--        the with statement this is the preferred way:
--
--            p.begin_group(1, '{')
--            ...
--            p.end_group(1, '}')
--
--        The python 2.5 expression would be this:
--
--            with p.group(1, '{', '}'):
--                ...
--
--        The first parameter specifies the indentation for the next line (usually
--        the width of the opening text), the second the opening text.  All
--        parameters are optional.
--        """
--        if open:
--            self.text(open)
--        group = Group(self.group_stack[-1].depth + 1)
--        self.group_stack.append(group)
--        self.group_queue.enq(group)
--        self.indentation += indent
--
--    def end_group(self, dedent=0, close=''):
--        """End a group. See `begin_group` for more details."""
--        self.indentation -= dedent
--        group = self.group_stack.pop()
--        if not group.breakables:
--            self.group_queue.remove(group)
--        if close:
--            self.text(close)
--
--    def flush(self):
--        """Flush data that is left in the buffer."""
--        for data in self.buffer:
--            self.output_width += data.output(self.output, self.output_width)
--        self.buffer.clear()
--        self.buffer_width = 0
--
--
--def _get_mro(obj_class):
--    """ Get a reasonable method resolution order of a class and its superclasses
--    for both old-style and new-style classes.
--    """
--    if not hasattr(obj_class, '__mro__'):
--        # Old-style class. Mix in object to make a fake new-style class.
--        try:
--            obj_class = type(obj_class.__name__, (obj_class, object), {})
--        except TypeError:
--            # Old-style extension type that does not descend from object.
--            # FIXME: try to construct a more thorough MRO.
--            mro = [obj_class]
--        else:
--            mro = obj_class.__mro__[1:-1]
--    else:
--        mro = obj_class.__mro__
--    return mro
--
--
--class RepresentationPrinter(PrettyPrinter):
--    """
--    Special pretty printer that has a `pretty` method that calls the pretty
--    printer for a python object.
--
--    This class stores processing data on `self` so you must *never* use
--    this class in a threaded environment.  Always lock it or reinstanciate
--    it.
--
--    Instances also have a verbose flag callbacks can access to control their
--    output.  For example the default instance repr prints all attributes and
--    methods that are not prefixed by an underscore if the printer is in
--    verbose mode.
--    """
--
--    def __init__(self, output, verbose=False, max_width=79, newline='\n'):
--        PrettyPrinter.__init__(self, output, max_width, newline)
--        self.verbose = verbose
--        self.stack = []
--
--    def pretty(self, obj):
--        """Pretty print the given object."""
--        obj_id = id(obj)
--        cycle = obj_id in self.stack
--        self.stack.append(obj_id)
--        self.begin_group()
--        try:
--            obj_class = getattr(obj, '__class__', None) or type(obj)
--            if hasattr(obj_class, '__pretty__'):
--                return obj_class.__pretty__(obj, self, cycle)
--            try:
--                printer = _singleton_pprinters[obj_id]
--            except (TypeError, KeyError):
--                pass
--            else:
--                return printer(obj, self, cycle)
--            for cls in _get_mro(obj_class):
--                if cls in _type_pprinters:
--                    return _type_pprinters[cls](obj, self, cycle)
--                else:
--                    printer = self._in_deferred_types(cls)
--                    if printer is not None:
--                        return printer(obj, self, cycle)
--            return _default_pprint(obj, self, cycle)
--        finally:
--            self.end_group()
--            self.stack.pop()
--
--    def _in_deferred_types(self, cls):
--        """
--        Check if the given class is specified in the deferred type registry.
--
--        Returns the printer from the registry if it exists, and None if the
--        class is not in the registry. Successful matches will be moved to the
--        regular type registry for future use.
--        """
--        mod = getattr(cls, '__module__', None)
--        name = getattr(cls, '__name__', None)
--        key = (mod, name)
--        printer = None
--        if key in _deferred_type_pprinters:
--            # Move the printer over to the regular registry.
--            printer = _deferred_type_pprinters.pop(key)
--            _type_pprinters[cls] = printer
--        return printer
--
--
--
--class Printable(object):
--
--    def output(self, stream, output_width):
--        return output_width
--
--
--class Text(Printable):
--
--    def __init__(self):
--        self.objs = []
--        self.width = 0
--
--    def output(self, stream, output_width):
--        for obj in self.objs:
--            stream.write(obj)
--        return output_width + self.width
--
--    def add(self, obj, width):
--        self.objs.append(obj)
--        self.width += width
--
--
--class Breakable(Printable):
--
--    def __init__(self, seq, width, pretty):
--        self.obj = seq
--        self.width = width
--        self.pretty = pretty
--        self.indentation = pretty.indentation
--        self.group = pretty.group_stack[-1]
--        self.group.breakables.append(self)
--
--    def output(self, stream, output_width):
--        self.group.breakables.popleft()
--        if self.group.want_break:
--            stream.write(self.pretty.newline)
--            stream.write(' ' * self.indentation)
--            return self.indentation
--        if not self.group.breakables:
--            self.pretty.group_queue.remove(self.group)
--        stream.write(self.obj)
--        return output_width + self.width
--
--
--class Group(Printable):
--
--    def __init__(self, depth):
--        self.depth = depth
--        self.breakables = deque()
--        self.want_break = False
--
--
--class GroupQueue(object):
--
--    def __init__(self, *groups):
--        self.queue = []
--        for group in groups:
--            self.enq(group)
--
--    def enq(self, group):
--        depth = group.depth
--        while depth > len(self.queue) - 1:
--            self.queue.append([])
--        self.queue[depth].append(group)
--
--    def deq(self):
--        for stack in self.queue:
--            for idx, group in enumerate(reversed(stack)):
--                if group.breakables:
--                    del stack[idx]
--                    group.want_break = True
--                    return group
--            for group in stack:
--                group.want_break = True
--            del stack[:]
--
--    def remove(self, group):
--        try:
--            self.queue[group.depth].remove(group)
--        except ValueError:
--            pass
--
--
--_baseclass_reprs = (object.__repr__, types.InstanceType.__repr__)
--
--
--def _default_pprint(obj, p, cycle):
--    """
--    The default print function.  Used if an object does not provide one and
--    it's none of the builtin objects.
--    """
--    klass = getattr(obj, '__class__', None) or type(obj)
--    if getattr(klass, '__repr__', None) not in _baseclass_reprs:
--        # A user-provided repr.
--        p.text(repr(obj))
--        return
--    p.begin_group(1, '<')
--    p.pretty(klass)
--    p.text(' at 0x%x' % id(obj))
--    if cycle:
--        p.text(' ...')
--    elif p.verbose:
--        first = True
--        for key in dir(obj):
--            if not key.startswith('_'):
--                try:
--                    value = getattr(obj, key)
--                except AttributeError:
--                    continue
--                if isinstance(value, types.MethodType):
--                    continue
--                if not first:
--                    p.text(',')
--                p.breakable()
--                p.text(key)
--                p.text('=')
--                step = len(key) + 1
--                p.indentation += step
--                p.pretty(value)
--                p.indentation -= step
--                first = False
--    p.end_group(1, '>')
--
--
--def _seq_pprinter_factory(start, end):
--    """
--    Factory that returns a pprint function useful for sequences.  Used by
--    the default pprint for tuples, dicts, lists, sets and frozensets.
--    """
--    def inner(obj, p, cycle):
--        if cycle:
--            return p.text(start + '...' + end)
--        step = len(start)
--        p.begin_group(step, start)
--        for idx, x in enumerate(obj):
--            if idx:
--                p.text(',')
--                p.breakable()
--            p.pretty(x)
--        if len(obj) == 1 and type(obj) is tuple:
--            # Special case for 1-item tuples.
--            p.text(',')
--        p.end_group(step, end)
--    return inner
--
--
--def _dict_pprinter_factory(start, end):
--    """
--    Factory that returns a pprint function used by the default pprint of
--    dicts and dict proxies.
--    """
--    def inner(obj, p, cycle):
--        if cycle:
--            return p.text('{...}')
--        p.begin_group(1, start)
--        keys = obj.keys()
--        try:
--            keys.sort()
--        except Exception, e:
--            # Sometimes the keys don't sort.
--            pass
--        for idx, key in enumerate(keys):
--            if idx:
--                p.text(',')
--                p.breakable()
--            p.pretty(key)
--            p.text(': ')
--            p.pretty(obj[key])
--        p.end_group(1, end)
--    return inner
--
--
--def _super_pprint(obj, p, cycle):
--    """The pprint for the super type."""
--    p.begin_group(8, '<super: ')
--    p.pretty(obj.__self_class__)
--    p.text(',')
--    p.breakable()
--    p.pretty(obj.__self__)
--    p.end_group(8, '>')
--
--
--def _re_pattern_pprint(obj, p, cycle):
--    """The pprint function for regular expression patterns."""
--    p.text('re.compile(')
--    pattern = repr(obj.pattern)
--    if pattern[:1] in 'uU':
--        pattern = pattern[1:]
--        prefix = 'ur'
--    else:
--        prefix = 'r'
--    pattern = prefix + pattern.replace('\\\\', '\\')
--    p.text(pattern)
--    if obj.flags:
--        p.text(',')
--        p.breakable()
--        done_one = False
--        for flag in ('TEMPLATE', 'IGNORECASE', 'LOCALE', 'MULTILINE', 'DOTALL',
--            'UNICODE', 'VERBOSE', 'DEBUG'):
--            if obj.flags & getattr(re, flag):
--                if done_one:
--                    p.text('|')
--                p.text('re.' + flag)
--                done_one = True
--    p.text(')')
--
--
--def _type_pprint(obj, p, cycle):
--    """The pprint for classes and types."""
--    if obj.__module__ in ('__builtin__', 'exceptions'):
--        name = obj.__name__
--    else:
--        name = obj.__module__ + '.' + obj.__name__
--    p.text(name)
--
--
--def _repr_pprint(obj, p, cycle):
--    """A pprint that just redirects to the normal repr function."""
--    p.text(repr(obj))
--
--
--def _function_pprint(obj, p, cycle):
--    """Base pprint for all functions and builtin functions."""
--    if obj.__module__ in ('__builtin__', 'exceptions') or not obj.__module__:
--        name = obj.__name__
--    else:
--        name = obj.__module__ + '.' + obj.__name__
--    p.text('<function %s>' % name)
--
--
--def _exception_pprint(obj, p, cycle):
--    """Base pprint for all exceptions."""
--    if obj.__class__.__module__ == 'exceptions':
--        name = obj.__class__.__name__
--    else:
--        name = '%s.%s' % (
--            obj.__class__.__module__,
--            obj.__class__.__name__
--        )
--    step = len(name) + 1
--    p.begin_group(step, '(')
--    for idx, arg in enumerate(getattr(obj, 'args', ())):
--        if idx:
--            p.text(',')
--            p.breakable()
--        p.pretty(arg)
--    p.end_group(step, ')')
--
--
--#: the exception base
--try:
--    _exception_base = BaseException
--except NameError:
--    _exception_base = Exception
--
--
--#: printers for builtin types
--_type_pprinters = {
--    int:                        _repr_pprint,
--    long:                       _repr_pprint,
--    float:                      _repr_pprint,
--    str:                        _repr_pprint,
--    unicode:                    _repr_pprint,
--    tuple:                      _seq_pprinter_factory('(', ')'),
--    list:                       _seq_pprinter_factory('[', ']'),
--    dict:                       _dict_pprinter_factory('{', '}'),
--    types.DictProxyType:        _dict_pprinter_factory('<dictproxy {', '}>'),
--    set:                        _seq_pprinter_factory('set([', '])'),
--    frozenset:                  _seq_pprinter_factory('frozenset([', '])'),
--    super:                      _super_pprint,
--    _re_pattern_type:           _re_pattern_pprint,
--    type:                       _type_pprint,
--    types.ClassType:            _type_pprint,
--    types.FunctionType:         _function_pprint,
--    types.BuiltinFunctionType:  _function_pprint,
--    types.SliceType:            _repr_pprint,
--    types.MethodType:           _repr_pprint,
--    xrange:                     _repr_pprint,
--    datetime.datetime:          _repr_pprint,
--    datetime.timedelta:         _repr_pprint,
--    _exception_base:            _exception_pprint
--}
--
--#: printers for types specified by name
--_deferred_type_pprinters = {
--}
--
--def for_type(typ, func):
--    """
--    Add a pretty printer for a given type.
--    """
--    oldfunc = _type_pprinters.get(typ, None)
--    if func is not None:
--        # To support easy restoration of old pprinters, we need to ignore Nones.
--        _type_pprinters[typ] = func
--    return oldfunc
--
--def for_type_by_name(type_module, type_name, func):
--    """
--    Add a pretty printer for a type specified by the module and name of a type
--    rather than the type object itself.
--    """
--    key = (type_module, type_name)
--    oldfunc = _deferred_type_pprinters.get(key, None)
--    if func is not None:
--        # To support easy restoration of old pprinters, we need to ignore Nones.
--        _deferred_type_pprinters[key] = func
--    return oldfunc
--
--
--#: printers for the default singletons
--_singleton_pprinters = dict.fromkeys(map(id, [None, True, False, Ellipsis,
--                                      NotImplemented]), _repr_pprint)
--
--
--if __name__ == '__main__':
--    from random import randrange
--    class Foo(object):
--        def __init__(self):
--            self.foo = 1
--            self.bar = re.compile(r'\s+')
--            self.blub = dict.fromkeys(range(30), randrange(1, 40))
--            self.hehe = 23424.234234
--            self.list = ["blub", "blah", self]
--
--        def get_foo(self):
--            print "foo"
--
--    pprint(Foo(), verbose=True)
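
[Note] Beyond the registry, the layout engine in the pretty module shown above wraps output by buffering Breakable separators per Group and breaking a group once the pending text would exceed max_width. A small sketch of driving PrettyPrinter directly, assuming the class is importable from `pretty`::

    import sys
    from pretty import PrettyPrinter    # assumed import path

    p = PrettyPrinter(sys.stdout, max_width=20)
    p.begin_group(1, '[')               # emit '[' and indent continuation lines by 1
    for i in range(8):
        if i:
            p.text(',')
            p.breakable()               # becomes ' ' or a newline plus indentation
        p.text(str(i * 111))
    p.end_group(1, ']')
    p.flush()                           # write anything still buffered

Once the buffered width pushes past max_width, the enclosing group is marked for breaking and every later breakable() in it starts a new, indented line.
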
 Index: ipython-0.10/IPython/external/simplegeneric/__init__.py
 ===================================================================
 --- /dev/null
@@ -14287,294 +77,6 @@ Index: ipython-0.10/IPython/external/simplegeneric/__init__.py
 +    from simplegeneric import *
 +except ImportError:
 +    from _simplegeneric import *
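
[Note] The three context lines above are the new unbundling mechanism in this commit: the IPython/external/<name>/__init__.py wrappers kept by the patch prefer a system-wide install of the module and only fall back to the bundled _<name> copy. The same pattern, spelled out with the module names from the hunk above::

    # IPython/external/simplegeneric/__init__.py -- pattern from the hunk above
    try:
        from simplegeneric import *      # system-installed package, if present
    except ImportError:
        from _simplegeneric import *     # bundled fallback copy

    # Callers are unaffected either way:
    #     from IPython.external.simplegeneric import generic
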
-Index: ipython-0.10/IPython/external/simplegeneric.py
-===================================================================
---- ipython-0.10.orig/IPython/external/simplegeneric.py
-+++ /dev/null
-@@ -1,139 +0,0 @@
--#Name: simplegeneric
--#Version: 0.6
--#Summary: Simple generic functions (similar to Python's own len(), pickle.dump(), etc.)
--#Home-page: http://cheeseshop.python.org/pypi/simplegeneric
--#Author: Phillip J. Eby
--#Author-email: peak at eby-sarna.com
--#License: PSF or ZPL
--
--# This is version 0.6 of Philip J. Eby's simplegeneric module
--# (http://cheeseshop.python.org/pypi/simplegeneric) patched to work
--# with Python 2.3 (which doesn't support assigning to __name__)
--
--__all__ = ["generic"]
--
--
--from types import ClassType, InstanceType
--classtypes = type, ClassType
--
--def generic(func):
--    """Create a simple generic function"""
--
--    _sentinel = object()
--
--    def _by_class(*args, **kw):
--        cls = args[0].__class__
--        for t in type(cls.__name__, (cls,object), {}).__mro__:
--            f = _gbt(t, _sentinel)
--            if f is not _sentinel:
--                return f(*args, **kw)
--        else:
--            return func(*args, **kw)
--
--    _by_type = {object: func, InstanceType: _by_class}
--    _gbt = _by_type.get
--
--    def when_type(t):
--        """Decorator to add a method that will be called for type `t`"""
--        if not isinstance(t, classtypes):
--            raise TypeError(
--                "%r is not a type or class" % (t,)
--            )
--        def decorate(f):
--            if _by_type.setdefault(t,f) is not f:
--                raise TypeError(
--                    "%r already has method for type %r" % (func, t)
--                )
--            return f
--        return decorate
--
--
--
--
--
--
--    _by_object = {}
--    _gbo = _by_object.get
--
--    def when_object(o):
--        """Decorator to add a method that will be called for object `o`"""
--        def decorate(f):
--            if _by_object.setdefault(id(o), (o,f))[1] is not f:
--                raise TypeError(
--                    "%r already has method for object %r" % (func, o)
--                )
--            return f
--        return decorate
--
--
--    def dispatch(*args, **kw):
--        f = _gbo(id(args[0]), _sentinel)
--        if f is _sentinel:
--            for t in type(args[0]).__mro__:
--                f = _gbt(t, _sentinel)
--                if f is not _sentinel:
--                    return f(*args, **kw)
--            else:
--                return func(*args, **kw)
--        else:
--            return f[1](*args, **kw)
--
--    try:
--        dispatch.__name__       = func.__name__
--    except TypeError:
--        pass
--    dispatch.__dict__       = func.__dict__.copy()
--    dispatch.__doc__        = func.__doc__
--    dispatch.__module__     = func.__module__
--
--    dispatch.when_type = when_type
--    dispatch.when_object = when_object
--    dispatch.default = func
--    dispatch.has_object = lambda o: id(o) in _by_object
--    dispatch.has_type   = lambda t: t in _by_type
--    return dispatch
--
--
--
--
--def test_suite():
--    import doctest
--    return doctest.DocFileSuite(
--        'README.txt',
--        optionflags=doctest.ELLIPSIS|doctest.REPORT_ONLY_FIRST_FAILURE,
--    )
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
--
-Index: ipython-0.10/IPython/external/simplegeneric/_simplegeneric.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/simplegeneric/_simplegeneric.py
-@@ -0,0 +1,139 @@
-+#Name: simplegeneric
-+#Version: 0.6
-+#Summary: Simple generic functions (similar to Python's own len(), pickle.dump(), etc.)
-+#Home-page: http://cheeseshop.python.org/pypi/simplegeneric
-+#Author: Phillip J. Eby
-+#Author-email: peak at eby-sarna.com
-+#License: PSF or ZPL
-+
-+# This is version 0.6 of Philip J. Eby's simplegeneric module
-+# (http://cheeseshop.python.org/pypi/simplegeneric) patched to work
-+# with Python 2.3 (which doesn't support assigning to __name__)
-+
-+__all__ = ["generic"]
-+
-+
-+from types import ClassType, InstanceType
-+classtypes = type, ClassType
-+
-+def generic(func):
-+    """Create a simple generic function"""
-+
-+    _sentinel = object()
-+
-+    def _by_class(*args, **kw):
-+        cls = args[0].__class__
-+        for t in type(cls.__name__, (cls,object), {}).__mro__:
-+            f = _gbt(t, _sentinel)
-+            if f is not _sentinel:
-+                return f(*args, **kw)
-+        else:
-+            return func(*args, **kw)
-+
-+    _by_type = {object: func, InstanceType: _by_class}
-+    _gbt = _by_type.get
-+
-+    def when_type(t):
-+        """Decorator to add a method that will be called for type `t`"""
-+        if not isinstance(t, classtypes):
-+            raise TypeError(
-+                "%r is not a type or class" % (t,)
-+            )
-+        def decorate(f):
-+            if _by_type.setdefault(t,f) is not f:
-+                raise TypeError(
-+                    "%r already has method for type %r" % (func, t)
-+                )
-+            return f
-+        return decorate
-+
-+
-+
-+
-+
-+
-+    _by_object = {}
-+    _gbo = _by_object.get
-+
-+    def when_object(o):
-+        """Decorator to add a method that will be called for object `o`"""
-+        def decorate(f):
-+            if _by_object.setdefault(id(o), (o,f))[1] is not f:
-+                raise TypeError(
-+                    "%r already has method for object %r" % (func, o)
-+                )
-+            return f
-+        return decorate
-+
-+
-+    def dispatch(*args, **kw):
-+        f = _gbo(id(args[0]), _sentinel)
-+        if f is _sentinel:
-+            for t in type(args[0]).__mro__:
-+                f = _gbt(t, _sentinel)
-+                if f is not _sentinel:
-+                    return f(*args, **kw)
-+            else:
-+                return func(*args, **kw)
-+        else:
-+            return f[1](*args, **kw)
-+
-+    try:
-+        dispatch.__name__       = func.__name__
-+    except TypeError:
-+        pass
-+    dispatch.__dict__       = func.__dict__.copy()
-+    dispatch.__doc__        = func.__doc__
-+    dispatch.__module__     = func.__module__
-+
-+    dispatch.when_type = when_type
-+    dispatch.when_object = when_object
-+    dispatch.default = func
-+    dispatch.has_object = lambda o: id(o) in _by_object
-+    dispatch.has_type   = lambda t: t in _by_type
-+    return dispatch
-+
-+
-+
-+
-+def test_suite():
-+    import doctest
-+    return doctest.DocFileSuite(
-+        'README.txt',
-+        optionflags=doctest.ELLIPSIS|doctest.REPORT_ONLY_FIRST_FAILURE,
-+    )
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
-+
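
[Note] The simplegeneric code in the hunks above implements single-dispatch generic functions: generic() wraps a default implementation, when_type() registers an implementation per class, when_object() per specific object, and anything unregistered falls through to the default. A short usage sketch, assuming the module is importable as `simplegeneric`; the example functions are illustrative::

    from simplegeneric import generic

    @generic
    def describe(obj):
        """Default implementation, used when no registration matches."""
        return 'object: %r' % (obj,)

    @describe.when_type(int)
    def describe_int(obj):
        return 'int: %d' % obj

    describe(3)         # -> 'int: 3'            (dispatched via when_type)
    describe('spam')    # -> "object: 'spam'"    (falls back to the default)
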
 Index: ipython-0.10/IPython/external/validate/__init__.py
 ===================================================================
 --- /dev/null
@@ -14588,2844 +90,6 @@ Index: ipython-0.10/IPython/external/validate/__init__.py
 +    from validate import *
 +except ImportError:
 +    from _validate import *
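
[Note] As with simplegeneric above, this wrapper prefers a system validate module and falls back to the bundled _validate copy. The validate module that appears in the hunks below exposes the Validator API its docstrings describe: a check string names a built-in check plus its arguments, and check() both validates the value and converts it from a string. A minimal sketch built from those doctests, assuming the module is importable as `validate`::

    from validate import Validator, VdtValueTooBigError

    vtor = Validator()
    vtor.check('integer(0, 9)', '5')        # -> 5, converted from the string
    vtor.check('float(max=20.0)', '8.4')    # -> 8.4
    try:
        vtor.check('integer(max=20)', '35')
    except VdtValueTooBigError:
        pass                                # out of range, per the docstring
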
-Index: ipython-0.10/IPython/external/validate.py
-===================================================================
---- ipython-0.10.orig/IPython/external/validate.py
-+++ /dev/null
-@@ -1,1414 +0,0 @@
--# validate.py
--# A Validator object
--# Copyright (C) 2005 Michael Foord, Mark Andrews, Nicola Larosa
--# E-mail: fuzzyman AT voidspace DOT org DOT uk
--#         mark AT la-la DOT com
--#         nico AT tekNico DOT net
--
--# This software is licensed under the terms of the BSD license.
--# http://www.voidspace.org.uk/python/license.shtml
--# Basically you're free to copy, modify, distribute and relicense it,
--# So long as you keep a copy of the license with it.
--
--# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
--# For information about bugfixes, updates and support, please join the
--# ConfigObj mailing list:
--# http://lists.sourceforge.net/lists/listinfo/configobj-develop
--# Comments, suggestions and bug reports welcome.
--
--"""
--    The Validator object is used to check that supplied values 
--    conform to a specification.
--    
--    The value can be supplied as a string - e.g. from a config file.
--    In this case the check will also *convert* the value to
--    the required type. This allows you to add validation
--    as a transparent layer to access data stored as strings.
--    The validation checks that the data is correct *and*
--    converts it to the expected type.
--    
--    Some standard checks are provided for basic data types.
--    Additional checks are easy to write. They can be
--    provided when the ``Validator`` is instantiated or
--    added afterwards.
--    
--    The standard functions work with the following basic data types :
--    
--    * integers
--    * floats
--    * booleans
--    * strings
--    * ip_addr
--    
--    plus lists of these datatypes
--    
--    Adding additional checks is done through coding simple functions.
--    
--    The full set of standard checks are : 
--    
--    * 'integer': matches integer values (including negative)
--                 Takes optional 'min' and 'max' arguments : ::
--    
--                   integer()
--                   integer(3, 9)  # any value from 3 to 9
--                   integer(min=0) # any positive value
--                   integer(max=9)
--    
--    * 'float': matches float values
--               Has the same parameters as the integer check.
--    
--    * 'boolean': matches boolean values - ``True`` or ``False``
--                 Acceptable string values for True are :
--                   true, on, yes, 1
--                 Acceptable string values for False are :
--                   false, off, no, 0
--    
--                 Any other value raises an error.
--    
--    * 'ip_addr': matches an Internet Protocol address, v.4, represented
--                 by a dotted-quad string, i.e. '1.2.3.4'.
--    
--    * 'string': matches any string.
--                Takes optional keyword args 'min' and 'max'
--                to specify min and max lengths of the string.
--    
--    * 'list': matches any list.
--              Takes optional keyword args 'min', and 'max' to specify min and
--              max sizes of the list. (Always returns a list.)
--    
--    * 'tuple': matches any tuple.
--              Takes optional keyword args 'min', and 'max' to specify min and
--              max sizes of the tuple. (Always returns a tuple.)
--    
--    * 'int_list': Matches a list of integers.
--                  Takes the same arguments as list.
--    
--    * 'float_list': Matches a list of floats.
--                    Takes the same arguments as list.
--    
--    * 'bool_list': Matches a list of boolean values.
--                   Takes the same arguments as list.
--    
--    * 'ip_addr_list': Matches a list of IP addresses.
--                     Takes the same arguments as list.
--    
--    * 'string_list': Matches a list of strings.
--                     Takes the same arguments as list.
--    
--    * 'mixed_list': Matches a list with different types in 
--                    specific positions. List size must match
--                    the number of arguments.
--    
--                    Each position can be one of :
--                    'integer', 'float', 'ip_addr', 'string', 'boolean'
--    
--                    So to specify a list with two strings followed
--                    by two integers, you write the check as : ::
--    
--                      mixed_list('string', 'string', 'integer', 'integer')
--    
--    * 'pass': This check matches everything ! It never fails
--              and the value is unchanged.
--    
--              It is also the default if no check is specified.
--    
--    * 'option': This check matches any from a list of options.
--                You specify this check with : ::
--    
--                  option('option 1', 'option 2', 'option 3')
--    
--    You can supply a default value (returned if no value is supplied)
--    using the default keyword argument.
--    
--    You specify a list argument for default using a list constructor syntax in
--    the check : ::
--    
--        checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
--    
--    A badly formatted set of arguments will raise a ``VdtParamError``.
--"""
--
--__docformat__ = "restructuredtext en"
--
--__version__ = '0.3.2'
--
--__revision__ = '$Id: validate.py 123 2005-09-08 08:54:28Z fuzzyman $'
--
--__all__ = (
--    '__version__',
--    'dottedQuadToNum',
--    'numToDottedQuad',
--    'ValidateError',
--    'VdtUnknownCheckError',
--    'VdtParamError',
--    'VdtTypeError',
--    'VdtValueError',
--    'VdtValueTooSmallError',
--    'VdtValueTooBigError',
--    'VdtValueTooShortError',
--    'VdtValueTooLongError',
--    'VdtMissingValue',
--    'Validator',
--    'is_integer',
--    'is_float',
--    'is_boolean',
--    'is_list',
--    'is_tuple',
--    'is_ip_addr',
--    'is_string',
--    'is_int_list',
--    'is_bool_list',
--    'is_float_list',
--    'is_string_list',
--    'is_ip_addr_list',
--    'is_mixed_list',
--    'is_option',
--    '__docformat__',
--)
--
--
--import sys
--INTP_VER = sys.version_info[:2]
--if INTP_VER < (2, 2):
--    raise RuntimeError("Python v.2.2 or later needed")
--
--import re
--StringTypes = (str, unicode)
--
--
--_list_arg = re.compile(r'''
--    (?:
--        ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
--            (
--                (?:
--                    \s*
--                    (?:
--                        (?:".*?")|              # double quotes
--                        (?:'.*?')|              # single quotes
--                        (?:[^'",\s\)][^,\)]*?)  # unquoted
--                    )
--                    \s*,\s*
--                )*
--                (?:
--                    (?:".*?")|              # double quotes
--                    (?:'.*?')|              # single quotes
--                    (?:[^'",\s\)][^,\)]*?)  # unquoted
--                )?                          # last one
--            )
--        \)
--    )
--''', re.VERBOSE)    # two groups
--
--_list_members = re.compile(r'''
--    (
--        (?:".*?")|              # double quotes
--        (?:'.*?')|              # single quotes
--        (?:[^'",\s=][^,=]*?)       # unquoted
--    )
--    (?:
--    (?:\s*,\s*)|(?:\s*$)            # comma
--    )
--''', re.VERBOSE)    # one group
--
--_paramstring = r'''
--    (?:
--        (
--            (?:
--                [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
--                    (?:
--                        \s*
--                        (?:
--                            (?:".*?")|              # double quotes
--                            (?:'.*?')|              # single quotes
--                            (?:[^'",\s\)][^,\)]*?)       # unquoted
--                        )
--                        \s*,\s*
--                    )*
--                    (?:
--                        (?:".*?")|              # double quotes
--                        (?:'.*?')|              # single quotes
--                        (?:[^'",\s\)][^,\)]*?)       # unquoted
--                    )?                              # last one
--                \)
--            )|
--            (?:
--                (?:".*?")|              # double quotes
--                (?:'.*?')|              # single quotes
--                (?:[^'",\s=][^,=]*?)|       # unquoted
--                (?:                         # keyword argument
--                    [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
--                    (?:
--                        (?:".*?")|              # double quotes
--                        (?:'.*?')|              # single quotes
--                        (?:[^'",\s=][^,=]*?)       # unquoted
--                    )
--                )
--            )
--        )
--        (?:
--            (?:\s*,\s*)|(?:\s*$)            # comma
--        )
--    )
--    '''
--
--_matchstring = '^%s*' % _paramstring
--
--# Python pre 2.2.1 doesn't have bool
--try:
--    bool
--except NameError:
--    def bool(val):
--        """Simple boolean equivalent function. """
--        if val:
--            return 1
--        else:
--            return 0
--
--
--def dottedQuadToNum(ip):
--    """
--    Convert decimal dotted quad string to long integer
--    
--    >>> dottedQuadToNum('1 ')
--    1L
--    >>> dottedQuadToNum(' 1.2')
--    16777218L
--    >>> dottedQuadToNum(' 1.2.3 ')
--    16908291L
--    >>> dottedQuadToNum('1.2.3.4')
--    16909060L
--    >>> dottedQuadToNum('1.2.3. 4')
--    Traceback (most recent call last):
--    ValueError: Not a good dotted-quad IP: 1.2.3. 4
--    >>> dottedQuadToNum('255.255.255.255')
--    4294967295L
--    >>> dottedQuadToNum('255.255.255.256')
--    Traceback (most recent call last):
--    ValueError: Not a good dotted-quad IP: 255.255.255.256
--    """
--    
--    # import here to avoid it when ip_addr values are not used
--    import socket, struct
--    
--    try:
--        return struct.unpack('!L',
--            socket.inet_aton(ip.strip()))[0]
--    except socket.error:
--        # bug in inet_aton, corrected in Python 2.3
--        if ip.strip() == '255.255.255.255':
--            return 0xFFFFFFFFL
--        else:
--            raise ValueError('Not a good dotted-quad IP: %s' % ip)
--    return
--
--
--def numToDottedQuad(num):
--    """
--    Convert long int to dotted quad string
--    
--    >>> numToDottedQuad(-1L)
--    Traceback (most recent call last):
--    ValueError: Not a good numeric IP: -1
--    >>> numToDottedQuad(1L)
--    '0.0.0.1'
--    >>> numToDottedQuad(16777218L)
--    '1.0.0.2'
--    >>> numToDottedQuad(16908291L)
--    '1.2.0.3'
--    >>> numToDottedQuad(16909060L)
--    '1.2.3.4'
--    >>> numToDottedQuad(4294967295L)
--    '255.255.255.255'
--    >>> numToDottedQuad(4294967296L)
--    Traceback (most recent call last):
--    ValueError: Not a good numeric IP: 4294967296
--    """
--    
--    # import here to avoid it when ip_addr values are not used
--    import socket, struct
--    
--    # no need to intercept here, 4294967295L is fine
--    try:
--        return socket.inet_ntoa(
--            struct.pack('!L', long(num)))
--    except (socket.error, struct.error, OverflowError):
--        raise ValueError('Not a good numeric IP: %s' % num)
--
--
--class ValidateError(Exception):
--    """
--    This error indicates that the check failed.
--    It can be the base class for more specific errors.
--    
--    Any check function that fails ought to raise this error.
--    (or a subclass)
--    
--    >>> raise ValidateError
--    Traceback (most recent call last):
--    ValidateError
--    """
--
--
--class VdtMissingValue(ValidateError):
--    """No value was supplied to a check that needed one."""
--
--
--class VdtUnknownCheckError(ValidateError):
--    """An unknown check function was requested"""
--
--    def __init__(self, value):
--        """
--        >>> raise VdtUnknownCheckError('yoda')
--        Traceback (most recent call last):
--        VdtUnknownCheckError: the check "yoda" is unknown.
--        """
--        ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
--
--
--class VdtParamError(SyntaxError):
--    """An incorrect parameter was passed"""
--
--    def __init__(self, name, value):
--        """
--        >>> raise VdtParamError('yoda', 'jedi')
--        Traceback (most recent call last):
--        VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
--        """
--        SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
--
--
--class VdtTypeError(ValidateError):
--    """The value supplied was of the wrong type"""
--
--    def __init__(self, value):
--        """
--        >>> raise VdtTypeError('jedi')
--        Traceback (most recent call last):
--        VdtTypeError: the value "jedi" is of the wrong type.
--        """
--        ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
--
--
--class VdtValueError(ValidateError):
--    """The value supplied was of the correct type, but was not an allowed value."""
--    
--    def __init__(self, value):
--        """
--        >>> raise VdtValueError('jedi')
--        Traceback (most recent call last):
--        VdtValueError: the value "jedi" is unacceptable.
--        """
--        ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
--
--
--class VdtValueTooSmallError(VdtValueError):
--    """The value supplied was of the correct type, but was too small."""
--
--    def __init__(self, value):
--        """
--        >>> raise VdtValueTooSmallError('0')
--        Traceback (most recent call last):
--        VdtValueTooSmallError: the value "0" is too small.
--        """
--        ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
--
--
--class VdtValueTooBigError(VdtValueError):
--    """The value supplied was of the correct type, but was too big."""
--
--    def __init__(self, value):
--        """
--        >>> raise VdtValueTooBigError('1')
--        Traceback (most recent call last):
--        VdtValueTooBigError: the value "1" is too big.
--        """
--        ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
--
--
--class VdtValueTooShortError(VdtValueError):
--    """The value supplied was of the correct type, but was too short."""
--
--    def __init__(self, value):
--        """
--        >>> raise VdtValueTooShortError('jed')
--        Traceback (most recent call last):
--        VdtValueTooShortError: the value "jed" is too short.
--        """
--        ValidateError.__init__(
--            self,
--            'the value "%s" is too short.' % (value,))
--
--
--class VdtValueTooLongError(VdtValueError):
--    """The value supplied was of the correct type, but was too long."""
--
--    def __init__(self, value):
--        """
--        >>> raise VdtValueTooLongError('jedie')
--        Traceback (most recent call last):
--        VdtValueTooLongError: the value "jedie" is too long.
--        """
--        ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
--
--
--class Validator(object):
--    """
--    Validator is an object that allows you to register a set of 'checks'.
--    These checks take input and test that it conforms to the check.
--    
--    This can also involve converting the value from a string into
--    the correct datatype.
--    
--    The ``check`` method takes an input string which configures which
--    check is to be used and applies that check to a supplied value.
--    
--    An example input string would be:
--    'int_range(param1, param2)'
--    
--    You would then provide something like:
--    
--    >>> def int_range_check(value, min, max):
--    ...     # turn min and max from strings to integers
--    ...     min = int(min)
--    ...     max = int(max)
--    ...     # check that value is of the correct type.
--    ...     # possible valid inputs are integers or strings
--    ...     # that represent integers
--    ...     if not isinstance(value, (int, long, StringTypes)):
--    ...         raise VdtTypeError(value)
--    ...     elif isinstance(value, StringTypes):
--    ...         # if we are given a string
--    ...         # attempt to convert to an integer
--    ...         try:
--    ...             value = int(value)
--    ...         except ValueError:
--    ...             raise VdtValueError(value)
--    ...     # check the value is between our constraints
--    ...     if not min <= value:
--    ...          raise VdtValueTooSmallError(value)
--    ...     if not value <= max:
--    ...          raise VdtValueTooBigError(value)
--    ...     return value
--    
--    >>> fdict = {'int_range': int_range_check}
--    >>> vtr1 = Validator(fdict)
--    >>> vtr1.check('int_range(20, 40)', '30')
--    30
--    >>> vtr1.check('int_range(20, 40)', '60')
--    Traceback (most recent call last):
--    VdtValueTooBigError: the value "60" is too big.
--    
--    New functions can be added with : ::
--    
--    >>> vtr2 = Validator()       
--    >>> vtr2.functions['int_range'] = int_range_check
--    
--    Or by passing in a dictionary of functions when Validator 
--    is instantiated.
--    
--    Your functions *can* use keyword arguments,
--    but the first argument should always be 'value'.
--    
--    If the function doesn't take additional arguments,
--    the parentheses are optional in the check.
--    It can be written with either of : ::
--    
--        keyword = function_name
--        keyword = function_name()
--    
--    The first program to utilise Validator() was Michael Foord's
--    ConfigObj, an alternative to ConfigParser which supports lists and
--    can validate a config file using a config schema.
--    For more details on using Validator with ConfigObj see:
--    http://www.voidspace.org.uk/python/configobj.html
--    """
--
--    # this regex does the initial parsing of the checks
--    _func_re = re.compile(r'(.+?)\((.*)\)')
--
--    # this regex takes apart keyword arguments
--    _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$')
--
--
--    # this regex finds keyword=list(....) type values
--    _list_arg = _list_arg
--
--    # this regex takes individual values out of lists - in one pass
--    _list_members = _list_members
--
--    # These regexes check a set of arguments for validity
--    # and then pull the members out
--    _paramfinder = re.compile(_paramstring, re.VERBOSE)
--    _matchfinder = re.compile(_matchstring, re.VERBOSE)
--
--
--    def __init__(self, functions=None):
--        """
--        >>> vtri = Validator()
--        """
--        self.functions = {
--            '': self._pass,
--            'integer': is_integer,
--            'float': is_float,
--            'boolean': is_boolean,
--            'ip_addr': is_ip_addr,
--            'string': is_string,
--            'list': is_list,
--            'tuple': is_tuple,
--            'int_list': is_int_list,
--            'float_list': is_float_list,
--            'bool_list': is_bool_list,
--            'ip_addr_list': is_ip_addr_list,
--            'string_list': is_string_list,
--            'mixed_list': is_mixed_list,
--            'pass': self._pass,
--            'option': is_option,
--        }
--        if functions is not None:
--            self.functions.update(functions)
--        # tekNico: for use by ConfigObj
--        self.baseErrorClass = ValidateError
--        self._cache = {}
--
--
--    def check(self, check, value, missing=False):
--        """
--        Usage: check(check, value)
--        
--        Arguments:
--            check: string representing check to apply (including arguments)
--            value: object to be checked
--        Returns value, converted to correct type if necessary
--        
--        If the check fails, raises a ``ValidateError`` subclass.
--        
--        >>> vtor.check('yoda', '')
--        Traceback (most recent call last):
--        VdtUnknownCheckError: the check "yoda" is unknown.
--        >>> vtor.check('yoda()', '')
--        Traceback (most recent call last):
--        VdtUnknownCheckError: the check "yoda" is unknown.
--        
--        >>> vtor.check('string(default="")', '', missing=True)
--        ''
--        """
--        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
--            
--        if missing:
--            if default is None:
--                # no information needed here - to be handled by caller
--                raise VdtMissingValue()
--            value = self._handle_none(default)
--                
--        if value is None:
--            return None
--        
--        return self._check_value(value, fun_name, fun_args, fun_kwargs)
--
--
--    def _handle_none(self, value):
--        if value == 'None':
--            value = None
--        elif value in ("'None'", '"None"'):
--            # Special case a quoted None
--            value = self._unquote(value)
--        return value
--
--
--    def _parse_with_caching(self, check):
--        if check in self._cache:
--            fun_name, fun_args, fun_kwargs, default = self._cache[check]
--            # We call list and dict below to work with *copies* of the data
--            # rather than the original (which are mutable of course)
--            fun_args = list(fun_args)
--            fun_kwargs = dict(fun_kwargs)
--        else:
--            fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
--            fun_kwargs = dict((str(key), value) for (key, value) in fun_kwargs.items())
--            self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
--        return fun_name, fun_args, fun_kwargs, default
--        
--        
--    def _check_value(self, value, fun_name, fun_args, fun_kwargs):
--        try:
--            fun = self.functions[fun_name]
--        except KeyError:
--            raise VdtUnknownCheckError(fun_name)
--        else:
--            return fun(value, *fun_args, **fun_kwargs)
--
--
--    def _parse_check(self, check):
--        fun_match = self._func_re.match(check)
--        if fun_match:
--            fun_name = fun_match.group(1)
--            arg_string = fun_match.group(2)
--            arg_match = self._matchfinder.match(arg_string)
--            if arg_match is None:
--                # Bad syntax
--                raise VdtParamError('Bad syntax in check "%s".' % check)
--            fun_args = []
--            fun_kwargs = {}
--            # pull out args of group 2
--            for arg in self._paramfinder.findall(arg_string):
--                # args may need whitespace removing (before removing quotes)
--                arg = arg.strip()
--                listmatch = self._list_arg.match(arg)
--                if listmatch:
--                    key, val = self._list_handle(listmatch)
--                    fun_kwargs[key] = val
--                    continue
--                keymatch = self._key_arg.match(arg)
--                if keymatch:
--                    val = keymatch.group(2)
--                    if not val in ("'None'", '"None"'):
--                        # Special case a quoted None
--                        val = self._unquote(val)
--                    fun_kwargs[keymatch.group(1)] = val
--                    continue
--                
--                fun_args.append(self._unquote(arg))
--        else:
--            # allows for function names without (args)
--            return check, (), {}, None
--
--        # Default must be deleted if the value is specified too,
--        # otherwise the check function will get a spurious "default" keyword arg
--        try:
--            default = fun_kwargs.pop('default', None)
--        except AttributeError:
--            # Python 2.2 compatibility
--            default = None
--            try:
--                default = fun_kwargs['default']
--                del fun_kwargs['default']
--            except KeyError:
--                pass
--            
--        return fun_name, fun_args, fun_kwargs, default
--
--
--    def _unquote(self, val):
--        """Unquote a value if necessary."""
--        if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
--            val = val[1:-1]
--        return val
--
--
--    def _list_handle(self, listmatch):
--        """Take apart a ``keyword=list('val, 'val')`` type string."""
--        out = []
--        name = listmatch.group(1)
--        args = listmatch.group(2)
--        for arg in self._list_members.findall(args):
--            out.append(self._unquote(arg))
--        return name, out
--
--
--    def _pass(self, value):
--        """
--        Dummy check that always passes
--        
--        >>> vtor.check('', 0)
--        0
--        >>> vtor.check('', '0')
--        '0'
--        """
--        return value
--    
--    
--    def get_default_value(self, check):
--        """
--        Given a check, return the default value for the check
--        (converted to the right type).
--        
--        If the check doesn't specify a default value then a
--        ``KeyError`` will be raised.
--        """
--        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
--        if default is None:
--            raise KeyError('Check "%s" has no default value.' % check)
--        value = self._handle_none(default)
--        if value is None:
--            return value
--        return self._check_value(value, fun_name, fun_args, fun_kwargs)
--
--
--def _is_num_param(names, values, to_float=False):
--    """
--    Return numbers from inputs or raise VdtParamError.
--    
--    Lets ``None`` pass through.
--    Pass in keyword argument ``to_float=True`` to
--    use float for the conversion rather than int.
--    
--    >>> _is_num_param(('', ''), (0, 1.0))
--    [0, 1]
--    >>> _is_num_param(('', ''), (0, 1.0), to_float=True)
--    [0.0, 1.0]
--    >>> _is_num_param(('a'), ('a'))
--    Traceback (most recent call last):
--    VdtParamError: passed an incorrect value "a" for parameter "a".
--    """
--    fun = to_float and float or int
--    out_params = []
--    for (name, val) in zip(names, values):
--        if val is None:
--            out_params.append(val)
--        elif isinstance(val, (int, long, float, StringTypes)):
--            try:
--                out_params.append(fun(val))
--            except ValueError, e:
--                raise VdtParamError(name, val)
--        else:
--            raise VdtParamError(name, val)
--    return out_params
--
--
--# built in checks
--# you can override these by setting the appropriate name
--# in Validator.functions
--# note: if the params are specified wrongly in your input string,
--#       you will also raise errors.
--
--def is_integer(value, min=None, max=None):
--    """
--    A check that tests that a given value is an integer (int, or long)
--    and optionally, between bounds. A negative value is accepted, while
--    a float will fail.
--    
--    If the value is a string, then the conversion is done - if possible.
--    Otherwise a VdtError is raised.
--    
--    >>> vtor.check('integer', '-1')
--    -1
--    >>> vtor.check('integer', '0')
--    0
--    >>> vtor.check('integer', 9)
--    9
--    >>> vtor.check('integer', 'a')
--    Traceback (most recent call last):
--    VdtTypeError: the value "a" is of the wrong type.
--    >>> vtor.check('integer', '2.2')
--    Traceback (most recent call last):
--    VdtTypeError: the value "2.2" is of the wrong type.
--    >>> vtor.check('integer(10)', '20')
--    20
--    >>> vtor.check('integer(max=20)', '15')
--    15
--    >>> vtor.check('integer(10)', '9')
--    Traceback (most recent call last):
--    VdtValueTooSmallError: the value "9" is too small.
--    >>> vtor.check('integer(10)', 9)
--    Traceback (most recent call last):
--    VdtValueTooSmallError: the value "9" is too small.
--    >>> vtor.check('integer(max=20)', '35')
--    Traceback (most recent call last):
--    VdtValueTooBigError: the value "35" is too big.
--    >>> vtor.check('integer(max=20)', 35)
--    Traceback (most recent call last):
--    VdtValueTooBigError: the value "35" is too big.
--    >>> vtor.check('integer(0, 9)', False)
--    0
--    """
--    (min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
--    if not isinstance(value, (int, long, StringTypes)):
--        raise VdtTypeError(value)
--    if isinstance(value, StringTypes):
--        # if it's a string - does it represent an integer ?
--        try:
--            value = int(value)
--        except ValueError:
--            raise VdtTypeError(value)
--    if (min_val is not None) and (value < min_val):
--        raise VdtValueTooSmallError(value)
--    if (max_val is not None) and (value > max_val):
--        raise VdtValueTooBigError(value)
--    return value
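A short usage sketch for the integer check (same import and Python 2 assumptions as above); bounds may be given positionally or as min=/max= keywords, and string values are converted:

    from validate import Validator, VdtValueTooSmallError

    vtor = Validator()
    assert vtor.check('integer', '-1') == -1           # strings are converted
    assert vtor.check('integer(10, 20)', '15') == 15   # positional min, max
    try:
        vtor.check('integer(min=0)', '-5')             # below the lower bound
    except VdtValueTooSmallError:
        pass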
--
--
--def is_float(value, min=None, max=None):
--    """
--    A check that tests that a given value is a float
--    (an integer will be accepted), and optionally - that it is between bounds.
--    
--    If the value is a string, then the conversion is done - if possible.
--    Otherwise a VdtError is raised.
--    
--    This can accept negative values.
--    
--    >>> vtor.check('float', '2')
--    2.0
--    
--    From now on we multiply the value to avoid comparing decimals
--    
--    >>> vtor.check('float', '-6.8') * 10
--    -68.0
--    >>> vtor.check('float', '12.2') * 10
--    122.0
--    >>> vtor.check('float', 8.4) * 10
--    84.0
--    >>> vtor.check('float', 'a')
--    Traceback (most recent call last):
--    VdtTypeError: the value "a" is of the wrong type.
--    >>> vtor.check('float(10.1)', '10.2') * 10
--    102.0
--    >>> vtor.check('float(max=20.2)', '15.1') * 10
--    151.0
--    >>> vtor.check('float(10.0)', '9.0')
--    Traceback (most recent call last):
--    VdtValueTooSmallError: the value "9.0" is too small.
--    >>> vtor.check('float(max=20.0)', '35.0')
--    Traceback (most recent call last):
--    VdtValueTooBigError: the value "35.0" is too big.
--    """
--    (min_val, max_val) = _is_num_param(
--        ('min', 'max'), (min, max), to_float=True)
--    if not isinstance(value, (int, long, float, StringTypes)):
--        raise VdtTypeError(value)
--    if not isinstance(value, float):
--        # if it's a string - does it represent a float ?
--        try:
--            value = float(value)
--        except ValueError:
--            raise VdtTypeError(value)
--    if (min_val is not None) and (value < min_val):
--        raise VdtValueTooSmallError(value)
--    if (max_val is not None) and (value > max_val):
--        raise VdtValueTooBigError(value)
--    return value
--
--
--bool_dict = {
--    True: True, 'on': True, '1': True, 'true': True, 'yes': True, 
--    False: False, 'off': False, '0': False, 'false': False, 'no': False,
--}
--
--
--def is_boolean(value):
--    """
--    Check if the value represents a boolean.
--    
--    >>> vtor.check('boolean', 0)
--    0
--    >>> vtor.check('boolean', False)
--    0
--    >>> vtor.check('boolean', '0')
--    0
--    >>> vtor.check('boolean', 'off')
--    0
--    >>> vtor.check('boolean', 'false')
--    0
--    >>> vtor.check('boolean', 'no')
--    0
--    >>> vtor.check('boolean', 'nO')
--    0
--    >>> vtor.check('boolean', 'NO')
--    0
--    >>> vtor.check('boolean', 1)
--    1
--    >>> vtor.check('boolean', True)
--    1
--    >>> vtor.check('boolean', '1')
--    1
--    >>> vtor.check('boolean', 'on')
--    1
--    >>> vtor.check('boolean', 'true')
--    1
--    >>> vtor.check('boolean', 'yes')
--    1
--    >>> vtor.check('boolean', 'Yes')
--    1
--    >>> vtor.check('boolean', 'YES')
--    1
--    >>> vtor.check('boolean', '')
--    Traceback (most recent call last):
--    VdtTypeError: the value "" is of the wrong type.
--    >>> vtor.check('boolean', 'up')
--    Traceback (most recent call last):
--    VdtTypeError: the value "up" is of the wrong type.
--    
--    """
--    if isinstance(value, StringTypes):
--        try:
--            return bool_dict[value.lower()]
--        except KeyError:
--            raise VdtTypeError(value)
--    # we do an equality test rather than an identity test
--    # this ensures Python 2.2 compatibility
--    # and allows 0 and 1 to represent True and False
--    if value == False:
--        return False
--    elif value == True:
--        return True
--    else:
--        raise VdtTypeError(value)
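The accepted spellings for the boolean check are exactly the keys of bool_dict above, compared case-insensitively; anything else is a type error rather than a value error. A small sketch, with the same import assumptions as before:

    from validate import Validator, VdtTypeError

    vtor = Validator()
    for word in ('yes', 'on', 'true', '1'):
        assert vtor.check('boolean', word) is True
    for word in ('no', 'off', 'false', '0'):
        assert vtor.check('boolean', word) is False
    try:
        vtor.check('boolean', 'maybe')    # not in bool_dict
    except VdtTypeError:
        pass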
--
--
--def is_ip_addr(value):
--    """
--    Check that the supplied value is an Internet Protocol address, v.4,
--    represented by a dotted-quad string, i.e. '1.2.3.4'.
--    
--    >>> vtor.check('ip_addr', '1 ')
--    '1'
--    >>> vtor.check('ip_addr', ' 1.2')
--    '1.2'
--    >>> vtor.check('ip_addr', ' 1.2.3 ')
--    '1.2.3'
--    >>> vtor.check('ip_addr', '1.2.3.4')
--    '1.2.3.4'
--    >>> vtor.check('ip_addr', '0.0.0.0')
--    '0.0.0.0'
--    >>> vtor.check('ip_addr', '255.255.255.255')
--    '255.255.255.255'
--    >>> vtor.check('ip_addr', '255.255.255.256')
--    Traceback (most recent call last):
--    VdtValueError: the value "255.255.255.256" is unacceptable.
--    >>> vtor.check('ip_addr', '1.2.3.4.5')
--    Traceback (most recent call last):
--    VdtValueError: the value "1.2.3.4.5" is unacceptable.
--    >>> vtor.check('ip_addr', '1.2.3. 4')
--    Traceback (most recent call last):
--    VdtValueError: the value "1.2.3. 4" is unacceptable.
--    >>> vtor.check('ip_addr', 0)
--    Traceback (most recent call last):
--    VdtTypeError: the value "0" is of the wrong type.
--    """
--    if not isinstance(value, StringTypes):
--        raise VdtTypeError(value)
--    value = value.strip()
--    try:
--        dottedQuadToNum(value)
--    except ValueError:
--        raise VdtValueError(value)
--    return value
--
--
--def is_list(value, min=None, max=None):
--    """
--    Check that the value is a list of values.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    It does no check on list members.
--    
--    >>> vtor.check('list', ())
--    []
--    >>> vtor.check('list', [])
--    []
--    >>> vtor.check('list', (1, 2))
--    [1, 2]
--    >>> vtor.check('list', [1, 2])
--    [1, 2]
--    >>> vtor.check('list(3)', (1, 2))
--    Traceback (most recent call last):
--    VdtValueTooShortError: the value "(1, 2)" is too short.
--    >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
--    Traceback (most recent call last):
--    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
--    >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
--    [1, 2, 3, 4]
--    >>> vtor.check('list', 0)
--    Traceback (most recent call last):
--    VdtTypeError: the value "0" is of the wrong type.
--    >>> vtor.check('list', '12')
--    Traceback (most recent call last):
--    VdtTypeError: the value "12" is of the wrong type.
--    """
--    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
--    if isinstance(value, StringTypes):
--        raise VdtTypeError(value)
--    try:
--        num_members = len(value)
--    except TypeError:
--        raise VdtTypeError(value)
--    if min_len is not None and num_members < min_len:
--        raise VdtValueTooShortError(value)
--    if max_len is not None and num_members > max_len:
--        raise VdtValueTooLongError(value)
--    return list(value)
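is_list() accepts any sized sequence except a string and always hands back a plain list, which is why the doctests above show list output for tuple input. A sketch under the same import assumptions:

    from validate import Validator, VdtValueTooShortError

    vtor = Validator()
    # min/max bound the number of members only; the members themselves
    # are not checked.
    assert vtor.check('list(min=2, max=4)', (1, 2, 3)) == [1, 2, 3]
    try:
        vtor.check('list(min=2)', [1])
    except VdtValueTooShortError:
        pass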
--
--
--def is_tuple(value, min=None, max=None):
--    """
--    Check that the value is a tuple of values.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    It does no check on members.
--    
--    >>> vtor.check('tuple', ())
--    ()
--    >>> vtor.check('tuple', [])
--    ()
--    >>> vtor.check('tuple', (1, 2))
--    (1, 2)
--    >>> vtor.check('tuple', [1, 2])
--    (1, 2)
--    >>> vtor.check('tuple(3)', (1, 2))
--    Traceback (most recent call last):
--    VdtValueTooShortError: the value "(1, 2)" is too short.
--    >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
--    Traceback (most recent call last):
--    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
--    >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
--    (1, 2, 3, 4)
--    >>> vtor.check('tuple', 0)
--    Traceback (most recent call last):
--    VdtTypeError: the value "0" is of the wrong type.
--    >>> vtor.check('tuple', '12')
--    Traceback (most recent call last):
--    VdtTypeError: the value "12" is of the wrong type.
--    """
--    return tuple(is_list(value, min, max))
--
--
--def is_string(value, min=None, max=None):
--    """
--    Check that the supplied value is a string.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    >>> vtor.check('string', '0')
--    '0'
--    >>> vtor.check('string', 0)
--    Traceback (most recent call last):
--    VdtTypeError: the value "0" is of the wrong type.
--    >>> vtor.check('string(2)', '12')
--    '12'
--    >>> vtor.check('string(2)', '1')
--    Traceback (most recent call last):
--    VdtValueTooShortError: the value "1" is too short.
--    >>> vtor.check('string(min=2, max=3)', '123')
--    '123'
--    >>> vtor.check('string(min=2, max=3)', '1234')
--    Traceback (most recent call last):
--    VdtValueTooLongError: the value "1234" is too long.
--    """
--    if not isinstance(value, StringTypes):
--        raise VdtTypeError(value)
--    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
--    try:
--        num_members = len(value)
--    except TypeError:
--        raise VdtTypeError(value)
--    if min_len is not None and num_members < min_len:
--        raise VdtValueTooShortError(value)
--    if max_len is not None and num_members > max_len:
--        raise VdtValueTooLongError(value)
--    return value
--
--
--def is_int_list(value, min=None, max=None):
--    """
--    Check that the value is a list of integers.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    Each list member is checked that it is an integer.
--    
--    >>> vtor.check('int_list', ())
--    []
--    >>> vtor.check('int_list', [])
--    []
--    >>> vtor.check('int_list', (1, 2))
--    [1, 2]
--    >>> vtor.check('int_list', [1, 2])
--    [1, 2]
--    >>> vtor.check('int_list', [1, 'a'])
--    Traceback (most recent call last):
--    VdtTypeError: the value "a" is of the wrong type.
--    """
--    return [is_integer(mem) for mem in is_list(value, min, max)]
--
--
--def is_bool_list(value, min=None, max=None):
--    """
--    Check that the value is a list of booleans.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    Each list member is checked that it is a boolean.
--    
--    >>> vtor.check('bool_list', ())
--    []
--    >>> vtor.check('bool_list', [])
--    []
--    >>> check_res = vtor.check('bool_list', (True, False))
--    >>> check_res == [True, False]
--    1
--    >>> check_res = vtor.check('bool_list', [True, False])
--    >>> check_res == [True, False]
--    1
--    >>> vtor.check('bool_list', [True, 'a'])
--    Traceback (most recent call last):
--    VdtTypeError: the value "a" is of the wrong type.
--    """
--    return [is_boolean(mem) for mem in is_list(value, min, max)]
--
--
--def is_float_list(value, min=None, max=None):
--    """
--    Check that the value is a list of floats.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    Each list member is checked that it is a float.
--    
--    >>> vtor.check('float_list', ())
--    []
--    >>> vtor.check('float_list', [])
--    []
--    >>> vtor.check('float_list', (1, 2.0))
--    [1.0, 2.0]
--    >>> vtor.check('float_list', [1, 2.0])
--    [1.0, 2.0]
--    >>> vtor.check('float_list', [1, 'a'])
--    Traceback (most recent call last):
--    VdtTypeError: the value "a" is of the wrong type.
--    """
--    return [is_float(mem) for mem in is_list(value, min, max)]
--
--
--def is_string_list(value, min=None, max=None):
--    """
--    Check that the value is a list of strings.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    Each list member is checked that it is a string.
--    
--    >>> vtor.check('string_list', ())
--    []
--    >>> vtor.check('string_list', [])
--    []
--    >>> vtor.check('string_list', ('a', 'b'))
--    ['a', 'b']
--    >>> vtor.check('string_list', ['a', 1])
--    Traceback (most recent call last):
--    VdtTypeError: the value "1" is of the wrong type.
--    >>> vtor.check('string_list', 'hello')
--    Traceback (most recent call last):
--    VdtTypeError: the value "hello" is of the wrong type.
--    """
--    if isinstance(value, StringTypes):
--        raise VdtTypeError(value)
--    return [is_string(mem) for mem in is_list(value, min, max)]
--
--
--def is_ip_addr_list(value, min=None, max=None):
--    """
--    Check that the value is a list of IP addresses.
--    
--    You can optionally specify the minimum and maximum number of members.
--    
--    Each list member is checked that it is an IP address.
--    
--    >>> vtor.check('ip_addr_list', ())
--    []
--    >>> vtor.check('ip_addr_list', [])
--    []
--    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
--    ['1.2.3.4', '5.6.7.8']
--    >>> vtor.check('ip_addr_list', ['a'])
--    Traceback (most recent call last):
--    VdtValueError: the value "a" is unacceptable.
--    """
--    return [is_ip_addr(mem) for mem in is_list(value, min, max)]
--
--
--fun_dict = {
--    'integer': is_integer,
--    'float': is_float,
--    'ip_addr': is_ip_addr,
--    'string': is_string,
--    'boolean': is_boolean,
--}
--
--
--def is_mixed_list(value, *args):
--    """
--    Check that the value is a list.
--    Allow specifying the type of each member.
--    Work on lists of specific lengths.
--    
--    You specify each member as a positional argument specifying type
--    
--    Each type should be one of the following strings :
--      'integer', 'float', 'ip_addr', 'string', 'boolean'
--    
--    So you can specify a list of two strings, followed by
--    two integers as :
--    
--      mixed_list('string', 'string', 'integer', 'integer')
--    
--    The length of the list must match the number of positional
--    arguments you supply.
--    
--    >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
--    >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
--    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
--    1
--    >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
--    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
--    1
--    >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
--    Traceback (most recent call last):
--    VdtTypeError: the value "b" is of the wrong type.
--    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
--    Traceback (most recent call last):
--    VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
--    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
--    Traceback (most recent call last):
--    VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
--    >>> vtor.check(mix_str, 0)
--    Traceback (most recent call last):
--    VdtTypeError: the value "0" is of the wrong type.
--    
--    This test requires an elaborate setup, because of a change in error string
--    output from the interpreter between Python 2.2 and 2.3 .
--    
--    >>> res_seq = (
--    ...     'passed an incorrect value "',
--    ...     'yoda',
--    ...     '" for parameter "mixed_list".',
--    ... )
--    >>> if INTP_VER == (2, 2):
--    ...     res_str = "".join(res_seq)
--    ... else:
--    ...     res_str = "'".join(res_seq)
--    >>> try:
--    ...     vtor.check('mixed_list("yoda")', ('a'))
--    ... except VdtParamError, err:
--    ...     str(err) == res_str
--    1
--    """
--    try:
--        length = len(value)
--    except TypeError:
--        raise VdtTypeError(value)
--    if length < len(args):
--        raise VdtValueTooShortError(value)
--    elif length > len(args):
--        raise VdtValueTooLongError(value)
--    try:
--        return [fun_dict[arg](val) for arg, val in zip(args, value)]
--    except KeyError, e:
--        raise VdtParamError('mixed_list', e)
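A mixed_list check names one built-in check per position, and the value must have exactly that many members; each member is converted by its own check. A sketch, same assumptions:

    from validate import Validator, VdtValueTooShortError

    vtor = Validator()
    check = "mixed_list('string', 'string', 'integer', 'integer')"
    assert vtor.check(check, ('a', 'b', '1', '2')) == ['a', 'b', 1, 2]
    try:
        vtor.check(check, ('a', 'b', '1'))    # one member short
    except VdtValueTooShortError:
        pass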
--
--
--def is_option(value, *options):
--    """
--    This check matches the value to any of a set of options.
--    
--    >>> vtor.check('option("yoda", "jedi")', 'yoda')
--    'yoda'
--    >>> vtor.check('option("yoda", "jedi")', 'jed')
--    Traceback (most recent call last):
--    VdtValueError: the value "jed" is unacceptable.
--    >>> vtor.check('option("yoda", "jedi")', 0)
--    Traceback (most recent call last):
--    VdtTypeError: the value "0" is of the wrong type.
--    """
--    if not isinstance(value, StringTypes):
--        raise VdtTypeError(value)
--    if not value in options:
--        raise VdtValueError(value)
--    return value
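The option check restricts a string to a fixed set of choices and rejects non-strings outright. Sketch, same assumptions:

    from validate import Validator, VdtValueError

    vtor = Validator()
    check = "option('debug', 'info', 'warning')"
    assert vtor.check(check, 'info') == 'info'
    try:
        vtor.check(check, 'verbose')    # not one of the declared options
    except VdtValueError:
        pass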
--
--
--def _test(value, *args, **keywargs):
--    """
--    A function that exists for test purposes.
--    
--    >>> checks = [
--    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
--    ...     '3',
--    ...     '3, 6',
--    ...     '3,',
--    ...     'min=1, test="a b c"',
--    ...     'min=5, test="a, b, c"',
--    ...     'min=1, max=3, test="a, b, c"',
--    ...     'min=-100, test=-99',
--    ...     'min=1, max=3',
--    ...     '3, 6, test="36"',
--    ...     '3, 6, test="a, b, c"',
--    ...     '3, max=3, test=list("a", "b", "c")',
--    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
--    ...     "test='x=fish(3)'",
--    ...    ]
--    >>> v = Validator({'test': _test})
--    >>> for entry in checks:
--    ...     print v.check(('test(%s)' % entry), 3)
--    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
--    (3, ('3',), {})
--    (3, ('3', '6'), {})
--    (3, ('3',), {})
--    (3, (), {'test': 'a b c', 'min': '1'})
--    (3, (), {'test': 'a, b, c', 'min': '5'})
--    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
--    (3, (), {'test': '-99', 'min': '-100'})
--    (3, (), {'max': '3', 'min': '1'})
--    (3, ('3', '6'), {'test': '36'})
--    (3, ('3', '6'), {'test': 'a, b, c'})
--    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
--    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
--    (3, (), {'test': 'x=fish(3)'})
--    
--    >>> v = Validator()
--    >>> v.check('integer(default=6)', '3')
--    3
--    >>> v.check('integer(default=6)', None, True)
--    6
--    >>> v.get_default_value('integer(default=6)')
--    6
--    >>> v.get_default_value('float(default=6)')
--    6.0
--    >>> v.get_default_value('pass(default=None)')
--    >>> v.get_default_value("string(default='None')")
--    'None'
--    >>> v.get_default_value('pass')
--    Traceback (most recent call last):
--    KeyError: 'Check "pass" has no default value.'
--    >>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
--    ['1', '2', '3', '4']
--    
--    >>> v = Validator()
--    >>> v.check("pass(default=None)", None, True)
--    >>> v.check("pass(default='None')", None, True)
--    'None'
--    >>> v.check('pass(default="None")', None, True)
--    'None'
--    >>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
--    ['1', '2', '3', '4']
--    
--    Bug test for unicode arguments
--    >>> v = Validator()
--    >>> v.check(u'string(min=4)', u'test')
--    u'test'
--    
--    >>> v = Validator()
--    >>> v.get_default_value(u'string(min=4, default="1234")')
--    u'1234'
--    >>> v.check(u'string(min=4, default="1234")', u'test')
--    u'test'
--    
--    >>> v = Validator()
--    >>> default = v.get_default_value('string(default=None)')
--    >>> default == None
--    1
--    """
--    return (value, args, keywargs)
--
--
--if __name__ == '__main__':
--    # run the code tests in doctest format
--    import doctest
--    m = sys.modules.get('__main__')
--    globs = m.__dict__.copy()
--    globs.update({
--        'INTP_VER': INTP_VER,
--        'vtor': Validator(),
--    })
--    doctest.testmod(m, globs=globs)
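Beyond the built-ins removed above, the same Validator accepts user-defined checks. The sketch below registers a hypothetical `port` check; the name and function are illustrative only, not part of the patch, and it again assumes the module imports as `validate` under Python 2:

    from validate import Validator, VdtTypeError, VdtValueError

    def is_port(value):
        """Hypothetical check: a TCP port number, possibly given as a string."""
        try:
            port = int(value)
        except (TypeError, ValueError):
            raise VdtTypeError(value)
        if not 0 < port < 65536:
            raise VdtValueError(value)
        return port

    # Custom checks are registered by name next to the built-ins.
    vtor = Validator({'port': is_port})
    assert vtor.check('port', '8080') == 8080
    assert vtor.check('port(default=22)', None, missing=True) == 22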
-Index: ipython-0.10/IPython/external/validate/_validate.py
-===================================================================
---- /dev/null
-+++ ipython-0.10/IPython/external/validate/_validate.py
-@@ -0,0 +1,1414 @@
-+# validate.py
-+# A Validator object
-+# Copyright (C) 2005 Michael Foord, Mark Andrews, Nicola Larosa
-+# E-mail: fuzzyman AT voidspace DOT org DOT uk
-+#         mark AT la-la DOT com
-+#         nico AT tekNico DOT net
-+
-+# This software is licensed under the terms of the BSD license.
-+# http://www.voidspace.org.uk/python/license.shtml
-+# Basically you're free to copy, modify, distribute and relicense it,
-+# So long as you keep a copy of the license with it.
-+
-+# Scripts maintained at http://www.voidspace.org.uk/python/index.shtml
-+# For information about bugfixes, updates and support, please join the
-+# ConfigObj mailing list:
-+# http://lists.sourceforge.net/lists/listinfo/configobj-develop
-+# Comments, suggestions and bug reports welcome.
-+
-+"""
-+    The Validator object is used to check that supplied values 
-+    conform to a specification.
-+    
-+    The value can be supplied as a string - e.g. from a config file.
-+    In this case the check will also *convert* the value to
-+    the required type. This allows you to add validation
-+    as a transparent layer to access data stored as strings.
-+    The validation checks that the data is correct *and*
-+    converts it to the expected type.
-+    
-+    Some standard checks are provided for basic data types.
-+    Additional checks are easy to write. They can be
-+    provided when the ``Validator`` is instantiated or
-+    added afterwards.
-+    
-+    The standard functions work with the following basic data types :
-+    
-+    * integers
-+    * floats
-+    * booleans
-+    * strings
-+    * ip_addr
-+    
-+    plus lists of these datatypes
-+    
-+    Adding additional checks is done through coding simple functions.
-+    
-+    The full set of standard checks are : 
-+    
-+    * 'integer': matches integer values (including negative)
-+                 Takes optional 'min' and 'max' arguments : ::
-+    
-+                   integer()
-+                   integer(3, 9)  # any value from 3 to 9
-+                   integer(min=0) # any positive value
-+                   integer(max=9)
-+    
-+    * 'float': matches float values
-+               Has the same parameters as the integer check.
-+    
-+    * 'boolean': matches boolean values - ``True`` or ``False``
-+                 Acceptable string values for True are :
-+                   true, on, yes, 1
-+                 Acceptable string values for False are :
-+                   false, off, no, 0
-+    
-+                 Any other value raises an error.
-+    
-+    * 'ip_addr': matches an Internet Protocol address, v.4, represented
-+                 by a dotted-quad string, i.e. '1.2.3.4'.
-+    
-+    * 'string': matches any string.
-+                Takes optional keyword args 'min' and 'max'
-+                to specify min and max lengths of the string.
-+    
-+    * 'list': matches any list.
-+              Takes optional keyword args 'min', and 'max' to specify min and
-+              max sizes of the list. (Always returns a list.)
-+    
-+    * 'tuple': matches any tuple.
-+              Takes optional keyword args 'min', and 'max' to specify min and
-+              max sizes of the tuple. (Always returns a tuple.)
-+    
-+    * 'int_list': Matches a list of integers.
-+                  Takes the same arguments as list.
-+    
-+    * 'float_list': Matches a list of floats.
-+                    Takes the same arguments as list.
-+    
-+    * 'bool_list': Matches a list of boolean values.
-+                   Takes the same arguments as list.
-+    
-+    * 'ip_addr_list': Matches a list of IP addresses.
-+                     Takes the same arguments as list.
-+    
-+    * 'string_list': Matches a list of strings.
-+                     Takes the same arguments as list.
-+    
-+    * 'mixed_list': Matches a list with different types in 
-+                    specific positions. List size must match
-+                    the number of arguments.
-+    
-+                    Each position can be one of :
-+                    'integer', 'float', 'ip_addr', 'string', 'boolean'
-+    
-+                    So to specify a list with two strings followed
-+                    by two integers, you write the check as : ::
-+    
-+                      mixed_list('string', 'string', 'integer', 'integer')
-+    
-+    * 'pass': This check matches everything ! It never fails
-+              and the value is unchanged.
-+    
-+              It is also the default if no check is specified.
-+    
-+    * 'option': This check matches any from a list of options.
-+                You specify this check with : ::
-+    
-+                  option('option 1', 'option 2', 'option 3')
-+    
-+    You can supply a default value (returned if no value is supplied)
-+    using the default keyword argument.
-+    
-+    You specify a list argument for default using a list constructor syntax in
-+    the check : ::
-+    
-+        checkname(arg1, arg2, default=list('val 1', 'val 2', 'val 3'))
-+    
-+    A badly formatted set of arguments will raise a ``VdtParamError``.
-+"""
-+
-+__docformat__ = "restructuredtext en"
-+
-+__version__ = '0.3.2'
-+
-+__revision__ = '$Id: validate.py 123 2005-09-08 08:54:28Z fuzzyman $'
-+
-+__all__ = (
-+    '__version__',
-+    'dottedQuadToNum',
-+    'numToDottedQuad',
-+    'ValidateError',
-+    'VdtUnknownCheckError',
-+    'VdtParamError',
-+    'VdtTypeError',
-+    'VdtValueError',
-+    'VdtValueTooSmallError',
-+    'VdtValueTooBigError',
-+    'VdtValueTooShortError',
-+    'VdtValueTooLongError',
-+    'VdtMissingValue',
-+    'Validator',
-+    'is_integer',
-+    'is_float',
-+    'is_boolean',
-+    'is_list',
-+    'is_tuple',
-+    'is_ip_addr',
-+    'is_string',
-+    'is_int_list',
-+    'is_bool_list',
-+    'is_float_list',
-+    'is_string_list',
-+    'is_ip_addr_list',
-+    'is_mixed_list',
-+    'is_option',
-+    '__docformat__',
-+)
-+
-+
-+import sys
-+INTP_VER = sys.version_info[:2]
-+if INTP_VER < (2, 2):
-+    raise RuntimeError("Python v.2.2 or later needed")
-+
-+import re
-+StringTypes = (str, unicode)
-+
-+
-+_list_arg = re.compile(r'''
-+    (?:
-+        ([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*list\(
-+            (
-+                (?:
-+                    \s*
-+                    (?:
-+                        (?:".*?")|              # double quotes
-+                        (?:'.*?')|              # single quotes
-+                        (?:[^'",\s\)][^,\)]*?)  # unquoted
-+                    )
-+                    \s*,\s*
-+                )*
-+                (?:
-+                    (?:".*?")|              # double quotes
-+                    (?:'.*?')|              # single quotes
-+                    (?:[^'",\s\)][^,\)]*?)  # unquoted
-+                )?                          # last one
-+            )
-+        \)
-+    )
-+''', re.VERBOSE)    # two groups
-+
-+_list_members = re.compile(r'''
-+    (
-+        (?:".*?")|              # double quotes
-+        (?:'.*?')|              # single quotes
-+        (?:[^'",\s=][^,=]*?)       # unquoted
-+    )
-+    (?:
-+    (?:\s*,\s*)|(?:\s*$)            # comma
-+    )
-+''', re.VERBOSE)    # one group
-+
-+_paramstring = r'''
-+    (?:
-+        (
-+            (?:
-+                [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*list\(
-+                    (?:
-+                        \s*
-+                        (?:
-+                            (?:".*?")|              # double quotes
-+                            (?:'.*?')|              # single quotes
-+                            (?:[^'",\s\)][^,\)]*?)       # unquoted
-+                        )
-+                        \s*,\s*
-+                    )*
-+                    (?:
-+                        (?:".*?")|              # double quotes
-+                        (?:'.*?')|              # single quotes
-+                        (?:[^'",\s\)][^,\)]*?)       # unquoted
-+                    )?                              # last one
-+                \)
-+            )|
-+            (?:
-+                (?:".*?")|              # double quotes
-+                (?:'.*?')|              # single quotes
-+                (?:[^'",\s=][^,=]*?)|       # unquoted
-+                (?:                         # keyword argument
-+                    [a-zA-Z_][a-zA-Z0-9_]*\s*=\s*
-+                    (?:
-+                        (?:".*?")|              # double quotes
-+                        (?:'.*?')|              # single quotes
-+                        (?:[^'",\s=][^,=]*?)       # unquoted
-+                    )
-+                )
-+            )
-+        )
-+        (?:
-+            (?:\s*,\s*)|(?:\s*$)            # comma
-+        )
-+    )
-+    '''
-+
-+_matchstring = '^%s*' % _paramstring
-+
-+# Python pre 2.2.1 doesn't have bool
-+try:
-+    bool
-+except NameError:
-+    def bool(val):
-+        """Simple boolean equivalent function. """
-+        if val:
-+            return 1
-+        else:
-+            return 0
-+
-+
-+def dottedQuadToNum(ip):
-+    """
-+    Convert decimal dotted quad string to long integer
-+    
-+    >>> dottedQuadToNum('1 ')
-+    1L
-+    >>> dottedQuadToNum(' 1.2')
-+    16777218L
-+    >>> dottedQuadToNum(' 1.2.3 ')
-+    16908291L
-+    >>> dottedQuadToNum('1.2.3.4')
-+    16909060L
-+    >>> dottedQuadToNum('1.2.3. 4')
-+    Traceback (most recent call last):
-+    ValueError: Not a good dotted-quad IP: 1.2.3. 4
-+    >>> dottedQuadToNum('255.255.255.255')
-+    4294967295L
-+    >>> dottedQuadToNum('255.255.255.256')
-+    Traceback (most recent call last):
-+    ValueError: Not a good dotted-quad IP: 255.255.255.256
-+    """
-+    
-+    # import here to avoid it when ip_addr values are not used
-+    import socket, struct
-+    
-+    try:
-+        return struct.unpack('!L',
-+            socket.inet_aton(ip.strip()))[0]
-+    except socket.error:
-+        # bug in inet_aton, corrected in Python 2.3
-+        if ip.strip() == '255.255.255.255':
-+            return 0xFFFFFFFFL
-+        else:
-+            raise ValueError('Not a good dotted-quad IP: %s' % ip)
-+    return
-+
-+
-+def numToDottedQuad(num):
-+    """
-+    Convert long int to dotted quad string
-+    
-+    >>> numToDottedQuad(-1L)
-+    Traceback (most recent call last):
-+    ValueError: Not a good numeric IP: -1
-+    >>> numToDottedQuad(1L)
-+    '0.0.0.1'
-+    >>> numToDottedQuad(16777218L)
-+    '1.0.0.2'
-+    >>> numToDottedQuad(16908291L)
-+    '1.2.0.3'
-+    >>> numToDottedQuad(16909060L)
-+    '1.2.3.4'
-+    >>> numToDottedQuad(4294967295L)
-+    '255.255.255.255'
-+    >>> numToDottedQuad(4294967296L)
-+    Traceback (most recent call last):
-+    ValueError: Not a good numeric IP: 4294967296
-+    """
-+    
-+    # import here to avoid it when ip_addr values are not used
-+    import socket, struct
-+    
-+    # no need to intercept here, 4294967295L is fine
-+    try:
-+        return socket.inet_ntoa(
-+            struct.pack('!L', long(num)))
-+    except (socket.error, struct.error, OverflowError):
-+        raise ValueError('Not a good numeric IP: %s' % num)
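dottedQuadToNum() and numToDottedQuad() are inverses for valid addresses; each octet occupies one byte of the 32-bit value. A quick sketch under the same import assumption:

    from validate import dottedQuadToNum, numToDottedQuad

    num = dottedQuadToNum('192.168.0.1')
    assert num == (192 << 24) | (168 << 16) | (0 << 8) | 1
    assert numToDottedQuad(num) == '192.168.0.1'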
-+
-+
-+class ValidateError(Exception):
-+    """
-+    This error indicates that the check failed.
-+    It can be the base class for more specific errors.
-+    
-+    Any check function that fails ought to raise this error.
-+    (or a subclass)
-+    
-+    >>> raise ValidateError
-+    Traceback (most recent call last):
-+    ValidateError
-+    """
-+
-+
-+class VdtMissingValue(ValidateError):
-+    """No value was supplied to a check that needed one."""
-+
-+
-+class VdtUnknownCheckError(ValidateError):
-+    """An unknown check function was requested"""
-+
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtUnknownCheckError('yoda')
-+        Traceback (most recent call last):
-+        VdtUnknownCheckError: the check "yoda" is unknown.
-+        """
-+        ValidateError.__init__(self, 'the check "%s" is unknown.' % (value,))
-+
-+
-+class VdtParamError(SyntaxError):
-+    """An incorrect parameter was passed"""
-+
-+    def __init__(self, name, value):
-+        """
-+        >>> raise VdtParamError('yoda', 'jedi')
-+        Traceback (most recent call last):
-+        VdtParamError: passed an incorrect value "jedi" for parameter "yoda".
-+        """
-+        SyntaxError.__init__(self, 'passed an incorrect value "%s" for parameter "%s".' % (value, name))
-+
-+
-+class VdtTypeError(ValidateError):
-+    """The value supplied was of the wrong type"""
-+
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtTypeError('jedi')
-+        Traceback (most recent call last):
-+        VdtTypeError: the value "jedi" is of the wrong type.
-+        """
-+        ValidateError.__init__(self, 'the value "%s" is of the wrong type.' % (value,))
-+
-+
-+class VdtValueError(ValidateError):
-+    """The value supplied was of the correct type, but was not an allowed value."""
-+    
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtValueError('jedi')
-+        Traceback (most recent call last):
-+        VdtValueError: the value "jedi" is unacceptable.
-+        """
-+        ValidateError.__init__(self, 'the value "%s" is unacceptable.' % (value,))
-+
-+
-+class VdtValueTooSmallError(VdtValueError):
-+    """The value supplied was of the correct type, but was too small."""
-+
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtValueTooSmallError('0')
-+        Traceback (most recent call last):
-+        VdtValueTooSmallError: the value "0" is too small.
-+        """
-+        ValidateError.__init__(self, 'the value "%s" is too small.' % (value,))
-+
-+
-+class VdtValueTooBigError(VdtValueError):
-+    """The value supplied was of the correct type, but was too big."""
-+
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtValueTooBigError('1')
-+        Traceback (most recent call last):
-+        VdtValueTooBigError: the value "1" is too big.
-+        """
-+        ValidateError.__init__(self, 'the value "%s" is too big.' % (value,))
-+
-+
-+class VdtValueTooShortError(VdtValueError):
-+    """The value supplied was of the correct type, but was too short."""
-+
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtValueTooShortError('jed')
-+        Traceback (most recent call last):
-+        VdtValueTooShortError: the value "jed" is too short.
-+        """
-+        ValidateError.__init__(
-+            self,
-+            'the value "%s" is too short.' % (value,))
-+
-+
-+class VdtValueTooLongError(VdtValueError):
-+    """The value supplied was of the correct type, but was too long."""
-+
-+    def __init__(self, value):
-+        """
-+        >>> raise VdtValueTooLongError('jedie')
-+        Traceback (most recent call last):
-+        VdtValueTooLongError: the value "jedie" is too long.
-+        """
-+        ValidateError.__init__(self, 'the value "%s" is too long.' % (value,))
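All of the failure classes above derive from ValidateError, so callers that only care that a value was rejected can catch the base class, while the subclass type records why. Sketch (Python 2 `except ..., err` syntax, matching the module's own code):

    from validate import Validator, ValidateError, VdtValueError

    vtor = Validator()
    try:
        vtor.check('integer(max=10)', '99')
    except ValidateError, err:
        # Too-big is a value error, not a type error.
        assert isinstance(err, VdtValueError)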
-+
-+
-+class Validator(object):
-+    """
-+    Validator is an object that allows you to register a set of 'checks'.
-+    These checks take input and test that it conforms to the check.
-+    
-+    This can also involve converting the value from a string into
-+    the correct datatype.
-+    
-+    The ``check`` method takes an input string which configures which
-+    check is to be used and applies that check to a supplied value.
-+    
-+    An example input string would be:
-+    'int_range(param1, param2)'
-+    
-+    You would then provide something like:
-+    
-+    >>> def int_range_check(value, min, max):
-+    ...     # turn min and max from strings to integers
-+    ...     min = int(min)
-+    ...     max = int(max)
-+    ...     # check that value is of the correct type.
-+    ...     # possible valid inputs are integers or strings
-+    ...     # that represent integers
-+    ...     if not isinstance(value, (int, long, StringTypes)):
-+    ...         raise VdtTypeError(value)
-+    ...     elif isinstance(value, StringTypes):
-+    ...         # if we are given a string
-+    ...         # attempt to convert to an integer
-+    ...         try:
-+    ...             value = int(value)
-+    ...         except ValueError:
-+    ...             raise VdtValueError(value)
-+    ...     # check the value is between our constraints
-+    ...     if not min <= value:
-+    ...          raise VdtValueTooSmallError(value)
-+    ...     if not value <= max:
-+    ...          raise VdtValueTooBigError(value)
-+    ...     return value
-+    
-+    >>> fdict = {'int_range': int_range_check}
-+    >>> vtr1 = Validator(fdict)
-+    >>> vtr1.check('int_range(20, 40)', '30')
-+    30
-+    >>> vtr1.check('int_range(20, 40)', '60')
-+    Traceback (most recent call last):
-+    VdtValueTooBigError: the value "60" is too big.
-+    
-+    New functions can be added with : ::
-+    
-+    >>> vtr2 = Validator()       
-+    >>> vtr2.functions['int_range'] = int_range_check
-+    
-+    Or by passing in a dictionary of functions when Validator 
-+    is instantiated.
-+    
-+    Your functions *can* use keyword arguments,
-+    but the first argument should always be 'value'.
-+    
-+    If the function doesn't take additional arguments,
-+    the parentheses are optional in the check.
-+    It can be written with either of : ::
-+    
-+        keyword = function_name
-+        keyword = function_name()
-+    
-+    The first program to utilise Validator() was Michael Foord's
-+    ConfigObj, an alternative to ConfigParser which supports lists and
-+    can validate a config file using a config schema.
-+    For more details on using Validator with ConfigObj see:
-+    http://www.voidspace.org.uk/python/configobj.html
-+    """
-+
-+    # this regex does the initial parsing of the checks
-+    _func_re = re.compile(r'(.+?)\((.*)\)')
-+
-+    # this regex takes apart keyword arguments
-+    _key_arg = re.compile(r'^([a-zA-Z_][a-zA-Z0-9_]*)\s*=\s*(.*)$')
-+
-+
-+    # this regex finds keyword=list(....) type values
-+    _list_arg = _list_arg
-+
-+    # this regex takes individual values out of lists - in one pass
-+    _list_members = _list_members
-+
-+    # These regexes check a set of arguments for validity
-+    # and then pull the members out
-+    _paramfinder = re.compile(_paramstring, re.VERBOSE)
-+    _matchfinder = re.compile(_matchstring, re.VERBOSE)
-+
-+
-+    def __init__(self, functions=None):
-+        """
-+        >>> vtri = Validator()
-+        """
-+        self.functions = {
-+            '': self._pass,
-+            'integer': is_integer,
-+            'float': is_float,
-+            'boolean': is_boolean,
-+            'ip_addr': is_ip_addr,
-+            'string': is_string,
-+            'list': is_list,
-+            'tuple': is_tuple,
-+            'int_list': is_int_list,
-+            'float_list': is_float_list,
-+            'bool_list': is_bool_list,
-+            'ip_addr_list': is_ip_addr_list,
-+            'string_list': is_string_list,
-+            'mixed_list': is_mixed_list,
-+            'pass': self._pass,
-+            'option': is_option,
-+        }
-+        if functions is not None:
-+            self.functions.update(functions)
-+        # tekNico: for use by ConfigObj
-+        self.baseErrorClass = ValidateError
-+        self._cache = {}
-+
-+
-+    def check(self, check, value, missing=False):
-+        """
-+        Usage: check(check, value)
-+        
-+        Arguments:
-+            check: string representing check to apply (including arguments)
-+            value: object to be checked
-+        Returns value, converted to correct type if necessary
-+        
-+        If the check fails, raises a ``ValidateError`` subclass.
-+        
-+        >>> vtor.check('yoda', '')
-+        Traceback (most recent call last):
-+        VdtUnknownCheckError: the check "yoda" is unknown.
-+        >>> vtor.check('yoda()', '')
-+        Traceback (most recent call last):
-+        VdtUnknownCheckError: the check "yoda" is unknown.
-+        
-+        >>> vtor.check('string(default="")', '', missing=True)
-+        ''
-+        """
-+        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
-+            
-+        if missing:
-+            if default is None:
-+                # no information needed here - to be handled by caller
-+                raise VdtMissingValue()
-+            value = self._handle_none(default)
-+                
-+        if value is None:
-+            return None
-+        
-+        return self._check_value(value, fun_name, fun_args, fun_kwargs)
-+
-+
-+    def _handle_none(self, value):
-+        if value == 'None':
-+            value = None
-+        elif value in ("'None'", '"None"'):
-+            # Special case a quoted None
-+            value = self._unquote(value)
-+        return value
-+
-+
-+    def _parse_with_caching(self, check):
-+        if check in self._cache:
-+            fun_name, fun_args, fun_kwargs, default = self._cache[check]
-+            # We call list and dict below to work with *copies* of the data
-+            # rather than the original (which are mutable of course)
-+            fun_args = list(fun_args)
-+            fun_kwargs = dict(fun_kwargs)
-+        else:
-+            fun_name, fun_args, fun_kwargs, default = self._parse_check(check)
-+            fun_kwargs = dict((str(key), value) for (key, value) in fun_kwargs.items())
-+            self._cache[check] = fun_name, list(fun_args), dict(fun_kwargs), default
-+        return fun_name, fun_args, fun_kwargs, default
-+        
-+        
-+    def _check_value(self, value, fun_name, fun_args, fun_kwargs):
-+        try:
-+            fun = self.functions[fun_name]
-+        except KeyError:
-+            raise VdtUnknownCheckError(fun_name)
-+        else:
-+            return fun(value, *fun_args, **fun_kwargs)
-+
-+
-+    def _parse_check(self, check):
-+        fun_match = self._func_re.match(check)
-+        if fun_match:
-+            fun_name = fun_match.group(1)
-+            arg_string = fun_match.group(2)
-+            arg_match = self._matchfinder.match(arg_string)
-+            if arg_match is None:
-+                # Bad syntax
-+                raise VdtParamError('Bad syntax in check "%s".' % check)
-+            fun_args = []
-+            fun_kwargs = {}
-+            # pull out args of group 2
-+            for arg in self._paramfinder.findall(arg_string):
-+                # args may need whitespace removing (before removing quotes)
-+                arg = arg.strip()
-+                listmatch = self._list_arg.match(arg)
-+                if listmatch:
-+                    key, val = self._list_handle(listmatch)
-+                    fun_kwargs[key] = val
-+                    continue
-+                keymatch = self._key_arg.match(arg)
-+                if keymatch:
-+                    val = keymatch.group(2)
-+                    if not val in ("'None'", '"None"'):
-+                        # Special case a quoted None
-+                        val = self._unquote(val)
-+                    fun_kwargs[keymatch.group(1)] = val
-+                    continue
-+                
-+                fun_args.append(self._unquote(arg))
-+        else:
-+            # allows for function names without (args)
-+            return check, (), {}, None
-+
-+        # Default must be deleted if the value is specified too,
-+        # otherwise the check function will get a spurious "default" keyword arg
-+        try:
-+            default = fun_kwargs.pop('default', None)
-+        except AttributeError:
-+            # Python 2.2 compatibility
-+            default = None
-+            try:
-+                default = fun_kwargs['default']
-+                del fun_kwargs['default']
-+            except KeyError:
-+                pass
-+            
-+        return fun_name, fun_args, fun_kwargs, default
-+
-+
-+    def _unquote(self, val):
-+        """Unquote a value if necessary."""
-+        if (len(val) >= 2) and (val[0] in ("'", '"')) and (val[0] == val[-1]):
-+            val = val[1:-1]
-+        return val
-+
-+
-+    def _list_handle(self, listmatch):
-+        """Take apart a ``keyword=list('val, 'val')`` type string."""
-+        out = []
-+        name = listmatch.group(1)
-+        args = listmatch.group(2)
-+        for arg in self._list_members.findall(args):
-+            out.append(self._unquote(arg))
-+        return name, out
-+
-+
-+    def _pass(self, value):
-+        """
-+        Dummy check that always passes
-+        
-+        >>> vtor.check('', 0)
-+        0
-+        >>> vtor.check('', '0')
-+        '0'
-+        """
-+        return value
-+    
-+    
-+    def get_default_value(self, check):
-+        """
-+        Given a check, return the default value for the check
-+        (converted to the right type).
-+        
-+        If the check doesn't specify a default value then a
-+        ``KeyError`` will be raised.
-+        """
-+        fun_name, fun_args, fun_kwargs, default = self._parse_with_caching(check)
-+        if default is None:
-+            raise KeyError('Check "%s" has no default value.' % check)
-+        value = self._handle_none(default)
-+        if value is None:
-+            return value
-+        return self._check_value(value, fun_name, fun_args, fun_kwargs)
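As the class docstring notes, every argument parsed out of a check string reaches the check function as a string, so custom checks with keyword arguments convert them themselves. A sketch with a hypothetical `in_range` check (name and function illustrative only, same import assumptions as before):

    from validate import Validator, VdtTypeError, VdtValueError

    def in_range(value, min='0', max='100'):
        """Hypothetical keyword-argument check; min/max arrive as strings."""
        try:
            value = int(value)
        except (TypeError, ValueError):
            raise VdtTypeError(value)
        if not int(min) <= value <= int(max):
            raise VdtValueError(value)
        return value

    vtor = Validator()
    vtor.functions['in_range'] = in_range   # or Validator({'in_range': in_range})
    assert vtor.check('in_range(min=10, max=20)', '15') == 15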
-+
-+
-+def _is_num_param(names, values, to_float=False):
-+    """
-+    Return numbers from inputs or raise VdtParamError.
-+    
-+    Lets ``None`` pass through.
-+    Pass in keyword argument ``to_float=True`` to
-+    use float for the conversion rather than int.
-+    
-+    >>> _is_num_param(('', ''), (0, 1.0))
-+    [0, 1]
-+    >>> _is_num_param(('', ''), (0, 1.0), to_float=True)
-+    [0.0, 1.0]
-+    >>> _is_num_param(('a'), ('a'))
-+    Traceback (most recent call last):
-+    VdtParamError: passed an incorrect value "a" for parameter "a".
-+    """
-+    fun = to_float and float or int
-+    out_params = []
-+    for (name, val) in zip(names, values):
-+        if val is None:
-+            out_params.append(val)
-+        elif isinstance(val, (int, long, float, StringTypes)):
-+            try:
-+                out_params.append(fun(val))
-+            except ValueError, e:
-+                raise VdtParamError(name, val)
-+        else:
-+            raise VdtParamError(name, val)
-+    return out_params
-+
-+
-+# built in checks
-+# you can override these by setting the appropriate name
-+# in Validator.functions
-+# note: if the params are specified wrongly in your input string,
-+#       you will also raise errors.
-+
-+def is_integer(value, min=None, max=None):
-+    """
-+    A check that tests that a given value is an integer (int, or long)
-+    and optionally, between bounds. A negative value is accepted, while
-+    a float will fail.
-+    
-+    If the value is a string, then the conversion is done - if possible.
-+    Otherwise a VdtError is raised.
-+    
-+    >>> vtor.check('integer', '-1')
-+    -1
-+    >>> vtor.check('integer', '0')
-+    0
-+    >>> vtor.check('integer', 9)
-+    9
-+    >>> vtor.check('integer', 'a')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "a" is of the wrong type.
-+    >>> vtor.check('integer', '2.2')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "2.2" is of the wrong type.
-+    >>> vtor.check('integer(10)', '20')
-+    20
-+    >>> vtor.check('integer(max=20)', '15')
-+    15
-+    >>> vtor.check('integer(10)', '9')
-+    Traceback (most recent call last):
-+    VdtValueTooSmallError: the value "9" is too small.
-+    >>> vtor.check('integer(10)', 9)
-+    Traceback (most recent call last):
-+    VdtValueTooSmallError: the value "9" is too small.
-+    >>> vtor.check('integer(max=20)', '35')
-+    Traceback (most recent call last):
-+    VdtValueTooBigError: the value "35" is too big.
-+    >>> vtor.check('integer(max=20)', 35)
-+    Traceback (most recent call last):
-+    VdtValueTooBigError: the value "35" is too big.
-+    >>> vtor.check('integer(0, 9)', False)
-+    0
-+    """
-+    (min_val, max_val) = _is_num_param(('min', 'max'), (min, max))
-+    if not isinstance(value, (int, long, StringTypes)):
-+        raise VdtTypeError(value)
-+    if isinstance(value, StringTypes):
-+        # if it's a string - does it represent an integer ?
-+        try:
-+            value = int(value)
-+        except ValueError:
-+            raise VdtTypeError(value)
-+    if (min_val is not None) and (value < min_val):
-+        raise VdtValueTooSmallError(value)
-+    if (max_val is not None) and (value > max_val):
-+        raise VdtValueTooBigError(value)
-+    return value
-+
-+
-+def is_float(value, min=None, max=None):
-+    """
-+    A check that tests that a given value is a float
-+    (an integer will be accepted), and optionally - that it is between bounds.
-+    
-+    If the value is a string, then the conversion is done - if possible.
-+    Otherwise a VdtError is raised.
-+    
-+    This can accept negative values.
-+    
-+    >>> vtor.check('float', '2')
-+    2.0
-+    
-+    From now on we multiply the value to avoid comparing decimals
-+    
-+    >>> vtor.check('float', '-6.8') * 10
-+    -68.0
-+    >>> vtor.check('float', '12.2') * 10
-+    122.0
-+    >>> vtor.check('float', 8.4) * 10
-+    84.0
-+    >>> vtor.check('float', 'a')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "a" is of the wrong type.
-+    >>> vtor.check('float(10.1)', '10.2') * 10
-+    102.0
-+    >>> vtor.check('float(max=20.2)', '15.1') * 10
-+    151.0
-+    >>> vtor.check('float(10.0)', '9.0')
-+    Traceback (most recent call last):
-+    VdtValueTooSmallError: the value "9.0" is too small.
-+    >>> vtor.check('float(max=20.0)', '35.0')
-+    Traceback (most recent call last):
-+    VdtValueTooBigError: the value "35.0" is too big.
-+    """
-+    (min_val, max_val) = _is_num_param(
-+        ('min', 'max'), (min, max), to_float=True)
-+    if not isinstance(value, (int, long, float, StringTypes)):
-+        raise VdtTypeError(value)
-+    if not isinstance(value, float):
-+        # if it's a string - does it represent a float ?
-+        try:
-+            value = float(value)
-+        except ValueError:
-+            raise VdtTypeError(value)
-+    if (min_val is not None) and (value < min_val):
-+        raise VdtValueTooSmallError(value)
-+    if (max_val is not None) and (value > max_val):
-+        raise VdtValueTooBigError(value)
-+    return value
-+
-+
-+bool_dict = {
-+    True: True, 'on': True, '1': True, 'true': True, 'yes': True, 
-+    False: False, 'off': False, '0': False, 'false': False, 'no': False,
-+}
-+
-+
-+def is_boolean(value):
-+    """
-+    Check if the value represents a boolean.
-+    
-+    >>> vtor.check('boolean', 0)
-+    0
-+    >>> vtor.check('boolean', False)
-+    0
-+    >>> vtor.check('boolean', '0')
-+    0
-+    >>> vtor.check('boolean', 'off')
-+    0
-+    >>> vtor.check('boolean', 'false')
-+    0
-+    >>> vtor.check('boolean', 'no')
-+    0
-+    >>> vtor.check('boolean', 'nO')
-+    0
-+    >>> vtor.check('boolean', 'NO')
-+    0
-+    >>> vtor.check('boolean', 1)
-+    1
-+    >>> vtor.check('boolean', True)
-+    1
-+    >>> vtor.check('boolean', '1')
-+    1
-+    >>> vtor.check('boolean', 'on')
-+    1
-+    >>> vtor.check('boolean', 'true')
-+    1
-+    >>> vtor.check('boolean', 'yes')
-+    1
-+    >>> vtor.check('boolean', 'Yes')
-+    1
-+    >>> vtor.check('boolean', 'YES')
-+    1
-+    >>> vtor.check('boolean', '')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "" is of the wrong type.
-+    >>> vtor.check('boolean', 'up')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "up" is of the wrong type.
-+    
-+    """
-+    if isinstance(value, StringTypes):
-+        try:
-+            return bool_dict[value.lower()]
-+        except KeyError:
-+            raise VdtTypeError(value)
-+    # we do an equality test rather than an identity test
-+    # this ensures Python 2.2 compatibility
-+    # and allows 0 and 1 to represent True and False
-+    if value == False:
-+        return False
-+    elif value == True:
-+        return True
-+    else:
-+        raise VdtTypeError(value)
-+
-+
-+def is_ip_addr(value):
-+    """
-+    Check that the supplied value is an Internet Protocol address, v.4,
-+    represented by a dotted-quad string, i.e. '1.2.3.4'.
-+    
-+    >>> vtor.check('ip_addr', '1 ')
-+    '1'
-+    >>> vtor.check('ip_addr', ' 1.2')
-+    '1.2'
-+    >>> vtor.check('ip_addr', ' 1.2.3 ')
-+    '1.2.3'
-+    >>> vtor.check('ip_addr', '1.2.3.4')
-+    '1.2.3.4'
-+    >>> vtor.check('ip_addr', '0.0.0.0')
-+    '0.0.0.0'
-+    >>> vtor.check('ip_addr', '255.255.255.255')
-+    '255.255.255.255'
-+    >>> vtor.check('ip_addr', '255.255.255.256')
-+    Traceback (most recent call last):
-+    VdtValueError: the value "255.255.255.256" is unacceptable.
-+    >>> vtor.check('ip_addr', '1.2.3.4.5')
-+    Traceback (most recent call last):
-+    VdtValueError: the value "1.2.3.4.5" is unacceptable.
-+    >>> vtor.check('ip_addr', '1.2.3. 4')
-+    Traceback (most recent call last):
-+    VdtValueError: the value "1.2.3. 4" is unacceptable.
-+    >>> vtor.check('ip_addr', 0)
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "0" is of the wrong type.
-+    """
-+    if not isinstance(value, StringTypes):
-+        raise VdtTypeError(value)
-+    value = value.strip()
-+    try:
-+        dottedQuadToNum(value)
-+    except ValueError:
-+        raise VdtValueError(value)
-+    return value
-+
-+
-+def is_list(value, min=None, max=None):
-+    """
-+    Check that the value is a list of values.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    It does no check on list members.
-+    
-+    >>> vtor.check('list', ())
-+    []
-+    >>> vtor.check('list', [])
-+    []
-+    >>> vtor.check('list', (1, 2))
-+    [1, 2]
-+    >>> vtor.check('list', [1, 2])
-+    [1, 2]
-+    >>> vtor.check('list(3)', (1, 2))
-+    Traceback (most recent call last):
-+    VdtValueTooShortError: the value "(1, 2)" is too short.
-+    >>> vtor.check('list(max=5)', (1, 2, 3, 4, 5, 6))
-+    Traceback (most recent call last):
-+    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
-+    >>> vtor.check('list(min=3, max=5)', (1, 2, 3, 4))
-+    [1, 2, 3, 4]
-+    >>> vtor.check('list', 0)
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "0" is of the wrong type.
-+    >>> vtor.check('list', '12')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "12" is of the wrong type.
-+    """
-+    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
-+    if isinstance(value, StringTypes):
-+        raise VdtTypeError(value)
-+    try:
-+        num_members = len(value)
-+    except TypeError:
-+        raise VdtTypeError(value)
-+    if min_len is not None and num_members < min_len:
-+        raise VdtValueTooShortError(value)
-+    if max_len is not None and num_members > max_len:
-+        raise VdtValueTooLongError(value)
-+    return list(value)
-+
-+
-+def is_tuple(value, min=None, max=None):
-+    """
-+    Check that the value is a tuple of values.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    It does no check on members.
-+    
-+    >>> vtor.check('tuple', ())
-+    ()
-+    >>> vtor.check('tuple', [])
-+    ()
-+    >>> vtor.check('tuple', (1, 2))
-+    (1, 2)
-+    >>> vtor.check('tuple', [1, 2])
-+    (1, 2)
-+    >>> vtor.check('tuple(3)', (1, 2))
-+    Traceback (most recent call last):
-+    VdtValueTooShortError: the value "(1, 2)" is too short.
-+    >>> vtor.check('tuple(max=5)', (1, 2, 3, 4, 5, 6))
-+    Traceback (most recent call last):
-+    VdtValueTooLongError: the value "(1, 2, 3, 4, 5, 6)" is too long.
-+    >>> vtor.check('tuple(min=3, max=5)', (1, 2, 3, 4))
-+    (1, 2, 3, 4)
-+    >>> vtor.check('tuple', 0)
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "0" is of the wrong type.
-+    >>> vtor.check('tuple', '12')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "12" is of the wrong type.
-+    """
-+    return tuple(is_list(value, min, max))
-+
-+
-+def is_string(value, min=None, max=None):
-+    """
-+    Check that the supplied value is a string.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    >>> vtor.check('string', '0')
-+    '0'
-+    >>> vtor.check('string', 0)
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "0" is of the wrong type.
-+    >>> vtor.check('string(2)', '12')
-+    '12'
-+    >>> vtor.check('string(2)', '1')
-+    Traceback (most recent call last):
-+    VdtValueTooShortError: the value "1" is too short.
-+    >>> vtor.check('string(min=2, max=3)', '123')
-+    '123'
-+    >>> vtor.check('string(min=2, max=3)', '1234')
-+    Traceback (most recent call last):
-+    VdtValueTooLongError: the value "1234" is too long.
-+    """
-+    if not isinstance(value, StringTypes):
-+        raise VdtTypeError(value)
-+    (min_len, max_len) = _is_num_param(('min', 'max'), (min, max))
-+    try:
-+        num_members = len(value)
-+    except TypeError:
-+        raise VdtTypeError(value)
-+    if min_len is not None and num_members < min_len:
-+        raise VdtValueTooShortError(value)
-+    if max_len is not None and num_members > max_len:
-+        raise VdtValueTooLongError(value)
-+    return value
-+
-+
-+def is_int_list(value, min=None, max=None):
-+    """
-+    Check that the value is a list of integers.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    Each list member is checked that it is an integer.
-+    
-+    >>> vtor.check('int_list', ())
-+    []
-+    >>> vtor.check('int_list', [])
-+    []
-+    >>> vtor.check('int_list', (1, 2))
-+    [1, 2]
-+    >>> vtor.check('int_list', [1, 2])
-+    [1, 2]
-+    >>> vtor.check('int_list', [1, 'a'])
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "a" is of the wrong type.
-+    """
-+    return [is_integer(mem) for mem in is_list(value, min, max)]
-+
-+
-+def is_bool_list(value, min=None, max=None):
-+    """
-+    Check that the value is a list of booleans.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    Each list member is checked that it is a boolean.
-+    
-+    >>> vtor.check('bool_list', ())
-+    []
-+    >>> vtor.check('bool_list', [])
-+    []
-+    >>> check_res = vtor.check('bool_list', (True, False))
-+    >>> check_res == [True, False]
-+    1
-+    >>> check_res = vtor.check('bool_list', [True, False])
-+    >>> check_res == [True, False]
-+    1
-+    >>> vtor.check('bool_list', [True, 'a'])
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "a" is of the wrong type.
-+    """
-+    return [is_boolean(mem) for mem in is_list(value, min, max)]
-+
-+
-+def is_float_list(value, min=None, max=None):
-+    """
-+    Check that the value is a list of floats.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    Each list member is checked that it is a float.
-+    
-+    >>> vtor.check('float_list', ())
-+    []
-+    >>> vtor.check('float_list', [])
-+    []
-+    >>> vtor.check('float_list', (1, 2.0))
-+    [1.0, 2.0]
-+    >>> vtor.check('float_list', [1, 2.0])
-+    [1.0, 2.0]
-+    >>> vtor.check('float_list', [1, 'a'])
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "a" is of the wrong type.
-+    """
-+    return [is_float(mem) for mem in is_list(value, min, max)]
-+
-+
-+def is_string_list(value, min=None, max=None):
-+    """
-+    Check that the value is a list of strings.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    Each list member is checked that it is a string.
-+    
-+    >>> vtor.check('string_list', ())
-+    []
-+    >>> vtor.check('string_list', [])
-+    []
-+    >>> vtor.check('string_list', ('a', 'b'))
-+    ['a', 'b']
-+    >>> vtor.check('string_list', ['a', 1])
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "1" is of the wrong type.
-+    >>> vtor.check('string_list', 'hello')
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "hello" is of the wrong type.
-+    """
-+    if isinstance(value, StringTypes):
-+        raise VdtTypeError(value)
-+    return [is_string(mem) for mem in is_list(value, min, max)]
-+
-+
-+def is_ip_addr_list(value, min=None, max=None):
-+    """
-+    Check that the value is a list of IP addresses.
-+    
-+    You can optionally specify the minimum and maximum number of members.
-+    
-+    Each list member is checked that it is an IP address.
-+    
-+    >>> vtor.check('ip_addr_list', ())
-+    []
-+    >>> vtor.check('ip_addr_list', [])
-+    []
-+    >>> vtor.check('ip_addr_list', ('1.2.3.4', '5.6.7.8'))
-+    ['1.2.3.4', '5.6.7.8']
-+    >>> vtor.check('ip_addr_list', ['a'])
-+    Traceback (most recent call last):
-+    VdtValueError: the value "a" is unacceptable.
-+    """
-+    return [is_ip_addr(mem) for mem in is_list(value, min, max)]
-+
-+
-+fun_dict = {
-+    'integer': is_integer,
-+    'float': is_float,
-+    'ip_addr': is_ip_addr,
-+    'string': is_string,
-+    'boolean': is_boolean,
-+}
-+
-+
-+def is_mixed_list(value, *args):
-+    """
-+    Check that the value is a list.
-+    Allow specifying the type of each member.
-+    Work on lists of specific lengths.
-+    
-+    You specify each member as a positional argument specifying type
-+    
-+    Each type should be one of the following strings :
-+      'integer', 'float', 'ip_addr', 'string', 'boolean'
-+    
-+    So you can specify a list of two strings, followed by
-+    two integers as :
-+    
-+      mixed_list('string', 'string', 'integer', 'integer')
-+    
-+    The length of the list must match the number of positional
-+    arguments you supply.
-+    
-+    >>> mix_str = "mixed_list('integer', 'float', 'ip_addr', 'string', 'boolean')"
-+    >>> check_res = vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', True))
-+    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
-+    1
-+    >>> check_res = vtor.check(mix_str, ('1', '2.0', '1.2.3.4', 'a', 'True'))
-+    >>> check_res == [1, 2.0, '1.2.3.4', 'a', True]
-+    1
-+    >>> vtor.check(mix_str, ('b', 2.0, '1.2.3.4', 'a', True))
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "b" is of the wrong type.
-+    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a'))
-+    Traceback (most recent call last):
-+    VdtValueTooShortError: the value "(1, 2.0, '1.2.3.4', 'a')" is too short.
-+    >>> vtor.check(mix_str, (1, 2.0, '1.2.3.4', 'a', 1, 'b'))
-+    Traceback (most recent call last):
-+    VdtValueTooLongError: the value "(1, 2.0, '1.2.3.4', 'a', 1, 'b')" is too long.
-+    >>> vtor.check(mix_str, 0)
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "0" is of the wrong type.
-+    
-+    This test requires an elaborate setup, because of a change in error string
-+    output from the interpreter between Python 2.2 and 2.3 .
-+    
-+    >>> res_seq = (
-+    ...     'passed an incorrect value "',
-+    ...     'yoda',
-+    ...     '" for parameter "mixed_list".',
-+    ... )
-+    >>> if INTP_VER == (2, 2):
-+    ...     res_str = "".join(res_seq)
-+    ... else:
-+    ...     res_str = "'".join(res_seq)
-+    >>> try:
-+    ...     vtor.check('mixed_list("yoda")', ('a'))
-+    ... except VdtParamError, err:
-+    ...     str(err) == res_str
-+    1
-+    """
-+    try:
-+        length = len(value)
-+    except TypeError:
-+        raise VdtTypeError(value)
-+    if length < len(args):
-+        raise VdtValueTooShortError(value)
-+    elif length > len(args):
-+        raise VdtValueTooLongError(value)
-+    try:
-+        return [fun_dict[arg](val) for arg, val in zip(args, value)]
-+    except KeyError, e:
-+        raise VdtParamError('mixed_list', e)
-+
-+
-+def is_option(value, *options):
-+    """
-+    This check matches the value to any of a set of options.
-+    
-+    >>> vtor.check('option("yoda", "jedi")', 'yoda')
-+    'yoda'
-+    >>> vtor.check('option("yoda", "jedi")', 'jed')
-+    Traceback (most recent call last):
-+    VdtValueError: the value "jed" is unacceptable.
-+    >>> vtor.check('option("yoda", "jedi")', 0)
-+    Traceback (most recent call last):
-+    VdtTypeError: the value "0" is of the wrong type.
-+    """
-+    if not isinstance(value, StringTypes):
-+        raise VdtTypeError(value)
-+    if not value in options:
-+        raise VdtValueError(value)
-+    return value
-+
-+
-+def _test(value, *args, **keywargs):
-+    """
-+    A function that exists for test purposes.
-+    
-+    >>> checks = [
-+    ...     '3, 6, min=1, max=3, test=list(a, b, c)',
-+    ...     '3',
-+    ...     '3, 6',
-+    ...     '3,',
-+    ...     'min=1, test="a b c"',
-+    ...     'min=5, test="a, b, c"',
-+    ...     'min=1, max=3, test="a, b, c"',
-+    ...     'min=-100, test=-99',
-+    ...     'min=1, max=3',
-+    ...     '3, 6, test="36"',
-+    ...     '3, 6, test="a, b, c"',
-+    ...     '3, max=3, test=list("a", "b", "c")',
-+    ...     '''3, max=3, test=list("'a'", 'b', "x=(c)")''',
-+    ...     "test='x=fish(3)'",
-+    ...    ]
-+    >>> v = Validator({'test': _test})
-+    >>> for entry in checks:
-+    ...     print v.check(('test(%s)' % entry), 3)
-+    (3, ('3', '6'), {'test': ['a', 'b', 'c'], 'max': '3', 'min': '1'})
-+    (3, ('3',), {})
-+    (3, ('3', '6'), {})
-+    (3, ('3',), {})
-+    (3, (), {'test': 'a b c', 'min': '1'})
-+    (3, (), {'test': 'a, b, c', 'min': '5'})
-+    (3, (), {'test': 'a, b, c', 'max': '3', 'min': '1'})
-+    (3, (), {'test': '-99', 'min': '-100'})
-+    (3, (), {'max': '3', 'min': '1'})
-+    (3, ('3', '6'), {'test': '36'})
-+    (3, ('3', '6'), {'test': 'a, b, c'})
-+    (3, ('3',), {'test': ['a', 'b', 'c'], 'max': '3'})
-+    (3, ('3',), {'test': ["'a'", 'b', 'x=(c)'], 'max': '3'})
-+    (3, (), {'test': 'x=fish(3)'})
-+    
-+    >>> v = Validator()
-+    >>> v.check('integer(default=6)', '3')
-+    3
-+    >>> v.check('integer(default=6)', None, True)
-+    6
-+    >>> v.get_default_value('integer(default=6)')
-+    6
-+    >>> v.get_default_value('float(default=6)')
-+    6.0
-+    >>> v.get_default_value('pass(default=None)')
-+    >>> v.get_default_value("string(default='None')")
-+    'None'
-+    >>> v.get_default_value('pass')
-+    Traceback (most recent call last):
-+    KeyError: 'Check "pass" has no default value.'
-+    >>> v.get_default_value('pass(default=list(1, 2, 3, 4))')
-+    ['1', '2', '3', '4']
-+    
-+    >>> v = Validator()
-+    >>> v.check("pass(default=None)", None, True)
-+    >>> v.check("pass(default='None')", None, True)
-+    'None'
-+    >>> v.check('pass(default="None")', None, True)
-+    'None'
-+    >>> v.check('pass(default=list(1, 2, 3, 4))', None, True)
-+    ['1', '2', '3', '4']
-+    
-+    Bug test for unicode arguments
-+    >>> v = Validator()
-+    >>> v.check(u'string(min=4)', u'test')
-+    u'test'
-+    
-+    >>> v = Validator()
-+    >>> v.get_default_value(u'string(min=4, default="1234")')
-+    u'1234'
-+    >>> v.check(u'string(min=4, default="1234")', u'test')
-+    u'test'
-+    
-+    >>> v = Validator()
-+    >>> default = v.get_default_value('string(default=None)')
-+    >>> default == None
-+    1
-+    """
-+    return (value, args, keywargs)
-+
-+
-+if __name__ == '__main__':
-+    # run the code tests in doctest format
-+    import doctest
-+    m = sys.modules.get('__main__')
-+    globs = m.__dict__.copy()
-+    globs.update({
-+        'INTP_VER': INTP_VER,
-+        'vtor': Validator(),
-+    })
-+    doctest.testmod(m, globs=globs)
 Index: ipython-0.10/setupbase.py
 ===================================================================
 --- ipython-0.10.orig/setupbase.py
diff --git a/ipython.spec b/ipython.spec
index a42ec5f..dadde6b 100644
--- a/ipython.spec
+++ b/ipython.spec
@@ -3,8 +3,8 @@
 %endif
 
 Name:           ipython
-Version:        0.10
-Release:        8%{?dist}
+Version:        0.10.1
+Release:        1%{?dist}
 Summary:        An enhanced interactive Python shell
 
 Group:          Development/Libraries
@@ -89,7 +89,29 @@ This package contains the gui of %{name}, which requires wxPython.
 
 %prep
 %setup -q
+
 %patch0 -p1
+# help with unbundling (don't use diffs to move files around)
+pushd IPython/external
+mkdir argparse
+mv argparse.py argparse/_argparse.py
+mkdir configobj
+mv configobj.py configobj/_configobj.py
+mkdir guid
+mv guid.py guid/_guid.py
+mkdir Itpl
+mv Itpl.py Itpl/_Itpl.py
+mkdir mglob
+mv mglob.py mglob/_mglob.py
+mkdir path
+mv path.py path/_path.py
+mkdir pretty
+mv pretty.py pretty/_pretty.py
+mkdir simplegeneric
+mv simplegeneric.py simplegeneric/_simplegeneric.py
+mkdir validate
+mv validate.py validate/_validate.py
+popd
 %patch1 -p1
 %patch2 -p1
 
@@ -207,6 +229,10 @@ rm -rf %{buildroot}
 
 
 %changelog
+* Wed Oct 13 2010 Thomas Spura <tomspur at fedoraproject.org> - 0.10.1-1
+- unbundle a bit differently
+- update to new version
+
 * Tue Aug 31 2010 Thomas Spura <tomspur at fedoraproject.org> - 0.10-8
 - pycolor: wrong filename -> no crash (#628742)
 
diff --git a/sources b/sources
index 90ba8b5..b44f71b 100644
--- a/sources
+++ b/sources
@@ -1 +1 @@
-dd10cd1b622c16c1afca2239fcc0dfdf  ipython-0.10.tar.gz
+54ae47079b0e9a0998593a99ce76ec1f  ipython-0.10.1.tar.gz
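
Note on the new unbundling layout shown in %prep above: each bundled helper
(argparse.py, configobj.py, validate.py, and so on) is now moved into a
sub-package as foo/_foo.py by plain mv commands instead of carrying those
renames inside the unbundle patch. The sketch below is a hypothetical example
of the kind of __init__.py shim the remaining patches would then place next to
the renamed file, so the system-wide module is preferred and the bundled copy
is only a fallback. The exact shim applied by %patch1/%patch2 is not part of
this diff, so the file name and structure here are assumptions, not the
packaged code.

    # Hypothetical IPython/external/argparse/__init__.py (illustration only;
    # the real shim comes from the remaining patches, not this diff).
    # Prefer the system argparse module; fall back to the bundled copy that
    # %prep renamed to _argparse.py. Python 2-era implicit relative import,
    # matching the code base this spec targets.
    try:
        from argparse import *        # system module, if installed
    except ImportError:
        from _argparse import *       # bundled fallback kept in the tree

Doing the renames in %prep keeps the unbundle patch itself small, so a version
bump only has to refresh the shims rather than regenerate a huge file-moving
diff.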

