parent 4e7e3a39d2
commit 645952c61a
@@ -1,797 +0,0 @@
"""Configuration file parser.

A setup file consists of sections, led by a "[section]" header,
and followed by "name: value" entries, with continuations and such in
the style of RFC 822.

The option values can contain format strings which refer to other values in
the same section, or values in a special [DEFAULT] section.

For example:

    something: %(dir)s/whatever

would resolve the "%(dir)s" to the value of dir.  All reference
expansions are done late, on demand.

Intrinsic defaults can be specified by passing them into the
ConfigParser constructor as a dictionary.

class:

ConfigParser -- responsible for parsing a list of
                configuration files, and managing the parsed database.

    methods:

    __init__(defaults=None)
        create the parser and specify a dictionary of intrinsic defaults.  The
        keys must be strings, the values must be appropriate for %()s string
        interpolation.  Note that `__name__' is always an intrinsic default;
        its value is the section's name.

    sections()
        return all the configuration section names, sans DEFAULT

    has_section(section)
        return whether the given section exists

    has_option(section, option)
        return whether the given option exists in the given section

    options(section)
        return list of configuration options for the named section

    read(filenames)
        read and parse the list of named configuration files, given by
        name.  A single filename is also allowed.  Non-existing files
        are ignored.  Return list of successfully read files.

    readfp(fp, filename=None)
        read and parse one configuration file, given as a file object.
        The filename defaults to fp.name; it is only used in error
        messages (if fp has no `name' attribute, the string `<???>' is used).

    get(section, option, raw=False, vars=None)
        return a string value for the named option.  All % interpolations are
        expanded in the return values, based on the defaults passed into the
        constructor and the DEFAULT section.  Additional substitutions may be
        provided using the `vars' argument, which must be a dictionary whose
        contents override any pre-existing defaults.

    getint(section, option)
        like get(), but convert value to an integer

    getfloat(section, option)
        like get(), but convert value to a float

    getboolean(section, option)
        like get(), but convert value to a boolean (currently case
        insensitively defined as 0, false, no, off for False, and 1, true,
        yes, on for True).  Returns False or True.

    items(section, raw=False, vars=None)
        return a list of tuples with (name, value) for each option
        in the section.

    remove_section(section)
        remove the given file section and all its options

    remove_option(section, option)
        remove the given option from the given section

    set(section, option, value)
        set the given option

    write(fp)
        write the configuration state in .ini format
"""

try:
    from collections import OrderedDict as _default_dict
except ImportError:
    # fallback for setup.py which hasn't yet built _collections
    _default_dict = dict

import re

__all__ = ["NoSectionError", "DuplicateSectionError", "NoOptionError",
           "InterpolationError", "InterpolationDepthError",
           "InterpolationSyntaxError", "ParsingError",
           "MissingSectionHeaderError",
           "ConfigParser", "SafeConfigParser", "RawConfigParser",
           "DEFAULTSECT", "MAX_INTERPOLATION_DEPTH"]

DEFAULTSECT = "DEFAULT"

MAX_INTERPOLATION_DEPTH = 10



# exception classes
class Error(Exception):
    """Base class for ConfigParser exceptions."""

    def _get_message(self):
        """Getter for 'message'; needed only to override deprecation in
        BaseException."""
        return self.__message

    def _set_message(self, value):
        """Setter for 'message'; needed only to override deprecation in
        BaseException."""
        self.__message = value

    # BaseException.message has been deprecated since Python 2.6.  To prevent
    # DeprecationWarning from popping up over this pre-existing attribute, use
    # a new property that takes lookup precedence.
    message = property(_get_message, _set_message)

    def __init__(self, msg=''):
        self.message = msg
        Exception.__init__(self, msg)

    def __repr__(self):
        return self.message

    __str__ = __repr__

class NoSectionError(Error):
    """Raised when no section matches a requested option."""

    def __init__(self, section):
        Error.__init__(self, 'No section: %r' % (section,))
        self.section = section
        self.args = (section, )

class DuplicateSectionError(Error):
    """Raised when a section is multiply-created."""

    def __init__(self, section):
        Error.__init__(self, "Section %r already exists" % section)
        self.section = section
        self.args = (section, )

class NoOptionError(Error):
    """A requested option was not found."""

    def __init__(self, option, section):
        Error.__init__(self, "No option %r in section: %r" %
                       (option, section))
        self.option = option
        self.section = section
        self.args = (option, section)

class InterpolationError(Error):
    """Base class for interpolation-related exceptions."""

    def __init__(self, option, section, msg):
        Error.__init__(self, msg)
        self.option = option
        self.section = section
        self.args = (option, section, msg)

class InterpolationMissingOptionError(InterpolationError):
    """A string substitution required a setting which was not available."""

    def __init__(self, option, section, rawval, reference):
        msg = ("Bad value substitution:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\tkey    : %s\n"
               "\trawval : %s\n"
               % (section, option, reference, rawval))
        InterpolationError.__init__(self, option, section, msg)
        self.reference = reference
        self.args = (option, section, rawval, reference)

class InterpolationSyntaxError(InterpolationError):
    """Raised when the source text into which substitutions are made
    does not conform to the required syntax."""

class InterpolationDepthError(InterpolationError):
    """Raised when substitutions are nested too deeply."""

    def __init__(self, option, section, rawval):
        msg = ("Value interpolation too deeply recursive:\n"
               "\tsection: [%s]\n"
               "\toption : %s\n"
               "\trawval : %s\n"
               % (section, option, rawval))
        InterpolationError.__init__(self, option, section, msg)
        self.args = (option, section, rawval)

class ParsingError(Error):
    """Raised when a configuration file does not follow legal syntax."""

    def __init__(self, filename):
        Error.__init__(self, 'File contains parsing errors: %s' % filename)
        self.filename = filename
        self.errors = []
        self.args = (filename, )

    def append(self, lineno, line):
        self.errors.append((lineno, line))
        self.message += '\n\t[line %2d]: %s' % (lineno, line)

class MissingSectionHeaderError(ParsingError):
    """Raised when a key-value pair is found before any section header."""

    def __init__(self, filename, lineno, line):
        Error.__init__(
            self,
            'File contains no section headers.\nfile: %s, line: %d\n%r' %
            (filename, lineno, line))
        self.filename = filename
        self.lineno = lineno
        self.line = line
        self.args = (filename, lineno, line)


class RawConfigParser:
    def __init__(self, defaults=None, dict_type=_default_dict,
                 allow_no_value=False):
        self._dict = dict_type
        self._sections = self._dict()
        self._defaults = self._dict()
        if allow_no_value:
            self._optcre = self.OPTCRE_NV
        else:
            self._optcre = self.OPTCRE
        if defaults:
            for key, value in defaults.items():
                self._defaults[self.optionxform(key)] = value
        self.comment_store = None  ## used for storing comments in ini


    def defaults(self):
        return self._defaults

    def sections(self):
        """Return a list of section names, excluding [DEFAULT]"""
        # self._sections will never have [DEFAULT] in it
        return self._sections.keys()

    def add_section(self, section):
        """Create a new section in the configuration.

        Raise DuplicateSectionError if a section by the specified name
        already exists. Raise ValueError if name is DEFAULT or any of its
        case-insensitive variants.
        """
        if section.lower() == "default":
            raise ValueError, 'Invalid section name: %s' % section

        if section in self._sections:
            raise DuplicateSectionError(section)
        self._sections[section] = self._dict()

    def has_section(self, section):
        """Indicate whether the named section is present in the configuration.

        The DEFAULT section is not acknowledged.
        """
        return section in self._sections

    def options(self, section):
        """Return a list of option names for the given section name."""
        try:
            opts = self._sections[section].copy()
        except KeyError:
            raise NoSectionError(section)
        opts.update(self._defaults)
        if '__name__' in opts:
            del opts['__name__']
        return opts.keys()

    def read(self, filenames):
        """Read and parse a filename or a list of filenames.

        Files that cannot be opened are silently ignored; this is
        designed so that you can specify a list of potential
        configuration file locations (e.g. current directory, user's
        home directory, systemwide directory), and all existing
        configuration files in the list will be read.  A single
        filename may also be given.

        Return list of successfully read files.
        """
        if isinstance(filenames, basestring):
            filenames = [filenames]
        read_ok = []
        for filename in filenames:
            try:
                fp = open(filename)
            except IOError:
                continue
            self._read(fp, filename)
            fp.close()
            read_ok.append(filename)
        return read_ok

    def readfp(self, fp, filename=None):
        """Like read() but the argument must be a file-like object.

        The `fp' argument must have a `readline' method.  Optional
        second argument is the `filename', which if not given, is
        taken from fp.name.  If fp has no `name' attribute, `<???>' is
        used.
        """
        if filename is None:
            try:
                filename = fp.name
            except AttributeError:
                filename = '<???>'
        self._read(fp, filename)

    def get(self, section, option):
        opt = self.optionxform(option)
        if section not in self._sections:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            if opt in self._defaults:
                return self._defaults[opt]
            else:
                raise NoOptionError(option, section)
        elif opt in self._sections[section]:
            return self._sections[section][opt]
        elif opt in self._defaults:
            return self._defaults[opt]
        else:
            raise NoOptionError(option, section)

    def items(self, section):
        try:
            d2 = self._sections[section]
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
            d2 = self._dict()
        d = self._defaults.copy()
        d.update(d2)
        if "__name__" in d:
            del d["__name__"]
        return d.items()

    def _get(self, section, conv, option):
        return conv(self.get(section, option))

    def getint(self, section, option):
        return self._get(section, int, option)

    def getfloat(self, section, option):
        return self._get(section, float, option)

    _boolean_states = {'1': True, 'yes': True, 'true': True, 'on': True,
                       '0': False, 'no': False, 'false': False, 'off': False}

    def getboolean(self, section, option):
        v = self.get(section, option)
        if v.lower() not in self._boolean_states:
            raise ValueError, 'Not a boolean: %s' % v
        return self._boolean_states[v.lower()]

    def optionxform(self, optionstr):
        return optionstr.lower()

    def has_option(self, section, option):
        """Check for the existence of a given option in a given section."""
        if not section or section == DEFAULTSECT:
            option = self.optionxform(option)
            return option in self._defaults
        elif section not in self._sections:
            return False
        else:
            option = self.optionxform(option)
            return (option in self._sections[section]
                    or option in self._defaults)

    def set(self, section, option, value=None):
        """Set an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        sectdict[self.optionxform(option)] = value

    def write(self, fp):
        """Write an .ini-format representation of the configuration state."""
        if self._defaults:
            fp.write("[%s]\n" % DEFAULTSECT)
            for (key, value) in self._defaults.items():
                fp.write("%s = %s\n" % (key, str(value).replace('\n', '\n\t')))
            fp.write("\n")
        for section in self._sections:
            fp.write("[%s]\n" % section)
            for (key, value) in self._sections[section].items():
                if key == "__name__":
                    continue
                if (value is not None) or (self._optcre == self.OPTCRE):
                    key = " = ".join((key, str(value).replace('\n', '\n\t')))
                fp.write("%s\n" % (key))
            fp.write("\n")

    def remove_option(self, section, option):
        """Remove an option."""
        if not section or section == DEFAULTSECT:
            sectdict = self._defaults
        else:
            try:
                sectdict = self._sections[section]
            except KeyError:
                raise NoSectionError(section)
        option = self.optionxform(option)
        existed = option in sectdict
        if existed:
            del sectdict[option]
        return existed

    def remove_section(self, section):
        """Remove a file section."""
        existed = section in self._sections
        if existed:
            del self._sections[section]
        return existed

    #
    # Regular expressions for parsing section headers and options.
    #
    SECTCRE = re.compile(
        r'\['                                 # [
        r'(?P<header>[^]]+)'                  # very permissive!
        r'\]'                                 # ]
        )
    OPTCRE = re.compile(
        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
        r'\s*(?P<vi>[:=])\s*'                 # any number of space/tab,
                                              # followed by separator
                                              # (either : or =), followed
                                              # by any # space/tab
        r'(?P<value>.*)$'                     # everything up to eol
        )
    OPTCRE_NV = re.compile(
        r'(?P<option>[^:=\s][^:=]*)'          # very permissive!
        r'\s*(?:'                             # any number of space/tab,
        r'(?P<vi>[:=])\s*'                    # optionally followed by
                                              # separator (either : or
                                              # =), followed by any #
                                              # space/tab
        r'(?P<value>.*))?$'                   # everything up to eol
        )

    def _read(self, fp, fpname):
        """Parse a sectioned setup file.

        The sections in setup file contains a title line at the top,
        indicated by a name in square brackets (`[]'), plus key/value
        options lines, indicated by `name: value' format lines.
        Continuations are represented by an embedded newline then
        leading whitespace.  Blank lines, lines beginning with a '#',
        and just about everything else are ignored.
        """
        comment_store = {}
        cursect = None                        # None, or a dictionary
        optname = None
        lineno = 0
        e = None                              # None, or an exception
        while True:
            line = fp.readline()
            if not line:
                break
            lineno = lineno + 1
            # comment or blank line?
            if line.strip() == '':
                continue
            ### store comments for doc purposes
            ### Deal with cases of sections and options being there or not
            if line[0] in '#;' and cursect is not None:
                if optname is None:
                    comment_store.setdefault(cursect['__name__'] +
                                             "::" + "global", []).append(line)
                else:
                    comment_store.setdefault(cursect['__name__'] +
                                             "::" + optname, []).append(line)
                continue
            elif line[0] in '#;' and cursect is None:
                # no section seen yet, so no optname either; file the
                # comment under the global pseudo-section
                comment_store.setdefault("global" +
                                         "::" + "global", []).append(line)
                continue

            if line.split(None, 1)[0].lower() == 'rem' and line[0] in "rR":
                # no leading whitespace
                continue
            # continuation line?
            if line[0].isspace() and cursect is not None and optname:
                value = line.strip()
                if value:
                    cursect[optname].append(value)
            # a section header or option header?
            else:
                # is it a section header?
                mo = self.SECTCRE.match(line)
                if mo:
                    sectname = mo.group('header')
                    if sectname in self._sections:
                        cursect = self._sections[sectname]
                    elif sectname == DEFAULTSECT:
                        cursect = self._defaults
                    else:
                        cursect = self._dict()
                        cursect['__name__'] = sectname
                        self._sections[sectname] = cursect
                    # So sections can't start with a continuation line
                    optname = None
                # no section header in the file?
                elif cursect is None:
                    raise MissingSectionHeaderError(fpname, lineno, line)
                # an option line?
                else:
                    mo = self._optcre.match(line)
                    if mo:
                        optname, vi, optval = mo.group('option', 'vi', 'value')
                        optname = self.optionxform(optname.rstrip())
                        # This check is fine because the OPTCRE cannot
                        # match if it would set optval to None
                        if optval is not None:
                            if vi in ('=', ':') and ';' in optval:
                                # ';' is a comment delimiter only if it follows
                                # a spacing character
                                pos = optval.find(';')
                                if pos != -1 and optval[pos-1].isspace():
                                    optval = optval[:pos]
                            optval = optval.strip()
                            # allow empty values
                            if optval == '""':
                                optval = ''
                            cursect[optname] = [optval]
                        else:
                            # valueless option handling
                            cursect[optname] = optval
                    else:
                        # a non-fatal parsing error occurred.  set up the
                        # exception but keep going. the exception will be
                        # raised at the end of the file and will contain a
                        # list of all bogus lines
                        if not e:
                            e = ParsingError(fpname)
                        e.append(lineno, repr(line))
        # if any parsing errors occurred, raise an exception
        if e:
            raise e

        # join the multi-line values collected while reading
        all_sections = [self._defaults]
        all_sections.extend(self._sections.values())
        for options in all_sections:
            for name, val in options.items():
                if isinstance(val, list):
                    options[name] = '\n'.join(val)
        self.comment_store = comment_store

    def ini_as_rst(self):
        """Trivial helper function to output comment_store as reST.

        .. todo:: write actual doctests with string input

        >> p = ConfigParser2.SafeConfigParser()
        >> p.read(f)
        ['/usr/home/pbrian/src/public/configparser2/example.ini']
        >> open("/tmp/foo.rst", "w").write(p.ini_as_rst())

        """
        outstr = ".. rst version of ini file\n\n"
        _cursectname = None
        for item in sorted(self.comment_store.keys()):
            _sect, _opt = item.split("::")
            if _sect != _cursectname:
                outstr += "\n%s\n%s\n" % (_sect, "-" * len(_sect))
                _cursectname = _sect
            txt = " ".join(self.comment_store[item])
            txt = txt.replace("#", "").replace(";", "")
            outstr += ":%s: %s" % (_opt, txt)
        return outstr



import UserDict as _UserDict

class _Chainmap(_UserDict.DictMixin):
    """Combine multiple mappings for successive lookups.

    For example, to emulate Python's normal lookup sequence:

        import __builtin__
        pylookup = _Chainmap(locals(), globals(), vars(__builtin__))
    """

    def __init__(self, *maps):
        self._maps = maps

    def __getitem__(self, key):
        for mapping in self._maps:
            try:
                return mapping[key]
            except KeyError:
                pass
        raise KeyError(key)

    def keys(self):
        result = []
        seen = set()
        for mapping in self._maps:
            for key in mapping:
                if key not in seen:
                    result.append(key)
                    seen.add(key)
        return result

class ConfigParser(RawConfigParser):

    def get(self, section, option, raw=False, vars=None):
        """Get an option value for a given section.

        If `vars' is provided, it must be a dictionary.  The option is looked up
        in `vars' (if provided), `section', and in `defaults' in that order.

        All % interpolations are expanded in the return values, unless the
        optional argument `raw' is true.  Values for interpolation keys are
        looked up in the same manner as the option.

        The section DEFAULT is special.
        """
        sectiondict = {}
        try:
            sectiondict = self._sections[section]
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        vardict = {}
        if vars:
            for key, value in vars.items():
                vardict[self.optionxform(key)] = value
        d = _Chainmap(vardict, sectiondict, self._defaults)
        option = self.optionxform(option)
        try:
            value = d[option]
        except KeyError:
            raise NoOptionError(option, section)

        if raw or value is None:
            return value
        else:
            return self._interpolate(section, option, value, d)

    def items(self, section, raw=False, vars=None):
        """Return a list of tuples with (name, value) for each option
        in the section.

        All % interpolations are expanded in the return values, based on the
        defaults passed into the constructor, unless the optional argument
        `raw' is true.  Additional substitutions may be provided using the
        `vars' argument, which must be a dictionary whose contents overrides
        any pre-existing defaults.

        The section DEFAULT is special.
        """
        d = self._defaults.copy()
        try:
            d.update(self._sections[section])
        except KeyError:
            if section != DEFAULTSECT:
                raise NoSectionError(section)
        # Update with the entry specific variables
        if vars:
            for key, value in vars.items():
                d[self.optionxform(key)] = value
        options = d.keys()
        if "__name__" in options:
            options.remove("__name__")
        if raw:
            return [(option, d[option])
                    for option in options]
        else:
            return [(option, self._interpolate(section, option, d[option], d))
                    for option in options]

    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        value = rawval
        depth = MAX_INTERPOLATION_DEPTH
        while depth:                    # Loop through this until it's done
            depth -= 1
            if value and "%(" in value:
                value = self._KEYCRE.sub(self._interpolation_replace, value)
                try:
                    value = value % vars
                except KeyError, e:
                    raise InterpolationMissingOptionError(
                        option, section, rawval, e.args[0])
            else:
                break
        if value and "%(" in value:
            raise InterpolationDepthError(option, section, rawval)
        return value

    _KEYCRE = re.compile(r"%\(([^)]*)\)s|.")

    def _interpolation_replace(self, match):
        s = match.group(1)
        if s is None:
            return match.group()
        else:
            return "%%(%s)s" % self.optionxform(s)


class SafeConfigParser(ConfigParser):

    def _interpolate(self, section, option, rawval, vars):
        # do the string interpolation
        L = []
        self._interpolate_some(option, L, rawval, section, vars, 1)
        return ''.join(L)

    _interpvar_re = re.compile(r"%\(([^)]+)\)s")

    def _interpolate_some(self, option, accum, rest, section, map, depth):
        if depth > MAX_INTERPOLATION_DEPTH:
            raise InterpolationDepthError(option, section, rest)
        while rest:
            p = rest.find("%")
            if p < 0:
                accum.append(rest)
                return
            if p > 0:
                accum.append(rest[:p])
                rest = rest[p:]
            # p is no longer used
            c = rest[1:2]
            if c == "%":
                accum.append("%")
                rest = rest[2:]
            elif c == "(":
                m = self._interpvar_re.match(rest)
                if m is None:
                    raise InterpolationSyntaxError(option, section,
                        "bad interpolation variable reference %r" % rest)
                var = self.optionxform(m.group(1))
                rest = rest[m.end():]
                try:
                    v = map[var]
                except KeyError:
                    raise InterpolationMissingOptionError(
                        option, section, rest, var)
                if "%" in v:
                    self._interpolate_some(option, accum, v,
                                           section, map, depth + 1)
                else:
                    accum.append(v)
            else:
                raise InterpolationSyntaxError(
                    option, section,
                    "'%%' must be followed by '%%' or '(', found: %r" % (rest,))

    def set(self, section, option, value=None):
        """Set an option.  Extend ConfigParser.set: check for string values."""
        # The only legal non-string value if we allow valueless
        # options is None, so we need to check if the value is a
        # string if:
        # - we do not allow valueless options, or
        # - we allow valueless options but the value is not None
        if self._optcre is self.OPTCRE or value:
            if not isinstance(value, basestring):
                raise TypeError("option values must be strings")
        if value is not None:
            # check for bad percent signs:
            # first, replace all "good" interpolations
            tmp_value = value.replace('%%', '')
            tmp_value = self._interpvar_re.sub('', tmp_value)
            # then, check if there's a lone percent sign left
            if '%' in tmp_value:
                raise ValueError("invalid interpolation syntax in %r at "
                                 "position %d" % (value, tmp_value.find('%')))
        ConfigParser.set(self, section, option, value)
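
A short driver makes the deleted parser above easier to follow. The sketch
below is illustrative only, not part of the commit: the ini content, the
section and option names, and the "example.ini" label are made up, and the
module is assumed to be importable under the name ConfigParser that this
file carries. It exercises the %(dir)s interpolation described in the module
docstring plus the comment_store / ini_as_rst additions (Python 2, matching
the file):

    # Illustrative sketch only (Python 2); all names here are hypothetical.
    from StringIO import StringIO
    import ConfigParser   # the module deleted above

    ini = StringIO("[DEFAULT]\n"
                   "dir: /var/data\n"
                   "[paths]\n"
                   "; where output files go\n"
                   "something: %(dir)s/whatever\n")

    p = ConfigParser.SafeConfigParser()
    p.readfp(ini, "example.ini")
    print p.get("paths", "something")             # /var/data/whatever
    print p.get("paths", "something", raw=True)   # %(dir)s/whatever
    print p.ini_as_rst()   # reST view built from the stored ';' comment

The ';' comment lands in comment_store under the key "paths::global"
because it appears after the [paths] header but before any option.
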
@@ -1,43 +0,0 @@
Behold, mortal, the origins of Beautiful Soup...
================================================

Leonard Richardson is the primary programmer.

Aaron DeVore is awesome.

Mark Pilgrim provided the encoding detection code that forms the base
of UnicodeDammit.

Thomas Kluyver and Ezio Melotti finished the work of getting Beautiful
Soup 4 working under Python 3.

Simon Willison wrote soupselect, which was used to make Beautiful Soup
support CSS selectors.

Sam Ruby helped with a lot of edge cases.

Jonathan Ellis was awarded the prestigious Beau Potage D'Or for his
work in solving the nestable tags conundrum.

An incomplete list of people who have contributed patches to Beautiful
Soup:

 Istvan Albert, Andrew Lin, Anthony Baxter, Andrew Boyko, Tony Chang,
 Zephyr Fang, Fuzzy, Roman Gaufman, Yoni Gilad, Richie Hindle, Peteris
 Krumins, Kent Johnson, Ben Last, Robert Leftwich, Staffan Malmgren,
 Ksenia Marasanova, JP Moins, Adam Monsen, John Nagle, "Jon", Ed
 Oskiewicz, Greg Phillips, Giles Radford, Arthur Rudolph, Marko
 Samastur, Jouni Seppänen, Alexander Schmolck, Andy Theyers, Glyn
 Webster, Paul Wright, Danny Yoo

An incomplete list of people who made suggestions or found bugs or
found ways to break Beautiful Soup:

 Hanno Böck, Matteo Bertini, Chris Curvey, Simon Cusack, Bruce Eckel,
 Matt Ernst, Michael Foord, Tom Harris, Bill de hOra, Donald Howes,
 Matt Patterson, Scott Roberts, Steve Strassmann, Mike Williams,
 warchild at redho dot com, Sami Kuisma, Carlos Rocha, Bob Hutchison,
 Joren Mc, Michal Migurski, John Kleven, Tim Heaney, Tripp Lilley, Ed
 Summers, Dennis Sutch, Chris Smith, Aaron Sweep^W Swartz, Stuart
 Turner, Greg Edwards, Kevin J Kalupson, Nikos Kouremenos, Artur de
 Sousa Rocha, Yichun Wei, Per Vognsen
@@ -1,27 +0,0 @@
Beautiful Soup is made available under the MIT license:

 Copyright (c) 2004-2015 Leonard Richardson

 Permission is hereby granted, free of charge, to any person obtaining
 a copy of this software and associated documentation files (the
 "Software"), to deal in the Software without restriction, including
 without limitation the rights to use, copy, modify, merge, publish,
 distribute, sublicense, and/or sell copies of the Software, and to
 permit persons to whom the Software is furnished to do so, subject to
 the following conditions:

 The above copyright notice and this permission notice shall be
 included in all copies or substantial portions of the Software.

 THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 SOFTWARE.

Beautiful Soup incorporates code from the html5lib library, which is
also made available under the MIT license. Copyright (c) 2006-2013
James Graham and other contributors
File diff suppressed because it is too large
@@ -1,63 +0,0 @@
= Introduction =

  >>> from bs4 import BeautifulSoup
  >>> soup = BeautifulSoup("<p>Some<b>bad<i>HTML")
  >>> print soup.prettify()
  <html>
   <body>
    <p>
     Some
     <b>
      bad
      <i>
       HTML
      </i>
     </b>
    </p>
   </body>
  </html>
  >>> soup.find(text="bad")
  u'bad'

  >>> soup.i
  <i>HTML</i>

  >>> soup = BeautifulSoup("<tag1>Some<tag2/>bad<tag3>XML", "xml")
  >>> print soup.prettify()
  <?xml version="1.0" encoding="utf-8"?>
  <tag1>
   Some
   <tag2 />
   bad
   <tag3>
    XML
   </tag3>
  </tag1>

= Full documentation =

The bs4/doc/ directory contains full documentation in Sphinx
format. Run "make html" in that directory to create HTML
documentation.

= Running the unit tests =

Beautiful Soup supports unit test discovery from the project root directory:

  $ nosetests

  $ python -m unittest discover -s bs4 # Python 2.7 and up

If you checked out the source tree, you should see a script in the
home directory called test-all-versions. This script will run the unit
tests under Python 2.7, then create a temporary Python 3 conversion of
the source and run the unit tests again under Python 3.

= Links =

Homepage: http://www.crummy.com/software/BeautifulSoup/bs4/
Documentation: http://www.crummy.com/software/BeautifulSoup/bs4/doc/
  http://readthedocs.org/docs/beautiful-soup-4/
Discussion group: http://groups.google.com/group/beautifulsoup/
Development: https://code.launchpad.net/beautifulsoup/
Bug tracker: https://bugs.launchpad.net/beautifulsoup/
@@ -1,31 +0,0 @@
Additions
---------

More of the jQuery API: nextUntil?

Optimizations
-------------

The html5lib tree builder doesn't use the standard tree-building API,
which worries me and has resulted in a number of bugs.

markup_attr_map can be optimized since it's always a map now.

Upon encountering UTF-16LE data or some other uncommon serialization
of Unicode, UnicodeDammit will convert the data to Unicode, then
encode it as UTF-8. This is wasteful because it will just get decoded
back to Unicode.

CDATA
-----

The elementtree XMLParser has a strip_cdata argument that, when set to
False, should allow Beautiful Soup to preserve CDATA sections instead
of treating them as text. Except it doesn't. (This argument is also
present for HTMLParser, and also does nothing there.)

Currently, html5lib converts CDATA sections into comments. An
as-yet-unreleased version of html5lib changes the parser's handling of
CDATA sections to allow CDATA sections in tags like <svg> and
<math>. The HTML5TreeBuilder will need to be updated to create CData
objects instead of Comment objects in this situation.
File diff suppressed because it is too large
@@ -1,3 +0,0 @@
from pkgutil import extend_path

__path__ = extend_path(__path__, __name__)
@@ -1,23 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Execute computations asynchronously using threads or processes."""

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

from concurrent.futures._base import (FIRST_COMPLETED,
                                      FIRST_EXCEPTION,
                                      ALL_COMPLETED,
                                      CancelledError,
                                      TimeoutError,
                                      Future,
                                      Executor,
                                      wait,
                                      as_completed)
from concurrent.futures.thread import ThreadPoolExecutor

try:
    from concurrent.futures.process import ProcessPoolExecutor
except ImportError:
    # some platforms don't have multiprocessing
    pass
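
Since this hunk only re-exports the backport's public names, a quick usage
sketch may help; it is illustrative rather than part of the commit, and
slow_square and its inputs are made up (Python 2, matching the package):

    # Illustrative sketch only (Python 2); slow_square is hypothetical.
    import time
    from concurrent.futures import ThreadPoolExecutor, as_completed

    def slow_square(n):
        time.sleep(0.1)
        return n * n

    executor = ThreadPoolExecutor(max_workers=4)
    futures = [executor.submit(slow_square, n) for n in range(8)]
    for future in as_completed(futures, timeout=5):
        print future.result()   # yielded in completion order, not submit order
    executor.shutdown()

as_completed() and wait() are defined in the _base module whose deletion
follows below.
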
@ -1,607 +0,0 @@
|
|||||||
# Copyright 2009 Brian Quinlan. All Rights Reserved.
|
|
||||||
# Licensed to PSF under a Contributor Agreement.
|
|
||||||
|
|
||||||
import collections
|
|
||||||
import logging
|
|
||||||
import threading
|
|
||||||
import itertools
|
|
||||||
import time
|
|
||||||
|
|
||||||
__author__ = 'Brian Quinlan (brian@sweetapp.com)'
|
|
||||||
|
|
||||||
FIRST_COMPLETED = 'FIRST_COMPLETED'
|
|
||||||
FIRST_EXCEPTION = 'FIRST_EXCEPTION'
|
|
||||||
ALL_COMPLETED = 'ALL_COMPLETED'
|
|
||||||
_AS_COMPLETED = '_AS_COMPLETED'
|
|
||||||
|
|
||||||
# Possible future states (for internal use by the futures package).
|
|
||||||
PENDING = 'PENDING'
|
|
||||||
RUNNING = 'RUNNING'
|
|
||||||
# The future was cancelled by the user...
|
|
||||||
CANCELLED = 'CANCELLED'
|
|
||||||
# ...and _Waiter.add_cancelled() was called by a worker.
|
|
||||||
CANCELLED_AND_NOTIFIED = 'CANCELLED_AND_NOTIFIED'
|
|
||||||
FINISHED = 'FINISHED'
|
|
||||||
|
|
||||||
_FUTURE_STATES = [
|
|
||||||
PENDING,
|
|
||||||
RUNNING,
|
|
||||||
CANCELLED,
|
|
||||||
CANCELLED_AND_NOTIFIED,
|
|
||||||
FINISHED
|
|
||||||
]
|
|
||||||
|
|
||||||
_STATE_TO_DESCRIPTION_MAP = {
|
|
||||||
PENDING: "pending",
|
|
||||||
RUNNING: "running",
|
|
||||||
CANCELLED: "cancelled",
|
|
||||||
CANCELLED_AND_NOTIFIED: "cancelled",
|
|
||||||
FINISHED: "finished"
|
|
||||||
}
|
|
||||||
|
|
||||||
# Logger for internal use by the futures package.
|
|
||||||
LOGGER = logging.getLogger("concurrent.futures")
|
|
||||||
|
|
||||||
class Error(Exception):
|
|
||||||
"""Base class for all future-related exceptions."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
class CancelledError(Error):
|
|
||||||
"""The Future was cancelled."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
class TimeoutError(Error):
|
|
||||||
"""The operation exceeded the given deadline."""
|
|
||||||
pass
|
|
||||||
|
|
||||||
class _Waiter(object):
|
|
||||||
"""Provides the event that wait() and as_completed() block on."""
|
|
||||||
def __init__(self):
|
|
||||||
self.event = threading.Event()
|
|
||||||
self.finished_futures = []
|
|
||||||
|
|
||||||
def add_result(self, future):
|
|
||||||
self.finished_futures.append(future)
|
|
||||||
|
|
||||||
def add_exception(self, future):
|
|
||||||
self.finished_futures.append(future)
|
|
||||||
|
|
||||||
def add_cancelled(self, future):
|
|
||||||
self.finished_futures.append(future)
|
|
||||||
|
|
||||||
class _AsCompletedWaiter(_Waiter):
|
|
||||||
"""Used by as_completed()."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
super(_AsCompletedWaiter, self).__init__()
|
|
||||||
self.lock = threading.Lock()
|
|
||||||
|
|
||||||
def add_result(self, future):
|
|
||||||
with self.lock:
|
|
||||||
super(_AsCompletedWaiter, self).add_result(future)
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
def add_exception(self, future):
|
|
||||||
with self.lock:
|
|
||||||
super(_AsCompletedWaiter, self).add_exception(future)
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
def add_cancelled(self, future):
|
|
||||||
with self.lock:
|
|
||||||
super(_AsCompletedWaiter, self).add_cancelled(future)
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
class _FirstCompletedWaiter(_Waiter):
|
|
||||||
"""Used by wait(return_when=FIRST_COMPLETED)."""
|
|
||||||
|
|
||||||
def add_result(self, future):
|
|
||||||
super(_FirstCompletedWaiter, self).add_result(future)
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
def add_exception(self, future):
|
|
||||||
super(_FirstCompletedWaiter, self).add_exception(future)
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
def add_cancelled(self, future):
|
|
||||||
super(_FirstCompletedWaiter, self).add_cancelled(future)
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
class _AllCompletedWaiter(_Waiter):
|
|
||||||
"""Used by wait(return_when=FIRST_EXCEPTION and ALL_COMPLETED)."""
|
|
||||||
|
|
||||||
def __init__(self, num_pending_calls, stop_on_exception):
|
|
||||||
self.num_pending_calls = num_pending_calls
|
|
||||||
self.stop_on_exception = stop_on_exception
|
|
||||||
self.lock = threading.Lock()
|
|
||||||
super(_AllCompletedWaiter, self).__init__()
|
|
||||||
|
|
||||||
def _decrement_pending_calls(self):
|
|
||||||
with self.lock:
|
|
||||||
self.num_pending_calls -= 1
|
|
||||||
if not self.num_pending_calls:
|
|
||||||
self.event.set()
|
|
||||||
|
|
||||||
def add_result(self, future):
|
|
||||||
super(_AllCompletedWaiter, self).add_result(future)
|
|
||||||
self._decrement_pending_calls()
|
|
||||||
|
|
||||||
def add_exception(self, future):
|
|
||||||
super(_AllCompletedWaiter, self).add_exception(future)
|
|
||||||
if self.stop_on_exception:
|
|
||||||
self.event.set()
|
|
||||||
else:
|
|
||||||
self._decrement_pending_calls()
|
|
||||||
|
|
||||||
def add_cancelled(self, future):
|
|
||||||
super(_AllCompletedWaiter, self).add_cancelled(future)
|
|
||||||
self._decrement_pending_calls()
|
|
||||||
|
|
||||||
class _AcquireFutures(object):
|
|
||||||
"""A context manager that does an ordered acquire of Future conditions."""
|
|
||||||
|
|
||||||
def __init__(self, futures):
|
|
||||||
self.futures = sorted(futures, key=id)
|
|
||||||
|
|
||||||
def __enter__(self):
|
|
||||||
for future in self.futures:
|
|
||||||
future._condition.acquire()
|
|
||||||
|
|
||||||
def __exit__(self, *args):
|
|
||||||
for future in self.futures:
|
|
||||||
future._condition.release()
|
|
||||||
|
|
||||||
def _create_and_install_waiters(fs, return_when):
|
|
||||||
if return_when == _AS_COMPLETED:
|
|
||||||
waiter = _AsCompletedWaiter()
|
|
||||||
elif return_when == FIRST_COMPLETED:
|
|
||||||
waiter = _FirstCompletedWaiter()
|
|
||||||
else:
|
|
||||||
pending_count = sum(
|
|
||||||
f._state not in [CANCELLED_AND_NOTIFIED, FINISHED] for f in fs)
|
|
||||||
|
|
||||||
if return_when == FIRST_EXCEPTION:
|
|
||||||
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=True)
|
|
||||||
elif return_when == ALL_COMPLETED:
|
|
||||||
waiter = _AllCompletedWaiter(pending_count, stop_on_exception=False)
|
|
||||||
else:
|
|
||||||
raise ValueError("Invalid return condition: %r" % return_when)
|
|
||||||
|
|
||||||
for f in fs:
|
|
||||||
f._waiters.append(waiter)
|
|
||||||
|
|
||||||
return waiter
|
|
||||||
|
|
||||||
def as_completed(fs, timeout=None):
|
|
||||||
"""An iterator over the given futures that yields each as it completes.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
fs: The sequence of Futures (possibly created by different Executors) to
|
|
||||||
iterate over.
|
|
||||||
timeout: The maximum number of seconds to wait. If None, then there
|
|
||||||
is no limit on the wait time.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
An iterator that yields the given Futures as they complete (finished or
|
|
||||||
cancelled). If any given Futures are duplicated, they will be returned
|
|
||||||
once.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
TimeoutError: If the entire result iterator could not be generated
|
|
||||||
before the given timeout.
|
|
||||||
"""
|
|
||||||
if timeout is not None:
|
|
||||||
end_time = timeout + time.time()
|
|
||||||
|
|
||||||
fs = set(fs)
|
|
||||||
with _AcquireFutures(fs):
|
|
||||||
finished = set(
|
|
||||||
f for f in fs
|
|
||||||
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
|
|
||||||
pending = fs - finished
|
|
||||||
waiter = _create_and_install_waiters(fs, _AS_COMPLETED)
|
|
||||||
|
|
||||||
try:
|
|
||||||
for future in finished:
|
|
||||||
yield future
|
|
||||||
|
|
||||||
while pending:
|
|
||||||
if timeout is None:
|
|
||||||
wait_timeout = None
|
|
||||||
else:
|
|
||||||
wait_timeout = end_time - time.time()
|
|
||||||
if wait_timeout < 0:
|
|
||||||
raise TimeoutError(
|
|
||||||
'%d (of %d) futures unfinished' % (
|
|
||||||
len(pending), len(fs)))
|
|
||||||
|
|
||||||
waiter.event.wait(wait_timeout)
|
|
||||||
|
|
||||||
with waiter.lock:
|
|
||||||
finished = waiter.finished_futures
|
|
||||||
waiter.finished_futures = []
|
|
||||||
waiter.event.clear()
|
|
||||||
|
|
||||||
for future in finished:
|
|
||||||
yield future
|
|
||||||
pending.remove(future)
|
|
||||||
|
|
||||||
finally:
|
|
||||||
for f in fs:
|
|
||||||
with f._condition:
|
|
||||||
f._waiters.remove(waiter)
|
|
||||||
|
|
||||||
DoneAndNotDoneFutures = collections.namedtuple(
|
|
||||||
'DoneAndNotDoneFutures', 'done not_done')
|
|
||||||
def wait(fs, timeout=None, return_when=ALL_COMPLETED):
|
|
||||||
"""Wait for the futures in the given sequence to complete.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
fs: The sequence of Futures (possibly created by different Executors) to
|
|
||||||
wait upon.
|
|
||||||
timeout: The maximum number of seconds to wait. If None, then there
|
|
||||||
is no limit on the wait time.
|
|
||||||
return_when: Indicates when this function should return. The options
|
|
||||||
are:
|
|
||||||
|
|
||||||
FIRST_COMPLETED - Return when any future finishes or is
|
|
||||||
cancelled.
|
|
||||||
FIRST_EXCEPTION - Return when any future finishes by raising an
|
|
||||||
exception. If no future raises an exception
|
|
||||||
then it is equivalent to ALL_COMPLETED.
|
|
||||||
ALL_COMPLETED - Return when all futures finish or are cancelled.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
A named 2-tuple of sets. The first set, named 'done', contains the
|
|
||||||
futures that completed (is finished or cancelled) before the wait
|
|
||||||
completed. The second set, named 'not_done', contains uncompleted
|
|
||||||
futures.
|
|
||||||
"""
|
|
||||||
with _AcquireFutures(fs):
|
|
||||||
done = set(f for f in fs
|
|
||||||
if f._state in [CANCELLED_AND_NOTIFIED, FINISHED])
|
|
||||||
not_done = set(fs) - done
|
|
||||||
|
|
||||||
if (return_when == FIRST_COMPLETED) and done:
|
|
||||||
return DoneAndNotDoneFutures(done, not_done)
|
|
||||||
elif (return_when == FIRST_EXCEPTION) and done:
|
|
||||||
if any(f for f in done
|
|
||||||
if not f.cancelled() and f.exception() is not None):
|
|
||||||
return DoneAndNotDoneFutures(done, not_done)
|
|
||||||
|
|
||||||
if len(done) == len(fs):
|
|
||||||
return DoneAndNotDoneFutures(done, not_done)
|
|
||||||
|
|
||||||
waiter = _create_and_install_waiters(fs, return_when)
|
|
||||||
|
|
||||||
waiter.event.wait(timeout)
|
|
||||||
for f in fs:
|
|
||||||
with f._condition:
|
|
||||||
f._waiters.remove(waiter)
|
|
||||||
|
|
||||||
done.update(waiter.finished_futures)
|
|
||||||
return DoneAndNotDoneFutures(done, set(fs) - done)
|
|
||||||
|
|
||||||
class Future(object):
|
|
||||||
"""Represents the result of an asynchronous computation."""
|
|
||||||
|
|
||||||
def __init__(self):
|
|
||||||
"""Initializes the future. Should not be called by clients."""
|
|
||||||
self._condition = threading.Condition()
|
|
||||||
self._state = PENDING
|
|
||||||
self._result = None
|
|
||||||
self._exception = None
|
|
||||||
self._traceback = None
|
|
||||||
self._waiters = []
|
|
||||||
self._done_callbacks = []
|
|
||||||
|
|
||||||
def _invoke_callbacks(self):
|
|
||||||
for callback in self._done_callbacks:
|
|
||||||
try:
|
|
||||||
callback(self)
|
|
||||||
except Exception:
|
|
||||||
LOGGER.exception('exception calling callback for %r', self)
|
|
||||||
|
|
||||||
def __repr__(self):
|
|
||||||
with self._condition:
|
|
||||||
if self._state == FINISHED:
|
|
||||||
if self._exception:
|
|
||||||
return '<Future at %s state=%s raised %s>' % (
|
|
||||||
hex(id(self)),
|
|
||||||
_STATE_TO_DESCRIPTION_MAP[self._state],
|
|
||||||
self._exception.__class__.__name__)
|
|
||||||
else:
|
|
||||||
return '<Future at %s state=%s returned %s>' % (
|
|
||||||
hex(id(self)),
|
|
||||||
_STATE_TO_DESCRIPTION_MAP[self._state],
|
|
||||||
self._result.__class__.__name__)
|
|
||||||
return '<Future at %s state=%s>' % (
|
|
||||||
hex(id(self)),
|
|
||||||
_STATE_TO_DESCRIPTION_MAP[self._state])
|
|
||||||
|
|
||||||
def cancel(self):
|
|
||||||
"""Cancel the future if possible.
|
|
||||||
|
|
||||||
Returns True if the future was cancelled, False otherwise. A future
|
|
||||||
cannot be cancelled if it is running or has already completed.
|
|
||||||
"""
|
|
||||||
with self._condition:
|
|
||||||
if self._state in [RUNNING, FINISHED]:
|
|
||||||
return False
|
|
||||||
|
|
||||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
|
||||||
return True
|
|
||||||
|
|
||||||
self._state = CANCELLED
|
|
||||||
self._condition.notify_all()
|
|
||||||
|
|
||||||
self._invoke_callbacks()
|
|
||||||
return True
|
|
||||||
|
|
||||||
def cancelled(self):
|
|
||||||
"""Return True if the future has cancelled."""
|
|
||||||
with self._condition:
|
|
||||||
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]
|
|
||||||
|
|
||||||
def running(self):
|
|
||||||
"""Return True if the future is currently executing."""
|
|
||||||
with self._condition:
|
|
||||||
return self._state == RUNNING
|
|
||||||
|
|
||||||
def done(self):
|
|
||||||
"""Return True of the future was cancelled or finished executing."""
|
|
||||||
with self._condition:
|
|
||||||
return self._state in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]
|
|
||||||
|
|
||||||
def __get_result(self):
|
|
||||||
if self._exception:
|
|
||||||
raise type(self._exception), self._exception, self._traceback
|
|
||||||
else:
|
|
||||||
return self._result
|
|
||||||
|
|
||||||
def add_done_callback(self, fn):
|
|
||||||
"""Attaches a callable that will be called when the future finishes.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
fn: A callable that will be called with this future as its only
|
|
||||||
argument when the future completes or is cancelled. The callable
|
|
||||||
will always be called by a thread in the same process in which
|
|
||||||
it was added. If the future has already completed or been
|
|
||||||
cancelled then the callable will be called immediately. These
|
|
||||||
callables are called in the order that they were added.
|
|
||||||
"""
|
|
||||||
with self._condition:
|
|
||||||
if self._state not in [CANCELLED, CANCELLED_AND_NOTIFIED, FINISHED]:
|
|
||||||
self._done_callbacks.append(fn)
|
|
||||||
return
|
|
||||||
fn(self)
|
|
||||||
|
|
||||||
def result(self, timeout=None):
|
|
||||||
"""Return the result of the call that the future represents.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
timeout: The number of seconds to wait for the result if the future
|
|
||||||
isn't done. If None, then there is no limit on the wait time.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The result of the call that the future represents.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
CancelledError: If the future was cancelled.
|
|
||||||
TimeoutError: If the future didn't finish executing before the given
|
|
||||||
timeout.
|
|
||||||
Exception: If the call raised then that exception will be raised.
|
|
||||||
"""
|
|
||||||
with self._condition:
|
|
||||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
|
||||||
raise CancelledError()
|
|
||||||
elif self._state == FINISHED:
|
|
||||||
return self.__get_result()
|
|
||||||
|
|
||||||
self._condition.wait(timeout)
|
|
||||||
|
|
||||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
|
||||||
raise CancelledError()
|
|
||||||
elif self._state == FINISHED:
|
|
||||||
return self.__get_result()
|
|
||||||
else:
|
|
||||||
raise TimeoutError()
|
|
||||||
|
|
||||||
def exception_info(self, timeout=None):
|
|
||||||
"""Return a tuple of (exception, traceback) raised by the call that the
|
|
||||||
future represents.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
timeout: The number of seconds to wait for the exception if the
|
|
||||||
future isn't done. If None, then there is no limit on the wait
|
|
||||||
time.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The exception raised by the call that the future represents or None
|
|
||||||
if the call completed without raising.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
CancelledError: If the future was cancelled.
|
|
||||||
TimeoutError: If the future didn't finish executing before the given
|
|
||||||
timeout.
|
|
||||||
"""
|
|
||||||
with self._condition:
|
|
||||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
|
||||||
raise CancelledError()
|
|
||||||
elif self._state == FINISHED:
|
|
||||||
return self._exception, self._traceback
|
|
||||||
|
|
||||||
self._condition.wait(timeout)
|
|
||||||
|
|
||||||
if self._state in [CANCELLED, CANCELLED_AND_NOTIFIED]:
|
|
||||||
raise CancelledError()
|
|
||||||
elif self._state == FINISHED:
|
|
||||||
return self._exception, self._traceback
|
|
||||||
else:
|
|
||||||
raise TimeoutError()
|
|
||||||
|
|
||||||
def exception(self, timeout=None):
|
|
||||||
"""Return the exception raised by the call that the future represents.
|
|
||||||
|
|
||||||
Args:
|
|
||||||
timeout: The number of seconds to wait for the exception if the
|
|
||||||
future isn't done. If None, then there is no limit on the wait
|
|
||||||
time.
|
|
||||||
|
|
||||||
Returns:
|
|
||||||
The exception raised by the call that the future represents or None
|
|
||||||
if the call completed without raising.
|
|
||||||
|
|
||||||
Raises:
|
|
||||||
CancelledError: If the future was cancelled.
|
|
||||||
TimeoutError: If the future didn't finish executing before the given
|
|
||||||
timeout.
|
|
||||||
"""
|
|
||||||
return self.exception_info(timeout)[0]
|
|
||||||
|
|
||||||
    # The following methods should only be used by Executors and in tests.
    def set_running_or_notify_cancel(self):
        """Mark the future as running or process any cancel notifications.

        Should only be used by Executor implementations and unit tests.

        If the future has been cancelled (cancel() was called and returned
        True) then any threads waiting on the future completing (through calls
        to as_completed() or wait()) are notified and False is returned.

        If the future was not cancelled then it is put in the running state
        (future calls to running() will return True) and True is returned.

        This method should be called by Executor implementations before
        executing the work associated with this future. If this method returns
        False then the work should not be executed.

        Returns:
            False if the Future was cancelled, True otherwise.

        Raises:
            RuntimeError: if this method was already called or if set_result()
                or set_exception() was called.
        """
        with self._condition:
            if self._state == CANCELLED:
                self._state = CANCELLED_AND_NOTIFIED
                for waiter in self._waiters:
                    waiter.add_cancelled(self)
                # self._condition.notify_all() is not necessary because
                # self.cancel() triggers a notification.
                return False
            elif self._state == PENDING:
                self._state = RUNNING
                return True
            else:
                LOGGER.critical('Future %s in unexpected state: %s',
                                id(self),
                                self._state)
                raise RuntimeError('Future in unexpected state')
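    # Illustrative sketch of the pattern an Executor implementation is
    # expected to follow (it mirrors _WorkItem.run() in the thread module
    # below; `future`, `fn`, `args` and `kwargs` stand in for a pending work
    # item's fields):
    #
    #     if not future.set_running_or_notify_cancel():
    #         return                       # cancelled; the call must not run
    #     try:
    #         result = fn(*args, **kwargs)
    #     except BaseException:
    #         e, tb = sys.exc_info()[1:]
    #         future.set_exception_info(e, tb)
    #     else:
    #         future.set_result(result)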
    def set_result(self, result):
        """Sets the return value of work associated with the future.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._result = result
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_result(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception_info(self, exception, traceback):
        """Sets the result of the future as being the given exception
        and traceback.

        Should only be used by Executor implementations and unit tests.
        """
        with self._condition:
            self._exception = exception
            self._traceback = traceback
            self._state = FINISHED
            for waiter in self._waiters:
                waiter.add_exception(self)
            self._condition.notify_all()
        self._invoke_callbacks()

    def set_exception(self, exception):
        """Sets the result of the future as being the given exception.

        Should only be used by Executor implementations and unit tests.
        """
        self.set_exception_info(exception, None)
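    # Illustrative sketch: completing a Future by hand, as a unit test might
    # (assumes the backport is importable as `concurrent.futures`):
    #
    #     from concurrent.futures import Future
    #
    #     f = Future()
    #     f.set_running_or_notify_cancel()   # PENDING -> RUNNING
    #     f.set_result(42)
    #     assert f.done() and f.result() == 42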
class Executor(object):
    """This is an abstract base class for concrete asynchronous executors."""

    def submit(self, fn, *args, **kwargs):
        """Submits a callable to be executed with the given arguments.

        Schedules the callable to be executed as fn(*args, **kwargs) and returns
        a Future instance representing the execution of the callable.

        Returns:
            A Future representing the given call.
        """
        raise NotImplementedError()

    def map(self, fn, *iterables, **kwargs):
        """Returns an iterator equivalent to map(fn, *iterables).

        Args:
            fn: A callable that will take as many arguments as there are
                passed iterables.
            timeout: The maximum number of seconds to wait. If None, then there
                is no limit on the wait time.

        Returns:
            An iterator equivalent to: map(fn, *iterables) but the calls may
            be evaluated out-of-order.

        Raises:
            TimeoutError: If the entire result iterator could not be generated
                before the given timeout.
            Exception: If fn(*args) raises for any values.
        """
        timeout = kwargs.get('timeout')
        if timeout is not None:
            end_time = timeout + time.time()

        fs = [self.submit(fn, *args) for args in itertools.izip(*iterables)]

        # Yield must be hidden in closure so that the futures are submitted
        # before the first iterator value is required.
        def result_iterator():
            try:
                for future in fs:
                    if timeout is None:
                        yield future.result()
                    else:
                        yield future.result(end_time - time.time())
            finally:
                for future in fs:
                    future.cancel()
        return result_iterator()
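    # Illustrative sketch of map() with a deadline (`download`, `urls` and
    # `handle` are placeholders): results are yielded in submission order, and
    # the finally clause above cancels whatever has not started if iteration
    # is abandoned.
    #
    #     from concurrent.futures import ThreadPoolExecutor
    #
    #     with ThreadPoolExecutor(max_workers=4) as executor:
    #         for page in executor.map(download, urls, timeout=30):
    #             handle(page)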
    def shutdown(self, wait=True):
        """Clean up the resources associated with the Executor.

        It is safe to call this method several times, but no other methods
        can be called after this one.

        Args:
            wait: If True then shutdown will not return until all running
                futures have finished executing and the resources used by the
                executor have been reclaimed.
        """
        pass

    def __enter__(self):
        return self

    def __exit__(self, exc_type, exc_val, exc_tb):
        self.shutdown(wait=True)
        return False
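    # Illustrative sketch: __enter__/__exit__ make every Executor usable as a
    # context manager, so shutdown(wait=True) runs even if the body raises.
    #
    #     with ThreadPoolExecutor(max_workers=2) as executor:
    #         f = executor.submit(sum, [1, 2, 3])
    #     # the pool is now shut down; f.done() is True and f.result() == 6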
@ -1,359 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ProcessPoolExecutor.

The following diagram and text describe the data-flow through the system:

|======================= In-process =====================|== Out-of-process ==|

+----------+     +----------+       +--------+     +-----------+    +---------+
|          | =>  | Work Ids |    => |        |  => | Call Q    | => |         |
|          |     +----------+       |        |     +-----------+    |         |
|          |     | ...      |       |        |     | ...       |    |         |
|          |     | 6        |       |        |     | 5, call() |    |         |
|          |     | 7        |       |        |     | ...       |    |         |
| Process  |     | ...      |       | Local  |     +-----------+    | Process |
|  Pool    |     +----------+       | Worker |                      |  #1..n  |
| Executor |                        | Thread |                      |         |
|          |     +------------+     |        |     +-----------+    |         |
|          | <=> | Work Items | <=> |        | <=  | Result Q  | <= |         |
|          |     +------------+     |        |     +-----------+    |         |
|          |     | 6: call() |      |        |     | ...       |    |         |
|          |     |    future |      |        |     | 4, result |    |         |
|          |     | ...       |      |        |     | 3, except |    |         |
+----------+     +------------+     +--------+     +-----------+    +---------+

Executor.submit() called:
- creates a uniquely numbered _WorkItem and adds it to the "Work Items" dict
- adds the id of the _WorkItem to the "Work Ids" queue

Local worker thread:
- reads work ids from the "Work Ids" queue and looks up the corresponding
  WorkItem from the "Work Items" dict: if the work item has been cancelled then
  it is simply removed from the dict, otherwise it is repackaged as a
  _CallItem and put in the "Call Q". New _CallItems are put in the "Call Q"
  until "Call Q" is full. NOTE: the size of the "Call Q" is kept small because
  calls placed in the "Call Q" can no longer be cancelled with Future.cancel().
- reads _ResultItems from "Result Q", updates the future stored in the
  "Work Items" dict and deletes the dict entry

Process #1..n:
- reads _CallItems from "Call Q", executes the calls, and puts the resulting
  _ResultItems in "Result Q"
"""
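# Illustrative sketch of the cancellation window described above (assumes an
# already-constructed ProcessPoolExecutor `executor` and a placeholder
# callable `slow_function`): once a call has been repackaged into the
# "Call Q", Future.cancel() can no longer succeed.
#
#     future = executor.submit(slow_function)
#     if future.cancel():
#         pass   # was still in "Work Items"; it will never be executed
#     else:
#         pass   # already in the "Call Q" or running; cancellation failed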
import atexit
from concurrent.futures import _base
import Queue as queue
import multiprocessing
import threading
import weakref
import sys

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

# Workers are created as daemon threads and processes. This is done to allow
# the interpreter to exit when there are still idle processes in a
# ProcessPoolExecutor's process pool (i.e. shutdown() was not called). However,
# allowing workers to die with the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads/processes finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items()) if _threads_queues else ()
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join(sys.maxint)

# Controls how many more calls than processes will be queued in the call queue.
# A smaller number will mean that processes spend more time idle waiting for
# work while a larger number will make Future.cancel() succeed less frequently
# (Futures in the call queue cannot be cancelled).
EXTRA_QUEUED_CALLS = 1
class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

class _ResultItem(object):
    def __init__(self, work_id, exception=None, result=None):
        self.work_id = work_id
        self.exception = exception
        self.result = result

class _CallItem(object):
    def __init__(self, work_id, fn, args, kwargs):
        self.work_id = work_id
        self.fn = fn
        self.args = args
        self.kwargs = kwargs
def _process_worker(call_queue, result_queue):
    """Evaluates calls from call_queue and places the results in result_queue.

    This worker is run in a separate process.

    Args:
        call_queue: A multiprocessing.Queue of _CallItems that will be read and
            evaluated by the worker.
        result_queue: A multiprocessing.Queue of _ResultItems that will be
            written to by the worker.
    """
    while True:
        call_item = call_queue.get(block=True)
        if call_item is None:
            # Wake up queue management thread
            result_queue.put(None)
            return
        try:
            r = call_item.fn(*call_item.args, **call_item.kwargs)
        except BaseException:
            e = sys.exc_info()[1]
            result_queue.put(_ResultItem(call_item.work_id,
                                         exception=e))
        else:
            result_queue.put(_ResultItem(call_item.work_id,
                                         result=r))
def _add_call_item_to_queue(pending_work_items,
                            work_ids,
                            call_queue):
    """Fills call_queue with _WorkItems from pending_work_items.

    This function never blocks.

    Args:
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids: A queue.Queue of work ids e.g. Queue([5, 6, ...]). Work ids
            are consumed and the corresponding _WorkItems from
            pending_work_items are transformed into _CallItems and put in
            call_queue.
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems.
    """
    while True:
        if call_queue.full():
            return
        try:
            work_id = work_ids.get(block=False)
        except queue.Empty:
            return
        else:
            work_item = pending_work_items[work_id]

            if work_item.future.set_running_or_notify_cancel():
                call_queue.put(_CallItem(work_id,
                                         work_item.fn,
                                         work_item.args,
                                         work_item.kwargs),
                               block=True)
            else:
                del pending_work_items[work_id]
                continue
def _queue_management_worker(executor_reference,
                             processes,
                             pending_work_items,
                             work_ids_queue,
                             call_queue,
                             result_queue):
    """Manages the communication between this process and the worker processes.

    This function is run in a local thread.

    Args:
        executor_reference: A weakref.ref to the ProcessPoolExecutor that owns
            this thread. Used to determine if the ProcessPoolExecutor has been
            garbage collected and that this function can exit.
        processes: A list of the multiprocessing.Process instances used as
            workers.
        pending_work_items: A dict mapping work ids to _WorkItems e.g.
            {5: <_WorkItem...>, 6: <_WorkItem...>, ...}
        work_ids_queue: A queue.Queue of work ids e.g. Queue([5, 6, ...]).
        call_queue: A multiprocessing.Queue that will be filled with _CallItems
            derived from _WorkItems for processing by the process workers.
        result_queue: A multiprocessing.Queue of _ResultItems generated by the
            process workers.
    """
    nb_shutdown_processes = [0]
    def shutdown_one_process():
        """Tell a worker to terminate, which will in turn wake us again"""
        call_queue.put(None)
        nb_shutdown_processes[0] += 1
    while True:
        _add_call_item_to_queue(pending_work_items,
                                work_ids_queue,
                                call_queue)

        result_item = result_queue.get(block=True)
        if result_item is not None:
            work_item = pending_work_items[result_item.work_id]
            del pending_work_items[result_item.work_id]

            if result_item.exception:
                work_item.future.set_exception(result_item.exception)
            else:
                work_item.future.set_result(result_item.result)
            # Delete references to object. See issue16284
            del work_item
        # Check whether we should start shutting down.
        executor = executor_reference()
        # No more work items can be added if:
        #   - The interpreter is shutting down OR
        #   - The executor that owns this worker has been collected OR
        #   - The executor that owns this worker has been shutdown.
        if _shutdown or executor is None or executor._shutdown_thread:
            # Since no new work items can be added, it is safe to shutdown
            # this thread if there are no pending work items.
            if not pending_work_items:
                while nb_shutdown_processes[0] < len(processes):
                    shutdown_one_process()
                # If .join() is not called on the created processes then
                # some multiprocessing.Queue methods may deadlock on Mac OS
                # X.
                for p in processes:
                    p.join()
                call_queue.close()
                return
        del executor
_system_limits_checked = False
_system_limited = None
def _check_system_limits():
    global _system_limits_checked, _system_limited
    if _system_limits_checked:
        if _system_limited:
            raise NotImplementedError(_system_limited)
    _system_limits_checked = True
    try:
        import os
        nsems_max = os.sysconf("SC_SEM_NSEMS_MAX")
    except (AttributeError, ValueError):
        # sysconf not available or setting not available
        return
    if nsems_max == -1:
        # indeterminate limit, assume that the limit is determined
        # by available memory only
        return
    if nsems_max >= 256:
        # minimum number of semaphores available
        # according to POSIX
        return
    _system_limited = ("system provides too few semaphores "
                       "(%d available, 256 necessary)" % nsems_max)
    raise NotImplementedError(_system_limited)
class ProcessPoolExecutor(_base.Executor):
    def __init__(self, max_workers=None):
        """Initializes a new ProcessPoolExecutor instance.

        Args:
            max_workers: The maximum number of processes that can be used to
                execute the given calls. If None or not given then as many
                worker processes will be created as the machine has processors.
        """
        _check_system_limits()

        if max_workers is None:
            self._max_workers = multiprocessing.cpu_count()
        else:
            self._max_workers = max_workers

        # Make the call queue slightly larger than the number of processes to
        # prevent the worker processes from idling. But don't make it too big
        # because futures in the call queue cannot be cancelled.
        self._call_queue = multiprocessing.Queue(self._max_workers +
                                                 EXTRA_QUEUED_CALLS)
        self._result_queue = multiprocessing.Queue()
        self._work_ids = queue.Queue()
        self._queue_management_thread = None
        self._processes = set()

        # Shutdown is a two-step process.
        self._shutdown_thread = False
        self._shutdown_lock = threading.Lock()
        self._queue_count = 0
        self._pending_work_items = {}

    def _start_queue_management_thread(self):
        # When the executor gets lost, the weakref callback will wake up
        # the queue management thread.
        def weakref_cb(_, q=self._result_queue):
            q.put(None)
        if self._queue_management_thread is None:
            self._queue_management_thread = threading.Thread(
                    target=_queue_management_worker,
                    args=(weakref.ref(self, weakref_cb),
                          self._processes,
                          self._pending_work_items,
                          self._work_ids,
                          self._call_queue,
                          self._result_queue))
            self._queue_management_thread.daemon = True
            self._queue_management_thread.start()
            _threads_queues[self._queue_management_thread] = self._result_queue

    def _adjust_process_count(self):
        for _ in range(len(self._processes), self._max_workers):
            p = multiprocessing.Process(
                    target=_process_worker,
                    args=(self._call_queue,
                          self._result_queue))
            p.start()
            self._processes.add(p)

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown_thread:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._pending_work_items[self._queue_count] = w
            self._work_ids.put(self._queue_count)
            self._queue_count += 1
            # Wake up queue management thread
            self._result_queue.put(None)

            self._start_queue_management_thread()
            self._adjust_process_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown_thread = True
        if self._queue_management_thread:
            # Wake up queue management thread
            self._result_queue.put(None)
            if wait:
                self._queue_management_thread.join(sys.maxint)
        # To reduce the risk of opening too many files, remove references to
        # objects that use file descriptors.
        self._queue_management_thread = None
        self._call_queue = None
        self._result_queue = None
        self._processes = None
    shutdown.__doc__ = _base.Executor.shutdown.__doc__

atexit.register(_python_exit)
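# Illustrative sketch of typical ProcessPoolExecutor usage (Python 2 style,
# matching this backport); submission is kept under a __main__ guard so the
# module can safely be re-imported by child processes:
#
#     from concurrent.futures import ProcessPoolExecutor
#
#     def cpu_bound(n):
#         return sum(i * i for i in xrange(n))
#
#     if __name__ == '__main__':
#         with ProcessPoolExecutor() as executor:
#             print(list(executor.map(cpu_bound, [10 ** 5] * 4)))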
@ -1,134 +0,0 @@
# Copyright 2009 Brian Quinlan. All Rights Reserved.
# Licensed to PSF under a Contributor Agreement.

"""Implements ThreadPoolExecutor."""

import atexit
from concurrent.futures import _base
import Queue as queue
import threading
import weakref
import sys

__author__ = 'Brian Quinlan (brian@sweetapp.com)'

# Workers are created as daemon threads. This is done to allow the interpreter
# to exit when there are still idle threads in a ThreadPoolExecutor's thread
# pool (i.e. shutdown() was not called). However, allowing workers to die with
# the interpreter has two undesirable properties:
#   - The workers would still be running during interpreter shutdown,
#     meaning that they would fail in unpredictable ways.
#   - The workers could be killed while evaluating a work item, which could
#     be bad if the callable being evaluated has external side-effects e.g.
#     writing to a file.
#
# To work around this problem, an exit handler is installed which tells the
# workers to exit when their work queues are empty and then waits until the
# threads finish.

_threads_queues = weakref.WeakKeyDictionary()
_shutdown = False

def _python_exit():
    global _shutdown
    _shutdown = True
    items = list(_threads_queues.items()) if _threads_queues else ()
    for t, q in items:
        q.put(None)
    for t, q in items:
        t.join(sys.maxint)

atexit.register(_python_exit)

class _WorkItem(object):
    def __init__(self, future, fn, args, kwargs):
        self.future = future
        self.fn = fn
        self.args = args
        self.kwargs = kwargs

    def run(self):
        if not self.future.set_running_or_notify_cancel():
            return

        try:
            result = self.fn(*self.args, **self.kwargs)
        except BaseException:
            e, tb = sys.exc_info()[1:]
            self.future.set_exception_info(e, tb)
        else:
            self.future.set_result(result)

def _worker(executor_reference, work_queue):
    try:
        while True:
            work_item = work_queue.get(block=True)
            if work_item is not None:
                work_item.run()
                # Delete references to object. See issue16284
                del work_item
                continue
            executor = executor_reference()
            # Exit if:
            #   - The interpreter is shutting down OR
            #   - The executor that owns the worker has been collected OR
            #   - The executor that owns the worker has been shutdown.
            if _shutdown or executor is None or executor._shutdown:
                # Notify other workers
                work_queue.put(None)
                return
            del executor
    except BaseException:
        _base.LOGGER.critical('Exception in worker', exc_info=True)

class ThreadPoolExecutor(_base.Executor):
    def __init__(self, max_workers):
        """Initializes a new ThreadPoolExecutor instance.

        Args:
            max_workers: The maximum number of threads that can be used to
                execute the given calls.
        """
        self._max_workers = max_workers
        self._work_queue = queue.Queue()
        self._threads = set()
        self._shutdown = False
        self._shutdown_lock = threading.Lock()

    def submit(self, fn, *args, **kwargs):
        with self._shutdown_lock:
            if self._shutdown:
                raise RuntimeError('cannot schedule new futures after shutdown')

            f = _base.Future()
            w = _WorkItem(f, fn, args, kwargs)

            self._work_queue.put(w)
            self._adjust_thread_count()
            return f
    submit.__doc__ = _base.Executor.submit.__doc__

    def _adjust_thread_count(self):
        # When the executor gets lost, the weakref callback will wake up
        # the worker threads.
        def weakref_cb(_, q=self._work_queue):
            q.put(None)
        # TODO(bquinlan): Should avoid creating new threads if there are more
        # idle threads than items in the work queue.
        if len(self._threads) < self._max_workers:
            t = threading.Thread(target=_worker,
                                 args=(weakref.ref(self, weakref_cb),
                                       self._work_queue))
            t.daemon = True
            t.start()
            self._threads.add(t)
            _threads_queues[t] = self._work_queue

    def shutdown(self, wait=True):
        with self._shutdown_lock:
            self._shutdown = True
            self._work_queue.put(None)
        if wait:
            for t in self._threads:
                t.join(sys.maxint)
    shutdown.__doc__ = _base.Executor.shutdown.__doc__
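# Illustrative sketch: threads are created lazily by _adjust_thread_count(),
# one per submit() until max_workers is reached (`fetch` and `urls` are
# placeholders):
#
#     from concurrent.futures import ThreadPoolExecutor
#
#     executor = ThreadPoolExecutor(max_workers=8)
#     futures = [executor.submit(fetch, url) for url in urls]
#     executor.shutdown(wait=True)      # blocks until every worker finishes
#     results = [f.result() for f in futures]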
@ -1,4 +1,4 @@
-__version__ = '0.6.5'
+__version__ = '0.7.1'
 from .lock import Lock  # noqa
 from .lock import NeedRegenerationException  # noqa
@ -1,2 +1,8 @@
-import dict, geezip, httpfake, io, json, rar, which
+from .dict import *
+from .geezip import *
+from .httpfake import *
+from .io import *
+from .json import *
+from .rar import *
+from .which import *
@ -1,5 +1,5 @@
 # coding=utf-8

-from registry import registry
+from .registry import registry
-from mods import hearing_impaired, ocr_fixes, fps, offset, common, color
+from .mods import hearing_impaired, ocr_fixes, fps, offset, common, color
-from main import SubtitleModifications, SubMod
+from .main import SubtitleModifications, SubMod
@ -1,3 +1,3 @@
 # coding=utf-8

-from data import data
+from .data import data
File diff suppressed because one or more lines are too long
Some files were not shown because too many files have changed in this diff