feat: new alternate syntax and switch to black + isort (yeah, maybe not the best time, but that is done).

Romain Dorgueil
2018-07-29 18:21:56 +01:00
parent 3094e43f9f
commit 89dda0dca6
123 changed files with 1672 additions and 1640 deletions
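For readers landing on this commit: the "new alternate syntax" in the message refers to building graphs by chaining nodes off a cursor with the >> operator, instead of (or in addition to) Graph.add_chain(), as the template and example changes below show. A minimal, hedged sketch of what a pipeline using that syntax could look like — extract, transform and load here are placeholder callables, not part of this commit:

import bonobo

def extract():
    # placeholder source node
    yield "hello"
    yield "world"

def transform(value):
    # placeholder transformation node
    return value.upper()

def load(value):
    # placeholder sink node
    print(value)

def get_graph(**options):
    graph = bonobo.Graph()
    # New alternate syntax introduced by this commit: chain nodes off a cursor.
    graph.get_cursor() >> extract >> transform >> load
    return graph

if __name__ == "__main__":
    bonobo.run(get_graph())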

File diff suppressed because one or more lines are too long

View File

@@ -1,4 +1,4 @@
 from bonobo.commands import entrypoint
-if __name__ == '__main__':
+if __name__ == "__main__":
     entrypoint()

View File

@@ -45,14 +45,17 @@ def run(graph, *, plugins=None, services=None, strategy=None):
     plugins = plugins or []
     from bonobo import settings
     settings.check()
     if not settings.QUIET.get():  # pragma: no cover
         if _is_interactive_console():
             import mondrian
             mondrian.setup(excepthook=True)
             from bonobo.plugins.console import ConsoleOutputPlugin
             if ConsoleOutputPlugin not in plugins:
                 plugins.append(ConsoleOutputPlugin)
@@ -61,20 +64,23 @@ def run(graph, *, plugins=None, services=None, strategy=None):
                 from bonobo.contrib.jupyter import JupyterOutputPlugin
             except ImportError:
                 import logging
                 logging.warning(
                     'Failed to load jupyter widget. Easiest way is to install the optional "jupyter" '
-                    'dependencies with «pip install bonobo[jupyter]», but you can also install a specific '
-                    'version by yourself.'
+                    "dependencies with «pip install bonobo[jupyter]», but you can also install a specific "
+                    "version by yourself."
                 )
             else:
                 if JupyterOutputPlugin not in plugins:
                     plugins.append(JupyterOutputPlugin)
     import logging
     logging.getLogger().setLevel(settings.LOGGING_LEVEL.get())
     strategy = create_strategy(strategy)
     from bonobo.util.errors import sweeten_errors
     with sweeten_errors():
         return strategy.execute(graph, plugins=plugins, services=services)
@@ -83,15 +89,15 @@ def _inspect_as_graph(graph):
     return graph._repr_dot_()
-_inspect_formats = {'graph': _inspect_as_graph}
+_inspect_formats = {"graph": _inspect_as_graph}
 @api.register_graph
 def inspect(graph, *, plugins=None, services=None, strategy=None, format):
     if not format in _inspect_formats:
         raise NotImplementedError(
-            'Output format {} not implemented. Choices are: {}.'.format(
-                format, ', '.join(sorted(_inspect_formats.keys()))
+            "Output format {} not implemented. Choices are: {}.".format(
+                format, ", ".join(sorted(_inspect_formats.keys()))
             )
         )
     print(_inspect_formats[format](graph))
@@ -160,20 +166,18 @@ api.register_group(
 )
 # registry
-api.register_group(
-    create_reader,
-    create_writer,
-)
+api.register_group(create_reader, create_writer)
 def _is_interactive_console():
     import sys
     return sys.stdout.isatty()
 def _is_jupyter_notebook():
     try:
-        return get_ipython().__class__.__name__ == 'ZMQInteractiveShell'
+        return get_ipython().__class__.__name__ == "ZMQInteractiveShell"
     except NameError:
         return False
@@ -182,7 +186,8 @@ def _is_jupyter_notebook():
 def get_examples_path(*pathsegments):
     import os
     import pathlib
-    return str(pathlib.Path(os.path.dirname(__file__), 'examples', *pathsegments))
+    return str(pathlib.Path(os.path.dirname(__file__), "examples", *pathsegments))
 @api.register

View File

@@ -1 +1 @@
-__version__ = '0.6.2'
+__version__ = "0.6.2"

View File

@@ -19,9 +19,9 @@ def entrypoint(args=None):
     logger.setLevel(settings.LOGGING_LEVEL.get())
     parser = argparse.ArgumentParser()
-    parser.add_argument('--debug', '-D', action='store_true')
-    subparsers = parser.add_subparsers(dest='command')
+    parser.add_argument("--debug", "-D", action="store_true")
+    subparsers = parser.add_subparsers(dest="command")
     subparsers.required = True
     commands = {}
@@ -39,23 +39,24 @@ def entrypoint(args=None):
             # old school, function based.
             commands[ext.name] = ext.plugin(parser)
         except Exception:
-            logger.exception('Error while loading command {}.'.format(ext.name))
+            logger.exception("Error while loading command {}.".format(ext.name))
     from stevedore import ExtensionManager
-    mgr = ExtensionManager(namespace='bonobo.commands')
+    mgr = ExtensionManager(namespace="bonobo.commands")
     mgr.map(register_extension)
     parsed_args = parser.parse_args(args).__dict__
-    if parsed_args.pop('debug', False):
+    if parsed_args.pop("debug", False):
         settings.DEBUG.set(True)
         settings.LOGGING_LEVEL.set(logging.DEBUG)
         logger.setLevel(settings.LOGGING_LEVEL.get())
-    logger.debug('Command: ' + parsed_args['command'] + ' Arguments: ' + repr(parsed_args))
+    logger.debug("Command: " + parsed_args["command"] + " Arguments: " + repr(parsed_args))
     # Get command handler, execute, rince.
-    command = commands[parsed_args.pop('command')]
+    command = commands[parsed_args.pop("command")]
     command(**parsed_args)
     return 0

View File

@@ -33,7 +33,7 @@ class BaseCommand:
         """
         The actual logic of the command. Subclasses must implement this method.
         """
-        raise NotImplementedError('Subclasses of BaseCommand must provide a handle() method')
+        raise NotImplementedError("Subclasses of BaseCommand must provide a handle() method")
 class BaseGraphCommand(BaseCommand):
@@ -48,8 +48,8 @@ class BaseGraphCommand(BaseCommand):
     def add_arguments(self, parser):
         # target arguments (cannot provide both).
         source_group = parser.add_mutually_exclusive_group(required=self.required)
-        source_group.add_argument('file', nargs='?', type=str)
-        source_group.add_argument('-m', dest='mod', type=str)
+        source_group.add_argument("file", nargs="?", type=str)
+        source_group.add_argument("-m", dest="mod", type=str)
         # add arguments to enforce system environment.
         parser = get_argument_parser(parser)
@@ -66,7 +66,7 @@ class BaseGraphCommand(BaseCommand):
     def do_handle(self, graph, **options):
         if not self.handler:
-            raise RuntimeError('{} has no handler defined.'.format(get_name(self)))
+            raise RuntimeError("{} has no handler defined.".format(get_name(self)))
         return self.handler(graph, **options)
     @contextmanager
@@ -87,20 +87,20 @@ class BaseGraphCommand(BaseCommand):
                 sys.argv = [mod]
                 self._run_module(mod)
             else:
-                raise RuntimeError('No target provided.')
+                raise RuntimeError("No target provided.")
         finally:
             sys.argv = _argv
         if _graph is None:
-            raise RuntimeError('Could not find graph.')
+            raise RuntimeError("Could not find graph.")
         yield _graph, _graph_execution_options, options
     def _run_path(self, file):
-        return runpy.run_path(file, run_name='__main__')
+        return runpy.run_path(file, run_name="__main__")
     def _run_module(self, mod):
-        return runpy.run_module(mod, run_name='__main__')
+        return runpy.run_module(mod, run_name="__main__")
     @contextmanager

View File

@@ -6,82 +6,75 @@ from bonobo.util.resolvers import _resolve_options, _resolve_transformations
 class ConvertCommand(BaseCommand):
     def add_arguments(self, parser):
-        parser.add_argument('input_filename', help='Input filename.')
-        parser.add_argument('output_filename', help='Output filename.')
+        parser.add_argument("input_filename", help="Input filename.")
+        parser.add_argument("output_filename", help="Output filename.")
         parser.add_argument(
-            '--' + READER,
-            '-r',
-            help='Choose the reader factory if it cannot be detected from extension, or if detection is wrong.'
+            "--" + READER,
+            "-r",
+            help="Choose the reader factory if it cannot be detected from extension, or if detection is wrong.",
         )
         parser.add_argument(
-            '--' + WRITER,
-            '-w',
-            help=
-            'Choose the writer factory if it cannot be detected from extension, or if detection is wrong (use - for console pretty print).'
+            "--" + WRITER,
+            "-w",
+            help="Choose the writer factory if it cannot be detected from extension, or if detection is wrong (use - for console pretty print).",
         )
-        parser.add_argument(
-            '--limit',
-            '-l',
-            type=int,
-            help='Adds a Limit() after the reader instance.',
-            default=None,
-        )
+        parser.add_argument("--limit", "-l", type=int, help="Adds a Limit() after the reader instance.", default=None)
         parser.add_argument(
-            '--transformation',
-            '-t',
-            dest='transformation',
-            action='append',
-            help='Add a transformation between input and output (can be used multiple times, order is preserved).',
+            "--transformation",
+            "-t",
+            dest="transformation",
+            action="append",
+            help="Add a transformation between input and output (can be used multiple times, order is preserved).",
         )
         parser.add_argument(
-            '--option',
-            '-O',
-            dest='option',
-            action='append',
+            "--option",
+            "-O",
+            dest="option",
+            action="append",
             help='Add a named option to both reader and writer factories (i.e. foo="bar").',
         )
         parser.add_argument(
-            '--' + READER + '-option',
-            '-' + READER[0].upper(),
-            dest=READER + '_option',
-            action='append',
-            help='Add a named option to the reader factory.',
+            "--" + READER + "-option",
+            "-" + READER[0].upper(),
+            dest=READER + "_option",
+            action="append",
+            help="Add a named option to the reader factory.",
         )
         parser.add_argument(
-            '--' + WRITER + '-option',
-            '-' + WRITER[0].upper(),
-            dest=WRITER + '_option',
-            action='append',
-            help='Add a named option to the writer factory.',
+            "--" + WRITER + "-option",
+            "-" + WRITER[0].upper(),
+            dest=WRITER + "_option",
+            action="append",
+            help="Add a named option to the writer factory.",
         )
     def handle(
         self,
         input_filename,
         output_filename,
         reader=None,
         reader_option=None,
         writer=None,
         writer_option=None,
         option=None,
         limit=None,
         transformation=None,
     ):
         reader_factory = default_registry.get_reader_factory_for(input_filename, format=reader)
         reader_kwargs = _resolve_options((option or []) + (reader_option or []))
-        if output_filename == '-':
+        if output_filename == "-":
             writer_factory = bonobo.PrettyPrinter
             writer_args = ()
         else:
             writer_factory = default_registry.get_writer_factory_for(output_filename, format=writer)
-            writer_args = (output_filename, )
+            writer_args = (output_filename,)
         writer_kwargs = _resolve_options((option or []) + (writer_option or []))
         transformations = ()
         if limit:
-            transformations += (bonobo.Limit(limit), )
+            transformations += (bonobo.Limit(limit),)
         transformations += _resolve_transformations(transformation)
@@ -92,8 +85,4 @@ class ConvertCommand(BaseCommand):
             writer_factory(*writer_args, **writer_kwargs),
         )
-        return bonobo.run(
-            graph, services={
-                'fs': bonobo.open_fs(),
-            }
-        )
+        return bonobo.run(graph, services={"fs": bonobo.open_fs()})

View File

@@ -6,28 +6,28 @@ import requests
 import bonobo
 from bonobo.commands import BaseCommand
-EXAMPLES_BASE_URL = 'https://raw.githubusercontent.com/python-bonobo/bonobo/master/bonobo/examples/'
+EXAMPLES_BASE_URL = "https://raw.githubusercontent.com/python-bonobo/bonobo/master/bonobo/examples/"
 """The URL to our git repository, in raw mode."""
 class DownloadCommand(BaseCommand):
     def handle(self, *, path, **options):
-        if not path.startswith('examples'):
-            raise ValueError('Download command currently supports examples only')
+        if not path.startswith("examples"):
+            raise ValueError("Download command currently supports examples only")
-        examples_path = re.sub('^examples/', '', path)
+        examples_path = re.sub("^examples/", "", path)
         output_path = bonobo.get_examples_path(examples_path)
-        with _open_url(EXAMPLES_BASE_URL + examples_path) as response, open(output_path, 'wb') as fout:
+        with _open_url(EXAMPLES_BASE_URL + examples_path) as response, open(output_path, "wb") as fout:
             for chunk in response.iter_content(io.DEFAULT_BUFFER_SIZE):
                 fout.write(chunk)
-        self.logger.info('Download saved to {}'.format(output_path))
+        self.logger.info("Download saved to {}".format(output_path))
     def add_arguments(self, parser):
-        parser.add_argument('path', help='The relative path of the thing to download.')
+        parser.add_argument("path", help="The relative path of the thing to download.")
 def _open_url(url):
     """Open a HTTP connection to the URL and return a file-like object."""
     response = requests.get(url, stream=True)
     if response.status_code != 200:
-        raise IOError('Unable to download {}, HTTP {}'.format(url, response.status_code))
+        raise IOError("Unable to download {}, HTTP {}".format(url, response.status_code))
     return response

View File

@@ -1,23 +1,23 @@
 from bonobo.commands import BaseCommand
 all_examples = (
-    'clock',
-    'datasets',
-    'environ',
-    'files.csv_handlers',
-    'files.json_handlers',
-    'files.pickle_handlers',
-    'files.text_handlers',
-    'types',
+    "clock",
+    "datasets",
+    "environ",
+    "files.csv_handlers",
+    "files.json_handlers",
+    "files.pickle_handlers",
+    "files.text_handlers",
+    "types",
 )
 class ExamplesCommand(BaseCommand):
     def handle(self):
-        print('You can run the following examples:')
+        print("You can run the following examples:")
         print()
         for example in all_examples:
-            print(' $ python -m bonobo.examples.{}'.format(example))
+            print(" $ python -m bonobo.examples.{}".format(example))
         print()
     def add_arguments(self, parser):

View File

@@ -6,67 +6,67 @@ from bonobo.commands import BaseCommand
 class InitCommand(BaseCommand):
-    TEMPLATES = {'bare', 'default'}
-    TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), 'templates')
+    TEMPLATES = {"bare", "default"}
+    TEMPLATES_PATH = os.path.join(os.path.dirname(__file__), "templates")
     def add_arguments(self, parser):
-        parser.add_argument('filename')
-        parser.add_argument('--force', '-f', default=False, action='store_true')
+        parser.add_argument("filename")
+        parser.add_argument("--force", "-f", default=False, action="store_true")
         target_group = parser.add_mutually_exclusive_group(required=False)
-        target_group.add_argument('--template', '-t', choices=self.TEMPLATES, default='default')
-        target_group.add_argument('--package', '-p', action='store_true', default=False)
+        target_group.add_argument("--template", "-t", choices=self.TEMPLATES, default="default")
+        target_group.add_argument("--package", "-p", action="store_true", default=False)
     def create_file_from_template(self, *, template, filename):
         template_name = template
         name, ext = os.path.splitext(filename)
-        if ext != '.py':
+        if ext != ".py":
             raise ValueError('Filenames should end with ".py".')
         loader = FileSystemLoader(self.TEMPLATES_PATH)
         env = Environment(loader=loader)
-        template = env.get_template(template_name + '.py-tpl')
-        with open(filename, 'w+') as f:
+        template = env.get_template(template_name + ".py-tpl")
+        with open(filename, "w+") as f:
             f.write(template.render(name=name))
-        self.logger.info('Generated {} using template {!r}.'.format(filename, template_name))
+        self.logger.info("Generated {} using template {!r}.".format(filename, template_name))
     def create_package(self, *, filename):
         name, ext = os.path.splitext(filename)
-        if ext != '':
-            raise ValueError('Package names should not have an extension.')
+        if ext != "":
+            raise ValueError("Package names should not have an extension.")
         try:
             import medikit.commands
         except ImportError as exc:
             raise ImportError(
-                'To initialize a package, you need to install medikit (pip install --upgrade medikit).'
+                "To initialize a package, you need to install medikit (pip install --upgrade medikit)."
             ) from exc
         package_name = os.path.basename(filename)
         medikit.commands.handle_init(
-            os.path.join(os.getcwd(), filename, 'Projectfile'), name=package_name, requirements=['bonobo']
+            os.path.join(os.getcwd(), filename, "Projectfile"), name=package_name, requirements=["bonobo"]
         )
         self.logger.info('Generated "{}" package with medikit.'.format(package_name))
-        self.create_file_from_template(template='default', filename=os.path.join(filename, package_name, '__main__.py'))
+        self.create_file_from_template(template="default", filename=os.path.join(filename, package_name, "__main__.py"))
         print('Your "{}" package has been created.'.format(package_name))
         print()
-        print('Install it...')
+        print("Install it...")
         print()
-        print(' pip install --editable {}'.format(filename))
+        print(" pip install --editable {}".format(filename))
         print()
-        print('Then maybe run the example...')
+        print("Then maybe run the example...")
         print()
-        print(' python -m {}'.format(package_name))
+        print(" python -m {}".format(package_name))
         print()
-        print('Enjoy!')
+        print("Enjoy!")
     def handle(self, *, template, filename, package=False, force=False):
         if os.path.exists(filename) and not force:
-            raise FileExistsError('Target filename already exists, use --force to override.')
+            raise FileExistsError("Target filename already exists, use --force to override.")
         if package:
             self.create_package(filename=filename)

View File

@@ -7,9 +7,9 @@ class InspectCommand(BaseGraphCommand):
     def add_arguments(self, parser):
         super(InspectCommand, self).add_arguments(parser)
-        parser.add_argument('--graph', '-g', dest='format', action='store_const', const='graph')
+        parser.add_argument("--graph", "-g", dest="format", action="store_const", const="graph")
     def parse_options(self, **options):
-        if not options.get('format'):
-            raise RuntimeError('You must provide a format (try --graph).')
+        if not options.get("format"):
+            raise RuntimeError("You must provide a format (try --graph).")
         return options

View File

@@ -12,13 +12,14 @@ class RunCommand(BaseGraphCommand):
         super(RunCommand, self).add_arguments(parser)
         verbosity_group = parser.add_mutually_exclusive_group()
-        verbosity_group.add_argument('--quiet', '-q', action='store_true')
-        verbosity_group.add_argument('--verbose', '-v', action='store_true')
-        parser.add_argument('--install', '-I', action='store_true')
+        verbosity_group.add_argument("--quiet", "-q", action="store_true")
+        verbosity_group.add_argument("--verbose", "-v", action="store_true")
+        parser.add_argument("--install", "-I", action="store_true")
     def parse_options(self, *, quiet=False, verbose=False, install=False, **options):
         from bonobo import settings
         settings.QUIET.set_if_true(quiet)
         settings.DEBUG.set_if_true(verbose)
         self.install = install
@@ -28,9 +29,9 @@ class RunCommand(BaseGraphCommand):
         # add install logic
         if self.install:
             if os.path.isdir(file):
-                requirements = os.path.join(file, 'requirements.txt')
+                requirements = os.path.join(file, "requirements.txt")
             else:
-                requirements = os.path.join(os.path.dirname(file), 'requirements.txt')
+                requirements = os.path.join(os.path.dirname(file), "requirements.txt")
             _install_requirements(requirements)
         return super()._run_path(file)
@@ -38,7 +39,7 @@ class RunCommand(BaseGraphCommand):
     def _run_module(self, mod):
         # install not implemented for a module, not sure it even make sense.
         if self.install:
-            raise RuntimeError('--install behaviour when running a module is not defined.')
+            raise RuntimeError("--install behaviour when running a module is not defined.")
         return super()._run_module(mod)
@@ -59,10 +60,11 @@ def _install_requirements(requirements):
     import importlib
     import pip
-    pip.main(['install', '-r', requirements])
+    pip.main(["install", "-r", requirements])
     # Some shenanigans to be sure everything is importable after this, especially .egg-link files which
     # are referenced in *.pth files and apparently loaded by site.py at some magic bootstrap moment of the
     # python interpreter.
     pip.utils.pkg_resources = importlib.reload(pip.utils.pkg_resources)
     import site
     importlib.reload(site)

View File

@@ -3,6 +3,7 @@ import bonobo
 def get_graph(**options):
     graph = bonobo.Graph()
+    graph.get_cursor() >> ...
     return graph

View File

@@ -27,7 +27,7 @@ def get_graph(**options):
     """
     graph = bonobo.Graph()
-    graph.add_chain(extract, transform, load)
+    graph.get_cursor() >> extract >> transform >> load
     return graph
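The hunk above is the clearest before/after picture of the alternate syntax. Based solely on this diff, either form builds the same three-node chain, and add_chain() is not removed by this commit (it still appears in the updated examples below), only supplemented. A hedged side-by-side sketch, with extract, transform and load standing in for any callables:

graph = bonobo.Graph()

# before this commit
graph.add_chain(extract, transform, load)

# after this commit, alternate syntax
graph.get_cursor() >> extract >> transform >> load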

View File

@@ -9,15 +9,15 @@ def get_versions(*, all=False, quiet=None):
     if all:
         for name in sorted(bonobo_packages):
-            if name != 'bonobo':
+            if name != "bonobo":
                 try:
-                    mod = __import__(name.replace('-', '_'))
+                    mod = __import__(name.replace("-", "_"))
                     try:
                         yield _format_version(mod, name=name, quiet=quiet)
                     except Exception as exc:
-                        yield '{} ({})'.format(name, exc)
+                        yield "{} ({})".format(name, exc)
                 except ImportError as exc:
-                    yield '{} is not importable ({}).'.format(name, exc)
+                    yield "{} is not importable ({}).".format(name, exc)
 class VersionCommand(BaseCommand):
@@ -26,23 +26,24 @@ class VersionCommand(BaseCommand):
             print(line)
     def add_arguments(self, parser):
-        parser.add_argument('--all', '-a', action='store_true')
-        parser.add_argument('--quiet', '-q', action='count')
+        parser.add_argument("--all", "-a", action="store_true")
+        parser.add_argument("--quiet", "-q", action="count")
 def _format_version(mod, *, name=None, quiet=False):
     from bonobo.util.pkgs import bonobo_packages
     args = {
-        'name': name or mod.__name__,
-        'version': mod.__version__,
-        'location': bonobo_packages[name or mod.__name__].location
+        "name": name or mod.__name__,
+        "version": mod.__version__,
+        "location": bonobo_packages[name or mod.__name__].location,
     }
     if not quiet:
-        return '{name} v.{version} (in {location})'.format(**args)
+        return "{name} v.{version} (in {location})".format(**args)
     if quiet < 2:
-        return '{name} {version}'.format(**args)
+        return "{name} {version}".format(**args)
     if quiet < 3:
-        return '{version}'.format(**args)
+        return "{version}".format(**args)
-    raise RuntimeError('Hard to be so quiet...')
+    raise RuntimeError("Hard to be so quiet...")

View File

@@ -11,23 +11,23 @@ from bonobo.config.processors import ContextProcessor, use_context, use_context_
 from bonobo.config.services import Container, Exclusive, Service, use, create_container
 from bonobo.util import deprecated_alias
-requires = deprecated_alias('requires', use)
+requires = deprecated_alias("requires", use)
 # Bonobo's Config API
 __all__ = [
-    'Configurable',
-    'Container',
-    'ContextProcessor',
-    'Exclusive',
-    'Method',
-    'Option',
-    'Service',
-    'create_container',
-    'requires',
-    'transformation_factory',
-    'use',
-    'use_context',
-    'use_context_processor',
-    'use_no_input',
-    'use_raw_input',
+    "Configurable",
+    "Container",
+    "ContextProcessor",
+    "Exclusive",
+    "Method",
+    "Option",
+    "Service",
+    "create_container",
+    "requires",
+    "transformation_factory",
+    "use",
+    "use_context",
+    "use_context_processor",
+    "use_no_input",
+    "use_raw_input",
 ]

View File

@@ -1,9 +1,7 @@
 from bonobo.errors import AbstractError
 from bonobo.util import get_name, iscontextprocessor, isoption, sortedlist
-__all__ = [
-    'Configurable',
-]
+__all__ = ["Configurable"]
 get_creation_counter = lambda v: v._creation_counter
@@ -42,12 +40,12 @@ class ConfigurableMeta(type):
         for _positional, _counter, _name, _value in cls.__options:
             _param = _name
             if _value.type:
-                _param = get_name(_value.type) + ' ' + _param
-            prefix = ':param {}: '.format(_param)
-            for lineno, line in enumerate((_value.__doc__ or '').split('\n')):
-                _options_doc.append((' ' * len(prefix) if lineno else prefix) + line)
-        cls.__doc__ = '\n\n'.join(map(str.strip, filter(None, (cls.__doc__, '\n'.join(_options_doc)))))
+                _param = get_name(_value.type) + " " + _param
+            prefix = ":param {}: ".format(_param)
+            for lineno, line in enumerate((_value.__doc__ or "").split("\n")):
+                _options_doc.append((" " * len(prefix) if lineno else prefix) + line)
+        cls.__doc__ = "\n\n".join(map(str.strip, filter(None, (cls.__doc__, "\n".join(_options_doc)))))
     @property
     def __options__(cls):
@@ -64,10 +62,7 @@ class ConfigurableMeta(type):
         return cls.__processors_cache
     def __repr__(self):
-        return ' '.join((
-            '<Configurable',
-            super(ConfigurableMeta, self).__repr__().split(' ', 1)[1],
-        ))
+        return " ".join(("<Configurable", super(ConfigurableMeta, self).__repr__().split(" ", 1)[1]))
 try:
@@ -154,9 +149,11 @@ class Configurable(metaclass=ConfigurableMeta):
         extraneous = set(kwargs.keys()) - (set(next(zip(*options))) if len(options) else set())
         if len(extraneous):
             raise TypeError(
-                '{}() got {} unexpected option{}: {}.'.format(
-                    cls.__name__, len(extraneous), 's'
-                    if len(extraneous) > 1 else '', ', '.join(map(repr, sorted(extraneous)))
+                "{}() got {} unexpected option{}: {}.".format(
+                    cls.__name__,
+                    len(extraneous),
+                    "s" if len(extraneous) > 1 else "",
+                    ", ".join(map(repr, sorted(extraneous))),
                 )
             )
@@ -165,9 +162,11 @@ class Configurable(metaclass=ConfigurableMeta):
         if len(missing):
             if _final:
                 raise TypeError(
-                    '{}() missing {} required option{}: {}.'.format(
-                        cls.__name__, len(missing), 's'
-                        if len(missing) > 1 else '', ', '.join(map(repr, sorted(missing)))
+                    "{}() missing {} required option{}: {}.".format(
+                        cls.__name__,
+                        len(missing),
+                        "s" if len(missing) > 1 else "",
+                        ", ".join(map(repr, sorted(missing))),
                     )
                 )
             return PartiallyConfigured(cls, *args, **kwargs)
@@ -196,7 +195,7 @@ class Configurable(metaclass=ConfigurableMeta):
                 break
             if name in self._options_values:
-                raise ValueError('Already got a value for option {}'.format(name))
+                raise ValueError("Already got a value for option {}".format(name))
             setattr(self, name, args[position])
             position += 1

View File

@@ -6,8 +6,8 @@ def transformation_factory(f):
     @functools.wraps(f)
     def _transformation_factory(*args, **kwargs):
         retval = f(*args, **kwargs)
-        retval.__name__ = f.__name__ + '({})'.format(
-            ', '.join(itertools.chain(map(repr, args), ('{}={!r}'.format(k, v) for k, v in kwargs.items())))
+        retval.__name__ = f.__name__ + "({})".format(
+            ", ".join(itertools.chain(map(repr, args), ("{}={!r}".format(k, v) for k, v in kwargs.items())))
         )
         return retval

View File

@@ -66,9 +66,9 @@ class Option:
         # Docstring formating
         self.__doc__ = __doc__ or None
         if self.__doc__:
-            self.__doc__ = textwrap.dedent(self.__doc__.strip('\n')).strip()
+            self.__doc__ = textwrap.dedent(self.__doc__.strip("\n")).strip()
             if default:
-                self.__doc__ += '\n\nDefault: {!r}'.format(default)
+                self.__doc__ += "\n\nDefault: {!r}".format(default)
         # This hack is necessary for python3.5
         self._creation_counter = Option._creation_counter
@@ -88,13 +88,13 @@ class Option:
         inst._options_values[self.name] = self.clean(value)
     def __repr__(self):
-        return '<{positional}{typename} {type}{name} default={default!r}{required}>'.format(
+        return "<{positional}{typename} {type}{name} default={default!r}{required}>".format(
             typename=type(self).__name__,
-            type='({})'.format(self.type) if istype(self.type) else '',
+            type="({})".format(self.type) if istype(self.type) else "",
             name=self.name,
-            positional='*' if self.positional else '**',
+            positional="*" if self.positional else "**",
             default=self.default,
-            required=' (required)' if self.required else '',
+            required=" (required)" if self.required else "",
         )
     def clean(self, value):
@@ -106,15 +106,16 @@ class Option:
 class RemovedOption(Option):
     def __init__(self, *args, value, **kwargs):
-        kwargs['required'] = False
+        kwargs["required"] = False
         super(RemovedOption, self).__init__(*args, **kwargs)
         self.value = value
     def clean(self, value):
         if value != self.value:
             raise ValueError(
-                'Removed options cannot change value, {!r} must now be {!r} (and you should remove setting the value explicitely, as it is deprecated and will be removed quite soon.'.
-                format(self.name, self.value)
+                "Removed options cannot change value, {!r} must now be {!r} (and you should remove setting the value explicitely, as it is deprecated and will be removed quite soon.".format(
+                    self.name, self.value
+                )
             )
         return self.value
@@ -129,12 +130,12 @@ class RenamedOption(Option):
     def __get__(self, instance, owner):
         raise ValueError(
-            'Trying to get value from renamed option {}, try getting {} instead.'.format(self.name, self.target)
+            "Trying to get value from renamed option {}, try getting {} instead.".format(self.name, self.target)
         )
     def clean(self, value):
         raise ValueError(
-            'Trying to set value of renamed option {}, try setting {} instead.'.format(self.name, self.target)
+            "Trying to set value of renamed option {}, try setting {} instead.".format(self.name, self.target)
         )
@@ -182,7 +183,7 @@ class Method(Option):
         # If a callable is provided as default, then use self as if it was used as a decorator
         if default is not None:
             if not callable(default):
-                raise ValueError('Method defaults should be callable, if provided.')
+                raise ValueError("Method defaults should be callable, if provided.")
             self(default)
     def __get__(self, inst, type_):
@@ -194,17 +195,15 @@ class Method(Option):
     def __set__(self, inst, value):
         if not callable(value):
             raise TypeError(
-                'Option {!r} ({}) is expecting a callable value, got {!r} object: {!r}.'.format(
-                    self.name,
-                    type(self).__name__,
-                    type(value).__name__, value
+                "Option {!r} ({}) is expecting a callable value, got {!r} object: {!r}.".format(
+                    self.name, type(self).__name__, type(value).__name__, value
                 )
             )
         inst._options_values[self.name] = self.type(value) if self.type else value
     def __call__(self, impl):
         if self.default:
-            raise RuntimeError('Can only be used once as a decorator.')
+            raise RuntimeError("Can only be used once as a decorator.")
         self.default = impl
         self.required = False
         return self

View File

@@ -53,7 +53,7 @@ class ContextProcessor(Option):
         self.name = self.__name__
     def __repr__(self):
-        return repr(self.func).replace('<function', '<{}'.format(type(self).__name__))
+        return repr(self.func).replace("<function", "<{}".format(type(self).__name__))
     def __call__(self, *args, **kwargs):
         return self.func(*args, **kwargs)
@@ -74,7 +74,7 @@ class ContextCurrifier:
         self.wrapped = wrapped
         self.args = args
         self.kwargs = kwargs
-        self.format = getattr(wrapped, '__input_format__', _args)
+        self.format = getattr(wrapped, "__input_format__", _args)
         self._stack, self._stack_values = None, None
     def __iter__(self):
@@ -91,30 +91,32 @@ class ContextCurrifier:
             return bind(*self.args, _input, **self.kwargs)
         if self.format is _none:
             return bind(*self.args, **self.kwargs)
-        raise NotImplementedError('Invalid format {!r}.'.format(self.format))
+        raise NotImplementedError("Invalid format {!r}.".format(self.format))
     def __call__(self, _input):
         if not callable(self.wrapped):
             if isinstance(self.wrapped, Iterable):
                 return self.__iter__()
-            raise UnrecoverableTypeError('Uncallable node {}'.format(self.wrapped))
+            raise UnrecoverableTypeError("Uncallable node {}".format(self.wrapped))
         try:
             bound = self._bind(_input)
         except TypeError as exc:
-            raise UnrecoverableTypeError((
-                'Input of {wrapped!r} does not bind to the node signature.\n'
-                'Args: {args}\n'
-                'Input: {input}\n'
-                'Kwargs: {kwargs}\n'
-                'Signature: {sig}'
-            ).format(
-                wrapped=self.wrapped, args=self.args, input=_input, kwargs=self.kwargs, sig=signature(self.wrapped)
-            )) from exc
+            raise UnrecoverableTypeError(
+                (
+                    "Input of {wrapped!r} does not bind to the node signature.\n"
+                    "Args: {args}\n"
+                    "Input: {input}\n"
+                    "Kwargs: {kwargs}\n"
+                    "Signature: {sig}"
+                ).format(
+                    wrapped=self.wrapped, args=self.args, input=_input, kwargs=self.kwargs, sig=signature(self.wrapped)
+                )
+            ) from exc
         return self.wrapped(*bound.args, **bound.kwargs)
     def setup(self, *context):
         if self._stack is not None:
-            raise RuntimeError('Cannot setup context currification twice.')
+            raise RuntimeError("Cannot setup context currification twice.")
         self._stack, self._stack_values = list(), list()
         for processor in resolve_processors(self.wrapped):
@@ -136,7 +138,7 @@ class ContextCurrifier:
                 pass
             else:
                 # No error ? We should have had StopIteration ...
-                raise RuntimeError('Context processors should not yield more than once.')
+                raise RuntimeError("Context processors should not yield more than once.")
         self._stack, self._stack_values = None, None
     @contextmanager
@@ -164,7 +166,7 @@ def resolve_processors(mixed):
     yield from ()
-get_context_processors = deprecated_alias('get_context_processors', resolve_processors)
+get_context_processors = deprecated_alias("get_context_processors", resolve_processors)
 def use_context(f):
@@ -192,11 +194,11 @@ def use_context_processor(context_processor):
 def _use_input_format(input_format):
     if input_format not in INPUT_FORMATS:
         raise ValueError(
-            'Invalid input format {!r}. Choices: {}'.format(input_format, ', '.join(sorted(INPUT_FORMATS)))
+            "Invalid input format {!r}. Choices: {}".format(input_format, ", ".join(sorted(INPUT_FORMATS)))
         )
     def _set_input_format(f):
-        setattr(f, '__input_format__', input_format)
+        setattr(f, "__input_format__", input_format)
         return f
     return _set_input_format

View File

@@ -11,7 +11,7 @@ _service_name_re = re.compile(r"^[^\d\W]\w*(:?\.[^\d\W]\w*)*$", re.UNICODE)
 def validate_service_name(name):
     if not _service_name_re.match(name):
-        raise ValueError('Invalid service name {!r}.'.format(name))
+        raise ValueError("Invalid service name {!r}.".format(name))
     return name
@@ -66,7 +66,7 @@ class Service(Option):
 class Container(dict):
     def __new__(cls, *args, **kwargs):
         if len(args) == 1:
-            assert not len(kwargs), 'only one usage at a time, my dear.'
+            assert not len(kwargs), "only one usage at a time, my dear."
             if not (args[0]):
                 return super().__new__(cls)
             if isinstance(args[0], cls):
@@ -86,7 +86,7 @@ class Container(dict):
             if default:
                 return default
             raise MissingServiceImplementationError(
-                'Cannot resolve service {!r} using provided service collection.'.format(name)
+                "Cannot resolve service {!r} using provided service collection.".format(name)
             )
         value = super().get(name)
         # XXX this is not documented and can lead to errors.
@@ -108,13 +108,15 @@ def create_container(services=None, factory=Container):
     """
     container = factory(services) if services else factory()
-    if not 'fs' in container:
+    if not "fs" in container:
         import bonobo
-        container.setdefault('fs', bonobo.open_fs())
-    if not 'http' in container:
+        container.setdefault("fs", bonobo.open_fs())
+    if not "http" in container:
         import requests
-        container.setdefault('http', requests)
+        container.setdefault("http", requests)
     return container
@@ -137,6 +139,7 @@ class Exclusive(ContextDecorator):
     ensure that.
     """
     _locks = {}
     def __init__(self, wrapped):

View File

@@ -23,8 +23,8 @@
 from bonobo.structs.tokens import Token
 from bonobo.util.envelopes import UnchangedEnvelope
-BEGIN = Token('Begin')
-END = Token('End')
+BEGIN = Token("Begin")
+END = Token("End")
 NOT_MODIFIED = UnchangedEnvelope()

View File

@@ -9,7 +9,4 @@ This module contains all tools for Bonobo and Django to interract nicely.
 from .utils import create_or_update
 from .commands import ETLCommand
-__all__ = [
-    'ETLCommand',
-    'create_or_update',
-]
+__all__ = ["ETLCommand", "create_or_update"]

View File

@@ -35,7 +35,7 @@ class ETLCommand(BaseCommand):
     def get_graph(self, *args, **options):
         def not_implemented():
-            raise NotImplementedError('You must implement {}.get_graph() method.'.format(self))
+            raise NotImplementedError("You must implement {}.get_graph() method.".format(self))
         return bonobo.Graph(not_implemented)
@@ -56,14 +56,14 @@ class ETLCommand(BaseCommand):
         graph_coll = self.get_graph(*args, **options)
         if not isinstance(graph_coll, GeneratorType):
-            graph_coll = (graph_coll, )
+            graph_coll = (graph_coll,)
         for i, graph in enumerate(graph_coll):
-            assert isinstance(graph, bonobo.Graph), 'Invalid graph provided.'
-            print(term.lightwhite('{}. {}'.format(i + 1, graph.name)))
+            assert isinstance(graph, bonobo.Graph), "Invalid graph provided."
+            print(term.lightwhite("{}. {}".format(i + 1, graph.name)))
             result = bonobo.run(graph, services=services, strategy=strategy)
             results.append(result)
-            print(term.lightblack(' ... return value: ' + str(result)))
+            print(term.lightblack(" ... return value: " + str(result)))
             print()
         return results
@@ -71,9 +71,9 @@ class ETLCommand(BaseCommand):
     def handle(self, *args, **options):
         _stdout_backup, _stderr_backup = self.stdout, self.stderr
-        self.stdout = OutputWrapper(ConsoleOutputPlugin._stdout, ending=CLEAR_EOL + '\n')
-        self.stderr = OutputWrapper(ConsoleOutputPlugin._stderr, ending=CLEAR_EOL + '\n')
-        self.stderr.style_func = lambda x: Fore.LIGHTRED_EX + Back.RED + '!' + Style.RESET_ALL + ' ' + x
+        self.stdout = OutputWrapper(ConsoleOutputPlugin._stdout, ending=CLEAR_EOL + "\n")
+        self.stderr = OutputWrapper(ConsoleOutputPlugin._stderr, ending=CLEAR_EOL + "\n")
+        self.stderr.style_func = lambda x: Fore.LIGHTRED_EX + Back.RED + "!" + Style.RESET_ALL + " " + x
         self.run(*args, **kwargs)

View File

@@ -9,8 +9,8 @@ from oauth2client import client, tools
 from oauth2client.file import Storage
 from oauth2client.tools import argparser
-HOME_DIR = os.path.expanduser('~')
-GOOGLE_SECRETS = os.path.join(HOME_DIR, '.cache/secrets/client_secrets.json')
+HOME_DIR = os.path.expanduser("~")
+GOOGLE_SECRETS = os.path.join(HOME_DIR, ".cache/secrets/client_secrets.json")
 def get_credentials(*, scopes):
@@ -22,10 +22,10 @@ def get_credentials(*, scopes):
     Returns:
         Credentials, the obtained credential.
     """
-    credential_dir = os.path.join(HOME_DIR, '.cache', __package__, 'credentials')
+    credential_dir = os.path.join(HOME_DIR, ".cache", __package__, "credentials")
     if not os.path.exists(credential_dir):
         os.makedirs(credential_dir)
-    credential_path = os.path.join(credential_dir, 'googleapis.json')
+    credential_path = os.path.join(credential_dir, "googleapis.json")
     store = Storage(credential_path)
     credentials = store.get()
@@ -34,22 +34,22 @@ def get_credentials(*, scopes):
     # kw: "incremental scopes"
     if not credentials or credentials.invalid or not credentials.has_scopes(scopes):
         flow = client.flow_from_clientsecrets(GOOGLE_SECRETS, scopes)
-        flow.user_agent = 'Bonobo ETL (https://www.bonobo-project.org/)'
-        flags = argparser.parse_args(['--noauth_local_webserver'])
+        flow.user_agent = "Bonobo ETL (https://www.bonobo-project.org/)"
+        flags = argparser.parse_args(["--noauth_local_webserver"])
         credentials = tools.run_flow(flow, store, flags)
-        print('Storing credentials to ' + credential_path)
+        print("Storing credentials to " + credential_path)
     return credentials
-def get_google_spreadsheets_api_client(scopes=('https://www.googleapis.com/auth/spreadsheets', )):
+def get_google_spreadsheets_api_client(scopes=("https://www.googleapis.com/auth/spreadsheets",)):
     credentials = get_credentials(scopes=scopes)
     http = credentials.authorize(httplib2.Http())
-    discoveryUrl = 'https://sheets.googleapis.com/$discovery/rest?version=v4'
-    return discovery.build('sheets', 'v4', http=http, discoveryServiceUrl=discoveryUrl, cache_discovery=False)
+    discoveryUrl = "https://sheets.googleapis.com/$discovery/rest?version=v4"
+    return discovery.build("sheets", "v4", http=http, discoveryServiceUrl=discoveryUrl, cache_discovery=False)
-def get_google_people_api_client(scopes=('https://www.googleapis.com/auth/contacts', )):
+def get_google_people_api_client(scopes=("https://www.googleapis.com/auth/contacts",)):
     credentials = get_credentials(scopes=scopes)
     http = credentials.authorize(httplib2.Http())
-    discoveryUrl = 'https://people.googleapis.com/$discovery/rest?version=v1'
-    return discovery.build('people', 'v1', http=http, discoveryServiceUrl=discoveryUrl, cache_discovery=False)
+    discoveryUrl = "https://people.googleapis.com/$discovery/rest?version=v1"
+    return discovery.build("people", "v1", http=http, discoveryServiceUrl=discoveryUrl, cache_discovery=False)

View File

@@ -2,9 +2,7 @@ from bonobo.plugins.jupyter import JupyterOutputPlugin
 def _jupyter_nbextension_paths():
-    return [{'section': 'notebook', 'src': 'static', 'dest': 'bonobo-jupyter', 'require': 'bonobo-jupyter/extension'}]
+    return [{"section": "notebook", "src": "static", "dest": "bonobo-jupyter", "require": "bonobo-jupyter/extension"}]
-__all__ = [
-    'JupyterOutputPlugin',
-]
+__all__ = ["JupyterOutputPlugin"]

View File

@@ -2,10 +2,10 @@ import ipywidgets as widgets
 from traitlets import List, Unicode
-@widgets.register('bonobo-widget.bonobo')
+@widgets.register("bonobo-widget.bonobo")
 class BonoboWidget(widgets.DOMWidget):
-    _view_name = Unicode('BonoboView').tag(sync=True)
-    _model_name = Unicode('BonoboModel').tag(sync=True)
-    _view_module = Unicode('bonobo-jupyter').tag(sync=True)
-    _model_module = Unicode('bonobo-jupyter').tag(sync=True)
+    _view_name = Unicode("BonoboView").tag(sync=True)
+    _model_name = Unicode("BonoboModel").tag(sync=True)
+    _view_module = Unicode("bonobo-jupyter").tag(sync=True)
+    _model_module = Unicode("bonobo-jupyter").tag(sync=True)
     value = List().tag(sync=True)

View File

@@ -9,24 +9,24 @@ from bonobo.util.objects import ValueHolder
 def path_str(path):
-    return path if path.startswith('/') else '/' + path
+    return path if path.startswith("/") else "/" + path
 class OpenDataSoftAPI(Configurable):
     dataset = Option(str, positional=True)
-    endpoint = Option(str, required=False, default='{scheme}://{netloc}{path}')
-    scheme = Option(str, required=False, default='https')
-    netloc = Option(str, required=False, default='data.opendatasoft.com')
-    path = Option(path_str, required=False, default='/api/records/1.0/search/')
+    endpoint = Option(str, required=False, default="{scheme}://{netloc}{path}")
+    scheme = Option(str, required=False, default="https")
+    netloc = Option(str, required=False, default="data.opendatasoft.com")
+    path = Option(path_str, required=False, default="/api/records/1.0/search/")
     rows = Option(int, required=False, default=500)
     limit = Option(int, required=False)
-    timezone = Option(str, required=False, default='Europe/Paris')
+    timezone = Option(str, required=False, default="Europe/Paris")
     kwargs = Option(dict, required=False, default=dict)
     @ContextProcessor
     def compute_path(self, context):
-        params = (('dataset', self.dataset), ('timezone', self.timezone)) + tuple(sorted(self.kwargs.items()))
-        yield self.endpoint.format(scheme=self.scheme, netloc=self.netloc, path=self.path) + '?' + urlencode(params)
+        params = (("dataset", self.dataset), ("timezone", self.timezone)) + tuple(sorted(self.kwargs.items()))
+        yield self.endpoint.format(scheme=self.scheme, netloc=self.netloc, path=self.path) + "?" + urlencode(params)
     @ContextProcessor
     def start(self, context, base_url):
@@ -34,25 +34,19 @@ class OpenDataSoftAPI(Configurable):
     def __call__(self, base_url, start, *args, **kwargs):
         while (not self.limit) or (self.limit > start):
-            url = '{}&start={start}&rows={rows}'.format(
+            url = "{}&start={start}&rows={rows}".format(
                 base_url, start=start.value, rows=self.rows if not self.limit else min(self.rows, self.limit - start)
             )
             resp = requests.get(url)
-            records = resp.json().get('records', [])
+            records = resp.json().get("records", [])
             if not len(records):
                 break
             for row in records:
-                yield {
-                    **row.get('fields', {}),
-                    'geometry': row.get('geometry', {}),
-                    'recordid': row.get('recordid'),
-                }
+                yield {**row.get("fields", {}), "geometry": row.get("geometry", {}), "recordid": row.get("recordid")}
             start += self.rows
-__all__ = [
-    'OpenDataSoftAPI',
-]
+__all__ = ["OpenDataSoftAPI"]

View File

@@ -16,10 +16,7 @@ class InactiveWritableError(InactiveIOError):
 class ValidationError(RuntimeError):
     def __init__(self, inst, message):
         super(ValidationError, self).__init__(
-            'Validation error in {class_name}: {message}'.format(
-                class_name=type(inst).__name__,
-                message=message,
-            )
+            "Validation error in {class_name}: {message}".format(class_name=type(inst).__name__, message=message)
         )
@@ -41,9 +38,8 @@ class AbstractError(UnrecoverableError, NotImplementedError):
     def __init__(self, method):
         super().__init__(
-            'Call to abstract method {class_name}.{method_name}(...): missing implementation.'.format(
-                class_name=get_name(method.__self__),
-                method_name=get_name(method),
+            "Call to abstract method {class_name}.{method_name}(...): missing implementation.".format(
+                class_name=get_name(method.__self__), method_name=get_name(method)
             )
         )

View File

@@ -8,40 +8,21 @@ from bonobo.util.statistics import Timer
 def get_argument_parser(parser=None):
     parser = bonobo.get_argument_parser(parser=parser)
-    parser.add_argument(
-        '--limit',
-        '-l',
-        type=int,
-        default=None,
-        help='If set, limits the number of processed lines.'
-    )
-    parser.add_argument(
-        '--print',
-        '-p',
-        action='store_true',
-        default=False,
-        help='If set, pretty prints before writing to output file.'
-    )
-    parser.add_argument(
-        '--strategy',
-        '-s',
-        type=str,
-        choices=STRATEGIES.keys(),
-        default=DEFAULT_STRATEGY,
-    )
+    parser.add_argument("--limit", "-l", type=int, default=None, help="If set, limits the number of processed lines.")
+    parser.add_argument(
+        "--print", "-p", action="store_true", default=False, help="If set, pretty prints before writing to output file."
+    )
+    parser.add_argument("--strategy", "-s", type=str, choices=STRATEGIES.keys(), default=DEFAULT_STRATEGY)
     return parser
 def get_graph_options(options):
-    _limit = options.pop('limit', None)
-    _print = options.pop('print', False)
+    _limit = options.pop("limit", None)
+    _print = options.pop("print", False)
-    return {
-        '_limit': (bonobo.Limit(_limit), ) if _limit else (),
-        '_print': (bonobo.PrettyPrinter(), ) if _print else (),
-    }
+    return {"_limit": (bonobo.Limit(_limit),) if _limit else (), "_print": (bonobo.PrettyPrinter(),) if _print else ()}
 def run(get_graph, get_services, *, parser=None):
@@ -49,38 +30,29 @@ def run(get_graph, get_services, *, parser=None):
     with bonobo.parse_args(parser) as options:
         with Timer() as timer:
-            print(
-                'Options:', ' '.join(
-                    '{}={}'.format(k, v)
-                    for k, v in sorted(options.items())
-                )
-            )
+            print("Options:", " ".join("{}={}".format(k, v) for k, v in sorted(options.items())))
             retval = bonobo.run(
-                get_graph(**get_graph_options(options)),
-                services=get_services(),
-                strategy=options['strategy'],
+                get_graph(**get_graph_options(options)), services=get_services(), strategy=options["strategy"]
             )
-        print('Execution time:', timer)
-        print('Return value:', retval)
-        print('XStatus:', retval.xstatus)
+        print("Execution time:", timer)
+        print("Return value:", retval)
+        print("XStatus:", retval.xstatus)
         return retval.xstatus
 def get_minor_version():
-    return '.'.join(bonobo.__version__.split('.')[:2])
+    return ".".join(bonobo.__version__.split(".")[:2])
 def get_datasets_dir(*dirs):
-    home_dir = os.path.expanduser('~')
-    target_dir = os.path.join(
-        home_dir, '.cache/bonobo', get_minor_version(), *dirs
-    )
+    home_dir = os.path.expanduser("~")
+    target_dir = os.path.join(home_dir, ".cache/bonobo", get_minor_version(), *dirs)
     os.makedirs(target_dir, exist_ok=True)
     return target_dir
 def get_services():
     return {
-        'fs': bonobo.open_fs(get_datasets_dir('datasets')),
-        'fs.static': bonobo.open_examples_fs('datasets', 'static'),
+        "fs": bonobo.open_fs(get_datasets_dir("datasets")),
+        "fs.static": bonobo.open_examples_fs("datasets", "static"),
     }


@ -1,5 +1,5 @@
if __name__ == '__main__': if __name__ == "__main__":
from bonobo.commands import entrypoint from bonobo.commands import entrypoint
import sys import sys
entrypoint(['examples'] + sys.argv[1:]) entrypoint(["examples"] + sys.argv[1:])


@ -14,15 +14,12 @@ def extract():
def get_graph(): def get_graph():
graph = bonobo.Graph() graph = bonobo.Graph()
graph.add_chain( graph.add_chain(extract, print)
extract,
print,
)
return graph return graph
if __name__ == '__main__': if __name__ == "__main__":
parser = bonobo.get_argument_parser() parser = bonobo.get_argument_parser()
with bonobo.parse_args(parser): with bonobo.parse_args(parser):
bonobo.run(get_graph()) bonobo.run(get_graph())


@ -6,24 +6,16 @@ from bonobo.examples import get_datasets_dir, get_minor_version, get_services
from bonobo.examples.datasets.coffeeshops import get_graph as get_coffeeshops_graph from bonobo.examples.datasets.coffeeshops import get_graph as get_coffeeshops_graph
from bonobo.examples.datasets.fablabs import get_graph as get_fablabs_graph from bonobo.examples.datasets.fablabs import get_graph as get_fablabs_graph
graph_factories = { graph_factories = {"coffeeshops": get_coffeeshops_graph, "fablabs": get_fablabs_graph}
'coffeeshops': get_coffeeshops_graph,
'fablabs': get_fablabs_graph,
}
if __name__ == '__main__': if __name__ == "__main__":
parser = examples.get_argument_parser() parser = examples.get_argument_parser()
parser.add_argument( parser.add_argument("--target", "-t", choices=graph_factories.keys(), nargs="+")
'--target', '-t', choices=graph_factories.keys(), nargs='+' parser.add_argument("--sync", action="store_true", default=False)
)
parser.add_argument('--sync', action='store_true', default=False)
with bonobo.parse_args(parser) as options: with bonobo.parse_args(parser) as options:
graph_options = examples.get_graph_options(options) graph_options = examples.get_graph_options(options)
graph_names = list( graph_names = list(options["target"] if options["target"] else sorted(graph_factories.keys()))
options['target']
if options['target'] else sorted(graph_factories.keys())
)
# Create a graph with all requested subgraphs # Create a graph with all requested subgraphs
graph = bonobo.Graph() graph = bonobo.Graph()
@ -32,29 +24,20 @@ if __name__ == '__main__':
bonobo.run(graph, services=get_services()) bonobo.run(graph, services=get_services())
if options['sync']: if options["sync"]:
        # TODO: when the parallel option for nodes is implemented, this needs to be rewritten to use a graph.         # TODO: when the parallel option for nodes is implemented, this needs to be rewritten to use a graph.
import boto3 import boto3
s3 = boto3.client('s3') s3 = boto3.client("s3")
local_dir = get_datasets_dir() local_dir = get_datasets_dir()
for root, dirs, files in os.walk(local_dir): for root, dirs, files in os.walk(local_dir):
for filename in files: for filename in files:
local_path = os.path.join(root, filename) local_path = os.path.join(root, filename)
relative_path = os.path.relpath(local_path, local_dir) relative_path = os.path.relpath(local_path, local_dir)
s3_path = os.path.join( s3_path = os.path.join(get_minor_version(), relative_path)
get_minor_version(), relative_path
)
try: try:
s3.head_object( s3.head_object(Bucket="bonobo-examples", Key=s3_path)
Bucket='bonobo-examples', Key=s3_path
)
except Exception: except Exception:
s3.upload_file( s3.upload_file(local_path, "bonobo-examples", s3_path, ExtraArgs={"ACL": "public-read"})
local_path,
'bonobo-examples',
s3_path,
ExtraArgs={'ACL': 'public-read'}
)


@ -1,63 +1,39 @@
"""
"""
import sys import sys
import bonobo import bonobo
from bonobo import examples from bonobo import examples
from bonobo.contrib.opendatasoft import OpenDataSoftAPI as ODSReader from bonobo.contrib.opendatasoft import OpenDataSoftAPI as ODSReader
from bonobo.examples import get_services from bonobo.examples import get_services
from bonobo.structs.graphs import PartialGraph
def get_graph(graph=None, *, _limit=(), _print=()): def get_graph(graph=None, *, _limit=(), _print=()):
graph = graph or bonobo.Graph() graph = graph or bonobo.Graph()
producer = graph.add_chain( producer = (
ODSReader( graph.get_cursor()
dataset='liste-des-cafes-a-un-euro', >> ODSReader(dataset="liste-des-cafes-a-un-euro", netloc="opendata.paris.fr")
netloc='opendata.paris.fr' >> PartialGraph(*_limit)
), >> bonobo.UnpackItems(0)
*_limit, >> bonobo.Rename(name="nom_du_cafe", address="adresse", zipcode="arrondissement")
bonobo.UnpackItems(0), >> bonobo.Format(city="Paris", country="France")
bonobo.Rename( >> bonobo.OrderFields(["name", "address", "zipcode", "city", "country", "geometry", "geoloc"])
name='nom_du_cafe', >> PartialGraph(*_print)
address='adresse',
zipcode='arrondissement'
),
bonobo.Format(city='Paris', country='France'),
bonobo.OrderFields(
[
'name', 'address', 'zipcode', 'city', 'country',
'geometry', 'geoloc'
]
),
*_print,
) )
# Comma separated values. # Comma separated values.
graph.add_chain( graph.get_cursor(producer.output) >> bonobo.CsvWriter(
bonobo.CsvWriter( "coffeeshops.csv", fields=["name", "address", "zipcode", "city"], delimiter=","
'coffeeshops.csv',
fields=['name', 'address', 'zipcode', 'city'],
delimiter=','
),
_input=producer.output,
) )
# Standard JSON # Standard JSON
graph.add_chain( graph.get_cursor(producer.output) >> bonobo.JsonWriter(path="coffeeshops.json")
bonobo.JsonWriter(path='coffeeshops.json'),
_input=producer.output,
)
# Line-delimited JSON # Line-delimited JSON
graph.add_chain( graph.get_cursor(producer.output) >> bonobo.LdjsonWriter(path="coffeeshops.ldjson")
bonobo.LdjsonWriter(path='coffeeshops.ldjson'),
_input=producer.output,
)
return graph return graph
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(examples.run(get_graph, get_services)) sys.exit(examples.run(get_graph, get_services))
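The coffeeshops example above is the clearest showcase of the new alternate syntax introduced by this commit: graph.get_cursor() returns a cursor whose >> operator appends nodes to the chain, PartialGraph(*nodes) splices an optional node list in, and graph.get_cursor(producer.output) forks further chains off an existing output. A minimal sketch of the same pattern, assuming only the cursor API as it is used above (extract and the other node names are illustrative):

    import bonobo

    def extract():
        yield "foo"
        yield "bar"

    def get_graph():
        graph = bonobo.Graph()
        # Each ">>" appends the next node to the current chain; this is roughly
        # equivalent to graph.add_chain(extract, str.upper, print) in the older syntax.
        graph.get_cursor() >> extract >> str.upper >> print
        return graph

    if __name__ == "__main__":
        bonobo.run(get_graph())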


@ -25,27 +25,21 @@ from bonobo.examples import get_services
try: try:
import pycountry import pycountry
except ImportError as exc: except ImportError as exc:
raise ImportError( raise ImportError('You must install package "pycountry" to run this example.') from exc
'You must install package "pycountry" to run this example.'
) from exc
API_DATASET = 'fablabs@public-us' API_DATASET = "fablabs@public-us"
ROWS = 100 ROWS = 100
def _getlink(x): def _getlink(x):
return x.get('url', None) return x.get("url", None)
def normalize(row): def normalize(row):
result = { result = {
**row, **row,
'links': "links": list(filter(None, map(_getlink, json.loads(row.get("links"))))),
list(filter(None, map(_getlink, json.loads(row.get('links'))))), "country": pycountry.countries.get(alpha_2=row.get("country_code", "").upper()).name,
'country':
pycountry.countries.get(
alpha_2=row.get('country_code', '').upper()
).name,
} }
return result return result
@ -58,10 +52,10 @@ def get_graph(graph=None, *, _limit=(), _print=()):
normalize, normalize,
bonobo.UnpackItems(0), bonobo.UnpackItems(0),
*_print, *_print,
bonobo.JsonWriter(path='fablabs.json'), bonobo.JsonWriter(path="fablabs.json"),
) )
return graph return graph
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(examples.run(get_graph, get_services)) sys.exit(examples.run(get_graph, get_services))


@ -19,7 +19,7 @@ def get_graph():
return graph return graph
if __name__ == '__main__': if __name__ == "__main__":
parser = bonobo.get_argument_parser() parser = bonobo.get_argument_parser()
with bonobo.parse_args(parser): with bonobo.parse_args(parser):
bonobo.run(get_graph()) bonobo.run(get_graph())


@ -21,7 +21,7 @@ def get_graph():
return graph return graph
if __name__ == '__main__': if __name__ == "__main__":
parser = bonobo.get_argument_parser() parser = bonobo.get_argument_parser()
with bonobo.parse_args(parser): with bonobo.parse_args(parser):
bonobo.run(get_graph()) bonobo.run(get_graph())


@ -7,12 +7,12 @@ from bonobo.examples.files.services import get_services
def get_graph(*, _limit=None, _print=False): def get_graph(*, _limit=None, _print=False):
return bonobo.Graph( return bonobo.Graph(
bonobo.CsvReader('coffeeshops.csv'), bonobo.CsvReader("coffeeshops.csv"),
*((bonobo.Limit(_limit), ) if _limit else ()), *((bonobo.Limit(_limit),) if _limit else ()),
*((bonobo.PrettyPrinter(), ) if _print else ()), *((bonobo.PrettyPrinter(),) if _print else ()),
bonobo.CsvWriter('coffeeshops.csv', fs='fs.output') bonobo.CsvWriter("coffeeshops.csv", fs="fs.output")
) )
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(examples.run(get_graph, get_services)) sys.exit(examples.run(get_graph, get_services))


@ -9,24 +9,17 @@ def get_graph(*, _limit=None, _print=False):
graph = bonobo.Graph() graph = bonobo.Graph()
trunk = graph.add_chain( trunk = graph.add_chain(
bonobo.JsonReader('theaters.json', fs='fs.static'), bonobo.JsonReader("theaters.json", fs="fs.static"), *((bonobo.Limit(_limit),) if _limit else ())
*((bonobo.Limit(_limit), ) if _limit else ()),
) )
if _print: if _print:
graph.add_chain(bonobo.PrettyPrinter(), _input=trunk.output) graph.add_chain(bonobo.PrettyPrinter(), _input=trunk.output)
graph.add_chain( graph.add_chain(bonobo.JsonWriter("theaters.output.json", fs="fs.output"), _input=trunk.output)
bonobo.JsonWriter('theaters.output.json', fs='fs.output'), graph.add_chain(bonobo.LdjsonWriter("theaters.output.ldjson", fs="fs.output"), _input=trunk.output)
_input=trunk.output
)
graph.add_chain(
bonobo.LdjsonWriter('theaters.output.ldjson', fs='fs.output'),
_input=trunk.output
)
return graph return graph
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(examples.run(get_graph, get_services)) sys.exit(examples.run(get_graph, get_services))


@ -1,4 +1,4 @@
''' """
This example shows how a different file system service can be injected This example shows how a different file system service can be injected
into a transformation (as compressing pickled objects often makes sense into a transformation (as compressing pickled objects often makes sense
anyways). The pickle itself contains a list of lists as follows: anyways). The pickle itself contains a list of lists as follows:
@ -25,7 +25,7 @@ https://www.kaggle.com/uciml/sms-spam-collection-dataset/downloads/sms-spam-coll
The transformation (1) reads the pickled data, (2) marks and shortens The transformation (1) reads the pickled data, (2) marks and shortens
messages categorized as spam, and (3) prints the output. messages categorized as spam, and (3) prints the output.
''' """
import sys import sys
@ -36,14 +36,12 @@ from bonobo import examples
def cleanse_sms(category, sms): def cleanse_sms(category, sms):
if category == 'spam': if category == "spam":
sms_clean = '**MARKED AS SPAM** ' + sms[0:50] + ( sms_clean = "**MARKED AS SPAM** " + sms[0:50] + ("..." if len(sms) > 50 else "")
'...' if len(sms) > 50 else '' elif category == "ham":
)
elif category == 'ham':
sms_clean = sms sms_clean = sms
else: else:
raise ValueError('Unknown category {!r}.'.format(category)) raise ValueError("Unknown category {!r}.".format(category))
return category, sms, sms_clean return category, sms, sms_clean
@ -53,7 +51,7 @@ def get_graph(*, _limit=(), _print=()):
graph.add_chain( graph.add_chain(
# spam.pkl is within the gzipped tarball # spam.pkl is within the gzipped tarball
bonobo.PickleReader('spam.pkl'), bonobo.PickleReader("spam.pkl"),
*_limit, *_limit,
cleanse_sms, cleanse_sms,
*_print, *_print,
@ -63,11 +61,8 @@ def get_graph(*, _limit=(), _print=()):
def get_services(): def get_services():
return { return {**examples.get_services(), "fs": TarFS(bonobo.get_examples_path("datasets", "static", "spam.tgz"))}
**examples.get_services(), 'fs':
TarFS(bonobo.get_examples_path('datasets', 'static', 'spam.tgz'))
}
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(examples.run(get_graph, get_services)) sys.exit(examples.run(get_graph, get_services))
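The docstring above explains the point of this example: the "fs" service is swapped for a TarFS so the PickleReader transparently reads from inside a gzipped tarball. As a hedged sketch of the same injection mechanism from a custom node's point of view (CountLines and its logic are illustrative, not part of this diff), a Configurable declares the service and receives the resolved filesystem as a keyword argument at call time:

    from bonobo.config import Configurable, Service

    class CountLines(Configurable):
        # "fs" is looked up in the services dict passed to bonobo.run(..., services=...),
        # which is how the example above substitutes TarFS(...) without touching the nodes.
        fs = Service("fs")

        def __call__(self, path, *, fs):
            with fs.open(path) as f:
                yield path, sum(1 for _ in f)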


@ -2,7 +2,4 @@ from bonobo import examples, open_fs
def get_services(): def get_services():
return { return {**examples.get_services(), "fs.output": open_fs()}
**examples.get_services(),
'fs.output': open_fs(),
}


@ -7,20 +7,20 @@ from bonobo.examples.files.services import get_services
def skip_comments(line): def skip_comments(line):
line = line.strip() line = line.strip()
if not line.startswith('#'): if not line.startswith("#"):
yield line yield line
def get_graph(*, _limit=(), _print=()): def get_graph(*, _limit=(), _print=()):
return bonobo.Graph( return bonobo.Graph(
bonobo.FileReader('passwd.txt', fs='fs.static'), bonobo.FileReader("passwd.txt", fs="fs.static"),
skip_comments, skip_comments,
*_limit, *_limit,
lambda s: s.split(':')[0], lambda s: s.split(":")[0],
*_print, *_print,
bonobo.FileWriter('usernames.txt', fs='fs.output'), bonobo.FileWriter("usernames.txt", fs="fs.output"),
) )
if __name__ == '__main__': if __name__ == "__main__":
sys.exit(examples.run(get_graph, get_services)) sys.exit(examples.run(get_graph, get_services))


@ -1,7 +1,7 @@
import bonobo import bonobo
from bonobo.examples.types.strings import get_graph from bonobo.examples.types.strings import get_graph
if __name__ == '__main__': if __name__ == "__main__":
parser = bonobo.get_argument_parser() parser = bonobo.get_argument_parser()
with bonobo.parse_args(parser): with bonobo.parse_args(parser):
bonobo.run(get_graph()) bonobo.run(get_graph())


@ -18,13 +18,13 @@ import bonobo
def extract(): def extract():
yield 'foo' yield "foo"
yield 'bar' yield "bar"
yield 'baz' yield "baz"
def transform(s): def transform(s):
return '{} ({})'.format(s.title(), randint(10, 99)) return "{} ({})".format(s.title(), randint(10, 99))
def load(s): def load(s):
@ -35,7 +35,7 @@ def get_graph():
return bonobo.Graph(extract, transform, load) return bonobo.Graph(extract, transform, load)
if __name__ == '__main__': if __name__ == "__main__":
parser = bonobo.get_argument_parser() parser = bonobo.get_argument_parser()
with bonobo.parse_args(parser): with bonobo.parse_args(parser):
bonobo.run(get_graph()) bonobo.run(get_graph())


@ -9,8 +9,4 @@ from bonobo.execution.contexts.graph import GraphExecutionContext
from bonobo.execution.contexts.node import NodeExecutionContext from bonobo.execution.contexts.node import NodeExecutionContext
from bonobo.execution.contexts.plugin import PluginExecutionContext from bonobo.execution.contexts.plugin import PluginExecutionContext
__all__ = [ __all__ = ["GraphExecutionContext", "NodeExecutionContext", "PluginExecutionContext"]
'GraphExecutionContext',
'NodeExecutionContext',
'PluginExecutionContext',
]


@ -62,12 +62,12 @@ class Lifecycle:
""" """
if self._defunct: if self._defunct:
return '!' return "!"
if not self.started: if not self.started:
return ' ' return " "
if not self.stopped: if not self.stopped:
return '+' return "+"
return '-' return "-"
def __enter__(self): def __enter__(self):
self.start() self.start()
@ -78,31 +78,31 @@ class Lifecycle:
def get_flags_as_string(self): def get_flags_as_string(self):
if self._defunct: if self._defunct:
return term.red('[defunct]') return term.red("[defunct]")
if self.killed: if self.killed:
return term.lightred('[killed]') return term.lightred("[killed]")
if self.stopped: if self.stopped:
return term.lightblack('[done]') return term.lightblack("[done]")
return '' return ""
def start(self): def start(self):
if self.started: if self.started:
raise RuntimeError('This context is already started ({}).'.format(get_name(self))) raise RuntimeError("This context is already started ({}).".format(get_name(self)))
self._started = True self._started = True
def stop(self): def stop(self):
if not self.started: if not self.started:
raise RuntimeError('This context cannot be stopped as it never started ({}).'.format(get_name(self))) raise RuntimeError("This context cannot be stopped as it never started ({}).".format(get_name(self)))
self._stopped = True self._stopped = True
def kill(self): def kill(self):
if not self.started: if not self.started:
raise RuntimeError('Cannot kill an unstarted context.') raise RuntimeError("Cannot kill an unstarted context.")
if self.stopped: if self.stopped:
raise RuntimeError('Cannot kill a stopped context.') raise RuntimeError("Cannot kill a stopped context.")
self._killed = True self._killed = True
@ -119,10 +119,10 @@ class Lifecycle:
def as_dict(self): def as_dict(self):
return { return {
'status': self.status, "status": self.status,
'name': self.name, "name": self.name,
'stats': self.get_statistics_as_string(), "stats": self.get_statistics_as_string(),
'flags': self.get_flags_as_string(), "flags": self.get_flags_as_string(),
} }


@ -63,7 +63,7 @@ class BaseGraphExecutionContext(BaseContext):
self.services = create_container(services) self.services = create_container(services)
# Probably not a good idea to use it unless you really know what you're doing. But you can access the context. # Probably not a good idea to use it unless you really know what you're doing. But you can access the context.
self.services['__graph_context'] = self self.services["__graph_context"] = self
for i, node_context in enumerate(self): for i, node_context in enumerate(self):
outputs = self.graph.outputs_of(i) outputs = self.graph.outputs_of(i)


@ -19,7 +19,7 @@ from bonobo.util.statistics import WithStatistics
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
UnboundArguments = namedtuple('UnboundArguments', ['args', 'kwargs']) UnboundArguments = namedtuple("UnboundArguments", ["args", "kwargs"])
class NodeExecutionContext(BaseContext, WithStatistics): class NodeExecutionContext(BaseContext, WithStatistics):
@ -46,13 +46,13 @@ class NodeExecutionContext(BaseContext, WithStatistics):
:param _outputs: output queues (optional) :param _outputs: output queues (optional)
""" """
BaseContext.__init__(self, wrapped, parent=parent) BaseContext.__init__(self, wrapped, parent=parent)
WithStatistics.__init__(self, 'in', 'out', 'err', 'warn') WithStatistics.__init__(self, "in", "out", "err", "warn")
# Services: how we'll access external dependencies # Services: how we'll access external dependencies
if services: if services:
if self.parent: if self.parent:
raise RuntimeError( raise RuntimeError(
'Having services defined both in GraphExecutionContext and child NodeExecutionContext is not supported, for now.' "Having services defined both in GraphExecutionContext and child NodeExecutionContext is not supported, for now."
) )
self.services = create_container(services) self.services = create_container(services)
else: else:
@ -70,11 +70,11 @@ class NodeExecutionContext(BaseContext, WithStatistics):
self._stack = None self._stack = None
def __str__(self): def __str__(self):
return self.__name__ + self.get_statistics_as_string(prefix=' ') return self.__name__ + self.get_statistics_as_string(prefix=" ")
def __repr__(self): def __repr__(self):
name, type_name = get_name(self), get_name(type(self)) name, type_name = get_name(self), get_name(type(self))
return '<{}({}{}){}>'.format(type_name, self.status, name, self.get_statistics_as_string(prefix=' ')) return "<{}({}{}){}>".format(type_name, self.status, name, self.get_statistics_as_string(prefix=" "))
def start(self): def start(self):
""" """
@ -97,13 +97,13 @@ class NodeExecutionContext(BaseContext, WithStatistics):
            # Not normal to have a partially configured object here, so let's warn the user instead of letting them             # Not normal to have a partially configured object here, so let's warn the user instead of letting them
            # get into the hard trouble of understanding that by themselves.             # get into the hard trouble of understanding that by themselves.
raise TypeError( raise TypeError(
                    'Configurables should be instantiated before execution starts.\nGot {!r}.\n'.format(                     "Configurables should be instantiated before execution starts.\nGot {!r}.\n".format(
self.wrapped self.wrapped
) )
) from exc ) from exc
else: else:
raise TypeError( raise TypeError(
                    'Configurables should be instantiated before execution starts.\nGot {!r}.\n'.format(                     "Configurables should be instantiated before execution starts.\nGot {!r}.\n".format(
self.wrapped self.wrapped
) )
) )
@ -120,7 +120,7 @@ class NodeExecutionContext(BaseContext, WithStatistics):
The actual infinite loop for this transformation. The actual infinite loop for this transformation.
""" """
logger.debug('Node loop starts for {!r}.'.format(self)) logger.debug("Node loop starts for {!r}.".format(self))
while self.should_loop: while self.should_loop:
try: try:
@ -128,7 +128,7 @@ class NodeExecutionContext(BaseContext, WithStatistics):
except InactiveReadableError: except InactiveReadableError:
break break
logger.debug('Node loop ends for {!r}.'.format(self)) logger.debug("Node loop ends for {!r}.".format(self))
def step(self): def step(self):
try: try:
@ -137,10 +137,7 @@ class NodeExecutionContext(BaseContext, WithStatistics):
raise raise
except Empty: except Empty:
sleep(TICK_PERIOD) # XXX: How do we determine this constant? sleep(TICK_PERIOD) # XXX: How do we determine this constant?
except ( except (NotImplementedError, UnrecoverableError):
NotImplementedError,
UnrecoverableError,
):
self.fatal(sys.exc_info()) # exit loop self.fatal(sys.exc_info()) # exit loop
except Exception: # pylint: disable=broad-except except Exception: # pylint: disable=broad-except
self.error(sys.exc_info()) # does not exit loop self.error(sys.exc_info()) # does not exit loop
@ -208,20 +205,20 @@ class NodeExecutionContext(BaseContext, WithStatistics):
def set_input_type(self, input_type): def set_input_type(self, input_type):
if self._input_type is not None: if self._input_type is not None:
raise RuntimeError('Cannot override input type, already have %r.', self._input_type) raise RuntimeError("Cannot override input type, already have %r.", self._input_type)
if type(input_type) is not type: if type(input_type) is not type:
raise UnrecoverableTypeError('Input types must be regular python types.') raise UnrecoverableTypeError("Input types must be regular python types.")
if not issubclass(input_type, tuple): if not issubclass(input_type, tuple):
raise UnrecoverableTypeError('Input types must be subclasses of tuple (and act as tuples).') raise UnrecoverableTypeError("Input types must be subclasses of tuple (and act as tuples).")
self._input_type = input_type self._input_type = input_type
def get_input_fields(self): def get_input_fields(self):
return self._input_type._fields if self._input_type and hasattr(self._input_type, '_fields') else None return self._input_type._fields if self._input_type and hasattr(self._input_type, "_fields") else None
def set_input_fields(self, fields, typename='Bag'): def set_input_fields(self, fields, typename="Bag"):
self.set_input_type(BagType(typename, fields)) self.set_input_type(BagType(typename, fields))
### Output type and fields ### Output type and fields
@ -231,20 +228,20 @@ class NodeExecutionContext(BaseContext, WithStatistics):
def set_output_type(self, output_type): def set_output_type(self, output_type):
if self._output_type is not None: if self._output_type is not None:
raise RuntimeError('Cannot override output type, already have %r.', self._output_type) raise RuntimeError("Cannot override output type, already have %r.", self._output_type)
if type(output_type) is not type: if type(output_type) is not type:
raise UnrecoverableTypeError('Output types must be regular python types.') raise UnrecoverableTypeError("Output types must be regular python types.")
if not issubclass(output_type, tuple): if not issubclass(output_type, tuple):
raise UnrecoverableTypeError('Output types must be subclasses of tuple (and act as tuples).') raise UnrecoverableTypeError("Output types must be subclasses of tuple (and act as tuples).")
self._output_type = output_type self._output_type = output_type
def get_output_fields(self): def get_output_fields(self):
return self._output_type._fields if self._output_type and hasattr(self._output_type, '_fields') else None return self._output_type._fields if self._output_type and hasattr(self._output_type, "_fields") else None
def set_output_fields(self, fields, typename='Bag'): def set_output_fields(self, fields, typename="Bag"):
self.set_output_type(BagType(typename, fields)) self.set_output_type(BagType(typename, fields))
### Attributes ### Attributes
@ -273,11 +270,11 @@ class NodeExecutionContext(BaseContext, WithStatistics):
self.step() self.step()
def error(self, exc_info, *, level=logging.ERROR): def error(self, exc_info, *, level=logging.ERROR):
self.increment('err') self.increment("err")
super().error(exc_info, level=level) super().error(exc_info, level=level)
def fatal(self, exc_info, *, level=logging.CRITICAL): def fatal(self, exc_info, *, level=logging.CRITICAL):
self.increment('err') self.increment("err")
super().fatal(exc_info, level=level) super().fatal(exc_info, level=level)
self.input.shutdown() self.input.shutdown()
@ -306,8 +303,9 @@ class NodeExecutionContext(BaseContext, WithStatistics):
input_bag = self._input_type(*input_bag) input_bag = self._input_type(*input_bag)
except Exception as exc: except Exception as exc:
raise UnrecoverableTypeError( raise UnrecoverableTypeError(
'Input type changed to incompatible type between calls to {!r}.\nGot {!r} which is not of type {!r}.'. "Input type changed to incompatible type between calls to {!r}.\nGot {!r} which is not of type {!r}.".format(
format(self.wrapped, input_bag, self._input_type) self.wrapped, input_bag, self._input_type
)
) from exc ) from exc
# Store or check input length, which is a soft fallback in case we're just using tuples # Store or check input length, which is a soft fallback in case we're just using tuples
@ -315,12 +313,12 @@ class NodeExecutionContext(BaseContext, WithStatistics):
self._input_length = len(input_bag) self._input_length = len(input_bag)
elif len(input_bag) != self._input_length: elif len(input_bag) != self._input_length:
raise UnrecoverableTypeError( raise UnrecoverableTypeError(
'Input length changed between calls to {!r}.\nExpected {} but got {}: {!r}.'.format( "Input length changed between calls to {!r}.\nExpected {} but got {}: {!r}.".format(
self.wrapped, self._input_length, len(input_bag), input_bag self.wrapped, self._input_length, len(input_bag), input_bag
) )
) )
self.increment('in') # XXX should that go before type check ? self.increment("in") # XXX should that go before type check ?
return input_bag return input_bag
@ -366,7 +364,7 @@ class NodeExecutionContext(BaseContext, WithStatistics):
""" """
if not _control: if not _control:
self.increment('out') self.increment("out")
for output in self.outputs: for output in self.outputs:
output.put(value) output.put(value)
@ -406,8 +404,9 @@ class AsyncNodeExecutionContext(NodeExecutionContext):
input_bag = self._input_type(*input_bag) input_bag = self._input_type(*input_bag)
except Exception as exc: except Exception as exc:
raise UnrecoverableTypeError( raise UnrecoverableTypeError(
'Input type changed to incompatible type between calls to {!r}.\nGot {!r} which is not of type {!r}.'. "Input type changed to incompatible type between calls to {!r}.\nGot {!r} which is not of type {!r}.".format(
format(self.wrapped, input_bag, self._input_type) self.wrapped, input_bag, self._input_type
)
) from exc ) from exc
# Store or check input length, which is a soft fallback in case we're just using tuples # Store or check input length, which is a soft fallback in case we're just using tuples
@ -415,12 +414,12 @@ class AsyncNodeExecutionContext(NodeExecutionContext):
self._input_length = len(input_bag) self._input_length = len(input_bag)
elif len(input_bag) != self._input_length: elif len(input_bag) != self._input_length:
raise UnrecoverableTypeError( raise UnrecoverableTypeError(
'Input length changed between calls to {!r}.\nExpected {} but got {}: {!r}.'.format( "Input length changed between calls to {!r}.\nExpected {} but got {}: {!r}.".format(
self.wrapped, self._input_length, len(input_bag), input_bag self.wrapped, self._input_length, len(input_bag), input_bag
) )
) )
self.increment('in') # XXX should that go before type check ? self.increment("in") # XXX should that go before type check ?
return input_bag return input_bag
@ -443,18 +442,18 @@ def split_token(output):
flags, i, len_output, data_allowed = set(), 0, len(output), True flags, i, len_output, data_allowed = set(), 0, len(output), True
while i < len_output and isflag(output[i]): while i < len_output and isflag(output[i]):
if output[i].must_be_first and i: if output[i].must_be_first and i:
raise ValueError('{} flag must be first.'.format(output[i])) raise ValueError("{} flag must be first.".format(output[i]))
if i and output[i - 1].must_be_last: if i and output[i - 1].must_be_last:
raise ValueError('{} flag must be last.'.format(output[i - 1])) raise ValueError("{} flag must be last.".format(output[i - 1]))
if output[i] in flags: if output[i] in flags:
raise ValueError('Duplicate flag {}.'.format(output[i])) raise ValueError("Duplicate flag {}.".format(output[i]))
flags.add(output[i]) flags.add(output[i])
data_allowed &= output[i].allows_data data_allowed &= output[i].allows_data
i += 1 i += 1
output = output[i:] output = output[i:]
if not data_allowed and len(output): if not data_allowed and len(output):
raise ValueError('Output data provided after a flag that does not allow data.') raise ValueError("Output data provided after a flag that does not allow data.")
return flags, output return flags, output
@ -465,7 +464,7 @@ def concat_types(t1, l1, t2, l2):
if t1 == t2 == tuple: if t1 == t2 == tuple:
return tuple return tuple
f1 = t1._fields if hasattr(t1, '_fields') else tuple(range(l1)) f1 = t1._fields if hasattr(t1, "_fields") else tuple(range(l1))
f2 = t2._fields if hasattr(t2, '_fields') else tuple(range(l2)) f2 = t2._fields if hasattr(t2, "_fields") else tuple(range(l2))
return BagType('Inherited', f1 + f2) return BagType("Inherited", f1 + f2)


@ -27,12 +27,12 @@
from whistle import Event from whistle import Event
START = 'execution.start' START = "execution.start"
STARTED = 'execution.started' STARTED = "execution.started"
TICK = 'execution.tick' TICK = "execution.tick"
STOP = 'execution.stop' STOP = "execution.stop"
STOPPED = 'execution.stopped' STOPPED = "execution.stopped"
KILL = 'execution.kill' KILL = "execution.kill"
class ExecutionEvent(Event): class ExecutionEvent(Event):


@ -6,22 +6,23 @@ In the future, the two strategies that would really benefit bonobo are subproces
at home if you want to give it a shot. at home if you want to give it a shot.
""" """
from bonobo.execution.strategies.executor import ProcessPoolExecutorStrategy, ThreadPoolExecutorStrategy, \ from bonobo.execution.strategies.executor import (
AsyncThreadPoolExecutorStrategy ProcessPoolExecutorStrategy,
ThreadPoolExecutorStrategy,
AsyncThreadPoolExecutorStrategy,
)
from bonobo.execution.strategies.naive import NaiveStrategy from bonobo.execution.strategies.naive import NaiveStrategy
__all__ = [ __all__ = ["create_strategy"]
'create_strategy',
]
STRATEGIES = { STRATEGIES = {
'naive': NaiveStrategy, "naive": NaiveStrategy,
'processpool': ProcessPoolExecutorStrategy, "processpool": ProcessPoolExecutorStrategy,
'threadpool': ThreadPoolExecutorStrategy, "threadpool": ThreadPoolExecutorStrategy,
'aio_threadpool': AsyncThreadPoolExecutorStrategy, "aio_threadpool": AsyncThreadPoolExecutorStrategy,
} }
DEFAULT_STRATEGY = 'threadpool' DEFAULT_STRATEGY = "threadpool"
def create_strategy(name=None): def create_strategy(name=None):
@ -40,13 +41,13 @@ def create_strategy(name=None):
if name is None: if name is None:
name = DEFAULT_STRATEGY name = DEFAULT_STRATEGY
logging.debug('Creating execution strategy {!r}...'.format(name)) logging.debug("Creating execution strategy {!r}...".format(name))
try: try:
factory = STRATEGIES[name] factory = STRATEGIES[name]
except KeyError as exc: except KeyError as exc:
raise RuntimeError( raise RuntimeError(
'Invalid strategy {}. Available choices: {}.'.format(repr(name), ', '.join(sorted(STRATEGIES.keys()))) "Invalid strategy {}. Available choices: {}.".format(repr(name), ", ".join(sorted(STRATEGIES.keys())))
) from exc ) from exc
return factory() return factory()
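The STRATEGIES mapping above is what the strategy= argument of bonobo.run() gets resolved against through create_strategy(). A minimal usage sketch (the tiny graph is illustrative):

    import bonobo

    def extract():
        yield from range(3)

    # "threadpool" is the default; "naive", "processpool" and "aio_threadpool" are the other
    # registered names, the last one being experimental and requiring ALPHA=True.
    bonobo.run(bonobo.Graph(extract, print), strategy="threadpool")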


@ -6,6 +6,7 @@ class Strategy:
Base class for execution strategies. Base class for execution strategies.
""" """
GraphExecutionContextType = GraphExecutionContext GraphExecutionContextType = GraphExecutionContext
def __init__(self, GraphExecutionContextType=None): def __init__(self, GraphExecutionContextType=None):
@ -13,7 +14,7 @@ class Strategy:
def create_graph_execution_context(self, graph, *args, GraphExecutionContextType=None, **kwargs): def create_graph_execution_context(self, graph, *args, GraphExecutionContextType=None, **kwargs):
if not len(graph): if not len(graph):
raise ValueError('You provided an empty graph, which does not really make sense. Please add some nodes.') raise ValueError("You provided an empty graph, which does not really make sense. Please add some nodes.")
return (GraphExecutionContextType or self.GraphExecutionContextType)(graph, *args, **kwargs) return (GraphExecutionContextType or self.GraphExecutionContextType)(graph, *args, **kwargs)
def execute(self, graph, *args, **kwargs): def execute(self, graph, *args, **kwargs):


@ -36,14 +36,14 @@ class ExecutorStrategy(Strategy):
try: try:
context.start(self.get_starter(executor, futures)) context.start(self.get_starter(executor, futures))
except Exception: except Exception:
logger.critical('Exception caught while starting execution context.', exc_info=sys.exc_info()) logger.critical("Exception caught while starting execution context.", exc_info=sys.exc_info())
while context.alive: while context.alive:
try: try:
context.tick() context.tick()
except KeyboardInterrupt: except KeyboardInterrupt:
logging.getLogger(__name__).warning( logging.getLogger(__name__).warning(
'KeyboardInterrupt received. Trying to terminate the nodes gracefully.' "KeyboardInterrupt received. Trying to terminate the nodes gracefully."
) )
context.kill() context.kill()
break break
@ -61,13 +61,13 @@ class ExecutorStrategy(Strategy):
node.loop() node.loop()
except Exception: except Exception:
logging.getLogger(__name__).critical( logging.getLogger(__name__).critical(
'Critical error in threadpool node starter.', exc_info=sys.exc_info() "Critical error in threadpool node starter.", exc_info=sys.exc_info()
) )
try: try:
futures.append(executor.submit(_runner)) futures.append(executor.submit(_runner))
except Exception: except Exception:
logging.getLogger(__name__).critical('futures.append', exc_info=sys.exc_info()) logging.getLogger(__name__).critical("futures.append", exc_info=sys.exc_info())
return starter return starter
@ -85,7 +85,7 @@ class AsyncThreadPoolExecutorStrategy(ThreadPoolExecutorStrategy):
def __init__(self, GraphExecutionContextType=None): def __init__(self, GraphExecutionContextType=None):
if not settings.ALPHA.get(): if not settings.ALPHA.get():
raise NotImplementedError( raise NotImplementedError(
                '{} is experimental, you need to explicitly activate it using ALPHA=True in system env.'.format(                 "{} is experimental, you need to explicitly activate it using ALPHA=True in system env.".format(
get_name(self) get_name(self)
) )
) )


@ -5,4 +5,4 @@ from bonobo.nodes.io import *
from bonobo.nodes.io import __all__ as _all_io from bonobo.nodes.io import __all__ as _all_io
from bonobo.nodes.throttle import RateLimited from bonobo.nodes.throttle import RateLimited
__all__ = _all_basics + _all_io + ['Filter', 'RateLimited'] __all__ = _all_basics + _all_io + ["Filter", "RateLimited"]


@ -13,18 +13,18 @@ from bonobo.util.term import CLEAR_EOL
from mondrian import term from mondrian import term
__all__ = [ __all__ = [
'FixedWindow', "FixedWindow",
'Format', "Format",
'Limit', "Limit",
'OrderFields', "OrderFields",
'PrettyPrinter', "PrettyPrinter",
'Rename', "Rename",
'SetFields', "SetFields",
'Tee', "Tee",
'UnpackItems', "UnpackItems",
'count', "count",
'identity', "identity",
'noop', "noop",
] ]
@ -43,6 +43,7 @@ class Limit(Configurable):
TODO: simplify into a closure building factory? TODO: simplify into a closure building factory?
""" """
limit = Option(positional=True, default=10) limit = Option(positional=True, default=10)
@ContextProcessor @ContextProcessor
@ -69,7 +70,7 @@ def Tee(f):
def _shorten(s, w): def _shorten(s, w):
if w and len(s) > w: if w and len(s) > w:
s = s[0:w - 3] + '...' s = s[0 : w - 3] + "..."
return s return s
@ -78,28 +79,31 @@ class PrettyPrinter(Configurable):
int, int,
default=term.get_size()[0], default=term.get_size()[0],
required=False, required=False,
__doc__=''' __doc__="""
If set, truncates the output values longer than this to this width. If set, truncates the output values longer than this to this width.
''' """,
) )
filter = Method( filter = Method(
default= default=(
(lambda self, index, key, value: (value is not None) and (not isinstance(key, str) or not key.startswith('_'))), lambda self, index, key, value: (value is not None)
__doc__=''' and (not isinstance(key, str) or not key.startswith("_"))
),
__doc__="""
        A filter that determines what to print.         A filter that determines what to print.
        Default is to ignore any key starting with an underscore and None values.         Default is to ignore any key starting with an underscore and None values.
''' """,
) )
@ContextProcessor @ContextProcessor
def context(self, context): def context(self, context):
context.setdefault('_jupyter_html', None) context.setdefault("_jupyter_html", None)
yield context yield context
if context._jupyter_html is not None: if context._jupyter_html is not None:
from IPython.display import display, HTML from IPython.display import display, HTML
display(HTML('\n'.join(['<table>'] + context._jupyter_html + ['</table>'])))
display(HTML("\n".join(["<table>"] + context._jupyter_html + ["</table>"])))
def __call__(self, context, *args, **kwargs): def __call__(self, context, *args, **kwargs):
if not settings.QUIET: if not settings.QUIET:
@ -120,49 +124,44 @@ class PrettyPrinter(Configurable):
def format_quiet(self, index, key, value, *, fields=None): def format_quiet(self, index, key, value, *, fields=None):
# XXX should we implement argnames here ? # XXX should we implement argnames here ?
return ' '.join(((' ' if index else '-'), str(key), ':', str(value).strip())) return " ".join(((" " if index else "-"), str(key), ":", str(value).strip()))
def print_console(self, context, *args, **kwargs): def print_console(self, context, *args, **kwargs):
print('\u250c') print("\u250c")
for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())): for index, (key, value) in enumerate(itertools.chain(enumerate(args), kwargs.items())):
if self.filter(index, key, value): if self.filter(index, key, value):
print(self.format_console(index, key, value, fields=context.get_input_fields())) print(self.format_console(index, key, value, fields=context.get_input_fields()))
print('\u2514') print("\u2514")
def format_console(self, index, key, value, *, fields=None): def format_console(self, index, key, value, *, fields=None):
fields = fields or [] fields = fields or []
if not isinstance(key, str): if not isinstance(key, str):
if len(fields) > key and str(key) != str(fields[key]): if len(fields) > key and str(key) != str(fields[key]):
key = '{}{}'.format(fields[key], term.lightblack('[{}]'.format(key))) key = "{}{}".format(fields[key], term.lightblack("[{}]".format(key)))
else: else:
key = str(index) key = str(index)
prefix = '\u2502 {} = '.format(key) prefix = "\u2502 {} = ".format(key)
prefix_length = len(prefix) prefix_length = len(prefix)
def indent(text, prefix): def indent(text, prefix):
for i, line in enumerate(text.splitlines()): for i, line in enumerate(text.splitlines()):
yield (prefix if i else '') + line + CLEAR_EOL + '\n' yield (prefix if i else "") + line + CLEAR_EOL + "\n"
repr_of_value = ''.join( repr_of_value = "".join(
indent(pprint.pformat(value, width=self.max_width - prefix_length), '\u2502' + ' ' * (len(prefix) - 1)) indent(pprint.pformat(value, width=self.max_width - prefix_length), "\u2502" + " " * (len(prefix) - 1))
).strip() ).strip()
return '{}{}{}'.format(prefix, repr_of_value.replace('\n', CLEAR_EOL + '\n'), CLEAR_EOL) return "{}{}{}".format(prefix, repr_of_value.replace("\n", CLEAR_EOL + "\n"), CLEAR_EOL)
def print_jupyter(self, context, *args): def print_jupyter(self, context, *args):
if not context._jupyter_html: if not context._jupyter_html:
context._jupyter_html = [ context._jupyter_html = [
'<thead><tr>', "<thead><tr>",
*map('<th>{}</th>'.format, map(html.escape, map(str, *map("<th>{}</th>".format, map(html.escape, map(str, context.get_input_fields() or range(len(args))))),
context.get_input_fields() or range(len(args))))), "</tr></thead>",
'</tr></thead>',
] ]
context._jupyter_html += [ context._jupyter_html += ["<tr>", *map("<td>{}</td>".format, map(html.escape, map(repr, args))), "</tr>"]
'<tr>',
*map('<td>{}</td>'.format, map(html.escape, map(repr, args))),
'</tr>',
]
@use_no_input @use_no_input
@ -211,7 +210,7 @@ def OrderFields(fields):
@use_raw_input @use_raw_input
def _OrderFields(context, row): def _OrderFields(context, row):
nonlocal fields nonlocal fields
context.setdefault('remaining', None) context.setdefault("remaining", None)
if not context.output_type: if not context.output_type:
context.remaining = list(sorted(set(context.get_input_fields()) - set(fields))) context.remaining = list(sorted(set(context.get_input_fields()) - set(fields)))
context.set_output_fields(fields + context.remaining) context.set_output_fields(fields + context.remaining)
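PrettyPrinter's max_width Option and filter Method are documented in the hunk above. A hedged sketch of overriding both at construction time (this assumes Options and Methods accept keyword overrides, as their defaults above suggest; the values are illustrative):

    import bonobo

    printer = bonobo.PrettyPrinter(
        max_width=60,
        # Same signature as the default filter above: (self, index, key, value) -> bool.
        filter=lambda self, index, key, value: bool(value) and isinstance(key, str),
    )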


@ -6,14 +6,14 @@ from .json import JsonReader, JsonWriter, LdjsonReader, LdjsonWriter
from .pickle import PickleReader, PickleWriter from .pickle import PickleReader, PickleWriter
__all__ = [ __all__ = [
'CsvReader', "CsvReader",
'CsvWriter', "CsvWriter",
'FileReader', "FileReader",
'FileWriter', "FileWriter",
'JsonReader', "JsonReader",
'JsonWriter', "JsonWriter",
'LdjsonReader', "LdjsonReader",
'LdjsonWriter', "LdjsonWriter",
'PickleReader', "PickleReader",
'PickleWriter', "PickleWriter",
] ]


@ -13,22 +13,39 @@ class FileHandler(Configurable):
""" """
path = Option( path = Option(
str, required=True, positional=True, __doc__=''' str,
required=True,
positional=True,
__doc__="""
Path to use within the provided filesystem. Path to use within the provided filesystem.
''' """,
) # type: str ) # type: str
eol = Option(str, default='\n', __doc__=''' eol = Option(
str,
default="\n",
__doc__="""
Character to use as line separator. Character to use as line separator.
''') # type: str """,
mode = Option(str, __doc__=''' ) # type: str
mode = Option(
str,
__doc__="""
What mode to use for open() call. What mode to use for open() call.
''') # type: str """,
encoding = Option(str, default='utf-8', __doc__=''' ) # type: str
encoding = Option(
str,
default="utf-8",
__doc__="""
Encoding. Encoding.
''') # type: str """,
fs = Service('fs', __doc__=''' ) # type: str
fs = Service(
"fs",
__doc__="""
The filesystem instance to use. The filesystem instance to use.
''') # type: str """,
) # type: str
@ContextProcessor @ContextProcessor
def file(self, context, *, fs): def file(self, context, *, fs):


@ -35,18 +35,18 @@ class CsvHandler(FileHandler):
quoting = Option(int, default=csv.excel.quoting, required=False) quoting = Option(int, default=csv.excel.quoting, required=False)
# Fields (renamed from headers) # Fields (renamed from headers)
headers = RenamedOption('fields') headers = RenamedOption("fields")
fields = Option(ensure_tuple, required=False) fields = Option(ensure_tuple, required=False)
def get_dialect_kwargs(self): def get_dialect_kwargs(self):
return { return {
'delimiter': self.delimiter, "delimiter": self.delimiter,
'quotechar': self.quotechar, "quotechar": self.quotechar,
'escapechar': self.escapechar, "escapechar": self.escapechar,
'doublequote': self.doublequote, "doublequote": self.doublequote,
'skipinitialspace': self.skipinitialspace, "skipinitialspace": self.skipinitialspace,
'lineterminator': self.lineterminator, "lineterminator": self.lineterminator,
'quoting': self.quoting, "quoting": self.quoting,
} }
@ -59,25 +59,25 @@ class CsvReader(FileReader, CsvHandler):
skip = Option( skip = Option(
int, int,
default=0, default=0,
__doc__=''' __doc__="""
If set and greater than zero, the reader will skip this amount of lines. If set and greater than zero, the reader will skip this amount of lines.
''' """,
) )
@Method( @Method(
positional=False, positional=False,
__doc__=''' __doc__="""
Builds the CSV reader, a.k.a an object we can iterate, each iteration giving one line of fields, as an Builds the CSV reader, a.k.a an object we can iterate, each iteration giving one line of fields, as an
iterable. iterable.
        Defaults to builtin csv.reader(...), but can be overridden to fit your special needs.         Defaults to builtin csv.reader(...), but can be overridden to fit your special needs.
''' """,
) )
def reader_factory(self, file): def reader_factory(self, file):
return csv.reader(file, **self.get_dialect_kwargs()) return csv.reader(file, **self.get_dialect_kwargs())
def read(self, file, context, *, fs): def read(self, file, context, *, fs):
context.setdefault('skipped', 0) context.setdefault("skipped", 0)
reader = self.reader_factory(file) reader = self.reader_factory(file)
skip = self.skip skip = self.skip
@ -96,18 +96,18 @@ class CsvReader(FileReader, CsvHandler):
@use_context @use_context
class CsvWriter(FileWriter, CsvHandler): class CsvWriter(FileWriter, CsvHandler):
@Method( @Method(
__doc__=''' __doc__="""
Builds the CSV writer, a.k.a an object we can pass a field collection to be written as one line in the Builds the CSV writer, a.k.a an object we can pass a field collection to be written as one line in the
target file. target file.
        Defaults to builtin csv.writer(...).writerow, but can be overridden to fit your special needs.         Defaults to builtin csv.writer(...).writerow, but can be overridden to fit your special needs.
''' """
) )
def writer_factory(self, file): def writer_factory(self, file):
return csv.writer(file, **self.get_dialect_kwargs()).writerow return csv.writer(file, **self.get_dialect_kwargs()).writerow
def write(self, file, context, *values, fs): def write(self, file, context, *values, fs):
context.setdefault('lineno', 0) context.setdefault("lineno", 0)
fields = context.get_input_fields() fields = context.get_input_fields()
if not context.lineno: if not context.lineno:
@ -120,7 +120,7 @@ class CsvWriter(FileWriter, CsvHandler):
if fields: if fields:
if len(values) != len(fields): if len(values) != len(fields):
raise ValueError( raise ValueError(
'Values length differs from input fields length. Expected: {}. Got: {}. Values: {!r}.'.format( "Values length differs from input fields length. Expected: {}. Got: {}. Values: {!r}.".format(
len(fields), len(values), values len(fields), len(values), values
) )
) )
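The CsvReader/CsvWriter hunk above documents the dialect options plus the skip and fields options, and exposes reader_factory/writer_factory as Methods for deeper customization. A small construction sketch using only options that appear above (the filename and field names are illustrative):

    import bonobo

    # Skip one header-like line and name the columns explicitly; the dialect options
    # (delimiter, quotechar, ...) feed csv.reader via get_dialect_kwargs() as shown above.
    read_contacts = bonobo.CsvReader("contacts.csv", delimiter=";", skip=1, fields=("name", "email"))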


@ -12,24 +12,28 @@ class FileReader(Reader, FileHandler):
present. Extending it is usually the right way to create more specific file readers (like json, csv, etc.) present. Extending it is usually the right way to create more specific file readers (like json, csv, etc.)
""" """
mode = Option(str, default='r', __doc__=''' mode = Option(
str,
default="r",
__doc__="""
What mode to use for open() call. What mode to use for open() call.
''') # type: str """,
) # type: str
output_fields = Option( output_fields = Option(
ensure_tuple, ensure_tuple,
required=False, required=False,
__doc__=''' __doc__="""
Specify the field names of output lines. Specify the field names of output lines.
Mutually exclusive with "output_type". Mutually exclusive with "output_type".
''' """,
) )
output_type = Option( output_type = Option(
required=False, required=False,
__doc__=''' __doc__="""
Specify the type of output lines. Specify the type of output lines.
Mutually exclusive with "output_fields". Mutually exclusive with "output_fields".
''' """,
) )
@ContextProcessor @ContextProcessor
@ -43,7 +47,7 @@ class FileReader(Reader, FileHandler):
output_type = self.output_type output_type = self.output_type
if output_fields and output_type: if output_fields and output_type:
raise UnrecoverableError('Cannot specify both output_fields and output_type option.') raise UnrecoverableError("Cannot specify both output_fields and output_type option.")
if self.output_type: if self.output_type:
context.set_output_type(self.output_type) context.set_output_type(self.output_type)
@ -72,16 +76,20 @@ class FileWriter(Writer, FileHandler):
usually the right way to create more specific file writers (like json, csv, etc.) usually the right way to create more specific file writers (like json, csv, etc.)
""" """
mode = Option(str, default='w+', __doc__=''' mode = Option(
str,
default="w+",
__doc__="""
What mode to use for open() call. What mode to use for open() call.
''') # type: str """,
) # type: str
def write(self, file, context, line, *, fs): def write(self, file, context, line, *, fs):
""" """
Write a row on the next line of opened file in context. Write a row on the next line of opened file in context.
""" """
context.setdefault('lineno', 0) context.setdefault("lineno", 0)
self._write_line(file, (self.eol if context.lineno else '') + line) self._write_line(file, (self.eol if context.lineno else "") + line)
context.lineno += 1 context.lineno += 1
return NOT_MODIFIED return NOT_MODIFIED
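FileReader's output_fields and output_type options above are mutually exclusive ways of declaring what the reader emits, and the fs Service can be pointed at any registered filesystem. A short sketch reusing the service names from the examples earlier ("fs.static"); the field name is illustrative:

    import bonobo

    graph = bonobo.Graph(
        # Name the single output field so downstream nodes receive a named value rather than a bare string.
        bonobo.FileReader("passwd.txt", fs="fs.static", output_fields=("line",)),
        bonobo.PrettyPrinter(),
    )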


@ -9,13 +9,13 @@ from bonobo.nodes.io.file import FileReader, FileWriter
class JsonHandler(FileHandler): class JsonHandler(FileHandler):
eol = ',\n' eol = ",\n"
prefix, suffix = '[', ']' prefix, suffix = "[", "]"
class LdjsonHandler(FileHandler): class LdjsonHandler(FileHandler):
eol = '\n' eol = "\n"
prefix, suffix = '', '' prefix, suffix = "", ""
class JsonReader(JsonHandler, FileReader): class JsonReader(JsonHandler, FileReader):
@ -58,16 +58,16 @@ class JsonWriter(JsonHandler, FileWriter):
:param ctx: :param ctx:
:param row: :param row:
""" """
context.setdefault('lineno', 0) context.setdefault("lineno", 0)
fields = context.get_input_fields() fields = context.get_input_fields()
if fields: if fields:
prefix = self.eol if context.lineno else '' prefix = self.eol if context.lineno else ""
self._write_line(file, prefix + json.dumps(OrderedDict(zip(fields, args)))) self._write_line(file, prefix + json.dumps(OrderedDict(zip(fields, args))))
context.lineno += 1 context.lineno += 1
else: else:
for arg in args: for arg in args:
prefix = self.eol if context.lineno else '' prefix = self.eol if context.lineno else ""
self._write_line(file, prefix + json.dumps(arg)) self._write_line(file, prefix + json.dumps(arg))
context.lineno += 1 context.lineno += 1


@ -24,7 +24,7 @@ class PickleReader(FileReader, PickleHandler):
Reads a Python pickle object and yields the items in dicts. Reads a Python pickle object and yields the items in dicts.
""" """
mode = Option(str, default='rb') mode = Option(str, default="rb")
def read(self, file, context, *, fs): def read(self, file, context, *, fs):
data = pickle.load(file) data = pickle.load(file)
@ -47,7 +47,7 @@ class PickleReader(FileReader, PickleHandler):
for row in iterator: for row in iterator:
if len(row) != fields_length: if len(row) != fields_length:
raise ValueError('Received an object with {} items, expected {}.'.format(len(row), fields_length)) raise ValueError("Received an object with {} items, expected {}.".format(len(row), fields_length))
yield tuple(row.values() if is_dict else row) yield tuple(row.values() if is_dict else row)
@ -56,13 +56,13 @@ class PickleReader(FileReader, PickleHandler):
@use_context @use_context
class PickleWriter(FileWriter, PickleHandler): class PickleWriter(FileWriter, PickleHandler):
mode = Option(str, default='wb') mode = Option(str, default="wb")
def write(self, file, context, item, *, fs): def write(self, file, context, item, *, fs):
""" """
Write a pickled item to the opened file. Write a pickled item to the opened file.
""" """
context.setdefault('lineno', 0) context.setdefault("lineno", 0)
file.write(pickle.dumps(item)) file.write(pickle.dumps(item))
context.lineno += 1 context.lineno += 1
return NOT_MODIFIED return NOT_MODIFIED


@ -35,7 +35,7 @@ class ConsoleOutputPlugin(Plugin):
isatty = False isatty = False
# Whether we're on windows, or a real operating system. # Whether we're on windows, or a real operating system.
iswindows = (sys.platform == 'win32') iswindows = sys.platform == "win32"
def __init__(self): def __init__(self):
self.isatty = self._stdout.isatty() self.isatty = self._stdout.isatty()
@ -55,9 +55,9 @@ class ConsoleOutputPlugin(Plugin):
# Two options: # Two options:
# - move state to context # - move state to context
# - forbid registering more than once # - forbid registering more than once
self.prefix = '' self.prefix = ""
self.counter = 0 self.counter = 0
self._append_cache = '' self._append_cache = ""
self.stdout = IOBuffer() self.stdout = IOBuffer()
self.redirect_stdout = redirect_stdout(self._stdout if self.iswindows else self.stdout) self.redirect_stdout = redirect_stdout(self._stdout if self.iswindows else self.stdout)
@ -78,13 +78,13 @@ class ConsoleOutputPlugin(Plugin):
self.redirect_stderr.__exit__(None, None, None) self.redirect_stderr.__exit__(None, None, None)
self.redirect_stdout.__exit__(None, None, None) self.redirect_stdout.__exit__(None, None, None)
def write(self, context, prefix='', rewind=True, append=None): def write(self, context, prefix="", rewind=True, append=None):
t_cnt = len(context) t_cnt = len(context)
if not self.iswindows: if not self.iswindows:
for line in self.stdout.switch().split('\n')[:-1]: for line in self.stdout.switch().split("\n")[:-1]:
print(line + CLEAR_EOL, file=self._stdout) print(line + CLEAR_EOL, file=self._stdout)
for line in self.stderr.switch().split('\n')[:-1]: for line in self.stderr.switch().split("\n")[:-1]:
print(line + CLEAR_EOL, file=self._stderr) print(line + CLEAR_EOL, file=self._stderr)
alive_color = Style.BRIGHT alive_color = Style.BRIGHT
@ -92,31 +92,36 @@ class ConsoleOutputPlugin(Plugin):
for i in context.graph.topologically_sorted_indexes: for i in context.graph.topologically_sorted_indexes:
node = context[i] node = context[i]
name_suffix = '({})'.format(i) if settings.DEBUG.get() else '' name_suffix = "({})".format(i) if settings.DEBUG.get() else ""
liveliness_color = alive_color if node.alive else dead_color liveliness_color = alive_color if node.alive else dead_color
liveliness_prefix = ' {}{}{} '.format(liveliness_color, node.status, Style.RESET_ALL) liveliness_prefix = " {}{}{} ".format(liveliness_color, node.status, Style.RESET_ALL)
_line = ''.join(( _line = "".join(
liveliness_prefix, (
node.name, liveliness_prefix,
name_suffix, node.name,
' ', name_suffix,
node.get_statistics_as_string(), " ",
' ', node.get_statistics_as_string(),
node.get_flags_as_string(), " ",
Style.RESET_ALL, node.get_flags_as_string(),
' ', Style.RESET_ALL,
)) " ",
)
)
print(prefix + _line + CLEAR_EOL, file=self._stderr) print(prefix + _line + CLEAR_EOL, file=self._stderr)
if append: if append:
# todo handle multiline # todo handle multiline
print( print(
''.join(( "".join(
' `-> ', ' '.join('{}{}{}: {}'.format(Style.BRIGHT, k, Style.RESET_ALL, v) for k, v in append), (
CLEAR_EOL " `-> ",
)), " ".join("{}{}{}: {}".format(Style.BRIGHT, k, Style.RESET_ALL, v) for k, v in append),
file=self._stderr CLEAR_EOL,
)
),
file=self._stderr,
) )
t_cnt += 1 t_cnt += 1
@ -129,16 +134,17 @@ class ConsoleOutputPlugin(Plugin):
if self.counter % 10 and self._append_cache: if self.counter % 10 and self._append_cache:
append = self._append_cache append = self._append_cache
else: else:
self._append_cache = append = (('Memory', '{0:.2f} Mb'.format(memory_usage())), self._append_cache = append = (
# ('Total time', '{0} s'.format(execution_time(harness))), ("Memory", "{0:.2f} Mb".format(memory_usage())),
) # ('Total time', '{0} s'.format(execution_time(harness))),
)
else: else:
append = () append = ()
self.write(context, prefix=self.prefix, append=append, rewind=rewind) self.write(context, prefix=self.prefix, append=append, rewind=rewind)
self.counter += 1 self.counter += 1
class IOBuffer(): class IOBuffer:
""" """
The role of IOBuffer is to overcome the problem of multiple threads wanting to write to stdout at the same time. It The role of IOBuffer is to overcome the problem of multiple threads wanting to write to stdout at the same time. It
works a bit like double buffering in a video game: there are two buffers, one being written to and one being read from. works a bit like double buffering in a video game: there are two buffers, one being written to and one being read from.
@ -165,5 +171,6 @@ class IOBuffer():
def memory_usage(): def memory_usage():
import os, psutil import os, psutil
process = psutil.Process(os.getpid()) process = psutil.Process(os.getpid())
return process.memory_info()[0] / float(2**20) return process.memory_info()[0] / float(2 ** 20)
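Editor's note: the IOBuffer docstring above describes a double-buffering scheme — worker threads write into a back buffer while the console plugin drains the front one via the stdout.switch() calls seen earlier in this file. A minimal, hypothetical sketch of that idea (not the actual implementation):

import io
import threading

class DoubleBuffer:
    # Threads write to the back buffer; the rendering thread calls switch()
    # to swap buffers and drain everything accumulated since the last swap.
    def __init__(self):
        self._lock = threading.Lock()
        self._back = io.StringIO()

    def write(self, text):
        with self._lock:
            self._back.write(text)

    def switch(self):
        with self._lock:
            front, self._back = self._back, io.StringIO()
        return front.getvalue()

buf = DoubleBuffer()
buf.write("hello\n")
print(buf.switch(), end="")  # prints "hello"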

View File

@ -8,9 +8,9 @@ try:
import IPython.core.display import IPython.core.display
except ImportError as e: except ImportError as e:
logging.exception( logging.exception(
'You must install Jupyter to use the bonobo Jupyter extension. Easiest way is to install the ' "You must install Jupyter to use the bonobo Jupyter extension. Easiest way is to install the "
'optional "jupyter" dependencies with «pip install bonobo[jupyter]», but you can also install a ' 'optional "jupyter" dependencies with «pip install bonobo[jupyter]», but you can also install a '
'specific version by yourself.' "specific version by yourself."
) )

View File

@ -5,23 +5,23 @@ from bonobo.nodes import (
CsvReader, CsvWriter, FileReader, FileWriter, JsonReader, JsonWriter, PickleReader, PickleWriter CsvReader, CsvWriter, FileReader, FileWriter, JsonReader, JsonWriter, PickleReader, PickleWriter
) )
FILETYPE_CSV = 'text/csv' FILETYPE_CSV = "text/csv"
FILETYPE_JSON = 'application/json' FILETYPE_JSON = "application/json"
FILETYPE_PICKLE = 'pickle' FILETYPE_PICKLE = "pickle"
FILETYPE_PLAIN = 'text/plain' FILETYPE_PLAIN = "text/plain"
READER = 'reader' READER = "reader"
WRITER = 'writer' WRITER = "writer"
class Registry: class Registry:
ALIASES = { ALIASES = {
'csv': FILETYPE_CSV, "csv": FILETYPE_CSV,
'json': FILETYPE_JSON, "json": FILETYPE_JSON,
'pickle': FILETYPE_PICKLE, "pickle": FILETYPE_PICKLE,
'plain': FILETYPE_PLAIN, "plain": FILETYPE_PLAIN,
'text': FILETYPE_PLAIN, "text": FILETYPE_PLAIN,
'txt': FILETYPE_PLAIN, "txt": FILETYPE_PLAIN,
} }
FACTORIES = { FACTORIES = {
@ -41,10 +41,10 @@ class Registry:
def get_factory_for(self, kind, name, *, format=None): def get_factory_for(self, kind, name, *, format=None):
if not kind in self.FACTORIES: if not kind in self.FACTORIES:
raise KeyError('Unknown factory kind {!r}.'.format(kind)) raise KeyError("Unknown factory kind {!r}.".format(kind))
if format is None and name is None: if format is None and name is None:
raise RuntimeError('Cannot guess factory without at least a filename or a format.') raise RuntimeError("Cannot guess factory without at least a filename or a format.")
# Guess mimetype if possible # Guess mimetype if possible
if format is None: if format is None:
@ -62,7 +62,7 @@ class Registry:
if format is None or not format in self.FACTORIES[kind]: if format is None or not format in self.FACTORIES[kind]:
raise RuntimeError( raise RuntimeError(
'Could not resolve {kind} factory for {name} ({format}).'.format(kind=kind, name=name, format=format) "Could not resolve {kind} factory for {name} ({format}).".format(kind=kind, name=name, format=format)
) )
return self.FACTORIES[kind][format] return self.FACTORIES[kind][format]
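Editor's note: the resolution order above is: explicit format (possibly an alias) first, otherwise a mimetype guessed from the filename, and a RuntimeError if neither yields a known factory. A small standalone sketch of that lookup order (the FACTORIES content here is illustrative, not the real table):

import mimetypes

ALIASES = {"csv": "text/csv", "json": "application/json", "txt": "text/plain"}
FACTORIES = {"reader": {"text/csv": "CsvReader", "application/json": "JsonReader"}}

def resolve(kind, name=None, *, format=None):
    # Alias or explicit mimetype first, then a guess based on the filename.
    format = ALIASES.get(format, format)
    if format is None and name is not None:
        format, _ = mimetypes.guess_type(name)
    if format is None or format not in FACTORIES[kind]:
        raise RuntimeError("Could not resolve {} factory for {} ({}).".format(kind, name, format))
    return FACTORIES[kind][format]

print(resolve("reader", "data.csv"))      # CsvReader
print(resolve("reader", format="json"))   # JsonReader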

View File

@ -10,7 +10,7 @@ def to_bool(s):
if type(s) is bool: if type(s) is bool:
return s return s
if len(s): if len(s):
if s.lower() in ('f', 'false', 'n', 'no', '0'): if s.lower() in ("f", "false", "n", "no", "0"):
return False return False
return True return True
return False return False
@ -40,7 +40,7 @@ class Setting:
self.formatter = formatter self.formatter = formatter
def __repr__(self): def __repr__(self):
return '<Setting {}={!r}>'.format(self.name, self.get()) return "<Setting {}={!r}>".format(self.name, self.get())
def __eq__(self, other): def __eq__(self, other):
return self.get() == other return self.get() == other
@ -51,7 +51,7 @@ class Setting:
def set(self, value): def set(self, value):
value = self.formatter(value) if self.formatter else value value = self.formatter(value) if self.formatter else value
if self.validator and not self.validator(value): if self.validator and not self.validator(value):
raise ValidationError(self, 'Invalid value {!r} for setting {!r}.'.format(value, self.name)) raise ValidationError(self, "Invalid value {!r} for setting {!r}.".format(value, self.name))
self.value = value self.value = value
def set_if_true(self, value): def set_if_true(self, value):
@ -78,40 +78,37 @@ class Setting:
# Debug/verbose mode. # Debug/verbose mode.
DEBUG = Setting('DEBUG', formatter=to_bool, default=False) DEBUG = Setting("DEBUG", formatter=to_bool, default=False)
# Profile mode. # Profile mode.
PROFILE = Setting('PROFILE', formatter=to_bool, default=False) PROFILE = Setting("PROFILE", formatter=to_bool, default=False)
# Alpha mode. # Alpha mode.
ALPHA = Setting('ALPHA', formatter=to_bool, default=False) ALPHA = Setting("ALPHA", formatter=to_bool, default=False)
# Quiet mode. # Quiet mode.
QUIET = Setting('QUIET', formatter=to_bool, default=False) QUIET = Setting("QUIET", formatter=to_bool, default=False)
# Logging level. # Logging level.
LOGGING_LEVEL = Setting( LOGGING_LEVEL = Setting(
'LOGGING_LEVEL', "LOGGING_LEVEL",
formatter=logging._checkLevel, formatter=logging._checkLevel,
validator=logging._checkLevel, validator=logging._checkLevel,
default=lambda: logging.DEBUG if DEBUG.get() else logging.INFO default=lambda: logging.DEBUG if DEBUG.get() else logging.INFO,
) )
# Input/Output format for transformations # Input/Output format for transformations
IOFORMAT_ARG0 = 'arg0' IOFORMAT_ARG0 = "arg0"
IOFORMAT_KWARGS = 'kwargs' IOFORMAT_KWARGS = "kwargs"
IOFORMATS = { IOFORMATS = {IOFORMAT_ARG0, IOFORMAT_KWARGS}
IOFORMAT_ARG0,
IOFORMAT_KWARGS,
}
IOFORMAT = Setting('IOFORMAT', default=IOFORMAT_KWARGS, validator=IOFORMATS.__contains__) IOFORMAT = Setting("IOFORMAT", default=IOFORMAT_KWARGS, validator=IOFORMATS.__contains__)
def check(): def check():
if DEBUG.get() and QUIET.get(): if DEBUG.get() and QUIET.get():
raise RuntimeError('I cannot be verbose and quiet at the same time.') raise RuntimeError("I cannot be verbose and quiet at the same time.")
clear_all = Setting.clear_all clear_all = Setting.clear_all
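Editor's note: the pattern above is worth spelling out: a Setting runs its formatter on set(), then its validator, and get() falls back to a (possibly callable, hence lazy) default. A stripped-down stand-in assuming exactly that behaviour, not the real class:

_UNSET = object()

def to_bool(s):
    # Same semantics as the helper above.
    if type(s) is bool:
        return s
    return bool(s) and s.lower() not in ("f", "false", "n", "no", "0")

class Setting:
    def __init__(self, name, *, formatter=None, validator=None, default=None):
        self.name = name
        self.formatter = formatter
        self.validator = validator
        self.default = default
        self.value = _UNSET

    def get(self):
        if self.value is not _UNSET:
            return self.value
        return self.default() if callable(self.default) else self.default

    def set(self, value):
        value = self.formatter(value) if self.formatter else value
        if self.validator and not self.validator(value):
            raise ValueError("Invalid value {!r} for setting {!r}.".format(value, self.name))
        self.value = value

DEBUG = Setting("DEBUG", formatter=to_bool, default=False)
DEBUG.set("yes")
print(DEBUG.get())  # True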

View File

@ -9,24 +9,51 @@ from graphviz.dot import Digraph
from bonobo.constants import BEGIN from bonobo.constants import BEGIN
from bonobo.util import get_name from bonobo.util import get_name
GraphRange = namedtuple('GraphRange', ['graph', 'input', 'output']) GraphRange = namedtuple("GraphRange", ["graph", "input", "output"])
class GraphCursor: class GraphCursor:
def __init__(self, graph, node): @property
def input(self):
return self.first
@property
def output(self):
return self.last
def __init__(self, graph, *, first=None, last=None):
self.graph = graph self.graph = graph
self.node = node self.first = first or last
self.last = last
def __rshift__(self, other): def __rshift__(self, other):
""" Self >> Other """ """ Self >> Other """
chain = self.graph.add_chain(other, _input=self.node)
return GraphCursor(chain.graph, chain.output) if other == ...:
raise NotImplementedError(
"Expected something looking like a node, but got an Ellipsis (...). Did you forget to complete the graph?"
)
nodes = other.nodes if hasattr(other, "nodes") else [other]
if len(nodes):
chain = self.graph.add_chain(*nodes, _input=self.last)
return GraphCursor(chain.graph, first=self.first, last=chain.output)
return self
class PartialGraph:
def __init__(self, *nodes):
self.nodes = list(nodes)
class Graph: class Graph:
""" """
Represents a directed graph of nodes. Represents a directed graph of nodes.
""" """
name = ''
name = ""
def __init__(self, *chain): def __init__(self, *chain):
self.edges = {BEGIN: set()} self.edges = {BEGIN: set()}
@ -46,7 +73,7 @@ class Graph:
return self.nodes[key] return self.nodes[key]
def get_cursor(self, ref=BEGIN): def get_cursor(self, ref=BEGIN):
return GraphCursor(self, self._resolve_index(ref)) return GraphCursor(self, last=self._resolve_index(ref))
def outputs_of(self, idx, create=False): def outputs_of(self, idx, create=False):
""" Get a set of the outputs for a given node index. """ Get a set of the outputs for a given node index.
@ -76,7 +103,7 @@ class Graph:
_last = self.add_node(node) _last = self.add_node(node)
if not i and _name: if not i and _name:
if _name in self.named: if _name in self.named:
raise KeyError('Duplicate name {!r} in graph.'.format(_name)) raise KeyError("Duplicate name {!r} in graph.".format(_name))
self.named[_name] = _last self.named[_name] = _last
if _first is None: if _first is None:
_first = _last _first = _last
@ -86,7 +113,7 @@ class Graph:
if _output is not None: if _output is not None:
self.outputs_of(_input, create=True).add(_output) self.outputs_of(_input, create=True).add(_output)
if hasattr(self, '_topologcally_sorted_indexes_cache'): if hasattr(self, "_topologcally_sorted_indexes_cache"):
del self._topologcally_sorted_indexes_cache del self._topologcally_sorted_indexes_cache
return GraphRange(self, _first, _last) return GraphRange(self, _first, _last)
@ -144,10 +171,10 @@ class Graph:
return self._graphviz return self._graphviz
except AttributeError: except AttributeError:
g = Digraph() g = Digraph()
g.attr(rankdir='LR') g.attr(rankdir="LR")
g.node('BEGIN', shape='point') g.node("BEGIN", shape="point")
for i in self.outputs_of(BEGIN): for i in self.outputs_of(BEGIN):
g.edge('BEGIN', str(i)) g.edge("BEGIN", str(i))
for ix in self.topologically_sorted_indexes: for ix in self.topologically_sorted_indexes:
g.node(str(ix), label=get_name(self[ix])) g.node(str(ix), label=get_name(self[ix]))
for iy in self.outputs_of(ix): for iy in self.outputs_of(ix):
@ -160,9 +187,9 @@ class Graph:
def _repr_html_(self): def _repr_html_(self):
try: try:
return '<div>{}</div><pre>{}</pre>'.format(self.graphviz._repr_svg_(), html.escape(repr(self))) return "<div>{}</div><pre>{}</pre>".format(self.graphviz._repr_svg_(), html.escape(repr(self)))
except (ExecutableNotFound, FileNotFoundError) as exc: except (ExecutableNotFound, FileNotFoundError) as exc:
return '<strong>{}</strong>: {}'.format(type(exc).__name__, str(exc)) return "<strong>{}</strong>: {}".format(type(exc).__name__, str(exc))
def _resolve_index(self, mixed): def _resolve_index(self, mixed):
""" """
@ -182,10 +209,10 @@ class Graph:
if mixed in self.nodes: if mixed in self.nodes:
return self.nodes.index(mixed) return self.nodes.index(mixed)
raise ValueError('Cannot find node matching {!r}.'.format(mixed)) raise ValueError("Cannot find node matching {!r}.".format(mixed))
def _get_graphviz_node_id(graph, i): def _get_graphviz_node_id(graph, i):
escaped_index = str(i) escaped_index = str(i)
escaped_name = json.dumps(get_name(graph[i])) escaped_name = json.dumps(get_name(graph[i]))
return '{{{} [label={}]}}'.format(escaped_index, escaped_name) return "{{{} [label={}]}}".format(escaped_index, escaped_name)
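Editor's note: this GraphCursor rework is the machinery behind the commit's new alternate syntax: a cursor remembers the first and last nodes appended so far, and __rshift__ chains another node (or a PartialGraph's nodes) after the current tail. A hedged usage sketch based on what the diff shows — get_cursor() is the entry point visible here, and the surrounding job is purely illustrative:

import bonobo

def extract():
    yield "foo"
    yield "bar"

def transform(value):
    return value.upper()

# Classic chain API:
graph = bonobo.Graph()
graph.add_chain(extract, transform, print)

# Alternate, cursor-based syntax introduced by this commit:
other = bonobo.Graph()
other.get_cursor() >> extract >> transform >> print

if __name__ == "__main__":
    bonobo.run(graph)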

View File

@ -70,7 +70,7 @@ class Input(Queue, Readable, Writable):
# Check we are actually able to receive data. # Check we are actually able to receive data.
if self._writable_runlevel < 1: if self._writable_runlevel < 1:
raise InactiveWritableError('Cannot put() on an inactive {}.'.format(Writable.__name__)) raise InactiveWritableError("Cannot put() on an inactive {}.".format(Writable.__name__))
if data == END: if data == END:
self._writable_runlevel -= 1 self._writable_runlevel -= 1
@ -85,7 +85,7 @@ class Input(Queue, Readable, Writable):
def get(self, block=True, timeout=None): def get(self, block=True, timeout=None):
if not self.alive: if not self.alive:
raise InactiveReadableError('Cannot get() on an inactive {}.'.format(Readable.__name__)) raise InactiveReadableError("Cannot get() on an inactive {}.".format(Readable.__name__))
data = Queue.get(self, block, timeout) data = Queue.get(self, block, timeout)
@ -94,7 +94,7 @@ class Input(Queue, Readable, Writable):
if not self.alive: if not self.alive:
raise InactiveReadableError( raise InactiveReadableError(
'Cannot get() on an inactive {} (runlevel just reached 0).'.format(Readable.__name__) "Cannot get() on an inactive {} (runlevel just reached 0).".format(Readable.__name__)
) )
return self.get(block, timeout) return self.get(block, timeout)

View File

@ -3,7 +3,7 @@ class Token:
self.__name__ = name self.__name__ = name
def __repr__(self): def __repr__(self):
return '<{}>'.format(self.__name__) return "<{}>".format(self.__name__)
class Flag(Token): class Flag(Token):

View File

@ -16,25 +16,25 @@ from bonobo.util.inspect import (
istuple, istuple,
istype, istype,
) )
from bonobo.util.objects import (get_name, get_attribute_or_create, ValueHolder) from bonobo.util.objects import get_name, get_attribute_or_create, ValueHolder
# Bonobo's util API # Bonobo's util API
__all__ = [ __all__ = [
'ValueHolder', "ValueHolder",
'cast', "cast",
'deprecated', "deprecated",
'deprecated_alias', "deprecated_alias",
'ensure_tuple', "ensure_tuple",
'get_attribute_or_create', "get_attribute_or_create",
'get_name', "get_name",
'inspect_node', "inspect_node",
'isconfigurable', "isconfigurable",
'isconfigurabletype', "isconfigurabletype",
'iscontextprocessor', "iscontextprocessor",
'isdict', "isdict",
'ismethod', "ismethod",
'isoption', "isoption",
'istype', "istype",
'sortedlist', "sortedlist",
'tuplize', "tuplize",
] ]

View File

@ -12,14 +12,14 @@ class ApiHelper:
if graph: if graph:
# This function must comply with the "graph" API interface, meaning it can behave like bonobo.run. # This function must comply with the "graph" API interface, meaning it can behave like bonobo.run.
from inspect import signature from inspect import signature
parameters = list(signature(x).parameters) parameters = list(signature(x).parameters)
required_parameters = {'plugins', 'services', 'strategy'} required_parameters = {"plugins", "services", "strategy"}
assert len(parameters assert (
) > 0 and parameters[0] == 'graph', 'First parameter of a graph api function must be "graph".' len(parameters) > 0 and parameters[0] == "graph"
assert required_parameters.intersection( ), 'First parameter of a graph api function must be "graph".'
parameters assert required_parameters.intersection(parameters) == required_parameters, (
) == required_parameters, 'Graph api functions must define the following parameters: ' + ', '.join( "Graph api functions must define the following parameters: " + ", ".join(sorted(required_parameters))
sorted(required_parameters)
) )
self.__all__.append(get_name(x)) self.__all__.append(get_name(x))
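Editor's note: the two assertions above define the contract checked for every registered graph API function — first positional parameter named graph, plus plugins, services and strategy accepted as parameters (compare run() and inspect() earlier in this commit). A compliant signature, sketched standalone with an illustrative body:

from inspect import signature

def display(graph, *, plugins=None, services=None, strategy=None):
    # Minimal function satisfying the "graph" API contract.
    print(graph)

parameters = list(signature(display).parameters)
assert parameters[0] == "graph"
assert {"plugins", "services", "strategy"}.issubset(parameters)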

View File

@ -71,16 +71,18 @@ class {typename}(tuple):
{field_defs} {field_defs}
''' '''
_field_template = '''\ _field_template = """\
{name} = _property(_itemgetter({index:d}), doc={doc!r}) {name} = _property(_itemgetter({index:d}), doc={doc!r})
'''.strip('\n') """.strip(
"\n"
_reserved = frozenset(
['_', '_cls', '_attrs', '_fields', 'get', '_asdict', '_replace', '_make', 'self', '_self', 'tuple'] + dir(tuple)
) )
_multiple_underscores_pattern = re.compile('__+') _reserved = frozenset(
_slugify_allowed_chars_pattern = re.compile(r'[^a-z0-9_]+', flags=re.IGNORECASE) ["_", "_cls", "_attrs", "_fields", "get", "_asdict", "_replace", "_make", "self", "_self", "tuple"] + dir(tuple)
)
_multiple_underscores_pattern = re.compile("__+")
_slugify_allowed_chars_pattern = re.compile(r"[^a-z0-9_]+", flags=re.IGNORECASE)
def _uniquify(f): def _uniquify(f):
@ -90,13 +92,13 @@ def _uniquify(f):
def _uniquified(x): def _uniquified(x):
nonlocal f, seen nonlocal f, seen
x = str(x) x = str(x)
v = v0 = _multiple_underscores_pattern.sub('_', f(x)) v = v0 = _multiple_underscores_pattern.sub("_", f(x))
i = 0 i = 0
# if last character is not "allowed", let's start appending indexes right from the first iteration # if last character is not "allowed", let's start appending indexes right from the first iteration
if len(x) and _slugify_allowed_chars_pattern.match(x[-1]): if len(x) and _slugify_allowed_chars_pattern.match(x[-1]):
v = '{}{}'.format(v0, i) v = "{}{}".format(v0, i)
while v in seen: while v in seen:
v = '{}{}'.format(v0, i) v = "{}{}".format(v0, i)
i += 1 i += 1
seen.add(v) seen.add(v)
return v return v
@ -106,13 +108,13 @@ def _uniquify(f):
def _make_valid_attr_name(x): def _make_valid_attr_name(x):
if iskeyword(x): if iskeyword(x):
x = '_' + x x = "_" + x
if x.isidentifier(): if x.isidentifier():
return x return x
x = slugify(x, separator='_', regex_pattern=_slugify_allowed_chars_pattern) x = slugify(x, separator="_", regex_pattern=_slugify_allowed_chars_pattern)
if x.isidentifier(): if x.isidentifier():
return x return x
x = '_' + x x = "_" + x
if x.isidentifier(): if x.isidentifier():
return x return x
raise ValueError(x) raise ValueError(x)
@ -124,23 +126,23 @@ def BagType(typename, fields, *, verbose=False, module=None):
attrs = tuple(map(_uniquify(_make_valid_attr_name), fields)) attrs = tuple(map(_uniquify(_make_valid_attr_name), fields))
if type(fields) is str: if type(fields) is str:
raise TypeError('BagType does not support providing fields as a string.') raise TypeError("BagType does not support providing fields as a string.")
fields = list(map(str, fields)) fields = list(map(str, fields))
typename = str(typename) typename = str(typename)
for i, name in enumerate([typename] + fields): for i, name in enumerate([typename] + fields):
if type(name) is not str: if type(name) is not str:
raise TypeError('Type names and field names must be strings, got {name!r}'.format(name=name)) raise TypeError("Type names and field names must be strings, got {name!r}".format(name=name))
if not i: if not i:
if not name.isidentifier(): if not name.isidentifier():
raise ValueError('Type names must be valid identifiers: {name!r}'.format(name=name)) raise ValueError("Type names must be valid identifiers: {name!r}".format(name=name))
if iskeyword(name): if iskeyword(name):
raise ValueError('Type names cannot be a keyword: {name!r}'.format(name=name)) raise ValueError("Type names cannot be a keyword: {name!r}".format(name=name))
seen = set() seen = set()
for name in fields: for name in fields:
if name in seen: if name in seen:
raise ValueError('Encountered duplicate field name: {name!r}'.format(name=name)) raise ValueError("Encountered duplicate field name: {name!r}".format(name=name))
seen.add(name) seen.add(name)
# Fill-in the class template # Fill-in the class template
@ -150,21 +152,24 @@ def BagType(typename, fields, *, verbose=False, module=None):
attrs=attrs, attrs=attrs,
num_fields=len(fields), num_fields=len(fields),
arg_list=repr(attrs).replace("'", "")[1:-1], arg_list=repr(attrs).replace("'", "")[1:-1],
repr_fmt=', '.join(('%r' if isinstance(fields[index], int) else '{name}=%r').format(name=name) repr_fmt=", ".join(
for index, name in enumerate(attrs)), ("%r" if isinstance(fields[index], int) else "{name}=%r").format(name=name)
field_defs='\n'.join( for index, name in enumerate(attrs)
),
field_defs="\n".join(
_field_template.format( _field_template.format(
index=index, index=index,
name=name, name=name,
doc='Alias for ' + doc="Alias for "
('field #{}'.format(index) if isinstance(fields[index], int) else repr(fields[index])) + ("field #{}".format(index) if isinstance(fields[index], int) else repr(fields[index])),
) for index, name in enumerate(attrs) )
) for index, name in enumerate(attrs)
),
) )
# Execute the template string in a temporary namespace and support # Execute the template string in a temporary namespace and support
# tracing utilities by setting a value for frame.f_globals['__name__'] # tracing utilities by setting a value for frame.f_globals['__name__']
namespace = dict(__name__='namedtuple_%s' % typename) namespace = dict(__name__="namedtuple_%s" % typename)
exec(class_definition, namespace) exec(class_definition, namespace)
result = namespace[typename] result = namespace[typename]
result._source = class_definition result._source = class_definition
@ -178,7 +183,7 @@ def BagType(typename, fields, *, verbose=False, module=None):
# specified a particular module. # specified a particular module.
if module is None: if module is None:
try: try:
module = sys._getframe(1).f_globals.get('__name__', '__main__') module = sys._getframe(1).f_globals.get("__name__", "__main__")
except (AttributeError, ValueError): except (AttributeError, ValueError):
pass pass
if module is not None: if module is not None:
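Editor's note: the helper pair earlier in this file (_uniquify and _make_valid_attr_name) is what lets BagType accept arbitrary column labels as fields. A rough standalone approximation of the name cleanup, using a plain regex instead of the slugify dependency, so the exact output may differ from the real helper:

import keyword
import re

_not_allowed = re.compile(r"[^a-z0-9_]+", flags=re.IGNORECASE)

def make_valid_attr_name(x):
    # Keywords get a leading underscore; anything else is reduced to [A-Za-z0-9_].
    if keyword.iskeyword(x):
        x = "_" + x
    if x.isidentifier():
        return x
    x = _not_allowed.sub("_", x).strip("_").lower()
    if x.isidentifier():
        return x
    x = "_" + x
    if x.isidentifier():
        return x
    raise ValueError(x)

print(make_valid_attr_name("First Name"))   # first_name
print(make_valid_attr_name("class"))        # _class
print(make_valid_attr_name("2nd column"))   # _2nd_column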

View File

@ -25,7 +25,7 @@ def _with_length_check(f):
if length is not None: if length is not None:
if length != len(result): if length != len(result):
raise TypeError( raise TypeError(
'Length check failed, expected {} fields but got {}: {!r}.'.format(length, len(result), result) "Length check failed, expected {} fields but got {}: {!r}.".format(length, len(result), result)
) )
return result return result
@ -54,7 +54,7 @@ def ensure_tuple(tuple_or_mixed, *, cls=None):
if isinstance(tuple_or_mixed, tuple): if isinstance(tuple_or_mixed, tuple):
return tuple.__new__(cls, tuple_or_mixed) return tuple.__new__(cls, tuple_or_mixed)
return tuple.__new__(cls, (tuple_or_mixed, )) return tuple.__new__(cls, (tuple_or_mixed,))
def cast(type_): def cast(type_):

View File

@ -5,13 +5,13 @@ import warnings
def deprecated_alias(alias, func): def deprecated_alias(alias, func):
@functools.wraps(func) @functools.wraps(func)
def new_func(*args, **kwargs): def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter warnings.simplefilter("always", DeprecationWarning) # turn off filter
warnings.warn( warnings.warn(
"Call to deprecated function alias {}, use {} instead.".format(alias, func.__name__), "Call to deprecated function alias {}, use {} instead.".format(alias, func.__name__),
category=DeprecationWarning, category=DeprecationWarning,
stacklevel=2 stacklevel=2,
) )
warnings.simplefilter('default', DeprecationWarning) # reset filter warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs) return func(*args, **kwargs)
return new_func return new_func
@ -24,11 +24,11 @@ def deprecated(func):
@functools.wraps(func) @functools.wraps(func)
def new_func(*args, **kwargs): def new_func(*args, **kwargs):
warnings.simplefilter('always', DeprecationWarning) # turn off filter warnings.simplefilter("always", DeprecationWarning) # turn off filter
warnings.warn( warnings.warn(
"Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2 "Call to deprecated function {}.".format(func.__name__), category=DeprecationWarning, stacklevel=2
) )
warnings.simplefilter('default', DeprecationWarning) # reset filter warnings.simplefilter("default", DeprecationWarning) # reset filter
return func(*args, **kwargs) return func(*args, **kwargs)
return new_func return new_func
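Editor's note: both decorators force the DeprecationWarning filter to "always" for the duration of the call so the warning fires every time, then put the default filter back. A self-contained usage sketch, redeclaring the decorator locally so the snippet runs on its own:

import functools
import warnings

def deprecated(func):
    @functools.wraps(func)
    def new_func(*args, **kwargs):
        warnings.simplefilter("always", DeprecationWarning)   # turn off filter
        warnings.warn("Call to deprecated function {}.".format(func.__name__),
                      category=DeprecationWarning, stacklevel=2)
        warnings.simplefilter("default", DeprecationWarning)  # reset filter
        return func(*args, **kwargs)
    return new_func

@deprecated
def old_api():
    return 42

with warnings.catch_warnings(record=True) as caught:
    assert old_api() == 42
assert issubclass(caught[-1].category, DeprecationWarning)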

View File

@ -1,8 +1,8 @@
from bonobo.structs.tokens import Flag from bonobo.structs.tokens import Flag
F_INHERIT = Flag('Inherit') F_INHERIT = Flag("Inherit")
F_NOT_MODIFIED = Flag('NotModified') F_NOT_MODIFIED = Flag("NotModified")
F_NOT_MODIFIED.must_be_first = True F_NOT_MODIFIED.must_be_first = True
F_NOT_MODIFIED.must_be_last = True F_NOT_MODIFIED.must_be_last = True
F_NOT_MODIFIED.allows_data = False F_NOT_MODIFIED.allows_data = False

View File

@ -5,12 +5,12 @@ import re
import warnings import warnings
from contextlib import contextmanager from contextlib import contextmanager
__escape_decoder = codecs.getdecoder('unicode_escape') __escape_decoder = codecs.getdecoder("unicode_escape")
__posix_variable = re.compile('\$\{[^\}]*\}') __posix_variable = re.compile("\$\{[^\}]*\}")
def parse_var(var): def parse_var(var):
name, value = var.split('=', 1) name, value = var.split("=", 1)
def decode_escaped(escaped): def decode_escaped(escaped):
return __escape_decoder(escaped)[0] return __escape_decoder(escaped)[0]
@ -29,15 +29,15 @@ def load_env_from_file(filename):
Read an env file into a collection of (name, value) tuples. Read an env file into a collection of (name, value) tuples.
""" """
if not os.path.exists(filename): if not os.path.exists(filename):
raise FileNotFoundError('Environment file {} does not exist.'.format(filename)) raise FileNotFoundError("Environment file {} does not exist.".format(filename))
with open(filename) as f: with open(filename) as f:
for lineno, line in enumerate(f): for lineno, line in enumerate(f):
line = line.strip() line = line.strip()
if not line or line.startswith('#'): if not line or line.startswith("#"):
continue continue
if '=' not in line: if "=" not in line:
raise SyntaxError('Invalid environment file syntax in {} at line {}.'.format(filename, lineno + 1)) raise SyntaxError("Invalid environment file syntax in {} at line {}.".format(filename, lineno + 1))
name, value = parse_var(line) name, value = parse_var(line)
@ -64,10 +64,10 @@ def get_argument_parser(parser=None):
global _parser global _parser
_parser = parser _parser = parser
_parser.add_argument('--default-env-file', '-E', action='append') _parser.add_argument("--default-env-file", "-E", action="append")
_parser.add_argument('--default-env', action='append') _parser.add_argument("--default-env", action="append")
_parser.add_argument('--env-file', action='append') _parser.add_argument("--env-file", action="append")
_parser.add_argument('--env', '-e', action='append') _parser.add_argument("--env", "-e", action="append")
return _parser return _parser
@ -89,10 +89,11 @@ def parse_args(mixed=None):
global _parser global _parser
if _parser is not None: if _parser is not None:
warnings.warn( warnings.warn(
'You are calling bonobo.parse_args() without a parser argument, but it looks like you created a parser before. You probably want to pass your parser to this call, or if creating a new parser here is really what you want to do, please create a new one explicitly to silence this warning.' "You are calling bonobo.parse_args() without a parser argument, but it looks like you created a parser before. You probably want to pass your parser to this call, or if creating a new parser here is really what you want to do, please create a new one explicitly to silence this warning."
) )
# use the api from bonobo namespace, in case a command patched it. # use the api from bonobo namespace, in case a command patched it.
import bonobo import bonobo
mixed = bonobo.get_argument_parser() mixed = bonobo.get_argument_parser()
if isinstance(mixed, argparse.ArgumentParser): if isinstance(mixed, argparse.ArgumentParser):
@ -117,14 +118,14 @@ def parse_args(mixed=None):
# env-file sets something.) # env-file sets something.)
try: try:
# Set default environment # Set default environment
for name, value in map(parse_var, options.pop('default_env', []) or []): for name, value in map(parse_var, options.pop("default_env", []) or []):
if not name in os.environ: if not name in os.environ:
if not name in _backup: if not name in _backup:
_backup[name] = os.environ.get(name, None) _backup[name] = os.environ.get(name, None)
os.environ[name] = value os.environ[name] = value
# Read and set default environment from file(s) # Read and set default environment from file(s)
for filename in options.pop('default_env_file', []) or []: for filename in options.pop("default_env_file", []) or []:
for name, value in load_env_from_file(filename): for name, value in load_env_from_file(filename):
if not name in os.environ: if not name in os.environ:
if not name in _backup: if not name in _backup:
@ -132,14 +133,14 @@ def parse_args(mixed=None):
os.environ[name] = value os.environ[name] = value
# Read and set environment from file(s) # Read and set environment from file(s)
for filename in options.pop('env_file', []) or []: for filename in options.pop("env_file", []) or []:
for name, value in load_env_from_file(filename): for name, value in load_env_from_file(filename):
if not name in _backup: if not name in _backup:
_backup[name] = os.environ.get(name, None) _backup[name] = os.environ.get(name, None)
os.environ[name] = value os.environ[name] = value
# Set environment # Set environment
for name, value in map(parse_var, options.pop('env', []) or []): for name, value in map(parse_var, options.pop("env", []) or []):
if not name in _backup: if not name in _backup:
_backup[name] = os.environ.get(name, None) _backup[name] = os.environ.get(name, None)
os.environ[name] = value os.environ[name] = value
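Editor's note: the ordering above is the whole point of these four loops: --default-env and --default-env-file only fill variables that are not already set, --env-file and then --env overwrite unconditionally, and every touched name is saved into _backup for later restoration. A condensed, hypothetical rendering of that precedence, minus the file parsing and backup bookkeeping:

def apply_environment(options, environ):
    for name, value in options.get("default_env", []):
        environ.setdefault(name, value)      # defaults never override
    for name, value in options.get("env_file", []):
        environ[name] = value                # files override...
    for name, value in options.get("env", []):
        environ[name] = value                # ...and --env has the last word

env = {"PATH": "/usr/bin"}
apply_environment(
    {
        "default_env": [("PATH", "ignored")],
        "env_file": [("PASSWORD", "sweet")],
        "env": [("PASSWORD", "mine")],
    },
    env,
)
print(env)  # {'PATH': '/usr/bin', 'PASSWORD': 'mine'}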

View File

@ -15,36 +15,32 @@ def sweeten_errors():
except Exception as exc: except Exception as exc:
SPACES = 2 SPACES = 2
w = term.white w = term.white
prefix = w('' + ' ' * (SPACES - 1)) prefix = w("" + " " * (SPACES - 1))
suffix = w(' ' * (SPACES - 1) + '') suffix = w(" " * (SPACES - 1) + "")
pre_re = re.compile('([^`]*)`([^`]*)`([^`]*)') pre_re = re.compile("([^`]*)`([^`]*)`([^`]*)")
def format_arg(arg): def format_arg(arg):
length = len(pre_re.sub('\\1\\2\\3', arg)) length = len(pre_re.sub("\\1\\2\\3", arg))
arg = pre_re.sub(w('\\1') + term.bold('\\2') + w('\\3'), arg) arg = pre_re.sub(w("\\1") + term.bold("\\2") + w("\\3"), arg)
arg = re.sub('^ \$ (.*)', term.lightblack(' $ ') + term.reset('\\1'), arg) arg = re.sub("^ \$ (.*)", term.lightblack(" $ ") + term.reset("\\1"), arg)
return (arg, length) return (arg, length)
def f(*args): def f(*args):
return ''.join(args) return "".join(args)
term_width, term_height = term.get_size() term_width, term_height = term.get_size()
line_length = min(80, term_width) line_length = min(80, term_width)
for arg in exc.args: for arg in exc.args:
line_length = max(min(line_length, len(arg) + 2 * SPACES), 120) line_length = max(min(line_length, len(arg) + 2 * SPACES), 120)
print(f(w('' + '' * (line_length - 2) + ''))) print(f(w("" + "" * (line_length - 2) + "")))
for i, arg in enumerate(exc.args): for i, arg in enumerate(exc.args):
if i == 1: if i == 1:
print(f( print(f(prefix, " " * (line_length - 2 * SPACES), suffix))
prefix,
' ' * (line_length - 2 * SPACES),
suffix,
))
arg_formatted, arg_length = format_arg(arg) arg_formatted, arg_length = format_arg(arg)
if not i: if not i:
@ -52,17 +48,17 @@ def sweeten_errors():
print( print(
f( f(
prefix, prefix,
term.red_bg(term.bold(' ' + type(exc).__name__ + ' ')), term.red_bg(term.bold(" " + type(exc).__name__ + " ")),
' ', " ",
w(arg_formatted), w(arg_formatted),
' ' * (line_length - (arg_length + 3 + len(type(exc).__name__) + 2 * SPACES)), " " * (line_length - (arg_length + 3 + len(type(exc).__name__) + 2 * SPACES)),
suffix, suffix,
) )
) )
else: else:
# other lines # other lines
print(f(prefix, arg_formatted + ' ' * (line_length - arg_length - 2 * SPACES), suffix)) print(f(prefix, arg_formatted + " " * (line_length - arg_length - 2 * SPACES), suffix))
print(f(w('' + '' * (line_length - 2) + ''))) print(f(w("" + "" * (line_length - 2) + "")))
logging.getLogger().debug('This error was caused by the following exception chain.', exc_info=exc_info()) logging.getLogger().debug("This error was caused by the following exception chain.", exc_info=exc_info())

View File

@ -9,6 +9,7 @@ def isconfigurable(mixed):
:return: bool :return: bool
""" """
from bonobo.config.configurables import Configurable from bonobo.config.configurables import Configurable
return isinstance(mixed, Configurable) return isinstance(mixed, Configurable)
@ -32,7 +33,7 @@ def isconfigurabletype(mixed, *, strict=False):
if isinstance(mixed, PartiallyConfigured): if isinstance(mixed, PartiallyConfigured):
return True return True
if hasattr(mixed, '_partial') and mixed._partial: if hasattr(mixed, "_partial") and mixed._partial:
return True return True
return False return False
@ -47,6 +48,7 @@ def isoption(mixed):
""" """
from bonobo.config.options import Option from bonobo.config.options import Option
return isinstance(mixed, Option) return isinstance(mixed, Option)
@ -58,6 +60,7 @@ def ismethod(mixed):
:return: bool :return: bool
""" """
from bonobo.config.options import Method from bonobo.config.options import Method
return isinstance(mixed, Method) return isinstance(mixed, Method)
@ -69,6 +72,7 @@ def iscontextprocessor(x):
:return: bool :return: bool
""" """
from bonobo.config.processors import ContextProcessor from bonobo.config.processors import ContextProcessor
return isinstance(x, ContextProcessor) return isinstance(x, ContextProcessor)
@ -102,15 +106,7 @@ def istuple(mixed):
return isinstance(mixed, tuple) return isinstance(mixed, tuple)
ConfigurableInspection = namedtuple( ConfigurableInspection = namedtuple("ConfigurableInspection", ["type", "instance", "options", "processors", "partial"])
'ConfigurableInspection', [
'type',
'instance',
'options',
'processors',
'partial',
]
)
ConfigurableInspection.__enter__ = lambda self: self ConfigurableInspection.__enter__ = lambda self: self
ConfigurableInspection.__exit__ = lambda *exc_details: None ConfigurableInspection.__exit__ = lambda *exc_details: None
@ -134,17 +130,11 @@ def inspect_node(mixed, *, _partial=None):
inst, typ = None, mixed inst, typ = None, mixed
elif isconfigurable(mixed): elif isconfigurable(mixed):
inst, typ = mixed, type(mixed) inst, typ = mixed, type(mixed)
elif hasattr(mixed, 'func'): elif hasattr(mixed, "func"):
return inspect_node(mixed.func, _partial=(mixed.args, mixed.keywords)) return inspect_node(mixed.func, _partial=(mixed.args, mixed.keywords))
else: else:
raise TypeError( raise TypeError(
'Not a Configurable, nor a Configurable instance and not even a partially configured Configurable. Check your inputs.' "Not a Configurable, nor a Configurable instance and not even a partially configured Configurable. Check your inputs."
) )
return ConfigurableInspection( return ConfigurableInspection(typ, inst, list(typ.__options__), list(typ.__processors__), _partial)
typ,
inst,
list(typ.__options__),
list(typ.__processors__),
_partial,
)
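Editor's note: because ConfigurableInspection carries no-op __enter__/__exit__, inspect_node can be used as a context manager; the tuple fields are (type, instance, options, processors, partial). A hedged usage sketch, assuming the import paths referenced by this module and by the option tests in this commit:

from bonobo.config import Configurable, Option
from bonobo.util import inspect_node

class MyNode(Configurable):
    greeting = Option(str, required=False, default="hello")

    def __call__(self, name):
        yield "{} {}".format(self.greeting, name)

with inspect_node(MyNode()) as ci:
    # ci.instance is the node, ci.type its class, ci.options the declared options.
    print(ci.type.__name__, list(ci.options))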

View File

@ -11,7 +11,7 @@ class Wrapper:
@property @property
def __name__(self): def __name__(self):
return getattr(self.wrapped, '__name__', getattr(type(self.wrapped), '__name__', repr(self.wrapped))) return getattr(self.wrapped, "__name__", getattr(type(self.wrapped), "__name__", repr(self.wrapped)))
name = __name__ name = __name__
@ -142,10 +142,10 @@ class ValueHolder:
return divmod(other, self._value) return divmod(other, self._value)
def __pow__(self, other): def __pow__(self, other):
return self._value**other return self._value ** other
def __rpow__(self, other): def __rpow__(self, other):
return other**self._value return other ** self._value
def __ipow__(self, other): def __ipow__(self, other):
self._value **= other self._value **= other

View File

@ -4,5 +4,5 @@ from packaging.utils import canonicalize_name
bonobo_packages = {} bonobo_packages = {}
for p in pkg_resources.working_set: for p in pkg_resources.working_set:
name = canonicalize_name(p.project_name) name = canonicalize_name(p.project_name)
if name.startswith('bonobo'): if name.startswith("bonobo"):
bonobo_packages[name] = p bonobo_packages[name] = p

View File

@ -23,8 +23,8 @@ class _ModulesRegistry(dict):
def require(self, name): def require(self, name):
if name not in self: if name not in self:
bits = name.split('.') bits = name.split(".")
filename = os.path.join(self.pathname, *bits[:-1], bits[-1] + '.py') filename = os.path.join(self.pathname, *bits[:-1], bits[-1] + ".py")
self[name] = _RequiredModule(runpy.run_path(filename, run_name=name)) self[name] = _RequiredModule(runpy.run_path(filename, run_name=name))
return self[name] return self[name]
@ -37,7 +37,7 @@ def _parse_option(option):
:return: tuple :return: tuple
""" """
try: try:
key, val = option.split('=', 1) key, val = option.split("=", 1)
except ValueError: except ValueError:
return option, True return option, True
@ -75,7 +75,7 @@ def _resolve_transformations(transformations):
transformations = transformations or [] transformations = transformations or []
for t in transformations: for t in transformations:
try: try:
mod, attr = t.split(':', 1) mod, attr = t.split(":", 1)
yield getattr(registry.require(mod), attr) yield getattr(registry.require(mod), attr)
except ValueError: except ValueError:
yield getattr(bonobo, t) yield getattr(bonobo, t)
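Editor's note: the strings handled here come from the command line: "path.to.module:callable" loads the module through the registry and picks the attribute, while a bare name is looked up on the bonobo package itself. A tiny illustration of the two spellings (the job module and attribute are made up; CsvReader is a real bonobo attribute):

def classify(reference):
    # "jobs.etl:extract" -> module lookup; "CsvReader" -> attribute of bonobo.
    try:
        mod, attr = reference.split(":", 1)
        return "module {!r}, attribute {!r}".format(mod, attr)
    except ValueError:
        return "bonobo.{}".format(reference)

print(classify("jobs.etl:extract"))  # module 'jobs.etl', attribute 'extract'
print(classify("CsvReader"))         # bonobo.CsvReader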

View File

@ -10,8 +10,8 @@ class WithStatistics:
return ((name, self.statistics[name]) for name in self.statistics_names) return ((name, self.statistics[name]) for name in self.statistics_names)
def get_statistics_as_string(self, *args, **kwargs): def get_statistics_as_string(self, *args, **kwargs):
stats = tuple('{0}={1}'.format(name, cnt) for name, cnt in self.get_statistics(*args, **kwargs) if cnt > 0) stats = tuple("{0}={1}".format(name, cnt) for name, cnt in self.get_statistics(*args, **kwargs) if cnt > 0)
return (kwargs.get('prefix', '') + ' '.join(stats)) if len(stats) else '' return (kwargs.get("prefix", "") + " ".join(stats)) if len(stats) else ""
def increment(self, name, *, amount=1): def increment(self, name, *, amount=1):
self.statistics[name] += amount self.statistics[name] += amount
@ -35,4 +35,4 @@ class Timer:
return self.__finish - self.__start return self.__finish - self.__start
def __str__(self): def __str__(self):
return str(int(self.duration * 1000) / 1000.0) + 's' return str(int(self.duration * 1000) / 1000.0) + "s"

View File

@ -1,2 +1,2 @@
CLEAR_EOL = '\033[0K' CLEAR_EOL = "\033[0K"
MOVE_CURSOR_UP = lambda n: '\033[{}A'.format(n) MOVE_CURSOR_UP = lambda n: "\033[{}A".format(n)
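Editor's note: these two escape sequences are what the console plugin uses to redraw its status block in place: print each status line followed by CLEAR_EOL, then move the cursor back up before the next refresh. A standalone sketch of that loop with made-up status lines (run it in a real terminal to see the effect):

import sys
import time

CLEAR_EOL = "\033[0K"
MOVE_CURSOR_UP = lambda n: "\033[{}A".format(n)

statuses = ["extract in=3 out=3", "transform in=3 out=3", "load in=3"]
for tick in range(5):
    for line in statuses:
        # CLEAR_EOL wipes leftovers when the new line is shorter than the old one.
        print(" {} {}{}".format(tick, line, CLEAR_EOL), file=sys.stderr)
    time.sleep(0.2)
    # Rewind so the next pass overwrites the same block of lines.
    print(MOVE_CURSOR_UP(len(statuses)), end="", file=sys.stderr, flush=True)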

View File

@ -26,20 +26,20 @@ def optional_contextmanager(cm, *, ignore=False):
class FilesystemTester: class FilesystemTester:
def __init__(self, extension='txt', mode='w', *, input_data=''): def __init__(self, extension="txt", mode="w", *, input_data=""):
self.extension = extension self.extension = extension
self.input_data = input_data self.input_data = input_data
self.mode = mode self.mode = mode
def get_services_for_reader(self, tmpdir): def get_services_for_reader(self, tmpdir):
fs, filename = open_fs(tmpdir), 'input.' + self.extension fs, filename = open_fs(tmpdir), "input." + self.extension
with fs.open(filename, self.mode) as fp: with fs.open(filename, self.mode) as fp:
fp.write(self.input_data) fp.write(self.input_data)
return fs, filename, {'fs': fs} return fs, filename, {"fs": fs}
def get_services_for_writer(self, tmpdir): def get_services_for_writer(self, tmpdir):
fs, filename = open_fs(tmpdir), 'output.' + self.extension fs, filename = open_fs(tmpdir), "output." + self.extension
return fs, filename, {'fs': fs} return fs, filename, {"fs": fs}
class QueueList(list): class QueueList(list):
@ -60,7 +60,7 @@ class BufferingContext:
return self.buffer return self.buffer
def get_buffer_args_as_dicts(self): def get_buffer_args_as_dicts(self):
return [row._asdict() if hasattr(row, '_asdict') else dict(row) for row in self.buffer] return [row._asdict() if hasattr(row, "_asdict") else dict(row) for row in self.buffer]
class BufferingNodeExecutionContext(BufferingContext, NodeExecutionContext): class BufferingNodeExecutionContext(BufferingContext, NodeExecutionContext):
@ -106,43 +106,37 @@ def runner_entrypoint(args):
@runner @runner
def runner_module(args): def runner_module(args):
""" Run bonobo using the bonobo.__main__ file, which is equivalent as doing "python -m bonobo ...".""" """ Run bonobo using the bonobo.__main__ file, which is equivalent as doing "python -m bonobo ..."."""
with patch.object(sys, 'argv', ['bonobo', *args]): with patch.object(sys, "argv", ["bonobo", *args]):
return runpy.run_path(__main__.__file__, run_name='__main__') return runpy.run_path(__main__.__file__, run_name="__main__")
all_runners = pytest.mark.parametrize('runner', [runner_entrypoint, runner_module]) all_runners = pytest.mark.parametrize("runner", [runner_entrypoint, runner_module])
all_environ_targets = pytest.mark.parametrize( all_environ_targets = pytest.mark.parametrize(
'target', [ "target", [(get_examples_path("environ.py"),), ("-m", "bonobo.examples.environ")]
(get_examples_path('environ.py'), ),
(
'-m',
'bonobo.examples.environ',
),
]
) )
@all_runners @all_runners
@all_environ_targets @all_environ_targets
class EnvironmentTestCase(): class EnvironmentTestCase:
def run_quiet(self, runner, *args): def run_quiet(self, runner, *args):
return runner('run', '--quiet', *args) return runner("run", "--quiet", *args)
def run_environ(self, runner, *args, environ=None): def run_environ(self, runner, *args, environ=None):
_environ = {'PATH': '/usr/bin'} _environ = {"PATH": "/usr/bin"}
if environ: if environ:
_environ.update(environ) _environ.update(environ)
with patch.dict('os.environ', _environ, clear=True): with patch.dict("os.environ", _environ, clear=True):
out, err = self.run_quiet(runner, *args) out, err = self.run_quiet(runner, *args)
assert 'SECRET' not in os.environ assert "SECRET" not in os.environ
assert 'PASSWORD' not in os.environ assert "PASSWORD" not in os.environ
if 'PATH' in _environ: if "PATH" in _environ:
assert 'PATH' in os.environ assert "PATH" in os.environ
assert os.environ['PATH'] == _environ['PATH'] assert os.environ["PATH"] == _environ["PATH"]
assert err == '' assert err == ""
return dict(map(lambda line: line.split(' ', 1), filter(None, out.split('\n')))) return dict(map(lambda line: line.split(" ", 1), filter(None, out.split("\n"))))
class StaticNodeTest: class StaticNodeTest:
@ -202,8 +196,8 @@ class ReaderTest(ConfigurableNodeTest):
ReaderNodeType = None ReaderNodeType = None
extension = 'txt' extension = "txt"
input_data = '' input_data = ""
@property @property
def NodeType(self): def NodeType(self):
@ -216,12 +210,12 @@ class ReaderTest(ConfigurableNodeTest):
self.tmpdir = tmpdir self.tmpdir = tmpdir
def get_create_args(self, *args): def get_create_args(self, *args):
return (self.filename, ) + args return (self.filename,) + args
def test_customizable_output_type_transform_not_a_type(self): def test_customizable_output_type_transform_not_a_type(self):
context = self.NodeExecutionContextType( context = self.NodeExecutionContextType(
self.create(*self.get_create_args(), output_type=str.upper, **self.get_create_kwargs()), self.create(*self.get_create_args(), output_type=str.upper, **self.get_create_kwargs()),
services=self.services services=self.services,
) )
with pytest.raises(TypeError): with pytest.raises(TypeError):
context.start() context.start()
@ -229,9 +223,9 @@ class ReaderTest(ConfigurableNodeTest):
def test_customizable_output_type_transform_not_a_tuple(self): def test_customizable_output_type_transform_not_a_tuple(self):
context = self.NodeExecutionContextType( context = self.NodeExecutionContextType(
self.create( self.create(
*self.get_create_args(), output_type=type('UpperString', (str, ), {}), **self.get_create_kwargs() *self.get_create_args(), output_type=type("UpperString", (str,), {}), **self.get_create_kwargs()
), ),
services=self.services services=self.services,
) )
with pytest.raises(TypeError): with pytest.raises(TypeError):
context.start() context.start()
@ -242,8 +236,8 @@ class WriterTest(ConfigurableNodeTest):
WriterNodeType = None WriterNodeType = None
extension = 'txt' extension = "txt"
input_data = '' input_data = ""
@property @property
def NodeType(self): def NodeType(self):
@ -256,7 +250,7 @@ class WriterTest(ConfigurableNodeTest):
self.tmpdir = tmpdir self.tmpdir = tmpdir
def get_create_args(self, *args): def get_create_args(self, *args):
return (self.filename, ) + args return (self.filename,) + args
def readlines(self): def readlines(self):
with self.fs.open(self.filename) as fp: with self.fs.open(self.filename) as fp:

View File

@ -6,20 +6,14 @@ from bonobo.util.testing import all_runners
def test_entrypoint(): def test_entrypoint():
commands = {} commands = {}
for command in pkg_resources.iter_entry_points('bonobo.commands'): for command in pkg_resources.iter_entry_points("bonobo.commands"):
commands[command.name] = command commands[command.name] = command
assert not { assert not {"convert", "init", "inspect", "run", "version"}.difference(set(commands))
'convert',
'init',
'inspect',
'run',
'version',
}.difference(set(commands))
@all_runners @all_runners
def test_no_command(runner): def test_no_command(runner):
_, err, exc = runner(catch_errors=True) _, err, exc = runner(catch_errors=True)
assert type(exc) == SystemExit assert type(exc) == SystemExit
assert 'error: the following arguments are required: command' in err assert "error: the following arguments are required: command" in err

View File

@ -8,10 +8,10 @@ from bonobo.util.testing import all_runners
@all_runners @all_runners
def test_convert(runner, tmpdir): def test_convert(runner, tmpdir):
csv_content = 'id;name\n1;Romain' csv_content = "id;name\n1;Romain"
tmpdir.join('in.csv').write(csv_content) tmpdir.join("in.csv").write(csv_content)
with change_working_directory(tmpdir): with change_working_directory(tmpdir):
runner('convert', 'in.csv', 'out.csv') runner("convert", "in.csv", "out.csv")
assert tmpdir.join('out.csv').read().strip() == csv_content assert tmpdir.join("out.csv").read().strip() == csv_content

View File

@ -9,7 +9,7 @@ from bonobo.util.testing import all_runners
@all_runners @all_runners
def test_download_works_for_examples(runner): def test_download_works_for_examples(runner):
expected_bytes = b'hello world' expected_bytes = b"hello world"
class MockResponse(object): class MockResponse(object):
def __init__(self): def __init__(self):
@ -27,12 +27,13 @@ def test_download_works_for_examples(runner):
fout = io.BytesIO() fout = io.BytesIO()
fout.close = lambda: None fout.close = lambda: None
with patch('bonobo.commands.download._open_url') as mock_open_url, \ with patch("bonobo.commands.download._open_url") as mock_open_url, patch(
patch('bonobo.commands.download.open') as mock_open: "bonobo.commands.download.open"
) as mock_open:
mock_open_url.return_value = MockResponse() mock_open_url.return_value = MockResponse()
mock_open.return_value = fout mock_open.return_value = fout
runner('download', 'examples/datasets/coffeeshops.txt') runner("download", "examples/datasets/coffeeshops.txt")
expected_url = EXAMPLES_BASE_URL + 'datasets/coffeeshops.txt' expected_url = EXAMPLES_BASE_URL + "datasets/coffeeshops.txt"
mock_open_url.assert_called_once_with(expected_url) mock_open_url.assert_called_once_with(expected_url)
assert fout.getvalue() == expected_bytes assert fout.getvalue() == expected_bytes
@ -41,4 +42,4 @@ def test_download_works_for_examples(runner):
@all_runners @all_runners
def test_download_fails_non_example(runner): def test_download_fails_non_example(runner):
with pytest.raises(ValueError): with pytest.raises(ValueError):
runner('download', 'something/entirely/different.txt') runner("download", "something/entirely/different.txt")

View File

@ -8,22 +8,22 @@ from bonobo.util.testing import all_runners
@all_runners @all_runners
def test_init_file(runner, tmpdir): def test_init_file(runner, tmpdir):
target = tmpdir.join('foo.py') target = tmpdir.join("foo.py")
target_filename = str(target) target_filename = str(target)
runner('init', target_filename) runner("init", target_filename)
assert os.path.exists(target_filename) assert os.path.exists(target_filename)
out, err = runner('run', target_filename) out, err = runner("run", target_filename)
assert out.replace('\n', ' ').strip() == 'Hello World' assert out.replace("\n", " ").strip() == "Hello World"
assert not err assert not err
@all_runners @all_runners
@pytest.mark.parametrize('template', InitCommand.TEMPLATES) @pytest.mark.parametrize("template", InitCommand.TEMPLATES)
def test_init_file_templates(runner, template, tmpdir): def test_init_file_templates(runner, template, tmpdir):
target = tmpdir.join('foo.py') target = tmpdir.join("foo.py")
target_filename = str(target) target_filename = str(target)
runner('init', target_filename) runner("init", target_filename)
assert os.path.exists(target_filename) assert os.path.exists(target_filename)
out, err = runner('run', target_filename) out, err = runner("run", target_filename)
assert not err assert not err

View File

@ -7,42 +7,42 @@ from bonobo.util.testing import all_runners
@all_runners @all_runners
def test_run(runner): def test_run(runner):
out, err = runner('run', '--quiet', get_examples_path('types/strings.py')) out, err = runner("run", "--quiet", get_examples_path("types/strings.py"))
out = out.split('\n') out = out.split("\n")
assert out[0].startswith('Foo ') assert out[0].startswith("Foo ")
assert out[1].startswith('Bar ') assert out[1].startswith("Bar ")
assert out[2].startswith('Baz ') assert out[2].startswith("Baz ")
@all_runners @all_runners
def test_run_module(runner): def test_run_module(runner):
out, err = runner('run', '--quiet', '-m', 'bonobo.examples.types.strings') out, err = runner("run", "--quiet", "-m", "bonobo.examples.types.strings")
out = out.split('\n') out = out.split("\n")
assert out[0].startswith('Foo ') assert out[0].startswith("Foo ")
assert out[1].startswith('Bar ') assert out[1].startswith("Bar ")
assert out[2].startswith('Baz ') assert out[2].startswith("Baz ")
@all_runners @all_runners
def test_run_path(runner): def test_run_path(runner):
out, err = runner('run', '--quiet', get_examples_path('types')) out, err = runner("run", "--quiet", get_examples_path("types"))
out = out.split('\n') out = out.split("\n")
assert out[0].startswith('Foo ') assert out[0].startswith("Foo ")
assert out[1].startswith('Bar ') assert out[1].startswith("Bar ")
assert out[2].startswith('Baz ') assert out[2].startswith("Baz ")
@all_runners @all_runners
def test_install_requirements_for_dir(runner): def test_install_requirements_for_dir(runner):
dirname = get_examples_path('types') dirname = get_examples_path("types")
with patch('bonobo.commands.run._install_requirements') as install_mock: with patch("bonobo.commands.run._install_requirements") as install_mock:
runner('run', '--install', dirname) runner("run", "--install", dirname)
install_mock.assert_called_once_with(os.path.join(dirname, 'requirements.txt')) install_mock.assert_called_once_with(os.path.join(dirname, "requirements.txt"))
@all_runners @all_runners
def test_install_requirements_for_file(runner): def test_install_requirements_for_file(runner):
dirname = get_examples_path('types') dirname = get_examples_path("types")
with patch('bonobo.commands.run._install_requirements') as install_mock: with patch("bonobo.commands.run._install_requirements") as install_mock:
runner('run', '--install', os.path.join(dirname, 'strings.py')) runner("run", "--install", os.path.join(dirname, "strings.py"))
install_mock.assert_called_once_with(os.path.join(dirname, 'requirements.txt')) install_mock.assert_called_once_with(os.path.join(dirname, "requirements.txt"))

View File

@ -5,103 +5,104 @@ from bonobo.util.testing import EnvironmentTestCase
@pytest.fixture @pytest.fixture
def env1(tmpdir): def env1(tmpdir):
env_file = tmpdir.join('.env_one') env_file = tmpdir.join(".env_one")
env_file.write('\n'.join(( env_file.write("\n".join(("SECRET=unknown", "PASSWORD=sweet", "PATH=first")))
'SECRET=unknown',
'PASSWORD=sweet',
'PATH=first',
)))
return str(env_file) return str(env_file)
@pytest.fixture @pytest.fixture
def env2(tmpdir): def env2(tmpdir):
env_file = tmpdir.join('.env_two') env_file = tmpdir.join(".env_two")
env_file.write('\n'.join(( env_file.write("\n".join(("PASSWORD=bitter", "PATH='second'")))
'PASSWORD=bitter',
"PATH='second'",
)))
return str(env_file) return str(env_file)
class TestDefaultEnvFile(EnvironmentTestCase): class TestDefaultEnvFile(EnvironmentTestCase):
def test_run_with_default_env_file(self, runner, target, env1): def test_run_with_default_env_file(self, runner, target, env1):
env = self.run_environ(runner, *target, '--default-env-file', env1) env = self.run_environ(runner, *target, "--default-env-file", env1)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'sweet' assert env.get("PASSWORD") == "sweet"
assert env.get('PATH') == '/usr/bin' assert env.get("PATH") == "/usr/bin"
def test_run_with_multiple_default_env_files(self, runner, target, env1, env2): def test_run_with_multiple_default_env_files(self, runner, target, env1, env2):
env = self.run_environ(runner, *target, '--default-env-file', env1, '--default-env-file', env2) env = self.run_environ(runner, *target, "--default-env-file", env1, "--default-env-file", env2)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'sweet' assert env.get("PASSWORD") == "sweet"
assert env.get('PATH') == '/usr/bin' assert env.get("PATH") == "/usr/bin"
env = self.run_environ(runner, *target, '--default-env-file', env2, '--default-env-file', env1) env = self.run_environ(runner, *target, "--default-env-file", env2, "--default-env-file", env1)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'bitter' assert env.get("PASSWORD") == "bitter"
assert env.get('PATH') == '/usr/bin' assert env.get("PATH") == "/usr/bin"
class TestEnvFile(EnvironmentTestCase): class TestEnvFile(EnvironmentTestCase):
def test_run_with_file(self, runner, target, env1): def test_run_with_file(self, runner, target, env1):
env = self.run_environ(runner, *target, '--env-file', env1) env = self.run_environ(runner, *target, "--env-file", env1)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'sweet' assert env.get("PASSWORD") == "sweet"
assert env.get('PATH') == 'first' assert env.get("PATH") == "first"
def test_run_with_multiple_files(self, runner, target, env1, env2): def test_run_with_multiple_files(self, runner, target, env1, env2):
env = self.run_environ(runner, *target, '--env-file', env1, '--env-file', env2) env = self.run_environ(runner, *target, "--env-file", env1, "--env-file", env2)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'bitter' assert env.get("PASSWORD") == "bitter"
assert env.get('PATH') == 'second' assert env.get("PATH") == "second"
env = self.run_environ(runner, *target, '--env-file', env2, '--env-file', env1) env = self.run_environ(runner, *target, "--env-file", env2, "--env-file", env1)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'sweet' assert env.get("PASSWORD") == "sweet"
assert env.get('PATH') == 'first' assert env.get("PATH") == "first"
class TestEnvFileCombinations(EnvironmentTestCase): class TestEnvFileCombinations(EnvironmentTestCase):
def test_run_with_both_env_files(self, runner, target, env1, env2): def test_run_with_both_env_files(self, runner, target, env1, env2):
env = self.run_environ(runner, *target, '--default-env-file', env1, '--env-file', env2) env = self.run_environ(runner, *target, "--default-env-file", env1, "--env-file", env2)
assert env.get('SECRET') == 'unknown' assert env.get("SECRET") == "unknown"
assert env.get('PASSWORD') == 'bitter' assert env.get("PASSWORD") == "bitter"
assert env.get('PATH') == 'second' assert env.get("PATH") == "second"
def test_run_with_both_env_files_then_overrides(self, runner, target, env1, env2): def test_run_with_both_env_files_then_overrides(self, runner, target, env1, env2):
env = self.run_environ( env = self.run_environ(
runner, *target, '--default-env-file', env1, '--env-file', env2, '--env', 'PASSWORD=mine', '--env', runner,
'SECRET=s3cr3t' *target,
"--default-env-file",
env1,
"--env-file",
env2,
"--env",
"PASSWORD=mine",
"--env",
"SECRET=s3cr3t"
) )
assert env.get('SECRET') == 's3cr3t' assert env.get("SECRET") == "s3cr3t"
assert env.get('PASSWORD') == 'mine' assert env.get("PASSWORD") == "mine"
assert env.get('PATH') == 'second' assert env.get("PATH") == "second"
class TestEnvVars(EnvironmentTestCase): class TestEnvVars(EnvironmentTestCase):
def test_run_no_env(self, runner, target): def test_run_no_env(self, runner, target):
env = self.run_environ(runner, *target, environ={'USER': 'romain'}) env = self.run_environ(runner, *target, environ={"USER": "romain"})
assert env.get('USER') == 'romain' assert env.get("USER") == "romain"
def test_run_env(self, runner, target): def test_run_env(self, runner, target):
env = self.run_environ(runner, *target, '--env', 'USER=serious', environ={'USER': 'romain'}) env = self.run_environ(runner, *target, "--env", "USER=serious", environ={"USER": "romain"})
assert env.get('USER') == 'serious' assert env.get("USER") == "serious"
def test_run_env_mixed(self, runner, target): def test_run_env_mixed(self, runner, target):
env = self.run_environ(runner, *target, '--env', 'ONE=1', '--env', 'TWO="2"', environ={'USER': 'romain'}) env = self.run_environ(runner, *target, "--env", "ONE=1", "--env", 'TWO="2"', environ={"USER": "romain"})
assert env.get('USER') == 'romain' assert env.get("USER") == "romain"
assert env.get('ONE') == '1' assert env.get("ONE") == "1"
assert env.get('TWO') == '2' assert env.get("TWO") == "2"
def test_run_default_env(self, runner, target): def test_run_default_env(self, runner, target):
env = self.run_environ(runner, *target, '--default-env', 'USER=clown') env = self.run_environ(runner, *target, "--default-env", "USER=clown")
assert env.get('USER') == 'clown' assert env.get("USER") == "clown"
env = self.run_environ(runner, *target, '--default-env', 'USER=clown', environ={'USER': 'romain'}) env = self.run_environ(runner, *target, "--default-env", "USER=clown", environ={"USER": "romain"})
assert env.get('USER') == 'romain' assert env.get("USER") == "romain"
env = self.run_environ( env = self.run_environ(
runner, *target, '--env', 'USER=serious', '--default-env', 'USER=clown', environ={'USER': 'romain'} runner, *target, "--env", "USER=serious", "--default-env", "USER=clown", environ={"USER": "romain"}
) )
assert env.get('USER') == 'serious' assert env.get("USER") == "serious"

View File

@ -4,17 +4,17 @@ from bonobo.util.testing import all_runners
@all_runners @all_runners
def test_version(runner): def test_version(runner):
out, err = runner('version') out, err = runner("version")
out = out.strip() out = out.strip()
assert out.startswith('bonobo ') assert out.startswith("bonobo ")
assert __version__ in out assert __version__ in out
out, err = runner('version', '-q') out, err = runner("version", "-q")
out = out.strip() out = out.strip()
assert out.startswith('bonobo ') assert out.startswith("bonobo ")
assert __version__ in out assert __version__ in out
out, err = runner('version', '-qq') out, err = runner("version", "-qq")
out = out.strip() out = out.strip()
assert not out.startswith('bonobo ') assert not out.startswith("bonobo ")
assert __version__ in out assert __version__ in out

View File

@ -11,7 +11,7 @@ class NoOptConfigurable(Configurable):
class MyConfigurable(Configurable): class MyConfigurable(Configurable):
required_str = Option(str) required_str = Option(str)
default_str = Option(str, default='foo') default_str = Option(str, default="foo")
integer = Option(int, required=False) integer = Option(int, required=False)
@ -20,7 +20,7 @@ class MyHarderConfigurable(MyConfigurable):
class MyBetterConfigurable(MyConfigurable): class MyBetterConfigurable(MyConfigurable):
required_str = Option(str, required=False, default='kaboom') required_str = Option(str, required=False, default="kaboom")
class MyConfigurableUsingPositionalOptions(MyConfigurable): class MyConfigurableUsingPositionalOptions(MyConfigurable):
@ -35,7 +35,7 @@ def test_missing_required_option_error():
with pytest.raises(TypeError) as exc: with pytest.raises(TypeError) as exc:
MyConfigurable(_final=True) MyConfigurable(_final=True)
assert exc.match('missing 1 required option:') assert exc.match("missing 1 required option:")
def test_missing_required_options_error(): def test_missing_required_options_error():
@ -44,29 +44,29 @@ def test_missing_required_options_error():
with pytest.raises(TypeError) as exc: with pytest.raises(TypeError) as exc:
MyHarderConfigurable(_final=True) MyHarderConfigurable(_final=True)
assert exc.match('missing 2 required options:') assert exc.match("missing 2 required options:")
def test_extraneous_option_error(): def test_extraneous_option_error():
with pytest.raises(TypeError) as exc: with pytest.raises(TypeError) as exc:
MyConfigurable(required_str='foo', hello='world') MyConfigurable(required_str="foo", hello="world")
assert exc.match('got 1 unexpected option:') assert exc.match("got 1 unexpected option:")
def test_extraneous_options_error(): def test_extraneous_options_error():
with pytest.raises(TypeError) as exc: with pytest.raises(TypeError) as exc:
MyConfigurable(required_str='foo', hello='world', acme='corp') MyConfigurable(required_str="foo", hello="world", acme="corp")
assert exc.match('got 2 unexpected options:') assert exc.match("got 2 unexpected options:")
def test_defaults(): def test_defaults():
o = MyConfigurable(required_str='hello') o = MyConfigurable(required_str="hello")
with inspect_node(o) as ni: with inspect_node(o) as ni:
assert not ni.partial assert not ni.partial
assert o.required_str == 'hello' assert o.required_str == "hello"
assert o.default_str == 'foo' assert o.default_str == "foo"
assert o.integer is None assert o.integer is None
@ -76,30 +76,30 @@ def test_str_type_factory():
with inspect_node(o) as ni: with inspect_node(o) as ni:
assert not ni.partial assert not ni.partial
assert o.required_str == '42' assert o.required_str == "42"
assert o.default_str == 'foo' assert o.default_str == "foo"
assert o.integer is None assert o.integer is None
def test_int_type_factory(): def test_int_type_factory():
o = MyConfigurable(required_str='yo', default_str='bar', integer='42') o = MyConfigurable(required_str="yo", default_str="bar", integer="42")
with inspect_node(o) as ni: with inspect_node(o) as ni:
assert not ni.partial assert not ni.partial
assert o.required_str == 'yo' assert o.required_str == "yo"
assert o.default_str == 'bar' assert o.default_str == "bar"
assert o.integer == 42 assert o.integer == 42
def test_bool_type_factory(): def test_bool_type_factory():
o = MyHarderConfigurable(required_str='yes', also_required='True') o = MyHarderConfigurable(required_str="yes", also_required="True")
with inspect_node(o) as ni: with inspect_node(o) as ni:
assert not ni.partial assert not ni.partial
assert o.required_str == 'yes' assert o.required_str == "yes"
assert o.default_str == 'foo' assert o.default_str == "foo"
assert o.integer is None assert o.integer is None
assert o.also_required is True assert o.also_required is True
@ -110,22 +110,22 @@ def test_option_resolution_order():
with inspect_node(o) as ni: with inspect_node(o) as ni:
assert not ni.partial assert not ni.partial
assert o.required_str == 'kaboom' assert o.required_str == "kaboom"
assert o.default_str == 'foo' assert o.default_str == "foo"
assert o.integer is None assert o.integer is None
def test_option_positional(): def test_option_positional():
o = MyConfigurableUsingPositionalOptions('1', '2', '3', required_str='hello') o = MyConfigurableUsingPositionalOptions("1", "2", "3", required_str="hello")
with inspect_node(o) as ni: with inspect_node(o) as ni:
assert not ni.partial assert not ni.partial
assert o.first == '1' assert o.first == "1"
assert o.second == '2' assert o.second == "2"
assert o.third == '3' assert o.third == "3"
assert o.required_str == 'hello' assert o.required_str == "hello"
assert o.default_str == 'foo' assert o.default_str == "foo"
assert o.integer is None assert o.integer is None
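Aside from the quoting change, this hunk is a handy reference for how Option coercion and defaults behave. A minimal standalone sketch, restating only what the assertions above already check (ExampleNode is an illustrative name, not part of the test suite):

    from bonobo.config import Configurable, Option

    class ExampleNode(Configurable):
        # same option layout as MyConfigurable above
        required_str = Option(str)                 # required, value passed through str()
        default_str = Option(str, default="foo")   # optional, falls back to "foo"
        integer = Option(int, required=False)      # optional, value passed through int()

    node = ExampleNode(required_str="yo", default_str="bar", integer="42")
    assert node.required_str == "yo"
    assert node.default_str == "bar"
    assert node.integer == 42   # the int type factory coerced the string "42"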

View File

@ -50,10 +50,7 @@ def test_define_with_decorator():
calls = [] calls = []
def my_handler(*args, **kwargs): def my_handler(*args, **kwargs):
calls.append(( calls.append((args, kwargs))
args,
kwargs,
))
Concrete = MethodBasedConfigurable(my_handler) Concrete = MethodBasedConfigurable(my_handler)
@ -64,7 +61,7 @@ def test_define_with_decorator():
assert ci.type == MethodBasedConfigurable assert ci.type == MethodBasedConfigurable
assert ci.partial assert ci.partial
t = Concrete('foo', bar='baz') t = Concrete("foo", bar="baz")
assert callable(t.handler) assert callable(t.handler)
assert len(calls) == 0 assert len(calls) == 0
@ -75,15 +72,12 @@ def test_define_with_decorator():
def test_late_binding_method_decoration(): def test_late_binding_method_decoration():
calls = [] calls = []
@MethodBasedConfigurable(foo='foo') @MethodBasedConfigurable(foo="foo")
def Concrete(*args, **kwargs): def Concrete(*args, **kwargs):
calls.append(( calls.append((args, kwargs))
args,
kwargs,
))
assert callable(Concrete.handler) assert callable(Concrete.handler)
t = Concrete(bar='baz') t = Concrete(bar="baz")
assert callable(t.handler) assert callable(t.handler)
assert len(calls) == 0 assert len(calls) == 0
@ -95,12 +89,9 @@ def test_define_with_argument():
calls = [] calls = []
def concrete_handler(*args, **kwargs): def concrete_handler(*args, **kwargs):
calls.append(( calls.append((args, kwargs))
args,
kwargs,
))
t = MethodBasedConfigurable(concrete_handler, 'foo', bar='baz') t = MethodBasedConfigurable(concrete_handler, "foo", bar="baz")
assert callable(t.handler) assert callable(t.handler)
assert len(calls) == 0 assert len(calls) == 0
t() t()
@ -112,12 +103,9 @@ def test_define_with_inheritance():
class Inheriting(MethodBasedConfigurable): class Inheriting(MethodBasedConfigurable):
def handler(self, *args, **kwargs): def handler(self, *args, **kwargs):
calls.append(( calls.append((args, kwargs))
args,
kwargs,
))
t = Inheriting('foo', bar='baz') t = Inheriting("foo", bar="baz")
assert callable(t.handler) assert callable(t.handler)
assert len(calls) == 0 assert len(calls) == 0
t() t()
@ -132,13 +120,10 @@ def test_inheritance_then_decorate():
@Inheriting @Inheriting
def Concrete(*args, **kwargs): def Concrete(*args, **kwargs):
calls.append(( calls.append((args, kwargs))
args,
kwargs,
))
assert callable(Concrete.handler) assert callable(Concrete.handler)
t = Concrete('foo', bar='baz') t = Concrete("foo", bar="baz")
assert callable(t.handler) assert callable(t.handler)
assert len(calls) == 0 assert len(calls) == 0
t() t()

View File

@ -12,11 +12,11 @@ class Bobby(Configurable):
@ContextProcessor @ContextProcessor
def think(self, context): def think(self, context):
yield 'different' yield "different"
def __call__(self, think, *args, **kwargs): def __call__(self, think, *args, **kwargs):
self.handler('1', *args, **kwargs) self.handler("1", *args, **kwargs)
self.handler2('2', *args, **kwargs) self.handler2("2", *args, **kwargs)
def test_partial(): def test_partial():
@ -40,7 +40,7 @@ def test_partial():
assert len(ci.options) == 4 assert len(ci.options) == 4
assert len(ci.processors) == 1 assert len(ci.processors) == 1
assert ci.partial assert ci.partial
assert ci.partial[0] == (f1, ) assert ci.partial[0] == (f1,)
assert not len(ci.partial[1]) assert not len(ci.partial[1])
# instantiate a more complete partial instance ... # instantiate a more complete partial instance ...
@ -53,13 +53,10 @@ def test_partial():
assert len(ci.options) == 4 assert len(ci.options) == 4
assert len(ci.processors) == 1 assert len(ci.processors) == 1
assert ci.partial assert ci.partial
assert ci.partial[0] == ( assert ci.partial[0] == (f1, f2)
f1,
f2,
)
assert not len(ci.partial[1]) assert not len(ci.partial[1])
c = C('foo') c = C("foo")
with inspect_node(c) as ci: with inspect_node(c) as ci:
assert ci.type == Bobby assert ci.type == Bobby

View File

@ -1,7 +1,7 @@
from operator import attrgetter from operator import attrgetter
from bonobo.config import Configurable from bonobo.config import Configurable
from bonobo.config.processors import ContextProcessor, resolve_processors, ContextCurrifier, use_context_processor from bonobo.config.processors import ContextCurrifier, ContextProcessor, resolve_processors, use_context_processor
class CP1(Configurable): class CP1(Configurable):
@ -11,11 +11,11 @@ class CP1(Configurable):
@ContextProcessor @ContextProcessor
def a(self): def a(self):
yield 'this is A' yield "this is A"
@ContextProcessor @ContextProcessor
def b(self, a): def b(self, a):
yield a.upper()[:-1] + 'b' yield a.upper()[:-1] + "b"
def __call__(self, a, b): def __call__(self, a, b):
return a, b return a, b
@ -46,20 +46,20 @@ class CP3(CP2):
def get_all_processors_names(cls): def get_all_processors_names(cls):
return list(map(attrgetter('__name__'), resolve_processors(cls))) return list(map(attrgetter("__name__"), resolve_processors(cls)))
def test_inheritance_and_ordering(): def test_inheritance_and_ordering():
assert get_all_processors_names(CP1) == ['c', 'a', 'b'] assert get_all_processors_names(CP1) == ["c", "a", "b"]
assert get_all_processors_names(CP2) == ['c', 'a', 'b', 'f', 'e', 'd'] assert get_all_processors_names(CP2) == ["c", "a", "b", "f", "e", "d"]
assert get_all_processors_names(CP3) == ['c', 'a', 'b', 'f', 'e', 'd', 'c', 'b'] assert get_all_processors_names(CP3) == ["c", "a", "b", "f", "e", "d", "c", "b"]
def test_setup_teardown(): def test_setup_teardown():
o = CP1() o = CP1()
stack = ContextCurrifier(o) stack = ContextCurrifier(o)
stack.setup() stack.setup()
assert o(*stack.args) == ('this is A', 'THIS IS b') assert o(*stack.args) == ("this is A", "THIS IS b")
stack.teardown() stack.teardown()
@ -71,4 +71,4 @@ def test_processors_on_func():
def node(context): def node(context):
pass pass
assert get_all_processors_names(node) == ['cp'] assert get_all_processors_names(node) == ["cp"]
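The setup/teardown flow exercised by test_setup_teardown above can be condensed into a short sketch. It keeps only the a and b processors shown in this hunk and drops the extra c processor that CP1 resolves, so it mirrors the asserted values rather than the real fixtures; Example is an illustrative name:

    from bonobo.config import Configurable
    from bonobo.config.processors import ContextCurrifier, ContextProcessor

    class Example(Configurable):
        @ContextProcessor
        def a(self):
            yield "this is A"               # injected as the first extra argument

        @ContextProcessor
        def b(self, a):                     # receives the value yielded by a
            yield a.upper()[:-1] + "b"

        def __call__(self, a, b):
            return a, b

    node = Example()
    stack = ContextCurrifier(node)
    stack.setup()                           # runs the processors, collecting yielded values
    assert node(*stack.args) == ("this is A", "THIS IS b")
    stack.teardown()                        # resumes the generators past their yield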

View File

@ -4,11 +4,11 @@ import time
import pytest import pytest
from bonobo.config import Configurable, Container, Exclusive, Service, use from bonobo.config import Configurable, Container, Exclusive, Service, use
from bonobo.config.services import validate_service_name, create_container from bonobo.config.services import create_container, validate_service_name
from bonobo.util import get_name from bonobo.util import get_name
class PrinterInterface(): class PrinterInterface:
def print(self, *args): def print(self, *args):
raise NotImplementedError() raise NotImplementedError()
@ -18,46 +18,43 @@ class ConcretePrinter(PrinterInterface):
self.prefix = prefix self.prefix = prefix
def print(self, *args): def print(self, *args):
return ';'.join((self.prefix, *args)) return ";".join((self.prefix, *args))
SERVICES = Container( SERVICES = Container(printer0=ConcretePrinter(prefix="0"), printer1=ConcretePrinter(prefix="1"))
printer0=ConcretePrinter(prefix='0'),
printer1=ConcretePrinter(prefix='1'),
)
class MyServiceDependantConfigurable(Configurable): class MyServiceDependantConfigurable(Configurable):
printer = Service(PrinterInterface, ) printer = Service(PrinterInterface)
def __call__(self, *args, printer: PrinterInterface): def __call__(self, *args, printer: PrinterInterface):
return printer.print(*args) return printer.print(*args)
def test_service_name_validator(): def test_service_name_validator():
assert validate_service_name('foo') == 'foo' assert validate_service_name("foo") == "foo"
assert validate_service_name('foo.bar') == 'foo.bar' assert validate_service_name("foo.bar") == "foo.bar"
assert validate_service_name('Foo') == 'Foo' assert validate_service_name("Foo") == "Foo"
assert validate_service_name('Foo.Bar') == 'Foo.Bar' assert validate_service_name("Foo.Bar") == "Foo.Bar"
assert validate_service_name('Foo.a0') == 'Foo.a0' assert validate_service_name("Foo.a0") == "Foo.a0"
with pytest.raises(ValueError): with pytest.raises(ValueError):
validate_service_name('foo.0') validate_service_name("foo.0")
with pytest.raises(ValueError): with pytest.raises(ValueError):
validate_service_name('0.foo') validate_service_name("0.foo")
def test_service_dependency(): def test_service_dependency():
o = MyServiceDependantConfigurable(printer='printer0') o = MyServiceDependantConfigurable(printer="printer0")
assert o('foo', 'bar', printer=SERVICES.get('printer0')) == '0;foo;bar' assert o("foo", "bar", printer=SERVICES.get("printer0")) == "0;foo;bar"
assert o('bar', 'baz', printer=SERVICES.get('printer1')) == '1;bar;baz' assert o("bar", "baz", printer=SERVICES.get("printer1")) == "1;bar;baz"
assert o('foo', 'bar', **SERVICES.kwargs_for(o)) == '0;foo;bar' assert o("foo", "bar", **SERVICES.kwargs_for(o)) == "0;foo;bar"
def test_service_dependency_unavailable(): def test_service_dependency_unavailable():
o = MyServiceDependantConfigurable(printer='printer2') o = MyServiceDependantConfigurable(printer="printer2")
with pytest.raises(KeyError): with pytest.raises(KeyError):
SERVICES.kwargs_for(o) SERVICES.kwargs_for(o)
@ -72,15 +69,15 @@ class VCR:
def test_exclusive(): def test_exclusive():
vcr = VCR() vcr = VCR()
vcr.append('hello') vcr.append("hello")
def record(prefix, vcr=vcr): def record(prefix, vcr=vcr):
with Exclusive(vcr): with Exclusive(vcr):
for i in range(5): for i in range(5):
vcr.append(' '.join((prefix, str(i)))) vcr.append(" ".join((prefix, str(i))))
time.sleep(0.05) time.sleep(0.05)
threads = [threading.Thread(target=record, args=(str(i), )) for i in range(5)] threads = [threading.Thread(target=record, args=(str(i),)) for i in range(5)]
for thread in threads: for thread in threads:
thread.start() thread.start()
@ -90,8 +87,32 @@ def test_exclusive():
thread.join() thread.join()
assert vcr.tape == [ assert vcr.tape == [
'hello', '0 0', '0 1', '0 2', '0 3', '0 4', '1 0', '1 1', '1 2', '1 3', '1 4', '2 0', '2 1', '2 2', '2 3', "hello",
'2 4', '3 0', '3 1', '3 2', '3 3', '3 4', '4 0', '4 1', '4 2', '4 3', '4 4' "0 0",
"0 1",
"0 2",
"0 3",
"0 4",
"1 0",
"1 1",
"1 2",
"1 3",
"1 4",
"2 0",
"2 1",
"2 2",
"2 3",
"2 4",
"3 0",
"3 1",
"3 2",
"3 3",
"3 4",
"4 0",
"4 1",
"4 2",
"4 3",
"4 4",
] ]
@ -100,28 +121,25 @@ def test_requires():
services = Container(output=vcr.append) services = Container(output=vcr.append)
@use('output') @use("output")
def append(out, x): def append(out, x):
out(x) out(x)
svcargs = services.kwargs_for(append) svcargs = services.kwargs_for(append)
assert len(svcargs) == 1 assert len(svcargs) == 1
assert svcargs['output'] == vcr.append assert svcargs["output"] == vcr.append
@pytest.mark.parametrize('services', [None, {}]) @pytest.mark.parametrize("services", [None, {}])
def test_create_container_empty_values(services): def test_create_container_empty_values(services):
c = create_container(services) c = create_container(services)
assert len(c) == 2 assert len(c) == 2
assert 'fs' in c and get_name(c['fs']) == 'OSFS' assert "fs" in c and get_name(c["fs"]) == "OSFS"
assert 'http' in c and get_name(c['http']) == 'requests' assert "http" in c and get_name(c["http"]) == "requests"
def test_create_container_override(): def test_create_container_override():
c = create_container({ c = create_container({"http": "http", "fs": "fs"})
'http': 'http',
'fs': 'fs',
})
assert len(c) == 2 assert len(c) == 2
assert 'fs' in c and c['fs'] == 'fs' assert "fs" in c and c["fs"] == "fs"
assert 'http' in c and c['http'] == 'http' assert "http" in c and c["http"] == "http"
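The service wiring covered by this file reduces to one small pattern, sketched below using only the calls the tests above exercise (Container, Service, use, kwargs_for). The printer/emit/Emitter names are illustrative, and the sketch assumes the string-name form of Service:

    from bonobo.config import Configurable, Container, Service, use

    services = Container(printer=print)     # any callable or object can be registered

    # function style: declare the dependency by name with @use ...
    @use("printer")
    def emit(printer, x):
        printer(x)

    assert services.kwargs_for(emit) == {"printer": print}

    # ... or class style: a Service option resolved by name at execution time
    class Emitter(Configurable):
        printer = Service("printer")

        def __call__(self, x, *, printer):
            printer(x)

    emitter = Emitter()
    emitter("hello", **services.kwargs_for(emitter))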

View File

@ -5,22 +5,22 @@ from bonobo.config import use_raw_input
from bonobo.execution.contexts import GraphExecutionContext from bonobo.execution.contexts import GraphExecutionContext
from bonobo.util.bags import BagType from bonobo.util.bags import BagType
Extracted = namedtuple('Extracted', ['id', 'name', 'value']) Extracted = namedtuple("Extracted", ["id", "name", "value"])
ExtractedBT = BagType('ExtractedBT', ['id', 'name', 'value']) ExtractedBT = BagType("ExtractedBT", ["id", "name", "value"])
def extract_nt(): def extract_nt():
yield Extracted(id=1, name='Guido', value='.py') yield Extracted(id=1, name="Guido", value=".py")
yield Extracted(id=2, name='Larry', value='.pl') yield Extracted(id=2, name="Larry", value=".pl")
yield Extracted(id=3, name='Dennis', value='.c') yield Extracted(id=3, name="Dennis", value=".c")
yield Extracted(id=4, name='Yukihiro', value='.rb') yield Extracted(id=4, name="Yukihiro", value=".rb")
def extract_bt(): def extract_bt():
yield ExtractedBT(id=1, name='Guido', value='.py') yield ExtractedBT(id=1, name="Guido", value=".py")
yield ExtractedBT(id=2, name='Larry', value='.pl') yield ExtractedBT(id=2, name="Larry", value=".pl")
yield ExtractedBT(id=3, name='Dennis', value='.c') yield ExtractedBT(id=3, name="Dennis", value=".c")
yield ExtractedBT(id=4, name='Yukihiro', value='.rb') yield ExtractedBT(id=4, name="Yukihiro", value=".rb")
def transform_using_args(id, name, value): def transform_using_args(id, name, value):
@ -53,10 +53,18 @@ def test_execution():
with GraphExecutionContext(graph) as context: with GraphExecutionContext(graph) as context:
context.run_until_complete() context.run_until_complete()
assert result_args == [(2, 'Guido', 'guido.py'), (4, 'Larry', 'larry.pl'), (6, 'Dennis', 'dennis.c'), assert result_args == [
(8, 'Yukihiro', 'yukihiro.rb')] (2, "Guido", "guido.py"),
(4, "Larry", "larry.pl"),
(6, "Dennis", "dennis.c"),
(8, "Yukihiro", "yukihiro.rb"),
]
assert result_nt == [(1, 'GUIDO', '.py'), (2, 'LARRY', '.pl'), (3, 'DENNIS', '.c'), (4, 'YUKIHIRO', '.rb')] assert result_nt == [(1, "GUIDO", ".py"), (2, "LARRY", ".pl"), (3, "DENNIS", ".c"), (4, "YUKIHIRO", ".rb")]
assert result_bt == [(2, 'Guido', 'guido.py'), (4, 'Larry', 'larry.pl'), (6, 'Dennis', 'dennis.c'), assert result_bt == [
(8, 'Yukihiro', 'yukihiro.rb')] (2, "Guido", "guido.py"),
(4, "Larry", "larry.pl"),
(6, "Dennis", "dennis.c"),
(8, "Yukihiro", "yukihiro.rb"),
]

View File

@ -1,14 +1,14 @@
from bonobo import Graph from bonobo import Graph
from bonobo.constants import EMPTY, BEGIN, END from bonobo.constants import BEGIN, EMPTY, END
from bonobo.execution.contexts import GraphExecutionContext from bonobo.execution.contexts import GraphExecutionContext
def raise_an_error(*args, **kwargs): def raise_an_error(*args, **kwargs):
raise Exception('Careful, man, there\'s a beverage here!') raise Exception("Careful, man, there's a beverage here!")
def raise_an_unrecoverrable_error(*args, **kwargs): def raise_an_unrecoverrable_error(*args, **kwargs):
raise Exception('You are entering a world of pain!') raise Exception("You are entering a world of pain!")
def test_lifecycle_of_empty_graph(): def test_lifecycle_of_empty_graph():

View File

@ -6,131 +6,131 @@ from bonobo import Graph
from bonobo.constants import EMPTY from bonobo.constants import EMPTY
from bonobo.execution.contexts.node import NodeExecutionContext, split_token from bonobo.execution.contexts.node import NodeExecutionContext, split_token
from bonobo.execution.strategies import NaiveStrategy from bonobo.execution.strategies import NaiveStrategy
from bonobo.util.envelopes import F_NOT_MODIFIED, F_INHERIT from bonobo.util.envelopes import F_INHERIT, F_NOT_MODIFIED
from bonobo.util.testing import BufferingNodeExecutionContext, BufferingGraphExecutionContext from bonobo.util.testing import BufferingGraphExecutionContext, BufferingNodeExecutionContext
def test_node_string(): def test_node_string():
def f(): def f():
return 'foo' return "foo"
with BufferingNodeExecutionContext(f) as context: with BufferingNodeExecutionContext(f) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == ('foo', ) assert output[0] == ("foo",)
def g(): def g():
yield 'foo' yield "foo"
yield 'bar' yield "bar"
with BufferingNodeExecutionContext(g) as context: with BufferingNodeExecutionContext(g) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == ('foo', ) assert output[0] == ("foo",)
assert output[1] == ('bar', ) assert output[1] == ("bar",)
def test_node_bytes(): def test_node_bytes():
def f(): def f():
return b'foo' return b"foo"
with BufferingNodeExecutionContext(f) as context: with BufferingNodeExecutionContext(f) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == (b'foo', ) assert output[0] == (b"foo",)
def g(): def g():
yield b'foo' yield b"foo"
yield b'bar' yield b"bar"
with BufferingNodeExecutionContext(g) as context: with BufferingNodeExecutionContext(g) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == (b'foo', ) assert output[0] == (b"foo",)
assert output[1] == (b'bar', ) assert output[1] == (b"bar",)
def test_node_dict(): def test_node_dict():
def f(): def f():
return {'id': 1, 'name': 'foo'} return {"id": 1, "name": "foo"}
with BufferingNodeExecutionContext(f) as context: with BufferingNodeExecutionContext(f) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == ({'id': 1, 'name': 'foo'}, ) assert output[0] == ({"id": 1, "name": "foo"},)
def g(): def g():
yield {'id': 1, 'name': 'foo'} yield {"id": 1, "name": "foo"}
yield {'id': 2, 'name': 'bar'} yield {"id": 2, "name": "bar"}
with BufferingNodeExecutionContext(g) as context: with BufferingNodeExecutionContext(g) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == ({'id': 1, 'name': 'foo'}, ) assert output[0] == ({"id": 1, "name": "foo"},)
assert output[1] == ({'id': 2, 'name': 'bar'}, ) assert output[1] == ({"id": 2, "name": "bar"},)
def test_node_dict_chained(): def test_node_dict_chained():
strategy = NaiveStrategy(GraphExecutionContextType=BufferingGraphExecutionContext) strategy = NaiveStrategy(GraphExecutionContextType=BufferingGraphExecutionContext)
def f(): def f():
return {'id': 1, 'name': 'foo'} return {"id": 1, "name": "foo"}
def uppercase_name(values): def uppercase_name(values):
return {**values, 'name': values['name'].upper()} return {**values, "name": values["name"].upper()}
graph = Graph(f, uppercase_name) graph = Graph(f, uppercase_name)
context = strategy.execute(graph) context = strategy.execute(graph)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == ({'id': 1, 'name': 'FOO'}, ) assert output[0] == ({"id": 1, "name": "FOO"},)
def g(): def g():
yield {'id': 1, 'name': 'foo'} yield {"id": 1, "name": "foo"}
yield {'id': 2, 'name': 'bar'} yield {"id": 2, "name": "bar"}
graph = Graph(g, uppercase_name) graph = Graph(g, uppercase_name)
context = strategy.execute(graph) context = strategy.execute(graph)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == ({'id': 1, 'name': 'FOO'}, ) assert output[0] == ({"id": 1, "name": "FOO"},)
assert output[1] == ({'id': 2, 'name': 'BAR'}, ) assert output[1] == ({"id": 2, "name": "BAR"},)
def test_node_tuple(): def test_node_tuple():
def f(): def f():
return 'foo', 'bar' return "foo", "bar"
with BufferingNodeExecutionContext(f) as context: with BufferingNodeExecutionContext(f) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == ('foo', 'bar') assert output[0] == ("foo", "bar")
def g(): def g():
yield 'foo', 'bar' yield "foo", "bar"
yield 'foo', 'baz' yield "foo", "baz"
with BufferingNodeExecutionContext(g) as context: with BufferingNodeExecutionContext(g) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == ('foo', 'bar') assert output[0] == ("foo", "bar")
assert output[1] == ('foo', 'baz') assert output[1] == ("foo", "baz")
def test_node_tuple_chained(): def test_node_tuple_chained():
@ -140,50 +140,50 @@ def test_node_tuple_chained():
return tuple(map(str.upper, args)) return tuple(map(str.upper, args))
def f(): def f():
return 'foo', 'bar' return "foo", "bar"
graph = Graph(f, uppercase) graph = Graph(f, uppercase)
context = strategy.execute(graph) context = strategy.execute(graph)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == ('FOO', 'BAR') assert output[0] == ("FOO", "BAR")
def g(): def g():
yield 'foo', 'bar' yield "foo", "bar"
yield 'foo', 'baz' yield "foo", "baz"
graph = Graph(g, uppercase) graph = Graph(g, uppercase)
context = strategy.execute(graph) context = strategy.execute(graph)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == ('FOO', 'BAR') assert output[0] == ("FOO", "BAR")
assert output[1] == ('FOO', 'BAZ') assert output[1] == ("FOO", "BAZ")
def test_node_tuple_dict(): def test_node_tuple_dict():
def f(): def f():
return 'foo', 'bar', {'id': 1} return "foo", "bar", {"id": 1}
with BufferingNodeExecutionContext(f) as context: with BufferingNodeExecutionContext(f) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 1 assert len(output) == 1
assert output[0] == ('foo', 'bar', {'id': 1}) assert output[0] == ("foo", "bar", {"id": 1})
def g(): def g():
yield 'foo', 'bar', {'id': 1} yield "foo", "bar", {"id": 1}
yield 'foo', 'baz', {'id': 2} yield "foo", "baz", {"id": 2}
with BufferingNodeExecutionContext(g) as context: with BufferingNodeExecutionContext(g) as context:
context.write_sync(EMPTY) context.write_sync(EMPTY)
output = context.get_buffer() output = context.get_buffer()
assert len(output) == 2 assert len(output) == 2
assert output[0] == ('foo', 'bar', {'id': 1}) assert output[0] == ("foo", "bar", {"id": 1})
assert output[1] == ('foo', 'baz', {'id': 2}) assert output[1] == ("foo", "baz", {"id": 2})
def test_node_lifecycle_natural(): def test_node_lifecycle_natural():
@ -229,9 +229,9 @@ def test_node_lifecycle_with_kill():
def test_split_token(): def test_split_token():
with pytest.deprecated_call(): with pytest.deprecated_call():
assert split_token(('foo', 'bar')) == (set(), ('foo', 'bar')) assert split_token(("foo", "bar")) == (set(), ("foo", "bar"))
assert split_token(()) == (set(), ()) assert split_token(()) == (set(), ())
assert split_token('') == (set(), ('', )) assert split_token("") == (set(), ("",))
def test_split_token_duplicate(): def test_split_token_duplicate():
@ -247,17 +247,17 @@ def test_split_token_duplicate():
def test_split_token_not_modified(): def test_split_token_not_modified():
with pytest.deprecated_call(): with pytest.deprecated_call():
with pytest.raises(ValueError): with pytest.raises(ValueError):
split_token((F_NOT_MODIFIED, 'foo', 'bar')) split_token((F_NOT_MODIFIED, "foo", "bar"))
with pytest.raises(ValueError): with pytest.raises(ValueError):
split_token((F_NOT_MODIFIED, F_INHERIT)) split_token((F_NOT_MODIFIED, F_INHERIT))
with pytest.raises(ValueError): with pytest.raises(ValueError):
split_token((F_INHERIT, F_NOT_MODIFIED)) split_token((F_INHERIT, F_NOT_MODIFIED))
assert split_token(F_NOT_MODIFIED) == ({F_NOT_MODIFIED}, ()) assert split_token(F_NOT_MODIFIED) == ({F_NOT_MODIFIED}, ())
assert split_token((F_NOT_MODIFIED, )) == ({F_NOT_MODIFIED}, ()) assert split_token((F_NOT_MODIFIED,)) == ({F_NOT_MODIFIED}, ())
def test_split_token_inherit(): def test_split_token_inherit():
with pytest.deprecated_call(): with pytest.deprecated_call():
assert split_token(F_INHERIT) == ({F_INHERIT}, ()) assert split_token(F_INHERIT) == ({F_INHERIT}, ())
assert split_token((F_INHERIT, )) == ({F_INHERIT}, ()) assert split_token((F_INHERIT,)) == ({F_INHERIT}, ())
assert split_token((F_INHERIT, 'foo', 'bar')) == ({F_INHERIT}, ('foo', 'bar')) assert split_token((F_INHERIT, "foo", "bar")) == ({F_INHERIT}, ("foo", "bar"))

View File

@ -6,9 +6,9 @@ from bonobo.execution import events
def test_names(): def test_names():
# This test looks useless, but as it's becoming the plugin API, I want to make sure that nothing changes here, or # This test looks useless, but as it's becoming the plugin API, I want to make sure that nothing changes here, or
# notice it otherwise. # notice it otherwise.
for name in 'start', 'started', 'tick', 'stop', 'stopped', 'kill': for name in "start", "started", "tick", "stop", "stopped", "kill":
event_name = getattr(events, name.upper()) event_name = getattr(events, name.upper())
assert event_name == '.'.join(('execution', name)) assert event_name == ".".join(("execution", name))
def test_event_object(): def test_event_object():

View File

@ -14,16 +14,11 @@ class ResponseMock:
return {} return {}
else: else:
self.count += 1 self.count += 1
return { return {"records": self.json_value}
'records': self.json_value,
}
def test_read_from_opendatasoft_api(): def test_read_from_opendatasoft_api():
extract = OpenDataSoftAPI(dataset='test-a-set') extract = OpenDataSoftAPI(dataset="test-a-set")
with patch('requests.get', return_value=ResponseMock([ with patch("requests.get", return_value=ResponseMock([{"fields": {"foo": "bar"}}, {"fields": {"foo": "zab"}}])):
{'fields': {'foo': 'bar'}}, for line in extract("http://example.com/", ValueHolder(0)):
{'fields': {'foo': 'zab'}}, assert "foo" in line
])):
for line in extract('http://example.com/', ValueHolder(0)):
assert 'foo' in line

View File

@ -1,27 +1,24 @@
from bonobo.util.envelopes import AppendingEnvelope from bonobo.util.envelopes import AppendingEnvelope
from bonobo.util.testing import BufferingNodeExecutionContext from bonobo.util.testing import BufferingNodeExecutionContext
messages = [ messages = [("Hello",), ("Goodbye",)]
('Hello', ),
('Goodbye', ),
]
def append(*args): def append(*args):
return AppendingEnvelope('!') return AppendingEnvelope("!")
def test_inherit(): def test_inherit():
with BufferingNodeExecutionContext(append) as context: with BufferingNodeExecutionContext(append) as context:
context.write_sync(*messages) context.write_sync(*messages)
assert context.get_buffer() == list(map(lambda x: x + ('!', ), messages)) assert context.get_buffer() == list(map(lambda x: x + ("!",), messages))
def test_inherit_bag_tuple(): def test_inherit_bag_tuple():
with BufferingNodeExecutionContext(append) as context: with BufferingNodeExecutionContext(append) as context:
context.set_input_fields(['message']) context.set_input_fields(["message"])
context.write_sync(*messages) context.write_sync(*messages)
assert context.get_output_fields() == ('message', '0') assert context.get_output_fields() == ("message", "0")
assert context.get_buffer() == list(map(lambda x: x + ('!', ), messages)) assert context.get_buffer() == list(map(lambda x: x + ("!",), messages))

Some files were not shown because too many files have changed in this diff