The new bag implementation greatly improves how bonobo works, even though it is highly backward incompatible (sorry, that was needed, and better sooner than later).

* The new implementation uses the same approach as Python's namedtuple, dynamically generating the type's code. This has drawbacks, as it does not feel like the right way to do it, but it also brings benefits that cannot be achieved with a regular approach, most notably a hardcoded constructor parameter order.
* Memory usage is now much more efficient. The "keys" memory space is spent only once per "io type", in the underlying type definition, instead of in every instance.
* Transformations now need to output tuples, which are bound to their "output type". The output type can be inferred from the tuple length, or set explicitly by the user with either `context.set_output_type(...)` or `context.set_output_fields(...)` (which builds a bag type from a list of field names); see the sketch below.

Jupyter/Graphviz integration is tighter, making it easy to display graphs in a notebook, or to show the live transformation status in an HTML table instead of a plain `<div>`.

For now, context processors were hacked to keep working as before, but the current API is not satisfactory and should be replaced. Such a big change would be unreasonable without time to work on it properly, so it is postponed to later versions (0.7, 0.8, ...). Maybe the best idea would be some kind of "local services" that would use the same dependency injection mechanism as the execution-wide services.

Services are now passed by keyword arguments only, to avoid confusion with data arguments.
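To make the output contract concrete, here is a minimal sketch. The `@use_context` decorator usage, the node name and the field names are illustrative assumptions, not verbatim from this release; only `context.set_output_fields(...)` comes from the notes above. The idea is that a node declares its output fields on its context once, then yields plain tuples.

```python
from bonobo.config import use_context


@use_context
def split_name(context, full_name):
    # Assumed usage: build the output bag type from explicit field names.
    # Without this call, the output type would be inferred from the length
    # of the yielded tuples.
    context.set_output_fields(['first_name', 'last_name'])

    first, last = full_name.split(' ', 1)
    yield first, last  # a plain tuple, bound to ('first_name', 'last_name')
```

Keyword-only service injection can be pictured the same way; the class and the `'sqlalchemy.engine'` service name below are conventional examples, not something mandated by this change.

```python
from bonobo.config import Configurable, Service


class WriteToDatabase(Configurable):
    engine = Service('sqlalchemy.engine')  # assumed/conventional service name

    def __call__(self, *row, engine):
        # `engine` is injected by keyword only; the positional arguments in
        # `row` carry the data, so the two can never be confused.
        ...
```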
```python
from operator import attrgetter

from bonobo.config import Configurable
from bonobo.config.processors import ContextProcessor, resolve_processors, ContextCurrifier, use_context_processor


class CP1(Configurable):
    @ContextProcessor
    def c(self):
        yield

    @ContextProcessor
    def a(self):
        yield 'this is A'

    @ContextProcessor
    def b(self, a):
        # Receives the value yielded by processor `a` above.
        yield a.upper()[:-1] + 'b'

    def __call__(self, a, b):
        return a, b


class CP2(CP1):
    @ContextProcessor
    def f(self):
        pass

    @ContextProcessor
    def e(self):
        pass

    @ContextProcessor
    def d(self):
        pass


class CP3(CP2):
    @ContextProcessor
    def c(self):
        pass

    @ContextProcessor
    def b(self):
        pass


def get_all_processors_names(cls):
    return list(map(attrgetter('__name__'), resolve_processors(cls)))


def test_inheritance_and_ordering():
    # Processors resolve in definition order, parent classes first; redefining
    # a name in a subclass appends a new processor rather than replacing it.
    assert get_all_processors_names(CP1) == ['c', 'a', 'b']
    assert get_all_processors_names(CP2) == ['c', 'a', 'b', 'f', 'e', 'd']
    assert get_all_processors_names(CP3) == ['c', 'a', 'b', 'f', 'e', 'd', 'c', 'b']


def test_setup_teardown():
    o = CP1()
    stack = ContextCurrifier(o)
    stack.setup()
    assert o(*stack.args) == ('this is A', 'THIS IS b')
    stack.teardown()


def test_processors_on_func():
    def cp(context):
        yield context

    @use_context_processor(cp)
    def node(context):
        pass

    assert get_all_processors_names(node) == ['cp']
```