Python six module: viewitems() example source code

We extracted the following 30 code examples from open-source Python projects to illustrate how to use six.viewitems().
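
Before the project examples, here is a minimal, self-contained sketch of what six.viewitems() does: it returns dict.viewitems() on Python 2 and dict.items() on Python 3, i.e. a live view of the dictionary's items. The dictionary contents below are invented for illustration.

import six

d = {'a': 1, 'b': 2}
view = six.viewitems(d)
print(sorted(view))      # [('a', 1), ('b', 2)]

d['c'] = 3               # the view reflects later mutations of the dict
print(('c', 3) in view)  # True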

Project: girder_worker | Author: girder
def _setup_input_pipes(input_pipes):
    """
    Given a mapping of input pipes, return a tuple with 2 elements. The first is
    a list of file descriptors to pass to ``select`` as writeable descriptors.
    The second is a dictionary mapping the paths of existing named pipes to
    their adapters.
    """
    wds = []
    fifos = {}
    for pipe, adapter in six.viewitems(input_pipes):
        if isinstance(pipe, int):
            # This is assumed to be an open system-level file descriptor
            wds.append(pipe)
        else:
            if not os.path.exists(pipe):
                raise Exception('Input pipe does not exist: %s' % pipe)
            if not stat.S_ISFIFO(os.stat(pipe).st_mode):
                raise Exception('Input pipe must be a fifo object: %s' % pipe)
            fifos[pipe] = adapter

    return wds, fifos
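
A hedged, standalone sketch (not girder_worker code) of the FIFO check used above; the temporary path is made up and os.mkfifo is POSIX-only.

import os
import stat
import tempfile

fifo_path = os.path.join(tempfile.mkdtemp(), 'example.fifo')
os.mkfifo(fifo_path)  # create a named pipe (POSIX only)

mode = os.stat(fifo_path).st_mode
print(stat.S_ISFIFO(mode))  # True -> this path would be accepted as an input pipe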
Project: feagen | Author: ianlini
def get_data_keys_from_structure(structure):
    data_keys = []

    def _get_data_keys_from_structure(structure):
        if isinstance(structure, basestring):
            data_keys.append(structure)
        elif isinstance(structure, list):
            data_keys.extend(structure)
        elif isinstance(structure, dict):
            for _, val in six.viewitems(structure):
                _get_data_keys_from_structure(val)
        else:
            raise TypeError("The bundle structure only support "
                            "dict, list and str.")
    _get_data_keys_from_structure(structure)

    return data_keys
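
A standalone sketch of the same recursive traversal on a made-up bundle structure (the key names are invented for illustration).

import six

structure = {
    'features': ['age', 'income'],
    'label': 'clicked',
    'meta': {'weights': 'sample_weight'},
}

def collect_keys(node, out):
    if isinstance(node, str):
        out.append(node)
    elif isinstance(node, list):
        out.extend(node)
    elif isinstance(node, dict):
        for _, val in six.viewitems(node):  # same traversal as above
            collect_keys(val, out)
    return out

print(sorted(collect_keys(structure, [])))
# ['age', 'clicked', 'income', 'sample_weight']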
Project: feagen | Author: ianlini
def match_node(self, key):
        found_node = None
        for regex_key, node in six.viewitems(self._key_node_dict):
            match_object = re.match(r"(?:%s)\Z" % regex_key, key)
            if match_object is not None:
                if found_node is None:
                    found_node = node
                    found_regex_key = regex_key
                    found_match_object = match_object
                else:
                    raise ValueError("The data key '{}' matches multiple keys: "
                                     "'{}' for {} and '{}' for {}.".format(
                                         key, found_regex_key, found_node,
                                         regex_key, node))
        if found_node is None:
            raise KeyError(key)
        return found_regex_key, found_node, found_match_object
Project: striatum | Author: ntucllab
def reward(self, history_id, rewards):
        """Reward the previous action with reward.

        Parameters
        ----------
        history_id : int
            The history id of the action to reward.

        rewards : dictionary
            The dictionary {action_id, reward}, where reward is a float.
        """

        # Update the model
        model = self._model_storage.get_model()
        total_action_reward = model['total_action_reward']
        action_times = model['action_times']
        for action_id, reward in six.viewitems(rewards):
            total_action_reward[action_id] += reward
            action_times[action_id] += 1
            model['n_rounds'] += 1
        self._model_storage.save_model(model)
        # Update the history
        self._history_storage.add_reward(history_id, rewards)
Project: striatum | Author: ntucllab
def plot_avg_regret(policy):

    """Plot average regret with respect to time.

        Parameters
        ----------
        policy: bandit object
            The bandit algorithm you want to evaluate.
    """

    avg_reward = calculate_avg_reward(policy)
    points = sorted(six.viewitems(avg_reward), key=lambda x: x[0])
    x, y = zip(*points)
    plt.plot(x, [1 - reward for reward in y], 'r-', label="average regret")
    plt.xlabel('time')
    plt.ylabel('avg regret')
    plt.legend()
    plt.title("Average Regret with respect to Time")
Project: solr-zkutil | Author: bendemott
def get_async_result_paths_chilren_per_host(children):
    """
    Combine the parent arguments with the returned child lists to get a unique
    sequence of fully qualified ZooKeeper paths for all children of the queried
    directories.

    Returns a simple sorted list of unique paths returned by ``get_async_call_per_host``
    when the call performed is ``get_children()``

    :param children: A structure as returned from ``get_async_call_per_host()``
    """
    paths = set()

    for parent_path, host_children in six.viewitems(children):
        for client_idx, child_paths in six.viewitems(host_children):
            if isinstance(child_paths, Exception):
                continue
            for child_path in child_paths:
                paths.add(znode_path_join([parent_path, child_path]))

    return sorted(paths)
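
A hedged, standalone illustration of the flattening above with made-up data; '/'.join stands in for the project's znode_path_join helper, and the exception marks a host that failed to respond.

import six

children = {
    '/collections': {0: ['coll1', 'coll2'], 1: RuntimeError('no response')},
    '/live_nodes': {0: ['node1'], 1: ['node1']},
}

paths = set()
for parent_path, host_children in six.viewitems(children):
    for client_idx, child_paths in six.viewitems(host_children):
        if isinstance(child_paths, Exception):
            continue
        for child in child_paths:
            paths.add('/'.join([parent_path, child]))

print(sorted(paths))
# ['/collections/coll1', '/collections/coll2', '/live_nodes/node1']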
Project: solr-zkutil | Author: bendemott
def check_queue_sizes(zk_client, threshold=5):
    """
    Queues should be empty for the most part. If a queue contains more than the
    given number of entries, information about it is returned.

    :param threshold: ``int`` the maximum number of children a queue may contain
                      before an error is reported.
    """

    errors = []
    stats = get_znode_children_counts(zk_client, ZK_QUEUE_PATHS)
    if stats is None:
        raise ValueError("stats is None!!!")

    missing = set(stats.keys()) ^ set(ZK_QUEUE_PATHS)
    for path in missing:
        errors.append("queue path [%s] is missing" % path)

    for path, max_children in six.viewitems(stats):
        if max_children > threshold:
            errors.append(
                "queue [%s] is backed up with: %d children, error threshold: %d" % (path, max_children, threshold)
            )

    return errors
Project: database_assetstore | Author: OpenGeoscience
def createDatabaseLink(self, file, params):
    dbs.clearDBConnectorCache(file['_id'])
    dbinfo = self.getBodyJson()
    if DB_INFO_KEY not in file:
        file[DB_INFO_KEY] = {}
    file[DB_INFO_KEY].update(six.viewitems(dbinfo))
    toDelete = [k for k, v in six.viewitems(file[DB_INFO_KEY]) if v is None]
    for key in toDelete:
        del file[DB_INFO_KEY][key]
    file['updated'] = datetime.datetime.utcnow()
    dbinfo = file[DB_INFO_KEY]
    return self.model('file').save(file)
Project: database_assetstore | Author: OpenGeoscience
def getEngine(uri, **kwargs):
    """
    Get a sqlalchemy engine from a pool in case we use the same parameters for
    multiple connections.
    """
    key = (uri, frozenset(six.viewitems(kwargs)))
    engine = _enginePool.get(key)
    if engine is None:
        engine = sqlalchemy.create_engine(uri, **kwargs)
        if len(_enginePool) >= _enginePoolMaxSize:
            _enginePool.clear()
        _enginePool[key] = engine
    return engine
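
A standalone sketch of the cache key built above: hashing the keyword arguments with frozenset(six.viewitems(kwargs)) makes the key independent of keyword order. The URI and options are illustrative only.

import six

def cache_key(uri, **kwargs):
    return (uri, frozenset(six.viewitems(kwargs)))

k1 = cache_key('sqlite://', echo=True, pool_size=5)
k2 = cache_key('sqlite://', pool_size=5, echo=True)
print(k1 == k2)  # True: the same pooled engine would be reused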
Project: feagen | Author: ianlini
def _get_upstream_data(self, dag, node):
        data = {}
        for source, _, edge_attr in dag.in_edges_iter(node, data=True):
            source_attr = dag.node[source]
            source_handler = self._handlers[source_attr['handler']]
            formatted_key_data = source_handler.get(edge_attr['keys'])
            # change the key to template
            data.update({template_key: formatted_key_data[key]
                         for template_key, key in six.viewitems(
                             edge_attr['template_keys'])})
        return data
Project: feagen | Author: ianlini
def bundle(self, structure, data_bundle_hdf_path, buffer_size=int(1e+9),
               structure_config=None):
        if structure_config is None:
            structure_config = {}

        def _bundle_data(structure, structure_config, dset_name=""):
            if isinstance(structure, basestring) and dset_name != "":
                (self.get_handler(structure)
                 .bundle(structure, data_bundle_hdf_path, dset_name))
            elif isinstance(structure, list):
                if structure_config.get('concat', False):
                    self.fill_concat_data(data_bundle_hdf_path, dset_name,
                                          structure, buffer_size)
                else:
                    for data_key in structure:
                        (self.get_handler(data_key)
                         .bundle(data_key, data_bundle_hdf_path,
                                 dset_name + "/" + data_key))
            elif isinstance(structure, dict):
                for key, val in six.viewitems(structure):
                    _bundle_data(val, structure_config.get(key, {}),
                                 dset_name + "/" + key)
            else:
                raise TypeError("The bundle structure only support "
                                "dict, list and str (except the first layer).")

        if os.path.isfile(data_bundle_hdf_path):
            os.remove(data_bundle_hdf_path)
        with SimpleTimer("Bundling data"):
            _bundle_data(structure, structure_config)
Project: feagen | Author: ianlini
def _grow_ancestors(self, nx_digraph, root_node_key, successor_keys,
                        re_args={}):
        successor_keys = {k: k.format(**re_args) for k in successor_keys}
        # grow the graph using DFS
        for template_key, key in six.viewitems(successor_keys):
            regex_key, node, match_object = self.match_node(key)

            # for merging node, we use key as the 'key' in nx_digraph
            mode = self._node_mode_dict[node]
            if mode == 'full':
                node_key = (self._node_key_dict[node]['keys']
                            + self._node_key_dict[node]['re_escape_keys'])
            elif mode == 'one':
                node_key = key
            else:
                raise ValueError("Mode '%s' is not supported." % mode)

            re_args = match_object.groupdict()
            if node_key not in nx_digraph:
                attr = self._node_attr_dict[node].copy()
                attr.setdefault('__name__', node)
                attr['__re_args__'] = re_args
                nx_digraph.add_node(node_key, attr)
                self._grow_ancestors(nx_digraph, node_key,
                                     self._node_succesor_dict[node], re_args)
            if not nx_digraph.has_edge(root_node_key, node_key):
                nx_digraph.add_edge(root_node_key, node_key,
                                    keys=set(), template_keys={})
            edge_attr = nx_digraph[root_node_key][node_key]
            edge_attr['keys'].add(key)
            edge_attr['template_keys'].update(((template_key, key),))
Project: django-requestlogging | Author: tarkatronic
def bound_logger(self, request):
        loggers = self.middleware.find_loggers_with_filter(RequestFilter)
        for logger, filters in six.viewitems(loggers):
            if any(f.request == request for f in filters):
                return True
        return False
Project: django-requestlogging | Author: tarkatronic
def bound_handler(self, request):
        handlers = self.middleware.find_handlers_with_filter(RequestFilter)
        for handler, filters in six.viewitems(handlers):
            if any(f.request == request for f in filters):
                return True
        return False
Project: odoo-rpc-client | Author: katyukha
def get_ids_to_read(self, *fields):
        """ Return list of ids, that have no at least one of specified
            fields in cache

            For example::

                cache.get_ids_to_read('name', 'country_id', 'parent_id')

            This code will traverse all record ids managed by this cache,
            and find those that are missing at least one of the specified
            fields in the cache. This is highly useful for prefetching.
        """
        return [key for key, val in six.viewitems(self)
                if any(((field not in val) for field in fields))]
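
A standalone illustration of the same filter with a plain dict standing in for the cache (record id -> cached fields); the data is invented.

import six

cache = {
    1: {'name': 'A', 'country_id': 10},
    2: {'name': 'B'},
    3: {'name': 'C', 'country_id': 30, 'parent_id': 1},
}

fields = ('name', 'country_id', 'parent_id')
to_read = [key for key, val in six.viewitems(cache)
           if any(field not in val for field in fields)]
print(sorted(to_read))  # [1, 2] -- records missing at least one requested field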
Project: inspire-query-parser | Author: inspirehep
def parametrize(test_configurations):
    """Custom parametrize method that accepts a more readable test conf. format.

    It accepts a dictionary whose keys are the test names (ids equivalent) and
    the value of each key is a dictionary of test configuration, in the form of
    { test_parameter1: x, test_parameter2: y}

    Example:
        {
            'Unicode tokens': {'query_str': '?-radiation', 'unrecognised_text': ''},
            'Simple token': {'query_str': 'foo', 'unrecognized_text': ''}
        }
    """
    if not test_configurations:
        __tracebackhide__ = True
        pytest.fail('In parametrize, the test configurations parameter cannot be empty.')

    if not isinstance(test_configurations, dict):
        __tracebackhide__ = True
        pytest.fail('In parametrize, the test configurations parameter must be a dictionary.')

    ordered_tests_config = OrderedDict(sorted(viewitems(test_configurations)))

    for test_name, test_configuration in iteritems(ordered_tests_config):
        ordered_tests_config[test_name] = OrderedDict(sorted(viewitems(test_configuration)))

    # Extract arg_names from a test configuration
    arg_names = list(iterkeys(next(itervalues(ordered_tests_config))))

    # Generate list of arg_values
    arg_values = [ordered_tests_config[test_config].values() for test_config in ordered_tests_config]

    # Generate ids list
    ids = list(iterkeys(ordered_tests_config))
    return pytest.mark.parametrize(argnames=arg_names, argvalues=arg_values, ids=ids)
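
A standalone sketch of the transformation this helper performs, using a made-up configuration: the test names become ids, and the per-test dictionaries are ordered so that argnames and argvalues line up.

from collections import OrderedDict

from six import iteritems, iterkeys, itervalues, viewitems

test_configurations = {
    'Simple token': {'query_str': 'foo', 'unrecognised_text': ''},
    'Unicode tokens': {'query_str': u'?-radiation', 'unrecognised_text': ''},
}

ordered = OrderedDict(sorted(viewitems(test_configurations)))
for name, conf in iteritems(ordered):
    ordered[name] = OrderedDict(sorted(viewitems(conf)))

arg_names = list(iterkeys(next(itervalues(ordered))))
arg_values = [list(ordered[name].values()) for name in ordered]
ids = list(iterkeys(ordered))

print(arg_names)  # ['query_str', 'unrecognised_text']
print(ids)        # ['Simple token', 'Unicode tokens']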
Project: striatum | Author: ntucllab
def reward(self, history_id, rewards):
        """Reward the previous action with reward.

        Parameters
        ----------
        history_id : int
            The history id of the action to reward.

        rewards : dictionary
            The dictionary {action_id, reward}, where reward is a float.
        """
        context = (self._history_storage
                   .get_unrewarded_history(history_id)
                   .context)

        # Update the model
        model = self._model_storage.get_model()
        B = model['B']  # pylint: disable=invalid-name
        f = model['f']

        for action_id, reward in six.viewitems(rewards):
            context_t = np.reshape(context[action_id], (-1, 1))
            B += context_t.dot(context_t.T)  # pylint: disable=invalid-name
            f += reward * context_t
            mu_hat = np.linalg.inv(B).dot(f)
        self._model_storage.save_model({'B': B, 'mu_hat': mu_hat, 'f': f})

        # Update the history
        self._history_storage.add_reward(history_id, rewards)
Project: striatum | Author: ntucllab
def reward(self, history_id, rewards):
        """Reward the previous action with reward.

        Parameters
        ----------
        history_id : int
            The history id of the action to reward.

        rewards : dictionary
            The dictionary {action_id, reward}, where reward is a float.
        """
        context = (self._historystorage
                   .get_unrewarded_history(history_id)
                   .context)

        model = self._modelstorage.get_model()
        w = model['w']
        action_probs = model['action_probs']
        action_ids = list(six.viewkeys(six.next(six.itervalues(context))))

        # Update the model
        for action_id, reward in six.viewitems(rewards):
            y_hat = {}
            v_hat = {}
            for i in six.viewkeys(context):
                y_hat[i] = (context[i][action_id] * reward
                            / action_probs[action_id])
                v_hat[i] = sum(
                    [context[i][k] / action_probs[k] for k in action_ids])
                w[i] = w[i] * np.exp(
                    self.p_min / 2
                    * (y_hat[i] + v_hat[i]
                       * np.sqrt(np.log(len(context) / self.delta)
                                 / (len(action_ids) * self.max_rounds))))

        self._modelstorage.save_model({
            'action_probs': action_probs, 'w': w})

        # Update the history
        self._historystorage.add_reward(history_id, rewards)
Project: striatum | Author: ntucllab
def reward(self, history_id, rewards):
        """Reward the previous action with reward.

        Parameters
        ----------
        history_id : int
            The history id of the action to reward.

        rewards : dictionary
            The dictionary {action_id, reward}, where reward is a float.
        """
        context = (self._history_storage
                   .get_unrewarded_history(history_id)
                   .context)

        # Update the model
        model = self._model_storage.get_model()
        A = model['A']  # pylint: disable=invalid-name
        A_inv = model['A_inv']  # pylint: disable=invalid-name
        b = model['b']
        theta = model['theta']

        for action_id, reward in six.viewitems(rewards):
            action_context = np.reshape(context[action_id], (-1, 1))
            A[action_id] += action_context.dot(action_context.T)
            A_inv[action_id] = np.linalg.inv(A[action_id])
            b[action_id] += reward * action_context
            theta[action_id] = A_inv[action_id].dot(b[action_id])
        self._model_storage.save_model({
            'A': A,
            'A_inv': A_inv,
            'b': b,
            'theta': theta,
        })

        # Update the history
        self._history_storage.add_reward(history_id, rewards)
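
A standalone numpy illustration of the per-action update above: A accumulates outer products of observed contexts, b accumulates reward-weighted contexts, and theta is the estimate inv(A).dot(b). The context vector and reward are made up.

import numpy as np

A = np.identity(3)
b = np.zeros((3, 1))

x = np.array([[0.2], [0.5], [0.1]])  # illustrative context vector
reward = 1.0

A += x.dot(x.T)      # rank-1 update with the observed context
b += reward * x      # reward-weighted context
theta = np.linalg.inv(A).dot(b)
print(theta.ravel())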
Project: flatten-dict | Author: ianlini
def flatten(d, reducer='tuple', inverse=False):
    """Flatten dict-like object.

    Parameters
    ----------
    d: dict-like object
        The dict that will be flattened.
    reducer: {'tuple', 'path', function} (default: 'tuple')
        The key joining method. If a function is given, it will be used to
        reduce the keys.
        'tuple': The resulting key will be a tuple of the original keys.
        'path': Use ``os.path.join`` to join keys.
    inverse: bool (default: False)
        Whether to invert the resulting keys and values.

    Returns
    -------
    flat_dict: dict
    """
    if isinstance(reducer, str):
        reducer = REDUCER_DICT[reducer]
    flat_dict = {}

    def _flatten(d, parent=None):
        for key, val in six.viewitems(d):
            flat_key = reducer(parent, key)
            if isinstance(val, Mapping):
                _flatten(val, flat_key)
            elif inverse:
                if val in flat_dict:
                    raise ValueError("duplicated key '{}'".format(val))
                flat_dict[val] = flat_key
            else:
                flat_dict[flat_key] = val

    _flatten(d)
    return flat_dict
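
A usage sketch for the flatten() above, assuming the package is installed as flatten_dict; note that the 'path' reducer uses os.path.join, so the separator is OS-dependent.

from flatten_dict import flatten

normal_dict = {'a': '0', 'b': {'a': '1.0', 'b': '1.1'}}

print(flatten(normal_dict))
# {('a',): '0', ('b', 'a'): '1.0', ('b', 'b'): '1.1'}

print(flatten(normal_dict, reducer='path'))
# {'a': '0', 'b/a': '1.0', 'b/b': '1.1'}  (on POSIX)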
Project: flatten-dict | Author: ianlini
def test_flatten_dict_inverse():
    inv_flat_normal_dict = {v: k for k, v in six.viewitems(flat_normal_dict)}
    assert flatten(normal_dict, inverse=True) == inv_flat_normal_dict
Project: flatten-dict | Author: ianlini
def test_flatten_dict_path():
    from os.path import join
    flat_path_dict = {join(*k): v for k, v in six.viewitems(flat_normal_dict)}
    assert flatten(normal_dict, reducer='path') == flat_path_dict
Project: treadmill | Author: Morgan-Stanley
def watch_running(zkclient, cell_state):
    """Watch running instances."""

    @zkclient.ChildrenWatch(z.path.running())
    @utils.exit_on_unhandled
    def _watch_running(running):
        """Watch /running nodes."""
        cell_state.running = set(running)
        for name, item in six.viewitems(cell_state.placement):
            if name in cell_state.running:
                item['state'] = 'running'
        return True

    _LOGGER.info('Loaded running.')
Project: h5sparse | Author: appier
def get_format_str(data):
    for format_str, format_class in six.viewitems(FORMAT_DICT):
        if isinstance(data, format_class):
            return format_str
    raise ValueError("Data type {} is not supported.".format(type(data)))
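
A hedged sketch of the lookup above. FORMAT_DICT is not shown in the snippet; a plausible mapping (used here only for illustration) pairs format names with scipy.sparse classes.

import scipy.sparse as ss
import six

FORMAT_DICT = {'csr': ss.csr_matrix, 'csc': ss.csc_matrix}  # assumed for illustration

data = ss.csr_matrix((3, 3))
matches = [name for name, cls in six.viewitems(FORMAT_DICT) if isinstance(data, cls)]
print(matches[0])  # 'csr'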
Project: anndata | Author: theislab
def get_format_str(data):
    for format_str, format_class in six.viewitems(FORMAT_DICT):
        if isinstance(data, format_class):
            return format_str
    raise ValueError("Data type {} is not supported.".format(type(data)))
Project: solr-zkutil | Author: bendemott
def check_ephemeral_dump_consistency(zk_client):
    """
    Check the consistency of 'dump' output across Zookeeper hosts

    :param zk_client: Zookeeper connection object (KazooClient or subclass); its hosts should include all ensemble members
    """
    zk_hosts = zk_client.hosts
    dump_results = multi_admin_command(zk_client, b'dump')
    ephemerals = [parse_admin_dump(item)['ephemerals'] for item in dump_results]

    # Flatten the data structure returned by parsing the 'dump' command so that we have
    # a sequence (list) of sets that can be compared using set operations.
    ephemerals_compare = []
    for host_ephemerals in ephemerals:
        ephemeral_set = set()
        for session, paths in six.viewitems(host_ephemerals):
            for path in paths:
                ephemeral_set.add((session, path))

        ephemerals_compare.append(ephemeral_set)

    # Find all unique sets of indexes to use for comparisons.
    errors = []
    comparisons = {tuple(sorted(pair)) for pair in itertools.product(range(len(ephemerals_compare)), repeat=2) if pair[0] != pair[1]}
    for idx1, idx2 in comparisons:
        # Set comparison to determine differences between the two hosts
        differences = ephemerals_compare[idx1] ^ ephemerals_compare[idx2]
        if differences:
            errors.append(
                'ephemeral nodes do not match for host:{host1} and host:{host2}... differences: {diff}'.format(
                    host1=format_host(zk_hosts[idx1]),
                    host2=format_host(zk_hosts[idx2]),
                    diff='\n\t' + '\n\t'.join([six.text_type(entry) for entry in differences])
                )
            )

    if not errors:
        log.debug('%s.%s encountered no errors' % (__name__, check_ephemeral_dump_consistency.__name__))

    return errors
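
A standalone illustration of the set comparison above: the symmetric difference of (session, path) pairs flags ephemeral nodes that exist on one host but not the other. The session ids and paths are invented.

host_a = {(0x100, '/live_nodes/node1'), (0x101, '/live_nodes/node2')}
host_b = {(0x100, '/live_nodes/node1')}

print(host_a ^ host_b)  # {(257, '/live_nodes/node2')} -- present on only one host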
Project: solr-zkutil | Author: bendemott
def get_async_result_tuples(results):
    """
    Given a dictionary like::

        {
            arg_0: {
                0: result or exception obj
                1: result or exception obj 
                2: result or exception obj 
            },
            arg_1: {
                0: result or exception obj
                1: result or exception obj 
                2: result or exception obj 
            },
        }

    Return a list, composed of tuples of (host, arg, result)
    where arg is the input argument, host is the host index and
    result is the response/result object from the zookeeper api call

    Any results that contain exception objects / errors are ignored.

    :param results: A result set dictionary as returned from ``get_async_call_per_host``
    :returns: ``list``
    """
    if not isinstance(results, dict):
        raise ValueError('"results" must be a dict, got: %s' % type(results))

    items = []

    for arg, host_result in six.viewitems(results):
        items.extend([(host, arg, result) for host, result in six.viewitems(host_result) if not isinstance(result, Exception)])

    return items
Project: solr-zkutil | Author: bendemott
def get_async_call_per_host_errors(zk_client, async_result, ignore=None):
    """
    Return a list of errors contained within the response from ``get_async_call_per_host()``

    :param zk_client: Zookeeper connection object (KazooClient instance or subclass of)
                           start() will be called internally when the connection is used.
                           The connection instance should be configured with the hosts that are
                           members of the ensemble.

    :param async_result: The response from ``get_async_call_per_host()``
    :param ignore: Ignore these exception objects.
    :returns: ``list``
    """
    hosts = zk_client.hosts
    # each value in async_result is either a resolved result or an exception object
    errors = []
    for arg, host_result in six.viewitems(async_result):
        for client_idx, result in six.viewitems(host_result):
            if isinstance(result, Exception):
                exception = result
                if ignore and any([isinstance(exception, exc) for exc in ignore]):
                    log.debug('ignore error class: [%s] %s' % (exception.__class__.__name__, exception))
                    continue

                # see if this one is an error.
                errors.append(
                    "error from host: %s for input: [%s], error: (%s) %s" % (
                        format_host(hosts[client_idx]),
                        arg,
                        exception.__class__.__name__,
                        str(exception)
                    )
                )
                continue

    return errors
Project: treadmill | Author: Morgan-Stanley
def __init__(self):

        if context.GLOBAL.cell is not None:
            zkclient = context.GLOBAL.zk.conn
            cell_state = CellState()

            _LOGGER.info('Initializing api.')

            watch_running(zkclient, cell_state)
            watch_placement(zkclient, cell_state)
            watch_finished(zkclient, cell_state)
            watch_finished_history(zkclient, cell_state)

        def _list(match=None, finished=False, partition=None):
            """List instances state."""
            if match is None:
                match = '*'
            if '#' not in match:
                match += '#*'
            filtered = [
                {'name': name, 'state': item['state'], 'host': item['host']}
                for name, item in six.viewitems(cell_state.placement.copy())
                if fnmatch.fnmatch(name, match)
            ]

            if finished:
                for name in six.viewkeys(cell_state.finished.copy()):
                    if fnmatch.fnmatch(name, match):
                        state = cell_state.get_finished(name)
                        item = {'name': name}
                        item.update(state)
                        filtered.append(item)

            if partition is not None:
                hosts = [rec['_id'] for rec in
                         API._get_server_info()
                         if rec['partition'] == partition]
                filtered = [item for item in filtered
                            if item['host'] in hosts]

            return sorted(filtered, key=lambda item: item['name'])

        @schema.schema({'$ref': 'instance.json#/resource_id'})
        def get(rsrc_id):
            """Get instance state."""
            if rsrc_id in cell_state.placement:
                state = cell_state.placement[rsrc_id]
            else:
                state = cell_state.get_finished(rsrc_id)

            if not state:
                return None

            res = {'name': rsrc_id}
            res.update(state)
            return res

        self.list = _list
        self.get = get
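
A standalone illustration of the match normalization used in _list above: a bare application name (the names here are made up) is widened to match any instance id after '#'.

import fnmatch

match = 'proid.app'
if '#' not in match:
    match += '#*'

print(fnmatch.fnmatch('proid.app#0000000001', match))    # True
print(fnmatch.fnmatch('proid.other#0000000002', match))  # False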
Project: solr-zkutil | Author: bendemott
def get_async_call_per_host(zk_client, args, call):
    """
    :param args: arguments to pass into ``call``, this should be a list of znode paths for example.
    :param call: a callable that accepts two arguments (KazooClient, arg)
                 where arg is an entry from args

    ``call`` should usually be a lambda such as::

        lambda c, arg: c.get(arg)

    returns a dictionary like::

        {
            arg_0: {
                0: result or exception obj
                1: result or exception obj 
                2: result or exception obj 
            },
            arg_1: {
                0: result or exception obj
                1: result or exception obj 
                2: result or exception obj 
            },
        }
    """
    clients = kazoo_clients_from_client(zk_client)
    kazoo_clients_connect(clients)

    asyncs = defaultdict(dict)
    for arg in args:
        for client_idx, client in enumerate(clients):
            asyncs[arg][client_idx] = call(client, arg)

    # block until the calls complete
    get_async_ready(asyncs)

    results = defaultdict(dict)
    for arg, host_async in six.viewitems(asyncs):
        for host_idx, async_result in six.viewitems(host_async):
            results[arg][host_idx] = async_result.exception or async_result.get()

    return results