The following 48 code examples, extracted from open-source Python projects, show how to use six.viewkeys().
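Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the dictionaries are invented for illustration). It shows that six.viewkeys() returns a set-like view of a dict's keys on both Python 2 and Python 3, which is why the examples below combine it freely with set operators such as &, | and -:

import six

defaults = {'color': 'black', 'size': 10}
supplied = {'size': 12, 'alpha': 0.5}

# six.viewkeys(d) maps to d.viewkeys() on Python 2 and d.keys() on Python 3.
# Both return a set-like view, so set algebra works without copying the keys.
shared = six.viewkeys(defaults) & six.viewkeys(supplied)   # {'size'}
missing = six.viewkeys(defaults) - six.viewkeys(supplied)  # {'color'}

print(sorted(shared), sorted(missing))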
def _all_as_cql(self):
    ret = self.as_cql_query(formatted=True)
    ret += ";"
    for index in self.indexes.values():
        ret += "\n%s;" % index.as_cql_query()
    for trigger_meta in self.triggers.values():
        ret += "\n%s;" % (trigger_meta.as_cql_query(),)
    for view_meta in self.views.values():
        ret += "\n\n%s;" % (view_meta.as_cql_query(formatted=True),)
    if self.extensions:
        registry = _RegisteredExtensionType._extension_registry
        for k in six.viewkeys(registry) & self.extensions:  # no viewkeys on OrderedMapSerializeKey
            ext = registry[k]
            cql = ext.after_table_cql(self, k, self.extensions[k])
            if cql:
                ret += "\n\n%s" % (cql,)
    return ret

def to_table_data(self):
    """
    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """

    self._validate_source_data()

    attr_name_set = set()
    for json_record in self._buffer:
        attr_name_set = attr_name_set.union(six.viewkeys(json_record))

    self._loader.inc_table_count()

    yield TableData(
        table_name=self._make_table_name(),
        header_list=sorted(attr_name_set),
        record_list=self._buffer,
        quoting_flags=self._loader.quoting_flags)

def to_table_data(self):
    """
    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """

    self._validate_source_data()
    self._loader.inc_table_count()

    header_list = sorted(six.viewkeys(self._buffer))

    yield TableData(
        table_name=self._make_table_name(),
        header_list=header_list,
        record_list=zip(
            *[self._buffer.get(header) for header in header_list]),
        quoting_flags=self._loader.quoting_flags)

def to_table_data(self):
    """
    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """

    self._validate_source_data()

    for table_key, json_record_list in six.iteritems(self._buffer):
        attr_name_set = set()
        for json_record in json_record_list:
            attr_name_set = attr_name_set.union(six.viewkeys(json_record))

        self._loader.inc_table_count()
        self._table_key = table_key

        yield TableData(
            table_name=self._make_table_name(),
            header_list=sorted(attr_name_set),
            record_list=json_record_list,
            quoting_flags=self._loader.quoting_flags)

def to_table_data(self):
    """
    :raises ValueError:
    :raises pytablereader.error.ValidationError:
    """

    self._validate_source_data()

    for table_key, json_record_list in six.iteritems(self._buffer):
        header_list = sorted(six.viewkeys(json_record_list))

        self._loader.inc_table_count()
        self._table_key = table_key

        yield TableData(
            table_name=self._make_table_name(),
            header_list=header_list,
            record_list=zip(
                *[json_record_list.get(header) for header in header_list]),
            quoting_flags=self._loader.quoting_flags)

def setup_params(self, data):
    params = self.params.copy()
    lookup = {
        'biweight': 'biw',
        'cosine': 'cos',
        'cosine2': 'cos2',
        'epanechnikov': 'epa',
        'gaussian': 'gau',
        'triangular': 'tri',
        'triweight': 'triw',
        'uniform': 'uni'}

    with suppress(KeyError):
        params['kernel'] = lookup[params['kernel'].lower()]

    if params['kernel'] not in six.viewvalues(lookup):
        msg = ("kernel should be one of {}. "
               "You may use the abbreviations {}")
        raise PlotnineError(msg.format(six.viewkeys(lookup),
                                       six.viewvalues(lookup)))

    return params

def _verify_arguments(self, kwargs):
    """
    Verify arguments passed to the geom
    """
    keys = six.viewkeys
    unknown = (keys(kwargs) -
               self.aesthetics() -                 # geom aesthetics
               keys(self.DEFAULT_PARAMS) -         # geom parameters
               self._stat.aesthetics() -           # stat aesthetics
               keys(self._stat.DEFAULT_PARAMS) -   # stat parameters
               {'data', 'mapping',                 # layer parameters
                'show_legend', 'inherit_aes'})     # layer parameters
    if unknown:
        msg = ("Parameters {}, are not understood by "
               "either the geom, stat or layer.")
        raise PlotnineError(msg.format(unknown))

def assert_dict_equal(result, expected, path=(), msg='', **kwargs):
    _check_sets(
        viewkeys(result),
        viewkeys(expected),
        msg,
        path + ('.%s()' % ('viewkeys' if PY2 else 'keys'),),
        'key',
    )

    failures = []
    for k, (resultv, expectedv) in iteritems(dzip_exact(result, expected)):
        try:
            assert_equal(
                resultv,
                expectedv,
                path=path + ('[%r]' % (k,),),
                msg=msg,
                **kwargs
            )
        except AssertionError as e:
            failures.append(str(e))

    if failures:
        raise AssertionError('\n'.join(failures))

def test_blocked_lookup_symbol_query(self):
    # we will try to query for more variables than sqlite supports
    # to make sure we are properly chunking on the client side
    as_of = pd.Timestamp('2013-01-01', tz='UTC')
    # we need more sids than we can query from sqlite
    nsids = SQLITE_MAX_VARIABLE_NUMBER + 10
    sids = range(nsids)
    frame = pd.DataFrame.from_records(
        [
            {
                'sid': sid,
                'symbol': 'TEST.%d' % sid,
                'start_date': as_of.value,
                'end_date': as_of.value,
                'exchange': uuid.uuid4().hex
            }
            for sid in sids
        ]
    )
    self.write_assets(equities=frame)
    assets = self.asset_finder.retrieve_equities(sids)
    assert_equal(viewkeys(assets), set(sids))

def _linthompsamp_score(self, context):
    """Thompson Sampling"""
    action_ids = list(six.viewkeys(context))
    context_array = np.asarray([context[action_id]
                                for action_id in action_ids])
    model = self._model_storage.get_model()
    B = model['B']  # pylint: disable=invalid-name
    mu_hat = model['mu_hat']
    v = self.R * np.sqrt(24 / self.epsilon
                         * self.context_dimension * np.log(1 / self.delta))
    mu_tilde = self.random_state.multivariate_normal(
        mu_hat.flat, v**2 * np.linalg.inv(B))[..., np.newaxis]
    estimated_reward_array = context_array.dot(mu_hat)
    score_array = context_array.dot(mu_tilde)

    estimated_reward_dict = {}
    uncertainty_dict = {}
    score_dict = {}
    for action_id, estimated_reward, score in zip(
            action_ids, estimated_reward_array, score_array):
        estimated_reward_dict[action_id] = float(estimated_reward)
        score_dict[action_id] = float(score)
        uncertainty_dict[action_id] = float(score - estimated_reward)
    return estimated_reward_dict, uncertainty_dict, score_dict

def synchronize(self):
    """Make sure that all stale volumes are removed.
    """
    modified = False
    for uniqueid in six.viewkeys(self._volumes.copy()):
        if not self._volumes[uniqueid].pop('stale', False):
            continue
        modified = True
        # This is a stale volume, destroy it.
        self._destroy_volume(uniqueid)

    if not modified:
        return

    # Now that we successfully removed a volume, retry all the pending
    # resources.
    for pending_id in self._pending:
        self._retry_request(pending_id)
    self._pending = []

    # We just destroyed a volume, refresh cached status from LVM and notify
    # the service of the availability of the new status.
    self._vg_status = localdiskutils.refresh_vg_status(
        localdiskutils.TREADMILL_VG
    )

def __init__(self, policies=None):
    if policies is None:
        policies = self.policies

    if not isinstance(policies, compat.Mapping):
        raise ValueError("policies must be a mapping of bytes"
                         " method names to sequence of policies.")

    allowedMethods = getattr(self, 'allowedMethods', None)
    if not allowedMethods:
        raise ValueError("instance must have allowedMethods")

    required = set(allowedMethods)
    available = six.viewkeys(policies)

    missing = required - available
    if missing:
        raise ValueError("missing methods: {}".format(missing))

    # adapt any policies we have to our resource
    self._actingPolicies = {method: tuple(p.forResource(self)
                                          for p in methodPolicies)
                            for method, methodPolicies in policies.items()}

def this_is_okay():
    d = {}
    iterkeys(d)
    six.iterkeys(d)
    six.itervalues(d)
    six.iteritems(d)
    six.iterlists(d)
    six.viewkeys(d)
    six.viewvalues(d)
    six.viewlists(d)
    itervalues(d)
    future.utils.iterkeys(d)
    future.utils.itervalues(d)
    future.utils.iteritems(d)
    future.utils.iterlists(d)
    future.utils.viewkeys(d)
    future.utils.viewvalues(d)
    future.utils.viewlists(d)
    six.next(d)
    builtins.next(d)

def dzip_exact(*dicts):
    """
    Parameters
    ----------
    *dicts : iterable[dict]
        A sequence of dicts all sharing the same keys.

    Returns
    -------
    zipped : dict
        A dict whose keys are the union of all keys in *dicts, and whose
        values are tuples of length len(dicts) containing the result of
        looking up each key in each dict.

    Raises
    ------
    ValueError
        If dicts don't all have the same keys.

    Example
    -------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    return {k: tuple(d[k] for d in dicts) for k in dicts[0]}

def _convert_asset_timestamp_fields(dict_):
    """
    Takes in a dict of Asset init args and converts dates to pd.Timestamps
    """
    for key in (_asset_timestamp_fields & viewkeys(dict_)):
        value = pd.Timestamp(dict_[key], tz='UTC')
        dict_[key] = None if isnull(value) else value
    return dict_

def as_cql_query(self, formatted=False):
    """
    Returns a CQL query that can be used to recreate this function.
    If `formatted` is set to :const:`True`, extra whitespace will
    be added to make the query more readable.
    """
    sep = '\n    ' if formatted else ' '
    keyspace = protect_name(self.keyspace_name)
    name = protect_name(self.name)

    selected_cols = '*' if self.include_all_columns else ', '.join(protect_name(col.name) for col in self.columns.values())
    base_table = protect_name(self.base_table_name)
    where_clause = self.where_clause

    part_key = ', '.join(protect_name(col.name) for col in self.partition_key)
    if len(self.partition_key) > 1:
        pk = "((%s)" % part_key
    else:
        pk = "(%s" % part_key
    if self.clustering_key:
        pk += ", %s" % ', '.join(protect_name(col.name) for col in self.clustering_key)
    pk += ")"

    properties = TableMetadataV3._property_string(formatted, self.clustering_key, self.options)

    ret = "CREATE MATERIALIZED VIEW %(keyspace)s.%(name)s AS%(sep)s" \
          "SELECT %(selected_cols)s%(sep)s" \
          "FROM %(keyspace)s.%(base_table)s%(sep)s" \
          "WHERE %(where_clause)s%(sep)s" \
          "PRIMARY KEY %(pk)s%(sep)s" \
          "WITH %(properties)s" % locals()

    if self.extensions:
        registry = _RegisteredExtensionType._extension_registry
        for k in six.viewkeys(registry) & self.extensions:  # no viewkeys on OrderedMapSerializeKey
            ext = registry[k]
            cql = ext.after_table_cql(self, k, self.extensions[k])
            if cql:
                ret += "\n\n%s" % (cql,)
    return ret

def __init__(self, *args, **kwargs):
    kwargs = data_mapping_as_kwargs(args, kwargs)
    self._kwargs = kwargs  # Will be used to create the geom
    self.params = copy_keys(kwargs, deepcopy(self.DEFAULT_PARAMS))
    self.aes_params = {ae: kwargs[ae]
                       for ae in (self.aesthetics() &
                                  six.viewkeys(kwargs))}

def use_defaults(self, data):
    """
    Combine data with defaults and set aesthetics from parameters

    stats should not override this method.

    Parameters
    ----------
    data : pandas.DataFrame
        Data used for drawing the geom.

    Returns
    -------
    out : pandas.DataFrame
        Data used for drawing the geom.
    """
    missing = (self.aesthetics() -
               six.viewkeys(self.aes_params) -
               set(data.columns))

    for ae in missing-self.REQUIRED_AES:
        if self.DEFAULT_AES[ae] is not None:
            data[ae] = self.DEFAULT_AES[ae]

    missing = (six.viewkeys(self.aes_params) -
               set(data.columns))

    for ae in self.aes_params:
        data[ae] = self.aes_params[ae]

    return data

def setup_params(self, data):
    params = self.params.copy()

    valid_scale = ('area', 'count', 'width')
    if params['scale'] not in valid_scale:
        msg = "Parameter scale should be one of {}"
        raise PlotnineError(msg.format(valid_scale))

    lookup = {
        'biweight': 'biw',
        'cosine': 'cos',
        'cosine2': 'cos2',
        'epanechnikov': 'epa',
        'gaussian': 'gau',
        'triangular': 'tri',
        'triweight': 'triw',
        'uniform': 'uni'}

    with suppress(KeyError):
        params['kernel'] = lookup[params['kernel'].lower()]

    if params['kernel'] not in six.viewvalues(lookup):
        msg = ("kernel should be one of {}. "
               "You may use the abbreviations {}")
        raise PlotnineError(msg.format(six.viewkeys(lookup),
                                       six.viewvalues(lookup)))

    missing_params = (six.viewkeys(stat_density.DEFAULT_PARAMS) -
                      six.viewkeys(params))
    for key in missing_params:
        params[key] = stat_density.DEFAULT_PARAMS[key]

    return params

def aesthetics(cls):
    """
    Return all the aesthetics for this geom

    geoms should not override this method.
    """
    main = six.viewkeys(cls.DEFAULT_AES) | cls.REQUIRED_AES
    other = {'group'}
    # Need to recognize both spellings
    if 'color' in main:
        other.add('colour')
    if 'outlier_color' in main:
        other.add('outlier_colour')
    return main | other

def use_defaults(self, data):
    """
    Combine data with defaults and set aesthetics from parameters

    geoms should not override this method.

    Parameters
    ----------
    data : pandas.DataFrame
        Data used for drawing the geom.

    Returns
    -------
    out : pandas.DataFrame
        Data used for drawing the geom.
    """
    missing_aes = (six.viewkeys(self.DEFAULT_AES) -
                   six.viewkeys(self.aes_params) -
                   set(data.columns))

    # Not in data and not set, use default
    for ae in missing_aes:
        data[ae] = self.DEFAULT_AES[ae]

    # If set, use it
    for ae, value in self.aes_params.items():
        try:
            data[ae] = value
        except ValueError:
            # sniff out the special cases, like custom
            # tupled linetypes, shapes and colors
            if is_valid_aesthetic(value, ae):
                data[ae] = [value]*len(data)
            else:
                msg = ("'{}' does not look like a "
                       "valid value for `{}`")
                raise PlotnineError(msg.format(value, ae))

    return data

def __init__(self, handlers):
    handler_set = set(six.viewkeys(handlers))
    if handler_set != self._handler_set:
        redundant_handlers_set = handler_set - self._handler_set
        lacked_handlers_set = self._handler_set - handler_set
        raise ValueError('Handler set mismatch. {} redundant and {} lacked.'
                         .format(redundant_handlers_set,
                                 lacked_handlers_set))
    self._handlers = handlers

def update_containers_states(self, context, containers):
    db_containers = self.list(context)
    if not db_containers:
        return

    id_to_db_container_map = {container.container_id: container
                              for container in db_containers}
    id_to_container_map = {container.container_id: container
                           for container in containers}

    for cid in (six.viewkeys(id_to_container_map) &
                six.viewkeys(id_to_db_container_map)):
        container = id_to_container_map[cid]
        # sync status
        db_container = id_to_db_container_map[cid]
        if container.status != db_container.status:
            old_status = container.status
            container.status = db_container.status
            container.save(context)
            LOG.info('Status of container %s changed from %s to %s',
                     container.uuid, old_status, container.status)
        # sync host
        # Note(kiennt): Current host.
        cur_host = CONF.host
        if container.host != cur_host:
            old_host = container.host
            container.host = cur_host
            container.save(context)
            LOG.info('Host of container %s changed from %s to %s',
                     container.uuid, old_host, container.host)

def process_inheritance(config_dct, keys):
    def get_processed_dct(tlkey, host, hostsdict):
        rr = {}
        extends = host.get('extends', [])
        if isinstance(extends, six.string_types):
            extends = [extends]
        for extend in extends + [host['name']]:
            extend_host = hostsdict[extend]
            for key in six.viewkeys(extend_host):
                if key in six.viewkeys(rr) and isinstance(rr[key], list):
                    ehlst = (extend_host[key]
                             if isinstance(extend_host[key], (list, tuple))
                             else [extend_host[key]])
                    for val in ehlst:
                        if not val in rr[key]:
                            rr[key].append(val)
                else:
                    rr[key] = copy.deepcopy(extend_host[key])
        return rr

    for tlkey in keys:
        hostsdict = {x['name']: x for x in config_dct.get(tlkey, [])}
        dfsnodes = _dfs(config_dct.get(tlkey, []))
        #print [x['name'] for x in dfsnodes]
        rr = []
        for dct in dfsnodes:
            isabstract = dct.get('abstract')
            #disabled = dct.get('disabled')
            dct = get_processed_dct(tlkey, dct, hostsdict)
            for key in ['abstract', 'extends']:
                if dct.get(key) is not None:
                    del dct[key]
            #if not isabstract:
            #    process_expansions(dct)
            hostsdict[dct['name']] = dct
            if not isabstract:
                rr.append(dct)
        config_dct[tlkey] = rr

def test_find_loggers_with_filter(self):
    loggers = self.middleware.find_loggers_with_filter(RequestFilter)
    self.assertListEqual(list(six.viewkeys(loggers)), [self.logger])
    self.assertEqual([type(f) for f in loggers[self.logger]],
                     [RequestFilter],
                     loggers[self.logger])

def update_keys(self, keys):
    """
    Add new IDs to cache.

    :param list keys: list of new IDs to be added to cache
    :return: self
    :rtype: ObjectCache
    """
    if not self:
        # for large amounts of data, this may be faster (no need for set
        # and difference calls)
        self.update({cid: {'id': cid} for cid in keys})
    else:
        self.update({cid: {'id': cid}
                     for cid in set(keys).difference(six.viewkeys(self))})
    return self

def dzip_exact(*dicts):
    """
    Parameters
    ----------
    *dicts : iterable[dict]
        A sequence of dicts all sharing the same keys.

    Returns
    -------
    zipped : dict
        A dict whose keys are the union of all keys in *dicts, and whose
        values are tuples of length len(dicts) containing the result of
        looking up each key in each dict.

    Raises
    ------
    ValueError
        If dicts don't all have the same keys.

    Examples
    --------
    >>> result = dzip_exact({'a': 1, 'b': 2}, {'a': 3, 'b': 4})
    >>> result == {'a': (1, 3), 'b': (2, 4)}
    True
    """
    if not same(*map(viewkeys, dicts)):
        raise ValueError(
            "dict keys not all equal:\n\n%s" % _format_unequal_keys(dicts)
        )
    return {k: tuple(d[k] for d in dicts) for k in dicts[0]}

def _convert_asset_timestamp_fields(dict_):
    """
    Takes in a dict of Asset init args and converts dates to pd.Timestamps
    """
    for key in _asset_timestamp_fields & viewkeys(dict_):
        value = pd.Timestamp(dict_[key], tz='UTC')
        dict_[key] = None if isnull(value) else value
    return dict_

def _exp4p_score(self, context):
    """The main part of Exp4.P.
    """
    advisor_ids = list(six.viewkeys(context))

    w = self._modelstorage.get_model()['w']
    if len(w) == 0:
        for i in advisor_ids:
            w[i] = 1

    w_sum = sum(six.viewvalues(w))

    action_probs_list = []
    for action_id in self.action_ids:
        weighted_exp = [w[advisor_id] * context[advisor_id][action_id]
                        for advisor_id in advisor_ids]
        prob_vector = np.sum(weighted_exp) / w_sum
        action_probs_list.append((1 - self.n_actions * self.p_min)
                                 * prob_vector
                                 + self.p_min)
    action_probs_list = np.asarray(action_probs_list)
    action_probs_list /= action_probs_list.sum()

    estimated_reward = {}
    uncertainty = {}
    score = {}
    for action_id, action_prob in zip(self.action_ids, action_probs_list):
        estimated_reward[action_id] = action_prob
        uncertainty[action_id] = 0
        score[action_id] = action_prob
    self._modelstorage.save_model(
        {'action_probs': estimated_reward, 'w': w})

    return estimated_reward, uncertainty, score

def reward(self, history_id, rewards):
    """Reward the previous action with reward.

    Parameters
    ----------
    history_id : int
        The history id of the action to reward.

    rewards : dictionary
        The dictionary {action_id, reward}, where reward is a float.
    """
    context = (self._historystorage
               .get_unrewarded_history(history_id)
               .context)

    model = self._modelstorage.get_model()
    w = model['w']
    action_probs = model['action_probs']
    action_ids = list(six.viewkeys(six.next(six.itervalues(context))))

    # Update the model
    for action_id, reward in six.viewitems(rewards):
        y_hat = {}
        v_hat = {}
        for i in six.viewkeys(context):
            y_hat[i] = (context[i][action_id] * reward
                        / action_probs[action_id])
            v_hat[i] = sum(
                [context[i][k] / action_probs[k] for k in action_ids])
            w[i] = w[i] * np.exp(
                self.p_min / 2
                * (y_hat[i] + v_hat[i]
                   * np.sqrt(np.log(len(context) / self.delta)
                             / (len(action_ids) * self.max_rounds))))

    self._modelstorage.save_model({
        'action_probs': action_probs, 'w': w})

    # Update the history
    self._historystorage.add_reward(history_id, rewards)

def iterids(self):
    r"""Return iterable of the Action ids.

    Returns
    -------
    action_ids: iterable
        Action ids.
    """
    return six.viewkeys(self._actions)

def rebalance(context, data):
    #############################################################################
    # Pipeline data will be a dataframe with boolean columns named 'longs' and
    # 'shorts'.
    pipeline_data = context.pipeline_data
    all_assets = pipeline_data.index

    longs = all_assets[pipeline_data.longs]
    shorts = all_assets[pipeline_data.shorts]

    record(universe_size=len(all_assets))

    # Build a 2x-leveraged, equal-weight, long-short portfolio.
    one_third = 1.0 / context.optim_leveraged
    for asset in longs:
        order_target_percent(asset, one_third)

    for asset in shorts:
        order_target_percent(asset, -one_third)

    # Remove any assets that should no longer be in our portfolio.
    portfolio_assets = longs | shorts
    positions = context.portfolio.positions
    for asset in viewkeys(positions) - set(portfolio_assets):
        # This will fail if the asset was removed from our portfolio because it
        # was delisted.
        if data.can_trade(asset):
            order_target_percent(asset, 0)

def _normalize(d):
    '''
    The above parse function generates output of list in dict form
    i.e. {'abc' : {0: 'xyz', 1: 'pqr'}}. This function normalize it and
    turn them into proper data type, i.e. {'abc': ['xyz', 'pqr']}

    Note: if dict has element starts with 10, 11 etc.. this function
    won't fill blanks.
    for eg: {'abc': {10: 'xyz', 12: 'pqr'}} will convert to
    {'abc': ['xyz', 'pqr']}
    '''
    newd = {}
    if isinstance(d, dict) == False:
        return d

    # if dictionary. iterate over each element and append to newd
    for k, v in six.iteritems(d):
        if isinstance(v, dict):
            first_key = next(iter(six.viewkeys(v)))
            if isinstance(first_key, int):
                temp_new = []
                for k1, v1 in v.items():
                    temp_new.append(_normalize(v1))
                newd[k] = temp_new
            elif first_key == '':
                newd[k] = v.values()[0]
            else:
                newd[k] = _normalize(v)
        else:
            newd[k] = v
    return newd

def synchronize(self):
    modified = False
    for app_unique_name in six.viewkeys(self._devices.copy()):
        if not self._devices[app_unique_name].get('stale', False):
            continue
        modified = True
        # This is a stale device, destroy it.
        self.on_delete_request(app_unique_name)

    if not modified:
        return

    # Read bridge status
    self._bridge_mtu = netdev.dev_mtu(self._TMBR_DEV)

def viewkeys(self):
    return self._dict.viewkeys()

def instantiate_plugins(config, **kwargs):
    kwargs.setdefault('propagate_map_exceptions', True)
    kwargs.setdefault('on_load_failure_callback',
                      extension_load_failure_callback)
    kwargs.setdefault('verify_requirements', True)
    kwargs.setdefault('names',
                      config.get('PLUGINS_ENABLED',
                                 six.viewkeys(config.get('PLUGINS', {}))))
    plugins = stevedore.named.NamedExtensionManager(**kwargs)
    logger.debug("'{!s}' plugins: loaded {!s}",
                 kwargs['namespace'], plugins.names())
    for plugin in plugins:
        plugin_config = config.get('PLUGINS', {}).get(plugin.name, {})
        plugin_config.setdefault('DEBUG', config.get('DEBUG', False))
        plugin_config.setdefault('DEFAULT_TIMEOUT',
                                 config.get('DEFAULT_TIMEOUT', False))
        plugin.obj = plugin.plugin(plugin_config)
        plugin.obj.plugin_name = plugin.name
    logger.debug("'{!s}' plugins: instantiated {!s}",
                 kwargs['namespace'], plugins.names())
    return plugins

def instantiate_practices(config, **kwargs):
    kwargs.setdefault('propagate_map_exceptions', True)
    kwargs.setdefault('on_load_failure_callback',
                      extension_load_failure_callback)
    kwargs.setdefault('verify_requirements', True)
    kwargs.setdefault('names',
                      config.get('PRACTICES_ENABLED',
                                 six.viewkeys(config.get('PRACTICES', {}))))
    practices = stevedore.named.NamedExtensionManager(**kwargs)
    logger.debug("'{!s}' practices: loaded {!s}",
                 kwargs['namespace'], practices.names())
    for plugin in practices:
        plugin_config = config.get('PRACTICES', {}).get(plugin.name, {})
        plugin.obj = plugin.plugin(plugin_config)
        plugin.obj.plugin_name = plugin.name
    logger.debug("'{!s}' practices: instantiated {!s}",
                 kwargs['namespace'], practices.names())
    return practices

def adjust_attributes(token, replacements):
    if PY3 or _utils.PY27:
        needs_adjustment = viewkeys(token['data']) & viewkeys(replacements)
    else:
        needs_adjustment = frozenset(token['data']) & frozenset(replacements)

    if needs_adjustment:
        token['data'] = OrderedDict((replacements.get(k, k), v)
                                    for k, v in token['data'].items())

def everything_else_is_wrong():
    d = None  # note: bugbear is no type checker
    d.iterkeys()
    d.itervalues()
    d.iteritems()
    d.iterlists()  # Djangoism
    d.viewkeys()
    d.viewvalues()
    d.viewitems()
    d.viewlists()  # Djangoism
    d.next()
    d.keys().next()

def _retrieve_assets(self, sids, asset_tbl, asset_type):
    """
    Internal function for loading assets from a table.

    This should be the only method of `AssetFinder` that writes Assets
    into self._asset_cache.

    Parameters
    ---------
    sids : iterable of int
        Asset ids to look up.
    asset_tbl : sqlalchemy.Table
        Table from which to query assets.
    asset_type : type
        Type of asset to be constructed.

    Returns
    -------
    assets : dict[int -> Asset]
        Dict mapping requested sids to the retrieved assets.
    """
    # Fastpath for empty request.
    if not sids:
        return {}

    cache = self._asset_cache
    hits = {}

    for assets in self._group_into_chunks(sids):
        # Load misses from the db.
        query = self._select_assets_by_sid(asset_tbl, assets)

        for row in imap(dict, query.execute().fetchall()):
            asset = asset_type(**_convert_asset_timestamp_fields(row))
            sid = asset.sid
            hits[sid] = cache[sid] = asset

    # If we get here, it means something in our code thought that a
    # particular sid was an equity/future and called this function with a
    # concrete type, but we couldn't actually resolve the asset. This is
    # an error in our code, not a user-input error.
    misses = tuple(set(sids) - viewkeys(hits))
    if misses:
        if asset_type == Equity:
            raise EquitiesNotFound(sids=misses)
        else:
            raise FutureContractsNotFound(sids=misses)
    return hits

def from_geom(geom):
    """
    Return an instantiated stat object

    stats should not override this method.

    Parameters
    ----------
    geom : geom
        `geom`

    Returns
    -------
    out : stat
        A stat object

    Raises
    ------
    :class:`PlotnineError` if unable to create a `stat`.
    """
    name = geom.params['stat']
    kwargs = geom._kwargs
    # More stable when reloading modules than
    # using issubclass
    if (not isinstance(name, type) and
            hasattr(name, 'compute_layer')):
        return name

    if isinstance(name, stat):
        return name
    elif isinstance(name, type) and issubclass(name, stat):
        klass = name
    elif is_string(name):
        if not name.startswith('stat_'):
            name = 'stat_{}'.format(name)
        klass = Registry[name]
    else:
        raise PlotnineError(
            'Unknown stat of type {}'.format(type(name)))

    valid_kwargs = (
        (klass.aesthetics() |
         six.viewkeys(klass.DEFAULT_PARAMS)) &
        six.viewkeys(kwargs))

    params = {k: kwargs[k] for k in valid_kwargs}
    return klass(geom=geom, **params)

def generate_for_host(host):
    from . import util
    rr = ''
    rr += '''\n########## GENERATED DO NOT MODIFY #####################\n'''
    sshport = 22 if util.is_localhost(host['name']) else 2222
    if not util.is_localhost(host['name']):
        if host.get('match'):
            matches = host.get('match')
            for hostname in [host['name'], host['name'] + '-ports']:
                for match in matches:
                    rr += 'Match originalhost {hostname} exec "{match[condition]}"\n'.format(
                        hostname=hostname, match=match)
                    for key in six.viewkeys(match):
                        if not key in ssh_option_names:
                            continue
                        rr += ' {key} {value}\n'.format(key=key, value=match[key])
                    rr += '\n'
        rr += 'host {}\n'.format(host['name'])
        for key, val in six.iteritems(host):
            if not key in ssh_option_names:
                continue
            if not isinstance(val, (list, tuple)):
                val = [val]
            for vv in val:
                rr += ' {key} {value}\n'.format(key=key, value=vv)
        rr += '\n'
        if host.get('containers'):
            rr += 'host {}-ports\n'.format(host['name'])
            if not 'HostName' in six.viewkeys(host):
                host['HostName'] = host['name']
            for key, val in six.iteritems(host):
                if not key in ssh_option_names + ['LocalForward']:
                    continue
                if not isinstance(val, (list, tuple)):
                    val = [val]
                for vv in val:
                    rr += ' {key} {value}\n'.format(key=key, value=vv)
            # rr += ' LocalForward {}-local:2375 localhost:2375\n'.format(host['name'])
            for cont in host.get('containers', []):
                ports = cont['image'].get('ports', [])
                for port in ports + ["{}:22".format(sshport)]:
                    (p1, p2) = port.split(':')
                    rr += (
                        " LocalForward {0}:{1} {2}:{1}\n".format(
                            cont['name'], p1, cont['ip']))
            rr += '\n'
    for cont in host.get('containers', []):
        rr += container_entry_template.format(**locals())
        for key, val in six.iteritems(host):
            if key in container_ssh_option_names:
                rr += ' {} {}\n'.format(key, val)
    return rr

def _retrieve_assets(self, sids, asset_tbl, asset_type):
    """
    Internal function for loading assets from a table.

    This should be the only method of `AssetFinder` that writes Assets
    into self._asset_cache.

    Parameters
    ---------
    sids : iterable of int
        Asset ids to look up.
    asset_tbl : sqlalchemy.Table
        Table from which to query assets.
    asset_type : type
        Type of asset to be constructed.

    Returns
    -------
    assets : dict[int -> Asset]
        Dict mapping requested sids to the retrieved assets.
    """
    # Fastpath for empty request.
    if not sids:
        return {}

    cache = self._asset_cache
    hits = {}

    querying_equities = issubclass(asset_type, Equity)
    filter_kwargs = (
        _filter_equity_kwargs
        if querying_equities else
        _filter_future_kwargs
    )

    rows = self._retrieve_asset_dicts(sids, asset_tbl, querying_equities)
    for row in rows:
        sid = row['sid']
        asset = asset_type(**filter_kwargs(row))
        hits[sid] = cache[sid] = asset

    # If we get here, it means something in our code thought that a
    # particular sid was an equity/future and called this function with a
    # concrete type, but we couldn't actually resolve the asset. This is
    # an error in our code, not a user-input error.
    misses = tuple(set(sids) - viewkeys(hits))
    if misses:
        if querying_equities:
            raise EquitiesNotFound(sids=misses)
        else:
            raise FutureContractsNotFound(sids=misses)
    return hits

def _forward(self):
    if self.fallback_to_local:
        return self._local_forward()

    responses = {}
    errors = collections.defaultdict(lambda: [])

    for sp in self.enabled_sps:
        if sp == 'default':
            r = self._do_request_on('default')
            if 200 <= r.status_code < 300:
                responses['default'] = r
                if not self.aggregate:
                    return self._finalize(r)
            else:
                errors[r.status_code].append(r)
        else:
            for p in auth.get_projects_at_sp(sp, self.details.token):
                r = self._do_request_on(sp, p)
                if 200 <= r.status_code < 300:
                    responses[(sp, p)] = r
                    if not self.aggregate:
                        return self._finalize(r)
                else:
                    errors[r.status_code].append(r)

    # NOTE(knikolla): If we haven't returned yet, either we're aggregating
    # or there are errors.
    if not errors:
        # TODO(knikolla): Plug this into _finalize to have a common path
        # for everything that is returned.
        return flask.Response(
            services.aggregate(responses,
                               self.details.action[0],
                               self.details.service,
                               version=self.details.version,
                               params=self.details.args,
                               path=request.base_url,
                               strip_details=self.strip_details),
            200,
            content_type='application/json'
        )

    if six.viewkeys(errors) == {404}:
        return self._finalize(errors[404][0])
    else:
        utils.safe_pop(errors, 404)

    if len(errors.keys()) == 1:
        return self._finalize(list(errors.values())[0][0])

    # TODO(jfreud): log
    return flask.Response("Something strange happened.\n", 500)

def get_action(self, context=None, n_actions=None):
    """Return the action to perform

    Parameters
    ----------
    context : {array-like, None}
        The context of current state, None if no context available.

    n_actions: int (default: None)
        Number of actions wanted to recommend users. If None, only return
        one action. If -1, get all actions.

    Returns
    -------
    history_id : int
        The history id of the action.

    recommendations : list of dict
        Each dict contains
        {Action object, estimated_reward, uncertainty}.
    """
    if self._action_storage.count() == 0:
        return self._get_action_with_empty_action_storage(context,
                                                          n_actions)

    probs = self._exp3_probs()
    if n_actions == -1:
        n_actions = self._action_storage.count()

    action_ids = list(six.viewkeys(probs))
    prob_array = np.asarray([probs[action_id]
                             for action_id in action_ids])
    recommendation_ids = self.random_state.choice(
        action_ids, size=n_actions, p=prob_array, replace=False)

    if n_actions is None:
        recommendations = self._recommendation_cls(
            action=self._action_storage.get(recommendation_ids),
            estimated_reward=probs[recommendation_ids],
            uncertainty=probs[recommendation_ids],
            score=probs[recommendation_ids],
        )
    else:
        recommendations = []  # pylint: disable=redefined-variable-type
        for action_id in recommendation_ids:
            recommendations.append(self._recommendation_cls(
                action=self._action_storage.get(action_id),
                estimated_reward=probs[action_id],
                uncertainty=probs[action_id],
                score=probs[action_id],
            ))

    history_id = self._history_storage.add_history(context, recommendations)
    return history_id, recommendations

def __init__(self):
    if context.GLOBAL.cell is not None:
        zkclient = context.GLOBAL.zk.conn
        cell_state = CellState()

        _LOGGER.info('Initializing api.')

        watch_running(zkclient, cell_state)
        watch_placement(zkclient, cell_state)
        watch_finished(zkclient, cell_state)
        watch_finished_history(zkclient, cell_state)

    def _list(match=None, finished=False, partition=None):
        """List instances state."""
        if match is None:
            match = '*'
        if '#' not in match:
            match += '#*'

        filtered = [
            {'name': name,
             'state': item['state'],
             'host': item['host']}
            for name, item in six.viewitems(cell_state.placement.copy())
            if fnmatch.fnmatch(name, match)
        ]

        if finished:
            for name in six.viewkeys(cell_state.finished.copy()):
                if fnmatch.fnmatch(name, match):
                    state = cell_state.get_finished(name)
                    item = {'name': name}
                    item.update(state)
                    filtered.append(item)

        if partition is not None:
            hosts = [rec['_id'] for rec in API._get_server_info()
                     if rec['partition'] == partition]
            filtered = [item for item in filtered
                        if item['host'] in hosts]

        return sorted(filtered, key=lambda item: item['name'])

    @schema.schema({'$ref': 'instance.json#/resource_id'})
    def get(rsrc_id):
        """Get instance state."""
        if rsrc_id in cell_state.placement:
            state = cell_state.placement[rsrc_id]
        else:
            state = cell_state.get_finished(rsrc_id)

        if not state:
            return None

        res = {'name': rsrc_id}
        res.update(state)
        return res

    self.list = _list
    self.get = get