The following 14 code examples, extracted from open-source Python projects, illustrate how to use charmhelpers.core.hookenv.leader_get().
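All of these run inside a Juju charm hook context, where the leader-get and leader-set hook tools are available; imports are as in the source projects. As a quick orientation, here is a minimal sketch of the usual read-then-write pattern (not taken from the examples below; get_shared_secret and the 'shared-secret' key are hypothetical):

import uuid

from charmhelpers.core import hookenv

def get_shared_secret():
    # leader_get(attribute=...) returns that one leader-settings value,
    # or None if it has never been set; leader_get() with no argument
    # returns the whole settings dict.
    secret = hookenv.leader_get(attribute='shared-secret')
    if secret is None and hookenv.is_leader():
        # Only the leader may write leader settings; the other units
        # see the new value in a later leader-settings-changed hook.
        secret = uuid.uuid4().hex
        hookenv.leader_set({'shared-secret': secret})
    return secret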
def get_encryption_key():
    encryption_key = config("encryption-key")
    if not encryption_key:
        encryption_key = leader_get('heat-auth-encryption-key')
    return encryption_key
def __call__(self):
    ctxt = {}
    # check if we have stored encryption key
    ctxt['encryption_key'] = get_encryption_key()
    ctxt['heat_domain_admin_passwd'] = (
        leader_get('heat-domain-admin-passwd'))
    return ctxt
def db_sync_done(self):
    return hookenv.leader_get(attribute='db-sync-done')
def domain_init_done(self):
    """Query leader db to see if domain creation is done

    @returns boolean
    """
    return hookenv.leader_get(attribute='domain-init-done')
def create_initial_servers_and_domains(cls):
    """Create the nameserver entry and domains based on the charm user
    supplied config

    NOTE(AJK): This only wants to be done ONCE and by the leader, so we
    use leader settings to store that we've done it, after it's
    successfully completed.

    @returns None
    """
    KEY = 'create_initial_servers_and_domains'
    if hookenv.is_leader() and not hookenv.leader_get(KEY):
        nova_domain_name = hookenv.config('nova-domain')
        neutron_domain_name = hookenv.config('neutron-domain')
        with cls.check_zone_ids(nova_domain_name, neutron_domain_name):
            if hookenv.config('nameservers'):
                for ns in hookenv.config('nameservers').split():
                    cls.create_server(ns)
            else:
                hookenv.log('No nameserver specified, skipping creation of '
                            'nova and neutron domains',
                            level=hookenv.WARNING)
                return
            if nova_domain_name:
                cls.create_domain(
                    nova_domain_name,
                    hookenv.config('nova-domain-email'))
            if neutron_domain_name:
                cls.create_domain(
                    neutron_domain_name,
                    hookenv.config('neutron-domain-email'))
        # if this fails, we weren't the leader any more; another unit may
        # attempt to do this too.
        hookenv.leader_set({KEY: 'done'})
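The example above is a common run-once idiom: only the leader performs the work, and a leader-settings flag records that it completed. Distilled into a sketch (the run_once name and 'init-done' key are ours, not from the charm):

from charmhelpers.core import hookenv

def run_once(task):
    # Guard: only the current leader runs the task, and only if no unit
    # has recorded completion yet.
    if hookenv.is_leader() and not hookenv.leader_get('init-done'):
        task()
        # Written only after the task succeeds; if leadership was lost
        # mid-hook this write fails, and the next leader will retry.
        hookenv.leader_set({'init-done': 'done'})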
def get_rndc_secret():
    """rndc secret

    :returns: str: rndc secret
    """
    return hookenv.leader_get(attribute=LEADERDB_SECRET_KEY)
def get_sync_src():
    """URL published zone file can be retrieved from

    :returns: str: URL published zone file can be retrieved from
    """
    return hookenv.leader_get(attribute=LEADERDB_SYNC_SRC_KEY)
def get_sync_time():
    """Epoch seconds when published sync was created

    :returns: str: Epoch seconds when published sync was created
    """
    return hookenv.leader_get(attribute=LEADERDB_SYNC_TIME_KEY)
def cluster_token(self):
    '''Getter to return the unique cluster token.'''
    if not is_leader():
        return leader_get('token')
    if not self.db.get('cluster-token'):
        token = self.id_generator()
        self.db.set('cluster-token', token)
        return token
    return self.db.get('cluster-token')
def perform_self_unregistration(cluster=None):
    '''Attempt self removal during unit teardown.'''
    etcdctl = EtcdCtl()
    leader_address = leader_get('leader_address')
    unit_name = os.getenv('JUJU_UNIT_NAME').replace('/', '')
    members = etcdctl.member_list()
    # Self Unregistration
    etcdctl.unregister(members[unit_name]['unit_id'], leader_address)
def _load_state(self):
    self.msg('Loading state')

    # All responses must be stored in the leadership settings.
    # The leader cannot use local state, as a different unit may
    # be leader next time. Which is fine, as the leadership
    # settings are always available.
    self.grants = json.loads(hookenv.leader_get(self.key) or '{}')

    local_unit = hookenv.local_unit()

    # All requests must be stored on the peers relation. This is
    # the only channel units have to communicate with the leader.
    # Even the leader needs to store its requests here, as a
    # different unit may be leader by the time the request can be
    # granted.
    if self.relid is None:
        # The peers relation is not available. Maybe we are early in
        # the unit's lifecycle. Maybe this unit is standalone.
        # Fallback to using local state.
        self.msg('No peer relation. Loading local state')
        self.requests = {local_unit: self._load_local_state()}
    else:
        self.requests = self._load_peer_state()
        if local_unit not in self.requests:
            # The peers relation has just been joined. Update any state
            # loaded from our peers with our local state.
            self.msg('New peer relation. Merging local state')
            self.requests[local_unit] = self._load_local_state()
def data(self):
    return hookenv.leader_get()
def server_connected(peer) -> None:
    """The peer.available state is set when there are one or more peer
    units that have joined.

    :return:
    """
    update_status()
    bricks = check_for_new_devices()
    if bricks.is_ok():
        log('Reporting my bricks {} to the leader'.format(bricks.value))
        peer.set_bricks(bricks=bricks.value)

    if not is_leader():
        log('Reporting my public address {} to the leader'.format(
            unit_public_ip()))
        peer.set_address(address_type='public', address=unit_public_ip())
        return

    # I am the leader
    log('Leader probing peers')

    probed_units = []
    try:
        p = hookenv.leader_get('probed-units')
        if p:
            probed_units = json.loads(p)
    except json.decoder.JSONDecodeError as e:
        log("json decoder failed for {}: {}".format(e.doc, e.msg))

    log("probed_units: {}".format(probed_units))
    peer_info = peer.get_peer_info()
    for unit in peer_info:
        if unit in probed_units:
            continue
        address = peer_info[unit]['address']
        log('probing host {} at {}'.format(unit, address))
        status_set('maintenance', 'Probing peer {}'.format(unit))
        try:
            peer_probe(address)
            probed_units.append(unit)
        except (GlusterCmdException, GlusterCmdOutputParseError):
            log('Error probing host {}: {}'.format(unit, address), ERROR)
            continue
        log('successfully probed {}: {}'.format(unit, address), DEBUG)
    settings = {'probed-units': json.dumps(probed_units)}
    hookenv.leader_set(settings)
    status_set('maintenance', '')
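Leader settings store flat string values, which is why the example above wraps the probed-units list in json.dumps before leader_set and parses it back with json.loads after leader_get. The round trip, reduced to a sketch (the 'probed-units' key mirrors the example; the helper name is ours):

import json

from charmhelpers.core import hookenv

def append_probed_unit(unit_name):
    # Decode the JSON-encoded list; leader_get returns None when the
    # key has never been set, hence the '[]' fallback.
    probed = json.loads(hookenv.leader_get('probed-units') or '[]')
    if unit_name not in probed:
        probed.append(unit_name)
        # Re-encode before writing back; only the leader may do this.
        hookenv.leader_set({'probed-units': json.dumps(probed)})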
def register_node_with_leader(cluster):
    '''
    Control flow mechanism to perform self registration with the leader.

    Before executing self registration, we must adhere to the nature of
    offline static turnup rules. If we find a GUID in the member list
    without peering information the unit will enter a race condition and
    must wait for a clean status output before we can progress to
    self registration.
    '''
    # We're going to communicate with the leader, and we need our bootstrap
    # startup string once.. TBD after that.
    etcdctl = EtcdCtl()
    bag = EtcdDatabag()
    # Assume a hiccup during registration and attempt a retry
    if bag.cluster_unit_id:
        bag.cluster = bag.registration_peer_string
        # conf_path = '{}/etcd.conf'.format(bag.etcd_conf_dir)
        render_config(bag)
        time.sleep(2)

    try:
        peers = etcdctl.member_list(leader_get('leader_address'))
    except CalledProcessError:
        log("Etcd attempted to invoke registration before service ready")
        # This error state is transient, and does not imply the unit is
        # broken. Erroring at this stage can be resolved, and should not
        # affect the overall condition of unit turn-up. Return from the
        # method and let the charm re-invoke on next run
        return

    for unit in peers:
        if 'client_urls' not in peers[unit].keys():
            # we cannot register. State not attainable.
            msg = 'Waiting for unit to complete registration.'
            status_set('waiting', msg)
            return

    if not bag.cluster_unit_id:
        bag.leader_address = leader_get('leader_address')
        resp = etcdctl.register(bag.__dict__)
        if resp and 'cluster_unit_id' in resp.keys() and 'cluster' in resp.keys():  # noqa
            bag.cache_registration_detail('cluster_unit_id',
                                          resp['cluster_unit_id'])
            bag.cache_registration_detail('registration_peer_string',
                                          resp['cluster'])
            bag.cluster_unit_id = resp['cluster_unit_id']
            bag.cluster = resp['cluster']

    render_config(bag)
    host.service_restart(bag.etcd_daemon)
    time.sleep(2)

    # Check health status before we say we are good
    etcdctl = EtcdCtl()
    status = etcdctl.cluster_health()
    if 'unhealthy' in status:
        status_set('blocked', 'Cluster not healthy.')
        return
    open_port(bag.port)
    set_state('etcd.registered')