我们从Python开源项目中,提取了以下21个代码示例,用于说明如何使用charmhelpers.core.hookenv.status_set()。
def install(self):
    """Install apt packages and/or snaps required by this charm.

    Apt packages come from ``self.all_packages``; snaps come from
    ``self.all_snaps`` when a snap install has been requested via the
    ``openstack-origin`` configuration option.
    """
    missing = fetch.filter_installed_packages(self.all_packages)
    if missing:
        hookenv.status_set('maintenance', 'Installing packages')
        fetch.apt_install(missing, fatal=True)
    if os_utils.snap_install_requested() and self.all_snaps:
        hookenv.status_set('maintenance', 'Installing snaps')
        snap_info = os_utils.get_snaps_install_info_from_origin(
            self.all_snaps,
            self.config['openstack-origin'],
            mode=self.snap_mode)
        os_utils.install_os_snaps(snap_info)
    # AJK: we set this as charms can use it to detect installed state
    self.set_state('{}-installed'.format(self.name))
    self.update_api_ports()
    hookenv.status_set('maintenance',
                       'Installation complete - awaiting next status')
def report_ready(hadoop):
    """Mark the unit active: the hadoop plugin is fully ready."""
    hookenv.status_set('active', 'ready')
def report_blocked():
    """Mark the unit blocked until the hadoop-plugin relation exists."""
    hookenv.status_set('blocked', 'waiting for relation to hadoop plugin')
def report_waiting_for_hadoop(hadoop):
    """Mark the unit waiting while the hadoop plugin comes up."""
    hookenv.status_set('waiting', 'waiting for plugin to become ready')
def report_waiting_for_java(hadoop):
    """Mark the unit waiting while Java is being installed/configured."""
    hookenv.status_set('waiting', 'waiting for java to become ready')
def setup_prometheus():
    """Install and configure prometheus, deferring until basenode is done."""
    if is_state('basenode.complete'):
        hookenv.status_set('maintenance', 'Configuring software')
        set_datadir_perms()
        install_packages()
        set_state('prometheus.do-check-reconfig')
    else:
        # basenode hasn't finished yet; try again on a later hook.
        hookenv.status_set('maintenance', 'Waiting for basenode to run')
def restart_prometheus():
    """Start (or restart, after a config change) the prometheus service."""
    if host.service_running(SVCNAME):
        hookenv.log('Restarting {}, config file changed...'.format(SVCNAME))
        host.service_restart(SVCNAME)
    else:
        hookenv.log('Starting {}...'.format(SVCNAME))
        host.service_start(SVCNAME)
    hookenv.status_set('active', 'Ready')
    set_state('prometheus.started')
    remove_state('prometheus.do-restart')


# Relations
def assess_status(self):
    """Publish the application version and set workload status.

    Blocked until the LDAP configuration is complete, active after.
    """
    hookenv.application_version_set(self.application_version)
    if self.configuration_complete():
        hookenv.status_set('active', 'Unit is ready')
    else:
        hookenv.status_set('blocked', 'LDAP configuration incomplete')
def bootstrap(self):
    """Generate Jenkins' initial config.

    Renders the master config template, opens the web and (optionally)
    JNLP ports.  Returns True on success, False when the configured
    jnlp-port is out of range.
    """
    hookenv.log("Bootstrapping initial Jenkins configuration")
    config = hookenv.config()
    jnlp_port = config["jnlp-port"]
    if jnlp_port < -1 or jnlp_port > 65535:
        err = "{} is not a valid setting for jnlp-port".format(jnlp_port)
        hookenv.log(err)
        hookenv.status_set("blocked", err)
        return False
    templating.render(
        "jenkins-config.xml", paths.CONFIG_FILE,
        {"master_executors": config["master-executors"],
         "jnlp_port": jnlp_port},
        owner="jenkins", group="nogroup")
    hookenv.open_port(PORT)
    # A positive value means a fixed JNLP port was chosen; open it too.
    if jnlp_port > 0:
        hookenv.open_port(jnlp_port)
    return True
def configure_openvswitch(self, odl_ovsdb):
    """Point the local Open vSwitch at the ODL OVSDB controller."""
    hookenv.log("Configuring OpenvSwitch with ODL OVSDB controller: %s"
                % odl_ovsdb.connection_string())
    data_network = self.config.get('os-data-network')
    local_ip = ch_ip.get_address_in_network(data_network,
                                            hookenv.unit_private_ip())
    ovs.set_config('local_ip', local_ip)
    ovs.set_config('controller-ips', odl_ovsdb.private_address(),
                   table='external_ids')
    ovs.set_config('host-id', socket.gethostname(),
                   table='external_ids')
    ovs.set_manager(odl_ovsdb.connection_string())
    hookenv.status_set('active', 'Unit is ready')
def unconfigure_openvswitch(self, odl_ovsdb):
    """Remove the ODL manager and per-bridge controllers from OVS."""
    hookenv.log("Unconfiguring OpenvSwitch")
    subprocess.check_call(['ovs-vsctl', 'del-manager'])
    for bridge in subprocess.check_output(['ovs-vsctl', 'list-br']).split():
        subprocess.check_call(['ovs-vsctl', 'del-controller', bridge])
    hookenv.status_set(
        'waiting',
        'Open vSwitch not configured with an ODL OVSDB controller')
def _add_dnsha_config(self, hacluster):
    """Add a DNSHA object to self.resources

    @param hacluster instance of interface class HAClusterRequires
    """
    # Nothing to do unless DNS HA has been enabled in charm config.
    if not self.config.get(DNSHA_KEY):
        return
    settings = ['os-admin-hostname', 'os-internal-hostname',
                'os-public-hostname', 'os-access-hostname']
    for setting in settings:
        hostname = self.config.get(setting)
        # Unset hostnames are skipped, not treated as errors.
        if hostname is None:
            hookenv.log(
                'DNS HA: Hostname setting {} is None. Ignoring.'.format(
                    setting),
                hookenv.DEBUG)
            continue
        # Derive the endpoint type from the config key name,
        # e.g. 'os-admin-hostname' -> 'admin'.
        m = re.search('os-(.+?)-hostname', setting)
        if m:
            endpoint_type = m.group(1)
            # resolve_address's ADDRESS_MAP uses 'int' not 'internal'
            if endpoint_type == 'internal':
                endpoint_type = 'int'
        else:
            # A key in `settings` that doesn't match the pattern is a
            # programming error: block the unit and abort.
            msg = (
                'Unexpected DNS hostname setting: {}. Cannot determine '
                'endpoint_type name'.format(setting))
            hookenv.status_set('blocked', msg)
            raise os_ha.DNSHAException(msg)
        ip = os_ip.resolve_address(
            endpoint_type=endpoint_type,
            override=False)
        hacluster.add_dnsha(self.name, ip, hostname, endpoint_type)
def upgrade_if_available(self, interfaces_list):
    """Upgrade OpenStack if an upgrade is available

    :param interfaces_list: List of instances of interface classes
    :returns: None
    """
    if not self.openstack_upgrade_available(self.release_pkg):
        return
    hookenv.status_set('maintenance', 'Running openstack upgrade')
    self.do_openstack_pkg_upgrade()
    self.do_openstack_upgrade_config_render(interfaces_list)
    self.do_openstack_upgrade_db_migration()
def _assess_status(self):
    """Assess the status of the unit and set the workload status.

    Runs, in order: the paused check, the custom status check, the
    interface-presence check and the running-services check.  The first
    check that returns a non-None state wins and is reported via
    status_set(); if none do, the unit is reported active.  Derived
    classes override the individual check functions (or
    :meth:`ports_to_check`) to customise behaviour.

    SIDE EFFECT: calls status_set(state, message) to set the workload
    status in juju.
    """
    # The application version is always published alongside the status.
    # NOTE(tinwood): 'application_version' lives in classes.py, but this
    # is always a mixin on that class, so the access is safe.
    hookenv.application_version_set(self.application_version)
    checks = (self.check_if_paused,
              self.custom_assess_status_check,
              self.check_interfaces,
              self.check_services_running)
    for check in checks:
        state, message = check()
        if state is not None:
            hookenv.status_set(state, message)
            return
    # No check objected, so the unit is active.
    hookenv.status_set('active', 'Unit is ready')
def install(self):
    """Run the normal charm install, then prepare the softhsm2 token store."""
    super(BarbicanSoftHSMCharm, self).install()
    # The barbican-worker needs read access to softhsm2.conf, so put the
    # barbican user in the softhsm group.
    ch_core_host.add_user_to_group('barbican', 'softhsm')
    self.setup_token_store()
    hookenv.status_set(
        'waiting', 'Charm installed and token store configured')
def on_hsm_connected(self, hsm):
    """Called when the hsm interface becomes connected.

    This means the plugin has connected to the principal Barbican charm.
    In order for the Barbican charm to use this plugin (softhsm2) the
    plugin needs to provide a PKCS#11 libary for barbican to access, a
    password to access the token and a slot_id for the token.

    This sets the plugin_data on the hsm relation for the Barbican charm
    to pick up.

    :param hsm: a BarbicanProvides instance for the relation.
    :raises RuntimeError: if the token_store can't be setup - which is
        FATAL.
    """
    hookenv.log("Setting plugin name to softhsm2", level=hookenv.DEBUG)
    hsm.set_name('softhsm2')
    pin, so_pin = read_pins_from_store()
    if pin is None:
        # The token store may not exist yet; set it up and retry once.
        self.setup_token_store()
        pin, so_pin = read_pins_from_store()
    if pin is None:
        # BUG FIX: 'error' is not a workload state a charm may set;
        # status_set() raises ValueError for it (see the invalid-state
        # test), which would mask the intended RuntimeError.  Use
        # 'blocked', the correct state for "needs operator attention".
        hookenv.status_set('blocked', "Couldn't set up the token store?")
        raise RuntimeError(
            "BarbicanSoftHSMCharm.setup_token_store() failed?")
    slot_id = read_slot_id(BARBICAN_TOKEN_LABEL)
    if slot_id is None:
        raise RuntimeError("No {} slot in token store?"
                           .format(BARBICAN_TOKEN_LABEL))
    plugin_data = {
        "library_path": SOFTHSM2_LIB_PATH,
        "login": pin,
        "slot_id": slot_id
    }
    hsm.set_plugin_data(plugin_data)
def test_status_set_invalid_state(self):
    """status_set rejects workload states outside the allowed set."""
    with self.assertRaises(ValueError):
        hookenv.status_set('random', 'message')
def test_status(self, call):
    """status_set shells out to the status-set hook tool."""
    call.return_value = 0
    hookenv.status_set('active', 'Everything is Awesome!')
    expected_cmd = ['status-set', 'active', 'Everything is Awesome!']
    call.assert_called_with(expected_cmd)
def test_status_enoent(self, log, call):
    """A missing status-set binary (ENOENT) is logged, not raised."""
    call.side_effect = OSError(2, 'fail')
    hookenv.status_set('active', 'Everything is Awesome!')
    log.assert_called_with(
        'status-set failed: active Everything is Awesome!',
        level='INFO')
def test_status_statuscmd_fail(self, log, call):
    """OSErrors other than ENOENT propagate to the caller."""
    call.side_effect = OSError(3, 'fail')
    with self.assertRaises(OSError):
        hookenv.status_set('active', 'msg')
    call.assert_called_with(['status-set', 'active', 'msg'])
def prepare_bigtop_config(self, hr_conf, NN=None, RM=None, extra=None): ''' NN: fqdn of the namenode (head node) RM: fqdn of the resourcemanager (optional) extra: list of extra cluster components ''' # TODO storage dirs should be configurable # TODO list of cluster components should be configurable cluster_components = ['hadoop'] # Setting NN (our head node) is required; exit and log if we dont have it if NN is None: hookenv.log("No NN hostname given for install") hookenv.status_set("waiting", "Cannot install without NN") sys.exit(1) else: nn_fqdn = NN hookenv.log("Using %s as our hadoop_head_node" % nn_fqdn) # If we have an RM, add 'yarn' to the installed components if RM is None: rm_fqdn = '' hookenv.log("No RM hostname given for install") else: rm_fqdn = RM cluster_components.append('yarn') # Add anything else the user wanted if extra is not None: cluster_components.extend(extra) java_package_name = self.options.get('java_package_name') bigtop_apt = self.options.get('bigtop_repo-{}'.format(utils.cpu_arch())) yaml_data = { 'bigtop::hadoop_head_node': nn_fqdn, 'hadoop::common_yarn::hadoop_rm_host': rm_fqdn, 'hadoop::hadoop_storage_dirs': ['/data/1', '/data/2'], 'hadoop_cluster_node::cluster_components': cluster_components, 'bigtop::jdk_package_name': '{0}'.format(java_package_name), 'bigtop::bigtop_repo_uri': '{0}'.format(bigtop_apt), } Path(hr_conf).dirname().makedirs_p() with open(hr_conf, 'w+') as fd: yaml.dump(yaml_data, fd)