The following 49 code examples, extracted from open-source Python projects, illustrate how to use socket.gethostname().
def is_crm_leader(resource, retry=False):
    """
    Returns True if the charm calling this is the elected corosync leader,
    as returned by calling the external "crm" command.

    We allow this operation to be retried to avoid the possibility of getting a
    false negative. See LP #1396246 for more info.
    """
    # NOTE(review): 'retry' is not referenced in this body — presumably it is
    # consumed by a decorator or caller; confirm before removing.
    if resource == DC_RESOURCE_NAME:
        # Leadership for the Designated Controller pseudo-resource is decided
        # by pacemaker itself, not by resource placement.
        return is_crm_dc()
    cmd = ['crm', 'resource', 'show', resource]
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # crm may return bytes; normalize to text (py2/py3 via six).
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError:
        status = None
    # We are the leader if the resource is reported as running on this unit.
    if status and get_unit_hostname() in status:
        return True
    if status and "resource %s is NOT running" % (resource) in status:
        raise CRMResourceNotFound("CRM resource %s not found" % (resource))
    return False
def get_iphostname():
    '''Return this Linux host's hostname and primary IPv4 address as a dict.'''
    def get_ip(ifname):
        # Query the kernel for the interface's address via the SIOCGIFADDR
        # ioctl on a throwaway UDP socket.
        sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        ipaddr = socket.inet_ntoa(fcntl.ioctl(
            sock.fileno(),
            0x8915,  # SIOCGIFADDR
            struct.pack('256s', ifname[:15])
        )[20:24]
        )
        sock.close()
        return ipaddr
    # Try the legacy interface name first, then the systemd predictable name.
    try:
        ip = get_ip('eth0')
    except IOError:
        ip = get_ip('eno1')
    hostname = socket.gethostname()
    return {'hostname': hostname, 'ip': ip}
def get_gw_interfaces():
    '''
    Gateway node can have multiple interfaces. This function parses json
    provided in config to get all gateway interfaces for this node.
    '''
    try:
        ifaces_by_host = json.loads(config('external-interfaces'))
    except ValueError:
        raise ValueError("Invalid json provided for gateway interfaces")
    # Prefer an entry keyed by this unit's hostname, falling back to DEFAULT.
    node_interfaces = []
    for key in (get_unit_hostname(), 'DEFAULT'):
        if key in ifaces_by_host:
            node_interfaces = ifaces_by_host[key].split(',')
            break
    # Every configured interface must actually exist on this machine.
    for interface in node_interfaces:
        if not interface_exists(interface):
            log('Provided gateway interface %s does not exist' % interface)
            raise ValueError('Provided gateway interface does not exist')
    return node_interfaces
def is_crm_dc():
    """
    Determine leadership by querying the pacemaker Designated Controller
    """
    cmd = ['crm', 'status']
    try:
        status = subprocess.check_output(cmd, stderr=subprocess.STDOUT)
        # crm may return bytes; normalize to text (py2/py3 via six).
        if not isinstance(status, six.text_type):
            status = six.text_type(status, "utf-8")
    except subprocess.CalledProcessError as ex:
        raise CRMDCNotFound(str(ex))
    current_dc = ''
    for line in status.split('\n'):
        if line.startswith('Current DC'):
            # Current DC: juju-lytrusty-machine-2 (168108163) - partition with quorum
            current_dc = line.split(':')[1].split()[0]
    # This unit is the DC iff pacemaker reports our hostname as Current DC.
    if current_dc == get_unit_hostname():
        return True
    elif current_dc == 'NONE':
        raise CRMDCNotFound('Current DC: NONE')
    return False
def check(request):
    """Return a host-health snapshot (hostname, IPs, CPU, uptime, memory, swap).

    Values are pre-formatted for display via Django's filesizeformat/timesince.
    NOTE(review): 'ips' comes from module scope (not visible here) and
    psutil.virtual_memory() is sampled once per key, so the fields may be
    from slightly different instants.
    """
    return {
        'hostname': socket.gethostname(),
        'ips': ips,
        'cpus': psutil.cpu_count(),
        'uptime': timesince(datetime.fromtimestamp(psutil.boot_time())),
        'memory': {
            'total': filesizeformat(psutil.virtual_memory().total),
            'available': filesizeformat(psutil.virtual_memory().available),
            'used': filesizeformat(psutil.virtual_memory().used),
            'free': filesizeformat(psutil.virtual_memory().free),
            'percent': psutil.virtual_memory().percent
        },
        'swap': {
            'total': filesizeformat(psutil.swap_memory().total),
            'used': filesizeformat(psutil.swap_memory().used),
            'free': filesizeformat(psutil.swap_memory().free),
            'percent': psutil.swap_memory().percent
        }
    }
def attach(self, streamData=None, name=None):
    """
    streamData: type BULKIO.SDDSStreamDefinition
    name: user id (string)

    The return value is the attachment id (use this to detach).
    If there exists more than one connection, then the return value is a
    list of all attachment id's generated.
    """
    # FIX: compare to None with 'is', not '==' (avoids invoking __eq__ on
    # arbitrary stream objects).
    if streamData is None:
        streamData = createSDDSStreamDefinition()
    if name is None:
        name = _socket.gethostname() + '_user'
    if not isinstance(streamData, _BULKIO.SDDSStreamDefinition):
        raise Exception("streamData must be of type BULKIO.SDDSStreamDefinition")
    if not isinstance(name, str):
        raise Exception("name must be of <type 'str'>")
    retval = self._src.attach(streamData, name)
    # Remember the definition so a later getStreamDef()/detach can find it.
    if retval:
        self._streamdefs[name] = streamData
    return retval
def getStreamDef( self, name=None, hostip=None, pkts=1000, block=True, returnSddsAnalyzer=True): # grab data if stream definition is available sdef =None aid=name if not aid: if len(self._streamdefs) == 0: raise Exception("No attachment have been made, use grabData or call attach") aid = self._streamdefs.keys()[0] print "Defaults to first entry, attach id = ", aid sdef = self._streamdefs[aid] else: sdef = sefl._streamdefs[aid] if not sdef: raise Exception("No SDDS stream definition for attach id:" + aid ) if not hostip: hostip = _socket.gethostbyname(_socket.gethostname()) return self.getData( sdef.multicastAddress, hostip, sdef.port, packets, block=block, returnSDDSAnalyzer=returnSDDSAnalyzer)
def check_open_fh():
    """Preflight-check open-file-handle limits for the pipeline.

    Returns (True, None) when both the per-process hard limit and the
    system-wide limit meet the pipeline minimums, otherwise
    (False, <human-readable reason>).
    """
    _, hard = resource.getrlimit(resource.RLIMIT_NOFILE)
    # A hard limit of -1/RLIM_INFINITY is unlimited; only a bounded value
    # below the minimum is a problem.
    if 0 <= hard < tk_constants.MIN_PROCESS_NOFILE:
        return False, (
            "On machine: %s, process open file handle hard limit (%d) is "
            "less than %d. Please run 'ulimit -n %d' before restarting the "
            "pipeline." % (socket.gethostname(), hard,
                           tk_constants.MIN_PROCESS_NOFILE,
                           tk_constants.MIN_PROCESS_NOFILE))
    if not os.path.exists(tk_constants.GLOBAL_NOFILE_PATH):
        return False, "On machine: %s, %s does not exist." % (
            socket.gethostname(), tk_constants.GLOBAL_NOFILE_PATH)
    with open(tk_constants.GLOBAL_NOFILE_PATH) as f:
        glob_str = f.read().strip()
    if not glob_str.isdigit():
        return False, (
            "On machine: %s, %s contains a non-integer global open file "
            "handle limit: %s." % (socket.gethostname(),
                                   tk_constants.GLOBAL_NOFILE_PATH, glob_str))
    glob = int(glob_str)
    if glob < tk_constants.MIN_GLOBAL_NOFILE:
        return False, (
            "On machine: %s, global open file handle limit (%d) is less "
            "than %d. Please set the global file handle limit to %d before "
            "restarting the pipeline." % (socket.gethostname(), glob,
                                          tk_constants.MIN_GLOBAL_NOFILE,
                                          tk_constants.MIN_GLOBAL_NOFILE))
    return True, None
def check_specs(args):
    """Preflight-validate the sample-sheet specs in *args.specs*.

    Exits the pipeline (via martian.exit) with a human-readable message on
    the first problem found; returns None when everything checks out.
    """
    hostname = socket.gethostname()
    specs = args.specs
    if not specs:
        martian.exit("Cannot create samplesheet with empty specs.")
    for spec in specs:
        check_spec(spec)
    # A CSV spec must stand alone; it cannot be merged with inline entries.
    if len(specs) > 1 and any([spec.get('csv') is not None for spec in specs]):
        martian.exit("Cannot combine specs for CSV plus additional entries")
    # check for samplesheet
    csv_specs = [spec for spec in specs if spec.get('csv')]
    if csv_specs:
        csv_spec = csv_specs[0]
        csv_path = csv_spec['csv']
        tk_preflight.check_file("samplesheet", csv_path, hostname)
        # Accept either an Illumina IEM sheet or a simple CSV layout.
        is_iem = tk_sheet.file_is_iem_samplesheet(csv_path)
        is_csv = tk_sheet.file_is_simple_samplesheet(csv_path)
        if not (is_iem or is_csv):
            martian.exit("Formatting error in sample sheet: %s" % csv_path)
def _node(default=''):
    """Return this machine's network node name, or *default* on failure."""
    try:
        import socket
    except ImportError:
        # Platform built without socket support.
        return default
    try:
        hostname = socket.gethostname()
    except socket.error:
        # Name lookup failed; fall back to the caller-supplied default.
        return default
    return hostname

# os.path.abspath is new in Python 1.5.2:
def __init__(self, log_dir=None, comment=''):
    """
    Args:
        log_dir (string): save location, default is:
            runs/**CURRENT_DATETIME_HOSTNAME**, which changes after each run.
            Use hierarchical folder structure to compare between runs easily.
            e.g. 'runs/exp1', 'runs/exp2'
        comment (string): comment that appends to the default log_dir
    """
    # FIX: compare to None with 'is', not '=='.
    if log_dir is None:
        import socket
        from datetime import datetime
        log_dir = os.path.join(
            'runs',
            datetime.now().strftime('%b%d_%H-%M-%S') +
            '_' + socket.gethostname() + comment)
    self.file_writer = FileWriter(logdir=log_dir)
    # Precompute default histogram bin edges: exponentially spaced values
    # from 1e-12 up to 1e20, mirrored for negatives, with 0 in the middle.
    v = 1E-12
    buckets = []
    neg_buckets = []
    while v < 1E20:
        buckets.append(v)
        neg_buckets.append(-v)
        v *= 1.1
    self.default_bins = neg_buckets[::-1] + [0] + buckets
    self.text_tags = []
    # self.all_writers = {self.file_writer.get_logdir(): self.file_writer}
    self.scalar_dict = {}  # {writer_id : [[timestamp, step, value],...],...}
def __init__(self, file_prefix):
    '''
    Events files have a name of the form
    '/some/file/path/events.out.tfevents.[timestamp].[hostname]'
    '''
    # Timestamp is truncated to whole seconds (first 10 chars of time.time()).
    self._file_prefix = file_prefix + ".out.tfevents." \
        + str(time.time())[:10] + "." + socket.gethostname()

    # Open(Create) the log file with the particular form of name.
    # NOTE(review): basicConfig points the *root logger* at the events file;
    # confirm that mixing log records into the tfevents file is intended.
    logging.basicConfig(filename=self._file_prefix)

    self._num_outstanding_events = 0
    self._py_recordio_writer = RecordWriter(self._file_prefix)
    # Initialize an event instance and write it so the file starts with a
    # wall-time marker.
    self._event = event_pb2.Event()
    self._event.wall_time = time.time()
    self.write_event(self._event)
def getipaddr(self, ifname='eth0'):
    """Best-effort lookup of this host's primary IPv4 address.

    Tries DNS resolution of the FQDN first; if that yields only the loopback
    address, falls back to querying *ifname* directly via the SIOCGIFADDR
    ioctl (Linux only). Returns '127.0.0.1' when both attempts fail.
    """
    import socket
    import struct
    ret = '127.0.0.1'
    # FIX: narrowed the bare 'except:' clauses so real programming errors
    # (e.g. KeyboardInterrupt, NameError) are no longer swallowed.
    try:
        ret = socket.gethostbyname(socket.getfqdn(socket.gethostname()))
    except (socket.error, OSError):
        pass
    if ret == '127.0.0.1':
        try:
            import fcntl
            s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            ret = socket.inet_ntoa(
                fcntl.ioctl(s.fileno(), 0x8915,  # SIOCGIFADDR
                            struct.pack('256s', ifname[:15]))[20:24])
        except (ImportError, IOError, OSError, socket.error, struct.error):
            # No fcntl (non-Linux), bad interface, or py3 str/bytes mismatch.
            pass
    return ret
def __init__(self, addr, port, message, packet_no, username, hostname=None, command=None):
    """Build an IP Messenger packet bound for *addr*:*port*."""
    self.addr = addr
    self.port = port
    # message must be end with \00
    normalized = message.rstrip("\00") + "\00"
    # : is special character for ipmsg protocol so replace.
    self.message = normalized.replace(":", ";")
    self.packet_no = packet_no
    self.username = username
    self.command = command if command else 0x0
    self.hostname = hostname if hostname else gethostname()
    # TODO
    self.encode = "sjis"
    self.sub_encode = "cp932"
    # for manage limit dead
    self.born_time = None
def reset(self):
    """Reset all resolver configuration to the defaults."""
    # Default search domain: this host's name with the first label stripped,
    # e.g. 'box.example.com' -> 'example.com'.
    self.domain = \
        dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
    if len(self.domain) == 0:
        self.domain = dns.name.root
    self.nameservers = []
    self.nameserver_ports = {}
    self.port = 53
    self.search = []
    self.timeout = 2.0
    self.lifetime = 30.0
    # TSIG signing defaults (disabled until a keyring is supplied).
    self.keyring = None
    self.keyname = None
    self.keyalgorithm = dns.tsig.default_algorithm
    # EDNS disabled by default (edns == -1).
    self.edns = -1
    self.ednsflags = 0
    self.payload = 0
    self.cache = None
    self.flags = None
    self.retry_servfail = False
    self.rotate = False
def registerWorker(self):
    """Register this worker in the database and cache known solutions.

    Inserts a worker row (time_finish left NULL until completion), then
    loads the desired and already-generated solution ids into in-memory
    dicts so later lookups avoid repeated queries. Returns 0.
    """
    start_time = strftime("%Y-%m-%d %H:%M:%S %Z")
    # Shell out for the hostname; rstrip drops the trailing newline.
    hostname = check_output("hostname").rstrip()
    ip = gethostbyname(gethostname()).rstrip()
    self.cur.execute("insert into worker(worker_id, hostname, ip, time_start, time_finish) values (?,?,?,?,NULL);",
                     (self.worker_id, hostname, ip, start_time)
                     )
    # Cache desired solutions keyed by id.
    self.cur.execute("select * from desired_solution")
    for row in self.cur.fetchall():
        key = str(row['des_solution_id'])
        self.des_solutions[key] = {'status': str(row['status']),
                                   'des_solution_id': str(row['des_solution_id'])}
    # Cache the set of already-generated solution ids ('1' is a placeholder).
    self.cur.execute("select generated_solution_id from generated_solution")
    for row in self.cur.fetchall():
        self.gen_solutions_id[str(row['generated_solution_id'])] = '1'
    return 0
def reset(self):
    """Reset all resolver configuration to the defaults."""
    # Default search domain: this host's name with the first label stripped.
    self.domain = \
        dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
    if len(self.domain) == 0:
        self.domain = dns.name.root
    self.nameservers = []
    self.search = []
    self.port = 53
    self.timeout = 2.0
    self.lifetime = 30.0
    # TSIG signing defaults (disabled until a keyring is supplied).
    self.keyring = None
    self.keyname = None
    self.keyalgorithm = dns.tsig.default_algorithm
    # EDNS disabled by default (edns == -1).
    self.edns = -1
    self.ednsflags = 0
    self.payload = 0
    self.cache = None
    self.flags = None
    self.retry_servfail = False
def get_main():
    """Render the index page with one row per configured service.

    Maps each service's systemd state to a Bootstrap-style CSS class and
    decides which action buttons (start/stop/restart) are disabled.
    """
    services = []
    for service in config.sections():
        service_status = get_service_action(service, 'status')
        if service_status['status'] == 'not-found':
            cls = 'active'
        elif service_status['status'] == 'inactive' or service_status['status'] == 'failed':
            cls = 'danger'
        elif service_status['status'] == 'active':
            cls = 'success'
        else:
            cls = 'warning'
        # FIX: 'True if X else False' replaced with the boolean expression
        # itself — identical values, idiomatic form.
        disabled_start = cls in ('active', 'success')
        disabled_stop = cls in ('active', 'danger')
        disabled_restart = cls in ('active', 'danger')
        services.append({'class': cls,
                         'disabled_start': disabled_start,
                         'disabled_stop': disabled_stop,
                         'disabled_restart': disabled_restart,
                         'title': config.get(service, 'title'),
                         'service': service})
    return template('index', hostname=gethostname(), services=services)
def get_game(self):
    """Build a chess.pgn.Game for the current board with standard headers."""
    # if the movelist is positioned part way through the game then
    # we must redo all moves to get the full game
    redo_count = len(gv.jcchess.get_redolist())
    for i in range(0, redo_count):
        gv.jcchess.redo_move()
    game = chess.pgn.Game.from_board(self.chessboard)
    # if we did any redo moves then undo them now to get things back
    # the way they were
    for i in range(0, redo_count):
        gv.jcchess.undo_move()
    # Standard PGN Seven Tag Roster values for a local computer game.
    game.headers["Event"] = "Computer Chess Game"
    game.headers["Site"] = socket.gethostname()
    game.headers["Date"] = datetime.strftime(datetime.now(), '%Y.%m.%d')
    game.headers["Round"] = "-"
    game.headers["White"] = gv.jcchess.get_player(WHITE)
    game.headers["Black"] = gv.jcchess.get_player(BLACK)
    return game
def on_change():
    '''
    called when there is a change in the list of IPs and ports for this backend
    '''
    # NOTE(review): 'hostname' is assigned but never used below — confirm
    # whether it was meant to feed the replset update before removing.
    hostname = socket.gethostname()
    ip = get_ip()
    # connect=False defers the actual connection until first operation.
    local_mongo = MongoClient(ip, connect=False)
    try:
        repl_status = local_mongo.admin.command('replSetGetStatus')
        # myState == 1 means PRIMARY,
        # ref https://docs.mongodb.com/manual/reference/replica-states/
        is_mongo_primary = repl_status['myState'] == 1
    except Exception as e:
        log.error(e, 'unable to get primary status')
        return False
    # Only the primary rewrites the replica-set config; secondaries succeed
    # without doing anything.
    if is_mongo_primary:
        return mongo_update_replset_config(local_mongo, ip)
    else:
        return True

# ---------------------------------------------------------
def test_vir_event_filter(self, mock_utcnow, mock_libvirt_event_callback,
                          mock_save_and_reraise_exception):
    """Verify vir_event_filter forwards a libvirt event to the callback.

    Pins utcnow() so the timestamp passed to the callback can be asserted
    exactly, and checks no exception path was taken.
    """
    current_time = timeutils.utcnow()
    mock_utcnow.return_value = current_time
    mock_libvirt_event_callback.return_value = None
    mock_save_and_reraise_exception.return_value = None
    obj = eventfilter.EventFilter()
    # Inputs chosen to index into the module's event/detail lookup tables.
    eventID = 0
    eventType = 5
    detail = 5
    uuID = uuid.uuid4()
    obj.vir_event_filter(eventID, eventType, detail, uuID)
    # The callback must receive the translated event name/detail plus the
    # VM type, this host's name, and the frozen timestamp.
    mock_libvirt_event_callback.assert_called_once_with(
        evft.eventID_dic[eventID],
        evft.detail_dic[eventID][eventType][detail],
        uuID, ec.EventConstants.TYPE_VM,
        socket.gethostname(), current_time)
    mock_save_and_reraise_exception.assert_not_called()
def _make_event(self, process_name):
    """Build a process-stopped notification payload for *process_name*."""
    payload = {
        'event': ec.EventConstants.EVENT_STOPPED,
        'process_name': process_name,
    }
    return {
        'notification': {
            'type': ec.EventConstants.TYPE_PROCESS,
            'hostname': socket.gethostname(),
            'generated_time': timeutils.utcnow(),
            'payload': payload,
        }
    }
def distributed_transaction_commit(*instances):
    """Two-phase commit across several DAL instances.

    Phase 1 prepares every adapter under a key unique to this host/thread;
    phase 2 commits them all, or rolls every prepared transaction back and
    raises RuntimeError if any prepare failed.
    """
    if not instances:
        return
    # BUG FIX: enumerate() returns a one-shot iterator; the original code
    # exhausted it while building `keys`, so the prepare/commit loops below
    # never executed. Materialize it once.
    instances = list(enumerate(instances))
    thread_key = '%s.%s' % (
        socket.gethostname(), threading.current_thread())
    keys = ['%s.%i' % (thread_key, i) for (i, db) in instances]
    for (i, db) in instances:
        if not db._adapter.support_distributed_transaction():
            # BUG FIX: attribute was misspelled '_dbanme' (AttributeError
            # instead of the intended SyntaxError); message typo fixed too.
            raise SyntaxError(
                'distributed transaction not supported by %s' % db._dbname)
    try:
        for (i, db) in instances:
            db._adapter.prepare(keys[i])
    except Exception:
        # Any prepare failure aborts the whole transaction set.
        for (i, db) in instances:
            db._adapter.rollback_prepared(keys[i])
        raise RuntimeError('failure to commit distributed transaction')
    else:
        for (i, db) in instances:
            db._adapter.commit_prepared(keys[i])
    return
def compute_joined(rid=None):
    """Publish this compute node's settings on the cloud-compute relation.

    NOTE(review): relation_set only fires inside the migration/resize
    conditionals — nothing is published when both are disabled; confirm
    that is intended.
    """
    # NOTE(james-page) in MAAS environments the actual hostname is a CNAME
    # record so won't get scanned based on private-address which is an IP
    # add the hostname configured locally to the relation.
    settings = {
        'hostname': gethostname(),
        'private-address': get_relation_ip(
            'cloud-compute', cidr_network=config('os-internal-network')),
    }

    if migration_enabled():
        auth_type = config('migration-auth-type')
        settings['migration_auth_type'] = auth_type
        if auth_type == 'ssh':
            settings['ssh_public_key'] = public_ssh_key()
        relation_set(relation_id=rid, **settings)
    if config('enable-resize'):
        # Resize additionally needs the 'nova' user's SSH key; settings is
        # re-sent with the extra entry.
        settings['nova_ssh_public_key'] = public_ssh_key(user='nova')
        relation_set(relation_id=rid, **settings)
def reset(self):
    """Reset all resolver configuration to the defaults."""
    # Default search domain: this host's name with the first label stripped.
    self.domain = \
        dns.name.Name(dns.name.from_text(socket.gethostname())[1:])
    if len(self.domain) == 0:
        self.domain = dns.name.root
    self.nameservers = []
    self.search = []
    self.port = 53
    self.timeout = 2.0
    self.lifetime = 30.0
    # TSIG signing defaults (disabled until a keyring is supplied).
    self.keyring = None
    self.keyname = None
    self.keyalgorithm = dns.tsig.default_algorithm
    # EDNS disabled by default (edns == -1).
    self.edns = -1
    self.ednsflags = 0
    self.payload = 0
    self.cache = None
def get_fabric_interface():
    '''Returns the fabric interface configured for this node.

    'MANAGEMENT' delegates to the management interface; otherwise the config
    value is a JSON mapping of hostname (or 'DEFAULT') to interface name.
    Raises ValueError on bad JSON, a missing entry, or a non-existent
    interface.
    '''
    fabric_interfaces = config('fabric-interfaces')
    if fabric_interfaces == 'MANAGEMENT':
        return get_mgmt_interface()
    try:
        all_fabric_interfaces = json.loads(fabric_interfaces)
    except ValueError:
        raise ValueError('Invalid json provided for fabric interfaces')
    hostname = get_unit_hostname()
    if hostname in all_fabric_interfaces:
        node_fabric_interface = all_fabric_interfaces[hostname]
    elif 'DEFAULT' in all_fabric_interfaces:
        node_fabric_interface = all_fabric_interfaces['DEFAULT']
    else:
        raise ValueError('No fabric interface provided for node')
    # FIX: the original ended with a 'return' directly after a 'raise',
    # which was unreachable; the condition is inverted so the success path
    # has exactly one return.
    if not interface_exists(node_fabric_interface):
        log('Provided fabric interface %s does not exist'
            % node_fabric_interface)
        raise ValueError('Provided fabric interface does not exist')
    return node_fabric_interface
def run(self):
    """Start watching this node's pods and dispatch events to the CNI pipeline."""
    self.pipeline = h_cni.CNIPipeline()
    # on_done is invoked when a watched pod event has been fully handled.
    self.pipeline.register(h_cni.CallbackHandler(self.on_done))
    self.watcher = k_watcher.Watcher(self.pipeline)
    # Only watch pods scheduled onto this host (matched by hostname).
    self.watcher.add(
        "%(base)s/pods?fieldSelector=spec.nodeName=%(node_name)s" % {
            'base': k_const.K8S_API_BASE,
            'node_name': socket.gethostname()})
    self.watcher.start()
def __init__(self, path, threaded=True, timeout=None):
    """Initialize lock state for *path* (lock file is '<path>.lock').

    >>> lock = LockBase('somefile')
    >>> lock = LockBase('somefile', threaded=False)
    """
    super(LockBase, self).__init__(path)
    self.lock_file = os.path.abspath(path) + ".lock"
    self.hostname = socket.gethostname()
    self.pid = os.getpid()
    if threaded:
        t = threading.current_thread()
        # Thread objects in Python 2.4 and earlier do not have ident
        # attrs.  Work around that.
        ident = getattr(t, "ident", hash(t))
        self.tname = "-%x" % (ident & 0xffffffff)
    else:
        self.tname = ""
    dirname = os.path.dirname(self.lock_file)

    # unique name is mostly about the current process, but must
    # also contain the path -- otherwise, two adjacent locked
    # files conflict (one file gets locked, creating lock-file and
    # unique file, the other one gets locked, creating lock-file
    # and overwriting the already existing lock-file, then one
    # gets unlocked, deleting both lock-file and unique file,
    # finally the last lock errors out upon releasing.
    self.unique_name = os.path.join(dirname,
                                    "%s%s.%s%s" % (self.hostname,
                                                   self.tname,
                                                   self.pid,
                                                   hash(self.path)))
    self.timeout = timeout
def __init__(self, service, role_source, configfile=DEFAULT_CONFIGFILE):
    """Set up state for managing *service* roles via the local Consul agent."""
    self.service = service
    self.role_source = role_source
    self.configfile = configfile
    # Local Consul HTTP API; one session is reused for all requests.
    self.api_endpoint = 'http://127.0.0.1:8500/v1'
    self.api_session = requests.Session()
    self.hostname = gethostname()
    self.short_hostname = self.hostname.split('.')[0]
    self.update_service = False
    self.valid_states = ['master', 'slave', 'fail']
    # KV key under which the current leader's session is stored.
    self.leader_uri = '%s/kv/session/%s/leader' % (self.api_endpoint,
                                                   self.service)
def metadata(cwd, goos='', goarch=''):
    """Collect git and build-host metadata for a Go build rooted at *cwd*.

    Version strategy: exact git tag > non-master/non-release branch name >
    'git describe' commit hash. goos/goarch are recorded only when given.
    NOTE(review): shell=True is used with fixed command strings only, and
    check_output returns bytes on Python 3 — confirm callers expect that.
    """
    md = {
        'commit_hash': subprocess.check_output('git rev-parse --verify HEAD', shell=True, cwd=cwd).strip(),
        'git_branch': subprocess.check_output('git rev-parse --abbrev-ref HEAD', shell=True, cwd=cwd).strip(),
        # http://stackoverflow.com/a/1404862/3476121
        'git_tag': subprocess.check_output('git describe --exact-match --abbrev=0 2>/dev/null || echo ""',
                                           shell=True, cwd=cwd).strip(),
        'commit_timestamp': datetime.datetime.utcfromtimestamp(
            int(subprocess.check_output('git show -s --format=%ct', shell=True, cwd=cwd).strip())).isoformat(),
        'build_timestamp': datetime.datetime.utcnow().isoformat(),
        'build_host': socket.gethostname(),
        'build_host_os': GOENV["GOHOSTOS"],
        'build_host_arch': GOENV["GOHOSTARCH"]
    }
    if md['git_tag']:
        md['version'] = md['git_tag']
        md['version_strategy'] = 'tag'
    elif not md['git_branch'] in ['master', 'HEAD'] and not md['git_branch'].startswith('release-'):
        md['version'] = md['git_branch']
        md['version_strategy'] = 'branch'
    else:
        hash_ver = subprocess.check_output('git describe --tags --always --dirty', shell=True, cwd=cwd).strip()
        md['version'] = hash_ver
        md['version_strategy'] = 'commit_hash'
    if goos:
        md['os'] = goos
    if goarch:
        md['arch'] = goarch
    return md