The following 50 code examples, extracted from Python open-source projects, illustrate how to use syslog.syslog().
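As a quick orientation before the extracted examples, here is a minimal, self-contained sketch of the typical call pattern. The ident, facility, and message strings are illustrative choices, not taken from any of the projects below; note the syslog module is only available on Unix.

import syslog

# Open the log with an identifying prefix; LOG_PID adds the process id
# to each entry, and the facility controls where syslogd routes it.
syslog.openlog(ident="example-app", logoption=syslog.LOG_PID,
               facility=syslog.LOG_LOCAL0)

# One-argument form: logs the message at the default LOG_INFO priority.
syslog.syslog("service started")

# Two-argument form: an explicit priority comes first.
syslog.syslog(syslog.LOG_ERR, "something went wrong")

syslog.closelog()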
def publish(self, host, data):
    code = 200
    if self.isBST(data):
        success, sdata = BSTToMonasca().serialize(host, data)
    elif self.isPT(data):
        success, sdata = PTToMonasca().serialize(host, data)
    elif self.isBHD(data):
        success, sdata = BHDToMonasca().serialize(host, data)
    else:
        success = False
    if success:
        sdata = json.loads(sdata)
        for x in sdata:
            syslog.syslog(json.dumps(x))
    else:
        code = 500
    return code
def _post( self, msg ):
    if msg.type != 'sysex':
        syslog.syslog( "Err: Saw msg type: " + msg.type )
    self.receive_cond.acquire()
    curr_addr = msg.data[7:11]
    self.addr.append( curr_addr )
    curr_data = msg.data[11:-1]
    self.data.append( curr_data )
    # Signal the consumer if we've reached the expected number of messages.
    self.chunk_count += 1
    if self.chunk_count == self.target_count:
        self.chunk_count = 0
        self.receive_cond.notify()
    self.receive_cond.release()

# Concatenate caller's prefix and message, add checksum and send
# as sysex message. Handles both store and query commands.
def query_sysex_data( self, addr, len ):
    self.receive_cond.acquire()
    self.data = []
    self.addr = []
    msg = list( addr )
    msg.extend( Katana.encode_scalar(len) )
    self.target_count = (len // 241) + 1
    self._send( QUERY_PREFIX, msg )
    result = self.receive_cond.wait(5)
    if not result:
        syslog.syslog( "Error: Timeout on cond wait" )
    self.receive_cond.release()
    return self.addr, self.data

# Request sysex data (possibly requiring multiple chunks) by
# passing first and last address of desired range. It is the
# caller's responsibility to ensure the total response does not
# span address discontinuities. If that occurs the chunk count is
# likely to be over-estimated and the operation will timeout.
def query_sysex_range( self, first_addr, last_addr ):
    self.receive_cond.acquire()
    span = Katana.decode_array(last_addr) - Katana.decode_array(first_addr)
    offset = Katana.encode_scalar( span + 1 )
    self.data = []
    self.addr = []
    msg = list( first_addr )
    msg.extend( offset )
    # Calculate expected number of chunks. Maximum chunk is 255 bytes
    # with max payload of 241 data bytes.
    self.target_count = ((span + 1) // 241) + 1
    self._send( QUERY_PREFIX, msg )
    result = self.receive_cond.wait(5)
    if not result:
        syslog.syslog( "Error: Timeout on cond wait" )
    self.receive_cond.release()
    return self.addr, self.data

# Bias 4-byte sysex array by scalar value
def search_gssdp_service(self, service):
    if self.verbose:
        print("searching")
    command = 'gssdp-discover -n 3 | grep -A 1 ' + service
    try:
        process = subprocess.Popen(command, shell=True,
                                   stdout=subprocess.PIPE,
                                   stderr=subprocess.STDOUT)
    except Exception as e:
        print("search_gssdp_service: command failed:" + str(e))
        syslog.syslog("command failed:" + str(e))
    lines = b""
    while True:
        nextline = process.stdout.readline()
        if len(nextline) == 0 and process.poll() != None:
            break
        lines += nextline
    self.process_batch(lines, True)
    exitCode = process.returncode
    if self.verbose:
        print("searching done")
    return exitCode
def handle(self, *args, **options):
    """
    Always try to grab the celerybeat lock, and if you get it start
    celerybeat. If not, stop any instances of celerybeat.

    Every instance of this script has a unique ID stored in memory
    for the duration of the process. If the process dies, it'll get
    a new UUID. While it's down, another process may pick up the lock
    and start its own celerybeat instance.

    We assume there is one instance of this script and one instance
    of celerybeat (both running inside supervisor) on each machine
    in the cluster.
    """
    my_id = str(uuid.uuid4())
    environment = os.getenv("CELERY_DEFAULT_QUEUE", "none")
    lock_name = '%s:celerybeat' % environment
    while True:
        if Lock.grab(lock_name, my_id, ttl=300):
            envoy.run('sudo supervisorctl start celerybeat')
            syslog.syslog("I have the lock: %s -> %s" % (lock_name, my_id))
        else:
            syslog.syslog("Can't grab lock: %s -> %s" % (lock_name, my_id))
            envoy.run('sudo supervisorctl stop celerybeat')
        time.sleep(240)
def __add_ctype_to_index(self, ctype):
    """Take a file type, list all the files there, and add all the
    body contents to the index."""
    # Make sure BaseFiles is populated
    opendir(self.config, ctype)
    card_root = GlobalConfig.get("paths", "data_root") + "/private"
    card_path = card_root + "/" + self.config.get("paths", ctype)
    for filename in BaseFiles[ctype]:
        try:
            fnmtime = int(os.path.getmtime(card_path + "/" + filename))
        except os.error:
            syslog.syslog("Failed to get \"" + filename + "\" mtime for indexing.")
            return   # File has been removed, nothing to index
        lastmtime = ''
        try:
            lastmtime = int(float(self.searcher.document(file=unicode(filename))['mtime']))
        except index.IndexError:
            lastmtime = 0   # File hasn't been indexed
        # If small revisions were made after the fact, the indexes won't
        # be accurate unless we reindex this file now
        if lastmtime < fnmtime:
            self.__add_file_to_index(fnmtime, filename, ctype)
def out_of_content(self, card_count):
    """
    Count the number of cards that are part of our page counting. If
    we've already displayed this number of cards, we are out of content.
    """
    card_limit = 0
    for application in self.config.get("applications", "enabled").replace(" ", "").split(","):
        app_state = getattr(self, application)
        for ctype in app_state.config.get("card_properties", "pagecount").replace(" ", "").split(","):
            if ctype in app_state.config.get("card_properties", "randomize"):
                card_limit += self.page * app_state.config.getint("card_counts", ctype)
            else:
                card_limit = card_limit + getattr(app_state, ctype).file_count
    syslog.syslog("card_limit: " + str(card_limit) + " card_count: " + str(card_count))
    if card_count >= card_limit:
        return True
    else:
        return False
def __age_keypair(self, source_id, dest_id):
    """
    Migrate a signing key and an encryption key from the source_id slot
    into the dest_id slot. Afterwards, the source slot gets a new keypair.
    """
    for key_type in ["sign", "encrypt"]:
        section = key_type + "_" + source_id
        if not self.config.has_section(section):
            self.__create_slotpair(source_id)
        # If keypair is malformed, just make a new one
        # Then there will be no need to age it
        if self.config.get(section, "date") == '':
            self.__regen_keypair(source_id)
            break
        else:
            keydate = self.config.getint(section, "date")
            # syslog.syslog("age id %s keydate: %s expiry: %s"
            #               % (source_id, str(keydate), str(keydate + self.lifetime)))
            if self.time > (keydate + self.sunset):
                syslog.syslog("aging keyid " + source_id + " into " + dest_id)
                self.__age_key(key_type, source_id, dest_id)
def __regen_keypair(self, key_id):
    """
    If the datestamps on either member of a keypair have expired, or if
    the date parameter in the slot is un-set for either of the pair,
    generate new keys for both slots.
    """
    for key_type in ["sign", "encrypt"]:
        section = key_type + "_" + key_id
        if not self.config.has_section(section):
            self.__create_slotpair(key_id)
        if self.config.get(section, "date") == '':
            self.__write_keypair(key_id)
            break
        else:
            keydate = self.config.getint(section, "date")
            # syslog.syslog("regen id %s keydate: %s expiry: %s"
            #               % (key_id, str(keydate), str(keydate + self.lifetime)))
            if self.time > (keydate + self.lifetime):
                syslog.syslog("regen keyid " + key_id)
                self.__write_keypair(key_id)
                break
def checkAgents(agents):
    msg = ""
    for key, value in agents.iteritems():
        print (key, str(value['status']))
        if (str(value['status']) == STATUS_ERROR):
            errMsg = "Unable to reach agent " + str(key) + " at " + str(value['host'])
            syslog.syslog(syslog.LOG_ERR, errMsg)
            msg = msg + errMsg + "\n"
    if (msg != ""):
        print "Errors found in Flume Agents"
        print msg
        sendEmail(msg)
    return

##################################################################################################
# Function to check whether a flume agent can be contacted using the json rest api
##################################################################################################
def log(msg, level='INFO', error=False):
    """Logging facility setup.

    args:
        msg (str): The message to log.

        level (str): The priority level for the message. (Default: INFO)
            See :mod:`syslog` for more options.

        error (bool): Flag if this is an error condition.
    """
    if error:
        level = "ERR"
        print "ERROR: {0} ({1}) {2}".format(os.path.basename(sys.argv[0]), level, msg)
    if DEBUG:
        # Print to console
        print "{0} ({1}) {2}".format(os.path.basename(sys.argv[0]), level, msg)
    else:
        if level == 'DEBUG':
            # Don't send DEBUG messages unless --debug was also set.
            return
        priority = ''.join(["syslog.LOG_", level])
        syslog.syslog(eval(priority), msg)
def notify(msg, level='INFO', error=False, uptime=0, out=sys.stdout):
    """Manage notifications

    args:
        msg (str): The message to log.

        level (str): The priority level for the message. (Default: INFO)
            See :mod:`syslog` for more options.

        error (bool): Flag if this is an error condition.
    """
    if SYSLOG:
        log(msg, level, error)
    if SNMP:
        send_trap(SNMP_SETTINGS, msg, uptime=uptime, test=False)
    if out != sys.stdout:
        out.write(msg)
def send_mail(json_string):
    # Extract sender and subject
    json_blob = json.loads(json_string)
    sender = json_blob['headers']['From']
    sender = re.sub('^.*\<', '', sender)
    EMAIL_TO = re.sub('\>.*$', '', sender)
    if BB_DEBUG:
        syslog.syslog(syslog.LOG_ERR, 'Invoked send_mail(json_string) for '+EMAIL_TO)
    subj = common_functions.extract_subject(json_blob['headers'])
    if BB_DEBUG:
        syslog.syslog(syslog.LOG_ERR, 'Invoked send_mail(json_string) subject '+subj)
    #SUBJECT = 'Extracted IOCs for: '+subj.decode("utf-8", "ignore")
    SUBJECT = 'Extracted IOCs for: '+str(codecs.utf_8_decode(subj.encode('utf8'))[0])
    if BB_DEBUG:
        syslog.syslog(syslog.LOG_ERR, 'Invoked send_mail(json_string) subject '+SUBJECT)
    msg = MIMEText(json2string(json_string), _charset='utf-8')
    msg['Subject'] = SUBJECT
    msg['From'] = EMAIL_FROM
    msg['To'] = EMAIL_TO
    if BB_DEBUG:
        syslog.syslog(syslog.LOG_ERR, 'Invoked send_mail(json_string) msg composed')
    server = smtplib.SMTP(EMAIL_SERVER)
    server.sendmail(EMAIL_FROM, EMAIL_TO, msg.as_string())
    if BB_DEBUG:
        syslog.syslog(syslog.LOG_ERR, 'Finished')
def _NewHunt(self):
    """Construct and start new GRR hunt.

    Returns:
        str representing hunt ID.

    Raises:
        RuntimeError: if no items specified for collection.
    """
    artifact_list = self.artifacts.split(',')
    if not artifact_list:
        raise RuntimeError('Artifacts must be specified for artifact collection')
    syslog.syslog('Artifacts to be collected: {0:s}'.format(self.artifacts))
    hunt_name = 'ArtifactCollectorFlow'
    hunt_args = flows_pb2.ArtifactCollectorFlowArgs(
        artifact_list=artifact_list,
        use_tsk=self.use_tsk,
        ignore_interpolation_errors=True,
        apply_parsers=False,)
    return self._StartHunt(hunt_name, hunt_args)
def _NewHunt(self):
    """Construct and start new GRR hunt.

    Returns:
        str representing hunt ID.

    Raises:
        RuntimeError: if no items specified for collection.
    """
    file_list = self.file_list.split(',')
    if not file_list:
        raise RuntimeError('File must be specified for hunts')
    syslog.syslog('Hunt to collect {0:d} items'.format(len(self.file_list)))
    self.console_out.VerboseOut(
        'Files to be collected: {0:s}'.format(self.file_list))
    hunt_name = 'FileFinder'
    hunt_action = flows_pb2.FileFinderAction(
        action_type=flows_pb2.FileFinderAction.DOWNLOAD,)
    hunt_args = flows_pb2.FileFinderArgs(
        paths=file_list,
        action=hunt_action,)
    return self._StartHunt(hunt_name, hunt_args)
def confirm_leave(self, owner, level):
    """Determine the syslog facility and severity from the given owner and level."""
    # Defaults
    facility = syslog.LOG_USER
    severity = syslog.LOG_INFO
    # Map application log levels to syslog severities
    level_info = dict(INFO=syslog.LOG_INFO,
                      WARN=syslog.LOG_WARNING,
                      ERROR=syslog.LOG_ERR,
                      DEBUG=syslog.LOG_DEBUG)
    if level in level_info.keys():
        severity = level_info[level]
    # Special-case certain owners
    if owner in ['ecms_troubleshoot']:
        severity = syslog.LOG_EMERG
    return facility, severity
def genLoopPackets(self):
    while True:
        raw = self._get_current()
        logdbg("raw data: %s" % raw)
        if raw:
            try:
                decoded = WH23xxStation.decode_weather_data(raw)
                logdbg("decoded data: %s" % decoded)
                if decoded:
                    packet = self._data_to_packet(decoded)
                    logdbg("packet: %s" % packet)
                    yield packet
            except IndexError, e:
                logerr("decode failed: %s (%s)" % (e, _fmt(raw)))
                log_traceback(loglevel=syslog.LOG_DEBUG)
        time.sleep(self._poll_interval)
def __init__(self, program_name, facility=None):
    # Default values always get evaluated, for which reason we avoid
    # using 'syslog' directly, which may not be available.
    facility = facility if facility is not None else syslog.LOG_USER
    if not syslog:
        raise RuntimeError("Syslog not available on this platform")
    super(SyslogHandler, self).__init__()
    syslog.openlog(program_name, 0, facility)
def emit(self, record):
    priority = SYSLOG_MAP.get(record.levelname, 7)
    message = self.format(record)
    syslog.syslog(priority, message)
def _exec_rgw_admin(self, args):
    try:
        cmd_args = ['radosgw-admin']
        if self.name is not None:
            cmd_args.append('--name')
            cmd_args.append(self.name)
        cmd_args.extend(args)
        out = subprocess.check_output(cmd_args)
        return json.loads(out.decode())
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, str(e))
        return {}
def main():
    exit_status = 1
    try:
        args = parse_args()

        # Make sure the exporter is only running once.
        lock_file = '/var/lock/{}.lock'.format(os.path.basename(sys.argv[0]))
        lock_fd = os.open(lock_file, os.O_CREAT)
        lock_success = False
        try:
            fcntl.flock(lock_fd, fcntl.LOCK_EX | fcntl.LOCK_NB)
            lock_success = True
        except IOError:
            msg = 'Failed to export metrics, another instance is running.'
            syslog.syslog(syslog.LOG_INFO, msg)
            sys.stderr.write(msg + '\n')
        if lock_success:
            # Create a new registry, otherwise unwanted default collectors are
            # added automatically.
            registry = prometheus_client.CollectorRegistry()
            # Register our own collector and write metrics to STDOUT.
            registry.register(CephRgwCollector(**vars(args)))
            sys.stdout.write(prometheus_client.generate_latest(registry))
            sys.stdout.flush()
            # Unlock the lock file.
            fcntl.flock(lock_fd, fcntl.LOCK_UN)
            exit_status = 0
    except Exception as e:
        syslog.syslog(syslog.LOG_ERR, str(e))
    # Cleanup
    os.close(lock_fd)
    if lock_success:
        try:
            os.unlink(lock_file)
        except:
            pass
    sys.exit(exit_status)
def pam_sm_setcred( pamh, flags, argv ):
    """ pam_sm_setcred """
    syslog.syslog( syslog.LOG_INFO, "Please note: pam_linotp does not support setcred" )
    return pamh.PAM_CRED_UNAVAIL
def pam_sm_acct_mgmt( pamh, flags, argv ):
    """ pam_sm_acct_mgmt """
    syslog.syslog( syslog.LOG_INFO, "Please note: pam_linotp does not support acct_mgmt" )
    return pamh.PAM_SERVICE_ERR
def pam_sm_chauthtok( pamh, flags, argv ):
    """ pam_sm_chauthtok """
    syslog.syslog( syslog.LOG_INFO, "Please note: pam_linotp does not support chauthtok" )
    return pamh.PAM_SERVICE_ERR
def pam_sm_open_session( pamh, flags, argv ):
    """ pam_sm_open_session """
    syslog.syslog( syslog.LOG_INFO, "Please note: pam_linotp does not support open_session" )
    return pamh.PAM_SERVICE_ERR
def pam_sm_close_session( pamh, flags, argv ):
    """ pam_sm_close_session """
    syslog.syslog( syslog.LOG_INFO, "Please note: pam_linotp does not support close_session" )
    return pamh.PAM_SERVICE_ERR

##eof##########################################################################
def __init__(self):
    syslog.openlog(facility=syslog.LOG_DAEMON)
def info(self, message):
    syslog.syslog(syslog.LOG_INFO, message)
def error(self, message):
    syslog.syslog(syslog.LOG_ERR, message)
def close(self):
    syslog.syslog(syslog.LOG_INFO, 'Closing HSVerifyd')
    syslog.closelog()
def start(self):
    """Start the daemon."""
    # Check for a pidfile to see if the daemon already runs
    try:
        with open(self.pidfile, 'r') as pidf:
            pid = int(pidf.read().strip())
    except Exception:
        pid = None
    if pid:
        message = "pidfile {0} already exist. " + \
                  "Daemon already running?\n"
        sys.stderr.write(message.format(self.pidfile))
        sys.exit(1)
    # Start the daemon
    self.daemonize()
    syslog.syslog(syslog.LOG_INFO,
                  '{}: started'.format(os.path.basename(sys.argv[0])))
    self.on_start()
    while True:
        try:
            self.run()
        except Exception as e:  # pylint: disable=W0703
            syslog.syslog(syslog.LOG_ERR,
                          '{}: {!s}'.format(os.path.basename(sys.argv[0]), e))
            time.sleep(2)
def stop(self, once=False):
    """Stop the daemon."""
    # Get the pid from the pidfile
    try:
        with open(self.pidfile, 'r') as pidf:
            pid = int(pidf.read().strip())
    except Exception:
        pid = None
    if not pid:
        message = "pidfile {0} does not exist. " + \
                  "Daemon not running?\n"
        sys.stderr.write(message.format(self.pidfile))
        return  # not an error in a restart
    # Try killing the daemon process
    try:
        for x in xrange(0, 10):  # Waits max 1s
            os.kill(pid, signal.SIGTERM)
            if once:
                break
            for x in xrange(50):
                os.kill(pid, 0)
                time.sleep(0.1)
            time.sleep(0.1)
        os.kill(pid, signal.SIGKILL)
    except OSError as err:
        e = str(err.args)
        if e.find("No such process") > 0:
            if os.path.exists(self.pidfile):
                os.remove(self.pidfile)
        else:
            print(str(err.args))
            sys.exit(1)
    syslog.syslog(syslog.LOG_INFO,
                  '{}: stopped'.format(os.path.basename(sys.argv[0])))
def __init__(self, name, facility=syslog.LOG_DAEMON):
    logging.Handler.__init__(self)
    syslog.openlog(name, syslog.LOG_PID, facility)
def close(self):
    syslog.closelog()
    logging.Handler.close(self)
def emit(self, record):
    priority = self.priority_map.get(record.levelno, syslog.LOG_INFO)
    message = self.format(record)
    if isinstance(message, unicode):
        message = message.encode('UTF-8')
    for line in message.rstrip().split('\n'):
        syslog.syslog(priority, line)

# noinspection PyMethodMayBeStatic
def start_syslog(name='python-app', facility=syslog.LOG_DAEMON,
                 capture_stdout=IfNotInteractive, capture_stderr=IfNotInteractive):
    if syslog is Null:
        raise RuntimeError("syslog is not available on this platform")
    for handler in root_logger.handlers[:]:
        root_logger.removeHandler(handler)
    handler = SyslogHandler(name, facility)
    handler.setFormatter(_default_formatter)
    root_logger.addHandler(handler)
    if capture_stdout:
        sys.stdout = StandardIOLogger(root_logger.info)
    if capture_stderr:
        sys.stderr = StandardIOLogger(root_logger.error)
def __init__(self):
    syslog.openlog(ident="BroadView")
def __del__(self):
    syslog.closelog()
def __repr__(self):
    return "BroadView syslog Publisher"
def logmsg(level, msg):
    syslog.syslog(level, 'restx: Influx: %s' % msg)
def logdbg(msg):
    logmsg(syslog.LOG_DEBUG, msg)
def loginf(msg):
    logmsg(syslog.LOG_INFO, msg)
def logerr(msg):
    logmsg(syslog.LOG_ERR, msg)

# some unit labels are rather lengthy. this reduces them to something shorter.