The following 50 code examples, extracted from open source Python projects, illustrate how to use logger.Logger().
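These examples come from unrelated projects, so each project's Logger class has its own constructor and methods: some take a log file path and a component name (as in the IPC and network-layer threads below), others take no arguments or keyword options such as log_depth. For orientation only, here is a minimal, hypothetical sketch of the kind of interface most of these call sites assume; the file handling and the info/warn/debug method names are illustrative assumptions, not any one project's actual implementation.

# A minimal, hypothetical Logger sketch (not taken from any project below).
# Assumed interface: optional log file path, optional component name, and
# info/warn/debug methods that prepend a timestamp and the component name.
import datetime


class Logger(object):

    def __init__(self, logFile=None, name="default"):
        self.name = name
        # Append to the given file if a path was supplied, else print to stdout.
        self.fd = open(logFile, "a") if logFile else None

    def _write(self, level, msg):
        line = "[{}] [{}] {}: {}".format(
            datetime.datetime.now().isoformat(), self.name, level, msg)
        if self.fd:
            self.fd.write(line + "\n")
            self.fd.flush()
        else:
            print(line)

    def info(self, msg):
        self._write("INFO", msg)

    def warn(self, msg):
        self._write("WARN", msg)

    def debug(self, msg):
        self._write("DEBUG", msg)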
def __init__(self, logFile, hostIDList):
    threading.Thread.__init__(self)
    self.threadCmdLock = threading.Lock()
    self.threadCmdQueue = []
    self.threadCallbackQueue = {}
    self.threadCallbackLock = threading.Lock()
    self.sharedBufferArray = shared_buffer_array()
    self.log = logger.Logger(logFile, "core IPC Thread")
    self.hostList = hostIDList
    self.nPendingCallbacks = 0
    self.controlCenterID = -1
    self.hostIDtoPowerSimID = None
    self.powerSimIDtohostID = None
    self.transportLayer = None
    self.nHosts = len(self.hostList)
    self.init_shared_ipc_buffers()
def __init__(self, hostID, logFile, sharedBufferArray):
    threading.Thread.__init__(self)
    self.threadCmdLock = threading.Lock()
    self.threadCallbackQueue = {}
    self.threadCallbackLock = threading.Lock()
    self.nPendingCallbacks = 0
    self.threadCmdQueue = []
    self.hostID = hostID
    self.sharedBufferArray = sharedBufferArray
    self.log = logger.Logger(logFile, "Host " + str(hostID) + " IPC Thread")
    self.hostIDtoPowerSimID = None
    self.powerSimIDtohostID = None
    self.attackLayer = None
    self.raw_sock = socket.socket(socket.AF_INET, socket.SOCK_RAW, socket.IPPROTO_RAW)
    self.init_shared_ipc_buffer()
    # self.raw_sock.setsockopt(socket.IPPROTO_IP, socket.IP_HDRINCL, 1)
def __init__(self, cfgpath):
    if not os.path.exists(cfgpath):
        raise ValueError('config file does not exist')
    self.cfgpath = cfgpath
    self.log = Logger()
    # link inside content
    self.configs = {}
    # link inside content
    self.profiles = {}
    # not linked to content
    self.dotfiles = {}
    # not linked to content
    self.actions = {}
    # not linked to content
    self.prodots = {}
    if not self._load_file():
        raise ValueError('config is not valid')
def parseIndicator(self, iocFile, iocFileName):
    indicator_to_return = []
    # Read file
    try:
        xmldoc = minidom.parseString(iocFile)
    except Exception:
        logger = Logger()
        logger.info("Ignore IOC file {}".format(iocFile))
    else:
        itemlist = xmldoc.getElementsByTagName('stix:STIX_Package')
        for item in itemlist:
            id = item.attributes['id'].value
            indicator = Indicator(id, self.getFormat())
            indicator.title = self.getChildrenByTagName(item, 'stix:Title')
            indicator.evidences = self.__getChildrenEvidences__(item)
            indicator_to_return.append(indicator)
    return indicator_to_return
def checkEvidences(self, filePath):
    # Calculate the MD5
    try:
        with open(filePath, 'rb') as file:
            file_data = file.read()
        hashValue = hashlib.md5(file_data).hexdigest()
        logger = Logger()
        logger.warn("Hash file {}: {}".format(filePath, hashValue))
        if hashValue in self.evidences:
            logger = Logger()
            logger.warn("Hash md5 MATCH: %s" % filePath)
            evidence = self.evidences[hashValue]
            evidence.compromised = True
            evidence.proof.append(filePath)
    except Exception:
        traceback.print_exc()
def __init__(self, timeout, max_msg_count, msg, connect_num, dst_addr):
    import logger, tcp_client
    self.logger = logger.Logger()
    self.tcp_client = tcp_client.TcpClient(timeout, self.logger)
    self.tcp_client.set_app_data_callback(self.on_app_data)
    self.io_thread = threading.Thread(target=self.io_thread_func)
    self.io_thread.setDaemon(True)
    self.io_thread.start()
    self._max_msg_count = max_msg_count  # maximum number of messages to send
    self._msg = msg
    self._connect_num = connect_num
    self._dst_addr = dst_addr
    self.start_time = 0  # time the test started
    self.end_time = 0  # time the test finished
    self.recv_bytes = 0  # total bytes received
    self.recv_msg_count = 0  # total messages received
def dispatch_remote_integrity_checker(args):
    """
    Dispatch the main remote integrity tool
    :param args: Arguments passed to the script
    :return: None
    """
    config = load_config(path=args.config)
    server = Server(config=config)
    server.connect()
    output = server.acquire_checksum_list()
    logger = Logger(config=config)
    integrity = Integrity(config=config)
    integrity.on_events_detected += logger.dispatch_syslog
    integrity.on_events_detected += logger.dispatch_events_mail
    integrity.on_events_detected += logger.dispatch_telegram_msg
    integrity.load_database()
    integrity.identify(output)
    integrity.print_statistics()
    database.commit()
def __init__(self, log=False):
    """
    Keyword Arguments:
        log {object} -- object of logger (default: {False})

    Raises:
        FileNotFoundError -- raised when UFW not installed
    """
    self.log = log
    if log is False:
        from logger import Logger
        self.log = Logger()
    if not os.path.isfile('/usr/sbin/ufw'):  # Detect if ufw installed
        print(avalon.FM.BD + avalon.FG.R +
              '\nWe have detected that you don\'t have UFW installed!' + avalon.FM.RST)
        print('UFW Firewall function requires UFW to run')
        if not self.sysInstallPackage("ufw"):
            avalon.error("ufw is required for this function. Exiting...")
            raise FileNotFoundError("File: \"/usr/sbin/ufw\" not found")
def __init__(self, interface, log=False):
    """
    Arguments:
        interface {string} -- name of interface to handle
        log {object} -- object of logger (default: {False})

    Raises:
        FileNotFoundError -- raised when arptables not installed
    """
    self.log = log
    if log is False:
        from logger import Logger
        self.log = Logger()
    self.interface = interface
    installer = Installer()
    if not os.path.isfile('/usr/bin/arptables') and not os.path.isfile('/sbin/arptables'):
        # Detect if arptables installed
        print(avalon.FM.BD + avalon.FG.R +
              '\nWe have detected that you don\'t have arptables installed!' + avalon.FM.RST)
        print('SCUTUM requires arptables to run')
        if not installer.sysInstallPackage("arptables"):
            avalon.error("arptables is required for scutum. Exiting...")
            raise FileNotFoundError("File: \"/usr/bin/arptables\" and \"/sbin/arptables\" not found")
def test_config(self):
    instance = Logger()
    self.assertEqual(
        instance.config(
            request_id='request_id',
            original_job_id='original_job_id',
            job_id='job_id',
            artifact_revision_id='artifact_revision_id',
            pipeline_execution_id='pipeline_execution_id',
            pipeline_action='pipeline_action',
            stage_name='stage_name',
            pipeline_name='pipeline_name',
            loglevel='loglevel',
            botolevel='botolevel'),
        None)
    self.assertEqual(type(instance.log), logging.LoggerAdapter)
    self.assertEqual(logging.getLogger('boto3').level, 40)
    self.assertEqual(instance.log.logger.level, 20)
    self.assertEqual(instance.request_id, 'request_id')
    self.assertEqual(instance.original_job_id, 'original_job_id')
    self.assertEqual(instance.job_id, 'job_id')
    self.assertEqual(instance.pipeline_execution_id, 'pipeline_execution_id')
    self.assertEqual(instance.artifact_revision_id, 'artifact_revision_id')
    self.assertEqual(instance.pipeline_action, 'pipeline_action')
    self.assertEqual(instance.stage_name, 'stage_name')
def setUp(self):
    # Database settings.
    self.host = 'localhost'
    self.port = '3306'
    self.test_database_name = "TEST_CHECKSUM_MANAGER_DB"
    self.test_table_name = "TEST_TABLE"
    self.empty_table_name = ""
    self.bad_schema_table = "BAD_SCHEMA_TABLE"
    # Setup connection information that lets the tester
    # setup different states for testing.
    self.connect = mysql.connector.connect(
        user='root', passwd='', host=self.host, port=self.port)
    self.cursor = self.connect.cursor()
    self.db_connector = DBConnector(db_name=self.test_database_name)
    self.logger = Logger()
    self.delete_test_db()
def __init__(self):
    """
    Initialisation of Parser
    """
    self.parser = argparse.ArgumentParser()
    self.help_af = "To add a file to checksum tuple database, type: 'cmon -af <filename>'"
    self.help_rf = "To remove a file from checksum tuple database, type: 'cmon -rf <filename>'"
    self.help_lf = "To get all tuples from checksum tuple database, type: 'cmon -lf'"
    self.help_ar = "To add an email to recipient database, type: 'cmon -ar <email>'"
    self.help_rr = "To remove an email from recipient database, type: 'cmon -rr <email>'"
    self.help_lr = "To get list of recipients from recipient database, type: 'cmon -lr'"
    self.start = "To start the daemon, type: 'cmon --start'"
    self.stop = "To stop the daemon, type: 'cmon --stop'"
    self.status = "To get the status of daemon, type: 'cmon --status'"
    self.restart = "To restart the daemon, type: 'cmon --restart'"
    self.build_parser()
    self.logger = Logger()
def __init__(self, email_server="127.0.0.1", email_port=587,
             email_username="", email_pwd=""):
    """
    Set up connection information and authentication tokens to allow user
    to access smtp server.

    :param email_server: IP Address of SMTP server for sending mail.
    :type email_server: string
    :param email_port: Port to use to send email
    :type email_port: int
    :param email_username: Authentication username for SMTP server.
    :type email_username: string
    :param email_pwd: Authentication password for SMTP server.
    :type email_pwd: string
    """
    self.email_port = email_port
    self.email_server = email_server
    self.gmail_user = email_username
    self.gmail_pwd = email_pwd
    self.logger = Logger()
def __init__(self, population_size, vacc_percentage, virus_name,
             mortality_rate, basic_repro_num, initial_infected=1):
    self.population_size = population_size
    self.population = []
    self.total_infected = 0
    self.current_infected = 0
    self.next_person_id = 0
    self.virus_name = virus_name
    self.mortality_rate = mortality_rate
    self.basic_repro_num = basic_repro_num
    self.file_name = "{}_simulation_pop_{}_vp_{}_infected_{}.txt".format(
        virus_name, population_size, vacc_percentage, initial_infected)
    # TODO: Create a Logger object and bind it to self.logger. You should use this
    # logger object to log all events of any importance during the simulation. Don't forget
    # to call these logger methods in the corresponding parts of the simulation!
    self.logger = None
    # This attribute will be used to keep track of all the people that catch
    # the infection during a given time step. We'll store each newly infected
    # person's .ID attribute in here. At the end of each time step, we'll call
    # self._infect_newly_infected() and then reset .newly_infected back to an empty
    # list.
    self.newly_infected = []
    # TODO: Call self._create_population() and pass in the correct parameters.
    # Store the array that this method will return in the self.population attribute.
def get_logger():
    global log_instance
    if log_instance:
        return log_instance
    log_instance = Logger('log.txt')
    return log_instance
def __init__(self):
    self.logger = Logger()
    self.control = ImportaController(self.logger)
def __init__(self):
    global log
    log = logger.Logger()
    log.info('Sniffer is starting up')
    with open('Stalker.log', 'w') as file:
        file.write('')
        file.close()
    log.info('Wiped previous session')
def __init__(self, hostID, logFile, hostID_To_IP):
    threading.Thread.__init__(self)
    self.threadCmdLock = threading.Lock()
    self.threadCmdQueue = []
    self.hostID = hostID
    self.IPMap = hostID_To_IP
    self.hostIP, self.listenPort = self.IPMap[self.hostID]
    self.log = logger.Logger(logFile, "Host " + str(hostID) + " Network Layer Thread")
    self.hostIDtoPowerSimID = None
    self.powerSimIDtohostID = None
    self.attackLayer = None
def __init__(self, logFile, powerSimIP):
    threading.Thread.__init__(self)
    self.threadCmdLock = threading.Lock()
    self.NetLayerRxLock = threading.Lock()
    self.NetLayerRxBuffer = []
    self.threadCmdQueue = []
    self.powerSimIP = powerSimIP
    self.log = logger.Logger(logFile, "core Network Layer Thread")
    self.transportLayer = None
def __init__(self, logFile, IPCLayer, NetworkServiceLayer):
    threading.Thread.__init__(self)
    self.threadCmdLock = threading.Lock()
    self.threadCmdQueue = []
    self.threadCallbackQueue = {}
    self.threadCallbackLock = threading.Lock()
    self.log = logger.Logger(logFile, "core Transport Layer Thread")
    self.IPCLayer = IPCLayer
    self.NetServiceLayer = NetworkServiceLayer
    self.nPendingCallbacks = 0
    self.sendSocket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
def __init__(self, configs):
    self.c = configs
    # init roguebox
    self.rb = RogueBox(configs)
    self.ui = UIManager.init(self.c["userinterface"], self.rb)
    self.l = Logger(log_depth=self.c["verbose"],
                    log_targets=["file", "ui"],
                    ui=self.ui)
    self.ui.on_key_press(self._act_callback)
def __init__(self, configs):
    self.rb = RogueBox(configs)
    self._pending_action_timer = None
    self.ui = UIManager.init(configs["userinterface"], self.rb)
    self.l = Logger(log_depth=configs["verbose"],
                    log_targets=["file", "ui"],
                    ui=self.ui)
    self.ui.on_key_press(self._keypress_callback)
    self._timer_value = 100
    self._pending_action_timer = self.ui.on_timer_end(self._timer_value, self._act_callback)
def __init__(self, configs):
    import history, models
    # class instances
    self.rogomatic = StalkOMatic(configs)
    self.model_manager = getattr(models, configs["model_manager"])(self.rogomatic)
    self.history_manager = getattr(history, configs["history_manager"])(self)
    # configs
    self.configs = configs
    self.configs["iteration"] = 1
    self.configs["actions"] = self.rogomatic.get_actions()
    self.configs["actions_num"] = len(self.configs["actions"])
    # gui stuff
    ui = None
    log_targets = []
    if configs["logsonfile"]:
        log_targets.append("file")
    if self.configs["gui"]:
        self.ui = UIManager.init(configs["userinterface"], self.rogomatic)
        self._pending_action = None
        ui = self.ui
        log_targets.append("ui")
        self.l = Logger(log_depth=configs["verbose"], log_targets=log_targets, ui=ui)
    else:
        log_targets.append("terminal")
        self.l = Logger(log_depth=configs["verbose"], log_targets=log_targets)
    # state
    self.state = self.model_manager.reshape_initial_state(self.rogomatic.compute_state())
    self.old_state = self.state
    self.last_pos = self.rogomatic.player_pos
    self.same_pos_count = 0
    self.starting = False
def __init__(self, configs):
    import models, history
    # class instances
    self.rb = RogueBox(configs)
    self.model_manager = getattr(models, configs["model_manager"])(self.rb)
    self.history_manager = getattr(history, configs["history_manager"])(self)
    # configs
    self.configs = configs
    self.configs["iteration"] = 1
    self.configs["actions"] = self.rb.get_actions()
    self.configs["actions_num"] = len(self.configs["actions"])
    # gui stuff
    ui = None
    log_targets = []
    if configs["logsonfile"]:
        log_targets.append("file")
    if self.configs["gui"]:
        self.ui = UIManager.init(configs["userinterface"], self.rb)
        self._pending_action = None
        ui = self.ui
        log_targets.append("ui")
        self.l = Logger(log_depth=configs["verbose"], log_targets=log_targets, ui=ui)
    else:
        log_targets.append("terminal")
        self.l = Logger(log_depth=configs["verbose"], log_targets=log_targets)
    # state
    self.state = self.model_manager.reshape_initial_state(self.rb.compute_state())
    self.old_state = self.state
    # model
    self.model = self.model_manager.build_model()
    self.target_model = self.model_manager.build_model()
    self.target_model.set_weights(self.model.get_weights())
    # resume from file
    # load weights, transitions history and parameters from assets, if any
    self._load_progress()
def __init__(self, pe_db_address=None, logger=None, pe_db_handler=None):
    self.logger = logger
    if self.logger is None:
        self.logger = Logger()
    self.pe_db_address = pe_db_address
    if self.pe_db_address is None:
        self.pe_db_address = "pedb.db"
    self.pe_db_handler = pe_db_handler
    self.pe_db = self.pe_connect_to_database(self.pe_db_address)
def __init__(self, path="/opt/tm/logs/", project_name=None):
    """Tasks manager constructor"""
    self.log = Logger(path, project_name)
    if not self._check_if_exists():
        self.create_table("NotStarted", "WorkingOn", "Completed")
    # Use this queue to save the most recent new, modified, on process or
    # completed tasks. These tasks should be saved in tuples along with
    # their corresponding table. The length of this queue should not be
    # greater than 10 tasks.
    self.recent_tasks = TaskCache(10)
    # Lets the application know whether there are partial commits left to
    # push to remote repository.
    self.partials_exist = Partials(path)
def __init__(self):
    self.logger = Logger('validator.py')
    self.valid_cfg = VALIDATE_CONFIG
    self.result = []
    self.proxies = {}
def __init__(self):
    self.proxylist = []
    self.logger = Logger('crawler.py')
def __init__(self, PORT):
    self.port = PORT
    self.logger = Logger('server.py')
    self.run()
def __init__(self, base='.', create=True, backup=True, dry=False,
             safe=False, quiet=False, diff=True):
    self.create = create
    self.backup = backup
    self.dry = dry
    self.safe = safe
    self.base = base
    self.quiet = quiet
    self.diff = diff
    self.comparing = False
    self.log = Logger()
def __init__(self, key, action):
    self.key = key
    self.action = action
    self.log = Logger()
def recoverIOC(self, data_path, api_keys): logger = Logger() logger.info("DummyRecovery.recoverIOC")
def recoverIOC(self, data_path, api_keys): logger = Logger() logger.info("OTXRecovery.recoverIOC") # Record the Starting Time startTime = time.time() dataPath = data_path + "/" + Format.OPENIOC_10.value + "/" key = OTX_KEY if KEY_NAME in api_keys: key = api_keys[KEY_NAME] # Create data dir if not os.path.exists(dataPath): os.makedirs(dataPath) otx = OTXv2(key) pulses = otx.getall() logger.info("Download complete - %s events received" % len(pulses) ) # For each pulse get all ioc for pulse in pulses: n = json_normalize(pulse) url = OTX_GET_URL.format(n.id[0]) file_name = dataPath + n.id[0] + ".ioc" # HTTP Request headers = {'X-OTX-API-KEY': key, 'User-Agent': OTX_USR_AGT, "Content-Type": "application/json"} data = {} params = {'format': Format.OPENIOC_10.value} response = requests.post(url, params=params, data=json.dumps(data), headers=headers) with open(file_name, "wb") as code: code.write(response.content) logger.debug("Download OpenIOC ioc file: " + n.id[0] + " - " + n["name"][0] + " -> " + file_name) # Trace the end time and calculate the duration endTime = time.time() - startTime logger.info('OTXRecovery finished on: ' + str(endTime) + ' seconds')
def parseIndicator(self, iocFile, iocFileName):
    # Record the Starting Time
    startTime = time.time()
    indicator_to_return = []
    # Read file
    try:
        xmldoc = minidom.parseString(iocFile)
    except Exception:
        logger = Logger()
        logger.info("Ignore IOC file {}".format(iocFile))
    else:
        # Principal Indicator
        id = os.path.splitext(iocFileName)[0]
        parent_indicator = Indicator(id, self.getFormat())
        description = self.getChildrenByTagName(
            xmldoc._get_firstChild(), "description")
        parent_indicator.description = description
        indicator_to_return.append(parent_indicator)
        children_indicators = []
        itemlist = xmldoc._get_firstChild().getElementsByTagName("Indicator")
        for item in itemlist:
            children_indicator = Indicator(item.attributes['id'].value, self.getFormat())
            children_indicator.operator = item.attributes['operator'].value
            children_indicator.evidences = self.__getChildrenEvidences__(item)
            children_indicator.parent = parent_indicator
            children_indicators.append(children_indicator)
        parent_indicator.children = children_indicators
    return indicator_to_return
def extractIndicatorOfCompromise(self):
    logger = Logger()
    logger.info('Start to extract indicator of compromise from data repository: {}'.format(self.data_path))
    # Record the Starting Time
    startTime = time.time()
    indicators_to_return = []
    formats = Format.getFormats()
    for format in formats:
        iocDB = IocHandler(self.data_path + "/" + format.value + "/")
        try:
            all_iocs = iocDB.get_all_ioc(format)
        except InvalidDataPath:
            logger.info("Ignore IOC format {}".format(format.value))
            continue
        logger.info("Getting IOC files with format: " + format.value)
        parser = IOCParserFactory.createParser(format)
        for iocFileName in all_iocs:
            iocFile = iocDB.get_ioc_file(iocFileName)
            indicators = parser.parseIndicator(iocFile, iocFileName)
            indicators_to_return = indicators_to_return + indicators
    # Trace the end time and calculate the duration
    endTime = time.time() - startTime
    logger.info('Extract ({}) IOCs finished in {} seconds'.format(
        len(indicators_to_return), str(endTime)))
    return indicators_to_return
def checkEvidences(self, indicators):
    self.evidences = []
    self.__getEvidences__(indicators, DnsHostScanner.getEvidenteType())
    for evidence in self.evidences:
        host = evidence.value
        for entry_key in self.dns_entries.keys():
            entry_value = self.dns_entries[entry_key]
            if host in entry_value:
                logger = Logger()
                logger.warn("Host MATCH: %s" % host)
                evidence.compromised = True
                evidence.proof.append(entry_value)
def checkEvidences(self, filePath):
    filename = os.path.basename(filePath)
    if filename.lower() in self.evidences:
        logger = Logger()
        logger.warn("File Name MATCH: %s" % filename)
        # Look up with the lowercased name, matching the membership test above.
        evidence = self.evidences[filename.lower()]
        evidence.compromised = True
        evidence.proof.append(filePath)
def enable_logging(self, logger_obj):
    if isinstance(logger_obj, Logger):
        self.logger = logger_obj
def __init__(self):
    self.sc = SlackClient(SLACK_TOKEN)
    self.logger = Logger()
    self.logger.log("Bot initialized")
def __init__(self): self.logger = Logger() self.logger.log("New instance of scraper created") browser = webdriver.PhantomJS('phantomjs') # browser = webdriver.Chrome('./chromedriver') browser.get(URL) self.browser = browser self.logger.log("Navigated to url") time.sleep(5)
def __init__(self):
    self.logger = Logger()
    self.db = sqlite3.connect('dmv_db')
    self.cur = self.db.cursor()
    self.cur.execute('''
        CREATE table IF NOT EXISTS appointment(ts TEXT, location TEXT, appt_time TEXT);
    ''')
    self.db.commit()
def __init__(self):
    self.db = DB()
    self.logger = Logger()
    self.bot = Bot()
def __init__(self, num_actions, discount, exploration_prob, step_size, logging=True):
    self.actions = range(num_actions)
    self.discount = discount
    self.exploration_prob = exploration_prob
    self.step_size = step_size
    self.num_iters = 1
    self.weights = collections.Counter()
    self.logger = logger.Logger(agent_name='QLearningAgent', logging=logging)
    self.prev_state = None
    self.prev_action = None
def test_log_epoch_empty_log(self):
    l = logger.Logger(agent_name='test')
    l.log_epoch(epoch=0)
    log_dir = l.log_dir
    self.assertTrue(os.path.isfile(os.path.join(log_dir, 'actions.npz')))
    self.assertTrue(os.path.isfile(os.path.join(log_dir, 'rewards.npz')))
    self.assertTrue(os.path.isfile(os.path.join(log_dir, 'losses.npz')))
    shutil.rmtree(log_dir)

# class TestMovingAverage(unittest.TestCase):
#     def test_moving_average_single_item_window(self):
#         arr = [1, 2, 3]
#         actual = logger.moving_average(arr, 1)
#         self.assertSequenceEqual(actual, arr)
#
#     def test_moving_average_small_window(self):
#         arr = [1, 2, 3, 4, 5, 6, 7]
#         actual = logger.moving_average(arr, 2)
#         expected = [0.5, 1.5, 2.5, 3.5, 4.5, 5.5, 6.5]
#         self.assertSequenceEqual(actual, expected)
#
#     def test_moving_average_small_window_large_variance(self):
#         arr = [0, 9, 0, 9, 0]
#         actual = logger.moving_average(arr, 3)
#         expected = [3, 3, 6, 3, 3]
#         self.assertSequenceEqual(actual, expected)
#
#     def test_moving_average_large_window_large_variance(self):
#         arr = [0, 9, 0, 9, 0]
#         actual = logger.moving_average(arr, 4)
#         expected = [2.25, 2.25, 4.5, 4.5, 2.25]
#         self.assertSequenceEqual(actual, expected)
def __init__(self):
    self.log = Logger(prog_dir)
    self.last_hwconfig = status.get("last_hwconfig")
    self.last_position = status.get("last_position")
    self.last_line_count = status.get("last_line_count")
    self.raft_multi = status.get("raft_multi")
def main():
    if len(sys.argv) < 2:
        # GUI mode
        gui = GUI()
        gui.show_gui()
    else:
        parser = argparse.ArgumentParser()
        parser.add_argument("file", help="Path to g-code file to process")
        parser.add_argument("hw_config", help="Extruder/hotend configuration",
                            choices=HW_CONFIGS)
        parser.add_argument("--debug", help="Show debug prints", action="store_true")
        parser.add_argument("--lines", help="Purge lines to print after filament change",
                            type=int, default=LINE_COUNT_DEFAULT)
        parser.add_argument("--position",
                            help="Purge tower position. Default Auto. Auto will try to find "
                                 "a position with enough free space for the tower",
                            choices=TOWER_POSITIONS, default=AUTO)
        args = parser.parse_args()
        options = Settings()
        options.hw_config = args.hw_config
        options.purge_lines = args.lines
        options.tower_position = args.position
        log = Logger(prog_dir, gui=False, debug=args.debug)
        print_type = detect_file_type(args.file, log)
        pf = print_type(log, options)
        result_file = pf.process(args.file)
        log.info("New file saved: %s" % result_file)
def __init__(self, host_addr, timeout):
    import logger, tcp_server
    self.logger = logger.Logger()
    self.tcp_server = tcp_server.TcpServer(host_addr, timeout, self.logger)
    self.tcp_server.set_app_data_callback(self.on_app_data)
def __init__(self):
    import loop
    self._loop = loop.EventLoop(0.1, logger.Logger())
def test_init(self, config):
    config.return_value = None
    instance = Logger()
    self.assertEqual(instance.job_id, None)
    self.assertEqual(instance.request_id, 'CONTAINER_INIT')
    self.assertEqual(instance.original_job_id, None)
    config.assert_called()