The following 50 code examples, extracted from open-source Python projects, illustrate how to use multiprocessing.Process.__init__().
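Nearly all of the examples below follow the same pattern: a subclass of multiprocessing.Process whose __init__ stores its own state and forwards to Process.__init__() so the process machinery is initialized before start() is called. As a minimal, self-contained sketch of that pattern (the Worker class and its None-sentinel queue protocol are illustrative, not taken from any project below):

from multiprocessing import Process, Queue

class Worker(Process):
    def __init__(self, task_q, name=None):
        # Forward to Process.__init__ first so the multiprocessing
        # machinery is set up before any state of our own.
        Process.__init__(self, name=name)
        self.task_q = task_q

    def run(self):
        # Executes in the child process after start(); drain tasks
        # until a None sentinel arrives.
        while True:
            task = self.task_q.get()
            if task is None:
                break
            print(task)

if __name__ == '__main__':
    q = Queue()
    w = Worker(q, name='worker-1')
    w.start()
    q.put('hello')
    q.put(None)
    w.join()

Note that __init__ runs in the parent process; only run() executes in the child, so attributes set in __init__ must be picklable (or inheritable via fork) to reach the child.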
def __init__(self, env, train_q, play_pipe, is_training):
    self.env = env
    self.input_size = self.env.state_n
    self.output_size = 12
    #self.dis = 0.9
    self.dis = 0.9
    self.val = 0
    self.save_path = "./save/save_model"
    self.max_episodes = 2000001
    self.replay_buffer = deque()
    self.episode_buffer = train_q
    self.play_pipe = play_pipe
    self.MAX_BUFFER_SIZE = 20000
    self.frame_action = 3
    self.training = is_training
def __init__(self, model_identifier, seconds_to_wait=60, *args):
    """
    :param model_identifier: identifies the model this process waits on
    :param seconds_to_wait: number of seconds to sleep
    :param args: extra arguments passed to the process
    """
    Process.__init__(self)
    self.seconds_to_wait = seconds_to_wait
    self.function_args = args
    logging.info(str(args))
    self.model_identifier = model_identifier
    # TODO: this call does not work because subprocesses cannot invoke this method
    # signal.signal(signal.SIGUSR1, terminate)
def __init__(self, settings, Sim, stage):
    self.outFnames = []
    self.params = []
    self.sigmas = []
    self.bestRedChiSqrs = []
    self.avgAcceptRates = []
    self.acceptStrs = []
    self.settings = settings
    self.stgNsampDict = {'SA': 'nSAsamp', 'ST': 'nSTsamp', 'MC': 'nSamples',
                         'MCMC': 'nSamples', 'emcee': 'nSamples'}
    self.Sim = Sim
    self.stage = stage
    if stage == "emcee":
        self.numProcs = 1
    elif stage == 'MCMC':
        self.numProcs = settings['nMCMCcns']
    else:
        self.numProcs = settings['nChains']
    self.retStr = ''
    self.latestRetStr = ''
def __init__(self, name, includes):
    self._name = name
    self._to_frame = "yumi_{0}".format(name)
    self._comm_timeout = YMC.COMM_TIMEOUT
    self._bufsize = YMC.BUFSIZE
    self._ip = YMC.IP
    self._time_offset = 0
    self._qs = {
        'cmd': {},
        'data': {}
    }
    self.includes = includes
    for name in self.includes:
        self._qs['cmd'][name] = Queue()
        self._qs['data'][name] = Queue(maxsize=1)
    self.msgs_map = {
        'states': message_to_state,
        'torques': message_to_torques,
        'poses': message_to_pose
    }
def __init__(self, context):
    """
    Publishes asynchronous messages

    :param context: general settings
    :type context: Context

    """
    Process.__init__(self)
    self.daemon = True
    self.context = context
    self.fan = Queue()
    self.socket = None  # allow socket injection for tests
def __init__(self, platform_index, device_index, ip, port):
    Process.__init__(self)
    Logger.__init__(self)
    # self.logger_level ^= Logger.MSG_VERBOSE
    self.daemon = True
    self.exit_evt = Event()
    self.running = Value('i', 0)
    self.platform_index = platform_index
    self.device_index = device_index
    self.ip = ip
    self.port = port
    self.uuid = uuid.uuid1().hex
    self.ocl_ga = None

## Terminate worker process, this should be only called when OpenCLGAClient
#  is shutting down. The exit_evt will be set to break the wait in the
#  process's run.
def __init__(self, in_queue, out_queue, conf, conf_lock):
    Process.__init__(self)
    self._in_queue = in_queue
    self._out_queue = out_queue
    self._stop = Event()
    self._stop.set()
    self._new_conf = Event()
    self._new_conf.clear()
    self._conf_lock = conf_lock
    self._conf = conf
    self._jpg_buffer = deque([])
    self._client = None
    self._error_time = None
def __init__(self, pID, pName, pVM, pTarget, pDuration=60):
    """
    Initialize the test

    :param pID: Used to identify the process
    :type pID: int
    :param pName: A unique name given to a process
    :type pName: str
    :param pVM: The Genymotion AVD name and (optionally) snapshot to run the test on
    :type pVM: tuple
    :param pTarget: The path to the APK under test
    :type pTarget: str
    :param pDuration: The duration of the Droidutan test in seconds (default: 60s)
    :type pDuration: int
    """
    Process.__init__(self, name=pName)
    self.processID = pID
    self.processName = pName
    self.processVM = pVM
    self.processTarget = pTarget
    self.processDuration = pDuration
def __init__(self, pID, pName, pVM, pTarget, pSt="", pDuration=60):
    """
    Initialize the test

    :param pID: Used to identify the process
    :type pID: int
    :param pName: A unique name given to a process
    :type pName: str
    :param pVM: The Genymotion AVD name to run the test on
    :type pVM: str
    :param pTarget: The path to the APK under test
    :type pTarget: str
    :param pSt: The snapshot of the AVD in case restoring is needed
    :type pSt: str
    :param pDuration: The duration of the Droidutan test in seconds (default: 60s)
    :type pDuration: int
    """
    Process.__init__(self, name=pName)
    self.processID = pID
    self.processName = pName
    self.processVM = pVM
    self.processTarget = pTarget
    self.processSnapshot = pSt
    self.processDuration = pDuration
def __init__(self,
             node_id,
             provider_config,
             auth_config,
             cluster_name,
             file_mounts,
             init_cmds,
             runtime_hash,
             redirect_output=True,
             process_runner=subprocess):
    self.daemon = True
    self.process_runner = process_runner
    self.provider = get_node_provider(provider_config, cluster_name)
    self.ssh_private_key = auth_config["ssh_private_key"]
    self.ssh_user = auth_config["ssh_user"]
    self.ssh_ip = self.provider.external_ip(node_id)
    self.node_id = node_id
    self.file_mounts = file_mounts
    self.init_cmds = init_cmds
    self.runtime_hash = runtime_hash
    if redirect_output:
        self.logfile = tempfile.NamedTemporaryFile(
            mode="w", prefix="node-updater-", delete=False)
        self.output_name = self.logfile.name
        self.stdout = self.logfile
        self.stderr = self.logfile
    else:
        self.logfile = None
        self.output_name = "(console)"
        self.stdout = sys.stdout
        self.stderr = sys.stderr
def __init__(self, client_receiver_label, client_socket, ipv4_address, tcp_port,
             to_controller_queue, connection_pool):
    """
    Constructor.

    :param client_receiver_label: A label to derive the concrete functionality of this client receiver
    :param client_socket: The socket from/to the affected client
    :param ipv4_address: The IPv4 address of the client
    :param tcp_port: The TCP port of the client
    :param to_controller_queue: The queue which connects this client receiver with the responsible controller
    :param connection_pool: If the socket crashes, the connection will be removed from this connection pool
    """
    Process.__init__(self)
    self.client_receiver_label = client_receiver_label
    self.client_socket = client_socket
    self.identifier = '%s:%d' % (ipv4_address, tcp_port)
    self.to_controller_queue = to_controller_queue
    self.connection_pool = connection_pool
def __init__(self, q):
    Process.__init__(self)
    self.q = q
    self.tmp = 0
    self.Initialised = 0
    self.sensor = mraa.I2c(0)
    self.sensor.address(TCS34725_ADDRESS)
    self.sensor.writeByte(TCS34725_COMMAND_BIT | TCS34725_ID)
    d = self.sensor.read(1)
    bdata = bytearray(d)
    d = bdata[0]
    if d != 0x44 and d != 0x10:
        self.Initialised = 0
    else:
        self.Initialised = 1
        self.sensor.writeByte(TCS34725_ATIME | TCS34725_INTEGRATIONTIME)
        self.sensor.writeByte(TCS34725_CONTROL | TCS34725_GAIN)
        self.sensor.writeByte(TCS34725_ENABLE | TCS34725_ENABLE_PON)
        time.sleep(0.01)
        self.sensor.writeByte(TCS34725_ENABLE | (TCS34725_ENABLE_PON | TCS34725_ENABLE_AEN))
def __init__(self, q):
    Process.__init__(self)
    self.q = q
    self.u = mraa.Uart(0)
    self.u.setBaudRate(9600)
    self.u.setMode(8, mraa.UART_PARITY_NONE, 1)
    self.u.setFlowcontrol(False, False)
    self.cfpm1_0_avg = move_avge.move_avg(1)
    self.cfpm2_5_avg = move_avge.move_avg(1)
    self.cfpm10_avg = move_avge.move_avg(1)
    self.pm1_0_avg = move_avge.move_avg(1)
    self.pm2_5_avg = move_avge.move_avg(1)
    self.pm10_avg = move_avge.move_avg(1)
    self.tmp_avg = move_avge.move_avg(1)
    self.rh_avg = move_avge.move_avg(1)
def __init__(self, backup_stop, tail_stop, uri, config, timer, oplog_file, state, dump_gzip=False):
    Process.__init__(self)
    self.backup_stop = backup_stop
    self.tail_stop = tail_stop
    self.uri = uri
    self.config = config
    self.timer = timer
    self.oplog_file = oplog_file
    self.state = state
    self.dump_gzip = dump_gzip
    self.flush_docs = self.config.oplog.flush.max_docs
    self.flush_secs = self.config.oplog.flush.max_secs
    self.status_secs = self.config.oplog.tailer.status_interval
    self.status_last = time()

    self.cursor_name = "mongodb_consistent_backup.Oplog.Tailer.TailThread"
    self.timer_name = "%s-%s" % (self.__class__.__name__, self.uri.replset)
    self.db = None
    self.conn = None
    self.count = 0
    self.first_ts = None
    self.last_ts = None
    self.stopped = False
    self._oplog = None
    self._cursor = None
    self._cursor_addr = None
    self.exit_code = 0
    self._tail_retry = 0
    self._tail_retry_max = 10

    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, self.close)
def __init__(self, state, uri, timer, config, base_dir, version, threads=0, dump_gzip=False):
    Process.__init__(self)
    self.state = state
    self.uri = uri
    self.timer = timer
    self.config = config
    self.base_dir = base_dir
    self.version = version
    self.threads = threads
    self.dump_gzip = dump_gzip
    self.user = self.config.username
    self.password = self.config.password
    self.authdb = self.config.authdb
    self.ssl_ca_file = self.config.ssl.ca_file
    self.ssl_crl_file = self.config.ssl.crl_file
    self.ssl_client_cert_file = self.config.ssl.client_cert_file
    self.read_pref_tags = self.config.replication.read_pref_tags
    self.binary = self.config.backup.mongodump.binary
    self.timer_name = "%s-%s" % (self.__class__.__name__, self.uri.replset)

    self.exit_code = 1
    self.error_message = None
    self._command = None
    self.do_stdin_passwd = False
    self.stdin_passwd_sent = False

    self.backup_dir = os.path.join(self.base_dir, self.uri.replset)
    self.dump_dir = os.path.join(self.backup_dir, "dump")
    self.oplog_file = os.path.join(self.dump_dir, "oplog.bson")

    signal(SIGINT, SIG_IGN)
    signal(SIGTERM, self.close)
def __init__(self, storage, indexname, jobqueue, resultqueue, kwargs, multisegment):
    Process.__init__(self)
    self.storage = storage
    self.indexname = indexname
    self.jobqueue = jobqueue
    self.resultqueue = resultqueue
    self.kwargs = kwargs
    self.multisegment = multisegment
    self.running = True
def __init__(self, ix, procs=None, batchsize=100, subargs=None,
             multisegment=False, **kwargs):
    # This is the "main" writer that will aggregate the results created by
    # the sub-tasks
    SegmentWriter.__init__(self, ix, **kwargs)

    self.procs = procs or cpu_count()
    # The maximum number of documents in each job file submitted to the
    # sub-tasks
    self.batchsize = batchsize
    # You can use keyword arguments or the "subargs" argument to pass
    # keyword arguments to the sub-writers
    self.subargs = subargs if subargs else kwargs
    # If multisegment is True, don't merge the segments created by the
    # sub-writers, just add them directly to the TOC
    self.multisegment = multisegment

    # A list to hold the sub-task Process objects
    self.tasks = []
    # A queue to pass the filenames of job files to the sub-tasks
    self.jobqueue = Queue(self.procs * 4)
    # A queue to get back the final results of the sub-tasks
    self.resultqueue = Queue()
    # A buffer for documents before they are flushed to a job file
    self.docbuffer = []

    self._grouping = 0
    self._added_sub = False
def __init__(self, ix, procs=None, batchsize=100, subargs=None, **kwargs):
    SegmentWriter.__init__(self, ix, **kwargs)
    self.procs = procs or cpu_count()
    self.batchsize = batchsize
    self.subargs = subargs if subargs else kwargs
    self.tasks = [SegmentWriter(ix, _lk=False, **self.subargs)
                  for _ in xrange(self.procs)]
    self.pointer = 0
    self._added_sub = False
def __init__(self, *args, **kwargs):
    MpWriter.__init__(self, *args, **kwargs)
    self.multisegment = True
def __init__(self, priority_queue_in, queue_in, queue_out, available_commands,
             network_retries, network_timeout):
    Process.__init__(self)
    self.priority_queue_in = priority_queue_in
    self.queue_in = queue_in
    self.queue_out = queue_out
    self.available_commands = available_commands

    # Set global network settings; needs to be done in each process
    SSLConnection.set_global_network_settings(network_retries, network_timeout)
def __init__(self, sp_zeroconf):
    self.public_key = utils.to_unicode(sp_zeroconf.publicKey)
    self.device_id = utils.to_unicode(sp_zeroconf.deviceId)
    self.active_user = utils.to_unicode(sp_zeroconf.activeUser)
    self.remote_name = utils.to_unicode(sp_zeroconf.remoteName)
    self.account_req = utils.to_unicode(sp_zeroconf.accountReq)
    self.device_type = utils.to_unicode(sp_zeroconf.deviceType)
    self.library_version = utils.to_unicode(sp_zeroconf.libraryVersion)
def __init__(self, port=6697):
    Process.__init__(self)
    self._application = _zeroconfserver.app
    self.port = port
def __init__(self, input_size, output_size, train_q, pipe, is_training):
    Process.__init__(self)
    self.train_q = train_q
    self.pipe = pipe
    self.input_size = input_size
    self.output_size = 12
    self.dis = 0.9
    self.save_path = "./save/save_model"
    self.training = is_training
def __init__(self, tsk_q, res_q):
    Process.__init__(self)
    self.tq = tsk_q
    self.rq = res_q
def __init__(self, func, kwargs):
    self.func = func
    self.args = kwargs
def __init__(self, job_str, tskid_int, host_str, port_str):
    self.clusterspec = '/job:%s/task:%d' % (job_str, tskid_int)
    self.hostport = '%s:%s' % (host_str, port_str)
def __init__(self, job_name, tasks_int, port_int=2222):
    self.localhost = gethostname()
    self.port = int(port_int)
    self.taskcount = int(tasks_int)
    self.job_name = job_name
    self.nodes = dict()
def __init__(self, marathon_url, job_name, num_tasks, marathon_usr, marathon_pwd,
             uri, cpu=1.0, mem=1024.0, port=2222):
    self.marathon_url = marathon_url
    self.job_name = job_name
    self.num_tasks = int(num_tasks)
    self.usr = marathon_usr
    self.pwd = marathon_pwd
    self.uri = uri
    self.cpu = cpu
    self.mem = mem
    self.port = port
    self.localhost = gethostname()
    self.nodes = dict()
def __init__(self, product_manager, logging_queue: Queue, exit_event: Event,
             ready_event: Event) -> None:
    Process.__init__(self)
    self.products = product_manager
    self.exit = exit_event
    self.ready_event = ready_event
    self.logging_queue = logging_queue
    self.order_book_manager = OrderBookManager(product_manager)
def __init__(self, product_manager: ProductManager, websocket_feed_queue: Queue,
             logging_queue: Queue, exit_event: Event, ready_event: Event) -> None:
    Process.__init__(self)
    self.websocket_feed_queue = websocket_feed_queue
    self.product_manager = product_manager
    self.exit = exit_event
    self.logging_queue = logging_queue
    self.ready_event = ready_event
    self.order_book_manager = OrderBookManager(self.product_manager)
def __init__(self, product_manager: ProductManager, websocket_feed_queue: Queue,
             logging_queue: Queue, exit_event: Event, ready_events: List[Event]) -> None:
    Process.__init__(self)
    self.websocket_feed_queue = websocket_feed_queue
    self.logging_queue = logging_queue
    self.exit = exit_event
    self.product_manager = product_manager
    self.order_book = PortfolioOrderBook(self.product_manager)
    self.portfolio = BasePortfolioGroup(self.order_book)
    self.ready_events = ready_events
    self.registered_orders = []
def __init__(self, settings, SimObj, stage, chainNum, pklFilename='',
             params=[], sigmas=[], strtTemp=1.0):
    Process.__init__(self)
    self.chainNum = chainNum
    self.log = log
    self.settings = settings
    self.stage = stage
    self.params = params
    self.sigmas = sigmas
    self.strtTemp = strtTemp
    self.Sim = SimObj
    self.pklFilename = pklFilename
def __init__(self, bestRedChiSqrs, avgAcceptRates, acceptStrs, stage, retStr, latestRetStr):
    self.bestRedChiSqrs = bestRedChiSqrs
    self.avgAcceptRates = avgAcceptRates
    self.acceptStrs = acceptStrs
    self.stage = stage
    self.retStr = retStr
    self.latestRetStr = latestRetStr
def __init__(self, name=None):
    Process.__init__(self, name=name)
    self.counter = 0
def __init__(self, name=None, lock=None, m_count=None):
    Process.__init__(self, name=name)
    self.lock = lock
    self.m_count = m_count
def __init__(self, req_q, res_q, ip, port, bufsize, timeout, debug):
    Process.__init__(self)
    self._ip = ip
    self._port = port
    self._timeout = timeout
    self._bufsize = bufsize
    self._socket = None
    self._req_q = req_q
    self._res_q = res_q
    self._current_state = None
    self._debug = debug
def __init__(self, arm_service, namespace=None, timeout=YMC.ROS_TIMEOUT):
    if namespace is None:
        self.arm_service = rospy.get_namespace() + arm_service
    else:
        self.arm_service = namespace + arm_service
    self.timeout = timeout
def __getattr__(self, name):
    """
    Override the __getattr__ method so that function calls become server requests

    If the name is a method of the YuMiArm class, this returns a function that calls
    that function on the YuMiArm instance in the server. The wait_for_res argument is
    not available remotely and will always be set to True. This is to prevent odd
    desynchronized crashes

    Otherwise, the name is considered to be an attribute, and getattr is called on the
    YuMiArm instance in the server. Note that if it isn't an attribute either, a
    RuntimeError will be raised.

    The difference here is that functions access the server *on call* and
    non-functions do so *on getting the name*

    Also note that this is __getattr__, so things like __init__ and __dict__ WILL NOT
    trigger this function as the YuMiArm_ROS object already has these as attributes.
    """
    if name in YuMiArm.__dict__:
        def handle_remote_call(*args, **kwargs):
            """ Handle the remote call to some YuMiArm function.
            """
            rospy.wait_for_service(self.arm_service, timeout=self.timeout)
            arm = rospy.ServiceProxy(self.arm_service, ROSYumiArm)
            if 'wait_for_res' in kwargs:
                kwargs['wait_for_res'] = True
            try:
                response = arm(pickle.dumps(name), pickle.dumps(args), pickle.dumps(kwargs))
            except rospy.ServiceException, e:
                raise RuntimeError("Service call failed: {0}".format(str(e)))
            return pickle.loads(response.ret)
        return handle_remote_call
    else:
        rospy.wait_for_service(self.arm_service, timeout=self.timeout)
        arm = rospy.ServiceProxy(self.arm_service, ROSYumiArm)
        try:
            response = arm(pickle.dumps('__getattribute__'), pickle.dumps(name), pickle.dumps(None))
        except rospy.ServiceException, e:
            raise RuntimeError("Could not get attribute: {0}".format(str(e)))
        return pickle.loads(response.ret)
def __init__(self, name, data_q, cmd_q, ip, port, bufsize, timeout):
    Process.__init__(self)
    self._name = name
    self._data_q = data_q
    self._cmd_q = cmd_q
    self._ip = ip
    self._port = port
    self._bufsize = bufsize
    self._timeout = timeout
    self._end_run = False
    self._socket = None
def __init__(self, connection, event_handle, server):
    self.connection = connection
    self.event_handle = event_handle
    self.server = server
def __init__(self, queue):
    self.queue = queue
def __init__(self, command_channel, event_queue, featurelist):
    BaseImplServer.__init__(self)
    Process.__init__(self)
    self.command_channel = command_channel
    self.event_queue = event_queue
    self.event = EventAdapter(event_queue)
    self.featurelist = featurelist
    self.quit = False

    self.quitin, self.quitout = Pipe()
    self.event_handle = multiprocessing.Value("i")
def __init__(self, serverImpl, ui_channel, event_queue):
    self.procserver = serverImpl
    self.ui_channel = ui_channel
    self.event_queue = event_queue
    self.connection = ServerCommunicator(self.ui_channel,
                                         self.procserver.event_handle,
                                         self.procserver)
    self.events = self.event_queue
    self.terminated = False
def __init__(self, maxsize):
    multiprocessing.queues.Queue.__init__(self, maxsize, ctx=multiprocessing.get_context())
    self.exit = False
    bb.utils.set_process_name("ProcessEQueue")
def test_publisher_dynamic_test(self):

    logging.info(u"***** publisher/dynamic test")

    publisher = Publisher(context=self.context)
    self.context.set('general.switch', 'on')

    items = [
        ('channel_A', "hello"),
        ('channel_B', "world"),
        ('channel_C', {"hello": "world"}),
    ]
    for (channel, message) in items:
        publisher.put(channel, message)
    publisher.fan.put(None)

    class MySocket(object):
        def __init__(self, context):
            self.context = context

        def send_string(self, item):
            pipe = self.context.get('pipe', [])
            pipe.append(item)
            self.context.set('pipe', pipe)

        def close(self):
            pass

    publisher.socket = MySocket(self.context)
    publisher.run()
    self.assertEqual(self.context.get('publisher.counter', 0), 3)
    self.assertEqual(
        self.context.get('pipe'),
        ['channel_A "hello"', 'channel_B "world"', 'channel_C {"hello": "world"}'])
def __init__(self, context):
    """
    Represents an information bus between publishers and subscribers

    :param context: general settings
    :type context: Context

    """
    self.context = context