We extracted the following 50 code examples from open-source Python projects to illustrate how to use threading.Lock().
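Before the extracted examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the counter and thread count are illustrative only) of the pattern they all share: create a threading.Lock() and hold it, ideally via a with block, around any access to shared state.

import threading

counter = 0
counter_lock = threading.Lock()

def increment(n):
    global counter
    for _ in range(n):
        # "with" acquires the lock and guarantees release, even on exception
        with counter_lock:
            counter += 1

threads = [threading.Thread(target=increment, args=(10000,)) for _ in range(4)]
for t in threads:
    t.start()
for t in threads:
    t.join()
print(counter)  # 40000 with the lock; without it the result may be lower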
def test_release_vif_parent_not_found(self):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client
    port_id = lib_utils.get_hash()
    pod = mock.sentinel.pod
    vif = mock.Mock()
    vif.id = port_id

    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(port_id, container_ip,
                                         container_mac)
    neutron.show_port.return_value = container_port

    m_driver.lock = mock.MagicMock(spec=threading.Lock())
    m_driver._get_parent_port.side_effect = n_exc.NeutronClientException

    self.assertRaises(n_exc.NeutronClientException, cls.release_vif,
                      m_driver, pod, vif)

    neutron.show_port.assert_called_once_with(port_id)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._remove_from_allowed_address_pairs.assert_not_called()
    neutron.delete_port.assert_not_called()
def __init__(self, protocol, serverAddr, deviceId, deviceKey, deviceHandler):
    self.serverAddr = serverAddr
    self.protocol = protocol
    self.deviceId = deviceId
    self.deviceKey = deviceKey
    self.lock = threading.Lock()
    if self.protocol == "udp":
        self.udpHeartbeatSeconds = 2
        self.udpDataPacketInterval = 3
        self.heartbeatCounter = 0
        self.stateFile = "client.dat"
    elif self.protocol == "ssl":
        self.caCertFile = "servercert.pem"
        self.deviceCertFile = "devicecert.pem"
        self.deviceKeyFile = "devicekey.pem"
        self.sslIntervalSeconds = 6
    self.deviceHandler = deviceHandler
    self.deviceHandler.service = self
def __init__(self, config):
    self.service = None
    self.webServer = None
    self.config = config
    self.httpsPort = int(self.config.get('web', 'httpsPort'))
    self.httpPort = int(self.config.get('web', 'httpPort'))
    self.adminPasswordHash = self.config.get('web', 'adminPasswordHash')
    self.apiSecret = self.config.get('web', 'apiSecret')
    self.uploadDir = self.config.get('web', 'uploadDir')
    self.dbFile = self.config.get('web', 'dbFile')
    self.httpsCertFile = self.config.get('web', 'httpsCertFile')
    self.httpsKeyFile = self.config.get('web', 'httpsKeyFile')
    self.httpsChainFile = self.config.get('web', 'httpsChainFile')
    self.localVideoPort = int(self.config.get('web', 'localVideoPort'))
    dir = os.path.dirname(os.path.realpath(sys.argv[0]))
    self.database = database.Database(self.dbFile)
    self.deviceConfig = dict()
    for deviceId, jsonConf in dict(self.config.items('devices')).iteritems():
        self.deviceConfig[deviceId] = json.loads(jsonConf, object_pairs_hook=OrderedDict)
    self.trends = dict()
    self.lock = threading.Lock()
def __init__(self, parent):
    self.parent = parent

    # Initialize variables for input data processing
    self.data_queue = Queue.Queue()
    self.empty_queue = False

    # variables for thread management
    self.is_running = True
    self.timeout_check_period = 0.1  # this is in seconds
    self.process_thread_released = False

    # create mutex locks for handling issues with Reset
    self.reset_lock = threading.Lock()
    self.reset_signal = threading.Event()

    # create and start the main thread
    self.process_thread = threading.Thread(target=self.Process)
    self.process_thread.start()
def __init__(self, methodName='runTest', orbArgs=[]):
    unittest.TestCase.__init__(self, methodName)
    args = sys.argv
    self.debuglevel = 3
    for arg in args:
        if '--debuglevel' in arg:
            self.debuglevel = arg.split('=')[-1]
    self._orb = CORBA.ORB_init(sys.argv + orbArgs, CORBA.ORB_ID)
    self._poa = self._orb.resolve_initial_references("RootPOA")
    self._poa._get_the_POAManager().activate()
    self._ns = self._orb.resolve_initial_references("NameService")
    self._root = self._ns._narrow(CosNaming.NamingContext)

    # Maintain a registry of the DomainManager (there should normally be just one)
    # and all spawned DeviceManagers, for easy cleanup.
    self._domainBooter = None
    self._domainManager = None

    self._deviceLock = threading.Lock()
    self._deviceBooters = []
    self._deviceManagers = []
    self._execparams = ""
def __init__(self, porttype):
    """
    Instantiates a new object and generates a default StreamSRI.  The
    porttype parameter corresponds to the type of data contained in the
    array of data being sent.

    The porttype is also used in the connectPort() method to narrow the
    connection
    """
    self.port_type = porttype
    self.outPorts = {}
    self.refreshSRI = False
    self.sri = bulkio_helpers.defaultSRI
    self.port_lock = threading.Lock()
    self.done = False
def __init__(self, porttype):
    """
    Instantiates a new object responsible for writing data from the port
    into an array.

    It is important to notice that the porttype is a BULKIO__POA type and
    not a BULKIO type.  The reason is because it is used to generate a
    Port class that will be returned when the getPort() is invoked.  The
    returned class is the one acting as a server and therefore must be a
    Portable Object Adapter rather than a simple BULKIO object.

    Inputs:
        <porttype>        The BULKIO__POA data type
    """
    StreamMgr.__init__(self)
    self.port_type = porttype
    self.sri = bulkio_helpers.defaultSRI
    self.data = []
    self.timestamps = []
    self.gotEOS = False
    self.breakBlock = False
    self.port_lock = threading.Lock()
    self.port_cond = threading.Condition(self.port_lock)
def __init__(self, porttype):
    """
    Instantiates a new object responsible for writing data from the port
    into an array.

    It is important to notice that the porttype is a BULKIO__POA type and
    not a BULKIO type.  The reason is because it is used to generate a
    Port class that will be returned when the getPort() is invoked.  The
    returned class is the one acting as a server and therefore must be a
    Portable Object Adapter rather than a simple BULKIO object.

    Inputs:
        <porttype>        The BULKIO__POA data type
    """
    self.port_type = porttype
    self.sri = bulkio_helpers.defaultSRI
    self.data = []
    self.gotEOS = False
    self.port_lock = threading.Lock()
    self.valid_streams = {}
    self.invalid_streams = {}
    self.received_data = {}
def __init__(self, porttype, throttle=False):
    """
    Instantiates a new object and generates a default StreamSRI.  The
    porttype parameter corresponds to the type of data contained in the
    array of data being sent.

    The porttype is also used in the connectPort() method to narrow the
    connection
    """
    self.porttype = porttype
    self.outPorts = {}
    self.refreshSRI = False
    self.defaultStreamSRI = BULKIO.StreamSRI(1, 0.0, 0.001, 1, 0, 0.0, 0.001, 1, 0,
                                             "sampleStream", True, [])
    self.port_lock = threading.Lock()
    self._throttle = throttle
    self.done = False
def __init__(self, resource=None):
    self._mgr_lock = threading.Lock()
    self._ecm = None
    self._logger = logging.getLogger("ossie.events.Manager")
    self._logger.setLevel(logging.INFO)
    self._allow = True
    self._registrations = []
    if resource:
        try:
            self._logger.debug("Requesting Domain Manager Access....")
            dom = resource.getDomainManager()
            self._logger.debug("Requesting EventChannelManager Access....")
            self._ecm = dom.getRef()._get_eventChannelMgr()
            self._logger.debug("Acquired reference to EventChannelManager")
        except:
            # print traceback.format_exc()
            self._logger.warn("EventChannelManager - unable to resolve DomainManager's EventChannelManager")
            pass
def __init__(self, name, logger=None, attachDetachCallback=None, sriCmp=sri.compare,
             timeCmp=timestamp.compare, PortType=_TYPE_, newSriCallback=None,
             sriChangeCallback=None, interface=None):
    self.name = name
    self.logger = logger
    self.port_lock = threading.Lock()
    self.sri_query_lock = threading.Lock()
    self._attachedStreams = {}  # key=attach_id, value=(streamDef, userid)
    self.stats = InStats(name, PortType)
    self.sriDict = {}  # key=streamID, value=(StreamSRI, PrecisionUTCTime)
    self.attachDetachCallback = attachDetachCallback
    self.newSriCallback = newSriCallback
    self.sriChangeCallback = sriChangeCallback
    self.sri_cmp = sriCmp
    self.time_cmp = timeCmp
    self.sriChanged = False
    if not interface:
        if self.logger:
            self.logger.error("InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49")
        raise Port.InvalidPort(1, "InAttachablePort __init__ - an interface must be specified, set to BULKIO.dataSDDS or BULKIO.dataVITA49")
    self.interface = interface  # BULKIO port interface (valid options are BULKIO.dataSDDS or BULKIO.dataVITA49)
    self.setNewAttachDetachListener(attachDetachCallback)
    if self.logger:
        self.logger.debug("bulkio::InAttachablePort CTOR port:" + str(self.name) + " using interface " + str(self.interface))
def __init__(self, code=None, namespace=None, process_target=None, use_caching=False):
    """
    Create a new signal.
    """
    if not process_target:
        process_target = self.process
    self.process_target = process_target

    self.receivers = list()
    self.self_refs = dict()
    self.lock = threading.Lock()

    if code:
        self.code = code
    else:
        self.code = self.Meta.code
    if namespace:
        self.namespace = namespace
    else:
        self.namespace = self.Meta.namespace

    self.use_caching = use_caching
    self.sender_receivers_cache = weakref.WeakKeyDictionary() if use_caching else {}
    self._dead_receivers = False
def __init__(self, instance):
    """
    Initiate registry with pre-loaded apps.

    :param instance: Instance of the controller.
    :type instance: pyplanet.core.instance.Instance
    """
    self.instance = instance
    self.apps = OrderedDict()
    self.unloaded_apps = OrderedDict()

    # Set ready states.
    self.apps_ready = self.ready = False

    # Set a lock for threading.
    self._lock = threading.Lock()

    # Listen to events
    self.instance.signals.listen('contrib.mode:script_mode_changed', self._on_mode_change)
def new_build_thread(try_build):
    import threading
    for sub_pkg in list(try_build):
        dumb_mutex = threading.Lock()
        dumb_mutex.acquire()
        try:
            sub_thread = threading.Thread(
                target=slave_thread_build, args=[sub_pkg])
            sub_thread.start()
            sub_thread.join()
            dumb_mutex.release()
            return 0
        except:
            err_msg(
                'Sub-build process using thread {}, building \033[36m{}\033[0m \033[93mfailed!\033[0m'.format(sub_thread.name, sub_pkg))
            return 128
def __init__(self, maxsize=0):
    self.maxsize = maxsize
    self._init(maxsize)
    # mutex must be held whenever the queue is mutating.  All methods
    # that acquire mutex must release it before returning.  mutex
    # is shared between the three conditions, so acquiring and
    # releasing the conditions also acquires and releases mutex.
    self.mutex = _threading.Lock()
    # Notify not_empty whenever an item is added to the queue; a
    # thread waiting to get is notified then.
    self.not_empty = _threading.Condition(self.mutex)
    # Notify not_full whenever an item is removed from the queue;
    # a thread waiting to put is notified then.
    self.not_full = _threading.Condition(self.mutex)
    # Notify all_tasks_done whenever the number of unfinished tasks
    # drops to zero; thread waiting to join() is notified to resume
    self.all_tasks_done = _threading.Condition(self.mutex)
    self.unfinished_tasks = 0
def __init__(self, **kwargs):
    self.agent_started = False
    self.agent_destroyed = False

    self.profiler_lock = threading.Lock()

    self.main_thread_func = None

    self.run_ts = None
    self.run_id = None

    self.config = Config(self)
    self.config_loader = ConfigLoader(self)
    self.message_queue = MessageQueue(self)
    self.frame_cache = FrameCache(self)
    self.process_reporter = ProcessReporter(self)
    self.cpu_reporter = CPUReporter(self)
    self.allocation_reporter = AllocationReporter(self)
    self.block_reporter = BlockReporter(self)
    self.error_reporter = ErrorReporter(self)

    self.options = None
def simulate_lock():
    lock = threading.Lock()

    def lock_wait():
        lock.acquire()
        lock.release()

    while True:
        lock.acquire()
        t = threading.Thread(target=lock_wait)
        t.start()
        time.sleep(1)
        lock.release()
        time.sleep(1)
def __init__(self, apply_light_policy_interval=10, device_detection_interval=10,
             device_offline_delay=10, logging_level=logging.INFO):
    self.__yeelight_detection_thread = None
    self.__device_detection_thread = None
    self.__device_detection_thread_woker = {}
    self.__device_detection_thread_rlock = threading.Lock()
    self.__thread_rlock = threading.Lock()
    self.__apply_light_policy_thread = None
    self.__current_geo = None
    self.__compiled_policy = []
    self.__compiled_policy_date = None
    self.__device_on_monitor = []
    self.__device_online = []
    self.__device_detection_interval = device_detection_interval
    self.__apply_light_policy_interval = apply_light_policy_interval
    self.__device_offline_delay = device_offline_delay
    self.__config = {}
    self.__RUNNING = False

    # a few setups
    self.register_signal_handler()
    self.__setup_log(logging_level=logging_level)
    self.__logger.info("Controller instance created")
def __init__(self, args, retries=3, timeout=None):
    """Construct an instance.

    Arguments:
    args - Array of program arguments
    retries - Number of times to try restarting the program before giving up.
    """
    self.args = args
    self.retries = retries
    self.timeout = timeout
    self.lock = threading.Lock()
    self.program = None
    self.emitter = None
    self.parser = None
    self.__establish()
def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.

    This will create the file if necessary.
    """
    self._file = LockedFile(filename, 'r+', 'r')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly

    self._create_file_if_needed()

    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # ((key, value), (key, value)...) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None
def __init__(self, topology_settings):
    self._settings = topology_settings
    topology_description = TopologyDescription(
        topology_settings.get_topology_type(),
        topology_settings.get_server_descriptions(),
        topology_settings.replica_set_name,
        None,
        None)

    self._description = topology_description
    # Store the seed list to help diagnose errors in _error_message().
    self._seed_addresses = list(topology_description.server_descriptions())
    self._opened = False
    self._lock = threading.Lock()
    self._condition = self._settings.condition_class(self._lock)
    self._servers = {}
    self._pid = None
def test_no_req_ids(self, *args):
    in_flight = 3
    get_holders = self.make_get_holders(1)
    max_connection = Mock(spec=Connection, host='localhost', lock=Lock(),
                          max_request_id=in_flight - 1, in_flight=in_flight,
                          is_idle=True, is_defunct=False, is_closed=False)
    holder = get_holders.return_value[0]
    holder.get_connections.return_value.append(max_connection)

    self.run_heartbeat(get_holders)

    holder.get_connections.assert_has_calls([call()] * get_holders.call_count)
    self.assertEqual(max_connection.in_flight, in_flight)
    self.assertEqual(max_connection.send_msg.call_count, 0)
    self.assertEqual(max_connection.send_msg.call_count, 0)
    max_connection.defunct.assert_has_calls([call(ANY)] * get_holders.call_count)
    holder.return_connection.assert_has_calls(
        [call(max_connection)] * get_holders.call_count)
def __init__(self, *args, **kwargs):
    Connection.__init__(self, *args, **kwargs)

    self.deque = deque()
    self._deque_lock = Lock()

    self._connect_socket()
    self._socket.setblocking(0)

    with self._libevloop._lock:
        self._read_watcher = libev.IO(self._socket.fileno(), libev.EV_READ,
                                      self._libevloop._loop, self.handle_read)
        self._write_watcher = libev.IO(self._socket.fileno(), libev.EV_WRITE,
                                       self._libevloop._loop, self.handle_write)

    self._send_options_message()

    self._libevloop.connection_created(self)

    # start the global event loop if needed
    self._libevloop.maybe_start()
def __init__(self):
    self._pid = os.getpid()
    self._loop_lock = Lock()
    self._started = False
    self._shutdown = False

    self._thread = None

    self._timers = TimerManager()

    try:
        dispatcher = self._loop_dispatch_class()
        dispatcher.validate()
        log.debug("Validated loop dispatch with %s", self._loop_dispatch_class)
    except Exception:
        log.exception("Failed validating loop dispatch with %s. Using busy wait execution instead.", self._loop_dispatch_class)
        dispatcher.close()
        dispatcher = _BusyWaitDispatcher()
    self._loop_dispatcher = dispatcher

    atexit.register(partial(_cleanup, weakref.ref(self)))
def __init__(self, host, host_distance, session):
    self.host = host
    self.host_distance = host_distance
    self._session = weakref.proxy(session)
    self._lock = Lock()
    # this is used in conjunction with the connection streams. Not using the
    # connection lock because the connection can be replaced in the lifetime
    # of the pool.
    self._stream_available_condition = Condition(self._lock)
    self._is_replacing = False

    if host_distance == HostDistance.IGNORED:
        log.debug("Not opening connection to ignored host %s", self.host)
        return
    elif host_distance == HostDistance.REMOTE and not session.cluster.connect_to_remote_hosts:
        log.debug("Not opening connection to remote host %s", self.host)
        return

    log.debug("Initializing connection for host %s", self.host)
    self._connection = session.cluster.connection_factory(host.address)
    self._keyspace = session.keyspace
    if self._keyspace:
        self._connection.set_keyspace_blocking(self._keyspace)
    log.debug("Finished initializing connection for host %s", self.host)
def __init__(self, cluster, timeout,
             schema_event_refresh_window,
             topology_event_refresh_window,
             status_event_refresh_window,
             schema_meta_enabled=True,
             token_meta_enabled=True):
    # use a weak reference to allow the Cluster instance to be GC'ed (and
    # shutdown) since implementing __del__ disables the cycle detector
    self._cluster = weakref.proxy(cluster)
    self._connection = None
    self._timeout = timeout

    self._schema_event_refresh_window = schema_event_refresh_window
    self._topology_event_refresh_window = topology_event_refresh_window
    self._status_event_refresh_window = status_event_refresh_window
    self._schema_meta_enabled = schema_meta_enabled
    self._token_meta_enabled = token_meta_enabled

    self._lock = RLock()
    self._schema_agreement_lock = Lock()

    self._reconnection_handler = None
    self._reconnection_lock = RLock()

    self._event_schedule_times = {}
def __init__(self, maxsize=0):
    try:
        import threading
    except ImportError:
        import dummy_threading as threading
    self._init(maxsize)
    # mutex must be held whenever the queue is mutating.  All methods
    # that acquire mutex must release it before returning.  mutex
    # is shared between the three conditions, so acquiring and
    # releasing the conditions also acquires and releases mutex.
    self.mutex = threading.Lock()
    # Notify not_empty whenever an item is added to the queue; a
    # thread waiting to get is notified then.
    self.not_empty = threading.Condition(self.mutex)
    # Notify not_full whenever an item is removed from the queue;
    # a thread waiting to put is notified then.
    self.not_full = threading.Condition(self.mutex)
    # Notify all_tasks_done whenever the number of unfinished tasks
    # drops to zero; thread waiting to join() is notified to resume
    self.all_tasks_done = threading.Condition(self.mutex)
    self.unfinished_tasks = 0
def __init__(self, rules=None, default_subdomain='', charset='utf-8',
             strict_slashes=True, redirect_defaults=True,
             converters=None, sort_parameters=False, sort_key=None,
             encoding_errors='replace', host_matching=False):
    self._rules = []
    self._rules_by_endpoint = {}
    self._remap = True
    self._remap_lock = Lock()

    self.default_subdomain = default_subdomain
    self.charset = charset
    self.encoding_errors = encoding_errors
    self.strict_slashes = strict_slashes
    self.redirect_defaults = redirect_defaults
    self.host_matching = host_matching

    self.converters = self.default_converters.copy()
    if converters:
        self.converters.update(converters)

    self.sort_parameters = sort_parameters
    self.sort_key = sort_key

    for rulefactory in rules or ():
        self.add(rulefactory)
def __init__(self, app=None, use_native_unicode=True, session_options=None,
             metadata=None):
    if session_options is None:
        session_options = {}

    session_options.setdefault('scopefunc', connection_stack.__ident_func__)
    self.use_native_unicode = use_native_unicode
    self.session = self.create_scoped_session(session_options)
    self.Model = self.make_declarative_base(metadata)
    self.Query = BaseQuery
    self._engine_lock = Lock()
    self.app = app
    _include_sqlalchemy(self)

    if app is not None:
        self.init_app(app)
def __init__(self, reader, partition, discretizer, normalizer,
             batch_size, steps, shuffle):
    self.reader = reader
    self.partition = partition
    self.discretizer = discretizer
    self.normalizer = normalizer
    self.batch_size = batch_size

    if steps is None:
        self.n_examples = reader.get_number_of_examples()
        self.steps = (self.n_examples + batch_size - 1) // batch_size
    else:
        self.n_examples = steps * batch_size
        self.steps = steps

    self.shuffle = shuffle
    self.chunk_size = min(1024, steps) * batch_size
    self.lock = threading.Lock()
    self.generator = self._generator()
def __init__(self, reader, discretizer, normalizer,
             batch_size, steps, shuffle):
    self.reader = reader
    self.discretizer = discretizer
    self.normalizer = normalizer
    self.batch_size = batch_size

    if steps is None:
        self.n_examples = reader.get_number_of_examples()
        self.steps = (self.n_examples + batch_size - 1) // batch_size
    else:
        self.n_examples = steps * batch_size
        self.steps = steps

    self.shuffle = shuffle
    self.chunk_size = min(1024, steps) * batch_size
    self.lock = threading.Lock()
    self.generator = self._generator()
def __init__(self, input_transcoder, output_transcoder, command=None, cwd=None, env=None):
    self.master = None
    self.slave = None
    self.process = None

    self.input_transcoder = input_transcoder
    self.output_transcoder = output_transcoder
    self.command = command
    self.cwd = cwd
    self.env = env

    self.mutex = Lock()
    self.read_thread = None
    self.write_thread = None
    self.stop = False
def __init__(self, split='train', folder=None, mode='train', num_replica=1,
             subtract_mean=True):
    """
    Mode: train or valid or test
    Train: Random scale, random crop
    Valid: Single center crop
    Test: use 10-crop testing... Something that we haven't implemented yet.
    """
    super(ImageNetDataProvider, self).__init__()
    self.log = tfplus.utils.logger.get()
    self._split = split
    self._folder = folder
    self._img_ids = None
    self._labels = None
    self._mode = mode
    self._rnd_proc = ImagePreprocessor(
        rnd_hflip=True, rnd_colour=False,
        rnd_resize=[256, 256], resize=256, crop=224)
    self._mean_img = np.array(
        [103.062623801, 115.902882574, 123.151630838], dtype='float32')
    self._mutex = threading.Lock()
    self.register_option('imagenet:dataset_folder')
    self._num_replica = num_replica
    pass
def __init__(self, filename, warn_on_readonly=True):
    """Initialize the class.

    This will create the file if necessary.
    """
    self._file = LockedFile(filename, 'r+b', 'rb')
    self._thread_lock = threading.Lock()
    self._read_only = False
    self._warn_on_readonly = warn_on_readonly

    self._create_file_if_needed()

    # Cache of deserialized store. This is only valid after the
    # _MultiStore is locked or _refresh_data_cache is called. This is
    # of the form of:
    #
    # ((key, value), (key, value)...) -> OAuth2Credential
    #
    # If this is None, then the store hasn't been read yet.
    self._data = None
def _lock(self):
    """Lock the entire multistore."""
    self._thread_lock.acquire()
    self._file.open_and_lock()
    if not self._file.is_locked():
        self._read_only = True
        if self._warn_on_readonly:
            logger.warn('The credentials file (%s) is not writable. Opening in '
                        'read-only mode. Any refreshed credentials will only be '
                        'valid for this run.' % self._file.filename())
    if os.path.getsize(self._file.filename()) == 0:
        logger.debug('Initializing empty multistore file')
        # The multistore is empty so write out an empty file.
        self._data = {}
        self._write()
    elif not self._read_only or self._data is None:
        # Only refresh the data if we are read/write or we haven't
        # cached the data yet. If we are readonly, we assume it isn't
        # changing out from under us and that we only have to read it
        # once. This prevents us from whacking any new access keys that
        # we have cached in memory but were unable to write out.
        self._refresh_data_cache()
def init_static_locks(cls):
    with cls._lock_init_lock:
        cls._ensure_ffi_initialized()

        if not cls._lock_cb_handle:
            wrapper = ffi_callback(
                "void(int, int, const char *, int)",
                name="Cryptography_locking_cb",
            )
            cls._lock_cb_handle = wrapper(cls._lock_cb)

        # Use Python's implementation if available, importing _ssl triggers
        # the setup for this.
        __import__("_ssl")

        if cls.lib.CRYPTO_get_locking_callback() != cls.ffi.NULL:
            return

        # If nothing else has setup a locking callback already, we set up
        # our own
        num_locks = cls.lib.CRYPTO_num_locks()
        cls._locks = [threading.Lock() for n in range(num_locks)]

        cls.lib.CRYPTO_set_locking_callback(cls._lock_cb_handle)
def initialize(self, impl, time_func=None, **kwargs):
    super(PollIOLoop, self).initialize(**kwargs)
    self._impl = impl
    if hasattr(self._impl, 'fileno'):
        set_close_exec(self._impl.fileno())
    self.time_func = time_func or time.time
    self._handlers = {}
    self._events = {}
    self._callbacks = []
    self._callback_lock = threading.Lock()
    self._timeouts = []
    self._cancellations = 0
    self._running = False
    self._stopped = False
    self._closing = False
    self._thread_ident = None
    self._blocking_signal_threshold = None
    self._timeout_counter = itertools.count()

    # Create a pipe that we send bogus data to when we want to wake
    # the I/O loop when it is idle
    self._waker = Waker()
    self.add_handler(self._waker.fileno(),
                     lambda fd, events: self._waker.consume(),
                     self.READ)
def __init__(self, computation, host='', port=8181, poll_sec=10, DocumentRoot=None,
             keyfile=None, certfile=None, show_task_args=True):
    self._lock = threading.Lock()
    if not DocumentRoot:
        DocumentRoot = os.path.join(os.path.dirname(__file__), 'data')
    self._nodes = {}
    self._updates = {}
    if poll_sec < 1:
        pycos.logger.warning('invalid poll_sec value %s; it must be at least 1', poll_sec)
        poll_sec = 1
    self._poll_sec = poll_sec
    self._show_args = bool(show_task_args)
    self._server = BaseHTTPServer.HTTPServer(
        (host, port), lambda *args: HTTPServer._HTTPRequestHandler(self, DocumentRoot, *args))
    if certfile:
        self._server.socket = ssl.wrap_socket(self._server.socket, keyfile=keyfile,
                                              certfile=certfile, server_side=True)
    self._httpd_thread = threading.Thread(target=self._server.serve_forever)
    self._httpd_thread.daemon = True
    self._httpd_thread.start()
    self.computation = computation
    self.status_task = pycos.Task(self.status_proc)
    if computation.status_task:
        client_task = computation.status_task

        def chain_msgs(task=None):
            task.set_daemon()
            while 1:
                msg = yield task.receive()
                self.status_task.send(msg)
                client_task.send(msg)

        computation.status_task = pycos.Task(chain_msgs)
    else:
        computation.status_task = self.status_task
    pycos.logger.info('Started HTTP%s server at %s', 's' if certfile else '',
                      str(self._server.socket.getsockname()))
def test_request_vif(self, m_to_vif):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(mac_address=container_mac,
                                         ip_address=container_ip)

    vif = mock.Mock()
    port_request = mock.sentinel.port_request
    vm_port = self._get_fake_port()

    m_to_vif.return_value = vif
    m_driver._get_port_request.return_value = port_request
    m_driver._get_parent_port.return_value = vm_port
    m_driver.lock = mock.MagicMock(spec=threading.Lock())
    neutron.create_port.return_value = container_port

    self.assertEqual(vif, cls.request_vif(m_driver, pod, project_id,
                                          subnets, security_groups))

    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups)
    neutron.create_port.assert_called_once_with(port_request)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._add_to_allowed_address_pairs.assert_called_once_with(
        neutron, vm_port, frozenset([container_ip]), container_mac)
    m_to_vif.assert_called_once_with(container_port['port'], subnets)
def test_request_vif_parent_not_found(self, m_to_vif):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client

    pod = mock.sentinel.pod
    project_id = mock.sentinel.project_id
    subnets = mock.sentinel.subnets
    security_groups = mock.sentinel.security_groups
    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(mac_address=container_mac,
                                         ip_address=container_ip)

    port_request = mock.sentinel.port_request
    m_driver._get_port_request.return_value = port_request
    m_driver.lock = mock.MagicMock(spec=threading.Lock())
    neutron.create_port.return_value = container_port
    m_driver._get_parent_port.side_effect = n_exc.NeutronClientException

    self.assertRaises(n_exc.NeutronClientException, cls.request_vif,
                      m_driver, pod, project_id, subnets, security_groups)

    m_driver._get_port_request.assert_called_once_with(
        pod, project_id, subnets, security_groups)
    neutron.create_port.assert_called_once_with(port_request)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._add_to_allowed_address_pairs.assert_not_called()
    m_to_vif.assert_not_called()
def test_release_vif(self):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client
    port_id = lib_utils.get_hash()
    pod = mock.sentinel.pod
    vif = mock.Mock()
    vif.id = port_id

    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_address
    container_port = self._get_fake_port(port_id, container_ip,
                                         container_mac)
    neutron.show_port.return_value = container_port

    vm_port = self._get_fake_port()
    m_driver._get_parent_port.return_value = vm_port
    m_driver.lock = mock.MagicMock(spec=threading.Lock())

    cls.release_vif(m_driver, pod, vif)

    neutron.show_port.assert_called_once_with(port_id)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._remove_from_allowed_address_pairs.assert_called_once_with(
        neutron, vm_port, frozenset([container_ip]), container_mac)
    neutron.delete_port.assert_called_once_with(vif.id)
def test_release_vif_delete_failed(self):
    cls = nested_macvlan_vif.NestedMacvlanPodVIFDriver
    m_driver = mock.Mock(spec=cls)
    neutron = self.useFixture(k_fix.MockNeutronClient()).client
    port_id = lib_utils.get_hash()
    pod = mock.sentinel.pod
    vif = mock.Mock()
    vif.id = port_id

    container_mac = mock.sentinel.mac_address
    container_ip = mock.sentinel.ip_addresses
    container_port = self._get_fake_port(port_id, container_ip,
                                         container_mac)
    neutron.show_port.return_value = container_port
    neutron.delete_port.side_effect = n_exc.PortNotFoundClient

    vm_port = self._get_fake_port()
    m_driver._get_parent_port.return_value = vm_port
    m_driver.lock = mock.MagicMock(spec=threading.Lock())

    cls.release_vif(m_driver, pod, vif)

    neutron.show_port.assert_called_once_with(port_id)
    m_driver._get_parent_port.assert_called_once_with(neutron, pod)
    m_driver._remove_from_allowed_address_pairs.assert_called_once_with(
        neutron, vm_port, frozenset([container_ip]), container_mac)
    neutron.delete_port.assert_called_once_with(vif.id)
def __init__(self, path, password=None, server_id=None,
             disable_auto_login=True, timeout=10):
    self.timeout = timeout
    self.server_id = server_id
    self.password = password
    self.disable_auto_login = disable_auto_login
    if disable_auto_login:
        self.client = toxclient.Toxclient(path)
    else:
        self.client = toxclient.Toxclient(path, password)
    self.exec_lock = threading.Lock()
def startTh():
    q1 = Queue.Queue(10)
    ql1 = threading.Lock()
    collect = porterThread('collect', q1, ql1, interval=3)
    collect.start()
    time.sleep(0.5)
    sendjson = porterThread('sendjson', q1, ql1, interval=3)
    sendjson.start()
    # print "start"
    collect.join()
    sendjson.join()
def __init__(self, host, port):
    self.buf = deque()
    self.handle = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    self.handle.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
    try:
        # self.handle.connect((host, port))
        self.handle.bind(("", port))
    except socket.error, msg:
        pass
    super(UDPDriver, self).__init__(self.handle)
    self._write_lock = threading.Lock()