我们从 Python 开源项目中提取了以下 50 个代码示例,用于说明如何使用 weakref.proxy()。
def __newApplication(self, app):
    """Wrap a CORBA application reference in an App entry for this domain.

    Reads and parses the application's SAD profile through the domain's
    file manager, derives the waveform's naming-service name and display
    name, and returns a populated App object.
    """
    # Load and parse the application's SAD XML profile via the file manager.
    prof_path = app._get_profile()
    sadFile = self.fileManager.open(prof_path, True)
    sadContents = sadFile.read(sadFile.sizeOf())
    sadFile.close()
    doc_sad = parsers.sad.parseString(sadContents)
    # Derive the waveform naming-context name from the first component's
    # elementId (second "/"-separated segment); empty if no components.
    comp_list = app._get_componentNamingContexts()
    waveform_ns_name = ''
    if len(comp_list) > 0:
        comp_ns_name = comp_list[0].elementId
        waveform_ns_name = comp_ns_name.split('/')[1]
    # Strip the legacy "OSSIE::" prefix from the application name if present.
    app_name = app._get_name()
    if app_name[:7]=='OSSIE::':
        waveform_name = app_name[7:]
    else:
        waveform_name = app_name
    # weakref.proxy avoids a strong App -> domain reference cycle.
    waveform_entry = App(name=waveform_name, domain=weakref.proxy(self), sad=doc_sad)
    waveform_entry.ref = app
    waveform_entry.ns_name = waveform_ns_name
    return waveform_entry
def open(self):
    """Start the worker thread if it is not already running.

    Repeated calls are no-ops while the worker is alive.  Not safe to
    call concurrently from multiple threads.
    """
    self._stopped = False
    # self._thread holds only a weakref.proxy, so touching a terminated
    # thread can raise ReferenceError; treat that as "not running".
    try:
        already_running = bool(self._thread and self._thread.is_alive())
    except ReferenceError:
        already_running = False
    if already_running:
        return
    worker = threading.Thread(target=self._run, name=self._name)
    worker.daemon = True
    # Keep only a weak proxy so this object does not pin the thread.
    self._thread = weakref.proxy(worker)
    _register_executor(self)
    worker.start()
def __init__(self, host, host_distance, session):
    """Single-connection pool for one host.

    No connection is opened for IGNORED hosts, nor for REMOTE hosts when
    the cluster is not configured to connect to remote hosts.
    """
    self.host = host
    self.host_distance = host_distance
    # Weak proxy: the pool must not keep the owning Session alive.
    self._session = weakref.proxy(session)
    self._lock = Lock()
    # Guards stream availability on the (replaceable) connection.  Uses the
    # pool's own lock instead of the connection lock on purpose, because
    # the connection can be swapped out during the pool's lifetime.
    self._stream_available_condition = Condition(self._lock)
    self._is_replacing = False

    if host_distance == HostDistance.IGNORED:
        log.debug("Not opening connection to ignored host %s", self.host)
        return
    if host_distance == HostDistance.REMOTE and not session.cluster.connect_to_remote_hosts:
        log.debug("Not opening connection to remote host %s", self.host)
        return

    log.debug("Initializing connection for host %s", self.host)
    self._connection = session.cluster.connection_factory(host.address)
    self._keyspace = session.keyspace
    if self._keyspace:
        self._connection.set_keyspace_blocking(self._keyspace)
    log.debug("Finished initializing connection for host %s", self.host)
def __init__(self, host, host_distance, session):
    """Connection pool holding a core set of connections to one host."""
    self.host = host
    self.host_distance = host_distance
    # Weak proxy avoids a strong reference cycle back to the Session.
    self._session = weakref.proxy(session)
    self._lock = RLock()
    self._conn_available_condition = Condition()

    log.debug("Initializing new connection pool for host %s", self.host)
    core_conns = session.cluster.get_core_connections_per_host(host_distance)
    self._connections = []
    for _ in range(core_conns):
        self._connections.append(session.cluster.connection_factory(host.address))

    self._keyspace = session.keyspace
    if self._keyspace:
        for connection in self._connections:
            connection.set_keyspace_blocking(self._keyspace)

    # Retired connections awaiting disposal, plus a rate limit on trashing.
    self._trash = set()
    self._next_trash_allowed_at = time.time()
    self.open_count = core_conns
    log.debug("Finished initializing new connection pool for host %s", self.host)
def __init__(self, cluster, timeout, schema_event_refresh_window, topology_event_refresh_window, status_event_refresh_window, schema_meta_enabled=True, token_meta_enabled=True): # use a weak reference to allow the Cluster instance to be GC'ed (and # shutdown) since implementing __del__ disables the cycle detector self._cluster = weakref.proxy(cluster) self._connection = None self._timeout = timeout self._schema_event_refresh_window = schema_event_refresh_window self._topology_event_refresh_window = topology_event_refresh_window self._status_event_refresh_window = status_event_refresh_window self._schema_meta_enabled = schema_meta_enabled self._token_meta_enabled = token_meta_enabled self._lock = RLock() self._schema_agreement_lock = Lock() self._reconnection_handler = None self._reconnection_lock = RLock() self._event_schedule_times = {}
def __init__(self, path):
    """Module loader exposing templates under a synthetic package.

    Registers a fake module in sys.modules through a weak proxy whose
    callback evicts the entry, so it disappears automatically once this
    loader (holder of the only strong reference) goes away.
    """
    package_name = '_jinja2_module_templates_%x' % id(self)

    # Fake module whose __path__ points at the template search path(s).
    mod = _TemplateModule(package_name)
    mod.__path__ = [path] if isinstance(path, string_types) else list(path)

    # sys.modules gets only a weak proxy; the callback removes the entry
    # when the module is garbage-collected.
    sys.modules[package_name] = weakref.proxy(
        mod, lambda x: sys.modules.pop(package_name, None))

    # self.module is the sole strong reference keeping the module alive.
    self.module = mod
    self.package_name = package_name
def __init__(self, *args, **kwds):
    '''Initialize an ordered dictionary.

    The signature is the same as regular dictionaries, but keyword
    arguments are not recommended because their insertion order is
    arbitrary.

    Raises:
        TypeError: if more than one positional argument is given.
    '''
    if len(args) > 1:
        raise TypeError('expected at most 1 arguments, got %d' % len(args))
    # Only build the sentinel on first initialization; a repeated
    # __init__ call on a live instance reuses the existing linked list.
    try:
        self.__root
    except AttributeError:
        # __hardroot keeps the only strong reference to the sentinel link;
        # __root is a weak proxy so the circular doubly linked list of
        # _Link nodes does not form uncollectable reference cycles.
        self.__hardroot = _Link()
        self.__root = root = _proxy(self.__hardroot)
        root.prev = root.next = root
        self.__map = {}
    self.__update(*args, **kwds)
def __init__(self, parent_message): """Args: parent_message: The message whose _Modified() method we should call when we receive Modified() messages. """ # This listener establishes a back reference from a child (contained) object # to its parent (containing) object. We make this a weak reference to avoid # creating cyclic garbage when the client finishes with the 'parent' object # in the tree. if isinstance(parent_message, weakref.ProxyType): self._parent_message_weakref = parent_message else: self._parent_message_weakref = weakref.proxy(parent_message) # As an optimization, we also indicate directly on the listener whether # or not the parent message is dirty. This way we can avoid traversing # up the tree in the common case. self.dirty = False
def __init__(self, module_name):
    """Install the default mocks (open, os.path.exists, os.walk) on a module.

    Only one Mock is expected to be live at a time; a warning is printed
    if a previous instance is still active.
    """
    if Mock.active:
        print("Previous mock object still alive")
    Mock.active = True

    self.module = sys.modules[module_name]
    self.mocks = {}
    self.calls = defaultdict(list)
    self.opened = {}
    self.filesystem = {}

    # Default mocks: route stderr output onto stdout for capture.
    self.stderr = sys.stderr
    sys.stderr = sys.stdout

    # Rebind self to a weak proxy so the lambdas below do not keep this
    # Mock instance alive through their closures.
    self = weakref.proxy(self)
    self.mock(self.module, 'open',
              lambda *args, **kwargs: self.open(*args, **kwargs))
    if hasattr(self.module, 'os'):
        self.mock(self.module.os.path, 'exists',
                  lambda *args, **kwargs: self.exists(*args, **kwargs))
        self.mock(self.module.os, 'walk',
                  lambda *args, **kwargs: self.walk(*args, **kwargs))
def mock(self, module, name, return_value=None):
    """Replace ``name`` on ``module`` (or self.module), recording calls.

    The original attribute is remembered once in self.mocks so it can be
    restored later.  A callable (or missing) original is replaced by a
    recording wrapper; any other original is simply overwritten.
    """
    key = (module, name)
    if key not in self.mocks:
        # Remember the original exactly once, even across repeated mocking.
        self.mocks[key] = getattr(module or self.module, name, None)
    original_value = self.mocks[key]

    if original_value is None or callable(original_value):
        # Weak proxy keeps the wrapper's closure from pinning this object.
        self = weakref.proxy(self)
        if hasattr(module, '__name__'):
            module_name = module.__name__
        else:
            module_name = type(module).__name__
        call_name = "{}.{}".format(module_name, name) if module else name

        def _mock(*attrs, **kwargs):
            # Record the invocation, then delegate to / return the stub.
            self.calls[call_name].append((attrs, kwargs))
            if callable(return_value):
                return return_value(*attrs, **kwargs)
            return return_value

        setattr(module or self.module, name, _mock)
    else:
        setattr(module or self.module, name, return_value)
def __init__(self, connection):
    """Create a cursor bound to an open MySQL connection.

    Raises:
        errors.InterfaceError: (errno 2048) if ``connection`` is not a
            MySQLConnectionAbstract instance.
    """
    MySQLCursorAbstract.__init__(self)

    # Result bookkeeping, reset until a statement has been executed.
    self._insert_id = 0
    self._warning_count = 0
    self._warnings = None
    self._affected_rows = -1
    self._rowcount = -1
    self._nextrow = None

    # The statement(s) most recently executed on this cursor.
    self._executed = None
    self._executed_list = []
    self._stored_results = []

    if not isinstance(connection, MySQLConnectionAbstract):
        raise errors.InterfaceError(errno=2048)
    # Weak proxy: cursors must never keep their connection alive.
    self._cnx = weakref.proxy(connection)
def __new__(cls, obj=None, prop=None, func=None):
    """Construct the real base object but hand callers only a weak proxy.

    The true instance is appended to cls._queue, which holds the sole
    strong reference and therefore controls its lifetime.

    Args:
        obj: None / function / instance method / module / class to inspect.
        prop: None / string naming the inspected attribute when ``obj``
            contains callable things.
        func: replacement callable (used only by stubs).

    Returns:
        A weakref.proxy wrapping the newly created instance.
    """
    instance = super(SinonBase, cls).__new__(cls)
    # __init__ is called manually because a proxy (not the instance) is
    # returned, so Python will not invoke __init__ automatically.
    init_args = (obj, prop, func) if func else (obj, prop)
    instance.__init__(*init_args)
    cls._queue.append(instance)
    return weakref.proxy(instance)
def __init__(self, connection): ''' Do not create an instance of a Cursor yourself. Call connections.Connection.cursor(). ''' from weakref import proxy self.connection = proxy(connection) self.description = None self.rownumber = 0 self.rowcount = -1 self.arraysize = 1 self._executed = None self.messages = [] self.errorhandler = connection.errorhandler self._has_next = None self._rows = ()
def _setup_autocommit_worker(self):
    """Start the autocommitter thread"""
    # Rebind self to a weak proxy: the worker loop must not keep the
    # consumer alive.  Once the consumer is collected, any attribute
    # access raises ReferenceError, which cleanly ends the loop below.
    self = weakref.proxy(self)
    def autocommitter():
        while True:
            try:
                # Stop when the consumer has been shut down.
                if not self._running:
                    break
                if self._auto_commit_enable:
                    self._auto_commit()
                # Interval is configured in milliseconds; sleep in seconds.
                self._cluster.handler.sleep(self._auto_commit_interval_ms / 1000)
            except ReferenceError:
                # Consumer was garbage-collected; exit quietly.
                break
            except Exception:
                # surface all exceptions to the main thread
                self._worker_exception = sys.exc_info()
                break
        log.debug("Autocommitter thread exiting")
    log.debug("Starting autocommitter thread")
    return self._cluster.handler.spawn(autocommitter, name="pykafka.SimpleConsumer.autocommiter")
def _setup_fetch_workers(self): """Start the fetcher threads""" # NB this gets overridden in rdkafka.RdKafkaSimpleConsumer self = weakref.proxy(self) def fetcher(): while True: try: if not self._running: break self.fetch() self._cluster.handler.sleep(.0001) except ReferenceError: break except Exception: # surface all exceptions to the main thread self._worker_exception = sys.exc_info() break log.debug("Fetcher thread exiting") log.info("Starting %s fetcher threads", self._num_consumer_fetchers) return [self._cluster.handler.spawn(fetcher, name="pykafka.SimpleConsumer.fetcher") for i in range(self._num_consumer_fetchers)]
def test_proxy_ref(self):
    """Dead proxies raise ReferenceError, and each fires its callback once."""
    target = C()
    target.bar = 1
    p1 = weakref.proxy(target, self.callback)
    p2 = weakref.proxy(target, self.callback)
    del target
    gc_collect()

    def touch(proxy):
        proxy.bar

    self.assertRaises(ReferenceError, touch, p1)
    self.assertRaises(ReferenceError, touch, p2)

    # A proxy whose referent dies immediately fails even for bool().
    p3 = weakref.proxy(C())
    gc_collect()
    self.assertRaises(ReferenceError, bool, p3)

    # Both callback-bearing proxies must have fired.
    self.assertEqual(self.cbcalled, 2)
def test_ref_reuse(self):
    """weakref.ref without a callback is shared for the same referent."""
    obj = C()
    first = weakref.ref(obj)
    # Create a proxy in between; it should make no difference to reuse.
    proxy = weakref.proxy(obj)
    second = weakref.ref(obj)
    self.assertTrue(first is second,
                    "reference object w/out callback should be re-used")

    obj = C()
    proxy = weakref.proxy(obj)
    first = weakref.ref(obj)
    second = weakref.ref(obj)
    self.assertTrue(first is second,
                    "reference object w/out callback should be re-used")
    self.assertTrue(weakref.getweakrefcount(obj) == 2,
                    "wrong weak ref count for object")
    del proxy
    gc_collect()
    self.assertTrue(weakref.getweakrefcount(obj) == 1,
                    "wrong weak ref count for object after deleting proxy")
def test_proxy_div(self): class C: def __floordiv__(self, other): return 42 def __ifloordiv__(self, other): return 21 o = C() p = weakref.proxy(o) self.assertEqual(p // 5, 42) p //= 5 self.assertEqual(p, 21) # The PyWeakref_* C API is documented as allowing either NULL or # None as the value for the callback, where either means "no # callback". The "no callback" ref and proxy objects are supposed # to be shared so long as they exist by all callers so long as # they are active. In Python 2.3.3 and earlier, this guarantee # was not honored, and was broken in different ways for # PyWeakref_NewRef() and PyWeakref_NewProxy(). (Two tests.)
def test_callable_proxy(self):
    """Calls through a CallableProxyType reach the original object."""
    target = Callable()
    proxy = weakref.proxy(target)
    self.check_proxy(target, proxy)

    self.assertTrue(type(proxy) is weakref.CallableProxyType,
                    "proxy is not of callable type")
    proxy('twinkies!')
    self.assertTrue(target.bar == 'twinkies!',
                    "call through proxy not passed through to original")
    proxy(x='Splat.')
    self.assertTrue(target.bar == 'Splat.',
                    "call through proxy not passed through to original")

    # expect due to too few args
    self.assertRaises(TypeError, proxy)
    # expect due to too many args
    self.assertRaises(TypeError, proxy, 1, 2, 3)
def check_proxy(self, o, proxy):
    """Assert attribute changes are mirrored both ways through *proxy*."""
    # Mutations on the referent must be visible through the proxy ...
    o.foo = 1
    self.assertTrue(proxy.foo == 1,
                    "proxy does not reflect attribute addition")
    o.foo = 2
    self.assertTrue(proxy.foo == 2,
                    "proxy does not reflect attribute modification")
    del o.foo
    self.assertTrue(not hasattr(proxy, 'foo'),
                    "proxy does not reflect attribute removal")

    # ... and mutations through the proxy must land on the referent.
    proxy.foo = 1
    self.assertTrue(o.foo == 1,
                    "object does not reflect attribute addition via proxy")
    proxy.foo = 2
    self.assertTrue(o.foo == 2,
                    "object does not reflect attribute modification via proxy")
    del proxy.foo
    self.assertTrue(not hasattr(o, 'foo'),
                    "object does not reflect attribute removal via proxy")
def test_getweakrefcount(self):
    """getweakrefcount counts refs plus proxies, dropping to zero on delete."""
    obj = C()
    r1 = weakref.ref(obj)
    r2 = weakref.ref(obj, self.callback)
    self.assertTrue(weakref.getweakrefcount(obj) == 2,
                    "got wrong number of weak reference objects")

    p1 = weakref.proxy(obj)
    p2 = weakref.proxy(obj, self.callback)
    self.assertTrue(weakref.getweakrefcount(obj) == 4,
                    "got wrong number of weak reference objects")

    del r1, r2, p1, p2
    gc_collect()
    self.assertTrue(weakref.getweakrefcount(obj) == 0,
                    "weak reference objects not unlinked from"
                    " referent when discarded.")

    # assumes ints do not support weakrefs
    self.assertTrue(weakref.getweakrefcount(1) == 0,
                    "got wrong number of weak reference objects for int")
def __init__(self, connection): from weakref import proxy self.connection = proxy(connection) self.description = None self.description_flags = None self.rowcount = -1 self.arraysize = 1 self._executed = None self.lastrowid = None self.messages = [] self.errorhandler = connection.errorhandler self._result = None self._warnings = 0 self._info = None self.rownumber = None
def __init__(self, ds, dataset_type='chombo_hdf5'):
    """Grid index for a Chombo dataset; the data file doubles as the index."""
    self.domain_left_edge = ds.domain_left_edge
    self.domain_right_edge = ds.domain_right_edge
    self.dataset_type = dataset_type
    self.field_indexes = {}
    # Weak proxy back to the dataset to avoid a reference cycle.
    self.dataset = weakref.proxy(ds)
    # For now, the index file is the dataset itself!
    self.index_filename = os.path.abspath(self.dataset.parameter_filename)
    self.directory = ds.fullpath
    self._handle = ds._handle

    # AMR levels live in HDF5 groups whose names start with "level".
    self._levels = [grp for grp in self._handle.keys()
                    if grp.startswith('level')]

    GridIndex.__init__(self, ds, dataset_type)
    self._read_particles()
def __init__(self, ds, dataset_type):
    """Set up index state, geometry, data I/O, and field detection."""
    ParallelAnalysisInterface.__init__(self)
    # Weak proxy back to the dataset; self.ds is just a convenient alias.
    self.dataset = weakref.proxy(ds)
    self.ds = self.dataset

    self._initialize_state_variables()

    mylog.debug("Initializing data storage.")
    self._initialize_data_storage()

    mylog.debug("Setting up domain geometry.")
    self._setup_geometry()

    mylog.debug("Initializing data grid data IO")
    self._setup_data_io()

    # Field detection falls under the "geometry" step because it is
    # potentially quite expensive and belongs with the indexing.
    mylog.debug("Detecting fields.")
    self._detect_output_fields()