我们从Python开源项目中,提取了以下19个代码示例,用于说明如何使用future.utils.native_str()。
def __new__(cls, offset, name=_Omitted):
    """Construct a fixed-offset timezone.

    Validates *offset* (a whole number of minutes, strictly between
    -24h and +24h) and *name* (a string, or omitted), then delegates to
    ``cls._create``.  When *offset* is zero and *name* omitted, the shared
    ``cls.utc`` singleton is returned instead of a new instance.
    """
    if not isinstance(offset, timedelta):
        raise TypeError("offset must be a timedelta")
    if name is cls._Omitted:
        # No name given: zero offset maps to the canonical UTC singleton,
        # otherwise the instance simply has no name.
        if not offset:
            return cls.utc
        name = None
    elif not isinstance(name, str):
        ###
        # For Python-Future:
        # On Py2 accept a byte string name and decode it to unicode;
        # any other non-str type is rejected.
        if PY2 and isinstance(name, native_str):
            name = name.decode()
        else:
            raise TypeError("name must be a string")
        ###
    # Offset bounds are exclusive at both ends (cls._minoffset/_maxoffset).
    if not cls._minoffset <= offset <= cls._maxoffset:
        raise ValueError("offset must be a timedelta"
                         " strictly between -timedelta(hours=24) and"
                         " timedelta(hours=24).")
    # Only whole minutes are representable: no microseconds, seconds % 60 == 0.
    if (offset.microseconds != 0 or
            offset.seconds % 60 != 0):
        raise ValueError("offset must be a timedelta"
                         " representing a whole number of minutes")
    return cls._create(offset, name)
def reprocess_content(self):
    """Re-parse all previously imported email content.

    Allows re-parsing all content as if it were imported for the first
    time, but without re-hitting the source or changing the object ids.
    Call when a code change would change the representation in the
    database.
    """
    session = self.db
    # All emails belonging to this source, with parents eagerly loaded so
    # threading below does not trigger per-row queries.
    emails = session.query(Email).filter(
        Email.source_id == self.id,
    ).options(joinedload_all(Email.parent))
    for email in emails:
        blob = email.imported_blob
        # Stored blob may be bytes; parse_email wants a native str.
        if not isinstance(blob, native_str):
            blob = blob.decode('ascii')
        (email_object, dummy, error) = self.parse_email(blob, email)
        # Commit per email so a failure partway keeps earlier work.
        session.commit()
    # Re-thread the full set once everything is re-parsed.
    with transaction.manager:
        self.thread_mails(emails)
def __init__(self, lib, dtype,
             N, C, K,
             D, H, W,
             T, R, S,
             M, P, Q,
             pad_d, pad_h, pad_w,
             str_d, str_h, str_w,
             dil_d, dil_h, dil_w):
    """Set up the autotuned direct weight-update convolution kernel."""
    assert N % 4 == 0, "N dim must be multiple of 4"
    super(UpdateDirect, self).__init__(
        lib, dtype, N, C, K, D, H, W, T, R, S, M, P, Q,
        pad_d, pad_h, pad_w, str_d, str_h, str_w, dil_d, dil_h, dil_w)
    sm_count = _get_sm_count()
    # Key identifying this problem configuration in the autotune cache.
    key_fields = ("direct_updat_64x32", sm_count, dtype.itemsize,
                  lib.deterministic > 0,
                  N, C, K, D, H, W, T, R, S, M, P, Q)
    self.autotune_key = " ".join(map(native_str, key_fields))
    # insert Python version in filename to avoid Py2/Py3 incompatibilities in shelve
    db_name = "autotune%d.db" % sys.version_info[0]
    self.autotune_db_file = os.path.join(lib.cache_dir, db_name)
    self.init()
    lib.set_scratch_size(self.output_trans.size)
    # allow for .5 seconds worth of warmup when autotuning
    # assume 5 Tflops on 24 SMs
    flops = M * P * Q * K * N * C * T * R * S * 2.0
    self.warmup = min(max(int(2e12 / flops * (sm_count / 24.0)), 1), 5000)
def __init__(self, op, lib, dtype,
             N, C, K, H, W, P, Q, pad_h, pad_w,
             filter_extern=None, bprop=False):
    """Set up the autotuned 2x2/3x3 Winograd fprop/bprop kernel."""
    super(XpropWinograd_2x2_3x3, self).__init__(
        lib, dtype, N, C, K, 1, H, W, 1, 3, 3, 1, P, Q, 0, pad_h, pad_w,
        1, 1, 1, 1, 1, 1, bprop)
    sm_count = _get_sm_count()
    # Key identifying this problem configuration in the autotune cache.
    key_fields = (op + "_2x2_3x3", sm_count, dtype.itemsize,
                  N, C, K, H, W, P, Q)
    self.autotune_key = " ".join(map(native_str, key_fields))
    # insert Python version in filename to avoid Py2/Py3 incompatibilities in shelve
    db_name = "autotune%d.db" % sys.version_info[0]
    self.autotune_db_file = os.path.join(lib.cache_dir, db_name)
    # allow for .5 seconds worth of warmup when autotuning
    # assume 10 Tflops on 24 SMs
    flops = P * Q * K * N * C * 9 * 2.0
    self.warmup = min(max(int(5e12 / flops * (sm_count / 24.0)), 1), 1000)
    if filter_extern is None:
        self.init()
    else:
        # allow manual override for unit testing
        self.initialized = True
        self.init(autotune=1, filter_extern=filter_extern)
    lib.set_scratch_size(self.filter_trans.size, self.bsum.size)
def __init__(self, lib, dtype,
             N, C, K,
             D, H, W,
             T, R, S,
             M, P, Q,
             pad_d, pad_h, pad_w,
             str_d, str_h, str_w,
             dil_d, dil_h, dil_w):
    """Set up the autotuned 3x3/2x2 Winograd weight-update kernel."""
    # Support N = 1,2 and multiples of 4 for now
    assert N in (1, 2) or N % 4 == 0
    super(UpdateWinograd_3x3_2x2, self).__init__(
        lib, dtype, N, C, K, 1, H, W, 1, 3, 3, 1, P, Q, 0, pad_h, pad_w,
        1, 1, 1, 1, 1, 1)
    sm_count = _get_sm_count()
    # NOTE(review): kept as a list here (not joined) — presumably extended
    # or joined later by the autotune machinery; confirm before changing.
    self.autotune_key = [native_str(field) for field in
                         ("update_3x3_2x2", sm_count, 0, dtype.itemsize,
                          N, C, K, H, W, P, Q)]
    # insert Python version in filename to avoid Py2/Py3 incompatibilities in shelve
    db_name = "autotune%d.db" % sys.version_info[0]
    self.autotune_db_file = os.path.join(lib.cache_dir, db_name)
    self.init()
    lib.set_scratch_size(self.image_trans.size, self.output_trans.size)
    # allow for .5 seconds worth of warmup when autotuning
    # assume 10 Tflops on 24 SMs
    flops = P * Q * K * N * C * 9 * 2.0
    self.warmup = min(max(int(5e12 / flops * (sm_count / 24.0)), 1), 1000)
def __init__(self, op, lib, dtype,
             N, C, K, H, W, P, Q, pad_h, pad_w,
             external=None, bprop=False):
    """Set up the autotuned 4x4/3x3 Winograd fprop/bprop kernel."""
    super(XpropWinograd_4x4_3x3, self).__init__(
        lib, dtype, N, C, K, 1, H, W, 1, 3, 3, 1, P, Q, 0, pad_h, pad_w,
        1, 1, 1, 1, 1, 1, bprop)
    sm_count = _get_sm_count()
    # Key identifying this problem configuration in the autotune cache.
    key_fields = (op + "_4x4_3x3", sm_count, dtype.itemsize,
                  N, C, K, H, W, P, Q)
    self.autotune_key = " ".join(map(native_str, key_fields))
    # insert Python version in filename to avoid Py2/Py3 incompatibilities in shelve
    db_name = "autotune%d.db" % sys.version_info[0]
    self.autotune_db_file = os.path.join(lib.cache_dir, db_name)
    # allow for .5 seconds worth of warmup when autotuning
    # assume 10 Tflops on 24 SMs
    flops = P * Q * K * N * C * 9 * 2.0
    self.warmup = min(max(int(5e12 / flops * (sm_count / 24.0)), 1), 1000)
    if external is None:
        self.init()
    else:
        # allow override for unit testing
        self.initialized = True
        self.init(autotune=1, external=external)
    lib.set_scratch_size(self.image_size, self.filter_trans.size,
                         self.bsum.size)
def __init__(self, lib, dtype,
             N, C, K,
             D, H, W,
             T, R, S,
             M, P, Q,
             pad_d, pad_h, pad_w,
             str_d, str_h, str_w,
             dil_d, dil_h, dil_w):
    """Set up the autotuned 3x3/4x4 Winograd weight-update kernel."""
    super(UpdateWinograd_3x3_4x4, self).__init__(
        lib, dtype, N, C, K, 1, H, W, 1, 3, 3, 1, P, Q, 0, pad_h, pad_w,
        1, 1, 1, 1, 1, 1)
    sm_count = _get_sm_count()
    # NOTE(review): kept as a list here (not joined) — presumably extended
    # or joined later by the autotune machinery; confirm before changing.
    self.autotune_key = [native_str(field) for field in
                         ("update_3x3_4x4", sm_count, 0, dtype.itemsize,
                          N, C, K, H, W, P, Q)]
    # insert Python version in filename to avoid Py2/Py3 incompatibilities in shelve
    db_name = "autotune%d.db" % sys.version_info[0]
    self.autotune_db_file = os.path.join(lib.cache_dir, db_name)
    self.init()
    lib.set_scratch_size(self.image_size, self.delta_size,
                         self.output_trans.size)
    # allow for .5 seconds worth of warmup when autotuning
    # assume 10 Tflops on 24 SMs
    flops = P * Q * K * N * C * 9 * 2.0
    self.warmup = min(max(int(5e12 / flops * (sm_count / 24.0)), 1), 1000)
def generate_component_annotation_miriam_match(model, components):
    """
    Tabulate which MIRIAM databases the component's annotation match.

    Parameters
    ----------
    model : cobra.Model
        A cobrapy metabolic model.
    components : {"metabolites", "reactions", "genes"}
        A string denoting `cobra.Model` components.

    Returns
    -------
    pandas.DataFrame
        The index of the table is given by the component identifiers.
        Each column corresponds to one MIRIAM database and a Boolean
        entry determines whether the annotation matches.
    """
    # NOTE(review): only "metabolites" and "reactions" have pattern tables
    # here; "genes" (listed above) would raise KeyError — confirm intent.
    patterns = {
        "metabolites": METABOLITE_ANNOTATIONS,
        "reactions": REACTION_ANNOTATIONS
    }[components]
    databases = list(patterns)

    def matches(db, annotation):
        # Absent key means no match for that database.
        if db not in annotation:
            return False
        value = annotation[db]
        regex = patterns[db]
        # A single identifier is stored as a native string; otherwise the
        # value is an iterable of identifiers, all of which must match.
        if isinstance(value, native_str):
            return regex.match(value) is not None
        return all(regex.match(item) is not None for item in value)

    rows = []
    identifiers = []
    for component in getattr(model, components):
        identifiers.append(component.id)
        rows.append(tuple(matches(db, component.annotation)
                          for db in databases))
    return pd.DataFrame(rows, index=identifiers, columns=databases)
def configure_zmq(sockdef, multiplex):
    """Configure the module-global ZMQ changes-socket settings.

    Parameters
    ----------
    sockdef : str
        Socket definition (native string) stored in ``CHANGES_SOCKET``.
    multiplex :
        Value stored unchanged in ``MULTIPLEX``.

    Raises
    ------
    TypeError
        If *sockdef* is not a native string.
    """
    global CHANGES_SOCKET, MULTIPLEX
    # Explicit raise instead of `assert`: asserts are stripped when running
    # under `python -O`, so validation would silently disappear in
    # optimized mode.
    if not isinstance(sockdef, native_str):
        raise TypeError(
            "sockdef must be a native str, got %r" % type(sockdef))
    CHANGES_SOCKET = sockdef
    MULTIPLEX = multiplex