We extracted the following 50 code examples from open-source Python projects to illustrate how io.BufferedReader() is used.
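Before the project examples, here is a minimal, self-contained sketch (not taken from any of the projects below; the sample bytes are invented) of the basic pattern most of them rely on: wrapping a raw byte stream in io.BufferedReader to get buffered, file-like reads over it.

import io

# Minimal sketch, not from any project listed below: wrap an in-memory
# byte stream in a BufferedReader, much like the tests below do with
# io.BufferedReader(io.BytesIO(...)).
raw = io.BytesIO(b"id,name\n1,alpha\n2,beta\n")
reader = io.BufferedReader(raw)

print(reader.peek(7))     # look ahead without consuming bytes
print(reader.readline())  # b'id,name\n'
print(reader.read())      # remaining bytes
reader.close()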
def test_resource_form_create_valid(self, mock_open):
    dataset = Dataset()
    app = self._get_test_app()
    env, response = _get_resource_new_page_as_sysadmin(app, dataset['id'])
    form = response.forms['resource-edit']

    upload = ('upload', 'valid.csv', VALID_CSV)
    valid_stream = io.BufferedReader(io.BytesIO(VALID_CSV))

    with mock.patch('io.open', return_value=valid_stream):
        submit_and_follow(app, form, env, 'save', upload_files=[upload])

    dataset = call_action('package_show', id=dataset['id'])

    assert_equals(dataset['resources'][0]['validation_status'], 'success')
    assert 'validation_timestamp' in dataset['resources'][0]
def backport_makefile(self, mode="r", buffering=None, encoding=None,
                      errors=None, newline=None):
    """
    Backport of ``socket.makefile`` from Python 3.5.
    """
    if not set(mode) <= set(["r", "w", "b"]):
        raise ValueError(
            "invalid mode %r (only r, w, b allowed)" % (mode,)
        )
    writing = "w" in mode
    reading = "r" in mode or not writing
    assert reading or writing
    binary = "b" in mode
    rawmode = ""
    if reading:
        rawmode += "r"
    if writing:
        rawmode += "w"
    raw = SocketIO(self, rawmode)
    self._makefile_refs += 1
    if buffering is None:
        buffering = -1
    if buffering < 0:
        buffering = io.DEFAULT_BUFFER_SIZE
    if buffering == 0:
        if not binary:
            raise ValueError("unbuffered streams must be binary")
        return raw
    if reading and writing:
        buffer = io.BufferedRWPair(raw, raw, buffering)
    elif reading:
        buffer = io.BufferedReader(raw, buffering)
    else:
        assert writing
        buffer = io.BufferedWriter(raw, buffering)
    if binary:
        return buffer
    text = io.TextIOWrapper(buffer, encoding, errors, newline)
    text.mode = mode
    return text
def test_resource_form_create_invalid(self, mock_open):
    dataset = Dataset()
    app = self._get_test_app()
    env, response = _get_resource_new_page_as_sysadmin(app, dataset['id'])
    form = response.forms['resource-edit']

    upload = ('upload', 'invalid.csv', INVALID_CSV)
    invalid_stream = io.BufferedReader(io.BytesIO(INVALID_CSV))

    with mock.patch('io.open', return_value=invalid_stream):
        response = webtest_submit(
            form, 'save', upload_files=[upload], extra_environ=env)

    assert_in('validation', response.body)
    assert_in('missing-value', response.body)
    assert_in('Row 2 has a missing value in column 4', response.body)
def test_resource_form_update_valid(self, mock_open):
    dataset = Dataset(resources=[
        {
            'url': 'https://example.com/data.csv'
        }
    ])
    app = self._get_test_app()
    env, response = _get_resource_update_page_as_sysadmin(
        app, dataset['id'], dataset['resources'][0]['id'])
    form = response.forms['resource-edit']

    upload = ('upload', 'valid.csv', VALID_CSV)
    valid_stream = io.BufferedReader(io.BytesIO(VALID_CSV))

    with mock.patch('io.open', return_value=valid_stream):
        submit_and_follow(app, form, env, 'save', upload_files=[upload])

    dataset = call_action('package_show', id=dataset['id'])

    assert_equals(dataset['resources'][0]['validation_status'], 'success')
    assert 'validation_timestamp' in dataset['resources'][0]
def test_resource_form_update_invalid(self, mock_open):
    dataset = Dataset(resources=[
        {
            'url': 'https://example.com/data.csv'
        }
    ])
    app = self._get_test_app()
    env, response = _get_resource_update_page_as_sysadmin(
        app, dataset['id'], dataset['resources'][0]['id'])
    form = response.forms['resource-edit']

    upload = ('upload', 'invalid.csv', INVALID_CSV)
    invalid_stream = io.BufferedReader(io.BytesIO(INVALID_CSV))

    with mock.patch('io.open', return_value=invalid_stream):
        response = webtest_submit(
            form, 'save', upload_files=[upload], extra_environ=env)

    assert_in('validation', response.body)
    assert_in('missing-value', response.body)
    assert_in('Row 2 has a missing value in column 4', response.body)
def test_validation_fails_on_upload(self, mock_open):
    invalid_file = StringIO.StringIO()
    invalid_file.write(INVALID_CSV)

    mock_upload = MockFieldStorage(invalid_file, 'invalid.csv')

    dataset = factories.Dataset()

    invalid_stream = io.BufferedReader(io.BytesIO(INVALID_CSV))

    with mock.patch('io.open', return_value=invalid_stream):
        with assert_raises(t.ValidationError) as e:
            call_action(
                'resource_create',
                package_id=dataset['id'],
                format='CSV',
                upload=mock_upload
            )

    assert 'validation' in e.exception.error_dict
    assert 'missing-value' in str(e.exception)
    assert 'Row 2 has a missing value in column 4' in str(e.exception)
def test_validation_passes_on_upload(self, mock_open):
    invalid_file = StringIO.StringIO()
    invalid_file.write(VALID_CSV)

    mock_upload = MockFieldStorage(invalid_file, 'invalid.csv')

    dataset = factories.Dataset()

    valid_stream = io.BufferedReader(io.BytesIO(VALID_CSV))

    with mock.patch('io.open', return_value=valid_stream):
        resource = call_action(
            'resource_create',
            package_id=dataset['id'],
            format='CSV',
            upload=mock_upload
        )

    assert_equals(resource['validation_status'], 'success')
    assert 'validation_timestamp' in resource
def __init__(self, data=None, stream=None):
    if data:
        self.stream = BytesIO(data)
    elif stream:
        self.stream = stream
    else:
        raise InvalidParameterError(
            'Either bytes or a stream must be provided')

    self.reader = BufferedReader(self.stream)
    self._last = None  # Should come in handy to spot -404 errors

    # region Reading

    # "All numbers are written as little endian."
    # https://core.telegram.org/mtproto
def __init__(self, data: Union[FileIO, BufferedReader]) -> None:
    chunk_type = data.read(4)
    if chunk_type != b'MThd':
        raise ValueError("File had invalid header chunk type")

    header_length = int.from_bytes(data.read(4), 'big')
    if header_length != 6:
        raise ValueError("File has unsupported header length")
    self.length = header_length

    format = int.from_bytes(data.read(2), 'big')
    if format not in [0, 1, 2]:
        raise ValueError("File has unsupported format")
    self.format = format

    ntrks = int.from_bytes(data.read(2), 'big')
    if ntrks > 0 and format == 0:
        raise ValueError("Multiple tracks in single track format")
    self.ntrks = ntrks

    self.tpqn = int.from_bytes(data.read(2), 'big')
def _body_file__get(self):
    """
        Input stream of the request (wsgi.input).

        Setting this property resets the content_length and seekable flag
        (unlike setting req.body_file_raw).
    """
    if not self.is_body_readable:
        return io.BytesIO()
    r = self.body_file_raw
    clen = self.content_length
    if not self.is_body_seekable and clen is not None:
        # we need to wrap input in LimitedLengthFile
        # but we have to cache the instance as well
        # otherwise this would stop working
        # (.remaining counter would reset between calls):
        #   req.body_file.read(100)
        #   req.body_file.read(100)
        env = self.environ
        wrapped, raw = env.get('webob._body_file', (0, 0))
        if raw is not r:
            wrapped = LimitedLengthFile(r, clen)
            wrapped = io.BufferedReader(wrapped)
            env['webob._body_file'] = wrapped, r
        r = wrapped
    return r
def test_dem(self):
    d0 = dem.Dem.open('./test_data/test.dem')
    d0.close()

    d0.save(self.buff)
    self.buff.seek(0)

    b = io.BufferedReader(self.buff)
    d1 = dem.Dem.open(b)

    self.assertEqual(d1.cd_track, '2', 'Cd track should be 2')
    self.assertEqual(len(d1.message_blocks), 168,
                     'The demo should have 168 message blocks')

    last_message_of_first_block = d1.message_blocks[0].messages[-1]
    self.assertTrue(isinstance(last_message_of_first_block, dem.SignOnNum),
                    'The last message of the first block should be a SignOnNum')
    self.assertEqual(last_message_of_first_block.sign_on, 1,
                     'Sign on value should be 1')
    self.assertTrue(isinstance(d1.message_blocks[-1].messages[0], dem.Disconnect),
                    'The last message should be a Disconnect')

    self.assertFalse(d1.fp.closed, 'File should be open')

    fp = d1.fp
    d1.close()

    self.assertTrue(fp.closed, 'File should be closed')
    self.assertIsNone(d1.fp, 'File pointer should be cleaned up')
def import_from_node(node_id, network):
    # Create temporary SDO client
    sdo_client = SdoClient(0x600 + node_id, 0x580 + node_id, None)
    sdo_client.network = network
    # Subscribe to SDO responses
    network.subscribe(0x580 + node_id, sdo_client.on_response)
    # Create file like object for Store EDS variable
    try:
        eds_fp = ReadableStream(sdo_client, 0x1021)
        eds_fp = io.BufferedReader(eds_fp)
        eds_fp = io.TextIOWrapper(eds_fp, "ascii")
        od = import_eds(eds_fp, node_id)
    except Exception as e:
        logger.error("No object dictionary could be loaded for node %d: %s",
                     node_id, e)
        od = None
    finally:
        network.unsubscribe(0x580 + node_id)
    return od
def extractfile(self, member):
    """Extract a member from the archive as a file object. `member' may be
       a filename or a TarInfo object. If `member' is a regular file or a
       link, an io.BufferedReader object is returned. Otherwise, None is
       returned.
    """
    self._check("r")

    if isinstance(member, str):
        tarinfo = self.getmember(member)
    else:
        tarinfo = member

    if tarinfo.isreg() or tarinfo.type not in SUPPORTED_TYPES:
        # Members with unknown types are treated as regular files.
        return self.fileobject(self, tarinfo)

    elif tarinfo.islnk() or tarinfo.issym():
        if isinstance(self.fileobj, _Stream):
            # A small but ugly workaround for the case that someone tries
            # to extract a (sym)link as a file-object from a non-seekable
            # stream of tar blocks.
            raise StreamError("cannot extract (sym)link as file object")
        else:
            # A (sym)link's file object is its target's file object.
            return self.extractfile(self._find_link_target(tarinfo))
    else:
        # If there's no data associated with the member (directory, chrdev,
        # blkdev, etc.), return None instead of a file object.
        return None
def test_validation_fails_on_upload(self, mock_open):
    dataset = factories.Dataset(resources=[
        {
            'url': 'https://example.com/data.csv'
        }
    ])

    invalid_file = StringIO.StringIO()
    invalid_file.write(INVALID_CSV)

    mock_upload = MockFieldStorage(invalid_file, 'invalid.csv')

    invalid_stream = io.BufferedReader(io.BytesIO(INVALID_CSV))

    with mock.patch('io.open', return_value=invalid_stream):
        with assert_raises(t.ValidationError) as e:
            call_action(
                'resource_update',
                id=dataset['resources'][0]['id'],
                format='CSV',
                upload=mock_upload
            )

    assert 'validation' in e.exception.error_dict
    assert 'missing-value' in str(e.exception)
    assert 'Row 2 has a missing value in column 4' in str(e.exception)
def test_validation_fails_no_validation_object_stored(self, mock_open):
    dataset = factories.Dataset(resources=[
        {
            'url': 'https://example.com/data.csv'
        }
    ])

    invalid_file = StringIO.StringIO()
    invalid_file.write(INVALID_CSV)

    mock_upload = MockFieldStorage(invalid_file, 'invalid.csv')

    invalid_stream = io.BufferedReader(io.BytesIO(INVALID_CSV))

    validation_count_before = model.Session.query(Validation).count()

    with mock.patch('io.open', return_value=invalid_stream):
        with assert_raises(t.ValidationError):
            call_action(
                'resource_update',
                id=dataset['resources'][0]['id'],
                format='CSV',
                upload=mock_upload
            )

    validation_count_after = model.Session.query(Validation).count()

    assert_equals(validation_count_after, validation_count_before)
def test_validation_passes_on_upload(self, mock_open):
    dataset = factories.Dataset(resources=[
        {
            'url': 'https://example.com/data.csv'
        }
    ])

    valid_file = StringIO.StringIO()
    valid_file.write(INVALID_CSV)

    mock_upload = MockFieldStorage(valid_file, 'valid.csv')

    valid_stream = io.BufferedReader(io.BytesIO(VALID_CSV))

    with mock.patch('io.open', return_value=valid_stream):
        resource = call_action(
            'resource_update',
            id=dataset['resources'][0]['id'],
            format='CSV',
            upload=mock_upload
        )

    assert_equals(resource['validation_status'], 'success')
    assert 'validation_timestamp' in resource
def test_job_local_paths_are_hidden(self, mock_open):
    invalid_csv = 'id,type\n' + '1,a,\n' * 1010

    invalid_file = StringIO.StringIO()
    invalid_file.write(invalid_csv)

    mock_upload = MockFieldStorage(invalid_file, 'invalid.csv')

    resource = factories.Resource(format='csv', upload=mock_upload)

    invalid_stream = io.BufferedReader(io.BytesIO(invalid_csv))

    with mock.patch('io.open', return_value=invalid_stream):

        run_validation_job(resource)

        validation = Session.query(Validation).filter(
            Validation.resource_id == resource['id']).one()

        source = validation.report['tables'][0]['source']
        assert source.startswith('http')
        assert source.endswith('invalid.csv')

        warning = validation.report['warnings'][0]
        assert_equals(
            warning, 'Table inspection has reached 1000 row(s) limit')
def test_job_pass_validation_options_string(self, mock_open):
    invalid_csv = '''
a;b;c
#comment
1;2;3
'''

    validation_options = '''{
    "headers": 3,
    "skip_rows": ["#"]
}'''

    invalid_file = StringIO.StringIO()
    invalid_file.write(invalid_csv)

    mock_upload = MockFieldStorage(invalid_file, 'invalid.csv')

    resource = factories.Resource(
        format='csv',
        upload=mock_upload,
        validation_options=validation_options)

    invalid_stream = io.BufferedReader(io.BytesIO(invalid_csv))

    with mock.patch('io.open', return_value=invalid_stream):

        run_validation_job(resource)

        validation = Session.query(Validation).filter(
            Validation.resource_id == resource['id']).one()

        assert_equals(validation.report['valid'], True)
def __init__(self, data=None, stream=None):
    if data:
        self.stream = BytesIO(data)
    elif stream:
        self.stream = stream
    else:
        raise InvalidParameterError(
            'Either bytes or a stream must be provided')

    self.reader = BufferedReader(self.stream)

    # region Reading

    # "All numbers are written as little endian."
    # Source: https://core.telegram.org/mtproto
def save_document(self, document: str or BufferedReader or provmodel.ProvDocument) -> list:
    """
    Write a document into BigchainDB

    :param document: Document as JSON/XML/PROVN
    :type document: str or BufferedReader or ProvDocument
    :return: List of transaction ids
    :rtype: list
    """
    log.info("Save document...")
    document_tx_ids = []
    prov_document = utils.to_prov_document(content=document)
    elements = GraphConceptClient.calculate_account_data(prov_document)
    id_mapping = {}
    log.info("Create and Save instances")
    for prov_element, prov_relations, namespaces in elements:
        for rel in prov_relations['with_id']:
            id_mapping[rel.identifier] = ''
    for prov_element, prov_relations, namespaces in elements:
        account = accounts.GraphConceptAccount(prov_element, prov_relations,
                                               id_mapping, namespaces, self.store)
        self.accounts.append(account)
        tx_id = account.save_instance_asset(self._get_bigchain_connection())
        document_tx_ids.append(tx_id)
    log.info("Save relations with ids")
    for account in filter(lambda acc: acc.has_relations_with_id, self.accounts):
        document_tx_ids += account.save_relations_with_ids(self._get_bigchain_connection())
    log.info("Save relations without ids")
    for account in filter(lambda acc: acc.has_relations_without_id, self.accounts):
        document_tx_ids += account.save_relations_without_ids(self._get_bigchain_connection())
    log.info("Saved document in %s Tx", len(document_tx_ids))
    return document_tx_ids
def save_document(self, document: str or BufferedReader or provmodel.ProvDocument) -> list:
    """
    Write a document into BigchainDB

    :param document: Document as JSON/XML/PROVN
    :type document: str or BufferedReader or ProvDocument
    :return: List of transaction ids
    :rtype: list
    """
    log.info("Save document...")
    document_tx_ids = []
    prov_document = utils.to_prov_document(content=document)
    account_data = RoleConceptClient.calculate_account_data(prov_document)
    id_mapping = {}
    log.info("Create and Save instances")
    for agent, relations, elements, namespaces in account_data:
        account = accounts.RoleConceptAccount(agent, relations, elements,
                                              id_mapping, namespaces, self.store)
        self.accounts.append(account)
        tx_id = account.save_instance_asset(self._get_bigchain_connection())
        document_tx_ids.append(tx_id)
    log.info("Save elements")
    for account in self.accounts:
        document_tx_ids += account.save_elements(self._get_bigchain_connection())
    log.info("Saved document in %s Tx", len(document_tx_ids))
    return document_tx_ids
def __enter__(self):
    self.reader = io.BufferedReader(self.raw)
    return self
def _get_text_stdin(buffer_stream):
    text_stream = _NonClosingTextIOWrapper(
        io.BufferedReader(_WindowsConsoleReader(STDIN_HANDLE)),
        'utf-16-le', 'strict', line_buffering=True)
    return ConsoleStream(text_stream, buffer_stream)
def read_gzip(filepath):
    # handles buffering
    # TODO: profile whether this is fast.
    with gzip.open(filepath, 'rb') as f:
        # leave in binary mode (default), let TextIOWrapper decode
        with io.BufferedReader(f, buffer_size=2**18) as g:  # 256KB buffer
            with io.TextIOWrapper(g) as h:  # bytes -> unicode
                yield h
def __init__(self, path, *args, **kwargs):
    super(WlTrace, self).__init__()
    self.path = path
    self.fh = io.BufferedReader(io.open(path, 'rb'))
    self.counter = 1
    self.pkt_queue = collections.deque()
    self.has_phy_info = False
    self.fix_timestamp = kwargs.get('fix_timestamp', False)
def isfileobj(f):
    return isinstance(f, (io.FileIO, io.BufferedReader, io.BufferedWriter))
def _use_gzip(self):
    i = 0
    with gzip.open(self.filename) as gz_file:
        with io.BufferedReader(gz_file) as f:
            for line in f:
                i += 1
    return i
def _makefile(sock, mode):
    return io.BufferedReader(SocketIO(sock, mode))
def setUp(self):
    self.mock_file = MagicMock()  # type: Union[BufferedReader, MagicMock]
    self.mock_file.read = MagicMock(
        side_effect=[b'\x84', b'\x18', b'\x00', b'\x3C'])  # type: MagicMock
def sequence_number(data: Union[FileIO, BufferedReader]) -> Tuple[int, int, bytearray]:
    length_bytes = bytearray(data.read(4))
    length = int.from_bytes(length_bytes, "big")
    if length != 2:
        raise EventLengthError(
            "Sequence Number length was incorrect. It should be 2, "
            "but it was {}".format(length))
    sequence_num_raw = bytearray(data.read(2))
    sequence_num = int.from_bytes(sequence_num_raw, "big")
    return length, sequence_num, sequence_num_raw