The following 50 code examples, extracted from open-source Python projects, illustrate how to use the numpy.testing module.
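Before the project examples, here is a minimal, self-contained sketch (not drawn from any of the projects below) of the assertions that recur throughout: assert_array_equal for exact element-wise comparison, and assert_almost_equal / assert_allclose for floating-point comparison with a tolerance. Each assertion raises AssertionError with a diff-style message on failure, which is why the tests below need no explicit if/raise logic.

import numpy
import numpy.testing

a = numpy.array([1, 2, 3])
numpy.testing.assert_array_equal(a, [1, 2, 3])  # exact, element-wise

b = numpy.array([0.1, 0.2]) + numpy.array([0.2, 0.1])
# Rounded comparison: agreement to 7 decimal places (the default).
numpy.testing.assert_almost_equal(b, [0.3, 0.3], decimal=7)
# Tolerance-based comparison: relative error below rtol.
numpy.testing.assert_allclose(b, [0.3, 0.3], rtol=1e-6)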
def test_sort_tensor_by_length(self):
    tensor = torch.rand([5, 7, 9])
    tensor[0, 3:, :] = 0
    tensor[1, 4:, :] = 0
    tensor[2, 1:, :] = 0
    tensor[3, 5:, :] = 0
    tensor = Variable(tensor)
    sequence_lengths = Variable(torch.LongTensor([3, 4, 1, 5, 7]))
    sorted_tensor, sorted_lengths, reverse_indices, _ = util.sort_batch_by_length(tensor, sequence_lengths)

    # Test sorted indices are padded correctly.
    numpy.testing.assert_array_equal(sorted_tensor[1, 5:, :].data.numpy(), 0.0)
    numpy.testing.assert_array_equal(sorted_tensor[2, 4:, :].data.numpy(), 0.0)
    numpy.testing.assert_array_equal(sorted_tensor[3, 3:, :].data.numpy(), 0.0)
    numpy.testing.assert_array_equal(sorted_tensor[4, 1:, :].data.numpy(), 0.0)

    assert sorted_lengths.data.equal(torch.LongTensor([7, 5, 4, 3, 1]))

    # Test restoration indices correctly recover the original tensor.
    assert sorted_tensor.index_select(0, reverse_indices).data.equal(tensor.data)
def test_weighted_sum_handles_3d_attention_with_3d_matrix(self):
    batch_size = 1
    length_1 = 5
    length_2 = 2
    embedding_dim = 4
    sentence_array = numpy.random.rand(batch_size, length_2, embedding_dim)
    attention_array = numpy.random.rand(batch_size, length_1, length_2)
    sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
    attention_tensor = Variable(torch.from_numpy(attention_array).float())
    aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
    assert aggregated_array.shape == (batch_size, length_1, embedding_dim)
    for i in range(length_1):
        expected_array = (attention_array[0, i, 0] * sentence_array[0, 0] +
                          attention_array[0, i, 1] * sentence_array[0, 1])
        numpy.testing.assert_almost_equal(aggregated_array[0, i], expected_array,
                                          decimal=5)
def test_batched_index_select(self):
    indices = numpy.array([[[1, 2], [3, 4]],
                           [[5, 6], [7, 8]]])
    # Each element is a vector of its index.
    targets = torch.ones([2, 10, 3]).cumsum(1) - 1
    # Make the second batch double its index so they're different.
    targets[1, :, :] *= 2
    indices = Variable(torch.LongTensor(indices))
    targets = Variable(targets)
    selected = util.batched_index_select(targets, indices)
    assert list(selected.size()) == [2, 2, 2, 3]
    ones = numpy.ones([3])
    numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
    numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
    numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
    numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
    numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 10)
    numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 12)
    numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 14)
    numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 16)
def test_process_image(compress, out_dir):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)
    meta = {
        "DNA": "/User/jcaciedo/LUAD/dna.tiff",
        "ER": "/User/jcaciedo/LUAD/er.tiff",
        "Mito": "/User/jcaciedo/LUAD/mito.tiff"
    }
    compress.stats["illum_correction_function"] = numpy.ones((16, 16, 3))
    compress.stats["upper_percentiles"] = [255, 255, 255]
    compress.stats["lower_percentiles"] = [0, 0, 0]

    compress.process_image(0, image, meta)

    filenames = glob.glob(os.path.join(out_dir, "*"))
    real_filenames = [os.path.join(out_dir, x)
                      for x in ["dna.png", "er.png", "mito.png"]]
    filenames.sort()

    assert real_filenames == filenames

    for i in range(3):
        data = scipy.misc.imread(filenames[i])
        numpy.testing.assert_array_equal(image[:, :, i], data)
def test_apply(corrector):
    image = numpy.random.randint(256, size=(24, 24, 3), dtype=numpy.uint16)
    illum_corr_func = numpy.random.rand(24, 24, 3)
    illum_corr_func /= illum_corr_func.min()
    corrector.illum_corr_func = illum_corr_func

    corrected = corrector.apply(image)
    expected = image / illum_corr_func

    assert corrected.shape == (24, 24, 3)
    numpy.testing.assert_array_equal(corrected, expected)
def test_forward_probability2():
    from numpy.testing import assert_array_almost_equal
    model, states, symbols, seq = _wikipedia_example_hmm()
    fp = 2**model._forward_probability(seq)

    # examples in wikipedia are normalized
    fp = (fp.T / fp.sum(axis=1)).T

    wikipedia_results = [
        [0.8182, 0.1818],
        [0.8834, 0.1166],
        [0.1907, 0.8093],
        [0.7308, 0.2692],
        [0.8673, 0.1327],
    ]

    assert_array_almost_equal(wikipedia_results, fp, 4)
def test_backward_probability():
    from numpy.testing import assert_array_almost_equal
    model, states, symbols, seq = _wikipedia_example_hmm()
    bp = 2**model._backward_probability(seq)

    # examples in wikipedia are normalized
    bp = (bp.T / bp.sum(axis=1)).T

    wikipedia_results = [
        # Forward-backward algorithm doesn't need b0_5,
        # so .backward_probability doesn't compute it.
        # [0.6469, 0.3531],
        [0.5923, 0.4077],
        [0.3763, 0.6237],
        [0.6533, 0.3467],
        [0.6273, 0.3727],
        [0.5, 0.5],
    ]

    assert_array_almost_equal(wikipedia_results, bp, 4)
def test_log_bf():
    import numpy.testing as test
    sep = numpy.array([0., 0.1, 0.2, 0.3, 0.4, 0.5])
    for psi in sep:
        print(psi)
        print('  ', log_bf2(psi, 0.1, 0.2))
        print('  ', log_bf([[None, psi]], [0.1, 0.2]))
        test.assert_almost_equal(log_bf2(psi, 0.1, 0.2),
                                 log_bf([[None, psi]], [0.1, 0.2]))
    for psi in sep:
        print(psi)
        bf3 = log_bf3(psi, psi, psi, 0.1, 0.2, 0.3)
        print('  ', bf3)
        g = log_bf([[None, psi, psi], [psi, None, psi], [psi, psi, None]],
                   [0.1, 0.2, 0.3])
        print('  ', g)
        test.assert_almost_equal(bf3, g)
    q = numpy.zeros(len(sep))
    print(log_bf(numpy.array([[numpy.nan + sep, sep, sep],
                              [sep, numpy.nan + sep, sep],
                              [sep, sep, numpy.nan + sep]]),
                 [0.1 + q, 0.2 + q, 0.3 + q]))
def test_warning_calls():
    # combined "ignore" and stacklevel error
    base = Path(numpy.__file__).parent
    for path in base.rglob("*.py"):
        if base / "testing" in path.parents:
            continue
        if path == base / "__init__.py":
            continue
        if path == base / "random" / "__init__.py":
            continue
        # use tokenize to auto-detect encoding on systems where no
        # default encoding is defined (e.g. LANG='C')
        with tokenize.open(str(path)) as file:
            tree = ast.parse(file.read())
            FindFuncs(path).visit(tree)
def test_diag(self):
    # test that it builds a matrix with given diagonal when using
    # vector inputs
    x = theano.tensor.vector()
    y = diag(x)
    assert y.owner.op.__class__ == AllocDiag

    # test that it extracts the diagonal when using matrix input
    x = theano.tensor.matrix()
    y = extract_diag(x)
    assert y.owner.op.__class__ == ExtractDiag

    # other types should raise error
    x = theano.tensor.tensor3()
    ok = False
    try:
        y = extract_diag(x)
    except TypeError:
        ok = True
    assert ok

    # not testing the view=True case since it is not used anywhere.
def test_cholesky_and_cholesky_grad_shape():
    if not imported_scipy:
        raise SkipTest("Scipy needed for the Cholesky op.")

    rng = numpy.random.RandomState(utt.fetch_seed())
    x = tensor.matrix()
    for l in (cholesky(x), Cholesky(lower=True)(x), Cholesky(lower=False)(x)):
        f_chol = theano.function([x], l.shape)
        g = tensor.grad(l.sum(), x)
        f_cholgrad = theano.function([x], g.shape)
        topo_chol = f_chol.maker.fgraph.toposort()
        topo_cholgrad = f_cholgrad.maker.fgraph.toposort()
        if config.mode != 'FAST_COMPILE':
            assert sum([node.op.__class__ == Cholesky
                        for node in topo_chol]) == 0
            assert sum([node.op.__class__ == CholeskyGrad
                        for node in topo_cholgrad]) == 0
        for shp in [2, 3, 5]:
            m = numpy.cov(rng.randn(shp, shp + 10)).astype(config.floatX)
            yield numpy.testing.assert_equal, f_chol(m), (shp, shp)
            yield numpy.testing.assert_equal, f_cholgrad(m), (shp, shp)
def test_anchor():
    x = numpy.array(
        [[-84., -40., 99., 55.],
         [-176., -88., 191., 103.],
         [-360., -184., 375., 199.],
         [-56., -56., 71., 71.],
         [-120., -120., 135., 135.],
         [-248., -248., 263., 263.],
         [-36., -80., 51., 95.],
         [-80., -168., 95., 183.],
         [-168., -344., 183., 359.]]
    )
    y = keras_rcnn.backend.anchor(
        scales=keras.backend.cast([8, 16, 32], keras.backend.floatx()))
    y = keras.backend.eval(y)
    numpy.testing.assert_array_almost_equal(x, y)
def test_clip():
    boxes = numpy.array([[0, 0, 0, 0],
                         [1, 2, 3, 4],
                         [-4, 2, 1000, 6000],
                         [3, -10, 223, 224]])
    shape = [224, 224]
    boxes = keras.backend.variable(boxes)
    results = keras_rcnn.backend.clip(boxes, shape)
    results = keras.backend.eval(results)
    expected = numpy.array([[0, 0, 0, 0],
                            [1, 2, 3, 4],
                            [0, 2, 223, 223],
                            [3, 0, 223, 223]])
    numpy.testing.assert_array_almost_equal(results, expected)

    boxes = numpy.reshape(numpy.arange(200, 200 + 12 * 5), (-1, 12))
    shape = [224, 224]
    boxes = keras.backend.variable(boxes)
    results = keras_rcnn.backend.clip(boxes, shape)
    results = keras.backend.eval(results)
    expected = numpy.array(
        [[200, 201, 202, 203, 204, 205, 206, 207, 208, 209, 210, 211],
         [212, 213, 214, 215, 216, 217, 218, 219, 220, 221, 222, 223],
         [223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223],
         [223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223],
         [223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223, 223]])
    numpy.testing.assert_array_almost_equal(results, expected, 0)
def test_bbox_transform():
    gt_rois = numpy.array([[-84., -40., 99., 55.],
                           [-176., -88., 191., 103.],
                           [-360., -184., 375., 199.],
                           [-56., -56., 71., 71.],
                           [-120., -120., 135., 135.],
                           [-248., -248., 263., 263.],
                           [-36., -80., 51., 95.],
                           [-80., -168., 95., 183.],
                           [-168., -344., 183., 359.]])
    ex_rois = 2 * gt_rois
    gt_rois = keras.backend.variable(gt_rois)
    ex_rois = keras.backend.variable(ex_rois)
    results = keras_rcnn.backend.bbox_transform(ex_rois, gt_rois)
    results = keras.backend.eval(results)
    expected = numpy.array(
        [[-0.02043597, -0.03926702, -0.69042609, -0.68792524],
         [-0.01020408, -0.01958225, -0.69178756, -0.69053962],
         [-0.00509857, -0.00977836, -0.6924676, -0.69184425],
         [-0.02941176, -0.02941176, -0.68923328, -0.68923328],
         [-0.0146771, -0.0146771, -0.69119215, -0.69119215],
         [-0.00733138, -0.00733138, -0.69217014, -0.69217014],
         [-0.04285714, -0.02136752, -0.68744916, -0.69030223],
         [-0.02136752, -0.01066856, -0.69030223, -0.69172572],
         [-0.01066856, -0.00533049, -0.69172572, -0.6924367]])
    numpy.testing.assert_array_almost_equal(results, expected)
def test_scale_enum():
    anchor = numpy.expand_dims(numpy.array([0, 0, 0, 0]), 0)
    scales = numpy.array([1, 2, 3])
    anchor = keras.backend.variable(anchor)
    scales = keras.backend.variable(scales)
    results = keras_rcnn.backend.common._scale_enum(anchor, scales)
    results = keras.backend.eval(results)
    expected = numpy.array([[0, 0, 0, 0],
                            [-0.5, -0.5, 0.5, 0.5],
                            [-1., -1., 1., 1.]])
    numpy.testing.assert_array_equal(results, expected)

    anchor = keras.backend.cast(
        numpy.expand_dims(numpy.array([2, 3, 100, 100]), 0), 'float32')
    anchor = keras.backend.variable(anchor)
    results = keras_rcnn.backend.common._scale_enum(anchor, scales)
    results = keras.backend.eval(results)
    expected = numpy.array([[2., 3., 100., 100.],
                            [-47.5, -46., 149.5, 149.],
                            [-97., -95., 199., 198.]])
    numpy.testing.assert_array_equal(results, expected)
def test_whctrs():
    anchor = keras.backend.cast(keras.backend.expand_dims([0, 0, 0, 0], 0),
                                'float32')
    results0, results1, results2, results3 = \
        keras_rcnn.backend.common._whctrs(anchor)
    results = numpy.array([keras.backend.eval(results0),
                           keras.backend.eval(results1),
                           keras.backend.eval(results2),
                           keras.backend.eval(results3)])
    expected = numpy.expand_dims([1, 1, 0, 0], 1)
    numpy.testing.assert_array_equal(results, expected)

    anchor = keras.backend.cast(keras.backend.expand_dims([2, 3, 100, 100], 0),
                                'float32')
    results0, results1, results2, results3 = \
        keras_rcnn.backend.common._whctrs(anchor)
    results = numpy.array([keras.backend.eval(results0),
                           keras.backend.eval(results1),
                           keras.backend.eval(results2),
                           keras.backend.eval(results3)])
    expected = numpy.expand_dims([99, 98, 51, 51.5], 1)
    numpy.testing.assert_array_equal(results, expected)
def test_smooth_l1():
    output = keras.backend.variable(
        [[[2.5, 0.0, 0.4, 0.0],
          [0.0, 0.0, 0.0, 0.0],
          [0.0, 2.5, 0.0, 0.4]],
         [[3.5, 0.0, 0.0, 0.0],
          [0.0, 0.4, 0.0, 0.9],
          [0.0, 0.0, 1.5, 0.0]]]
    )
    target = keras.backend.zeros_like(output)

    x = keras_rcnn.backend.smooth_l1(output, target)
    numpy.testing.assert_approx_equal(keras.backend.eval(x), 8.645)

    weights = keras.backend.variable([[2, 1, 1],
                                      [0, 3, 0]])
    x = keras_rcnn.backend.smooth_l1(output, target, weights=weights)
    numpy.testing.assert_approx_equal(keras.backend.eval(x), 7.695)
def assert_gwas_1(unit_test, gwas):
    expected_snp = pandas.Series(["rs1666", "rs1", "rs2", "rs3", "rs4",
                                  "rs6", "rs7", "rs7666", "rs8", "rs9"],
                                 dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[SNP], expected_snp)

    expected_effect = pandas.Series(["A", "C", "C", "G", "A",
                                     "G", "T", "A", "A", "A"],
                                    dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[EFFECT_ALLELE], expected_effect)

    expected_non_effect = pandas.Series(["G", "T", "T", "A", "G",
                                         "A", "C", "G", "G", "G"],
                                        dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[NON_EFFECT_ALLELE], expected_non_effect)

    expected_zscore = pandas.Series([0.3, -0.2, 0.5, 1.3, -0.3,
                                     2.9, 4.35, 1.3, 0.09, 0.09],
                                    dtype=numpy.float32)
    numpy.testing.assert_allclose(gwas[ZSCORE], expected_zscore, rtol=0.001)

    expected_chromosome = pandas.Series(["chr1", "chr1", "chr1", "chr1", "chr1",
                                         "chr1", "chr1", "chr1", "chr1", "chr1"],
                                        dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[CHROMOSOME], expected_chromosome)

    expected_position = pandas.Series([0, 1, 5, 20, 30, 42, 43, 45, 50, 70])
    numpy.testing.assert_array_equal(gwas[POSITION], expected_position)
def assert_gwas_2(unit_test, gwas):
    expected_snp = pandas.Series(["rsC", "rs1666", "rs1", "rs2", "rs4",
                                  "rsB", "rsA", "rs7666", "rs8", "rs9"],
                                 dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[SNP], expected_snp)

    expected_effect = pandas.Series(["T", "A", "C", "C", "A",
                                     "G", "G", "A", "A", "A"],
                                    dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[EFFECT_ALLELE], expected_effect)

    expected_non_effect = pandas.Series(["C", "G", "T", "T", "G",
                                         "A", "A", "G", "G", "G"],
                                        dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[NON_EFFECT_ALLELE], expected_non_effect)

    expected_zscore = pandas.Series([4.35, 0.3, -0.2, 1.3, -0.3,
                                     2.9, 1.3, 1.3, 0.09, 0.09],
                                    dtype=numpy.float32)
    numpy.testing.assert_allclose(gwas[ZSCORE], expected_zscore, rtol=0.001)

    expected_chromosome = pandas.Series(["chr1", "chr1", "chr1", "chr1", "chr1",
                                         "chr1", "chr1", "chr1", "chr1", "chr1"],
                                        dtype=numpy.str)
    numpy.testing.assert_array_equal(gwas[CHROMOSOME], expected_chromosome)

    expected_position = pandas.Series([None, None, None, None, None,
                                       None, None, None, None, None])
    numpy.testing.assert_array_equal(gwas[POSITION], expected_position)
def test_load_model(self):
    snp_model = PredictionModel.load_model("tests/_td/dbs/test_1.db")

    e_e = SampleData.dataframe_from_extra(SampleData.sample_extra_2())
    numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_GENE],
                                     e_e[PredictionModel.WDBEQF.K_GENE])
    numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_GENE_NAME],
                                     e_e[PredictionModel.WDBEQF.K_GENE_NAME])
    numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_N_SNP_IN_MODEL],
                                     e_e[PredictionModel.WDBEQF.K_N_SNP_IN_MODEL])
    numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_PRED_PERF_R2],
                                     e_e[PredictionModel.WDBEQF.K_PRED_PERF_R2])
    numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_PRED_PERF_PVAL],
                                     e_e[PredictionModel.WDBEQF.K_PRED_PERF_PVAL])
    numpy.testing.assert_array_equal(snp_model.extra[PredictionModel.WDBEQF.K_PRED_PERF_QVAL],
                                     e_e[PredictionModel.WDBEQF.K_PRED_PERF_QVAL])

    e_w = SampleData.dataframe_from_weights(SampleData.sample_weights_2())
    numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_RSID],
                                     e_w[PredictionModel.WDBQF.K_RSID])
    numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_GENE],
                                     e_w[PredictionModel.WDBQF.K_GENE])
    numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_WEIGHT],
                                     e_w[PredictionModel.WDBQF.K_WEIGHT])
    numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_NON_EFFECT_ALLELE],
                                     e_w[PredictionModel.WDBQF.K_NON_EFFECT_ALLELE])
    numpy.testing.assert_array_equal(snp_model.weights[PredictionModel.WDBQF.K_EFFECT_ALLELE],
                                     e_w[PredictionModel.WDBQF.K_EFFECT_ALLELE])
def test_from_load(self):
    m = MatrixManager.load_matrix_manager("tests/_td/cov/cov.txt.gz")

    snps, cov = m.get("ENSG00000239789.1")
    self.assertEqual(snps, cov_data.SNPS_ENSG00000239789_1)
    numpy.testing.assert_array_almost_equal(cov, cov_data.COV_ENSG00000239789_1)

    n = m.n_snps("ENSG00000239789.1")
    self.assertEqual(n, len(cov_data.SNPS_ENSG00000239789_1))

    with self.assertRaises(Exceptions.InvalidArguments) as ctx:
        snps, cov = m.get("ENSG00000183742.8", ["rs7806506", "rs12718973"])
    self.assertTrue("whitelist" in ctx.exception.message)  #?

    whitelist = ["rs3094989", "rs7806506", "rs12536095", "rs10226814"]
    snps, cov = m.get("ENSG00000183742.8", whitelist)
    self.assertEqual(snps, cov_data.SNPS_ENSG00000183742_8_w)
    numpy.testing.assert_array_almost_equal(cov, cov_data.COV_ENSG00000183742_8_w)

    snps, cov = m.get("ENSG00000004766.11")
    self.assertEqual(snps, cov_data.SNPS_ENSG00000004766_11)
    numpy.testing.assert_array_almost_equal(cov, cov_data.COV_ENSG00000004766_11)

    n = m.n_snps("ENSG00000004766.11")
    self.assertEqual(n, len(cov_data.COV_ENSG00000004766_11))
def test_get_sequence_lengths_from_binary_mask(self):
    binary_mask = torch.ByteTensor([[1, 1, 1, 0, 0, 0],
                                    [1, 1, 0, 0, 0, 0],
                                    [1, 1, 1, 1, 1, 1],
                                    [1, 0, 0, 0, 0, 0]])
    lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
    numpy.testing.assert_array_equal(lengths.numpy(), numpy.array([3, 2, 6, 1]))
def test_get_sequence_lengths_converts_to_long_tensor_and_avoids_variable_overflow(self):
    # Tests the following weird behaviour in Pytorch 0.1.12
    # doesn't happen for our sequence masks:
    #
    # mask = torch.ones([260]).byte()
    # mask.sum()  # equals 260.
    # var_mask = torch.autograd.Variable(mask)
    # var_mask.sum()  # equals 4, due to 8 bit precision - the sum overflows.
    binary_mask = Variable(torch.ones(2, 260).byte())
    lengths = util.get_lengths_from_binary_sequence_mask(binary_mask)
    numpy.testing.assert_array_equal(lengths.data.numpy(), numpy.array([260, 260]))
def test_weighted_sum_works_on_simple_input(self):
    batch_size = 1
    sentence_length = 5
    embedding_dim = 4
    sentence_array = numpy.random.rand(batch_size, sentence_length, embedding_dim)
    sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
    attention_tensor = Variable(torch.FloatTensor([[.3, .4, .1, 0, 1.2]]))
    aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
    assert aggregated_array.shape == (batch_size, embedding_dim)
    expected_array = (0.3 * sentence_array[0, 0] +
                      0.4 * sentence_array[0, 1] +
                      0.1 * sentence_array[0, 2] +
                      0.0 * sentence_array[0, 3] +
                      1.2 * sentence_array[0, 4])
    numpy.testing.assert_almost_equal(aggregated_array, [expected_array], decimal=5)
def test_weighted_sum_handles_higher_order_input(self):
    batch_size = 1
    length_1 = 5
    length_2 = 6
    length_3 = 2
    embedding_dim = 4
    sentence_array = numpy.random.rand(batch_size, length_1, length_2,
                                       length_3, embedding_dim)
    attention_array = numpy.random.rand(batch_size, length_1, length_2, length_3)
    sentence_tensor = Variable(torch.from_numpy(sentence_array).float())
    attention_tensor = Variable(torch.from_numpy(attention_array).float())
    aggregated_array = util.weighted_sum(sentence_tensor, attention_tensor).data.numpy()
    assert aggregated_array.shape == (batch_size, length_1, length_2, embedding_dim)
    expected_array = (attention_array[0, 3, 2, 0] * sentence_array[0, 3, 2, 0] +
                      attention_array[0, 3, 2, 1] * sentence_array[0, 3, 2, 1])
    numpy.testing.assert_almost_equal(aggregated_array[0, 3, 2], expected_array,
                                      decimal=5)
def test_flatten_and_batch_shift_indices(self):
    indices = numpy.array([[[1, 2, 3, 4],
                            [5, 6, 7, 8],
                            [9, 9, 9, 9]],
                           [[2, 1, 0, 7],
                            [7, 7, 2, 3],
                            [0, 0, 4, 2]]])
    indices = Variable(torch.LongTensor(indices))
    shifted_indices = util.flatten_and_batch_shift_indices(indices, 10)
    numpy.testing.assert_array_equal(
        shifted_indices.data.numpy(),
        numpy.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 9, 9, 9,
                     12, 11, 10, 17, 17, 17, 12, 13, 10, 10, 14, 12]))
def test_flattened_index_select(self):
    indices = numpy.array([[1, 2],
                           [3, 4]])
    targets = torch.ones([2, 6, 3]).cumsum(1) - 1
    # Make the second batch double its index so they're different.
    targets[1, :, :] *= 2
    indices = Variable(torch.LongTensor(indices))
    targets = Variable(targets)
    selected = util.flattened_index_select(targets, indices)
    assert list(selected.size()) == [2, 2, 2, 3]
    ones = numpy.ones([3])
    numpy.testing.assert_array_equal(selected[0, 0, 0, :].data.numpy(), ones)
    numpy.testing.assert_array_equal(selected[0, 0, 1, :].data.numpy(), ones * 2)
    numpy.testing.assert_array_equal(selected[0, 1, 0, :].data.numpy(), ones * 3)
    numpy.testing.assert_array_equal(selected[0, 1, 1, :].data.numpy(), ones * 4)
    numpy.testing.assert_array_equal(selected[1, 0, 0, :].data.numpy(), ones * 2)
    numpy.testing.assert_array_equal(selected[1, 0, 1, :].data.numpy(), ones * 4)
    numpy.testing.assert_array_equal(selected[1, 1, 0, :].data.numpy(), ones * 6)
    numpy.testing.assert_array_equal(selected[1, 1, 1, :].data.numpy(), ones * 8)

    # Check we only accept 2D indices.
    with pytest.raises(ConfigurationError):
        util.flattened_index_select(targets, torch.ones([3, 4, 5]))
def test_bucket_values(self):
    indices = torch.LongTensor([1, 2, 7, 1, 56, 900])
    bucketed_distances = util.bucket_values(indices)
    numpy.testing.assert_array_equal(bucketed_distances.numpy(),
                                     numpy.array([1, 2, 5, 1, 8, 9]))
def get_package_name(filepath):
    """
    Given a path where a package is installed, determine its name.

    Parameters
    ----------
    filepath : str
        Path to a file. If the determination fails, "numpy" is returned.

    Examples
    --------
    >>> np.testing.nosetester.get_package_name('nonsense')
    'numpy'

    """
    fullpath = filepath[:]
    pkg_name = []
    while 'site-packages' in filepath or 'dist-packages' in filepath:
        filepath, p2 = os.path.split(filepath)
        if p2 in ('site-packages', 'dist-packages'):
            break
        pkg_name.append(p2)

    # if package name determination failed, just default to numpy/scipy
    if not pkg_name:
        if 'scipy' in fullpath:
            return 'scipy'
        else:
            return 'numpy'

    # otherwise, reverse to get correct order and return
    pkg_name.reverse()

    # don't include the outer egg directory
    if pkg_name[0].endswith('.egg'):
        pkg_name.pop(0)

    return '.'.join(pkg_name)
def __init__(self, package=None, raise_warnings="release", depth=0):
    # Back-compat: 'None' used to mean either "release" or "develop"
    # depending on whether this was a release or develop version of
    # numpy. Those semantics were fine for testing numpy, but not so
    # helpful for downstream projects like scipy that use
    # numpy.testing. (They want to set this based on whether *they* are
    # a release or develop version, not whether numpy is.) So we
    # continue to accept 'None' for back-compat, but it's now just an
    # alias for the default "release".
    if raise_warnings is None:
        raise_warnings = "release"

    package_name = None
    if package is None:
        f = sys._getframe(1 + depth)
        package_path = f.f_locals.get('__file__', None)
        if package_path is None:
            raise AssertionError
        package_path = os.path.dirname(package_path)
        package_name = f.f_locals.get('__name__', None)
    elif isinstance(package, type(os)):
        package_path = os.path.dirname(package.__file__)
        package_name = getattr(package, '__name__', None)
    else:
        package_path = str(package)

    self.package_path = package_path

    # Find the package name under test; this name is used to limit coverage
    # reporting (if enabled).
    if package_name is None:
        package_name = get_package_name(package_path)
    self.package_name = package_name

    # Set to "release" in constructor in maintenance branches.
    self.raise_warnings = raise_warnings
def test_init(illumination_stats):
    histogram = numpy.zeros((3, 2**16), dtype=numpy.float64)
    assert illumination_stats.depth == 2**16
    assert illumination_stats.channels == ["DNA", "ER", "Mito"]
    assert illumination_stats.name == ""
    assert illumination_stats.down_scale_factor == 2
    assert illumination_stats.median_filter_size == 3
    numpy.testing.assert_array_equal(illumination_stats.hist, histogram)
    assert illumination_stats.count == 0
    assert illumination_stats.expected == 1
    assert illumination_stats.mean_image is None
    assert illumination_stats.original_image_size is None
def test_add_to_mean_no_scaling(illumination_stats):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)
    illumination_stats.down_scale_factor = 1

    illumination_stats.addToMean(image)

    assert illumination_stats.mean_image.shape == (16, 16, 3)
    # This method rescales the input image and normalizes pixels according to
    # the data type. We restore the values in this test to match the input
    # for comparison.
    result_mean = illumination_stats.mean_image  # * (2 ** 16)
    numpy.testing.assert_array_equal(
        numpy.round(result_mean).astype(numpy.uint16), image)
def test_add_to_mean_with_scaling(illumination_stats):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)

    illumination_stats.addToMean(image)

    assert illumination_stats.mean_image.shape == (8, 8, 3)
    result_mean = illumination_stats.mean_image
    assert result_mean.sum() > 0
    # numpy.testing.assert_array_equal(result_mean.astype(numpy.uint16), image)
def test_process_image(illumination_stats):
    numpy.random.seed(8)
    image = numpy.random.randint(256, size=(16, 16, 3), dtype=numpy.uint16)

    illumination_stats.processImage(0, image, None)

    histogram1 = numpy.histogram(image[:, :, 0], bins=2**16, range=(0, 2**16))[0]
    histogram2 = numpy.histogram(image[:, :, 1], bins=2**16, range=(0, 2**16))[0]
    histogram3 = numpy.histogram(image[:, :, 2], bins=2**16, range=(0, 2**16))[0]

    assert illumination_stats.count == 1
    numpy.testing.assert_array_equal(illumination_stats.hist[0], histogram1)
    numpy.testing.assert_array_equal(illumination_stats.hist[1], histogram2)
    numpy.testing.assert_array_equal(illumination_stats.hist[2], histogram3)
def test_init(compress, out_dir):
    stats = {"original_size": [16, 16]}
    channels = ["DNA", "ER", "Mito"]
    control_distribution = numpy.zeros((3, 2**8), dtype=numpy.float64)
    assert compress.stats == stats
    assert compress.channels == channels
    assert compress.out_dir == out_dir
    assert compress.count == 0
    assert compress.expected == 1
    assert not compress.metadata_control_filter("x")
    numpy.testing.assert_array_equal(compress.controls_distribution,
                                     control_distribution)
    assert compress.source_format == "tiff"
    assert compress.target_format == "png"
    assert compress.output_shape == [16, 16]
def test_set_control_samples_filter(compress):
    test_filter = lambda x: True
    control_distribution = numpy.zeros((3, 2**8), dtype=numpy.float64)

    compress.set_control_samples_filter(test_filter)

    assert compress.metadata_control_filter(1)
    numpy.testing.assert_array_equal(compress.controls_distribution,
                                     control_distribution)