The following 50 code examples, extracted from open-source Python projects, illustrate how to use theano._asarray().
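Before the extracted examples, here is a minimal usage sketch (not part of the extracted set): as far as the Theano codebase documents it, theano._asarray behaves like numpy.asarray but takes a mandatory dtype and guarantees the returned ndarray actually has that dtype, which is why the snippets below use it when filling output storage or building shared variables.

# Minimal sketch of the typical call pattern; dtype choices here are
# illustrative, not taken from any of the extracted examples.
import numpy
import theano
from theano import config

# Convert a Python list to an ndarray whose dtype is forced to floatX.
x = theano._asarray([1, 2, 3], dtype=config.floatX)
assert x.dtype == config.floatX

# A common pattern from the examples below: wrap random data in a
# float32 array before handing it to a shared variable.
w = theano.shared(theano._asarray(numpy.random.rand(3, 4), dtype='float32'))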
def perform(self, node, inputs, output_storage):
    """
    Calculate ROC AUC score.

    Parameters
    ----------
    node : Apply instance
        Symbolic inputs and outputs.
    inputs : list
        Sequence of inputs.
    output_storage : list
        List of mutable 1-element lists.
    """
    if roc_auc_score is None:
        raise RuntimeError("Could not import from sklearn.")
    y_true, y_score = inputs
    try:
        roc_auc = roc_auc_score(y_true, y_score)
    except ValueError:
        roc_auc = np.nan
    # rvalue = np.array((roc_auc, prec, reca, f1))  # [0][0]
    output_storage[0][0] = theano._asarray(roc_auc, dtype=config.floatX)
def local_mean_subtraction(input, kernel_size=5):
    input_shape = (input.shape[0], 1, input.shape[1], input.shape[2])
    input = input.reshape(input_shape).astype(floatX)
    X = T.tensor4(dtype=floatX)

    filter_shape = (1, 1, kernel_size, kernel_size)
    filters = mean_filter(kernel_size).reshape(filter_shape)
    filters = shared(_asarray(filters, dtype=floatX), borrow=True)

    mean = conv2d(input=X,
                  filters=filters,
                  input_shape=input.shape,
                  filter_shape=filter_shape,
                  border_mode='half')
    new_X = X - mean

    f = function([X], new_X)
    return f(input)
def perform(self, node, inp, outs):
    x, axes = inp
    max, max_idx = outs
    if axes is None:
        axes = tuple(range(x.ndim))
    else:
        axes = tuple(int(ax) for ax in axes)
    max[0] = theano._asarray(numpy.max(x, axes),
                             dtype=node.outputs[0].dtype)
    # Numpy does not support multiple axes for argmax
    # Work around
    keep_axes = numpy.array([i for i in range(x.ndim) if i not in axes],
                            dtype='int64')
    # Not-reduced axes in front
    transposed_x = numpy.transpose(x, numpy.concatenate((keep_axes, axes)))
    kept_shape = transposed_x.shape[:len(keep_axes)]
    reduced_shape = transposed_x.shape[len(keep_axes):]
    new_shape = kept_shape + (numpy.prod(reduced_shape),)
    reshaped_x = transposed_x.reshape(new_shape)
    max_idx[0] = theano._asarray(numpy.argmax(reshaped_x, axis=-1),
                                 dtype='int64')
def mul_calculate(num, denum, aslist=False, out_type=None):
    if not num and not denum:
        # Smallest 1 possible.
        if aslist:
            return []
        else:
            return numpy.int8(1)

    # Make sure we do not accidentally upcast data types.
    if out_type is None:
        out_dtype = scalar.upcast(*[v.dtype for v in (num + denum)])
    else:
        out_dtype = out_type.dtype
    one = theano._asarray(1, dtype=out_dtype)

    v = reduce(numpy.multiply, num, one) / reduce(numpy.multiply, denum, one)
    if aslist:
        if numpy.all(v == 1):
            return []
        else:
            return [v]
    return v
def c_code(self, node, name, inp, out, sub):
    x, = inp
    z, = out
    # These constants were obtained by looking at the output of
    # python commands like:
    #   for i in xrange(750):
    #       print i, repr(numpy.log1p(numpy.exp(theano._asarray([i, -i], dtype=dt))))
    # the boundary checks prevent us from generating inf

    # float16 limits: -17.0, 6.0
    # We use the float32 limits for float16 for now as the
    # computation will happen in float32 anyway.
    if (node.inputs[0].type == scalar.float32 or
            node.inputs[0].type == scalar.float16):
        return """%(z)s = %(x)s < -103.0f ? 0.0 : %(x)s > 14.0f ? %(x)s : log1p(exp(%(x)s));""" % locals()
    elif node.inputs[0].type == scalar.float64:
        return """%(z)s = %(x)s < -745.0 ? 0.0 : %(x)s > 16.0 ? %(x)s : log1p(exp(%(x)s));""" % locals()
    else:
        raise NotImplementedError('only floatingpoint is implemented')
def test_givens(self):
    x = shared(0)
    assign = pfunc([], x, givens={x: 3})
    assert assign() == 3
    assert x.get_value(borrow=True) == 0

    y = tensor.ivector()
    f = pfunc([y], (y * x), givens={x: 6})
    assert numpy.all(f([1, 1, 1]) == [6, 6, 6])
    assert x.get_value() == 0

    z = tensor.ivector()
    c = z * y
    f = pfunc([y], (c + 7),
              givens={z: theano._asarray([4, 4, 4], dtype='int32')})
    assert numpy.all(f([1, 1, 1]) == [11, 11, 11])
    assert x.get_value() == 0
def test_elemwise_fusion():
    """ Test that the GpuElemwise fusion works correctly. """
    shape = (3, 4)
    a = cuda.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                                dtype='float32'), 'a')
    b = tensor.fmatrix()
    c = tensor.fmatrix()
    f = pfunc([b, c], [a + b + c], mode=mode_with_gpu)
    topo = f.maker.fgraph.toposort()
    for i, node in enumerate(topo):
        print(i, node, file=sys.stdout)
    assert len(topo) == 4
    assert isinstance(topo[2].op.scalar_op, theano.scalar.basic.Composite)
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(*shape), dtype='float32'),
      theano._asarray(numpy.random.rand(*shape), dtype='float32'))
def run_conv_nnet2_classif(use_gpu, seed, isize, ksize, bsize,
                           n_train=10, check_isfinite=True,
                           verbose=0, version=-1):
    """Run the train function returned by build_conv_nnet2_classif on one device."""
    utt.seed_rng(seed)  # Seeds numpy.random with seed
    train, params, x_shape, y_shape, mode = build_conv_nnet2_classif(
        use_gpu=use_gpu,
        isize=isize,
        ksize=ksize,
        n_batch=bsize,
        verbose=verbose,
        version=version,
        check_isfinite=check_isfinite)

    xval = my_rand(*x_shape)
    yval = my_rand(*y_shape)
    lr = theano._asarray(0.01, dtype='float32')

    rvals = my_zeros(n_train)
    for i in xrange(n_train):
        rvals[i] = train(xval, yval, lr)[0]
def test_elemwise0():
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(4, 4),
                                               dtype='float32'), 'a')
    b = tensor.fmatrix()
    f = pfunc([b], [], updates=[(a, a + b)], mode=mode_with_gpu)

    # check that we work inplace.
    assert (list(f.maker.fgraph.toposort()[1].op.destroy_map.items())
            == [(0, [0])])

    a0 = a.get_value() * 1.0
    f(numpy.ones((4, 4), dtype='float32'))
    assert numpy.all(a0 + 1.0 == a.get_value())
def test_elemwise3():
    """ Several kinds of elemwise expressions with dimension
    permutations and broadcasting"""
    shape = (3, 4, 5, 6)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    new_val = (a + b).dimshuffle([2, 0, 3, 1])
    new_val *= tensor.exp(1 + b ** a).dimshuffle([2, 0, 3, 1])
    f = pfunc([b], [], updates=[(a, new_val)], mode=mode_with_gpu)

    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(6), dtype='float32'))
def test_elemwise4():
    """ Test that two vectors can be broadcast to form an outer
    product (by performing a rank-1 matrix update)"""
    shape = (3, 4)
    a = tcn.shared_constructor(theano._asarray(numpy.random.rand(*shape),
                                               dtype='float32'), 'a')
    b = tensor.fvector()
    c = tensor.fvector()
    f = pfunc([b, c], [],
              updates=[(a, (a + b.dimshuffle('x', 0) * c.dimshuffle(0, 'x')))],
              mode=mode_with_gpu)

    has_elemwise = False
    for i, node in enumerate(f.maker.fgraph.toposort()):
        has_elemwise = has_elemwise or isinstance(node.op, tensor.Elemwise)
    assert not has_elemwise
    # let debugmode catch errors
    f(theano._asarray(numpy.random.rand(4), dtype='float32'),
      theano._asarray(numpy.random.rand(3), dtype='float32'))
def test_elemwise_comparaison_cast():
    """
    Test that an elemwise comparison followed by a cast to float32 is
    pushed to the gpu.
    """
    a = tensor.fmatrix()
    b = tensor.fmatrix()
    av = theano._asarray(numpy.random.rand(4, 4), dtype='float32')
    bv = numpy.ones((4, 4), dtype='float32')

    for g, ans in [(tensor.lt, av < bv), (tensor.gt, av > bv),
                   (tensor.le, av <= bv), (tensor.ge, av >= bv)]:
        f = pfunc([a, b], tensor.cast(g(a, b), 'float32'), mode=mode_with_gpu)
        out = f(av, bv)
        assert numpy.all(out == ans)
        assert any([isinstance(node.op, cuda.GpuElemwise)
                    for node in f.maker.fgraph.toposort()])
def speed_elemwise_collapse2():
    """ used to test the speed up of the generalised collapse of
    c-contiguous dims"""
    shape = (30, 40, 50, 600)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2[:, :, :, ::2]
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b * tensor.exp(1 + b ** a3)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    v = v[:, :, :, ::2]
    v = cuda_ndarray.CudaNdarray(v)
    time.time()
    for i in range(100):
        # let debugmode catch errors
        f(v)
    time.time()
def test_elemwise_collapse():
    """ Test when all inputs have one (and the same) broadcastable dimension """
    shape = (4, 5, 60)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, True, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 1, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse of all dimensions"
def test_elemwise_collapse2():
    """ Test when only one input has one broadcastable dimension """
    shape = (4, 5, 9)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = a3 + b
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(shape[0], 5, *shape[1:]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(shape[0], 1, *shape[1:]) + v)
    # print "Expected collapse to 3 dimensions"
def test_elemwise_collapse3():
    """ Test when only one input has two broadcastable dimensions, one
    at each end """
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v)
    # print "Expected collapse to 3 dimensions"
def test_elemwise_collapse4():
    """ Test when only one input has two broadcastable dimensions, one
    at each end, and we add a scalar"""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 0, 1, 'x')
    b = tcn.CudaNdarrayType((False, False, False, False))()
    c = (a3 + b + 2)
    f = pfunc([b], [c], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(5, shape[0], shape[1], 4),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, shape[0], shape[1], 1) + v + 2)
    # print "Expected collapse to 3 dimensions"
def test_elemwise_collapse6():
    """ Test when all inputs have two broadcastable dimensions at the
    beginning"""
    shape = (4, 5)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a, 'a')
    a3 = a2.dimshuffle('x', 'x', 0, 1)
    b = tcn.CudaNdarrayType((True, True, False, False))()
    f = pfunc([b], [a3 + b], mode=mode_with_gpu)

    v = theano._asarray(numpy.random.rand(1, 1, shape[0], shape[1]),
                        dtype='float32')
    v = cuda_ndarray.CudaNdarray(v)
    # let debugmode catch errors
    out = f(v)[0]
    assert numpy.allclose(out, a.reshape(1, 1, shape[0], shape[1]) + v)
    # print "Expected collapse to c contiguous"
def test_elemwise_collapse7(atol=1e-6):
    """ Test when one input has one broadcastable dimension and the
    other is a scalar"""
    shape = (5, 4, 1)
    a = cuda_ndarray.CudaNdarray(theano._asarray(numpy.random.rand(*shape),
                                                 dtype='float32'))
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
    a2 = tcn.shared_constructor(a.copy(), 'a')
    a3 = a2.dimshuffle(0, 'x', 1, 2)
    f = pfunc([], [a3 + 2], mode=mode_with_gpu)

    # let debugmode catch errors
    out = f()[0]
    ans = (a + 2).reshape(shape[0], 1, shape[1], shape[2])
    assert numpy.allclose(out, ans, atol=atol)
    # print "Expected collapse to c contiguous"
def test_host_to_device():
    # print >>sys.stdout, 'starting test_host_to_dev'
    for shape in ((), (3,), (2, 3), (3, 4, 5, 6)):
        a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        c = numpy.asarray(b)
        assert numpy.all(a == c)

        # test with float32 dtype
        d = numpy.asarray(b, dtype='float32')
        assert numpy.all(a == d)

        # test with not float32 dtype
        try:
            numpy.asarray(b, dtype='int8')
            assert False
        except TypeError:
            pass
def test_copy():
    # print >>sys.stdout, 'starting test_copy'
    shape = (500, 499)
    a = theano._asarray(numpy.random.rand(*shape), dtype='float32')

    # print >>sys.stdout, '.. creating device object'
    b = cuda_ndarray.CudaNdarray(a)

    # print >>sys.stdout, '.. copy'
    c = copy.copy(b)
    # print >>sys.stdout, '.. deepcopy'
    d = copy.deepcopy(b)

    # print >>sys.stdout, '.. comparisons'
    assert numpy.allclose(a, numpy.asarray(b))
    assert numpy.allclose(a, numpy.asarray(c))
    assert numpy.allclose(a, numpy.asarray(d))
    b += b
    assert numpy.allclose(a + a, numpy.asarray(b))
    assert numpy.allclose(a + a, numpy.asarray(c))
    assert numpy.allclose(a, numpy.asarray(d))
def test_getshape():
    shapelist = [
        ((1, 2, 3), (1, 2, 3)),
        ((1,), (1,)),
        ((1, 2, 3), (3, 2, 1)),
        ((1, 2, 3), (6,)),
        ((1, 2, 3, 2), (6, 2)),
        ((2, 3, 2), (6, 2))
    ]

    def subtest(shape):
        a = theano._asarray(numpy.random.rand(*shape), dtype='float32')
        b = cuda_ndarray.CudaNdarray(a)
        assert b.shape == a.shape

    for shape_1, shape_2 in shapelist:
        subtest(shape_1)
        subtest(shape_2)
def test_stride_manipulation():
    a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    b = cuda_ndarray.CudaNdarray(a)
    v = b.view()
    v._dev_data += 0
    c = numpy.asarray(v)
    assert numpy.all(a == c)

    sizeof_float = 4
    offset = 0

    b_strides = b._strides
    for i in xrange(len(b.shape)):
        offset += (b.shape[i] - 1) * b_strides[i]
        v._set_stride(i, -b_strides[i])

    v._dev_data += offset * sizeof_float
    c = numpy.asarray(v)

    assert numpy.all(c == [[5, 4, 3], [2, 1, 0]])
def test_setitem_matrixscalar0():
    a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray(8, dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # set an element to 8
    _a[1, 1] = _b
    a[1, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _a[1, 1] = theano._asarray(888, dtype='float32')
    a[1, 1] = theano._asarray(888, dtype='float32')
    assert numpy.allclose(a, numpy.asarray(_a))

    # broadcast a 0
    _a[1, 1] = 0
    _a[0:2] = 0
    _a[1:] = 0
def test_setitem_matrixvector1():
    a = theano._asarray([[0, 1, 2], [3, 4, 5]], dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([8, 9], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # set second column to 8,9
    _a[:, 1] = _b
    a[:, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _a[:, 1] = b * 100
    a[:, 1] = b * 100
    assert numpy.allclose(a, numpy.asarray(_a))

    row = theano._asarray([777, 888, 999], dtype='float32')
    _a[1, :] = row
    a[1, :] = row
    assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_matrix_tensor3():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8, 9], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # set middle row through cube to 7,8,9
    _a[:, 1, 1] = _b
    a[:, 1, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _a[:, 1, 1] = b * 100
    a[:, 1, 1] = b * 100
    assert numpy.allclose(a, numpy.asarray(_a))

    row = theano._asarray([777, 888, 999], dtype='float32')
    _a[1, 1, :] = row
    a[1, 1, :] = row
    assert numpy.allclose(a, numpy.asarray(_a))
def test_setitem_matrix_bad_ndim():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    try:
        # attempt to assign the ndarray b with setitem
        _a[:, :, 1] = _b
        assert False
    except ValueError:
        # print e
        assert True

    # test direct transfer from numpy
    try:
        # attempt to assign the ndarray b with setitem
        _a[1, :, :] = b
        assert False
    except ValueError:
        # print e
        assert True
def test_setitem_matrix_bad_type():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8], dtype='float64')

    # test direct transfer from numpy
    try:
        # attempt to assign the ndarray b with setitem
        _a[1, :, :] = b
        assert False
    except TypeError:
        # print e
        assert True
def test_setitem_assign_to_slice():
    a = numpy.arange(27)
    a.resize((3, 3, 3))
    a = theano._asarray(a, dtype='float32')
    _a = cuda_ndarray.CudaNdarray(a)

    b = theano._asarray([7, 8, 9], dtype='float32')
    _b = cuda_ndarray.CudaNdarray(b)

    # first get a slice of a
    _c = _a[:, :, 1]

    # set middle row through cube to 7,8,9
    # (this corresponds to middle row of matrix _c)
    _c[:, 1] = _b

    a[:, :, 1][:, 1] = b
    assert numpy.allclose(a, numpy.asarray(_a))

    # test direct transfer from numpy
    _d = _a[1, :, :]
    _d[1, :] = b * 10
    a[1, :, :][1, :] = b * 10
    assert numpy.allclose(a, numpy.asarray(_a))
def sharedX_value(value, name=None, borrow=None, dtype=None):
    """Share a single value after transforming it to floatX type.

    value: a value
    name: variable name (str)
    borrow: boolean
    dtype: the type of the value when shared. default: theano.config.floatX
    """
    if dtype is None:
        dtype = theano.config.floatX
    return theano.shared(
        theano._asarray(value, dtype=dtype), name=name, borrow=borrow)
def perform(self, node, inputs, output_storage):
    """
    Calculate precision.

    Parameters
    ----------
    node : Apply instance
        Symbolic inputs and outputs.
    inputs : list
        Sequence of inputs.
    output_storage : list
        List of mutable 1-element lists.
    """
    if roc_auc_score is None:
        raise RuntimeError("Could not import from sklearn.")
    y_true, y_score = inputs
    print(y_true.shape)
    y_true = np.argmax(y_true, axis=1)
    y_score = np.argmax(y_score, axis=1)
    # print(type(y_true), y_true.shape, type(y_score), y_score.shape)
    try:
        TP = np.sum(y_true[y_score == 1] == 1) * 1.  # / sum(y_true)
        FP = np.sum(y_true[y_score == 1] == 0) * 1.  # / (y_true.shape[0]-sum(y_true))
        prec = TP / (TP + FP + 1e-6)
    except ValueError:
        prec = np.nan
    # rvalue = np.array((roc_auc, prec, reca, f1))  # [0][0]
    output_storage[0][0] = theano._asarray(prec, dtype=config.floatX)
def perform(self, node, inputs, output_storage):
    """
    Calculate recall.

    Parameters
    ----------
    node : Apply instance
        Symbolic inputs and outputs.
    inputs : list
        Sequence of inputs.
    output_storage : list
        List of mutable 1-element lists.
    """
    if roc_auc_score is None:
        raise RuntimeError("Could not import from sklearn.")
    y_true, y_score = inputs
    y_true = np.argmax(y_true, axis=1)
    y_score = np.argmax(y_score, axis=1)
    try:
        TP = np.sum(y_true[y_score == 1] == 1) * 1.  # / sum(y_true)
        FN = np.sum(y_true[y_score == 0] == 1) * 1.  # / sum(y_true)
        reca = TP / (TP + FN + 1e-6)
    except ValueError:
        reca = np.nan
    # rvalue = np.array((roc_auc, prec, reca, f1))  # [0][0]
    output_storage[0][0] = theano._asarray(reca, dtype=config.floatX)
def perform(self, node, inputs, output_storage):
    """
    Calculate the F1 score.

    Parameters
    ----------
    node : Apply instance
        Symbolic inputs and outputs.
    inputs : list
        Sequence of inputs.
    output_storage : list
        List of mutable 1-element lists.
    """
    if roc_auc_score is None:
        raise RuntimeError("Could not import from sklearn.")
    y_true, y_score = inputs
    y_true = np.argmax(y_true, axis=1)
    y_score = np.argmax(y_score, axis=1)
    try:
        TP = np.sum(y_true[y_score == 1] == 1) * 1.  # / sum(y_true)
        FP = np.sum(y_true[y_score == 1] == 0) * 1.  # / (y_true.shape[0]-sum(y_true))
        # TN = np.sum(truey[predy==0]==0)*1. / (truey.shape[0]-sum(truey))
        FN = np.sum(y_true[y_score == 0] == 1) * 1.  # / sum(y_true)
        # prec = TP / (TP+FP+1e-6)
        # reca = TP / (TP+FN+1e-6)
        # f1 = 2*prec*reca / (prec+reca+1e-6)
        f1 = 2 * TP / (2 * TP + FP + FN)
    except ValueError:
        f1 = np.nan
    # rvalue = np.array((roc_auc, prec, reca, f1))  # [0][0]
    output_storage[0][0] = theano._asarray(f1, dtype=config.floatX)
def test_may_share_memory_scipy():
    a = scipy.sparse.csc_matrix(scipy.sparse.eye(5, 3))
    b = scipy.sparse.csc_matrix(scipy.sparse.eye(4, 3))

    def as_ar(a):
        return theano._asarray(a, dtype='int32')

    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, a.data, True), (a, a.indptr, True),
                        (a, a.indices, True), (a, as_ar(a.shape), False),
                        (a.data, a, True), (a.indptr, a, True),
                        (a.indices, a, True), (as_ar(a.shape), a, False),
                        (b, b.data, True), (b, b.indptr, True),
                        (b, b.indices, True), (b, as_ar(b.shape), False),
                        (b.data, b, True), (b.indptr, b, True),
                        (b.indices, b, True), (as_ar(b.shape), b, False),
                        (b.data, a, False), (b.indptr, a, False),
                        (b.indices, a, False), (as_ar(b.shape), a, False)]:
        assert may_share_memory(a_, b_) == rep
        assert may_share_memory(b_, a_) == rep

    # test that it raises an error when needed.
    for a_, b_, rep in [(a, (0,), False), (a, 1, False), (a, None, False)]:
        assert may_share_memory(a_, b_, False) == rep
        assert may_share_memory(b_, a_, False) == rep
        try:
            may_share_memory(a_, b_)
            raise Exception("An error was expected")
        except TypeError:
            pass
        try:
            may_share_memory(b_, a_)
            raise Exception("An error was expected")
        except TypeError:
            pass
def test_may_share_memory():
    a = scipy.sparse.csc_matrix(scipy.sparse.eye(5, 3))
    b = scipy.sparse.csc_matrix(scipy.sparse.eye(4, 3))

    as_ar = lambda a: theano._asarray(a, dtype='int32')
    for a_, b_, rep in [(a, a, True), (b, b, True), (a, b, False),
                        (a, a.data, True), (a, a.indptr, True),
                        (a, a.indices, True), (a, as_ar(a.shape), False),
                        (a.data, a, True), (a.indptr, a, True),
                        (a.indices, a, True), (as_ar(a.shape), a, False),
                        (b, b.data, True), (b, b.indptr, True),
                        (b, b.indices, True), (b, as_ar(b.shape), False),
                        (b.data, b, True), (b.indptr, b, True),
                        (b.indices, b, True), (as_ar(b.shape), b, False),
                        (b.data, a, False), (b.indptr, a, False),
                        (b.indices, a, False), (as_ar(b.shape), a, False),
                        (a.transpose(), a, True), (b.transpose(), b, True),
                        (a.transpose(), b, False), (b.transpose(), a, False)]:
        assert SparseType.may_share_memory(a_, b_) == rep
def perform(self, node, inputs, out):
    (csm,) = inputs
    out[0][0] = csm.data
    if str(csm.data.dtype) == 'int32':
        out[0][0] = theano._asarray(out[0][0], dtype='int32')  # backport
    out[1][0] = theano._asarray(csm.indices, dtype='int32')
    out[2][0] = theano._asarray(csm.indptr, dtype='int32')
    out[3][0] = theano._asarray(csm.shape, dtype='int32')
def perform(self, node, inputs, outputs):
    (x, ind1, ind2) = inputs
    (out,) = outputs
    assert _is_sparse(x)
    out[0] = theano._asarray(x[ind1, ind2], x.dtype)
def perform(self, node, inputs, outputs):
    (x, y) = inputs
    (out,) = outputs
    assert _is_dense(y)

    # The asarray is needed as in some cases this returns a
    # numpy.matrixlib.defmatrix.matrix object and not an ndarray.
    out[0] = theano._asarray(x + y, dtype=node.outputs[0].type.dtype)
def perform(self, node, inputs, out):
    x, y = inputs
    out = out[0]
    x_is_sparse = _is_sparse(x)
    y_is_sparse = _is_sparse(y)
    if not x_is_sparse and not y_is_sparse:
        raise TypeError(x)

    rval = x * y

    if x_is_sparse and y_is_sparse:
        rval = rval.toarray()

    out[0] = theano._asarray(rval, dtype=node.outputs[0].dtype)
def perform(self, node, inputs, outputs):
    (a_val, a_ind, a_ptr, a_nrows, b) = inputs
    (out,) = outputs
    a = scipy.sparse.csc_matrix((a_val, a_ind, a_ptr),
                                (a_nrows, b.shape[0]),
                                copy=False)
    # out[0] = a.dot(b)
    out[0] = theano._asarray(a * b, dtype=node.outputs[0].type.dtype)
    assert _is_dense(out[0])  # scipy 0.7 automatically converts to dense
def test_add_canonizer_problem0():
    n_segments = 10
    label = lscalar('label')
    segment_labels = label + theano._asarray([0] * n_segments, dtype='int64')

    r = segment_labels * 5
    f = function([label], r)
def test_gemv_dimensions(self):
    A = T.matrix('A')
    x, y = T.vectors('x', 'y')
    alpha = theano.shared(theano._asarray(1.0, dtype=config.floatX),
                          name='alpha')
    beta = theano.shared(theano._asarray(1.0, dtype=config.floatX),
                         name='beta')

    z = beta * y + alpha * T.dot(A, x)
    f = theano.function([A, x, y], z)

    # Matrix value
    A_val = numpy.ones((5, 3), dtype=config.floatX)
    # Different vector length
    ones_3 = numpy.ones(3, dtype=config.floatX)
    ones_4 = numpy.ones(4, dtype=config.floatX)
    ones_5 = numpy.ones(5, dtype=config.floatX)
    ones_6 = numpy.ones(6, dtype=config.floatX)

    f(A_val, ones_3, ones_5)
    f(A_val[::-1, ::-1], ones_3, ones_5)
    self.assertRaises(ValueError, f, A_val, ones_4, ones_5)
    self.assertRaises(ValueError, f, A_val, ones_3, ones_6)
    self.assertRaises(ValueError, f, A_val, ones_4, ones_6)


# The following gemv tests were added in March 2011 by Ian Goodfellow
# and are based on the gemv tests from scipy
# http://projects.scipy.org/scipy/browser/trunk/scipy/linalg/tests/test_fblas.py?rev=6803
# NOTE: At the time these tests were written, theano did not have a
# conjugate function. If such a thing is ever added, the tests involving
# conjugate should be ported over as well.
def _numpy_true_div(x, y):
    """Performs true division and casts the result to the type we expect.

    We define this function so we can use it in TrueDivTester.expected,
    because simply calling numpy.true_divide could cause a dtype mismatch.
    """
    out = numpy.true_divide(x, y)
    # Use floatX as the result of int / int
    if x.dtype in tensor.discrete_dtypes and y.dtype in tensor.discrete_dtypes:
        out = theano._asarray(out, dtype=config.floatX)
    return out
def test_or(self):
    for dtype in self.dtype:
        x, y = vector(dtype=dtype), vector(dtype=dtype)
        fn = inplace_func([x, y], x | y)
        l = theano._asarray([0, 0, 1, 1], dtype=dtype)
        r = theano._asarray([0, 1, 0, 1], dtype=dtype)
        v = fn(l, r)
        self.assertTrue(numpy.all(v == (operator.or_(l, r))), (l, r, v))
def test_xor(self):
    for dtype in self.dtype:
        x, y = vector(dtype=dtype), vector(dtype=dtype)
        fn = inplace_func([x, y], x ^ y)
        ix = x
        ix = inplace.xor_inplace(ix, y)
        gn = inplace_func([x, y], ix)
        l = theano._asarray([0, 0, 1, 1], dtype=dtype)
        r = theano._asarray([0, 1, 0, 1], dtype=dtype)
        v = fn(l, r)
        self.assertTrue(numpy.all(v == (operator.xor(l, r))), (l, r, v))
        v = gn(l, r)
        # test the in-place stuff
        self.assertTrue(numpy.all(l == numpy.asarray([0, 1, 1, 0])), l)
def test_and(self):
    for dtype in self.dtype:
        x, y = vector(dtype=dtype), vector(dtype=dtype)
        fn = inplace_func([x, y], x & y)
        l = theano._asarray([0, 0, 1, 1], dtype=dtype)
        r = theano._asarray([0, 1, 0, 1], dtype=dtype)
        v = fn(l, r)
        self.assertTrue(numpy.all(v == (operator.and_(l, r))), (l, r, v))