我们从Python开源项目中,提取了以下7个代码示例,用于说明如何使用theano.foldl()。
def foldl(fn, elems, initializer=None, name=None):
    '''Reduce elems using fn to combine them from left to right.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance lambda acc, x: acc + x
        elems: tensor
        initializer: The first value used (elems[0] in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Same type and shape as initializer
    '''
    if initializer is None:
        # Seed the fold with the first element and reduce over the rest.
        initializer, elems = elems[0], elems[1:]

    # theano.foldl calls its step function as fn(x, acc) — element first,
    # accumulator second — which is the opposite of this API, so swap them.
    def swapped(x, acc):
        return fn(acc, x)

    return theano.foldl(swapped, elems, initializer, name=name)[0]
def foldl(fn, elems, initializer=None, name=None):
    """Reduce elems using fn to combine them from left to right.

    Thin wrapper around ``theano.foldl`` that presents the conventional
    ``fn(accumulator, element)`` calling order.

    # Arguments
        fn: Callable that will be called upon each element in elems and an
            accumulator, for instance lambda acc, x: acc + x
        elems: tensor
        initializer: The first value used (elems[0] in case of None)
        name: A string name for the foldl node in the graph

    # Returns
        Same type and shape as initializer
    """
    if initializer is None:
        # No explicit seed: use the first element and fold over the remainder.
        initializer = elems[0]
        elems = elems[1:]

    # We need to change the order of the arguments because theano accepts x as
    # first parameter and accumulator as second
    fn2 = lambda x, acc: fn(acc, x)

    # theano.foldl returns a list of outputs; this fold has a single one.
    return theano.foldl(fn2, elems, initializer, name=name)[0]
def accuracy_instance(predictions, targets, n=(1, 2, 3, 4, 5, 10),
                      nb_classes=5, nb_samples_per_class=10, batch_size=1):
    """Compute per-occurrence accuracy over an episode of class presentations.

    For each batch element, walks predictions/targets step by step and counts,
    for the k-th time a class label is seen, whether the prediction matched.
    The result is the hit rate per occurrence index, averaged over the batch.

    # Arguments
        predictions: int tensor — assumed shape (batch_size, sequence_length);
            TODO confirm against callers (only the dimshuffle below fixes rank).
        targets: int tensor of the same shape as predictions.
        n: kept for interface compatibility; NOTE(review): never read in this
            body. Default changed from a mutable list to an equivalent tuple
            to avoid the shared-mutable-default pitfall — no behavior change.
        nb_classes: number of distinct classes per episode.
        nb_samples_per_class: occurrences of each class per episode.
        batch_size: number of episodes processed in parallel.

    # Returns
        Symbolic vector of length nb_samples_per_class: mean accuracy at each
        occurrence index.
    """
    # Accumulator: one hit-counter per (batch element, occurrence index).
    accuracy_0 = theano.shared(np.zeros((batch_size, nb_samples_per_class),
                                        dtype=theano.config.floatX))
    # Per-class occurrence counters, bumped each time a class label appears.
    indices_0 = theano.shared(np.zeros((batch_size, nb_classes),
                                       dtype=np.int32))
    batch_range = T.arange(batch_size)

    def step_(p, t, acc, idx):
        # Credit slot idx[b, t] (how many times class t has been seen in batch
        # b so far) when the prediction equals the target, then advance the
        # counter for class t.
        acc = T.inc_subtensor(acc[batch_range, idx[batch_range, t]],
                              T.eq(p, t))
        idx = T.inc_subtensor(idx[batch_range, t], 1)
        return (acc, idx)

    # dimshuffle(1, 0) makes time the leading axis so the fold iterates over
    # episode steps; only the final (accumulator, counters) pair is kept.
    (raw_accuracy, _), _ = theano.foldl(
        step_,
        sequences=[predictions.dimshuffle(1, 0), targets.dimshuffle(1, 0)],
        outputs_info=[accuracy_0, indices_0])

    # Each occurrence slot accumulated at most nb_classes hits (one per class).
    accuracy = T.mean(raw_accuracy / nb_classes, axis=0)
    return accuracy
def test_foldl_memory_consumption(self):
    """Check the buffer size Scan's SaveMem optimization keeps for a foldl,
    and that the gradient of the fold is still correct afterwards."""
    # A simple left fold summing 10 random values, seeded with a 0 constant.
    x = theano.shared(numpy.asarray(
        numpy.random.uniform(size=(10,)), dtype=theano.config.floatX))
    o, _ = theano.foldl(lambda v, acc: acc + v, x,
                        theano.tensor.constant(
                            numpy.asarray(0., dtype=theano.config.floatX)))

    # Disable inplace so the optimized graph shape is predictable for the
    # buffer-size assertions below.
    mode = theano.compile.mode.FAST_RUN
    mode = mode.excluding('inplace')

    f0 = theano.function([], o, mode=mode)
    inputs, outputs = clone_optimized_graph(f0)

    # Locate the Scan node produced by foldl inside the optimized graph;
    # scan_node.inputs[2] is the accumulator's storage buffer.
    scan_nodes = grab_scan_node(outputs[0])
    assert scan_nodes is not None
    scan_node = scan_nodes[0]
    f1 = theano.function(inputs, scan_node.inputs[2])

    # Originally, the shape would have been 1 due to the SaveMem
    # optimization reducing the size to the number of taps (in this case
    # 1) provided to the inner function. Now, because of the memory-reuse
    # feature in Scan it can be 2 because SaveMem needs to keep a
    # larger buffer to avoid aliasing between the inputs and the outputs.
    if theano.config.scan.allow_output_prealloc:
        assert f1().shape[0] == 2
    else:
        assert f1().shape[0] == 1

    # d(sum)/dx is 1 for every element — gradient must survive SaveMem.
    gx = theano.tensor.grad(o, x)
    f2 = theano.function([], gx)
    utt.assert_allclose(f2(), numpy.ones((10,)))