The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.rollaxis().
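Before the project examples, here is a minimal standalone sketch (not taken from any of the projects below) of what np.rollaxis does: it rolls the given axis to a new position and returns a view where possible. The array names and shapes are made up for illustration; np.moveaxis is the modern equivalent recommended by NumPy.

import numpy as np

# A (channels, height, width) image-like array.
a = np.zeros((3, 32, 64))

# Roll axis 0 until it lies before position 3: (3, 32, 64) -> (32, 64, 3), i.e. channels-last.
print(np.rollaxis(a, 0, 3).shape)   # (32, 64, 3)

# Roll axis 2 to the front (default start=0): (3, 32, 64) -> (64, 3, 32).
print(np.rollaxis(a, 2).shape)      # (64, 3, 32)

# np.moveaxis expresses the same reordering more directly.
print(np.moveaxis(a, 0, -1).shape)  # (32, 64, 3)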
def visualizeLayer(model, img, input_image, layerIndex):
    layer = model.layers[layerIndex]

    get_activations = K.function([model.layers[0].input, K.learning_phase()], [layer.output,])
    activations = get_activations([input_image, 0])[0]
    output_image = activations

    ## If 4 dimensional then take the last dimension value as it would be no of filters
    if output_image.ndim == 4:
        # Rearrange dimension so we can plot the result
        o1 = np.rollaxis(output_image, 3, 1)
        output_image = np.rollaxis(o1, 3, 1)

        print "Dumping filter data of layer{} - {}".format(layerIndex,layer.__class__.__name__)
        filters = len(output_image[0,0,0,:])

        fig = plt.figure(figsize=(8,8))
        # This loop will plot the 32 filter data for the input image
        for i in range(filters):
            ax = fig.add_subplot(6, 6, i+1)
            #ax.imshow(output_image[img,:,:,i],interpolation='none' ) #to see the first filter
            ax.imshow(output_image[0,:,:,i],'gray')
            #ax.set_title("Feature map of layer#{} \ncalled '{}' \nof type {} ".format(layerIndex,
            #               layer.name,layer.__class__.__name__))
            plt.xticks(np.array([]))
            plt.yticks(np.array([]))
        plt.tight_layout()
        #plt.show()
        fig.savefig("img_" + str(img) + "_layer" + str(layerIndex) + "_" + layer.__class__.__name__ + ".png")
        #plt.close(fig)
    else:
        print "Can't dump data of this layer{}- {}".format(layerIndex, layer.__class__.__name__)
def _random_overlay(self, static_hidden=False):
    """Construct random max pool locations."""
    s = self.shapes[2]

    if static_hidden:
        args = np.random.randint(s[2], size=np.prod(s) / s[2] / s[4])
        overlay = np.zeros(np.prod(s) / s[4], np.bool)
        overlay[args + np.arange(len(args)) * s[2]] = True
        overlay = overlay.reshape([s[0], s[1], s[3], s[2]])
        overlay = np.rollaxis(overlay, -1, 2)
        return arrays.extend(overlay, s[4])
    else:
        args = np.random.randint(s[2], size=np.prod(s) / s[2])
        overlay = np.zeros(np.prod(s), np.bool)
        overlay[args + np.arange(len(args)) * s[2]] = True
        overlay = overlay.reshape([s[0], s[1], s[3], s[4], s[2]])
        return np.rollaxis(overlay, -1, 2)
def _local_add_sparse(ltenss):
    """Computes the local tensors of a sum of MPArrays (except for the
    boundary tensors). Works only for products right now

    :param ltenss: Raveled local tensors
    :returns: Correct local tensor representation

    """
    dim = len(ltenss[0])
    nr_summands = len(ltenss)

    indptr = np.arange(nr_summands * dim + 1)
    indices = np.concatenate((np.arange(nr_summands),) * dim)
    data = np.concatenate([lt[None, :] for lt in ltenss])
    data = np.rollaxis(data, 1).ravel()

    return ssp.csc_matrix((data, indices, indptr),
                          shape=(nr_summands, dim * nr_summands))
def axis_iter(self, axes=0):
    """Returns an iterator yielding Sub-MPArrays of ``self`` by iterating
    over the specified physical axes.

    **Example:** If ``self`` represents a bipartite (i.e. length 2)
    array with 2 physical dimensions on each site ``A[(k,l), (m,n)]``,
    ``self.axis_iter(0)`` is equivalent to::

        (A[(k, :), (m, :)] for m in range(...) for k in range(...))

    :param axes: Iterable or int specifiying the physical axes to iterate
        over (default 0 for each site)
    :returns: Iterator over :class:`.MPArray`

    """
    if not isinstance(axes, collections.Iterable):
        axes = it.repeat(axes, len(self))

    ltens_iter = it.product(*(iter(np.rollaxis(lten, i + 1))
                              for i, lten in zip(axes, self.lt)))
    return (MPArray(ltens) for ltens in ltens_iter)

##########################
#  Algebraic operations  #
##########################
def _nanpercentile(a, q, axis=None, out=None, overwrite_input=False,
                   interpolation='linear', keepdims=False):
    """
    Private function that doesn't support extended axis or keepdims.
    These methods are extended to this function using _ureduce
    See nanpercentile for parameter usage

    """
    if axis is None:
        part = a.ravel()
        result = _nanpercentile1d(part, q, overwrite_input, interpolation)
    else:
        result = np.apply_along_axis(_nanpercentile1d, axis, a, q,
                                     overwrite_input, interpolation)
        # apply_along_axis fills in collapsed axis with results.
        # Move that axis to the beginning to match percentile's
        # convention.
        if q.ndim != 0:
            result = np.rollaxis(result, axis)

    if out is not None:
        out[...] = result
    return result
def apply(self, im):
    """
    Apply axis-localized displacements.

    Parameters
    ----------
    im : ndarray
        The image or volume to shift
    """
    from scipy.ndimage.interpolation import shift

    im = rollaxis(im, self.axis)
    im.setflags(write=True)
    for ind in range(0, im.shape[0]):
        im[ind] = shift(im[ind], map(lambda x: -x, self.delta[ind]), mode='nearest')
    im = rollaxis(im, 0, self.axis+1)
    return im
def _encode_as_webp(data, profile=None, affine=None):
    """
    Uses BytesIO + PIL to encode a (3, 512, 512)
    array into a webp bytearray.

    Parameters
    -----------
    data: ndarray
        (3 x 512 x 512) uint8 RGB array
    profile: None
        ignored
    affine: None
        ignored

    Returns
    --------
    contents: bytearray
        webp-encoded bytearray of the provided input data
    """
    with BytesIO() as f:
        im = Image.fromarray(np.rollaxis(data, 0, 3))
        im.save(f, format='webp', lossless=True)
        return f.getvalue()
def adaptsize(x, where):
    """Adapt the dimension of an array depending of the tuple dim

    Args:
        x : the signal for swaping axis
        where : where each dimension should be put

    Example:
        >>> x = np.random.rand(2,4001,160)
        >>> adaptsize(x, (1,2,0)).shape -> (160, 2, 4001)
    """
    if not isinstance(where, np.ndarray):
        where = np.array(where)

    where_t = list(where)
    for k in range(len(x.shape)-1):
        # Find where "where" is equal to "k" :
        idx = np.where(where == k)[0]
        # Roll axis :
        x = np.rollaxis(x, idx, k)
        # Update the where variable :
        where_t.remove(k)
        where = np.array(list(np.arange(k+1)) + where_t)

    return x
def _mean_and_std(X, axis=0, with_mean=True, with_std=True):
    """Compute mean and std deviation for centering, scaling.

    Zero valued std components are reset to 1.0 to avoid NaNs when scaling.
    """
    X = np.asarray(X)
    Xr = np.rollaxis(X, axis)

    if with_mean:
        mean_ = Xr.mean(axis=0)
    else:
        mean_ = None

    if with_std:
        std_ = Xr.std(axis=0)
        if isinstance(std_, np.ndarray):
            std_[std_ == 0.] = 1.0
        elif std_ == 0.:
            std_ = 1.
    else:
        std_ = None

    return mean_, std_
def logsumexp(a, axis=None, b=None):
    a = np.asarray(a)
    if axis is None:
        a = a.ravel()
    else:
        a = np.rollaxis(a, axis)
    a_max = a.max(axis=0)

    if b is not None:
        b = np.asarray(b)
        if axis is None:
            b = b.ravel()
        else:
            b = np.rollaxis(b, axis)
        out = np.log(np.sum(b * np.exp(a - a_max), axis=0))
    else:
        out = np.log(np.sum(np.exp(a - a_max), axis=0))

    out += a_max
    return out
def load_test_images():
    '''
    Loads 64 random images from SVNH test data sets

    :return: Tuple of (test images, image labels)
    '''
    utils.download_train_and_test_data()
    _, testset = utils.load_data_sets()

    idx = np.random.randint(0, testset['X'].shape[3], size=64)
    test_images = testset['X'][:, :, :, idx]
    test_labels = testset['y'][idx]

    test_images = np.rollaxis(test_images, 3)
    test_images = utils.scale(test_images)

    return test_images, test_labels
def __call__(self, plot):
    ax = plot.data.axis
    xax = plot.data.ds.coordinates.x_axis[ax]
    yax = plot.data.ds.coordinates.y_axis[ax]
    if not hasattr(self.vertices, "in_units"):
        vertices = plot.data.pf.arr(self.vertices, "code_length")
    else:
        vertices = self.vertices
    l_cy = triangle_plane_intersect(plot.data.axis, plot.data.coord, vertices)[:,:,(xax, yax)]
    # l_cy is shape (nlines, 2, 2)
    # reformat for conversion to plot coordinates
    l_cy = np.rollaxis(l_cy, 0, 3)
    # convert all line starting points
    l_cy[0] = self.convert_to_plot(plot, l_cy[0])
    # convert all line ending points
    l_cy[1] = self.convert_to_plot(plot, l_cy[1])
    # convert back to shape (nlines, 2, 2)
    l_cy = np.rollaxis(l_cy, 2, 0)
    # create line collection and add it to the plot
    lc = matplotlib.collections.LineCollection(l_cy, **self.plot_args)
    plot._axes.add_collection(lc)
def add_channel(image, channelfirst):
    """
    Add channel if missing and make first axis if requested.

    >>> import numpy as np
    >>> image = np.ones((10, 20))
    >>> image = add_channel(image, True)
    >>> shapestr(image)
    '1x10x20'

    :param ndarray image: RBG (h,w,3) or gray-scale image (h,w).
    :param bool channelfirst: If True, make channel first axis
    :return: Numpy array with channel (as first axis if makefirst=True)
    :rtype: numpy.array
    """
    if not 2 <= image.ndim <= 3:
        raise ValueError('Image must be 2 or 3 channel!')
    if image.ndim == 2:  # gray-scale image
        image = np.expand_dims(image, axis=-1)  # add channel axis
    return np.rollaxis(image, 2) if channelfirst else image
def eval(self, ss, returnResid=False, *args, **kwargs):
    ss = util.segmat(ss)

    preds = []
    gi = []
    for i in xrange(ss.shape[2]):
        v = ss[:,:,i]
        xs = self.getInputs(v)
        gs = self.getTargets(v)

        preds.append(self.model[i].evals(xs, *args, **kwargs).squeeze(2))

        if returnResid:
            gi.append(gs.squeeze(2))

    preds = np.rollaxis(np.array(preds), 0, 3)

    if returnResid:
        gs = np.rollaxis(np.array(gi), 0, 3)
        resids = gs - preds
        return preds, resids
    else:
        return preds
def ensurepil(self, invalidate=True):
    if self.dpil is None:
        if self.dbuf is not None:
            self.dpil = Image.frombytes("RGBA", self.shape, self.dbuf, "raw", "RGBA", 0, 1)
        elif self.darr is not None:
            data = self.scaledpixelarray(0, 255.999)
            buf = np.rollaxis(data, 1).astype(np.uint8).tostring()
            self.dpil = Image.frombytes("RGB", self.shape, buf, "raw", "RGB", 0, -1)
        else:
            raise ValueError("No source data for conversion to PIL image")
    if invalidate:
        self.dbuf = None
        self.darr = None
        self.rangearr = None

## This private function ensures that there is a valid buffer representation, converting from
# one of the other representations if necessary, and invalidating the other representations if requested.
def ensurebuf(self, invalidate=True):
    if self.dbuf is None:
        if self.dpil is not None:
            self.dbuf = self.dpil.tostring("raw", "RGBX", 0, 1)
        elif self.darr is not None:
            data = self.scaledpixelarray(0, 255.999)
            self.dbuf = np.dstack(( np.flipud(np.rollaxis(data, 1)).astype(np.uint8),
                                    np.zeros(self.shape[::-1], np.uint8) )).tostring()
        else:
            raise ValueError("No source data for conversion to buffer")
    if invalidate:
        self.dpil = None
        self.darr = None
        self.rangearr = None

## This private function ensures that there is a valid numpy array representation, converting from
# one of the other representations if necessary, and invalidating the other representations if requested.
def ensurearr(self, invalidate=True):
    if self.darr is None:
        if self.dpil is not None:
            self.darr = np.fromstring(self.dpil.tostring("raw", "RGB", 0, -1), np.uint8).astype(np.float64)
            self.darr = np.rollaxis(np.reshape(self.darr, (self.shape[1], self.shape[0], 3) ), 1)
        elif self.dbuf is not None:
            self.darr = np.fromstring(self.dbuf, np.uint8).astype(np.float64)
            self.darr = np.delete(np.reshape(self.darr, (self.shape[1], self.shape[0], 4) ), 3, 2)
            self.darr = np.rollaxis(np.flipud(self.darr), 1)
        else:
            raise ValueError("No source data for conversion to array")
        self.rangearr = ( 0, 255.999 )
    if invalidate:
        self.dpil = None
        self.dbuf = None

# -----------------------------------------------------------------

## This private helper function returns a 2-tuple containing the least and most significant 16-bit portion
# of the specified unsigned 32-bit integer value.
def plot_patches(fig, patches):
    if patches.ndim == 4:
        channel_step = patches.shape[3] // 3
        # patches = np.concatenate([np.sum(patches[:, :, :, i * channel_step:
        #                                  (i + 1) * channel_step],
        #                                  axis=3)[..., np.newaxis]
        #                           for i in range(3)], axis=3)
        if patches.shape[3] == 1:
            patches = patches[:, :, :, 0]
        elif patches.shape[3] >= 3:
            patches = patches[:, :, :, :3]
            patches = np.rollaxis(patches, 3, 2).reshape(
                (patches.shape[0], patches.shape[1], patches.shape[2] * 3))

    patches = patches[:256]
    side_size = ceil(sqrt(patches.shape[0]))
    for i, patch in enumerate(patches):
        ax = fig.add_subplot(side_size, side_size, i + 1)
        ax.imshow(patch, interpolation='nearest')
        ax.set_xticks(())
        ax.set_yticks(())
    fig.subplots_adjust(0.08, 0.02, 0.92, 0.85, 0.08, 0.23)
    return fig
def setUp(self):
    with self.test_session():
        N = 4
        M = 5

        self.mu = tf.placeholder(settings.float_type, [M, N])
        self.sqrt = tf.placeholder(settings.float_type, [M, N])
        self.chol = tf.placeholder(settings.float_type, [M, M, N])
        self.I = tf.placeholder(settings.float_type, [M, M])

        self.rng = np.random.RandomState(0)
        self.mu_data = self.rng.randn(M, N)
        self.sqrt_data = self.rng.randn(M, N)
        q_sqrt = np.rollaxis(np.array([np.tril(self.rng.randn(M, M))
                                       for _ in range(N)]), 0, 3)
        self.chol_data = q_sqrt

        self.feed_dict = {
            self.mu: self.mu_data,
            self.sqrt: self.sqrt_data,
            self.chol: self.chol_data,
            self.I: np.eye(M),
        }
def setUp(self):
    with self.test_session():
        N = 4
        M = 5

        self.mu = tf.placeholder(settings.float_type, [M, N])
        self.sqrt = tf.placeholder(settings.float_type, [M, N])
        self.chol = tf.placeholder(settings.float_type, [M, M, N])
        self.K = tf.placeholder(settings.float_type, [M, M])
        self.Kdiag = tf.placeholder(settings.float_type, [M, M])

        self.rng = np.random.RandomState(0)
        self.mu_data = self.rng.randn(M, N)
        sqrt_diag = self.rng.randn(M)
        self.sqrt_data = np.array([sqrt_diag for _ in range(N)]).T
        sqrt_chol = np.tril(self.rng.randn(M, M))
        self.chol_data = np.rollaxis(np.array([sqrt_chol for _ in range(N)]), 0, 3)

        self.feed_dict = {
            self.mu: np.zeros((M, N)),
            self.sqrt: self.sqrt_data,
            self.chol: self.chol_data,
            self.K: squareT(sqrt_chol),
            self.Kdiag: np.diag(sqrt_diag ** 2),
        }
def collapse(T, W, divisive=False):
    if divisive:
        W = W / np.sum(np.square(W.reshape(W.shape[0], -1)), 1)[:,None,None,None]

    if T.shape[-6] == W.shape[0]:  # Z ONLY (after 2nd-stage expansion)
        W = np.reshape (W, (1,)*(T.ndim-6) + (W.shape[0],1,1) + W.shape[1:])
        T = ne.evaluate('T*W')
        T = np.reshape (T, T.shape[:-3] + (np.prod(T.shape[-3:]),))
        T = np.sum(T, -1)
    else:  # X ONLY (conv, before 2nd-stage expansion)
        T = np.squeeze (T, -6)
        T = np.tensordot(T, W, ([-3,-2,-1], [1,2,3]))
        T = np.rollaxis (T, -1, 1)

    return T
def show_samples(samples, nShow):
    """ Show some input samples. """
    import math
    import matplotlib.pyplot as plt

    _, nFeatures, x, y = samples.shape

    nColumns = int(math.ceil(nShow/5.))
    for i in range(nShow):
        plt.subplot(5, nColumns, i+1)
        image = samples[i]
        image = np.rollaxis(image, 0, 3)*5.
        plt.imshow(image)
        # plt.axis('off')
def backward_cpu(self, inputs, grad_outputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    gy = grad_outputs[0]
    h, w = x.shape[2:]

    gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
    gcol = numpy.tensordot(W, gy, (0, 1))
    gcol = numpy.rollaxis(gcol, 3)
    gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

    if b is None:
        return gx, gW
    else:
        gb = gy.sum(axis=(0, 2, 3))
        return gx, gW, gb
def forward_cpu(self, inputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    kh, kw = W.shape[2:]
    _, _, h, w = x.shape

    gcol = numpy.tensordot(W, x, (0, 1))
    # - k, m, n: shape of out_channel
    # - b: number of inputs
    # - h, w: height and width of kernels
    # k, m, n, b, h, w -> b, k, m, n, h, w
    gcol = numpy.rollaxis(gcol, 3)

    if self.outh is None:
        self.outh = conv.get_deconv_outsize(h, kh, self.sy, self.ph)
    if self.outw is None:
        self.outw = conv.get_deconv_outsize(w, kw, self.sx, self.pw)

    y = conv.col2im_cpu(
        gcol, self.sy, self.sx, self.ph, self.pw, self.outh, self.outw)
    # b, k, h, w
    if b is not None:
        y += b.reshape(1, b.size, 1, 1)
    return y,
def backward_cpu(self, inputs, grad_outputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    gy = grad_outputs[0]
    kh, kw = W.shape[2:]

    col = conv.im2col_cpu(
        gy, kh, kw, self.sy, self.sx, self.ph, self.pw)
    gW = numpy.tensordot(x, col, ([0, 2, 3], [0, 4, 5]))
    gx = numpy.tensordot(col, W, ([1, 2, 3], [1, 2, 3]))
    gx = numpy.rollaxis(gx, 3, 1)

    if b is None:
        return gx, gW
    else:
        gb = gy.sum(axis=(0, 2, 3))
        return gx, gW, gb
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    log_y = cupy.log(x + 1e-5)
    self.y = x
    if(self.debug):
        ipdb.set_trace()

    if getattr(self, 'normalize', True):
        coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
        coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
        'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
        't == -1 ? 0 : log_y[_j * n_channel + t]',
        'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
def forward_cpu(self, inputs):
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = softmax_log(x, False)
    if self.cache_score:
        self.y = numpy.exp(log_y)
    log_yd = numpy.rollaxis(log_y, 1)
    log_yd = log_yd.reshape(len(log_yd), -1)

    log_p = log_yd[numpy.maximum(t.ravel(), 0), six.moves.range(t.size)]
    # deal with the case where the SoftmaxCrossEntropy is
    # unpickled from the old version
    if getattr(self, 'normalize', True):
        count = (t != self.ignore_label).sum()
    else:
        count = len(x)
    self._coeff = 1.0 / max(count, 1)

    y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) \
        * (-self._coeff)
    return y.reshape(()),
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = softmax_log(x, self.use_cudnn)
    if self.cache_score:
        self.y = cupy.exp(log_y)
    if getattr(self, 'normalize', True):
        coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
        coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
        'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
        't == -1 ? T(0) : log_y[_j * n_channel + t]',
        'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
def accuracy(x, t, ignore_label):
    x_ = numpy.rollaxis(x, 1, x.ndim).reshape(t.size, -1)
    t_ = t.ravel()

    if ignore_label is not None:
        count = 0
        for i in six.moves.range(t_.size):
            pred = x_[i].argmax()
            if t_[i] != ignore_label and pred == t_[i]:
                count += 1
        total = (t_ != ignore_label).sum()
    else:
        count = 0
        for i in six.moves.range(t_.size):
            pred = x_[i].argmax()
            if pred == t_[i]:
                count += 1
        total = t_.size

    if total == 0:
        return 0.0
    else:
        return float(count) / total
def epoch_to_epoch16(self, epoch):
    """
    Converts a CDF EPOCH to a CDF EPOCH16 value

    Parameters
    ==========
    epoch : double
        EPOCH to convert. Lists and numpy arrays are acceptable.

    Returns
    =======
    out : (double, double)
        EPOCH16 corresponding to epoch
    """
    e = numpy.require(epoch, numpy.float64)
    s = numpy.trunc(e / 1000.0)
    # ugly numpy stuff, probably a better way....
    res = numpy.hstack((s, (e - s * 1000.0) * 1e9))
    if len(res) <= 2:
        return res
    newshape = list(res.shape[0:-2])
    newshape.append(res.shape[-1] // 2)
    newshape.append(2)
    return numpy.rollaxis(res.reshape(newshape), -1, -2)
def backward_cpu(self, inputs, grad_outputs):
    x, W = inputs[:2]
    Wb = binarize_cpu(W)
    b = inputs[2] if len(inputs) == 3 else None
    gy = grad_outputs[0]
    h, w = x.shape[2:]

    gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
    gcol = numpy.tensordot(Wb, gy, (0, 1))
    gcol = numpy.rollaxis(gcol, 3)
    gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

    if b is None:
        return gx, gW
    else:
        gb = gy.sum(axis=(0, 2, 3))
        return gx, gW, gb
def forward_cpu(self, inputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    kh, kw = W.shape[2:]

    self.col = conv.im2col_cpu(
        x, kh, kw, self.sy, self.sx, self.ph, self.pw,
        cover_all=self.cover_all)
    Wb = numpy.where(W >= 0, 1, -1).astype(W.dtype, copy=False)
    y = numpy.tensordot(
        self.col, Wb, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)

    if b is not None:
        y += b
    return numpy.rollaxis(y, 3, 1),
def backward_cpu(self, inputs, grad_outputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    gy = grad_outputs[0]
    h, w = x.shape[2:]

    gW = numpy.tensordot(
        gy, self.col, ((0, 2, 3), (0, 4, 5))).astype(W.dtype, copy=False)
    Wb = numpy.where(W >= 0, 1, -1).astype(W.dtype, copy=False)
    gcol = numpy.tensordot(Wb, gy, (0, 1)).astype(x.dtype, copy=False)
    gcol = numpy.rollaxis(gcol, 3)
    gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

    if b is None:
        return gx, gW
    else:
        gb = gy.sum(axis=(0, 2, 3))
        return gx, gW, gb
def forward_cpu(self, inputs):
    x, W = inputs[:2]
    b = inputs[2] if len(inputs) == 3 else None
    kh, kw = W.shape[2:]

    self.col = conv.im2col_cpu(
        x, kh, kw, self.sy, self.sx, self.ph, self.pw,
        cover_all=self.cover_all)
    Xb = numpy.where(self.col > 0, 1, self.col).astype(x.dtype, copy=False)
    Xb = numpy.where(self.col < 0, -1, Xb).astype(x.dtype, copy=False)
    Wb = numpy.where(W >= 0, 1, -1).astype(W.dtype, copy=False)
    y = numpy.tensordot(
        Xb, Wb, ((1, 2, 3), (1, 2, 3))).astype(x.dtype, copy=False)

    if b is not None:
        y += b
    return numpy.rollaxis(y, 3, 1),
def load_video(data):
    videoCapture = skvideo.io.VideoCapture(data, (456, 256))
    videoCapture.open()
    x = []
    while True:
        retval, image = videoCapture.read()
        if retval:
            x.append(numpy.rollaxis(image, 2))
        else:
            break
    return numpy.array(x, 'float32')
def backward_cpu(self, inputs, grad_outputs):
    x, W = inputs[:2]
    Wb = numpy.where(W >= 0, 1, -1).astype(numpy.float32, copy=False)
    b = inputs[2] if len(inputs) == 3 else None
    gy = grad_outputs[0]
    h, w = x.shape[2:]

    gW = numpy.tensordot(gy, self.col, ((0, 2, 3), (0, 4, 5)))
    gcol = numpy.tensordot(Wb, gy, (0, 1))
    gcol = numpy.rollaxis(gcol, 3)
    gx = conv.col2im_cpu(gcol, self.sy, self.sx, self.ph, self.pw, h, w)

    if b is None:
        return gx, gW
    else:
        gb = gy.sum(axis=(0, 2, 3))
        return gx, gW, gb
def forward_cpu(self, inputs):
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = numpy.log(x)
    if self.cache_score:
        self.y = x
    log_yd = numpy.rollaxis(log_y, 1)
    log_yd = log_yd.reshape(len(log_yd), -1)

    log_p = log_yd[numpy.maximum(t.ravel(), 0), six.moves.range(t.size)]
    if getattr(self, 'normalize', True):
        count = (t != self.ignore_label).sum()
    else:
        count = len(x)
    self._coeff = 1.0 / max(count, 1)

    y = (log_p * (t.ravel() != self.ignore_label)).sum(keepdims=True) * (-self._coeff)
    return y.reshape(()),
def forward_gpu(self, inputs):
    cupy = cuda.cupy
    x, t = inputs
    if chainer.is_debug():
        self._check_input_values(x, t)

    log_y = cupy.log(x)
    if self.cache_score:
        self.y = x
    if getattr(self, 'normalize', True):
        coeff = cupy.maximum(1, (t != self.ignore_label).sum())
    else:
        coeff = max(1, len(t))
    self._coeff = cupy.divide(1.0, coeff, dtype=x.dtype)

    log_y = cupy.rollaxis(log_y, 1, log_y.ndim)
    ret = cuda.reduce(
        'S t, raw T log_y, int32 n_channel, raw T coeff', 'T out',
        't == -1 ? 0 : log_y[_j * n_channel + t]',
        'a + b', 'out = a * -coeff[0]', '0', 'crossent_fwd'
    )(t, log_y.reduced_view(), log_y.shape[-1], self._coeff)
    return ret,
def plotOutput(layer, feed_dict, fieldShape=None, channel=None, figOffset=1, cmap=None):
    # Output summary
    try:
        W = layer.output
    except:
        W = layer
    wp = W.eval(feed_dict=feed_dict)
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.product(fieldShape))
        temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
        fields = np.reshape(temp, [1]+fieldShape)
    else:                           # Convolutional layer already has shape
        wp = np.rollaxis(wp, 3, 0)
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features*channels, iy, ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields, np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0, perColumn*perRow, perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))
    tiled = np.vstack(tiled)

    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf()

    mpl.imshow(tiled, cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar()
def plotOutput(layer, feed_dict, fieldShape=None, channel=None, figOffset=1, cmap=None):
    # Output summary
    W = layer.output
    wp = W.eval(feed_dict=feed_dict)
    if len(np.shape(wp)) < 4:       # Fully connected layer, has no shape
        temp = np.zeros(np.product(fieldShape))
        temp[0:np.shape(wp.ravel())[0]] = wp.ravel()
        fields = np.reshape(temp, [1]+fieldShape)
    else:                           # Convolutional layer already has shape
        wp = np.rollaxis(wp, 3, 0)
        features, channels, iy, ix = np.shape(wp)
        if channel is not None:
            fields = wp[:, channel, :, :]
        else:
            fields = np.reshape(wp, [features*channels, iy, ix])

    perRow = int(math.floor(math.sqrt(fields.shape[0])))
    perColumn = int(math.ceil(fields.shape[0]/float(perRow)))
    fields2 = np.vstack([fields, np.zeros([perRow*perColumn-fields.shape[0]] + list(fields.shape[1:]))])
    tiled = []
    for i in range(0, perColumn*perRow, perColumn):
        tiled.append(np.hstack(fields2[i:i+perColumn]))
    tiled = np.vstack(tiled)

    if figOffset is not None:
        mpl.figure(figOffset); mpl.clf()

    mpl.imshow(tiled, cmap=cmap); mpl.title('%s Output' % layer.name); mpl.colorbar()
def transpose_x(U_send, Uc_hatT, num_processes):
    sx = U_send.shape
    sy = Uc_hatT.shape
    U_send[:] = np.rollaxis(Uc_hatT[:,:-1].reshape(sy[0], num_processes, sx[2]), 1)
    return U_send
def transpose_y(Uc_hatT, U_recv, num_processes):
    sx = Uc_hatT.shape
    sy = U_recv.shape
    Uc_hatT[:, :-1] = np.rollaxis(U_recv.reshape(num_processes, sx[0], sy[1]), 1).reshape((sx[0], sx[1]-1))
    return Uc_hatT
def transform_Uc_xz(Uc_hat_x, Uc_hat_z, P1):
    sz = Uc_hat_z.shape
    sx = Uc_hat_x.shape
    Uc_hat_x[:] = np.rollaxis(Uc_hat_z[:,:,:-1].reshape((sz[0], sz[1], P1, sx[2])), 2).reshape(sx)
    return Uc_hat_x
def transform_Uc_zx(Uc_hat_z, Uc_hat_xr, P1):
    sz = Uc_hat_z.shape
    sx = Uc_hat_xr.shape
    Uc_hat_z[:, :, :-1] = np.rollaxis(Uc_hat_xr.reshape((P1, sz[0], sz[1], sx[2])), 0, 3).reshape((sz[0], sz[1], sz[2]-1))
    return Uc_hat_z
def transform_Uc_xy(Uc_hat_x, Uc_hat_y, P):
    sy = Uc_hat_y.shape
    sx = Uc_hat_x.shape
    Uc_hat_x[:] = np.rollaxis(Uc_hat_y.reshape((sy[0], P, sx[1], sx[2])), 1).reshape(sx)
    return Uc_hat_x