The following 49 code examples, extracted from open-source Python projects, demonstrate how to use numpy.mod().
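Before the project snippets, here is a minimal sketch of the basic semantics: np.mod() computes the element-wise remainder, and (unlike math.fmod) the sign of the result follows the divisor.

import numpy as np

print(np.mod([4, 7, -3], 3))    # [1 1 0]: element-wise remainder
print(np.mod(np.arange(6), 4))  # [0 1 2 3 0 1]
print(np.mod(-5.5, 2.0))        # 0.5: sign follows the divisor, unlike math.fmod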
def _get_data_dims(self, input_fname):
    """Briefly scan the data file for info"""
    # raw data formatting is nsamps by nchans + counter
    data = np.genfromtxt(input_fname, delimiter=',', comments='%',
                         skip_footer=1)
    diff = np.abs(np.diff(data[:, 0]))
    diff = np.mod(diff, 254) - 1
    missing_idx = np.where(diff != 0)[0]
    missing_samps = diff[missing_idx].astype(int)
    nsamps, nchan = data.shape
    # add the missing samples
    nsamps += sum(missing_samps)
    # remove the tracker column
    nchan -= 1
    del data
    return nsamps, nchan
def laplace_gpu(y_gpu, mode='valid'):

    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    shared_size = int((2 + block_size[0]) * (2 + block_size[1]) * dtype.itemsize)

    preproc = _generate_preproc(dtype, shape)
    mod = SourceModule(preproc + kernel_code, keep=True)

    if mode == 'valid':
        laplace_fun_gpu = mod.get_function("laplace_valid")
        laplace_gpu = cua.empty((y_gpu.shape[0] - 2, y_gpu.shape[1] - 2),
                                y_gpu.dtype)

    if mode == 'same':
        laplace_fun_gpu = mod.get_function("laplace_same")
        laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1]),
                                y_gpu.dtype)

    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)

    return laplace_gpu
def paintGL(self):
    """Paint the scene."""
    self.update_buffer()
    gl.glClear(gl.GL_COLOR_BUFFER_BIT)

    if (np.mod(self.enctime, 4) == 0 or ENC == 0 or 1):
        if (1):
            gl.glBindTexture(gl.GL_TEXTURE_2D, self.idtexgl)
            gl.glEnable(gl.GL_TEXTURE_2D)
            gl.glBegin(gl.GL_QUADS)
            gl.glTexCoord2f(0.0, 0.0)
            gl.glVertex2f(0, 0)
            gl.glTexCoord2f(1.0, 0.0)
            gl.glVertex2f(1.0, 0)
            gl.glTexCoord2f(1.0, 1.0)
            gl.glVertex2f(1.0, 1.0)
            gl.glTexCoord2f(0.0, 1.0)
            gl.glVertex2f(0, 1.0)
            gl.glEnd()
        else:
            gl.glColor4d(0.5, 0.7, 0.8, 0.04)
            gl.glEnable(gl.GL_BLEND)
            gl.glBlendEquationSeparate(gl.GL_FUNC_ADD, gl.GL_FUNC_ADD)
            gl.glBlendFuncSeparate(gl.GL_SRC_ALPHA, gl.GL_ONE_MINUS_SRC_ALPHA,
                                   gl.GL_ONE, gl.GL_ZERO)
            # bind the VBO
            self.glbuf.bind()
            # tell OpenGL that the VBO contains an array of vertices
            gl.glEnableClientState(gl.GL_VERTEX_ARRAY)
            # these vertices contain 2 single-precision coordinates
            gl.glVertexPointer(2, gl.GL_FLOAT, 0, self.glbuf)
            # draw "count" points from the VBO
            gl.glDrawArrays(gl.GL_POINTS, 0, self.count)

    self.update()
def _get_slice_(self, t_start, t_stop):

    x_beg = numpy.int64(t_start // self.SAMPLES_PER_RECORD)
    r_beg = numpy.mod(t_start, self.SAMPLES_PER_RECORD)
    x_end = numpy.int64(t_stop // self.SAMPLES_PER_RECORD)
    r_end = numpy.mod(t_stop, self.SAMPLES_PER_RECORD)

    if x_beg == x_end:
        g_offset = x_beg * self.bytes_per_block_div + self.block_offset_div
        data_slice = numpy.arange(g_offset + r_beg * self.nb_channels,
                                  g_offset + r_end * self.nb_channels,
                                  dtype=numpy.int64)
        yield data_slice
    else:
        for count, nb_blocks in enumerate(numpy.arange(x_beg, x_end + 1, dtype=numpy.int64)):
            g_offset = nb_blocks * self.bytes_per_block_div + self.block_offset_div
            if count == 0:
                data_slice = numpy.arange(g_offset + r_beg * self.nb_channels,
                                          g_offset + self.block_size_div,
                                          dtype=numpy.int64)
            elif count == (x_end - x_beg):
                data_slice = numpy.arange(g_offset,
                                          g_offset + r_end * self.nb_channels,
                                          dtype=numpy.int64)
            else:
                data_slice = numpy.arange(g_offset,
                                          g_offset + self.block_size_div,
                                          dtype=numpy.int64)
            yield data_slice
def _get_slice_(self, t_start, t_stop):

    x_beg = numpy.int64(t_start // self.SAMPLES_PER_RECORD)
    r_beg = numpy.mod(t_start, self.SAMPLES_PER_RECORD)
    x_end = numpy.int64(t_stop // self.SAMPLES_PER_RECORD)
    r_end = numpy.mod(t_stop, self.SAMPLES_PER_RECORD)

    data_slice = []

    if x_beg == x_end:
        g_offset = x_beg * self.SAMPLES_PER_RECORD + \
                   self.OFFSET_PER_BLOCK[0] * (x_beg + 1) + \
                   self.OFFSET_PER_BLOCK[1] * x_beg
        data_slice = numpy.arange(g_offset + r_beg, g_offset + r_end,
                                  dtype=numpy.int64)
    else:
        for count, nb_blocks in enumerate(numpy.arange(x_beg, x_end + 1, dtype=numpy.int64)):
            g_offset = nb_blocks * self.SAMPLES_PER_RECORD + \
                       self.OFFSET_PER_BLOCK[0] * (nb_blocks + 1) + \
                       self.OFFSET_PER_BLOCK[1] * nb_blocks
            if count == 0:
                data_slice += numpy.arange(g_offset + r_beg,
                                           g_offset + self.SAMPLES_PER_RECORD,
                                           dtype=numpy.int64).tolist()
            elif count == (x_end - x_beg):
                data_slice += numpy.arange(g_offset, g_offset + r_end,
                                           dtype=numpy.int64).tolist()
            else:
                data_slice += numpy.arange(g_offset,
                                           g_offset + self.SAMPLES_PER_RECORD,
                                           dtype=numpy.int64).tolist()

    return data_slice
def train_step(self, sess, counter):
    '''
    This is a generic function that will be called by the Trainer class
    once per iteration. The simplest body for this part would be simply
    "sess.run(self.train_op)", but you may have more complications.

    Running self.summary_op is handled by Trainer.Supervisor and doesn't
    need to be addressed here.

    Only counters, not epochs, are explicitly kept track of.
    '''

    ### You can wait until counter > N to do stuff, for example:
    if self.config.pretrain_LabelerR and counter < self.config.pretrain_LabelerR_no_of_iters:
        sess.run(self.d_label_optim)
    else:
        if np.mod(counter, 3) == 0:
            sess.run(self.g_optim)
            sess.run([self.train_op, self.k_t_update, self.inc_step])  # all ops
        else:
            sess.run([self.g_optim, self.k_t_update, self.inc_step])
            sess.run(self.g_optim)
def get_min_pos_kinect():

    (depth, _) = get_depth()

    minVal = np.min(depth)        # This is the minimum value from the depth image
    minPos = np.argmin(depth)     # This is the raw index of the minimum value above
    xPos = np.mod(minPos, xSize)  # This is the x component of the raw index
    yPos = minPos // xSize        # This is the y component of the raw index

    xList.append(xPos)
    del xList[0]
    xPos = int(np.mean(xList))
    yList.append(yPos)
    del yList[0]
    yPos = int(np.mean(yList))

    return (xSize - xPos - 10, yPos, minVal)
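In the Kinect helper above, np.mod and integer division convert the flat argmin index back into (x, y) image coordinates. A minimal sketch of the same idiom (np.unravel_index is the built-in equivalent; the 480x640 image is hypothetical):

import numpy as np

depth = np.random.rand(480, 640)  # stand-in for a depth image (ySize x xSize)
minPos = np.argmin(depth)         # flat index of the minimum
xPos, yPos = np.mod(minPos, 640), minPos // 640
assert (yPos, xPos) == np.unravel_index(minPos, depth.shape)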
def extract_top_plane_nodes(nodefile, top_face):
    """
    :param nodefile:
    :param top_face:
    :return: planeNodeIDs
    """
    import numpy as np
    import fem_mesh

    top_face = np.array(top_face)

    nodeIDcoords = fem_mesh.load_nodeIDs_coords(nodefile)
    [snic, axes] = fem_mesh.SortNodeIDs(nodeIDcoords)

    # extract spatially-sorted node IDs on the top z plane
    axis = int(np.floor(np.divide(top_face.nonzero(), 2)))
    if np.mod(top_face.nonzero(), 2) == 1:
        plane = (axis, axes[axis].max())
    else:
        plane = (axis, axes[axis].min())
    planeNodeIDs = fem_mesh.extractPlane(snic, axes, plane)

    return planeNodeIDs
def test_NotImplemented_not_returned(self):
    # See gh-5964 and gh-2091. Some of these functions are not operator
    # related and were fixed for other reasons in the past.
    binary_funcs = [
        np.power, np.add, np.subtract, np.multiply, np.divide,
        np.true_divide, np.floor_divide, np.bitwise_and, np.bitwise_or,
        np.bitwise_xor, np.left_shift, np.right_shift, np.fmax, np.fmin,
        np.fmod, np.hypot, np.logaddexp, np.logaddexp2, np.logical_and,
        np.logical_or, np.logical_xor, np.maximum, np.minimum, np.mod
    ]

    # These functions still return NotImplemented. Will be fixed in
    # future.
    # bad = [np.greater, np.greater_equal, np.less, np.less_equal, np.not_equal]

    a = np.array('1')
    b = 1
    for f in binary_funcs:
        assert_raises(TypeError, f, a, b)
def get_op(self):
    """Returns all symmetry operations (including inversions and
    subtranslations), but unlike get_symop(), they are returned as
    two ndarrays."""
    if self.centrosymmetric:
        rot = np.tile(np.vstack((self.rotations, -self.rotations)),
                      (self.nsubtrans, 1, 1))
        trans = np.tile(np.vstack((self.translations, -self.translations)),
                        (self.nsubtrans, 1))
        trans += np.repeat(self.subtrans, 2 * len(self.rotations), axis=0)
        trans = np.mod(trans, 1)
    else:
        rot = np.tile(self.rotations, (self.nsubtrans, 1, 1))
        trans = np.tile(self.translations, (self.nsubtrans, 1))
        trans += np.repeat(self.subtrans, len(self.rotations), axis=0)
        trans = np.mod(trans, 1)
    return rot, trans
def ecliptic_longitude(hUTC, dayofyear, year):
    """ Ecliptic longitude

    Args:
        hUTC: fractional hour (UTC time)
        dayofyear (int):
        year (int):

    Returns:
        (float) the ecliptic longitude (degrees)

    Details:
        World Meteorological Organization (2006). Guide to meteorological
        instruments and methods of observation. Geneva, Switzerland.
    """
    jd = julian_date(hUTC, dayofyear, year)
    n = jd - 2451545
    # mean longitude (deg)
    L = numpy.mod(280.46 + 0.9856474 * n, 360)
    # mean anomaly (deg)
    g = numpy.mod(357.528 + 0.9856003 * n, 360)
    return L + 1.915 * numpy.sin(numpy.radians(g)) + 0.02 * numpy.sin(
        numpy.radians(2 * g))
def hour_angle(hUTC, dayofyear, year, longitude):
    """ Sun hour angle

    Args:
        hUTC: fractional hour (UTC time)
        dayofyear (int):
        year (int):
        longitude (float): the location longitude (degrees, east positive)

    Returns:
        (float) the hour angle (hour)

    Details:
        World Meteorological Organization (2006). Guide to meteorological
        instruments and methods of observation. Geneva, Switzerland.
    """
    jd = julian_date(hUTC, dayofyear, year)
    n = jd - 2451545
    gmst = numpy.mod(6.697375 + 0.0657098242 * n + hUTC, 24)
    lmst = numpy.mod(gmst + longitude / 15., 24)
    ra = right_ascension(hUTC, dayofyear, year)
    ha = numpy.mod(lmst - ra / 15. + 12, 24) - 12
    return ha
def eot(hUTC, dayofyear, year):
    """ Equation of time, i.e. the discrepancy between true solar time and
    local solar time

    Args:
        hUTC: fractional hour (UTC time)
        dayofyear (int): the day of year
        year (int):

    Returns:
        (float) the eot discrepancy (in hour)

    Details:
        Michalsky, J. J. "The Astronomical Almanac's Algorithm for
        Approximate Solar Position (1950-2050)". Solar Energy. Vol. 40,
        No. 3, 1988; pp. 227-235, USA.
    """
    jd = julian_date(hUTC, dayofyear, year)
    n = jd - 2451545
    # mean longitude (deg)
    L = numpy.mod(280.46 + 0.9856474 * n, 360)
    ra = right_ascension(hUTC, dayofyear, year)
    return (L - ra) / 15.
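The three solar-geometry helpers above all use numpy.mod the same way: to wrap an unbounded linear ramp into a periodic range. A standalone sketch of the two wrap patterns (the input n is an arbitrary example, not almanac output):

import numpy as np

n = 7500.3                               # days since J2000, arbitrary example
L = np.mod(280.46 + 0.9856474 * n, 360)  # degrees wrapped into [0, 360), about 113.1 here
ha = np.mod(17.8 + 12, 24) - 12          # hours wrapped into [-12, 12), gives -6.2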
def comp_ola_gdeconv(xx_gpu, xy_gpu, yx_gpu, yy_gpu, L_gpu, alpha, beta):
    """
    Computes the division in Fourier space needed for gdirect deconvolution
    """
    sfft = xx_gpu.shape
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sfft[0] * sfft[1]) / block_size[0])),
                 int(np.ceil(np.float32(sfft[2]) / block_size[1])))

    mod = cu.module_from_buffer(cubin)
    comp_ola_gdeconv_Kernel = mod.get_function("comp_ola_gdeconv_Kernel")

    z_gpu = cua.zeros(sfft, np.complex64)

    comp_ola_gdeconv_Kernel(z_gpu.gpudata,
                            np.int32(sfft[0]), np.int32(sfft[1]), np.int32(sfft[2]),
                            xx_gpu, xy_gpu, yx_gpu, yy_gpu,
                            L_gpu.gpudata,
                            np.float32(alpha), np.float32(beta),
                            block=block_size, grid=grid_size)

    return z_gpu
def crop_gpu2cpu(x_gpu, sz, offset=(0, 0)):

    sfft = x_gpu.shape

    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sfft[1]) / block_size[1])),
                 int(np.ceil(np.float32(sfft[0]) / block_size[0])))

    if x_gpu.dtype == np.float32:
        mod = cu.module_from_buffer(cubin)
        cropKernel = mod.get_function("crop_Kernel")
    elif x_gpu.dtype == np.complex64:
        mod = cu.module_from_buffer(cubin)
        cropKernel = mod.get_function("crop_ComplexKernel")

    x_cropped_gpu = cua.empty(tuple((int(sz[0]), int(sz[1]))), np.float32)

    cropKernel(x_cropped_gpu.gpudata,
               np.int32(sz[0]), np.int32(sz[1]),
               x_gpu.gpudata,
               np.int32(sfft[0]), np.int32(sfft[1]),
               np.int32(offset[0]), np.int32(offset[1]),
               block=block_size, grid=grid_size)

    return x_cropped_gpu
def comp_ola_sdeconv(gx_gpu, gy_gpu, xx_gpu, xy_gpu, Ftpy_gpu, f_gpu, L_gpu,
                     alpha, beta, gamma=0):
    """
    Computes the division in Fourier space needed for sparse deconvolution
    """
    sfft = xx_gpu.shape
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sfft[0] * sfft[1]) / block_size[0])),
                 int(np.ceil(np.float32(sfft[2]) / block_size[1])))

    mod = cu.module_from_buffer(cubin)
    comp_ola_sdeconv_Kernel = mod.get_function("comp_ola_sdeconv_Kernel")

    z_gpu = cua.zeros(sfft, np.complex64)

    comp_ola_sdeconv_Kernel(z_gpu.gpudata,
                            np.int32(sfft[0]), np.int32(sfft[1]), np.int32(sfft[2]),
                            gx_gpu.gpudata, gy_gpu.gpudata,
                            xx_gpu.gpudata, xy_gpu.gpudata,
                            Ftpy_gpu.gpudata, f_gpu.gpudata, L_gpu.gpudata,
                            np.float32(alpha), np.float32(beta),
                            np.float32(gamma),
                            block=block_size, grid=grid_size)

    return z_gpu
def impad_gpu(y_gpu, sf):

    sf = np.array(sf)
    shape = (np.array(y_gpu.shape) + sf).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))

    preproc = _generate_preproc(dtype, shape)
    mod = SourceModule(preproc + kernel_code, keep=True)

    padded_gpu = cua.empty((int(shape[0]), int(shape[1])), dtype)
    impad_fun = mod.get_function("impad")

    upper_left = np.uint32(np.floor(sf / 2.))
    original_size = np.uint32(np.array(y_gpu.shape))

    impad_fun(padded_gpu.gpudata, y_gpu.gpudata,
              upper_left[1], upper_left[0],
              original_size[0], original_size[1],
              block=block_size, grid=grid_size)

    return padded_gpu
def laplace_stack_gpu(y_gpu, mode='valid'):
    """
    This function computes the Laplacian of each slice of a stack of images
    """
    shape = np.array(y_gpu.shape).astype(np.uint32)
    dtype = y_gpu.dtype
    block_size = (6, int(np.floor(512. / 6. / float(shape[0]))), int(shape[0]))
    grid_size = (int(np.ceil(float(shape[1]) / block_size[0])),
                 int(np.ceil(float(shape[0]) / block_size[1])))
    shared_size = int((2 + block_size[0]) * (2 + block_size[1]) * (2 + block_size[2])
                      * dtype.itemsize)

    preproc = _generate_preproc(dtype, (shape[1], shape[2]))
    mod = SourceModule(preproc + kernel_code, keep=True)

    laplace_fun_gpu = mod.get_function("laplace_stack_same")
    laplace_gpu = cua.empty((y_gpu.shape[0], y_gpu.shape[1], y_gpu.shape[2]),
                            y_gpu.dtype)

    laplace_fun_gpu(laplace_gpu.gpudata, y_gpu.gpudata,
                    block=block_size, grid=grid_size, shared=shared_size)

    return laplace_gpu
def modify_sparse23_gpu(y_gpu, beta):

    shape = np.array(y_gpu.shape).astype(np.uint32)
    gpu_shape = np.array([np.prod(shape), np.prod(shape)])
    gpu_shape = np.uint32(np.ceil(np.sqrt(gpu_shape)))
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(gpu_shape[1]) / block_size[0])),
                 int(np.ceil(float(gpu_shape[0]) / block_size[1])))

    preproc = _generate_preproc(dtype, np.array(grid_size)
                                * np.array(block_size)[0:1])
    mod = SourceModule(preproc + kernel_code, keep=True)

    modify_alpha23_fun = mod.get_function("modify_alpha23")

    modify_alpha23_fun(y_gpu.gpudata, np.float32(beta),
                       np.uint32(np.prod(shape)),
                       block=block_size, grid=grid_size)
def modify_sparse_gpu(y_gpu, beta, alpha=2/3):

    shape = np.array(y_gpu.shape).astype(np.uint32)
    gpu_shape = np.array([np.prod(shape), np.prod(shape)])
    gpu_shape = np.uint32(np.ceil(np.sqrt(gpu_shape)))
    dtype = y_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(float(gpu_shape[1]) / block_size[0])),
                 int(np.ceil(float(gpu_shape[0]) / block_size[1])))

    preproc = _generate_preproc(dtype, np.array(grid_size)
                                * np.array(block_size)[0:1])
    mod = SourceModule(preproc + kernel_code, keep=True)

    modify_alpha_fun = mod.get_function("modify_alpha")

    modify_alpha_fun(y_gpu.gpudata, np.float32(beta),
                     np.float32(alpha), np.uint32(np.prod(shape)),
                     block=block_size, grid=grid_size)
def ola_GPU(xs_gpu, sy, csf, hop):

    y_gpu = cua.empty(sy, np.float32)

    block_size = (16, 16, 1)
    grid_size = (int(np.ceil(np.float32(sx[0] * sz[0]) / block_size[1])),
                 int(np.ceil(np.float32(sz[1]) / block_size[0])))

    mod = cu.module_from_buffer(cubin)
    copy_Kernel = mod.get_function("copy_Kernel")

    for i in range(csf[0]):
        for j in range(csf[1]):
            copy_Kernel(y_gpu, np.uint32(sy[0]), np.uint32(sy[0]),
                        xs_gpu, np.uint32(sx[0]), np.uint32(sx[1]), np.uint32(sx[2]),
                        np.uint32(offset[0]), np.uint32(offset[1]),
                        np.uint32(startrow),
                        block=block_size, grid=grid_size)

    return np.real(y_gpu.get())
def project_on_basis_gpu(fs_gpu, basis_gpu):

    basis_length = basis_gpu.shape[0]
    shape = np.array(fs_gpu.shape).astype(np.uint32)
    dtype = fs_gpu.dtype
    block_size = (16, 16, 1)
    grid_size = (1, int(np.ceil(float(basis_length) / block_size[1])))

    weights_gpu = cua.empty(basis_length, dtype=dtype)

    preproc = _generate_preproc(dtype, shape)
    preproc += '#define BLOCK_SIZE %d\n' % (block_size[0] * block_size[1])
    mod = SourceModule(preproc + projection_code, keep=True)

    projection_fun = mod.get_function("projection")

    projection_fun(weights_gpu.gpudata, fs_gpu.gpudata, basis_gpu.gpudata,
                   np.uint32(basis_length),
                   block=block_size, grid=grid_size)
def encode(msg):
    """ passed a list of bits (integers, 1 or 0), returns a hamming(8,4)-coded
        list of bits """
    while len(msg) % 4 != 0:
        # pad the message to length
        msg.append(0)
    msg = np.reshape(np.array(msg), (-1, 4))

    # create parity bits using transition matrix
    transition = np.mat('1,0,0,0,0,1,1,1;\
                         0,1,0,0,1,0,1,1;\
                         0,0,1,0,1,1,0,1;\
                         0,0,0,1,1,1,1,0')
    result = np.dot(msg, transition)

    # mod 2 the matrix multiplication
    return np.mod(result, 2)
def syndrome(msg):
    """ passed a list of hamming(8,4)-encoded bits (integers, 1 or 0), returns
        an error syndrome for that list """
    msg = np.reshape(np.array(msg), (-1, 8)).T

    # syndrome generation matrix
    transition = np.mat('0,1,1,1,1,0,0,0;\
                         1,0,1,1,0,1,0,0;\
                         1,1,0,1,0,0,1,0;\
                         1,1,1,0,0,0,0,1')
    result = np.dot(transition, msg)

    # mod 2 the matrix multiplication
    return np.mod(result, 2)
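A quick round trip one might run with the two Hamming(8,4) helpers above (a sketch assuming both functions are in scope; an all-zero syndrome means no detected error):

import numpy as np

codeword = encode([1, 0, 1, 1])        # 4 data bits -> 8 coded bits: [[1 0 1 1 0 1 0 0]]
print(syndrome(codeword.tolist()[0]))  # all-zero column: clean codeword
codeword[0, 2] ^= 1                    # flip one bit
print(syndrome(codeword.tolist()[0]))  # nonzero syndrome flags the error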
def contributions(in_length, out_length, scale, kernel, k_width):
    if scale < 1:
        h = lambda x: scale * kernel(scale * x)
        kernel_width = 1.0 * k_width / scale
    else:
        h = kernel
        kernel_width = k_width
    x = np.arange(1, out_length + 1).astype(np.float64)
    u = x / scale + 0.5 * (1 - 1 / scale)
    left = np.floor(u - kernel_width / 2)
    P = int(ceil(kernel_width)) + 2
    ind = np.expand_dims(left, axis=1) + np.arange(P) - 1  # -1 because indexing from 0
    indices = ind.astype(np.int32)
    weights = h(np.expand_dims(u, axis=1) - indices - 1)  # -1 because indexing from 0
    weights = np.divide(weights, np.expand_dims(np.sum(weights, axis=1), axis=1))
    aux = np.concatenate((np.arange(in_length),
                          np.arange(in_length - 1, -1, step=-1))).astype(np.int32)
    indices = aux[np.mod(indices, aux.size)]
    ind2store = np.nonzero(np.any(weights, axis=0))
    weights = weights[:, ind2store]
    indices = indices[:, ind2store]
    return weights, indices
def calculate_feature_statistics(feature_id):
    feature = Feature.objects.get(pk=feature_id)

    dataframe = _get_dataframe(feature.dataset.id)
    feature_col = dataframe[feature.name]

    feature.min = np.amin(feature_col).item()
    feature.max = np.amax(feature_col).item()
    feature.mean = np.mean(feature_col).item()
    feature.variance = np.nanvar(feature_col).item()

    unique_values = np.unique(feature_col)
    integer_check = (np.mod(unique_values, 1) == 0).all()
    feature.is_categorical = integer_check and (unique_values.size < 10)
    if feature.is_categorical:
        feature.categories = list(unique_values)

    feature.save(update_fields=['min', 'max', 'variance', 'mean',
                                'is_categorical', 'categories'])

    del unique_values, feature
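The categorical check above hinges on np.mod(x, 1) == 0, which is true exactly for whole numbers. The idiom in isolation (toy arrays, not the Django model):

import numpy as np

print((np.mod(np.unique([1.0, 2.0, 3.0, 2.0]), 1) == 0).all())  # True: all whole numbers
print((np.mod(np.array([1.5, 2.0]), 1) == 0).all())             # False: 1.5 is fractional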
def screw_axis(self):
    """ The rotation, translation and screw axis from the dual quaternion. """
    rotation = 2. * np.degrees(np.arccos(self.q_rot.w))
    rotation = np.mod(rotation, 360.)

    if rotation > 1.e-12:
        translation = -2. * self.q_dual.w / np.sin(rotation / 2. * np.pi / 180.)
        screw_axis = self.q_rot.q[0:3] / np.sin(rotation / 2. * np.pi / 180.)
    else:
        translation = 2. * np.sqrt(np.sum(np.power(self.q_dual.q[0:3], 2.)))
        if translation > 1.e-12:
            screw_axis = 2. * self.q_dual.q[0:3] / translation
        else:
            screw_axis = np.zeros(3)

    # TODO(ntonci): Add axis point for completeness

    return screw_axis, rotation, translation
def modIdx(i, l):
    """Wraps an index into the valid range of a list by taking it modulo
    the list length.

    Useful if lists refer to loops etc.

    Args:
        i (int): Index.
        l (list): Some list.

    Returns:
        int: New index.
    """
    return np.mod(i, len(l))
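A hypothetical use of modIdx for cycling through a list without running off the end (same idiom, shown inline):

import numpy as np

colors = ['r', 'g', 'b']
for i in range(7):
    print(colors[np.mod(i, len(colors))])  # prints r g b r g b r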
def getPlotVec(self):
    """Returns vectors for plotting arc.

    Returns:
        tuple: Tuple containing:

            * x (numpy.ndarray): x-array.
            * y (numpy.ndarray): y-array.
            * z (numpy.ndarray): z-array.
    """
    self.getNormVec()

    if np.mod(self.angle, np.pi / 2.) < 0.01:
        a = np.linspace(0, self.angle, 1000)
    else:
        a = np.linspace(self.angleOffset - self.angle, self.angleOffset, 1000)

    x, y, z = self.getPointOnArc(a)

    return x, y, z
def pyfftw_empty_aligned(shape, dtype, order='C', n=None):
    """
    Construct an empty byte-aligned array for efficient use by :mod:`pyfftw`.
    This function is a wrapper for :func:`pyfftw.empty_aligned`.

    Parameters
    ----------
    shape : sequence of ints
        Output array shape
    dtype : dtype
        Output array dtype
    order : {'C', 'F'}, optional (default 'C')
        Memory layout of the output array
    n : int, optional (default None)
        Output array should be aligned to n-byte boundary

    Returns
    -------
    a : ndarray
        Empty array with required byte-alignment
    """
    return pyfftw.empty_aligned(shape, dtype, order, n)
def blockcirculant(A):
    """
    Construct a block circulant matrix from a tuple of arrays. This is a
    block-matrix variant of :func:`scipy.linalg.circulant`.

    Parameters
    ----------
    A : tuple of array_like
        Tuple of arrays corresponding to the first block column of the output
        block matrix

    Returns
    -------
    B : ndarray
        Output array
    """
    r, c = A[0].shape
    B = np.zeros((len(A) * r, len(A) * c), dtype=A[0].dtype)
    for k in range(len(A)):
        for l in range(len(A)):
            kl = np.mod(k + l, len(A))
            B[r * kl:r * (kl + 1), c * k:c * (k + 1)] = A[l]
    return B
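Here np.mod(k + l, len(A)) wraps the block row index, so each block column is a cyclic shift of the first. A tiny worked example with 1x1 blocks (a sketch assuming the function above is in scope; it reproduces scipy.linalg.circulant([1, 2, 3])):

import numpy as np

A = (np.array([[1]]), np.array([[2]]), np.array([[3]]))
print(blockcirculant(A))
# [[1 3 2]
#  [2 1 3]
#  [3 2 1]]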
def SortByAngle(kNearestPoints, currentPoint, prevPoint):
    ''' Sorts the k nearest points given by angle '''
    angles = np.zeros(kNearestPoints.shape[0])
    i = 0
    for NearestPoint in kNearestPoints:
        # calculate the angle
        angle = np.arctan2(NearestPoint[1] - currentPoint[1],
                           NearestPoint[0] - currentPoint[0]) - \
                np.arctan2(prevPoint[1] - currentPoint[1],
                           prevPoint[0] - currentPoint[0])
        angle = np.rad2deg(angle)
        # only positive angles
        angle = np.mod(angle + 360, 360)
        # print NearestPoint[0], NearestPoint[1], angle
        angles[i] = angle
        i = i + 1
    return kNearestPoints[np.argsort(angles)]
def world_synthesis_time_base_generation(temporal_positions, f0, fs, vuv,
                                         time_axis, default_f0):
    f0_interpolated_raw = interp1d(temporal_positions, f0, kind="linear",
                                   fill_value="extrapolate")(time_axis)
    vuv_interpolated = interp1d(temporal_positions, vuv, kind="linear",
                                fill_value="extrapolate")(time_axis)
    vuv_interpolated = vuv_interpolated > 0.5
    f0_interpolated = f0_interpolated_raw * vuv_interpolated.astype("float32")
    f0_interpolated[f0_interpolated == 0] = f0_interpolated[f0_interpolated == 0] + default_f0

    total_phase = np.cumsum(2 * np.pi * f0_interpolated / float(fs))

    core = np.mod(total_phase, 2 * np.pi)
    core = np.abs(core[1:] - core[:-1])
    # account for diff, avoid deprecation warning with [:-1]
    pulse_locations = time_axis[:-1][core > (np.pi / 2.)]
    pulse_locations_index = np.round(pulse_locations * fs).astype("int32")
    return pulse_locations, pulse_locations_index, vuv_interpolated
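The pulse picker above works because np.mod(total_phase, 2 * np.pi) jumps by nearly 2π whenever the accumulated phase completes a cycle, so a large first difference marks one pitch period. The wrap-detection idiom in isolation (a hypothetical constant 110 Hz tone at 8 kHz):

import numpy as np

fs, f0 = 8000, 110.0
phase = np.cumsum(2 * np.pi * np.full(400, f0) / fs)
core = np.mod(phase, 2 * np.pi)
wraps = np.nonzero(np.abs(np.diff(core)) > np.pi / 2)[0]
print(wraps)  # one wrap per period of about 8000/110 = 72.7 samples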
def setup(x_shape, resize_ratio):

    box_size = 1.0 / resize_ratio
    if np.mod(x_shape[1], box_size) != 0 or np.mod(x_shape[2], box_size) != 0:
        print "only support width (and height) * resize_ratio is an integer!"

    def A_fun(x):
        y = box_average(x, int(box_size))
        return y

    def AT_fun(y):
        x = box_repeat(y, int(box_size))
        return x

    return (A_fun, AT_fun)
def form_sets(samples, labels, max_num, verbose=False):
    """Form sample and label sets.
    """
    # form training set data
    set_ids = form_set_data(labels, max_num, verbose)
    set_data = []
    set_labels = []
    print "forming set samples"
    sys.stdout.flush()
    count = 0
    for key, ids in set_ids.iteritems():
        # ignore small sets
        if len(ids) < max_num:
            continue
        set_data.append(samples[ids])
        set_labels.append(labels[key])
        count += 1
        if np.mod(count, 500) == 0:
            sys.stdout.write(".")
            # sys.stdout.write(".{}-{}".format(key, train_labels[key]))
    sys.stdout.write("\n")
    return set_data, set_labels, set_ids
def get_periodic_rvec(data):
    coords = obtain_rvec(data)
    if sum(data.ds.periodicity) == 0:
        return coords
    le = data.ds.domain_left_edge.in_units("code_length").d
    dw = data.ds.domain_width.in_units("code_length").d

    for i in range(coords.shape[0]):
        if not data.ds.periodicity[i]:
            continue
        coords[i, ...] -= le[i]
        # figure out which measure is less
        mins = np.argmin([np.abs(np.mod(coords[i, ...], dw[i])),
                          np.abs(np.mod(coords[i, ...], -dw[i]))], axis=0)
        temp_coords = np.mod(coords[i, ...], dw[i])

        # Where the second measure is better, update the temporary coords
        ii = mins == 1
        temp_coords[ii] = np.mod(coords[i, ...], -dw[i])[ii]

        # Put the temporary coords into the actual storage
        coords[i, ...] = temp_coords

        coords[i, ...] += le[i]

    return coords
def bbox_filter(left, right, domain_width):

    def myfilter(chunk, mask=None):
        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T

        # This hurts, but is useful for periodicity. Probably should check
        # first if it is even needed for a given left/right
        for i in range(3):
            pos[:, i] = np.mod(pos[:, i] - left[i], domain_width[i]) + left[i]

        # Now get all particles that are within the bbox
        if mask is None:
            mask = np.all(pos >= left, axis=1)
            np.logical_and(mask, np.all(pos < right, axis=1), mask)
        else:
            np.logical_and(mask, np.all(pos >= left, axis=1), mask)
            np.logical_and(mask, np.all(pos < right, axis=1), mask)

        return mask

    return myfilter
def sphere_filter(center, radius, domain_width):

    def myfilter(chunk, mask=None):
        pos = np.array([chunk['x'], chunk['y'], chunk['z']]).T
        left = center - radius

        # This hurts, but is useful for periodicity. Probably should check
        # first if it is even needed for a given left/right
        for i in range(3):
            pos[:, i] = np.mod(pos[:, i] - left[i], domain_width[i]) + left[i]

        # Now get all particles that are within the radius
        if mask is None:
            mask = ((pos - center) ** 2).sum(axis=1) ** 0.5 < radius
        else:
            np.multiply(mask, np.linalg.norm(pos - center, 2) < radius, mask)

        return mask

    return myfilter
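Both filters above rely on the same periodic-wrap idiom: np.mod(pos - left, domain_width) + left maps every coordinate into the window [left, left + domain_width). In isolation, with hypothetical 1D values:

import numpy as np

left, width = -0.5, 1.0
pos = np.array([-1.2, -0.4, 0.3, 0.9, 1.6])
print(np.mod(pos - left, width) + left)  # approx. [-0.2 -0.4 0.3 -0.1 -0.4], all in [-0.5, 0.5)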
def train(self, x, y, learning_rate=1e-3, reg=1e-5, num_iter=1500, batch_size=200):
    num_train, num_feature = x.shape
    num_classes = np.max(y) + 1
    if self.W is None:
        self.W = np.random.randn(num_feature, num_classes)

    loss_history = []
    acc_history = []
    for iter in range(num_iter):
        indices = np.random.choice(num_train, batch_size)
        x_batch = x[indices]
        y_batch = y[indices]

        loss, grad = self.loss(x_batch, y_batch, reg)
        acc = self.accuracy(x_batch, y_batch)
        loss_history.append(loss)
        acc_history.append(acc)

        self.W += -learning_rate * grad

        if np.mod(iter, 100) == 0:
            print("iteration {}/{} loss: {:.7f}".format(iter, num_iter, loss))

    return loss_history, acc_history
def train(self, x, y, learning_rate=1e-3, reg=1e-5, num_iter=1500, batch_size=200):
    num_train, num_feature = x.shape
    num_classes = np.max(y) + 1
    if self.W is None:
        self.W = np.random.randn(num_feature, num_classes)

    loss_history = []
    accuracy_history = []
    for iter in range(num_iter):
        indices = np.random.choice(num_train, batch_size)
        x_batch = x[indices]
        y_batch = y[indices]

        loss, grad = self.loss(x_batch, y_batch, reg)
        acc = self.accuracy(x_batch, y_batch)
        loss_history.append(loss)
        accuracy_history.append(acc)

        self.W += -learning_rate * grad

        if np.mod(iter, 100) == 0:
            print("iteration {}/{} loss: {:.7f}".format(iter, num_iter, loss))

    return loss_history, accuracy_history
def survey(self, quantity=None):
    if not quantity:
        quantity = len(self.ascii_vals)

    # x = np.linspace(0, len(self.ascii_vals) - 1, quantity).astype(int)  # Size changes error granularity
    x = np.random.randint(len(self.ascii_vals), size=quantity)

    if self.noise:
        generated_noise = np.random.normal(0., scale=len(self.character_set) // 2,
                                           size=self.stimuli[x].shape).astype(int)
        mask = np.random.binomial(1, self.noise, size=self.stimuli[x].shape)
        stimuli = np.mod(self.stimuli[x] + generated_noise * mask,
                         len(self.character_set))
    else:
        stimuli = self.stimuli[x]

    print(self.reformat(stimuli))

    if self.autoencoder:
        return [stimuli.T, self.stimuli[x].T]
    else:
        return [stimuli.T, self.expected[x].T]
def doc2word2vec(data_txt_path, word2vec_model, save_path, dim=300, length=10):
    # do not use tf-idf values as coefficients,
    # usually because the data_txt_path is a tfidf-sorted text.
    # length = 1: mean of vectors
    # length > 1: concatenate vectors
    word2vec = pk.load(open(word2vec_model, 'r'))
    docs = open(data_txt_path).readlines()
    N = len(docs)
    feat = np.zeros((N, dim * length), dtype=np.float32)
    t0 = time.time()
    for idx, doc in enumerate(docs):
        words = doc.strip().split(' ')
        feat[idx, :] = create_vec_from_words(words, word2vec, dim, length)
        if np.mod(idx, 10000) == 0:
            t = time.time() - t0
            print '# %d, t = %d minutes' % (idx, t / 60)

    h5file = h5py.File(save_path, 'w')
    h5file.create_dataset('feature', data=feat, dtype=np.float32)
    h5file.close()
    print 'saved to %s' % save_path
def tfidf_cluster_feature(data_txt_path, word2vec_distr_path, save_path, df_path, nDoc):
    word2vec_distr = pk.load(open(word2vec_distr_path))
    docs = open(data_txt_path).readlines()
    DF = pk.load(open(df_path))
    N = len(docs)
    DIM = word2vec_distr.values()[0].shape[0]
    h5file = h5py.File(save_path, 'w')
    feat = h5file.create_dataset('feature', shape=(N, DIM), dtype=np.float32)
    t0 = time.time()
    for idx, doc in enumerate(docs):
        words = doc.strip().split(' ')
        feat[idx, :] = compute_tfidf_cluster_feat(words, DF, nDoc, word2vec_distr)
        if np.mod(idx, 10000) == 0:
            t = time.time() - t0
            print '#%d, t = %d mins' % (idx, t / 60)
    h5file.close()
    print 'saved to %s' % save_path
def compute_word2vec(docs, DF, nDoc, model, vecDim=300):
    N = len(docs)
    nonExist_vocab = {}
    feat = np.zeros((N, 300), dtype=np.float32)
    for idx, doc in enumerate(docs):
        nonExist_list = []
        TF = {}
        spt = doc.split(' ')
        nWord = len(spt)
        update_vocab(TF, spt)
        vec = np.zeros(vecDim, dtype=np.float32)
        for word, tf in TF.items():
            try:
                tfidf = 1.0 * tf / nWord * np.log2(1.0 * nDoc / DF[word])
                vec += tfidf * word2vec(model, word)
            except:
                nonExist_list.append(word)
                pass
        feat[idx, :] = vec
        update_vocab(nonExist_vocab, nonExist_list)
        if np.mod(idx, 10000) == 0:
            print '# %d' % idx
            print 'nonExist: %d' % len(nonExist_vocab.keys())
    return feat, nonExist_vocab
def tfidf(data_txt_path, df_path, nDoc, word2id_path, save_path):
    t0 = time.time()
    docs = open(data_txt_path).readlines()
    word2id = pk.load(open(word2id_path, 'r'))
    N = len(docs)
    DIM = len(word2id.keys())
    h5file = h5py.File(save_path, 'w')
    h5set = h5file.create_dataset('feature', shape=(N, DIM), dtype=np.float32)
    print 'word2id loaded from %s' % word2id_path
    print 'dataset created, shape (%d, %d)' % (N, DIM)
    # load DF
    DF = pk.load(open(df_path))
    # compute tfidf
    for idx, doc in enumerate(docs):
        feat = compute_tfidf(doc, DIM, DF, nDoc, word2id)
        h5set[idx, :] = feat.copy()
        if np.mod(idx, 10000) == 0:
            t = time.time() - t0
            print '# %d, t = %f hours' % (idx, t / 3600.)
    h5file.close()
    print 'TF-IDF feature saved to %s' % save_path
def row_col_from_lin(ct, sh):
    """ Convert a 1D counter into a col and row counter """

    assert len(sh) == 2, 'Shape must be 2D'

    tot_rows = sh[0]
    tot_cols = sh[1]

    if isinstance(ct, _np.ndarray):
        if (ct > tot_rows * tot_cols).any():
            print('Count is out-of-range. Returning None.')
            return None
    else:
        if ct > tot_rows * tot_cols:
            print('Count is out-of-range. Returning None.')
            return None

    row = _np.mod(ct, tot_rows)
    col = ct // tot_rows
    return [row, col]
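A quick check of the column-major unflattening above (a sketch assuming the function is in scope; note the module aliases numpy as _np, and the counter walks down the rows of each column first):

import numpy as _np

for ct in range(6):
    print(ct, row_col_from_lin(ct, (3, 2)))  # row cycles 0, 1, 2 while col increments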