The following 50 code examples, extracted from open-source Python projects, demonstrate how to use numpy.invert().
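As a quick orientation before the project examples (a minimal sketch, not drawn from any of the projects below), numpy.invert computes the element-wise bitwise NOT: a logical NOT on boolean arrays, which is how most examples below use it, and a two's-complement bit flip on integers.

    import numpy as np

    # On boolean arrays, np.invert is a logical NOT:
    mask = np.array([True, False, True])
    print(np.invert(mask))      # [False  True  True]

    # On integer arrays it flips the bits; for unsigned 8-bit values
    # this means x -> 255 - x:
    print(np.invert(np.array([0, 1, 255], dtype=np.uint8)))  # [255 254   0]

    # The ~ operator is equivalent:
    print(~mask)                # [False  True  True]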
def logpdf(self, samples):
    '''
    Calculates the log of the probability density function.

    Parameters
    ----------
    samples : array_like
        n-by-2 matrix of samples where n is the number of samples.

    Returns
    -------
    vals : ndarray
        Log of the probability density function evaluated at `samples`.
    '''
    samples = np.copy(np.asarray(samples))
    samples = self.__rotate_input(samples)
    inner = np.all(np.bitwise_and(samples > 0.0, samples < 1.0), axis=1)
    outer = np.invert(inner)
    vals = np.zeros(samples.shape[0])
    vals[inner] = self._logpdf(samples[inner, :])
    # Assign zero mass to border
    vals[outer] = -np.inf
    return vals
def set_initial_conditions(self):
    # initial conditions
    self.temp[:, :, :, 0:2] = ((1 - self.zt[None, None, :] / self.zw[0]) * 15 * self.maskT)[..., None]
    self.salt[:, :, :, 0:2] = 35.0 * self.maskT[..., None]

    # wind stress forcing
    taux = np.zeros(self.ny + 1, dtype=self.default_float_type)
    yt = self.yt[2:self.ny + 3]
    taux = (.1e-3 * np.sin(np.pi * (self.yu[2:self.ny + 3] - yu_start) / (-20.0 - yt_start))) * (yt < -20) \
        + (.1e-3 * (1 - np.cos(2 * np.pi * (self.yu[2:self.ny + 3] - 10.0) / (yu_end - 10.0)))) * (yt > 10)
    self.surface_taux[:, 2:self.ny + 3] = taux * self.maskU[:, 2:self.ny + 3, -1]

    # surface heatflux forcing
    self.t_star = 15 * np.invert((self.yt < -20) | (self.yt > 20)) \
        + 15 * (self.yt - yt_start) / (-20 - yt_start) * (self.yt < -20) \
        + 15 * (1 - (self.yt - 20) / (yt_end - 20)) * (self.yt > 20.)
    self.t_rest = self.dzt[np.newaxis, -1] / (30. * 86400.) * self.maskT[:, :, -1]

    if self.enable_tke:
        self.forc_tke_surface[2:-2, 2:-2] = np.sqrt((0.5 * (self.surface_taux[2:-2, 2:-2] + self.surface_taux[1:-3, 2:-2]))**2
                                                    + (0.5 * (self.surface_tauy[2:-2, 2:-2] + self.surface_tauy[2:-2, 1:-3]))**2)**(1.5)

    if self.enable_idemix:
        self.forc_iw_bottom[:] = 1.0e-6 * self.maskW[:, :, -1]
        self.forc_iw_surface[:] = 0.1e-6 * self.maskW[:, :, -1]
def omniglot_folder_to_NDarray(path_im):
    alphbts = os.listdir(path_im)
    ALL_IMGS = []

    for alphbt in alphbts:
        chars = os.listdir(os.path.join(path_im, alphbt))
        for char in chars:
            img_filenames = os.listdir(os.path.join(path_im, alphbt, char))
            char_imgs = []
            for img_fn in img_filenames:
                fn = os.path.join(path_im, alphbt, char, img_fn)
                I = imread(fn)
                I = np.invert(I)
                char_imgs.append(I)
            ALL_IMGS.append(char_imgs)

    return np.array(ALL_IMGS)
def GenerateChildren(self, axis):
    if self.IsLeaf:
        return False
    x = self.points[:, axis]
    med = (self.bounds[axis, 0] + self.bounds[axis, 1]) / 2
    index = (x < med)
    if np.any(index):
        self.left = KDNode(self.points[index], self.masses[index], self.softening[index])
        self.HasLeft = True
    index = np.invert(index)
    if np.any(index):
        self.right = KDNode(self.points[index], self.masses[index], self.softening[index])
        self.HasRight = True
    self.points = empty((1, 1))
    self.masses = empty(1)
    self.softening = empty(1)
    return True
def create_test_set(x_lst):
    n = len(x_lst)
    x_lens = np.array(list(map(len, x_lst)))  # list() needed under Python 3
    max_len = max(map(len, x_lst)) - 1
    u_out = np.zeros((n, max_len, OUTDIM), dtype='float32') * np.nan
    x_out = np.zeros((n, max_len, OUTDIM), dtype='float32') * np.nan
    for row, vec in enumerate(x_lst):
        l = len(vec) - 1
        u = vec[:-1]  # all but last element
        x = vec[1:]   # all but first element
        x_out[row, :l] = x
        u_out[row, :l] = u
    mask = np.invert(np.isnan(x_out))
    x_out[np.isnan(x_out)] = 0
    u_out[np.isnan(u_out)] = 0
    mask = mask[:, :, 0]
    assert np.all((mask.sum(axis=1) + 1) == x_lens)
    return u_out, x_out, mask.astype('float32')
def _get_air_voxels(self, input_data, border_offset=3):
    """Get a two dimensional list with all the voxels in the air.

    Returns:
        ndarray: The first dimension is the list of voxels, the second the signal per voxel.
    """
    indices = np.where(input_data.mask > 0)
    max_dims = np.max(indices, axis=1)
    min_dims = np.min(indices, axis=1)

    mask = np.copy(input_data.mask)
    mask[min_dims[0]:max_dims[0]] = True
    mask[:, min_dims[1]:max_dims[1], :] = True
    mask[..., min_dims[2]:max_dims[2]] = True

    mask[0:border_offset] = True
    mask[-border_offset:] = True
    mask[:, 0:border_offset, :] = True
    mask[:, -border_offset:, :] = True
    mask[..., 0:border_offset] = True
    mask[..., -border_offset:] = True

    return create_roi(input_data.signal4d, np.invert(mask))
def _get_air_voxels(self, input_data, border_offset=3):
    """Get a two dimensional list with all the voxels in the air.

    Returns:
        ndarray: The first dimension is the list of voxels, the second the signal per voxel.
    """
    mask = np.copy(input_data.mask)
    mask = binary_dilation(mask, iterations=1)

    mask[0:border_offset] = True
    mask[-border_offset:] = True
    mask[:, 0:border_offset, :] = True
    mask[:, -border_offset:, :] = True
    mask[..., 0:border_offset] = True
    mask[..., -border_offset:] = True

    return create_roi(input_data.signal4d, np.invert(mask))
def fix_variant_number_discrepancy(var_df, var_filt_df, filt_ids, ccert_ids, ccert, snvs):
    n_to_assign = len(var_df)
    var_df = pd.concat([var_df, var_filt_df])

    var_in_ccert = np.array([var_id in ccert_ids for var_id in filt_ids])
    var_filt_df = var_filt_df[var_in_ccert]
    filt_ids = get_var_ids(var_filt_df, snvs)

    ccert_in_df = np.array([cc_id in filt_ids for cc_id in ccert_ids])
    ccert = ccert[ccert_in_df]

    to_assign = np.concatenate([
        np.array([True] * n_to_assign, dtype=bool),
        np.invert(var_in_ccert)
    ])
    var_df.index = range(len(var_df))
    var_df = var_df[to_assign]

    return (var_df, var_filt_df, ccert)
def predict():
    # get data from drawing canvas and save as image
    parseImage(request.get_data())

    # read parsed image back in 8-bit, black and white mode (L)
    x = imread('output.png', mode='L')
    x = np.invert(x)
    x = imresize(x, (28, 28))

    # reshape image data for use in neural network
    x = x.reshape(1, 28, 28, 1)
    with graph.as_default():
        out = model.predict(x)
        print(out)
        print(np.argmax(out, axis=1))
        response = np.array_str(np.argmax(out, axis=1))
        return response
def create_mask(im_arr, erode=0):
    if im_arr.shape[2] == 3:
        im_arr = rgb2gray(im_arr)

    thresh = 0.05
    inv_bin = np.invert(im_arr > thresh)
    all_labels = measure.label(inv_bin)

    # Select largest object and invert
    seg_arr = all_labels == 0

    if erode > 0:
        strel = selem.disk(erode, dtype=np.bool)
        seg_arr = binary_erosion(seg_arr, selem=strel)
    elif erode < 0:
        strel = selem.disk(abs(erode), dtype=np.bool)
        seg_arr = binary_dilation(seg_arr, selem=strel)

    return seg_arr.astype(np.bool)
def plot_reg_2D_stoc(X, stoc_vector):

    deter_vec = np.invert(stoc_vector)

    dom_max = np.amax(X[stoc_vector, :]) + 1

    A = np.zeros((dom_max, dom_max))

    stoc_indexs = np.arange(0, X.shape[0], 1)[stoc_vector].astype(int)

    for i, deter_element in enumerate(deter_vec):
        if deter_element == True:
            A[X[int(stoc_indexs[0]), :].astype(int), X[int(stoc_indexs[1]), :].astype(int)] = X[i, :]
            pl.figure(i)
            #ax = fig.gca(projection='3d')
            #surf = ax.plot_surface(X[int(stoc_indexs[0]),:].astype(int), X[int(stoc_indexs[1]),:].astype(int), X[i,:],
            #                       rstride=1, cstride=1, cmap=cm.coolwarm, linewidth=0, antialiased=False)
            pl.contour(A, X[i, :])
            #ax.zaxis.set_major_locator(LinearLocator(10))
            #ax.zaxis.set_major_formatter(FormatStrFormatter('%.02f'))
            #fig.colorbar(surf, shrink=0.5, aspect=5)
            pl.show()
def train(sess, q_network, target_network, observations):
    # Sample a minibatch to train on
    mini_batch = random.sample(observations, MINI_BATCH_SIZE)

    states = [d['state'] for d in mini_batch]
    actions = [d['action'] for d in mini_batch]
    rewards = [d['reward'] for d in mini_batch]
    next_states = [d['next_state'] for d in mini_batch]
    terminal = np.array([d['terminal'] for d in mini_batch])

    # Compute Q(s', a'; theta'), where theta' are the parameters for the target
    # network. This is an unbiased estimator for y_i as in eqn 2 in the DQN
    # paper.
    next_q = sess.run(target_network.output_layer, feed_dict={
        target_network.input_layer: next_states
    })
    target_q = rewards + np.invert(terminal).astype('float32') * DISCOUNT_FACTOR * np.max(next_q, axis=1)
    one_hot_actions = compute_one_hot_actions(actions)

    # Train the q-network (i.e. the parameters theta).
    q_network.train(sess, states, one_hot_actions, target_q)


# Return a one hot vector with a 1 at the index for the action.
def normalize_simple(matrix, mask):
    """Normalizes a matrix by columns, and then by rows. With multiple
    time-series, the data are normalized to the within-series total, not the
    entire data set total.

    Parameters
    ----------
    matrix: np.matrix
        Time-series matrix of abundance counts. Rows are sequences, columns
        are samples/time-points.
    mask: list or np.array
        List of objects with length matching the number of timepoints, where
        unique values delineate multiple time-series. If there is only one
        time-series in the data set, it's a list of identical objects.

    Returns
    -------
    normal_matrix: np.matrix
        Matrix where the columns (within-sample) have been converted to
        proportions, then the rows are normalized to sum to 1.
    """
    normal_matrix = matrix / matrix.sum(0)
    normal_matrix[np.invert(np.isfinite(normal_matrix))] = 0
    for mask_val in np.unique(mask):
        y = normal_matrix[:, np.where(mask == mask_val)[0]]
        y = np.apply_along_axis(zscore, 1, y)
        normal_matrix[:, np.where(mask == mask_val)[0]] = y
        del y
    return normal_matrix
def predict():
    # whenever the predict method is called, we're going
    # to input the user drawn character as an image into the model,
    # perform inference, and return the classification

    # get the raw data format of the image
    imgData = request.get_data()

    # encode it into a suitable format
    convertImage(imgData)
    print("debug")

    # read the image into memory
    x = imread('output.png', mode='L')

    # compute a bit-wise inversion so black becomes white and vice versa
    x = np.invert(x)

    # make it the right size
    x = imresize(x, (28, 28))
    #imshow(x)

    # convert to a 4D tensor to feed into our model
    x = x.reshape(1, 28, 28, 1)
    print("debug2")

    # in our computation graph
    with graph.as_default():
        # perform the prediction
        out = model.predict(x)
        print(out)
        print(np.argmax(out, axis=1))
        print("debug3")
        # convert the response to a string
        response = np.array_str(np.argmax(out, axis=1))
        return response
def _ccdf(self, samples):
    vals = np.zeros(samples.shape[0])
    # Avoid subtraction of infinities
    neqz = np.bitwise_and(np.any(samples > 0.0, axis=1),
                          np.any(samples < 1.0, axis=1))
    nrvs = norm.ppf(samples[neqz, :])
    vals[neqz] = norm.cdf((nrvs[:, 0] - self.theta * nrvs[:, 1])
                          / np.sqrt(1 - self.theta**2))
    vals[np.invert(neqz)] = norm.cdf(0.0)
    return vals
def test_in1d_invert(self):
    "Test in1d's invert parameter"
    # We use two different sizes for the b array here to test the
    # two different paths in in1d().
    for mult in (1, 10):
        a = np.array([5, 4, 5, 3, 4, 4, 3, 4, 3, 5, 2, 1, 5, 5])
        b = [2, 3, 4] * mult
        assert_array_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))
def setdiff1d(ar1, ar2, assume_unique=False):
    """
    Find the set difference of two arrays.

    Return the sorted, unique values in `ar1` that are not in `ar2`.

    Parameters
    ----------
    ar1 : array_like
        Input array.
    ar2 : array_like
        Input comparison array.
    assume_unique : bool
        If True, the input arrays are both assumed to be unique, which
        can speed up the calculation.  Default is False.

    Returns
    -------
    setdiff1d : ndarray
        Sorted 1D array of values in `ar1` that are not in `ar2`.

    See Also
    --------
    numpy.lib.arraysetops : Module with a number of other functions for
                            performing set operations on arrays.

    Examples
    --------
    >>> a = np.array([1, 2, 3, 2, 4, 1])
    >>> b = np.array([3, 4, 5, 6])
    >>> np.setdiff1d(a, b)
    array([1, 2])

    """
    if assume_unique:
        ar1 = np.asarray(ar1).ravel()
    else:
        ar1 = unique(ar1)
        ar2 = unique(ar2)
    return ar1[in1d(ar1, ar2, assume_unique=True, invert=True)]
def test_in1d_invert(self):
    # Test in1d's invert parameter
    a = array([1, 2, 5, 7, -1], mask=[0, 0, 0, 0, 1])
    b = array([1, 2, 3, 4, 5, -1], mask=[0, 0, 0, 0, 0, 1])
    assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

    a = array([5, 5, 2, 1, -1], mask=[0, 0, 0, 0, 1])
    b = array([1, 5, -1], mask=[0, 0, 1])
    assert_equal(np.invert(in1d(a, b)), in1d(a, b, invert=True))

    assert_array_equal([], in1d([], [], invert=True))
def __invert__(self):
    return invert(self)
def assert_set_equality(test_case, expected, actual):
    """Asserts that two lists are equal without order.

    Given two lists, treat them as sets and test equality. This function only
    requires an __eq__ method to be defined on the objects, and not __hash__
    which set comparison requires. This function removes the burden of
    defining a __hash__ method just for testing.

    This function calls into tf.test.TestCase.assert* methods and behaves
    like a test assert. The function returns if `expected` and `actual`
    contain the same objects regardless of ordering.

    Note, this is an O(n^2) operation and is not suitable for large lists.

    Args:
        test_case: A tf.test.TestCase instance from a test.
        expected: A list of objects.
        actual: A list of objects.
    """
    actual_found = np.zeros(len(actual), dtype=bool)
    for expected_obj in expected:
        found = False
        for i, actual_obj in enumerate(actual):
            if expected_obj == actual_obj:
                actual_found[i] = True
                found = True
                break
        if not found:
            test_case.fail('Expected %s not found in actual collection' % expected_obj)
    if not np.all(actual_found):
        test_case.fail('Actual objects %s not found in expected collection' %
                       np.array(actual)[np.invert(actual_found)])
def set_initial_conditions(self):
    m = self.main_module

    # initial conditions
    m.temp[:, :, :, 0:2] = ((1 - m.zt[None, None, :] / m.zw[0]) * 15 * m.maskT)[..., None]
    m.salt[:, :, :, 0:2] = 35.0 * m.maskT[..., None]

    # wind stress forcing
    taux = np.zeros(m.ny + 1, dtype=self.default_float_type)
    yt = m.yt[2:m.ny + 3]
    taux = (.1e-3 * np.sin(np.pi * (m.yu[2:m.ny + 3] - yu_start) / (-20.0 - yt_start))) * (yt < -20) \
        + (.1e-3 * (1 - np.cos(2 * np.pi * (m.yu[2:m.ny + 3] - 10.0) / (yu_end - 10.0)))) * (yt > 10)
    m.surface_taux[:, 2:m.ny + 3] = taux * m.maskU[:, 2:m.ny + 3, -1]

    # surface heatflux forcing
    self.t_star = 15 * np.invert((m.yt < -20) | (m.yt > 20)) \
        + 15 * (m.yt - yt_start) / (-20 - yt_start) * (m.yt < -20) \
        + 15 * (1 - (m.yt - 20) / (yt_end - 20)) * (m.yt > 20.)
    self.t_rest = m.dzt[None, -1] / (30. * 86400.) * m.maskT[:, :, -1]

    t = self.tke_module
    if t.enable_tke:
        t.forc_tke_surface[2:-2, 2:-2] = np.sqrt((0.5 * (m.surface_taux[2:-2, 2:-2] + m.surface_taux[1:-3, 2:-2]))**2
                                                 + (0.5 * (m.surface_tauy[2:-2, 2:-2] + m.surface_tauy[2:-2, 1:-3]))**2)**(1.5)

    i = self.idemix_module
    if i.enable_idemix:
        i.forc_iw_bottom[:] = 1.0e-6 * m.maskW[:, :, -1]
        i.forc_iw_surface[:] = 0.1e-6 * m.maskW[:, :, -1]
def signature_predictions(X0, n_test_samples, n_signatures):
    sidx = np.zeros(X0.shape[1], dtype=np.bool)
    sidx[:n_test_samples] = True
    np.random.shuffle(sidx)
    Xtrain = X0[:, np.invert(sidx)]
    Xtest = X0[:, sidx]
    sg = get_signature_genes(Xtrain, n_signatures, lda=10000000)
    model = build_signature_model(Xtrain, sg)
    Xhat = (model.predict(Xtest[sg].T)).T
    return sg, Xtest, Xhat
def compute_scores(self):
    m_aps = []
    for oc_i in range(self.n_classes):
        sorted_idxs = np.argsort(-self.submission_array[:, oc_i])
        tp = self.gt_array[:, oc_i][sorted_idxs] == 1
        fp = np.invert(tp)
        n_pos = tp.sum()
        if n_pos < 0.1:
            m_aps.append(float('nan'))
            continue
        n_neg = fp.sum()
        f_pcs = np.cumsum(fp)
        t_pcs = np.cumsum(tp)
        prec = t_pcs / (f_pcs + t_pcs)
        if self.normalize_map:
            k = self.N_all / n_pos
            k2 = self.F_all / n_neg
            prec = (t_pcs * k) / (f_pcs * k2 + t_pcs * k)
        avg_prec = 0
        for i in range(self.submission_array.shape[0]):
            if tp[i]:
                avg_prec += prec[i]
        m_aps.append(avg_prec / n_pos)
    m_aps = np.array(m_aps)
    m_ap = np.mean(m_aps)
    w_ap = (m_aps * self.gt_array.sum(axis=0) / self.gt_array.sum()).sum()
    return m_ap, w_ap, m_aps
def analyze_controls(self, config_file):
    with open(config_file, 'r') as myfile:
        config = myfile.read()
    m = re.search(r'available_buttons[\s]*\=[\s]*\{([^\}]*)\}', config)
    avail_controls = m.group(1).split()
    cont_controls = np.array([bool(re.match('.*_DELTA', c)) for c in avail_controls])
    discr_controls = np.invert(cont_controls)
    return avail_controls, np.squeeze(np.nonzero(cont_controls)), np.squeeze(np.nonzero(discr_controls))
def _filter(self, filter_, items_vector):
    if not self.is_inclusive:
        filter_ = np.invert(filter_)
    items_vector *= filter_
def _bin_results(self, length, results):
    """
    Add hits to the bins corresponding to these results. length_hit_bins
    is flattened, so we need to figure out the offset for this hit by
    factoring the sizes of the other dimensions.
    """
    hit_bin = np.zeros(results.shape[0], dtype='int64')
    multi = 1
    good = np.ones(results.shape[0], dtype='bool')
    for dim in range(len(self.out_labels)):
        for d1 in range(dim):
            multi *= self.bin_edges[d1].size
        if dim == 0 and len(self.out_labels) == 1:
            try:
                digi = np.digitize(results, self.bin_edges[dim])
            except ValueError:
                # The user probably did something like
                # return a * b rather than
                # return a[0] * b[0], which will only happen
                # for single field functions.
                digi = np.digitize(results[0], self.bin_edges[dim])
        else:
            digi = np.digitize(results[:, dim], self.bin_edges[dim])
        too_low = (digi == 0)
        too_high = (digi == self.bin_edges[dim].size)
        self.too_low[dim] += (too_low).sum()
        self.too_high[dim] += (too_high).sum()
        newgood = np.bitwise_and(np.invert(too_low), np.invert(too_high))
        good = np.bitwise_and(good, newgood)
        hit_bin += np.multiply((digi - 1), multi)
    digi_bins = np.arange(self.length_bin_hits[length].size + 1)
    hist, digi_bins = np.histogram(hit_bin[good], digi_bins)
    self.length_bin_hits[length] += hist
def __ne__(self, other):
    return np.invert(self == other)


# Export and import functionality
def gen_batch_function(data_folder, image_shape):
    """
    Generate function to create batches of training data
    :param data_folder: Path to folder that contains all the datasets
    :param image_shape: Tuple - Shape of image
    :return:
    """
    def get_batches_fn(batch_size):
        """
        Create batches of training data
        :param batch_size: Batch Size
        :return: Batches of training data
        """
        image_paths = glob(os.path.join(data_folder, 'image_2', '*.png'))
        label_paths = {
            re.sub(r'_(lane|road)_', '_', os.path.basename(path)): path
            for path in glob(os.path.join(data_folder, 'gt_image_2', '*_road_*.png'))}
        background_color = np.array([255, 0, 0])

        random.shuffle(image_paths)
        for batch_i in range(0, len(image_paths), batch_size):
            images = []
            gt_images = []
            for image_file in image_paths[batch_i:batch_i + batch_size]:
                gt_image_file = label_paths[os.path.basename(image_file)]

                image = scipy.misc.imresize(scipy.misc.imread(image_file), image_shape)
                gt_image = scipy.misc.imresize(scipy.misc.imread(gt_image_file), image_shape)

                gt_bg = np.all(gt_image == background_color, axis=2)
                gt_bg = gt_bg.reshape(*gt_bg.shape, 1)
                gt_image = np.concatenate((gt_bg, np.invert(gt_bg)), axis=2)

                images.append(image)
                gt_images.append(gt_image)

            yield np.array(images), np.array(gt_images)
    return get_batches_fn
def relabel_half_side_one_label(in_data, label_old, label_new, side_to_modify, axis, plane_intercept):
    """
    :param in_data:
    :param label_old:
    :param label_new:
    :param side_to_modify:
    :param axis:
    :param plane_intercept:
    :return:
    """
    msg = 'Input array must be 3-dimensional.'
    assert in_data.ndim == 3, msg

    msg = 'side_to_modify must be one of the two {}.'.format(['below', 'above'])
    assert side_to_modify in ['below', 'above'], msg

    msg = 'axis variable must be one of the following: {}.'.format(['x', 'y', 'z'])
    assert axis in ['x', 'y', 'z'], msg

    positions = in_data == label_old
    halfed_positions = np.zeros_like(positions)
    if axis == 'x':
        if side_to_modify == 'above':
            halfed_positions[plane_intercept:, :, :] = positions[plane_intercept:, :, :]
        if side_to_modify == 'below':
            halfed_positions[:plane_intercept, :, :] = positions[:plane_intercept, :, :]
    if axis == 'y':
        if side_to_modify == 'above':
            halfed_positions[:, plane_intercept:, :] = positions[:, plane_intercept:, :]
        if side_to_modify == 'below':
            halfed_positions[:, :plane_intercept, :] = positions[:, :plane_intercept, :]
    if axis == 'z':
        if side_to_modify == 'above':
            halfed_positions[:, :, plane_intercept:] = positions[:, :, plane_intercept:]
        if side_to_modify == 'below':
            halfed_positions[:, :, :plane_intercept] = positions[:, :, :plane_intercept]

    new_data = in_data * np.invert(halfed_positions) + label_new * halfed_positions.astype(np.int)
    return new_data
def get_bonus(self, path):
    if self._fit_steps > self._yield_zeros_until:
        bonus = self._coeff * self._f_predict(path['observations']).reshape(-1)
        if self._filter_bonuses:
            bonus = bonus * (np.invert(self._wrapped_constraint.evaluate(path)))
        return bonus
    else:
        return np.zeros(path["rewards"].size)
def show_isomap(window, isomap):
    #isomap_copy = isomap.copy()
    background = np.zeros([ISOMAP_SIZE, ISOMAP_SIZE, 4], dtype='uint8')
    background[:, :, 3] = 10
    mask = np.array([[int(x / 8) % 2 == int(y / 8) % 2 for x in range(isomap.shape[0])] for y in range(isomap.shape[1])])
    #mask = np.array([[int(x/8) % 2 == 0 for x in range(isomap.shape[0])] for y in range(isomap.shape[1])])
    background[mask, :3] = [200, 200, 200]
    mask = np.invert(mask)
    background[mask, :3] = [150, 150, 150]
    cv2.imshow(window, merge([background, isomap]))
def apply_mask(im, mask):
    im[np.invert(mask.astype(np.bool))] = 0
    return np.transpose(im, (1, 2, 0))
def mask_od_vessels(skel, od_center):
    # Create optic disk mask
    od_mask = np.zeros_like(skel, dtype=np.uint8)
    cv2.circle(od_mask, od_center, 30, (1, 1, 1), -1)
    od_mask_inv = np.invert(od_mask) / 255.

    skel = skel.astype(np.float)
    masked_skel = skel * od_mask_inv
    return masked_skel.astype(np.uint8)


# def line_diameters(edt, lines):
#
#     diameters = []
#
#     for line in lines:
#
#         p0, p1 = [np.asarray(pt) for pt in line]
#         vec = p1 - p0  # vector between segment end points
#         vec_len = np.linalg.norm(vec)
#
#         pts_along_line = np.uint(np.asarray([p0 + (i * vec) for i in np.arange(0., 1., 1. / vec_len)]))
#
#         for pt in pts_along_line:
#
#             try:
#                 diameters.append(edt[pt[0], pt[1]])
#             except IndexError:
#                 pass
#
#     return diameters
def positions(X, V_s, stoc_vector, domain_enum):
    '''
    Get the positions of the previous positions.
    '''
    # X is the state space vector, N \times N_s.
    # stoc_vector is a vector of length N_s with 1 when a variable is stochastic and zero otherwise.

    # Initialising the positions
    ##pdb.set_trace()
    N = X.shape[1]  # Number of states.
    N_s = np.sum(stoc_vector)
    N_r_s = len(V_s)  # Number of propensities which are purely stochastic.

    position = np.zeros((N, N_r_s), dtype=np.int64)
    valid = np.zeros((N, N_r_s), dtype=np.bool)
    #shift_M = np.zeros((N_r_s, N, N_s), dtype=np.int)

    # Loop through the stoichiometry and find the corresponding indexes.
    ##pdb.set_trace()
    for i in range(N_r_s):
        pre_states = X - np.array(V_s[i])[:, np.newaxis]
        interior = domain_enum.contains(pre_states)
        #print("shape In" + str(interior.shape))
        #print("shape valid" + str(valid[:,i].shape))
        valid[:, i] = interior
        #exterior = np.invert(interior)

        if np.sum(valid[:, i]) > 0:
            position[interior, i] = domain_enum.indices(pre_states[:, interior])

    return valid, position
def derivative_G(propensities, V, X, w, deter_vector, stoc_positions, positions, valid):

    # just the deterministics
    X_d = X[deter_vector, :].copy()
    temp_eta = np.zeros((np.sum(deter_vector), X.shape[1]))
    j = 0

    for i in range(len(stoc_positions)):
        ##pdb.set_trace()
        # If x - \nu_i is non zero
        if stoc_positions[i] == True:
            if np.sum(valid[:, j]) != 0:
                #print(" X shape: " + str(X.shape))
                #print(" w shape: " + str(w.shape))
                #print("test :" + str(map(propensities[i], *X[:, positions[valid[:, j]][:, j]])))
                temp_eta[:, valid[:, j]] += (X_d[:, positions[valid[:, j]][:, j]]
                                             - X_d[:, valid[:, j]]
                                             + V[i][deter_vector][:, np.newaxis]
                                             ) * map(propensities[i], *X[:, positions[valid[:, j]][:, j]]) * w[positions[valid[:, j]][:, j]]
            j += 1
        else:
            temp_eta[:, :] += (V[i][deter_vector][:, np.newaxis]) * map(propensities[i], *X) * w

    return_X = np.zeros(X.shape)
    return_X[deter_vector, :] = temp_eta
    return_X[np.invert(deter_vector), :] = X[np.invert(deter_vector), :].copy()
    return return_X
    #return temp_eta
def derivative_G(propensities, V, X, w, deter_vector, stoc_positions, positions, valid, jac):

    # just the deterministics
    X_d = X[deter_vector, :].copy()
    temp_eta = np.zeros((np.sum(deter_vector), X.shape[1]))
    j = 0

    for i in range(len(stoc_positions)):
        # If x - \nu_i is non zero
        if stoc_positions[i] == True:
            if np.sum(valid[:, j]) != 0:
                #print(" X shape: " + str(X.shape))
                #print(" w shape: " + str(w.shape))
                #print("test :" + str(map(propensities[i], *X[:, positions[valid[:, j]][:, j]])))

                # original Terms
                temp_eta[:, valid[:, j]] += (X_d[:, positions[valid[:, j]][:, j]]
                                             - X_d[:, valid[:, j]]
                                             + V[i][deter_vector][:, np.newaxis]
                                             ) * map(propensities[i], *X[:, positions[valid[:, j]][:, j]]) * w[positions[valid[:, j]][:, j]]

                # Correction terms
                # x terms: these should be all the terms which are minusing out.
                temp_eta[:, :] -= jac(X, deter_vector, i) * w[np.newaxis, :]
                # x - v_j term.
                temp_eta[:, valid[:, j]] += jac(X[:, positions[valid[:, j]][:, j]], deter_vector, i) * w[positions[valid[:, j]][:, j]][np.newaxis, :]
            j += 1
        else:
            temp_eta[:, :] += (V[i][deter_vector][:, np.newaxis]) * map(propensities[i], *X) * w

    #return_X = np.zeros(X.shape)
    #return_X[deter_vector, :] = temp_eta
    #return_X[np.invert(deter_vector), :] = X[np.invert(deter_vector), :].copy()
    return temp_eta
def ff(self, x):
    n_nodes = self.n_nodes
    assert len(x) == n_nodes[0]
    self.nas[0:n_nodes[0]] = x  # input node_a's

    # pl_ : of previous (left) layer
    pl_nas = np.append([1.0], self.nas[0:n_nodes[0]])
    for l in range(1, len(n_nodes)):
        thsM = self.__get_thsM(l - 1)
        nzs = self.__get_nzs(l)
        nas = self.__get_nas(l)
        nzs[:] = np.dot(thsM, pl_nas)
        # The output layer uses the sigmoid (paired with the cross-entropy
        # cost); the hidden layers use the configured activation function.
        if (l < len(n_nodes) - 1):
            nas[:] = self.activate(nzs)
        else:
            nas[:] = self.__sigmoid(nzs)
        # Dropout behaves differently during training and testing:
        # drop units while training, scale activations otherwise.
        if (self.doDropout):
            dropout = self.__get_dropout(l)
            nas[:] = nas * np.invert(dropout)
        else:
            nas[:] = nas * (1.0 - self.DORATE)
        pl_nas = nas
        pl_nas = np.append([1.0], pl_nas)  # add bias node