The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.bool().
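Before the examples, a quick orientation (a minimal sketch, assuming a NumPy version that still ships the alias): numpy.bool is simply Python's built-in bool re-exported, and in practice it appears almost exclusively as a dtype argument. NumPy 1.20 deprecated the alias (and 1.24 removed it), so on newer versions the drop-in spellings are the built-in bool or np.bool_.

import numpy as np

# numpy.bool is an alias for the built-in bool here, used as a dtype.
mask = np.zeros((3, 4), dtype=np.bool)  # all-False boolean mask
mask[1, 2] = True

# Equivalent spellings that also work on NumPy >= 1.20:
mask_builtin = np.zeros((3, 4), dtype=bool)
mask_scalar_type = np.zeros((3, 4), dtype=np.bool_)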
def load_data(from_file, input_words, grammar, max_length):
    inputs = []
    input_lengths = []
    parses = []
    labels = []
    label_lengths = []

    with open(from_file, 'r') as data:
        for line in data:
            split = line.strip().split('\t')
            if len(split) == 4:
                _, sentence, canonical, parse = split
            else:
                _, sentence, canonical = split
                parse = None
            input, in_len = vectorize(sentence, input_words, max_length, add_eos=False)
            inputs.append(input)
            input_lengths.append(in_len)
            label, label_len = grammar.vectorize_program(canonical, max_length)
            labels.append(label)
            label_lengths.append(label_len)
            if parse is not None:
                parses.append(vectorize_constituency_parse(parse, max_length, in_len))
            else:
                parses.append(np.zeros((2*max_length-1,), dtype=np.bool))
    return inputs, input_lengths, parses, labels, label_lengths

def is_task_done(self):
    """
    Queries the status of the task and indicates if it completed
    execution. Use this function to ensure that the specified
    operation is complete before you stop the task.

    Returns:
        bool: Indicates if the measurement or generation completed.
    """
    is_task_done = c_bool32()

    cfunc = lib_importer.windll.DAQmxIsTaskDone
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        self._handle, ctypes.byref(is_task_done))
    check_for_error(error_code)

    return is_task_done.value

def _write_digital_lines(
        task_handle, write_array, num_samps_per_chan, auto_start, timeout,
        data_layout=FillMode.GROUP_BY_CHANNEL):
    samps_per_chan_written = ctypes.c_int()

    cfunc = lib_importer.windll.DAQmxWriteDigitalLines
    if cfunc.argtypes is None:
        with cfunc.arglock:
            if cfunc.argtypes is None:
                cfunc.argtypes = [
                    lib_importer.task_handle, ctypes.c_int, c_bool32,
                    ctypes.c_double, ctypes.c_int,
                    wrapped_ndpointer(dtype=numpy.bool, flags=('C', 'W')),
                    ctypes.POINTER(ctypes.c_int), ctypes.POINTER(c_bool32)]

    error_code = cfunc(
        task_handle, num_samps_per_chan, auto_start, timeout,
        data_layout.value, write_array,
        ctypes.byref(samps_per_chan_written), None)
    check_for_error(error_code)

    return samps_per_chan_written.value

def test_one_sample_one_line(self, x_series_device, seed):
    # Reset the pseudorandom number generator with seed.
    random.seed(seed)

    do_line = random.choice(x_series_device.do_lines).name

    with nidaqmx.Task() as task:
        task.do_channels.add_do_chan(
            do_line, line_grouping=LineGrouping.CHAN_PER_LINE)

        writer = DigitalSingleChannelWriter(task.out_stream)
        reader = DigitalSingleChannelReader(task.in_stream)

        # Generate random values to test.
        values_to_test = [bool(random.getrandbits(1)) for _ in range(10)]

        values_read = []
        for value_to_test in values_to_test:
            writer.write_one_sample_one_line(value_to_test)
            time.sleep(0.001)
            values_read.append(reader.read_one_sample_one_line())

        numpy.testing.assert_array_equal(values_read, values_to_test)

def __init__(self, task_out_stream, auto_start=AUTO_START_UNSET):
    """
    Args:
        task_out_stream: Specifies the output stream associated with
            an NI-DAQmx task to which to write samples.
        auto_start (Optional[bool]): Specifies if the write method
            automatically starts the task if you did not explicitly
            start it with the DAQmx Start Task method. If you do not
            specify a value for this parameter, NI-DAQmx determines
            its value based on the type of write method used. If you
            use a one sample write method, the value is True;
            conversely, if you use a many sample write method, the
            value is False.
    """
    self._out_stream = task_out_stream
    self._task = task_out_stream._task
    self._handle = task_out_stream._task._handle
    self._verify_array_shape = True
    self._auto_start = auto_start

def corrupt_image(img, MAR_prob=0, min_rects=0, max_rects=0, min_width=0, max_width=0):
    new_img = img.copy()
    mask = np.zeros(img.shape[0:2], dtype=np.bool)
    if MAR_prob > 0:
        mask[(random_sample(mask.shape) < MAR_prob)] = True
    if max_rects > 0 and max_width > 0:
        h, w = mask.shape
        num_rects = random_integers(min_rects, max_rects)
        for i in range(num_rects):
            px1 = random_integers(0, w - min(max(min_width, 1), w))
            py1 = random_integers(0, h - min(max(min_width, 1), h))
            px2 = px1 + (min_width - 1) + random_integers(0, max(min(w - px1 - min_width, max_width - min_width), 0))
            py2 = py1 + (min_width - 1) + random_integers(0, max(min(h - py1 - min_width, max_width - min_width), 0))
            if px1 <= px2 and py1 <= py2:
                mask[py1:py2, px1:px2] = True
            else:
                # One of the sides has length 0, so there are no pixels to remove.
                pass
    if len(new_img.shape) == 2:
        new_img[mask] = 0
    else:
        new_img[mask, :] = 0
    return (new_img, 1.0 * mask)

# Process command line inputs

def label_and_build_mask(self, episode):
    is_catastrophe_array = np.array(
        [is_catastrophe(frame.image) for frame in episode.frames if frame.action is not None])
    # should_block_array = np.array([should_block(frame.image, frame.action) for frame in episode.frames])

    labels = np.full(len(episode.frames), fill_value=False, dtype=np.bool)
    mask = np.full(len(episode.frames), fill_value=True, dtype=np.bool)

    for i in range(len(episode.frames)):
        if i + self.block_radius + 1 >= len(episode.frames):
            mask[i] = False
            continue
        if is_catastrophe_array[i]:
            mask[i] = False
            continue
        for j in range(self.block_radius + 1):
            if is_catastrophe_array[i + j + 1]:
                labels[i] = True
                break
    return labels, mask

def reg2bin_vector(begin, end):
    '''Vectorized tabix reg2bin -- much faster than reg2bin'''
    result = np.zeros(begin.shape)

    # Entries filled
    done = np.zeros(begin.shape, dtype=np.bool)

    for (bits, bins) in rev_bit_bins:
        begin_shift = begin >> bits
        new_done = (begin >> bits) == (end >> bits)
        mask = np.logical_and(new_done, np.logical_not(done))
        offset = ((1 << (29 - bits)) - 1) / 7
        result[mask] = offset + begin_shift[mask]

        done = new_done

    return result.astype(np.int32)

def load_data(infile, chroms, resolutions):
    starts = infile['starts'][...]
    chromosomes = infile['chromosomes'][...]
    data = {}
    for res in resolutions:
        data[res] = {}
        for i, chrom in enumerate(chromosomes):
            if chrom not in chroms:
                continue
            start = (starts[i] / res) * res
            dist = infile['dist.%s.%i' % (chrom, res)][...]
            valid_rows = infile['valid.%s.%i' % (chrom, res)][...]
            corr = infile['corr.%s.%i' % (chrom, res)][...]
            # NOTE: this bool array is immediately overwritten by the
            # int32 allocation below; it is kept here as in the source.
            valid = numpy.zeros(corr.shape, dtype=numpy.bool)
            N, M = corr.shape
            valid = numpy.zeros((N, M), dtype=numpy.int32)
            for j in range(min(N - 1, M)):
                P = N - j - 1
                valid[:P, j] = valid_rows[(j + 1):] * valid_rows[:P]
            temp = corr * dist
            valid[numpy.where(numpy.abs(temp) == numpy.inf)] = False
            data[res][chrom] = [start, temp, valid]
    return data

def test_bbs_in_bbs(self):
    bbs_a = np.array([1, 1, 2.0, 3])
    bbs_b = np.array([1, 0, 4, 5])
    bbs_c = np.array([0, 0, 2, 2])
    assert bbs_in_bbs(bbs_a, bbs_b).all()
    assert bbs_in_bbs(bbs_b, bbs_c).any() is not True
    assert bbs_in_bbs(bbs_a, bbs_c).any() is not True

    bbs_d = np.array([
        [0, 0, 5, 5],
        [1, 2, 4, 4],
        [2, 3, 4, 5]
    ])
    assert (bbs_in_bbs(bbs_a, bbs_d) == np.array([1, 0, 0], dtype=np.bool)).all()
    assert (bbs_in_bbs(bbs_d, bbs_d) == np.ones((3), dtype=np.bool)).all()

    bbs_a *= 100
    bbs_d *= 100
    assert (bbs_in_bbs(bbs_a, bbs_d) == np.array([1, 0, 0], dtype=np.bool)).all()

def test_pts_in_bbs(self):
    pt = np.array([1, 2])
    bbs_a = np.array([1, 2, 3, 4])
    assert isinstance(pts_in_bbs(pt, bbs_a), np.bool_)
    assert pts_in_bbs(pt, bbs_a)

    pts = np.array([
        [1, 2],
        [2, 3],
        [3, 4]
    ])
    bbs_b = np.array([
        [1, 2, 3, 4],
        [5, 6, 7, 8],
        [2, 3, 4, 5]
    ])
    assert (pts_in_bbs(pts, bbs_b) == np.array([1, 0, 1], dtype=np.bool)).all()

def _neighbors_filtering_by_contact_area(self, label, neighbors, min_contact_area, real_area):
    """
    Filter the returned neighbors according to a given minimal contact
    area between them.

    Args:
       label: (int) - label of the image to threshold by the min contact area.
       neighbors: (list) - list of neighbors of the `label` to be filtered.
       min_contact_area: (None|int|float) - value of the min contact area threshold.
       real_area: (bool) - indicate whether the min contact area is a real
           world value or a number of voxels.
    """
    areas = self.cell_wall_area(label, neighbors, real_area)
    nei = cp.copy(neighbors)
    for i, j in areas.keys():
        if areas[(i, j)] < min_contact_area:
            nei.remove(i if j == label else j)

    return nei

def primes_2_to_n(n):
    """
    Efficient algorithm to find and list primes from 2 to `n`.

    Args:
        n (int): highest number from which to search for primes

    Returns:
        np array of all primes from 2 to n

    References:
        Robert William Hanks,
        https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/
    """
    sieve = np.ones(int(n / 3 + (n % 6 == 2)), dtype=np.bool)
    for i in range(1, int((n ** 0.5) / 3 + 1)):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[int(k * k / 3)::2 * k] = False
            sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]

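As a quick sanity check of the sieve above (a usage sketch, assuming the function is in scope), the returned NumPy array contains every prime p with 2 <= p < n:

import numpy as np

print(primes_2_to_n(30))
# -> [ 2  3  5  7 11 13 17 19 23 29]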
def _random_overlay(self, static_hidden=False):
    """Construct random max pool locations."""
    s = self.shapes[2]

    # Integer division keeps the sizes integral under Python 3.
    if static_hidden:
        args = np.random.randint(s[2], size=np.prod(s) // s[2] // s[4])
        overlay = np.zeros(np.prod(s) // s[4], np.bool)
        overlay[args + np.arange(len(args)) * s[2]] = True
        overlay = overlay.reshape([s[0], s[1], s[3], s[2]])
        overlay = np.rollaxis(overlay, -1, 2)
        return arrays.extend(overlay, s[4])
    else:
        args = np.random.randint(s[2], size=np.prod(s) // s[2])
        overlay = np.zeros(np.prod(s), np.bool)
        overlay[args + np.arange(len(args)) * s[2]] = True
        overlay = overlay.reshape([s[0], s[1], s[3], s[4], s[2]])
        return np.rollaxis(overlay, -1, 2)

def store_effect(self, idx, action, reward, done):
    """Store effects of action taken after observing frame stored
    at index idx. The reason `store_frame` and `store_effect` are broken
    up into two functions is so that one can call `encode_recent_observation`
    in between.

    Parameters
    ----------
    idx: int
        Index in buffer of recently observed frame (returned by `store_frame`).
    action: int
        Action that was performed upon observing this frame.
    reward: float
        Reward that was received when the action was performed.
    done: bool
        True if episode was finished after performing that action.
    """
    self.action[idx] = action
    self.reward[idx] = reward
    self.done[idx] = done

def get_training_data_page(self, num_samples):
    """
    Returns a TrainingDataPage with shuffled, transformed transitions
    from replay memory.

    :param num_samples: Number of transitions to sample from replay memory.
    """
    states, actions, rewards, next_states, next_actions, terminals, \
        possible_next_actions = self.sample_memories(num_samples)
    return TrainingDataPage(
        np.array(states, dtype=np.float32),
        np.array(actions, dtype=np.float32),
        np.array(rewards, dtype=np.float32),
        np.array(next_states, dtype=np.float32),
        np.array(next_actions, dtype=np.float32),
        np.array(possible_next_actions, dtype=np.float32),
        None,
        None,
        np.logical_not(terminals, dtype=np.bool)
    )

def true_values_for_sample(
    self, states, actions, assume_optimal_policy: bool
):
    true_q_values = self.true_q_values(DISCOUNT, assume_optimal_policy)
    print("TRUE Q")
    print(true_q_values.reshape([5, 5]))
    results = []
    for x in range(len(states)):
        int_state = int(list(states[x].keys())[0])
        next_state = self.move_on_index_limit(int_state, actions[x])
        if self.is_terminal(int_state):
            results.append(self.reward(int_state))
        else:
            results.append(
                self.reward(int_state) + (DISCOUNT * true_q_values[next_state])
            )
    return results

def calculate_plane_histogram(plane, doseplane, dosegridpoints,
                              maxdose, dd, id, structure, hist):
    """Calculate the DVH for the given plane in the structure."""
    contours = [[x[0:2] for x in c['data']] for c in plane]

    # If there is no dose for the current plane, go to the next plane
    if not len(doseplane):
        return (np.arange(0, maxdose), 0)

    # Create a zero valued bool grid
    grid = np.zeros((dd['rows'], dd['columns']), dtype=np.uint8)

    # Calculate the histogram for each contour in the plane
    # and boolean xor to remove holes
    for i, contour in enumerate(contours):
        m = get_contour_mask(dd, id, dosegridpoints, contour)
        grid = np.logical_xor(m.astype(np.uint8), grid).astype(np.bool)

    hist, vol = calculate_contour_dvh(
        grid, doseplane, maxdose, dd, id, structure)
    return (hist, vol)

def decodeMask(R):
    """
    Decode binary mask M encoded via run-length encoding.
    :param   R (object RLE)    : run-length encoding of binary mask
    :return: M (bool 2D array) : decoded binary mask
    """
    N = len(R['counts'])
    M = np.zeros((R['size'][0] * R['size'][1],))
    n = 0
    val = 1
    for pos in range(N):
        val = not val
        for c in range(R['counts'][pos]):
            M[n] = val
            n += 1
    return M.reshape((R['size']), order='F')

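As a quick sanity check (a sketch assuming COCO-style RLE, where the first count covers zeros and the mask is stored column-major), a 2x2 mask decodes like this:

import numpy as np

# Hypothetical RLE: one 0, two 1s, one 0, in column-major order.
R = {'size': [2, 2], 'counts': [1, 2, 1]}
print(decodeMask(R))
# -> [[0. 1.]
#     [1. 0.]]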
def _build_graph(self):
    """Compute the graph Laplacian."""
    # Graph sparsification
    if self.sparsify == 'epsilonNN':
        self.A_ = radius_neighbors_graph(self.X_, self.radius, include_self=False)
    else:
        Q = kneighbors_graph(
            self.X_,
            self.n_neighbors,
            include_self=False
        ).astype(np.bool)
        if self.sparsify == 'kNN':
            self.A_ = (Q + Q.T).astype(np.float64)
        elif self.sparsify == 'MkNN':
            self.A_ = (Q.multiply(Q.T)).astype(np.float64)

    # Edge re-weighting
    if self.reweight == 'rbf':
        W = rbf_kernel(self.X_, gamma=self.t)
        self.A_ = self.A_.multiply(W)

    return sp.csgraph.laplacian(self.A_, normed=self.normed)

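For orientation, a standalone sketch of the same kNN construction on random data (standard scikit-learn and SciPy calls; the variable names are illustrative, not from the project above):

import numpy as np
from scipy.sparse import csgraph
from sklearn.neighbors import kneighbors_graph

X = np.random.rand(10, 3)
Q = kneighbors_graph(X, n_neighbors=3, include_self=False).astype(bool)
A = (Q + Q.T).astype(np.float64)   # symmetrized 'kNN' adjacency, as above
L = csgraph.laplacian(A, normed=True)
print(L.shape)                      # (10, 10)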
def load_annoataion(p):
    '''
    load annotation from the text file
    :param p:
    :return:
    '''
    text_polys = []
    text_tags = []
    if not os.path.exists(p):
        return np.array(text_polys, dtype=np.float32)
    with open(p, 'r') as f:
        reader = csv.reader(f)
        for line in reader:
            label = line[-1]
            # strip BOM. \ufeff for python3, \xef\xbb\xbf for python2
            line = [i.strip('\ufeff').strip('\xef\xbb\xbf') for i in line]

            x1, y1, x2, y2, x3, y3, x4, y4 = list(map(float, line[:8]))
            text_polys.append([[x1, y1], [x2, y2], [x3, y3], [x4, y4]])
            if label == '*' or label == '###':
                text_tags.append(True)
            else:
                text_tags.append(False)
        return np.array(text_polys, dtype=np.float32), np.array(text_tags, dtype=np.bool)

def classifier_accuracy_report(self, prediction_vector, threshold=0.5):
    """
    Determine AUC and other metrics, write report.

    prediction_vector: vector of booleans (or outcome probabilities) of
        length n_subjects, e.g. self.point_predictions,
        self.ensemble_probabilities()... If this has dtype other than
        bool, prediction_vector > threshold is used for the confusion
        matrix.

    Returns: one string (multiple lines joined with \n, including
        trailing newline) containing a formatted report.
    """
    auc = roc_auc_score(self.model.data.y.astype(float), prediction_vector.astype(float))
    if not (prediction_vector.dtype == np.bool):
        prediction_vector = prediction_vector >= threshold
    conf = confusion_matrix(self.model.data.y, prediction_vector)
    lines = ['AUC: %.3f' % auc,
             'Confusion matrix: \n\t%s' % str(conf).replace('\n', '\n\t')]
    return '\n'.join(lines) + '\n'

########################################
# BAYES-FACTOR-BASED METHODS

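For context, the thresholding branch above behaves like the following sketch (hypothetical data; roc_auc_score and confusion_matrix are the standard scikit-learn calls):

import numpy as np
from sklearn.metrics import confusion_matrix, roc_auc_score

y_true = np.array([0, 0, 1, 1])
probs = np.array([0.1, 0.6, 0.4, 0.9])   # outcome probabilities

print(roc_auc_score(y_true, probs))      # 0.75
preds = probs >= 0.5                      # same conversion as above
print(confusion_matrix(y_true, preds))   # [[1 1]
                                          #  [1 1]]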
def _make_border_mask(sz, borderSize, omitSlices=[]):
    """
    Creates a logical tensor of size (#slices, #rows, #columns)
    where 1/true is an "included" pixel, where "included" means
      - not within borderSize pixels of the edge of the xy plane
      - not within a slice that is to be omitted.
    """
    [s, m, n] = sz
    bitMask = np.ones(sz, dtype=bool)
    bitMask[omitSlices, :, :] = 0

    if borderSize > 0:
        bitMask[:, 0:borderSize, :] = 0
        bitMask[:, (m - borderSize):m, :] = 0
        bitMask[:, :, 0:borderSize] = 0
        bitMask[:, :, (n - borderSize):n] = 0

    return bitMask

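A small usage sketch (assuming the function above): with one slice, a 4x4 plane, and a one-pixel border, only the interior survives.

m = _make_border_mask((1, 4, 4), 1)
print(m[0].astype(int))
# [[0 0 0 0]
#  [0 1 1 0]
#  [0 1 1 0]
#  [0 0 0 0]]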
def _downsample_mask(X, pct):
    """Create a boolean mask indicating which subset of X should be
    evaluated.
    """
    if pct < 1.0:
        Mask = np.zeros(X.shape, dtype=np.bool)
        m = X.shape[-2]
        n = X.shape[-1]
        nToEval = np.round(pct * m * n).astype(np.int32)
        idx = sobol(2, nToEval, 0)
        idx[0] = np.floor(m * idx[0])
        idx[1] = np.floor(n * idx[1])
        idx = idx.astype(np.int32)
        Mask[:, :, idx[0], idx[1]] = True
    else:
        Mask = np.ones(X.shape, dtype=np.bool)

    return Mask

def get_poly_centers(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=np.bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    p_count = len(mesh.polygons)
    center = np.zeros(p_count * 3)  # , dtype=type)
    mesh.polygons.foreach_get('center', center)
    center.shape = (p_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)
    return center

def get_poly_normals(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=np.bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    p_count = len(mesh.polygons)
    normal = np.zeros(p_count * 3)  # , dtype=type)
    mesh.polygons.foreach_get('normal', normal)
    normal.shape = (p_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)
    return normal

def get_v_normals(ob, type=np.float32):
    mod = False
    m_count = len(ob.modifiers)
    if m_count > 0:
        show = np.zeros(m_count, dtype=np.bool)
        ren_set = np.copy(show)
        ob.modifiers.foreach_get('show_render', show)
        ob.modifiers.foreach_set('show_render', ren_set)
        mod = True
    mesh = ob.to_mesh(bpy.context.scene, True, 'RENDER')
    v_count = len(mesh.vertices)
    normal = np.zeros(v_count * 3)  # , dtype=type)
    mesh.vertices.foreach_get('normal', normal)
    normal.shape = (v_count, 3)
    bpy.data.meshes.remove(mesh)
    if mod:
        ob.modifiers.foreach_set('show_render', show)
    return normal

def basic_unwrap():
    ob = bpy.context.object
    mode = ob.mode
    data = ob.data
    key = ob.active_shape_key_index
    bpy.ops.object.mode_set(mode='OBJECT')
    layers = [i.name for i in ob.data.uv_layers]
    if "UV_Shape_key" not in layers:
        bpy.ops.mesh.uv_texture_add()
        ob.data.uv_layers[len(ob.data.uv_layers) - 1].name = 'UV_Shape_key'
        ob.data.uv_layers.active_index = len(ob.data.uv_layers) - 1
        ob.active_shape_key_index = 0
        data.vertices.foreach_set('select', np.ones(len(data.vertices), dtype=np.bool))
        bpy.ops.object.mode_set(mode='EDIT')
        bpy.ops.uv.unwrap(method='ANGLE_BASED', margin=0.0635838)
    bpy.ops.object.mode_set(mode=mode)
    ob.active_shape_key_index = key

def _iter_test_masks(self, frame, y=None):
    """Generates boolean masks corresponding to the test set.

    Parameters
    ----------
    frame : H2OFrame
        The h2o frame to split

    y : string, optional (default=None)
        The column to stratify.

    Returns
    -------
    test_mask : np.ndarray, shape=(n_samples,)
        The indices for the test split
    """
    for test_index in self._iter_test_indices(frame, y):
        test_mask = np.zeros(frame.shape[0], dtype=np.bool)
        test_mask[test_index] = True
        yield test_mask

def is_iterable(x):
    """Python 3.x adds the ``__iter__`` attribute to strings. Thus, our
    previous tests for iterables will fail when using ``hasattr``.

    Parameters
    ----------
    x : object
        The object or primitive to test whether or not it is an iterable.

    Returns
    -------
    bool
        True if ``x`` is an iterable
    """
    if isinstance(x, six.string_types):
        return False
    return hasattr(x, '__iter__')

def _is_integer(x):
    """Determine whether some object ``x`` is an integer type
    (int, long, etc). This is part of the ``fixes`` module, since
    Python 3 removes the long datatype and we have to check the
    version major.

    Parameters
    ----------
    x : object
        The item to assess whether it is an integer.

    Returns
    -------
    bool
        True if ``x`` is an integer type
    """
    return (not isinstance(x, (bool, np.bool))) and \
        isinstance(x, (numbers.Integral, int, np.int, np.long, long))  # no long type in python 3

def is_entirely_numeric(X):
    """Determines whether an entire pandas frame
    is numeric in dtypes.

    Parameters
    ----------
    X : Pandas ``DataFrame`` or ``H2OFrame``, shape=(n_samples, n_features)
        The dataframe to test

    Returns
    -------
    bool
        True if the entire pd.DataFrame is numeric else False
    """
    return X.shape[1] == len(get_numeric(X))

def is_float(x):
    """Determine whether some object ``x`` is a
    float type (float, np.float, etc).

    Parameters
    ----------
    x : object
        The item to assess

    Returns
    -------
    bool
        True if ``x`` is a float type
    """
    return isinstance(x, (float, np.float)) or \
        (not isinstance(x, (bool, np.bool)) and isinstance(x, numbers.Real))

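A few spot-checks of the helper above (assuming the definition as shown, on a NumPy version that still provides np.float; note that any non-bool numbers.Real, including a plain int, passes the second clause):

print(is_float(3.14))  # True
print(is_float(3))     # True  -- ints are numbers.Real and not bool
print(is_float(True))  # False -- bools are excluded explicitly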
def is_numeric(x):
    """Determine whether some object ``x`` is a
    numeric type (float, int, etc).

    Parameters
    ----------
    x : object
        The item to assess

    Returns
    -------
    bool
        True if ``x`` is a float or integer type
    """
    return is_float(x) or is_integer(x)

def __init__(self, config, model_dir, ob_shape_list):
    self.model_dir = model_dir

    self.cnn_format = config.cnn_format
    self.memory_size = config.memory_size
    self.actions = np.empty(self.memory_size, dtype=np.uint8)
    self.rewards = np.empty(self.memory_size, dtype=np.integer)
    # print(self.memory_size, config.screen_height, config.screen_width)
    # self.screens = np.empty((self.memory_size, config.screen_height, config.screen_width), dtype=np.float16)
    self.screens = np.empty([self.memory_size] + ob_shape_list, dtype=np.float16)
    self.terminals = np.empty(self.memory_size, dtype=np.bool)
    self.history_length = config.history_length
    # self.dims = (config.screen_height, config.screen_width)
    self.dims = tuple(ob_shape_list)
    self.batch_size = config.batch_size
    self.count = 0
    self.current = 0

    # pre-allocate prestates and poststates for minibatch
    self.prestates = np.empty((self.batch_size, self.history_length) + self.dims, dtype=np.float16)
    self.poststates = np.empty((self.batch_size, self.history_length) + self.dims, dtype=np.float16)
    # self.prestates = np.empty((self.batch_size, self.history_length, self.dims), dtype=np.float16)
    # self.poststates = np.empty((self.batch_size, self.history_length, self.dims), dtype=np.float16)

def __make_net(self, input_images, input_measure, input_actions, reuse=False):
    if reuse:
        tf.get_variable_scope().reuse_variables()

    fc_val_params = copy.deepcopy(self.__fc_joint_params)
    fc_val_params[-1]['out_dims'] = self.__target_dim
    fc_adv_params = copy.deepcopy(self.__fc_joint_params)
    fc_adv_params[-1]['out_dims'] = len(self.__net_discrete_actions) * self.__target_dim
    if self.verbose:
        print('fc_val_params:', fc_val_params)
        print('fc_adv_params:', fc_adv_params)
    p_img_conv = ly.conv_encoder(input_images, self.__conv_params, 'p_img_conv', msra_coeff=0.9)
    p_img_fc = ly.fc_net(ly.flatten(p_img_conv), self.__fc_img_params, 'p_img_fc', msra_coeff=0.9)
    p_meas_fc = ly.fc_net(input_measure, self.__fc_measure_params, 'p_meas_fc', msra_coeff=0.9)
    p_val_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                         fc_val_params, 'p_val_fc', last_linear=True, msra_coeff=0.9)
    p_adv_fc = ly.fc_net(tf.concat([p_img_fc, p_meas_fc], 1),
                         fc_adv_params, 'p_adv_fc', last_linear=True, msra_coeff=0.9)
    p_adv_fc_nomean = p_adv_fc - tf.reduce_mean(p_adv_fc, reduction_indices=1, keep_dims=True)
    self.__pred_all_nomean = tf.reshape(p_adv_fc_nomean, [-1, len(self.__net_discrete_actions), self.__target_dim])
    self.__pred_all = self.__pred_all_nomean + tf.reshape(p_val_fc, [-1, 1, self.__target_dim])
    self.__pred_relevant = tf.boolean_mask(self.__pred_all, tf.cast(input_actions, tf.bool))

def setUp(self):
    self.f = np.ones(256, dtype=np.float32)
    self.ef = np.ones(self.f.size, dtype=np.bool)
    self.d = np.ones(128, dtype=np.float64)
    self.ed = np.ones(self.d.size, dtype=np.bool)
    # generate values for all permutation of 256bit simd vectors
    s = 0
    for i in range(32):
        self.f[s:s+8] = [i & 2**x for x in range(8)]
        self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
        s += 8
    s = 0
    for i in range(16):
        self.d[s:s+4] = [i & 2**x for x in range(4)]
        self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
        s += 4

    self.nf = self.f.copy()
    self.nd = self.d.copy()
    self.nf[self.ef] = np.nan
    self.nd[self.ed] = np.nan

def test_float(self):
    # offset for alignment test
    for i in range(4):
        assert_array_equal(self.f[i:] > 0, self.ef[i:])
        assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
        assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
        assert_array_equal(-self.f[i:] < 0, self.ef[i:])
        assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
        r = self.f[i:] != 0
        assert_array_equal(r, self.ef[i:])
        r2 = self.f[i:] != np.zeros_like(self.f[i:])
        r3 = 0 != self.f[i:]
        assert_array_equal(r, r2)
        assert_array_equal(r, r3)
        # check bool == 0x1
        assert_array_equal(r.view(np.int8), r.astype(np.int8))
        assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
        assert_array_equal(r3.view(np.int8), r3.astype(np.int8))

        # isnan on amd64 takes the same code path
        assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])

def test_double(self):
    # offset for alignment test
    for i in range(2):
        assert_array_equal(self.d[i:] > 0, self.ed[i:])
        assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
        assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
        assert_array_equal(-self.d[i:] < 0, self.ed[i:])
        assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
        r = self.d[i:] != 0
        assert_array_equal(r, self.ed[i:])
        r2 = self.d[i:] != np.zeros_like(self.d[i:])
        r3 = 0 != self.d[i:]
        assert_array_equal(r, r2)
        assert_array_equal(r, r3)
        # check bool == 0x1
        assert_array_equal(r.view(np.int8), r.astype(np.int8))
        assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
        assert_array_equal(r3.view(np.int8), r3.astype(np.int8))

        # isnan on amd64 takes the same code path
        assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])

def test_array_equal(self):
    res = np.array_equal(np.array([1, 2]), np.array([1, 2]))
    assert_(res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([1, 2]), np.array([1, 2, 3]))
    assert_(not res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([1, 2]), np.array([3, 4]))
    assert_(not res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([1, 2]), np.array([1, 3]))
    assert_(not res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array(['a'], dtype='S1'), np.array(['a'], dtype='S1'))
    assert_(res)
    assert_(type(res) is bool)
    res = np.array_equal(np.array([('a', 1)], dtype='S1,u4'),
                         np.array([('a', 1)], dtype='S1,u4'))
    assert_(res)
    assert_(type(res) is bool)

def test_truth_table_logical(self):
    # 2, 3 and 4 serve as true values
    input1 = [0, 0, 3, 2]
    input2 = [0, 4, 0, 2]

    typecodes = (np.typecodes['AllFloat'] +
                 np.typecodes['AllInteger'] +
                 '?')     # boolean
    for dtype in map(np.dtype, typecodes):
        arg1 = np.asarray(input1, dtype=dtype)
        arg2 = np.asarray(input2, dtype=dtype)

        # OR
        out = [False, True, True, True]
        for func in (np.logical_or, np.maximum):
            assert_equal(func(arg1, arg2).astype(bool), out)
        # AND
        out = [False, False, False, True]
        for func in (np.logical_and, np.minimum):
            assert_equal(func(arg1, arg2).astype(bool), out)
        # XOR
        out = [False, True, True, False]
        for func in (np.logical_xor, np.not_equal):
            assert_equal(func(arg1, arg2).astype(bool), out)

def test_basic(self):
    dt_numeric = np.typecodes['AllFloat'] + np.typecodes['AllInteger']
    dt_complex = np.typecodes['Complex']

    # test real
    a = np.eye(3)
    for dt in dt_numeric + 'O':
        b = a.astype(dt)
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), 3)

    # test complex
    a = np.eye(3) * 1j
    for dt in dt_complex + 'O':
        b = a.astype(dt)
        res = np.vdot(b, b)
        assert_(np.isscalar(res))
        assert_equal(np.vdot(b, b), 3)

    # test boolean
    b = np.eye(3, dtype=np.bool)
    res = np.vdot(b, b)
    assert_(np.isscalar(res))
    assert_equal(np.vdot(b, b), True)

def test_basic(self):
    dts = [np.bool, np.int16, np.int32, np.int64, np.double, np.complex128,
           np.longdouble, np.clongdouble]
    for dt in dts:
        c = np.ones(53, dtype=np.bool)
        assert_equal(np.where( c, dt(0), dt(1)), dt(0))
        assert_equal(np.where(~c, dt(0), dt(1)), dt(1))
        assert_equal(np.where(True, dt(0), dt(1)), dt(0))
        assert_equal(np.where(False, dt(0), dt(1)), dt(1))
        d = np.ones_like(c).astype(dt)
        e = np.zeros_like(d)
        r = d.astype(dt)
        c[7] = False
        r[7] = e[7]
        assert_equal(np.where(c, e, e), e)
        assert_equal(np.where(c, d, e), r)
        assert_equal(np.where(c, d, e[0]), r)
        assert_equal(np.where(c, d[0], e), r)
        assert_equal(np.where(c[::2], d[::2], e[::2]), r[::2])
        assert_equal(np.where(c[1::2], d[1::2], e[1::2]), r[1::2])
        assert_equal(np.where(c[::3], d[::3], e[::3]), r[::3])
        assert_equal(np.where(c[1::3], d[1::3], e[1::3]), r[1::3])
        assert_equal(np.where(c[::-2], d[::-2], e[::-2]), r[::-2])
        assert_equal(np.where(c[::-3], d[::-3], e[::-3]), r[::-3])
        assert_equal(np.where(c[1::-3], d[1::-3], e[1::-3]), r[1::-3])

def test_dtype_mix(self):
    c = np.array([False, True, False, False, False, False, True, False,
                  False, False, True, False])
    a = np.uint32(1)
    b = np.array([5., 0., 3., 2., -1., -4., 0., -10., 10., 1., 0., 3.],
                 dtype=np.float64)
    r = np.array([5., 1., 3., 2., -1., -4., 1., -10., 10., 1., 1., 3.],
                 dtype=np.float64)
    assert_equal(np.where(c, a, b), r)

    a = a.astype(np.float32)
    b = b.astype(np.int64)
    assert_equal(np.where(c, a, b), r)

    # non bool mask
    c = c.astype(np.int)
    c[c != 0] = 34242324
    assert_equal(np.where(c, a, b), r)
    # invert
    tmpmask = c != 0
    c[c == 0] = 41247212
    c[tmpmask] = 0
    assert_equal(np.where(c, b, a), r)

def test_allany_oddities(self):
    # Some fun with all and any
    store = empty((), dtype=bool)
    full = array([1, 2, 3], mask=True)

    self.assertTrue(full.all() is masked)
    full.all(out=store)
    self.assertTrue(store)
    self.assertTrue(store._mask, True)
    self.assertTrue(store is not masked)

    store = empty((), dtype=bool)
    self.assertTrue(full.any() is masked)
    full.any(out=store)
    self.assertTrue(not store)
    self.assertTrue(store._mask, True)
    self.assertTrue(store is not masked)