The following 49 code examples, extracted from open source Python projects, illustrate how to use statistics.mode().
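Before the project examples, here is a minimal usage sketch (the sample list is made up for illustration): statistics.mode() returns the single most common data point, and raises statistics.StatisticsError for an empty dataset (and, before Python 3.8, whenever there is no unique mode).

import statistics

data = [3, 5, 3, 8, 3, 5]            # hypothetical sample data
print(statistics.mode(data))         # -> 3, the most common value

try:
    statistics.mode([])              # empty data always raises
except statistics.StatisticsError as err:
    print("No mode:", err)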
def MODE(df, n, price='Close'):
    """ Mode (most common value) of discrete data """
    mode_list = []
    i = 0
    while i < len(df[price]):
        if i + 1 < n:
            mode = float('NaN')
        else:
            start = i + 1 - n
            end = i + 1
            mode = statistics.mode(df[price][start:end])
        mode_list.append(mode)
        i += 1
    return mode_list
def listen_for_end(self, keypress):
    """ Listen for 'q', left, or right keys to end game. """
    if keypress != 255:
        print(keypress)
        if keypress == ord('q'):  # 'q' pressed to quit
            print("Escape key entered")
            return "END"
        elif self.curr_level == 0:
            # Select mode
            self.curr_level = 1
            self.tickcount = 0
            if keypress == 81 or keypress == 2:  # left
                self.easy_mode = True
            elif keypress == 83 or keypress == 3:  # right
                self.easy_mode = False
        elif self.curr_level == 2:
            print("Resetting")
            self.reset()
def temp_stat(temps):
    """ computes the average, median, std dev, and variance of temps """
    import statistics
    print(temps)
    print("Mean: ", statistics.mean(temps))
    print("Median: ", statistics.median(temps))
    print("Standard deviation: ", statistics.stdev(temps))
    print("Variance: ", statistics.variance(temps))
    try:
        print("Mode: ", statistics.mode(temps))
    except statistics.StatisticsError as e:
        print("Mode error: ", e)

#%%
def test():
    """Tests the statistical functions.

    Raises:
        AssertionError if a test fails.
    """
    testlist0 = [1, 2, 3, 4, 5]
    testlist1 = [1, 2, 3, 4, 5, 6]
    testlist2 = [2, 2, 3, 4, 4, 6]
    testlist3 = [2, 2, 3, 4, 5, 6, 7]
    assert abs(mean(testlist0) - 3) <= 1e-6, mean(testlist0)
    assert abs(mean(testlist1) - 3.5) <= 1e-6, mean(testlist1)
    assert abs(mean(testlist2) - 21 / 6) <= 1e-6, mean(testlist2)
    assert abs(mean(testlist3) - 29 / 7) <= 1e-6, mean(testlist3)
    assert median(testlist0) == 3, median(testlist0)
    assert abs(median(testlist1) - 3.5) <= 1e-6, median(testlist1)
    assert abs(median(testlist2) - 3.5) <= 1e-6, median(testlist2)
    assert median(testlist3) == 4, median(testlist3)
    assert mode(testlist3) == 2, mode(testlist3)
def get_stats(self, metrics, lang=UNSPECIFIED_TRANSLATION, limit=100):
    stats = super(NumField, self).get_stats(metrics, lang, limit)
    stats.update({
        'median': '*',
        'mean': '*',
        'mode': '*',
        'stdev': '*'
    })
    try:
        # requires a non-empty dataset
        stats['mean'] = statistics.mean(self.flatten_dataset(metrics))
        stats['median'] = statistics.median(self.flatten_dataset(metrics))
        # requires at least 2 values in the dataset
        stats['stdev'] = statistics.stdev(self.flatten_dataset(metrics),
                                          xbar=stats['mean'])
        # requires a non-empty dataset and a unique mode
        stats['mode'] = statistics.mode(self.flatten_dataset(metrics))
    except statistics.StatisticsError:
        pass
    return stats
def average(numbers, type='mean'):
    import statistics
    type = type.lower()
    try:
        statistics.mean(numbers)
    except:
        raise RuntimeError('An Error Has Occurred: List Not Specified (0018)')
    if type == 'mean':
        return statistics.mean(numbers)
    elif type == 'mode':
        return statistics.mode(numbers)
    elif type == 'median':
        return statistics.median(numbers)
    elif type == 'min':
        return min(numbers)
    elif type == 'max':
        return max(numbers)
    elif type == 'range':
        return max(numbers) - min(numbers)
    else:
        raise RuntimeError('An Error Has Occurred: You Entered An Invalid Operation (0003)')  # Throw A Runtime Error
def mode(self):
    return statistics.mode(self.price)
def get_mode_trade_size(self, side: OrderSide, order_type: OrderType, seconds_ago: int,
                        group_by_period: Optional[int] = None) -> Optional[float]:
    order_quantities = self.get_trade_quantities(side, order_type, seconds_ago, group_by_period)
    if len(order_quantities) == 0:
        return None
    try:
        return mode(order_quantities)
    except StatisticsError:
        return None
def classify(self, features):
    if not self.votes:
        self.get_votes(features)
    return mode(self.votes)
def confidence(self, features):
    if not self.votes:
        self.get_votes(features)
    choice_votes = self.votes.count(mode(self.votes))
    conf = choice_votes / len(self.votes)
    return conf
def classify(self, features):
    votes = []
    for c in self._classifiers:
        v = c.classify(features)
        votes.append(v)
    return mode(votes)
def confidence(self, features):
    votes = []
    for c in self._classifiers:
        v = c.classify(features)
        votes.append(v)
    choice_votes = votes.count(mode(votes))
    conf = choice_votes / len(votes)
    return conf
def __init__(self, *classifiers):
    self._classifiers = classifiers

# Creating our own classify method.
# After iterating we return mode(votes), which just returns the most popular vote.
def classify(self, features):
    votes = []
    for c in self._classifiers:
        v = c.classify(features)
        votes.append(v)
    return mode(votes)

# Defining another parameter, confidence.
# Since we have algorithms voting, we can tally the votes for and against the winning vote,
# and call this "confidence".
def confidence(self, features):
    votes = []
    for c in self._classifiers:
        v = c.classify(features)
        votes.append(v)
    choice_votes = votes.count(mode(votes))
    conf = choice_votes / len(votes)
    return conf

# Defining and accessing the corpora.
# In total, approx 10,000 feeds to be trained and tested on.
def __init__(self, *classifiers):
    self._classifiers = classifiers

# this classifies the vote and returns the mode of the result.
# must be handed:
#   * featured words
def clify(self, features):
    self.votes = []
    for self.i in self._classifiers:
        self.j = self.i.clify(features)
        self.votes.append(self.j)
    return mode(self.votes)

# find the confidence of the results
# must be handed:
#   * featured words
def conf(self, features):
    self.votes = []
    for self.i in self._classifiers:
        self.j = self.i.clify(features)
        self.votes.append(self.j)
    self.choice_votes = self.votes.count(mode(self.votes))
    # return the ratio directly; storing it as self.conf would shadow this method
    return self.choice_votes / len(self.votes)

# find the features of a document
# must be handed:
#   * document to find features of
#   * word features
def classify(self, features):
    self.votes = []
    for self.i in self._classifiers:
        self.j = self.i.classify(features)
        self.votes.append(self.j)
    return mode(self.votes)

# find the confidence of the results
# must be handed:
#   * featured words
def main():
    print(stats.mean(range(6)))
    print(stats.median(range(6)))
    print(stats.median_low(range(6)))
    print(stats.median_high(range(6)))
    print(stats.median_grouped(range(6)))
    try:
        print(stats.mode(range(6)))
    except Exception as e:
        print(e)
    print(stats.mode(list(range(6)) + [3]))
    print(stats.pstdev(list(range(6)) + [3]))
    print(stats.stdev(list(range(6)) + [3]))
    print(stats.pvariance(list(range(6)) + [3]))
    print(stats.variance(list(range(6)) + [3]))
def prepare_data(self):
    """Overload method from UnivariateCommonMixin."""
    # Make sure test data has exactly one mode.
    return [1, 1, 1, 1, 3, 4, 7, 9, 0, 8, 2]
def test_nominal_data(self):
    # Test mode with nominal data.
    data = 'abcbdb'
    self.assertEqual(self.func(data), 'b')
    data = 'fe fi fo fum fi fi'.split()
    self.assertEqual(self.func(data), 'fi')
def test_discrete_data(self):
    # Test mode with discrete numeric data.
    data = list(range(10))
    for i in range(10):
        d = data + [i]
        random.shuffle(d)
        self.assertEqual(self.func(d), i)
def test_bimodal_data(self):
    # Test mode with bimodal data.
    data = [1, 1, 2, 2, 2, 2, 3, 4, 5, 6, 6, 6, 6, 7, 8, 9, 9]
    assert data.count(2) == data.count(6) == 4
    # Check for an exception.
    self.assertRaises(statistics.StatisticsError, self.func, data)
def test_unique_data_failure(self):
    # Test mode exception when data points are all unique.
    data = list(range(10))
    self.assertRaises(statistics.StatisticsError, self.func, data)
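The two test cases above rely on the pre-3.8 behaviour, where tied or all-unique data raises statistics.StatisticsError. As a side note (not taken from the extracted projects): since Python 3.8, statistics.mode() returns the first mode encountered instead of raising on ties, and statistics.multimode() returns every tied value.

import statistics

data = [1, 1, 2, 2, 3]

# Python >= 3.8: returns 1 (the first mode encountered); Python < 3.8: raises StatisticsError
print(statistics.mode(data))

# Python >= 3.8 only: list of all values tied for most common -> [1, 2]
print(statistics.multimode(data))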
def my_stats(slis):
    import statistics
    print("Mean: ", statistics.mean(slis))
    print("Median: ", statistics.median(slis))
    # print("Mode: ", statistics.mode(slis))
    try:
        print("Mode: ", statistics.mode(slis))
    except statistics.StatisticsError as e:
        print("Mode error: ", e)
    print("Standard Deviation: ", statistics.stdev(slis))
    print("Variance: ", statistics.variance(slis))

#%%
def mode(list):
    data = Counter(list)
    data.most_common()
    value = data.most_common(1)
    return value[0][0]

# this is the final function which calculates the divergence of current tweets from past tweets
def feature2Extractor(list):
    ls = []
    ls.append(st.mean(list))
    ls.append(st.median(list))
    try:
        ls.append(st.mode(list))
    except Exception:
        ls.append(mode(list))
    ls.append(st.stdev(list))
    ls.append(min(list))
    ls.append(max(list))
    featureList = ls
    return featureList
def mode(values):
    """Returns the mode of the values.

    If multiple values tie, one value is returned.

    Args:
        values: A list of values.

    Returns:
        The mode.
    """
    counts = {k: values.count(k) for k in set(values)}
    return sorted(counts, key=counts.__getitem__)[-1]
def calculate_dominance(cppn: FeedForwardNetwork, ca_config: CAConfig) -> float:
    alphabet = ca_config.alphabet
    neighbourhood = ca_config.neighbourhood
    nbhs = list(product(alphabet, repeat=len(neighbourhood)))
    rules = create_state_normalization_rules(states=alphabet)
    quiescent = alphabet[0]

    def transition_f(inputs_discrete_values: Sequence[CELL_STATE_T]) -> CELL_STATE_T:
        if all((x == quiescent) for x in inputs_discrete_values):
            return quiescent
        inputs_float_values = tuple(rules[x] for x in inputs_discrete_values)
        outputs = cppn.serial_activate(inputs_float_values)
        return max(zip(alphabet, outputs), key=itemgetter(1))[0]

    heterogenous, homogenous = 0, 0
    for nbh in nbhs:
        try:
            output = transition_f(nbh)
        except OverflowError:
            continue
        m = mode(nbh)
        if output != m:
            continue
        elif all(x == m for x in nbh):
            homogenous += 1
        else:
            heterogenous += 1
    return 3 * homogenous + heterogenous
def mode(text):
    """
    Finds the mode of a space-separated list of numbers.

    Example::

        /mode 33 54 43 65 43 62
    """
    return format_output(statistics.mode(parse_numeric_list(text)))
def setup():
    commands.add(mean)
    commands.add(median)
    commands.add(median_low)
    commands.add(median_high)
    commands.add(median_grouped)
    commands.add(mode)
    commands.add(pstdev)
    commands.add(pvariance)
    commands.add(stdev)
    commands.add(variance)
def _try_compute_mode(objects):
    """
    Computes the mode of a set of objects, if a unique one exists.

    Args:
        objects (list[T]): the objects whose mode is to be computed

    Returns:
        T: the modal value, or None if a unique mode does not exist
    """
    try:
        numeric_value = statistics.mode(objects)  # This _is_ 'None' friendly
    except statistics.StatisticsError:
        # No unique value, or empty data
        numeric_value = None
    return numeric_value
def select_mode(self):
    """ Select a mode: Easy or Hard. """
    self.tickcount += 1
    if self.raspberry:
        self.tickcount += 1
    bgr_image = self.capture_frame()

    # Draw "Easy" and "Hard".
    # bgr_image = self.overlayUI(bgr_image)
    easy_coord = (self.screenwidth // 8, (self.screenheight * 3) // 4)
    draw_text(easy_coord, bgr_image, "Easy", font_scale=3)
    hard_coord = (self.screenwidth // 2, (self.screenheight * 3) // 4)
    draw_text(hard_coord, bgr_image, "Hard", font_scale=3)

    # Listen for mode selection.
    if self.currPosX and self.currPosX < self.screenwidth / 2:
        cv2.rectangle(self.overlay, (0, 0),
                      (self.screenwidth // 2, int(self.screenheight)),
                      (211, 211, 211), -1)
    else:
        cv2.rectangle(self.overlay, (self.screenwidth // 2, 0),
                      (self.screenwidth, self.screenheight),
                      (211, 211, 211), -1)
    if self.click_point_x:  # If user clicks left mouse button.
        # OPTIONAL: Positional mode selection
        # self.easy_mode = True if self.click_point_x < self.screenwidth / 2 else False
        self.easy_mode = True
        self.tickcount = 0
        self.curr_level = 1
        self.click_point_x = None
        self.click_point_right_x = None
    if self.click_point_right_x:
        self.easy_mode = False
        self.tickcount = 0
        self.curr_level = 1
        self.click_point_x = None
        self.click_point_right_x = None

    # Draw faces.
    gray_image = cv2.cvtColor(bgr_image, cv2.COLOR_BGR2GRAY)
    faces = detect_faces(face_detection, gray_image)
    cv2.addWeighted(self.overlay, OPACITY, bgr_image, 1 - OPACITY, 0, bgr_image)
    if self.debug:
        for face in faces:
            draw_bounding_box(face, bgr_image, (255, 0, 0))

    # Draw Christmas logo.
    self.draw_hats(bgr_image, faces)
    self.draw_christmas_logo(bgr_image)  # Only for christmas

    # Show image.
    cv2.imshow('PartyPi', bgr_image)
def main(with_csv=False):
    """Performs some simple data analysis.

    If with_csv is True, the csv module is used for loading the data.
    Otherwise, a simple custom solution is used.

    Args:
        with_csv: If True, uses the csv module.
    """
    if with_csv:
        data = read_with_csv(IRIS_FILE)
    else:
        data = read_without_csv(IRIS_FILE)
    data = make_data_numeric(data, SEPAL_LENGTH, SEPAL_WIDTH, PETAL_LENGTH, PETAL_WIDTH)
    print('Total number of rows:', len(data))
    class_counts = count_occurences(data, CLASS)
    print('Instances:', class_counts)
    sepal_lengths = [d[SEPAL_LENGTH] for d in data]
    print('Mean sepal length (statistics):', statistics.mean(sepal_lengths))
    print('Mean sepal length (custom):', mean(sepal_lengths))
    sepal_l_setosa = [d[SEPAL_LENGTH] for d in data if 'setosa' in d[CLASS]]
    print('Mean sepal length (setosa, statistics):', statistics.mean(sepal_l_setosa))
    print('Mean sepal length (setosa, custom):', mean(sepal_l_setosa))
    sepal_widths = [d[SEPAL_WIDTH] for d in data]
    print('Median sepal width (statistics):', statistics.median(sepal_widths))
    print('Median sepal width (custom):', median(sepal_widths))
    sepal_w_virginica = [d[SEPAL_WIDTH] for d in data if 'vir' in d[CLASS]]
    print('Median sepal width (virginica, statistics):', statistics.median(sepal_w_virginica))
    print('Median sepal width (virginica, custom):', median(sepal_w_virginica))
    petal_l_versicolor = [d[PETAL_LENGTH] for d in data if 'ver' in d[CLASS]]
    print('Mode petal length (versicolor, statistics):', statistics.mode(petal_l_versicolor))
    print('Mode petal length (versicolor, custom):', mode(petal_l_versicolor))
def _retake_photos_until_valid_mode(self, target_number_cluster,
                                    mode_is_invalid=lambda m: m is None) -> None:
    """
    Take 0 or more extra photos at the average location of the target numbers to do what we can
    to ensure a valid modal numeric value exists.

    Args:
        target_number_cluster (GlobalNumberCluster): The different representations of a single
            real life number to be recognised. This is extended to include all extra photos
            taken during this method.
        mode_is_invalid: A function which accepts a given mode (int) and returns true if it is
            invalid. By default this simply returns True if a unique mode does not exist.
    """
    average_location = target_number_cluster.average_dot_location_yx
    numeric_value = target_number_cluster.modal_numeric_value
    jitters = np.array([[0, 0], [10, 0], [0, 10], [-10, 0], [0, -10]])
    retry_number = -1
    while mode_is_invalid(numeric_value) and retry_number + 1 < len(jitters):
        retry_number += 1
        # Take a new photo
        print('Could not determine number at location ({0[0]:.0f},{0[1]:.0f}), current value {1}\n'
              'Retrying...'.format(average_location, numeric_value))
        processing_job = self._take_photo_and_extract_numbers(average_location + jitters[retry_number])
        self._processing_station.join()
        new_global_numbers = processing_job.return_value
        new_global_numbers = [n for n in new_global_numbers if np.linalg.norm(
            n.dot_location_yx_mm - average_location) < self._min_millimetres_between_distinct_spots]
        number_recognition.print_recognised_global_numbers(new_global_numbers)
        target_number_cluster.extend(new_global_numbers)
        # Try again to get the mode values
        numeric_value = _try_compute_mode([n.numeric_value for n in target_number_cluster])
def get_disaggregated_stats(self, metrics, top_splitters, lang=UNSPECIFIED_TRANSLATION, limit=100):
    parent = super(NumField, self)
    stats = parent.get_disaggregated_stats(metrics, top_splitters, lang, limit)
    substats = {}
    # transpose the metrics data structure to look like
    # {splitter1: [x, y, z], splitter2: ...}
    inversed_metrics = defaultdict(list)
    for val, counter in metrics.items():
        if val is None:
            continue
        for splitter, count in counter.items():
            inversed_metrics[splitter].extend([val] * count)

    for splitter, values in inversed_metrics.items():
        val_stats = substats[splitter] = {
            'median': '*',
            'mean': '*',
            'mode': '*',
            'stdev': '*'
        }
        try:
            # requires a non-empty dataset
            val_stats['mean'] = statistics.mean(values)
            val_stats['median'] = statistics.median(values)
            # requires at least 2 values in the dataset
            val_stats['stdev'] = statistics.stdev(values, xbar=val_stats['mean'])
            # requires a non-empty dataset and a unique mode
            val_stats['mode'] = statistics.mode(values)
        except statistics.StatisticsError:
            pass

    stats.update({
        'values': tuple(substats.items())[:limit]
    })
    return stats
def update_state(self, blocks):
    block_version = None
    char_offset = None
    group_type = None
    curr_AB = {0: None, 2: None, None: None}
    last_AB = {0: None, 2: None, None: None}
    for block in blocks:
        blkid = block['ID']
        if blkid == "A":
            self.PIs.append(block['PI'])
            char_offset = None
        if blkid == "B":
            group_type = block['group_type']
            block_version = block['version_AB']
        if blkid == "B" and group_type == 0:
            curr_AB[group_type] = block['text_AB']
            char_offset = block['text_segment'] * 2
        if blkid == "B" and group_type == 2:
            char_offset = block['text_segment'] * 4
            if (curr_AB[group_type] != None) and (block['text_AB'] != curr_AB[group_type]) \
                    and (char_offset == 0) and (block_version == 'A'):
                print("CLEARING")
                self.cur_state[curr_AB[group_type] ^ 1] = ['_'] * 64
            curr_AB[group_type] = block['text_AB']
        if (char_offset is not None) and (blkid == "C") and (group_type == 0) and (block_version == 'B'):
            self.PIs.append((ord(block['B1']) << 8) + ord(block['B0']))
        if char_offset is not None and (blkid == "C") and (group_type == 2):
            self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 1] = block['B1']
        if char_offset is not None and (blkid == "D") and (group_type == 2):
            self.cur_state[curr_AB[group_type]][char_offset + 2] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 3] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 0) and (block_version == 'B'):
            self.cur_state[curr_AB[group_type]][char_offset] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 1] = block['B1']
        if (char_offset is not None) and (blkid == "D") and (group_type == 0) and (block_version == 'A'):
            self.cur_state[curr_AB[group_type]][char_offset + 10] = block['B0']
            self.cur_state[curr_AB[group_type]][char_offset + 11] = block['B1']
        if group_type in (0, 2):
            # print(blkid, group_type, curr_AB[group_type], block_version)
            print(' '.join([str(x) for x in block.values()]))
            # print('\n'.join([''.join(x) for x in self.prog_name]))
        if blkid == "D":
            print('\n'.join([''.join(x) for x in self.cur_state]).replace('\r', '?'))
            group_type = None
            char_offset = None
    try:
        self.PI = hex(statistics.mode(self.PIs))[2:]
    except statistics.StatisticsError:
        self.PI = hex(self.PIs[0])[2:]
    self.callsign = picode.rdscall(self.PI)
    print(self.callsign)