The following code examples, extracted from open-source Python projects, illustrate how to use math.log10().
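
Before the project examples, a minimal sketch of the basics: math.log10(x) returns the base-10 logarithm of x and raises ValueError for x <= 0. The idioms below (digit counting, decibel conversion, change of base) recur throughout the examples that follow.

import math

math.log10(1000)                # 3.0 -- base-10 logarithm
math.log10(0.001)               # -3.0
int(math.log10(54321)) + 1      # 5 -- digit count of a positive integer
20 * math.log10(0.5)            # about -6.02 -- an amplitude ratio in decibels
math.log10(8) / math.log10(2)   # 3.0 -- change of base yields log2
# math.log10(0) and math.log10(-1) raise ValueError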

def majorize(values):
    """Filter sequence to return only major considered numbers"""
    sorted_values = sorted(values)
    if len(values) <= 3 or (
            abs(2 * sorted_values[1] - sorted_values[0] - sorted_values[2]) >
            abs(1.5 * (sorted_values[1] - sorted_values[0]))):
        return []
    values_step = sorted_values[1] - sorted_values[0]
    full_range = sorted_values[-1] - sorted_values[0]
    step = 10 ** int(log10(full_range))
    if step == values_step:
        step *= 10
    step_factor = 10 ** (int(log10(step)) + 1)
    if round(step * step_factor) % (round(values_step * step_factor) or 1):
        # TODO: Find lower common multiple instead
        step *= values_step
    if full_range <= 2 * step:
        step *= .5
    elif full_range >= 5 * step:
        step *= 5
    major_values = [
        value for value in values if value / step == round(value / step)]
    return [value for value in sorted_values if value in major_values]

def compute_logarithmic_scale(min_, max_, min_scale, max_scale):
    """Compute an optimal scale for logarithmic"""
    if max_ <= 0 or min_ <= 0:
        return []
    min_order = int(floor(log10(min_)))
    max_order = int(ceil(log10(max_)))
    positions = []
    amplitude = max_order - min_order
    if amplitude <= 1:
        return []
    detail = 10.
    while amplitude * detail < min_scale * 5:
        detail *= 2
    while amplitude * detail > max_scale * 3:
        detail /= 2
    for order in range(min_order, max_order + 1):
        for i in range(int(detail)):
            tick = (10 * i / detail or 1) * 10 ** order
            tick = round_to_scale(tick, tick)
            if min_ <= tick <= max_ and tick not in positions:
                positions.append(tick)
    return positions

def get_audio_levels(self):
    """
    Returns a tuple with left and right audio levels, or (None, None)
    if the frame is not valid.
    """
    if not self.version_is_valid():
        return (None, None)
    else:
        int16_max = 0x7FFF
        if self.audiolevel_left:
            dB_l = int(20 * math.log10(float(self.audiolevel_left) / int16_max))
        else:
            dB_l = -90
        if self.audiolevel_right:
            dB_r = int(20 * math.log10(float(self.audiolevel_right) / int16_max))
        else:
            dB_r = -90
        return (dB_l, dB_r)

def test_calculate_SNR_positive_1(self):
    source_array = [89, -89] * 6000 + [502, -502] * 8000 + [89, -89] * 7000
    source_data = reduce(
        lambda a, b: a + struct.pack('>h', b),
        source_array[1:],
        struct.pack('>h', source_array[0])
    )
    sampling_frequency = 8000
    bounds_of_speech = [(2.0 * 6000.0 / sampling_frequency,
                         2.0 * (6000.0 + 8000.0) / sampling_frequency)]
    silence_energy = reduce(
        lambda a, b: a + b * b,
        source_array[0:(2 * 6000)] + source_array[(2 * (6000 + 8000)):],
        vad.EPS
    ) / (2.0 * (6000.0 + 7000.0))
    speech_energy = reduce(
        lambda a, b: a + b * b,
        source_array[(2 * 6000):(2 * (6000 + 8000))],
        vad.EPS
    ) / (2.0 * 8000.0)
    target_snr = 20.0 * math.log10(speech_energy / silence_energy)
    self.assertAlmostEqual(
        target_snr,
        vad.calculate_SNR(source_data, sampling_frequency, bounds_of_speech))

def calculate_features_for_VAD(sound_frames, frequencies_axis, spectrogram):
    features = numpy.empty((spectrogram.shape[0], 3))
    # smoothed_spectrogram, smoothed_frequencies_axis = smooth_spectrogram(spectrogram, frequencies_axis, 24)
    for time_ind in range(spectrogram.shape[0]):
        mean_spectrum = spectrogram[time_ind].mean()
        if mean_spectrum > 0.0:
            sfm = -10.0 * math.log10(stats.gmean(spectrogram[time_ind]) / mean_spectrum)
        else:
            sfm = 0.0
        # max_freq = smoothed_frequencies_axis[smoothed_spectrogram[time_ind].argmax()]
        max_freq = frequencies_axis[spectrogram[time_ind].argmax()]
        features[time_ind][0] = numpy.square(sound_frames[time_ind]).mean()
        features[time_ind][1] = sfm
        features[time_ind][2] = max_freq
    """medfilt_order = 3
    for feature_ind in range(features.shape[0]):
        features[feature_ind] = signal.medfilt(features[feature_ind], medfilt_order)"""
    return features

def to_data(self, x, y):
    '''Convert window coords to data coords.

    :Parameters:
        `x, y`:
            The coordinates to convert (in window coords).
    '''
    adj_x = float(x - self._plot_area.pos[0])
    adj_y = float(y - self._plot_area.pos[1])
    norm_x = adj_x / self._plot_area.size[0]
    norm_y = adj_y / self._plot_area.size[1]
    if self.xlog:
        xmin, xmax = log10(self.xmin), log10(self.xmax)
        conv_x = 10. ** (norm_x * (xmax - xmin) + xmin)
    else:
        conv_x = norm_x * (self.xmax - self.xmin) + self.xmin
    if self.ylog:
        ymin, ymax = log10(self.ymin), log10(self.ymax)
        conv_y = 10. ** (norm_y * (ymax - ymin) + ymin)
    else:
        conv_y = norm_y * (self.ymax - self.ymin) + self.ymin
    return [conv_x, conv_y]

def draw(self, *args):
    super(MeshLinePlot, self).draw(*args)
    points = self.points
    mesh = self._mesh
    vert = mesh.vertices
    ind = mesh.indices
    params = self._params
    funcx = log10 if params['xlog'] else lambda x: x
    funcy = log10 if params['ylog'] else lambda x: x
    xmin = funcx(params['xmin'])
    ymin = funcy(params['ymin'])
    diff = len(points) - len(vert) // 4
    size = params['size']
    ratiox = (size[2] - size[0]) / float(funcx(params['xmax']) - xmin)
    ratioy = (size[3] - size[1]) / float(funcy(params['ymax']) - ymin)
    if diff < 0:
        del vert[4 * len(points):]
        del ind[len(points):]
    elif diff > 0:
        ind.extend(range(len(ind), len(ind) + diff))
        vert.extend([0] * (diff * 4))
    for k in range(len(points)):
        vert[k * 4] = (funcx(points[k][0]) - xmin) * ratiox + size[0]
        vert[k * 4 + 1] = (funcy(points[k][1]) - ymin) * ratioy + size[1]
    mesh.vertices = vert

def __scale_coefficient(self, result, result_index, t, sum_log=False):
    """Normalize one time column of a result matrix in place.

    :param result: the collection of matrices being scaled
    :param result_index: index of the matrix to scale
    :param t: time index of the column to scale
    :param sum_log: whether to accumulate log10 of the scaling constant
        into the c coefficient
    :return: None
    """
    sum_column = np.sum(result[result_index][:, t], axis=0)
    if sum_column == 0.:
        result[result_index][:, t] = 1. / len(self.__states)
        sum_column = 1.
    result[result_index][:, t] /= sum_column
    if sum_log:
        self.__c_coefficient += math.log10(sum_column)

def _compute_divisions(self, xi, xf):
    assert xf > xi
    dx = xf - xi
    size = dx
    ndiv = 5
    text_width = dx / ndiv / 2

    def rint(x):
        return math.floor(x + 0.5)

    dx_over_ndiv = dx / ndiv
    for n in range(5):  # iterate 5 times to find optimum division size
        # div: length of each division
        tbe = math.log10(dx_over_ndiv)  # looking for approx. 'ndiv' divisions in a length 'dx'
        div = pow(10, rint(tbe))  # div: power of 10 closest to dx/ndiv
        if math.fabs(div / 2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
            div /= 2  # test if div/2 is closer to dx/ndiv
        elif math.fabs(div * 2 - dx_over_ndiv) < math.fabs(div - dx_over_ndiv):
            div *= 2  # test if div*2 is closer to dx/ndiv
        x0 = div * math.ceil(xi / div) - div
        if n > 1:
            ndiv = rint(size / text_width)
    return x0, div

def _round_up_max(max_val):
    "Rounds up a maximum value."
    # Prevent zero values raising an error. Rounds up to 10 at a minimum.
    max_val = max(10, max_val)
    e = int(math.log10(max_val))
    if e >= 2:
        e -= 1
    m = 10 ** e
    return math.ceil(float(max_val) / m) * m

# Copied from Anki with the following changes:
# - Set tickDecimals to 0.
# - Update tickFormatter to show 1 decimal unless whole number
# TODO pull request to Anki to include these changes
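
A quick usage sketch of _round_up_max above, with the results worked through by hand (the inputs are illustrative, not from the original project):

print(_round_up_max(7))     # 10 -- clamped to the minimum of 10 first
print(_round_up_max(1234))  # 1300 -- e = 3 -> 2, m = 100, ceil(12.34) * 100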

def proba_to_quality_sanger(pe):
    """A value between 0 and 93.

    :param pe: the probability of error.
    :return: Q, the quality score.

    - a high probability of error (0.99) gives Q close to 0
    - a low probability of error (0.05) gives Q = 13
    - a low probability of error (0.01) gives Q = 20
    """
    if pe > 1:
        pe = 1
    if pe < 1e-90:
        pe = 1e-90
    Qs = -10 * log10(pe)
    if Qs > 93:
        Qs = 93
    return Qs
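
A minimal check of proba_to_quality_sanger against the values quoted in its docstring (assuming only the function above and from math import log10):

print(proba_to_quality_sanger(0.01))  # 20.0 -- Q = -10 * log10(0.01)
print(proba_to_quality_sanger(0.05))  # about 13.01
print(proba_to_quality_sanger(0.0))   # 93 -- pe clamped to 1e-90, Q capped at 93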

def proba_to_quality_solexa(pe):
    """Prior to v1.3 (ref: https://en.wikipedia.org/wiki/FASTQ_format)"""
    if pe > 1:
        pe = 1
        return -5  # pe/(1-pe) would divide by zero; -5 is the Solexa minimum
    if pe < 1e-90:
        pe = 1e-90
    Qs = -10 * log10(pe / (1 - pe))
    if Qs > 62:
        Qs = 62
    if Qs < -5:
        Qs = -5
    return Qs

def rep_log10(rep):
    def log10(string):
        leading_digits = int(string[0:4])
        log = math.log10(leading_digits) + 0.00000001
        num = len(string) - 1
        return num + (log - int(log))

    rep = str(rep)
    if rep == "0":
        return 25

    sign = -1 if rep[0] == '-' else 1
    if sign < 0:
        rep = rep[1:]

    out = log10(rep)
    out = max(out - 9, 0) * sign  # @ -9, $1 earned is approx magnitude 1
    out = (out * 9) + 25          # 9 points per magnitude. center at 25
    return round(out, 2)
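
A usage sketch for rep_log10 (the raw reputation value below is illustrative, not from the original project):

print(rep_log10("0"))          # 25 -- the neutral starting score
print(rep_log10(10000000000))  # 34.0 -- one magnitude above the -9 offset adds 9 points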

def classify_naive_bayes(X_test, prior, likelihood, num):
    p_not = math.log10(prior[0])
    p_free = math.log10(prior[1])
    not_dict = likelihood[0]
    free_dict = likelihood[1]
    not_num = num[0]
    free_num = num[1]
    voc_num = num[2]
    for word in X_test:
        # not free
        if word in not_dict:
            p_not += math.log10(1.0 * not_dict[word])
        else:
            p_not += math.log10(1.0 / (not_num + voc_num))
        # free
        if word in free_dict:
            p_free += math.log10(1.0 * free_dict[word])
        else:
            p_free += math.log10(1.0 / (free_num + voc_num))
    return p_free >= p_not
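
The log10 sums in classify_naive_bayes are the standard guard against floating-point underflow: multiplying many small likelihoods quickly reaches 0.0, while adding their logarithms stays well within range. A minimal demonstration:

import math

probs = [1e-5] * 100          # 100 word likelihoods of 1e-5 each
product = 1.0
for p in probs:
    product *= p              # 1e-500 underflows: product ends up 0.0
log_sum = sum(math.log10(p) for p in probs)  # -500.0, no underflow
print(product, log_sum)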

def find(self, query, cutoff, limit=None):
    """Find similar fragments to query.

    Args:
        query (str): Query fragment identifier
        cutoff (float): Cutoff, similarity scores below cutoff are discarded.
        limit (int): Maximum number of hits. Default is None for no limit.

    Returns:
        list[tuple[str,float]]: Hit fragment identifier and similarity score
    """
    precision = float(self.score_precision)
    precision10 = float(10 ** (floor(log10(precision))))
    scutoff = int(cutoff * precision)
    query_id = self.cache_l2i[query]
    subjects = self.h5file.root.scores[query_id, ...]
    filled_subjects_ids = subjects.nonzero()[0]
    filled_subjects = [(i, subjects[i]) for i in filled_subjects_ids]
    hits = [(self.cache_i2l[k], ceil(precision10 * v / precision) / precision10)
            for k, v in filled_subjects if v >= scutoff]
    sorted_hits = sorted(hits, key=lambda r: r[1], reverse=True)
    if limit is not None:
        sorted_hits = sorted_hits[:limit]
    return sorted_hits

def __getitem__(self, item):
    """Get all similarities of fragment. Self is excluded.

    Args:
        item (str): Label of a fragment

    Returns:
        list[tuple[str, float]]: List of (fragment_label, score)
    """
    precision = float(self.score_precision)
    precision10 = float(10 ** (floor(log10(precision))))
    query_id = self.cache_l2i[item]
    subjects = self.h5file.root.scores[query_id, ...]
    hits = [(self.cache_i2l[k], ceil(precision10 * v / precision) / precision10)
            for k, v in enumerate(subjects) if k != query_id]
    return hits

def get_conf_int(nvar):
    slices = []
    for x in range(0, int(math.ceil(math.log(nvar, 2)))):
        slices.append(2 ** x)
    slices.append(nvar - 1)
    slices.reverse()
    points = []
    for slice in slices:
        rv = scipy.stats.beta(slice, nvar - slice)
        points.append((
            round(-math.log10((slice - 0.5) / nvar), 2),
            round(-math.log10(rv.ppf(0.05 / 2)), 2),
            round(-math.log10(rv.ppf(1 - (0.05 / 2))), 2)
        ))
    return points

def write_languages(self, file_path='', date=str(datetime.date.today())):
    """Updates languages.csv file with current data."""
    self.remove_date(file_path=file_path, date=date)
    languages_exists = os.path.isfile(file_path)
    with open(file_path, 'a') as out_languages:
        if not languages_exists:
            out_languages.write('date,language,count,size,size_log\n')
        languages_sorted = sorted(self.languages_size)
        # self.delete_last_line(date=date, file_path=file_path)
        for language in languages_sorted:
            try:
                out_languages.write(
                    date + ',' + language + ',' +
                    str(self.languages[language]) + ',' +
                    str(self.languages_size[language]) + ',' +
                    str(math.log10(int(self.languages_size[language]))) + '\n')
            except (TypeError, KeyError):
                out_languages.write(
                    date + ',' + language + ',' +
                    str(0) + ',' +
                    str(self.languages_size[language]) + ',' +
                    str(math.log10(int(self.languages_size[language]))) + '\n')

def select_tweets(timeline, allow_rts=True, allow_replies=False, popular_only=True):
    texts = []
    for t in timeline:
        if 'retweeted_status' not in t:
            if not allow_replies and t['in_reply_to_status_id_str']:
                continue
            t['tweet_score'] = log(t['retweet_count'] + 1.0) + log(t['favorite_count'] + 1.0)
            t['__is_rt__'] = False
            texts.append(t)
        elif allow_rts:
            t['retweeted_status']['tweet_score'] = (
                log10(t['retweet_count'] + 1.0) + log10(t['favorite_count'] + 1.0))
            t['retweeted_status']['source_created_at'] = t['retweeted_status']['created_at']
            t['retweeted_status']['created_at'] = t['created_at']
            t['retweeted_status']['__is_rt__'] = True
            texts.append(t['retweeted_status'])

    # texts = sorted(texts, key=lambda x: x['tweet_score'], reverse=True)[0:100]
    if popular_only:
        texts = list(filter(lambda x: x['tweet_score'] > 0, texts))
    return texts

def get_bon_thresh(normalized, power):
    """
    Calculate the Bonferroni correction threshold.

    Divide the power by the sum of all finite values (all non-nan values).

    :param normalized: an array of all normalized p-values. Normalized
        p-values are -log10(p) where p is the p-value.
    :param power: the threshold power being used (usually 0.05)
    :type normalized: numpy array
    :type power: float

    :returns: The Bonferroni correction
    :rtype: float
    """
    return power / sum(np.isfinite(normalized))

def test_logs(self):
    LOG10E = math.log10(math.e)

    for exp in list(range(10)) + [100, 1000, 10000]:
        value = 10 ** exp
        log10 = math.log10(value)
        self.assertAlmostEqual(log10, exp)

        # log10(value) == exp, so log(value) == log10(value)/log10(e) ==
        # exp/LOG10E
        expected = exp / LOG10E
        log = math.log(value)
        self.assertAlmostEqual(log, expected)

    for bad in -(1 << 10000), -2, 0:
        self.assertRaises(ValueError, math.log, bad)
        self.assertRaises(ValueError, math.log10, bad)

def get_histogram(data, n=20, log=False):
    """Groups data in N steps"""
    import math
    mn = logfloor(min(data))
    mx = logroof(max(data))
    print('data=[%e:%e], ranges=[%e:%e]' % (min(data), max(data), mn, mx))
    if log:
        mn, mx = log10(mn), log10(mx)
    step = float(mx - mn) / n
    print('mn, mx, step = %s, %s, %s' % (mn, mx, step))
    ranges = []
    for i in range(n):
        r0 = mn + i * step
        r1 = mn + (i + 1) * step
        if log:
            r0, r1 = 10 ** r0, 10 ** r1
        ranges.append((r0, len([d for d in data if r0 <= d < r1])))
    return ranges

def get_channel(self, previous_value, new_value):
    """
    Prepares signal value depending on the previous one and algorithm.
    """
    if self.stereo_algorithm == STEREO_ALGORITHM_NEW:
        channel_value = new_value
    elif self.stereo_algorithm == STEREO_ALGORITHM_LOGARITHM:
        if previous_value == 0.0:
            channel_value = 0.0
        else:
            channel_value = 20 * math.log10(new_value / previous_value)
            if channel_value < -20:
                channel_value = -20
            if channel_value > 3:
                channel_value = 3
            channel_value = (channel_value + 20) * (100 / 23)
    elif self.stereo_algorithm == STEREO_ALGORITHM_AVERAGE:
        channel_value = statistics.mean([previous_value, new_value])
    return channel_value

def visualizeCrossValidation(results):
    # Visualize the cross-validation results
    x_scatter = [math.log10(x[0]) for x in results]
    y_scatter = [math.log10(x[1]) for x in results]

    # plot training accuracy
    marker_size = 100
    colors = [results[x][0] for x in results]
    plt.subplot(2, 1, 1)
    plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
    plt.colorbar()
    plt.xlabel('log learning rate')
    plt.ylabel('log regularization strength')
    plt.title('CIFAR-10 training accuracy')

    # plot validation accuracy
    colors = [results[x][1] for x in results]  # default size of markers is 20
    plt.subplot(2, 1, 2)
    plt.scatter(x_scatter, y_scatter, marker_size, c=colors)
    plt.colorbar()
    plt.xlabel('log learning rate')
    plt.ylabel('log regularization strength')
    plt.title('CIFAR-10 validation accuracy')
    plt.show()

def _set_numticks(self):
    self.numticks = 11  # todo; be smart here; this is just for dev

# def view_limits(self, vmin, vmax):
#     'Try to choose the view limits intelligently'
#     if vmax < vmin:
#         vmin, vmax = vmax, vmin
#     if vmin == vmax:
#         vmin -= 1
#         vmax += 1
#     exponent, remainder = divmod(math.log10(vmax - vmin), 1)
#     if remainder < 0.5:
#         exponent -= 1
#     scale = 10 ** (-exponent)
#     vmin = math.floor(scale * vmin) / scale
#     vmax = math.ceil(scale * vmax) / scale
#     return mtransforms.nonsingular(vmin, vmax)

def _parse_code(code):
    parsed_code = []
    while len(code):
        next_token, code = CAM._get_next_token_new(code)
        parsed_code.append(next_token)
        if next_token == u'?' or next_token == 'Y':
            arg, code = get_term_in_brackets(code)
            parsed_code.append(CAM._parse_code(arg))
        elif next_token == '\'':
            arg = int(UnicodeHack(re.search(CAM.nums_re, code).group()))
            parsed_code.append([arg])
            length = int(log10(abs(arg))) + 1 if arg != 0 else 1
            code = code[length if arg >= 0 else length + 1:]
        elif next_token == 'br':
            args, code = get_term_in_brackets(code, remove_brackets=False)
            arg1, arg2 = parse_args_in_brackets(args)
            parsed_code.append([CAM._parse_code(arg1), CAM._parse_code(arg2)])
    return parsed_code[::-1]

def round_sig(val, sig):
    if val is None:
        return None
    elif val == 0.:
        return 0.
    else:
        return round(val, sig - int(floor(log10(abs(val)))) - 1)
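
A few hand-checked calls to round_sig above (inputs are illustrative):

print(round_sig(12345, 3))    # 12300 -- floor(log10(12345)) = 4, so round(val, -2)
print(round_sig(0.04567, 2))  # 0.046 -- floor(log10(0.04567)) = -2, so round(val, 3)
print(round_sig(None, 3))     # None passes through unchanged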

def ansi_density(color, density_standard):
    """
    Calculates density for the given SpectralColor using the spectral
    weighting function provided. For example, ANSI_STATUS_T_RED. These may be
    found in :py:mod:`colormath.density_standards`.

    :param SpectralColor color: The SpectralColor object to calculate density for.
    :param numpy.ndarray density_standard: NumPy array of filter of choice
        from :py:mod:`colormath.density_standards`.
    :rtype: float
    :returns: The density value for the given color and density standard.
    """
    # Load the spec_XXXnm attributes into a Numpy array.
    sample = color.get_numpy_array()
    # Matrix multiplication
    intermediate = sample * density_standard
    # Sum the products.
    numerator = intermediate.sum()
    # This is the denominator in the density equation.
    sum_of_standard_wavelengths = density_standard.sum()
    # This is the top level of the density formula.
    return -1.0 * log10(numerator / sum_of_standard_wavelengths)

def getMapLines(dmin, dmax, nlines):
    drange = dmax - dmin
    if drange > 4:
        near = 1
    else:
        if drange >= 0.5:
            near = 0.25
        else:
            near = 0.125
    inc = roundToNearest(drange / nlines, near)
    if inc == 0:
        # make the increment the closest power of 10
        near = np.power(10, round(math.log10(drange)))
        inc = ceilToNearest(drange / nlines, near)
        newdmin = floorToNearest(dmin, near)
        newdmax = ceilToNearest(dmax, near)
    else:
        newdmin = ceilToNearest(dmin, near)
        newdmax = floorToNearest(dmax, near)
    darray = np.arange(newdmin, newdmax + inc, inc)
    if darray[-1] > dmax:
        darray = darray[0:-1]
    return darray

def show_threads():
    """
    Log the name, ident and daemon flag of all alive threads in DEBUG level
    """
    if logger.isEnabledFor(logging.DEBUG):
        all_threads = threading.enumerate()
        max_name = reduce(max, map(len, [t.name for t in all_threads]))
        max_ident = reduce(max, map(int, map(math.ceil, map(
            math.log10, [t.ident for t in all_threads if t.ident is not None]))))
        msg = ['Name' + ' ' * (max_name - 2) + 'Ident' + ' ' * (max_ident - 3) + 'Daemon',
               '=' * max_name + ' ' + '=' * max_ident + ' ======']
        fmt = '%{0}.{0}s %{1}d %d'.format(max_name, max_ident)
        for t in threading.enumerate():
            msg.append(fmt % (t.name, t.ident, t.daemon))
        logger.debug("Threads currently alive on process %d:\n%s",
                     os.getpid(), '\n'.join(msg))

def calculateWordsIDF(texts):
    all_documents_count = len(texts)
    idf_data = dict()
    for text in texts:
        for word, frequency in text.word_frequency.items():
            word_doc_freq = 0.0
            for doc in texts:
                if isSentencesContainsWord(doc.register_pass_centences, word):
                    word_doc_freq = word_doc_freq + 1.0
                    continue
            pre_idx = (0.0 + all_documents_count) / word_doc_freq
            inverse_document_frequency = math.log10(pre_idx)
            idf_data[word] = inverse_document_frequency
    return idf_data

# Compute TF*IDF for every word of every text and store it in text.words_tf_idf[word]
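
calculateWordsIDF implements the classic inverse document frequency, idf(w) = log10(N / df(w)), where N is the number of documents and df(w) is the number of documents containing w. A self-contained sketch of just that formula (the corpus below is hypothetical, not the project's data structures):

import math

documents = [{"the", "cat", "sat"}, {"the", "dog"}, {"the", "cat"}]
N = len(documents)

def idf(word):
    df = sum(1 for doc in documents if word in doc)
    return math.log10(N / df)

print(idf("the"))  # log10(3/3) = 0.0 -- a word in every document carries no weight
print(idf("dog"))  # log10(3/1) ~ 0.477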

def generate_R_scripts():
    r_file = open(outdir + "/r_script.r", "w")
    if len(feature_list) == 0:
        r_file.close()
    else:
        cmd = "output_corr_matrix <- read.delim(\"" + outdir + "/output_corr_matrix.txt\")\n"
        cmd = cmd + "data = output_corr_matrix\n"
        cmd = cmd + "d3 <- as.dist((1 - data[,-1]))\n"
        cmd = cmd + "clust3 <- hclust(d3, method = \"average\")\n"
        if len(feature_list) < 5:
            cmd = cmd + "pdf(\"" + outdir + "/" + pdf_tag + ".pdf\", width=10, height=7)\n"
        else:
            cmd = cmd + "pdf(\"" + outdir + "/" + pdf_tag + ".pdf\", width=" + \
                str(math.log10(len(feature_list)) * 10) + ", height=7)\n"
        cmd = cmd + "op = par(bg = \"gray85\")\n"
        cmd = cmd + "par(plt=c(0.05, 0.95, 0.2, 0.9))\n"
        cmd = cmd + "plot(clust3, lwd = 2, lty = 1, cex=0.8, xlab=\"Samples\", sub = \"\", " + \
            "ylab=\"Distance (1-Pearson correlation)\", hang = -1, axes = FALSE)\n"
        cmd = cmd + "axis(side = 2, at = seq(0, 1, 0.2), labels = FALSE, lwd = 2)\n"
        cmd = cmd + "mtext(seq(0, 1, 0.2), side = 2, at = seq(0, 1, 0.2), line = 1, las = 2)\n"
        cmd = cmd + "dev.off()\n"
        r_file.write(cmd)
        r_file.close()

def predict(testSet, PP, PN, positive_probabilities, negative_probabilities,
            unseen_pos_prob, unseen_neg_prob):
    predicted_class = []
    for review in testSet:
        negative_probab = math.log10(PN)
        positive_probab = math.log10(PP)
        review_words = word_tokenize(review)
        for w in review_words:
            if w in negative_probabilities:
                negative_probab = negative_probab + math.log10(negative_probabilities[w])
            else:
                negative_probab = negative_probab + math.log10(unseen_neg_prob)
            if w in positive_probabilities:
                positive_probab = positive_probab + math.log10(positive_probabilities[w])
            else:
                positive_probab = positive_probab + math.log10(unseen_pos_prob)
        if negative_probab > positive_probab:
            result = '-'
        else:
            result = '+'
        predicted_class.append(result)
    return predicted_class

def format_message(self, current, total):
    """Creates message to be written on console"""
    if total:
        ratio = float(current) / total
        filled_bricks = int((ratio + 0.05) * self.progress_num_bricks)
        num_digits = int(math.log10(total))
    else:
        ratio = 1.0
        filled_bricks = self.progress_num_bricks
        num_digits = 1
    last_step = total == current
    if last_step:
        eta = datetime.timedelta(0)
    elif ratio and self.start_datetime:
        total_seconds_ = total_seconds(
            datetime.datetime.now() - self.start_datetime
        )
        eta = datetime.timedelta(
            seconds=total_seconds_ / ratio - total_seconds_
        )
    else:
        eta = '?'
    screw = " " if last_step else next(self.screw_cycle)
    return self.line_template.format(
        bricks=self.progress_brick * filled_bricks,
        num_bricks=self.progress_num_bricks,
        ratio=ratio,
        current=current,
        total=total,
        num_digits=num_digits,
        eta=eta,
        screw=screw
    )
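
format_message uses int(math.log10(total)) to size the counter field. The usual digit-count idiom adds 1; note also that float rounding can make log10-based counting off by one for very large integers, so len(str(n)) is the safer choice when exactness matters. A sketch (the helper name num_digits is ours, not from the project):

import math

def num_digits(n):
    """Decimal digits in a positive integer, via log10."""
    return int(math.log10(n)) + 1   # log10(999) ~ 2.9996 -> 2, then +1 -> 3

print(num_digits(999), num_digits(1000))  # 3 4
print(len(str(10 ** 20)))                 # 21 -- exact even where floats may round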

def log2(x):
    """Base 2 logarithm.

    >>> log2(1024)
    10.0
    """
    return math.log10(x) / math.log10(2)

def compute_scale(min_, max_, logarithmic, order_min, min_scale, max_scale):
    """Compute an optimal scale between min and max"""
    if min_ == 0 and max_ == 0:
        return [0]
    if max_ - min_ == 0:
        return [min_]
    if logarithmic:
        log_scale = compute_logarithmic_scale(
            min_, max_, min_scale, max_scale)
        if log_scale:
            return log_scale
        # else we fall back to normal scaling
    order = round(log10(max(abs(min_), abs(max_)))) - 1
    if order_min is not None and order < order_min:
        order = order_min
    else:
        while ((max_ - min_) / (10 ** order) < min_scale and
               (order_min is None or order > order_min)):
            order -= 1
    step = float(10 ** order)
    while (max_ - min_) / step > max_scale:
        step *= 2.
    positions = []
    position = round_to_scale(min_, step)
    while position < (max_ + step):
        rounded = round_to_scale(position, step)
        if min_ <= rounded <= max_:
            if rounded not in positions:
                positions.append(rounded)
        position += step
    if len(positions) < 2:
        return [min_, max_]
    return positions

def dot(self, serie, r_max):
    """Draw a dot line"""
    serie_node = self.svg.serie(serie)
    view_values = list(map(self.view, serie.points))
    for i, value in safe_enumerate(serie.values):
        x, y = view_values[i]
        if self.logarithmic:
            log10min = log10(self._min) - 1
            log10max = log10(self._max or 1)
            if value != 0:
                size = r_max * (
                    (log10(abs(value)) - log10min) /
                    (log10max - log10min))
            else:
                size = 0
        else:
            size = r_max * (abs(value) / (self._max or 1))
        metadata = serie.metadata.get(i)
        dots = decorate(
            self.svg,
            self.svg.node(serie_node['plot'], class_="dots"),
            metadata)
        alter(self.svg.node(
            dots, 'circle',
            cx=x, cy=y, r=size,
            class_='dot reactive tooltip-trigger' + (
                ' negative' if value < 0 else '')),
            metadata)
        val = self._format(serie, i)
        self._tooltip_data(
            dots, val, x, y, 'centered', self._get_x_label(i))
        self._static_value(serie_node, val, x, y, metadata)

def __init__(self, width, height, box):
    """Create the view with a width and height and a box bounds"""
    super(PolarLogView, self).__init__(width, height, box)
    if not hasattr(box, '_rmin') or not hasattr(box, '_rmax'):
        raise Exception(
            'Box must be set with set_polar_box for polar charts')
    self.log10_rmax = log10(self.box._rmax)
    self.log10_rmin = log10(self.box._rmin)
    if self.log10_rmin == self.log10_rmax:
        self.log10_rmax = self.log10_rmin + 1

def __init__(self, width, height, box, aperture=pi / 3):
    """Create the view with a width and height and a box bounds"""
    super(PolarThetaLogView, self).__init__(width, height, box)
    self.aperture = aperture
    if not hasattr(box, '_tmin') or not hasattr(box, '_tmax'):
        raise Exception(
            'Box must be set with set_polar_box for polar charts')
    self.log10_tmax = log10(self.box._tmax) if self.box._tmax > 0 else 0
    self.log10_tmin = log10(self.box._tmin) if self.box._tmin > 0 else 0
    if self.log10_tmin == self.log10_tmax:
        self.log10_tmax = self.log10_tmin + 1

def __call__(self, rhotheta):
    """Project rho and theta"""
    if None in rhotheta:
        return None, None
    rho, theta = rhotheta
    # Center case
    if theta == 0:
        return super(PolarThetaLogView, self).__call__((0, 0))
    theta = self.box._tmin + (self.box._tmax - self.box._tmin) * (
        log10(theta) - self.log10_tmin) / (
        self.log10_tmax - self.log10_tmin)
    start = 3 * pi / 2 + self.aperture / 2
    theta = start + (2 * pi - self.aperture) * (
        theta - self.box._tmin) / (
        self.box._tmax - self.box._tmin)
    return super(PolarThetaLogView, self).__call__(
        (rho * cos(theta), rho * sin(theta)))