我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用sys.stdout.flush()。
def print_status(progress, file_size, start): """ This function - when passed as `on_progress` to `Video.download` - prints out the current download progress. :params progress: The lenght of the currently downloaded bytes. :params file_size: The total size of the video. :params start: The time when started """ percent_done = int(progress) * 100. / file_size done = int(50 * progress / int(file_size)) dt = (clock() - start) if dt > 0: stdout.write("\r [%s%s][%3.2f%%] %s at %s/s " % ('=' * done, ' ' * (50 - done), percent_done, sizeof(file_size), sizeof(progress // dt))) stdout.flush()
def parallel_cone(pipe,cells,time,cone_input,cone_layer,Vis_dark,Vis_resting_potential):
    """Simulate a slice of cone cells and send their responses over a pipe.

    :param pipe: multiprocessing connection; the response slab for *cells*
        is sent through it and the pipe is closed afterwards
    :param cells: indices of the cone cells this worker simulates
    :param time: number of time steps to simulate
    :param cone_input: 2-D array indexed as (cell, t) -- assumed from the
        indexing below, TODO confirm against the caller
    :param cone_layer: sequence of cone model objects, indexable by cell
    :param Vis_dark: dark-potential offset subtracted from each response
    :param Vis_resting_potential: resting-potential offset subtracted too
    """
    # Initialize array of cone_response copying cone_input
    # NOTE(review): this is an alias, not a copy -- the writes below also
    # mutate cone_input within this process.
    cone_response = cone_input
    for cell in cells:
        # Only the worker named "root" reports progress, so the console
        # shows a single updating line.
        if multiprocessing.current_process().name=="root":
            progress = 100*(cell-cells[0])/len(cells)
            stdout.write("\r progress: %d %%"% progress)
            stdout.flush()
        # Time-driven simulation
        for t in np.arange(0,time):
            # Update dynamics of the model
            cone_layer[cell].feedInput(cone_input[cell,t])
            cone_layer[cell].update()
            # Record response
            cone_response[cell,t] = (cone_layer[cell].LF_taum.last_values[0] -\
            cone_layer[cell].LF_tauh.last_values[0] - Vis_dark - Vis_resting_potential)
    pipe.send(cone_response[cells,:])
    pipe.close()

#! ================
#! Class runNetwork
#! ================
def fry(img):
    """Apply the full "deep fry" pipeline to *img* and return the result."""
    char_coords = find_chars(img)
    img = add_b_emojis(img, char_coords)
    img = add_laughing_emojis(img, 5)
    eye_coords = find_eyes(img)
    img = add_flares(img, eye_coords)

    # bulge at random coordinates (same RNG call order as before)
    bulge_x = (img.width - 1) * np.random.random(1)
    bulge_y = (img.height - 1) * np.random.random(1)
    radius = int(((img.width + img.height) / 10) * (np.random.random(1)[0] + 1))
    img = bulge(img, np.array([int(bulge_x), int(bulge_y)]), radius, 3, 5, 1.8)

    # some finishing touches
    # print("Adding some finishing touches... ", end='')
    stdout.flush()
    img = add_noise(img, 0.2)
    img = change_contrast(img, 200)
    # print("Done")
    return img

# Downloads image from url to RAM, fries it and saves to disk
def main():
    """
    Connect to an SNI-enabled server and request a specific hostname, specified
    by argv[1], of it.
    """
    # Python 2 script (print statements throughout).
    if len(argv) < 2:
        print 'Usage: %s <hostname>' % (argv[0],)
        return 1

    client = socket()

    # Flush so the trailing-comma print shows before connect() blocks.
    print 'Connecting...',
    stdout.flush()
    client.connect(('127.0.0.1', 8443))
    print 'connected', client.getpeername()

    # Wrap the plain socket in TLS and send the requested hostname via the
    # SNI extension before the handshake.
    client_ssl = Connection(Context(TLSv1_METHOD), client)
    client_ssl.set_connect_state()
    client_ssl.set_tlsext_host_name(argv[1])
    client_ssl.do_handshake()
    print 'Server subject is', client_ssl.get_peer_certificate().get_subject()
    client_ssl.close()
def main():
    """
    Run an SNI-enabled server which selects between a few certificates in a
    C{dict} based on the handshake request it receives from a client.
    """
    # Plain listening socket; SO_REUSEADDR so quick restarts don't hit
    # "address already in use".
    port = socket()
    port.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)
    port.bind(('', 8443))
    port.listen(3)

    print 'Accepting...',
    stdout.flush()
    server, addr = port.accept()
    print 'accepted', addr

    # The servername callback picks a certificate once the client's SNI
    # value arrives during the handshake.
    server_context = Context(TLSv1_METHOD)
    server_context.set_tlsext_servername_callback(pick_certificate)

    server_ssl = Connection(server_context, server)
    server_ssl.set_accept_state()
    server_ssl.do_handshake()
    server.close()
def test(self, dataset, subset='test', name='Test'):
    """Evaluate the model over *subset* of *dataset*, logging TF summaries.

    :param dataset: data source exposing ``epoch(subset, batch_size)``
    :param subset: dataset partition to evaluate (default ``'test'``)
    :param name: label used when printing the aggregated report
    """
    global g_args
    # One summary writer per run, tagged with timestamp + configured title.
    train_writer = tf.summary.FileWriter(
        os.path.join(hparams.SUMMARY_DIR,
                     str(datetime.datetime.now().strftime("%m%d_%H%M%S"))
                     + ' ' + hparams.SUMMARY_TITLE), g_sess.graph)
    cli_report = {}
    for data_pt in dataset.epoch(
            subset, hparams.BATCH_SIZE * hparams.MAX_N_SIGNAL):
        # note: this disables dropout during test
        # (keep probability fed as the constant 1.)
        to_feed = dict(
            zip(self.train_feed_keys, (
                np.reshape(data_pt[0],
                           [hparams.BATCH_SIZE, hparams.MAX_N_SIGNAL, -1, hparams.FEATURE_SIZE]),
                1.)))
        step_summary, step_fetch = g_sess.run(
            self.valid_fetches, to_feed)[:2]
        train_writer.add_summary(step_summary)
        # One dot per batch as a lightweight progress indicator.
        stdout.write('.')
        stdout.flush()
        _dict_add(cli_report, step_fetch)
    stdout.write(name + ': %s\n' % (
        _dict_format(cli_report)))
def printStats(self):
    """ Print generation statistics

    :rtype: the printed statistics as string

    .. versionchanged:: 0.6
       The return of *printStats* method.
    """
    percent = self.currentGeneration * 100 / float(self.nGenerations)
    message = "Gen. %d (%.2f%%):" % (self.currentGeneration, percent)
    log.info(message)
    # Trailing comma keeps the cursor on the same line (Python 2 style);
    # flush so the header appears before the population statistics print.
    print(message,)
    sys_stdout.flush()
    self.internalPop.statistics()
    stat_ret = self.internalPop.printStats()
    return message + stat_ret

# -----------------------------------------------------------------
def build_onehots(self, vocab_size=None):
    """Build one-hot encodings of each sequence."""
    # If a charset size was passed in, record it; otherwise fall back to
    # the size inferred earlier.
    if vocab_size:
        self.charsize = vocab_size
    n_symbols = vocab_size if vocab_size else self.charsize

    stderr.write("Constructing one-hot vector data...")
    stderr.flush()
    t_start = time.time()
    # These can be large, so we don't necessarily want them on the GPU --
    # they stay plain numpy arrays rather than Theano shared vars.
    # Fancy indexing into an identity matrix yields the one-hot rows.
    self.x_onehots = np.eye(n_symbols, dtype=th.config.floatX)[self.x_array]
    self.y_onehots = np.eye(n_symbols, dtype=th.config.floatX)[self.y_array]
    t_end = time.time()
    stderr.write("done!\nTook {0:.4f} ms.\n".format((t_end - t_start) * 1000.0))
def _vis_graph(graph, points, worker, status):
    """Collect the visibility edges from every point in *points*.

    :param graph: the obstacle graph scanned by visible_vertices
    :param points: origin points to compute visibility from
    :param worker: worker index, used to offset this worker's status column
    :param status: truthy to render per-worker progress on stdout
    :return: list of Edge(p1, p2) visibility pairs
    """
    total_points = len(points)
    visible_edges = []
    if status:
        t0 = default_timer()
        done_count = 0
    for origin in points:
        for target in visible_vertices(origin, graph, scan='half'):
            visible_edges.append(Edge(origin, target))
        if status:
            done_count += 1
            avg_time = round((default_timer() - t0) / done_count, 3)
            progress = (done_count, total_points - done_count, avg_time)
            # \033[<n>C moves the cursor right so each worker owns a column.
            template = '\r\033[' + str(21*worker) + 'C[{:4}][{:4}][{:5.3f}] \r'
            stdout.write(template.format(*progress))
            stdout.flush()
    return visible_edges
def run_chars(strhash, initial, final, chars, types): starttime = gmtime()[5] for n in range(initial, final + 1): for xs in product(chars, repeat=n): string=''.join(xs) if types == "md5" or types == "MD5": password = md5(string).hexdigest() elif types == "sha1" or types == "SHA1": password = sha1(string).hexdigest() elif types == "sha512" or types == "SHA512": password = sha512(string).hexdigest() else: print "[-] Este formato nao esta incluso no script, talvez voce tenha que fazer isso manualmente" exit() if strhash == password: final = gmtime()[5] - starttime print "\n[+] Crackeada => %s\n"%(string) print "[+] Duracao => %i segundos\n"%(final) print "\a" stdout.flush() exit() else: print "[-] Tentando => %s"%(string) final = gmtime()[5] - starttime print "\n[+] Duracao => %i segundos\n"%(final)
def bruteforce(host, port, uname, wordlist):
    """Brute-force HTTP basic auth at http://host:port/ using *wordlist*.

    :param host: router address
    :param port: port number as a string (concatenated into the URL)
    :param uname: username to authenticate as
    :param wordlist: path to a file with one candidate password per line
    """
    try:
        lista = open(wordlist, "r")
    except IOError:
        stdout.write(colored(" [x] Error opening word list\n", "red", attrs=['bold']))
        exit()
    url = "http://"+host+":"+port+"/"
    init = time()
    for l in lista:
        pwd = l.strip()
        try:
            # NOTE(review): the bare except below also swallows
            # KeyboardInterrupt, so Ctrl-C is reported as a connect error.
            r=get(url, auth=(uname, pwd), timeout=3)
        except:
            stdout.write(colored("\n [-] There was an error connecting to the router %s\n"%(host), "red", attrs=['bold']))
            exit()
        if r.status_code == 200:
            stdout.write(colored("\n\n [+] Cracked => %s:%s\n [+] Duration => %s seconds\n\n"%(uname, pwd, time() - init), "green", attrs=['bold']))
            lista.close()
            exit()
        else:
            # \r keeps the attempt display on a single updating line.
            stdout.write(colored("\r [-] Current login %s:%s"%(uname, pwd), "yellow", attrs=['bold']))
            stdout.flush()
    print ""
    lista.close()
def generator(min_lenght, max_lenght, chars, name): lines = 0 try: file=open(name, "w") except IOError: print "\n[x] Error : %s este caminho nao existe\n"%(name) exit() file_stats(max_lenght) print "" for n in range(min_lenght, max_lenght + 1): for xs in product(chars, repeat=n): lines = lines + 1 string=''.join(xs) file.write(string + "\n") stdout.write('\r[+] Saving character `%s`' % string) stdout.flush() print "\a" file.close()
def generator_param(min_lenght, max_lenght, chars, name, param, value):
    """Like generator(), but glues *value* onto every candidate: "-pre"
    prefixes it, "-pos" appends it.

    Returns 1 immediately when *param* is neither "-pre" nor "-pos".
    """
    try:
        file=open(name, "w")
    except IOError:
        print "\n[x] Error : %s este caminho nao existe\n"%(name)
        exit()
    file_stats(max_lenght)
    print ""
    for n in range(min_lenght, max_lenght + 1):
        for xs in product(chars, repeat=n):
            string=''.join(xs)
            if param == "-pre":
                file.write(value+string + "\n")
            elif param == "-pos":
                file.write(string+value + "\n")
            else:
                # NOTE(review): aborting here leaves `file` unclosed.
                return 1
            # Progress line mirrors whichever composition was written.
            if param == "-pre":
                stdout.write('\r[+] Saving character `%s%s`'%(value,string))
            elif param == "-pos":
                stdout.write('\r[+] Saving character `%s%s`'%(string,value))
            stdout.flush()
    print "\a"
    file.close()
def check_deploy_result(operation, console, appname, auth_header):
    """Poll app_status every 0.5 s until it reports a result, animating a
    dot spinner next to *operation* while waiting.

    :return: the final status reported by app_status
    """
    tick = 0
    while True:
        # Cycle ".", "..", "..." padded to a fixed width of three columns.
        dots = (tick % 3 + 1) * '.'
        dots = dots.ljust(3)
        tick += 1
        stdout.write("\r%s... %s " % (operation, dots))
        stdout.flush()
        sleep(0.5)
        result = app_status(console, appname, auth_header)
        if result:
            stdout.write("\r%s... %s. " % (operation, result))
            stdout.flush()
            stdout.write("\n")
            return result
def download_and_uncompress_tarball(base_url, filename, data_dir):
    """Download base_url+filename into *data_dir* (unless already present)
    and extract the tarball there.

    :param base_url: URL prefix the tarball lives under
    :param filename: tarball file name, appended to base_url
    :param data_dir: local directory for the download and the extraction
    """
    def _progress(count, block_size, total_size):
        # urlretrieve reporthook: rewrite one status line with percent done.
        stdout.write('\r>> Downloading %s %.1f%%' % (
            filename, float(count * block_size) / float(total_size) * 100.0))
        stdout.flush()
    tarball_url = base_url + filename
    filepath = osp.join(data_dir, filename)
    # BUG FIX: the existence check previously tested
    # osp.join(download_dir, model_dl) -- two names this function never
    # defines -- so it could NameError or test the wrong path.  Check the
    # actual download target instead.
    if not tf.gfile.Exists(filepath):
        filepath, _ = urllib.request.urlretrieve(tarball_url, filepath, _progress)
        print()
        statinfo = stat(filepath)
        print('Successfully downloaded', filename, statinfo.st_size, 'bytes.')
    else:
        print('{} tarball already exists -- not downloading'.format(filename))
    tarfile.open(filepath, 'r:*').extractall(data_dir)
def optimize(self, X, lmbd, Z=None, max_iter=1, tol=1e-5):
    """Run up to *max_iter* optimizer steps for the sparse code of X.

    :param X: batch of input samples (rows)
    :param lmbd: sparsity penalty fed to the graph
    :param Z: optional warm-start code; zeros of shape (batch, K) otherwise
    :param max_iter: maximum number of optimizer steps
    :param tol: stop once the code change ``dz`` drops below this
    :return: the optimized code ``z_curr``
    """
    if Z is None:
        batch_size = X.shape[0]
        K = self.D.shape[0]
        z_curr = np.zeros((batch_size, K))
    else:
        z_curr = np.copy(Z)

    self.train_cost, self.train_z = [], []
    # z_curr is mutated in place each step (z_curr[:] = ...), so the feed
    # dict built once here keeps seeing the updated code.
    feed = {self.X: X, self.Z: z_curr, self.lmbd: lmbd}
    for k in range(max_iter):
        z_curr[:], dz, cost = self.session.run(
            [self.step_optim, self.dz, self._cost], feed_dict=feed)
        self.train_cost += [cost]
        self.train_z += [np.copy(z_curr)]
        if dz < tol:
            print("\r{} reached optimal solution in {}-iteration"
                  .format(self.name, k))
            break
        out.write("\rIterative optimization ({}): {:7.1%} - {:.4e}"
                  "".format(self.name, k/max_iter, dz))
        out.flush()
    # Record the cost of the final code as well.
    self.train_cost += [self.session.run(self._cost, feed_dict=feed)]
    print("\rIterative optimization ({}): {:7}".format(self.name, "done"))
    return z_curr
def print_progress(self, threshold=0, decimals=1, barLength=100): """Print a terminal progress bar.""" # Based on @Greenstick's reply (https://stackoverflow.com/a/34325723) iteration = self.stream.tell() if iteration > self.file_size: return total = self.file_size if total == 0: return progress = 100.0 * iteration / total if self.progress and progress - self.progress < threshold: return self.progress = progress percents = ("%03." + str(decimals) + "f") % progress filledLength = int(round(barLength * iteration / float(total))) barText = '*' * filledLength + '-' * (barLength - filledLength) stdout.write('%s| %s%% Completed\r' % (barText, percents)) stdout.flush()
def wait_time(self, data=None):
    """Sleep out a server-imposed wait, drawing a 50-slot progress bar.

    :param data: response dict whose 'waitSeconds' key holds the wait in
        seconds (or None for no wait)
    :return: the original 'waitSeconds' value, or 99999999 when absent
    """
    # BUG FIX: the default used to be the mutable literal
    # ``data={'waitSeconds': None}``, shared across all calls; a None
    # sentinel keeps callers' behavior identical without that hazard.
    if data is None:
        data = {'waitSeconds': None}

    def complete(i, wait):
        # Fraction of *wait* elapsed, rescaled onto 50 bar slots.
        return ((100 * (float(i) / float(wait))) * 50) / 100

    if data['waitSeconds'] is not None:
        # Pad with a small random extra so we never retry too early.
        wait = data['waitSeconds'] + (random.randint(2, 4) / 3.33)
        print(I18n.get('Waiting %s seconds') % str(wait))
        c = i = 0
        while c < 50:
            c = complete(i, wait)
            # Sleep the fractional remainder on the final tick, else 1 s.
            time.sleep(wait - i if i == int(wait) else 1)
            out.write("[{}]\0\r".format('+' * int(c) + '-' * (50 - int(c))))
            out.flush()
            i += 1
        out.write("\n")
        out.flush()
        return data['waitSeconds']
    return 99999999
def LOG(message=None,type=None):
    """Emit *message* of the given *type*, honouring the global VERBOSITY.

    VERBOSITY <= 0 silences everything; VERBOSITY == 1 prints compact
    dot-style markers from MSGSCHEME_MIN; anything higher prints full
    lines formatted via MSGSCHEME (optionally colorized via COLORSCHEME).
    """
    if VERBOSITY<=0:
        return
    elif VERBOSITY==1:
        #minimal verbosity ... dot style output
        if type in MSGSCHEME_MIN:
            message = MSGSCHEME_MIN[type]
            if DO_COLOR and type in COLORSCHEME:
                message = COLORSCHEME[type]%message
            stdout.write("%s"%message)
            stdout.flush()
    else:
        if type in MSGSCHEME:
            message = MSGSCHEME[type]%message
            if DO_COLOR and type in COLORSCHEME:
                message = COLORSCHEME[type]%message
        if MODE_FUZZ:
            # Prefix lines while fuzzing so the output can be filtered.
            stdout.write("[FUZZ] %s\n"% (message))
        else:
            stdout.write("%s\n"% (message))
        stdout.flush()
def try_it(qu): stdout.write("\r{} ---> ".format(qu)) stdout.flush() passed = 0 req = None try: req = qu.run(c) if basic_test(req): passed = 1 stdout.write("PASS\n") else: fails.append(err_format(q, req)) stdout.write("FAIL\n") print err_format(q, req) exit() except (preqlerrors.TopologyError, preqlerrors.ValueTypeError, preqlerrors.NonexistenceError) as e: errors.append(err_format(q, str(e.msg))) stdout.write("ERROR\n") stdout.flush() return passed, 1, req
def fetch():
    """Fetch all configured blocklists and print ExaBGP announce/withdraw
    commands, updating the running set of announced prefixes.

    Relies on module globals: ``blocklists`` (URLs), ``b`` (previously
    announced IPSet), ``nexthop``, ``linefilter`` and ``makeprefix``.
    """
    a = IPSet([])
    # Build the fresh working set from every line that survives the filter.
    for blocklist in blocklists:
        r = requests.get(blocklist)
        for line in r.iter_lines():
            if linefilter(line):
                a.add(makeprefix(linefilter(line)))
    # Withdraw prefixes previously announced that vanished from the new set.
    for prefix in b:
        if b.len() > 0 and b.__contains__(prefix) and not a.__contains__(prefix):
            a.discard(prefix)
            stdout.write('withdraw route ' + str(prefix) + nexthop)
            stdout.flush()
    # Announce prefixes that are new relative to the previous run.
    for prefix in a:
        if a.__contains__(prefix) and not b.__contains__(prefix):
            stdout.write('announce route ' + str(prefix) + nexthop)
            stdout.flush()
    # NOTE(review): presumably IPSet.add(IPSet) merges the whole new set
    # into ``b`` -- confirm against the IPSet API in use.
    b.add(a)
def process_photos(photos): if 'error' in photos: print "Error = ", error raise Exception("Error in Response") no_of_photos = 0 if 'data' not in photos: return while len(photos['data']) > 0: for photo in photos['data']: if 'tags' in photo: process_photo_tags(photo['tags']) if 'comments' in photo: process_photo_comments(photo['comments']) no_of_photos += 1 stdout.write("\rNumber of Photos Processed = %d" % no_of_photos) stdout.flush() if 'paging' in photos and 'next' in photos['paging']: request_str = photos['paging']['next'].replace('https://graph.facebook.com/', '') request_str = request_str.replace('limit=25', 'limit=200') photos = graph.get(request_str) else: photos['data'] = []
def dump_state(state, children=None, move=None):
    """Serialize *state* plus per-child MCTS statistics as one JSON line
    on stdout.

    :param state: game state; its __dict__ is serialized via GameEncoder
    :param children: explored child nodes (defaults to none)
    :param move: unused here; apparently kept for the caller's signature
    """
    if children is None:
        children = []
    # Aggregate win percentage is only shown for player 0.
    # NOTE(review): the /1000.0 implies a fixed simulation count of 1000
    # per move -- confirm against the search driver.
    if state.current_player == 0:
        overall_percent = (sum(child.wins_by_player[0] for child in children) / 1000.0) * 100
    else:
        overall_percent = None
    children = {
        child.move: {'ucb': child.ucb1(child.parent.current_player),
                     'visits': child.visits,
                     'wins': child.wins_by_player[child.parent.current_player]}
        for child in children}
    print(dumps({'state': state.__dict__,
                 'children': children,
                 'overall_percent': overall_percent,
                 'error': None}, cls=GameEncoder))
    # Flush so a consumer reading our stdout sees the line immediately.
    stdout.flush()
def update(self):
    """Receive length-prefixed msgpack image frames in a blocking loop and
    display them in the GUI stream label until ``self.running`` clears.
    """
    while self.running:
        # Read the length of the image as a 32-bit unsigned int.
        data_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
        if data_len:
            printD('Updating...')
            printD('data_len: %s' % data_len)
            data = self.connection.read(data_len)
            # msgpack_numpy reconstructs the numpy array from the payload.
            deserialized_data = msgpack.unpackb(data, object_hook=msgpack_numpy.decode)
            printD('Frame received')
            #print(deserialized_data)
            #stdout.flush()
            img = Image.fromarray(deserialized_data)
            newImage = ImageTk.PhotoImage(img)
            # Keep a reference on the label, else Tk garbage-collects the
            # PhotoImage and the display goes blank.
            self.gui.stream_label.configure(image=newImage)
            self.gui.stream_label.image = newImage
            printD("image updated")
        else:
            # Zero length: nothing pending, yield the CPU briefly.
            time.sleep(0.001)
def update_2(self):
    """Tk-timer variant of update(): handle at most one frame, then re-arm
    itself via ``after`` instead of blocking in a loop.
    """
    if self.running == False:
        return
    # Read the length of the image as a 32-bit unsigned int.
    data_len = struct.unpack('<L', self.connection.read(struct.calcsize('<L')))[0]
    if data_len:
        printD('Updating...')
        printD('data_len: %s' % data_len)
        data = self.connection.read(data_len)
        deserialized_data = msgpack.unpackb(data, object_hook=msgpack_numpy.decode)
        printD('Frame received')
        #print(deserialized_data)
        #stdout.flush()
        img = Image.fromarray(deserialized_data)
        newImage = ImageTk.PhotoImage(img)
        # Keep a reference on the label, else Tk garbage-collects it.
        self.gui.stream_label.configure(image=newImage)
        self.gui.stream_label.image = newImage
    # Schedule the next poll ~70 ms out on the Tk event loop.
    self.gui.master.after(70, self.update_2)
def start(self):
    """Stream video frames over the connection as length-prefixed msgpack
    payloads until ``self.running`` is cleared by another thread.
    """
    printD("streamserver: start")
    self.running = True
    while self.running:
        frame = self.videostream.read()
        serialized_data = msgpack.packb(frame, default=msgpack_numpy.encode)
        # Write the length of the capture to the stream and flush to ensure it actually gets sent
        data_len = len(serialized_data)
        printD("data_len: %d" % data_len)
        self.connection.write(struct.pack('<L', data_len))
        self.connection.flush()
        # Send the image data over the wire
        self.connection.write(serialized_data)
        self.connection.flush()
        printD("send.")
        sleep(0.001)
def __init__(self, file=None, stringio=False, encoding=None):
    """Set up a terminal writer around *file*.

    :param file: target stream, a bare callable (wrapped in WriteFile),
        or None -- None selects sys.stdout, or an in-memory TextIO
        (exposed as ``self.stringio``) when *stringio* is true
    :param stringio: capture output in memory when *file* is None
    :param encoding: forced encoding; falls back to the stream's own
        encoding, then "utf-8"
    """
    if file is None:
        if stringio:
            self.stringio = file = py.io.TextIO()
        else:
            from sys import stdout as file
    elif py.builtin.callable(file) and not (
            hasattr(file, "write") and hasattr(file, "flush")):
        # A bare callable (e.g. a print function) is adapted to the
        # file-like write/flush interface.
        file = WriteFile(file, encoding=encoding)
    if hasattr(file, "isatty") and file.isatty() and colorama:
        # On Windows consoles, translate ANSI escapes via colorama.
        file = colorama.AnsiToWin32(file).stream
    self.encoding = encoding or getattr(file, 'encoding', "utf-8")
    self._file = file
    self.hasmarkup = should_do_markup(file)
    self._lastlen = 0
    self._chars_on_current_line = 0
def write_out(fil, msg):
    """Write *msg* to stream *fil* and flush, degrading gracefully when
    the stream's encoding cannot represent the message."""
    # XXX sometimes "msg" is of type bytes, sometimes text which
    # complicates the situation.  Should we try to enforce unicode?
    try:
        # on py27 and above writing out to sys.stdout with an encoding
        # should usually work for unicode messages (if the encoding is
        # capable of it)
        fil.write(msg)
    except UnicodeEncodeError:
        # on py26 it might not work because stdout expects bytes
        if fil.encoding:
            try:
                fil.write(msg.encode(fil.encoding))
            except UnicodeEncodeError:
                # it might still fail if the encoding is not capable
                pass
            else:
                # Encoded write succeeded: flush and stop here.
                fil.flush()
                return
        # fallback: escape all unicode characters
        msg = msg.encode("unicode-escape").decode("ascii")
        fil.write(msg)
    fil.flush()
def _get(self, url, query=None, timeout=30):
    """GET *url* with the session headers and generated payload, retrying
    forever on any request failure.

    :param url: target URL
    :param query: optional query passed through _generatePayload
    :param timeout: per-attempt timeout in seconds
    :return: the successful requests response object
    """
    payload = self._generatePayload(query)
    # BUG FIX: the original reused one variable ``a`` both as the retry
    # counter and as the response object; keep them separate.  The bare
    # ``except:`` is narrowed to Exception so Ctrl-C still interrupts.
    attempts = 0
    while 1:
        try:
            response = self._session.get(url, headers=self._header,
                                         params=payload, timeout=timeout)
        except Exception:
            #print(exc_info())
            attempts = attempts + 1
            if self.listening:
                stdout.write("_get " + url + " failed, retrying..." + str(attempts) + "\r")
                stdout.flush()
            continue
        break
    # Blank out the retry status line (width is approximate; the original
    # used a literal run of spaces).
    stdout.write(" " * 79 + "\r")
    stdout.flush()
    return response
def print_event_stream():
    """Print event stream"""
    controllers = discover()
    # for now only care about one controller
    controller = next(controllers, None) or exit('no tellstick devices found')
    raw_mode = argv[-1] == "raw"
    if raw_mode:
        packet_source = map(prepend_timestamp, controller.packets())
    else:
        packet_source = controller.events()
    for packet in packet_source:
        print(packet)
        try:
            stdout.flush()
        except IOError:
            # broken pipe
            pass
def test_pos(model, sentences, display=False): from sys import stdout count = correct = 0 for sentence in sentences: sentence = [(token[0], None) for token in sentence] pts = model.best_path(sentence) if display: print sentence print 'HMM >>>' print pts print model.entropy(sentences) print '-' * 60 else: print '\b.', stdout.flush() for token, tag in zip(sentence, pts): count += 1 if tag == token[TAG]: correct += 1 print 'accuracy over', count, 'tokens %.1f' % (100.0 * correct / count)
def updateDB():
    """ Download latest exploits DB """
    update_url = 'https://raw.githubusercontent.com/offensive-security/exploit-database/master/files.csv'
    try:
        # Trailing comma keeps "DONE." on the same console line.
        print 'Updating exploits db',
        from sys import stdout
        stdout.flush()
        import urllib
        urllib.urlretrieve(update_url, 'files.csv')
        print 'DONE.'
    except:
        # NOTE(review): the bare except hides the actual failure cause
        # (DNS, permissions, disk); consider reporting the exception.
        print 'ERROR. Unable to download.'
def main():
    """Line-delimited JSON request/response loop on stdin/stdout.

    Handles 'status' and 'hover' requests plus completion requests
    (the default); flip the local ``timings`` flag to log per-phase
    durations to stderr.
    """
    timings = False
    start = time.time()
    initialize()
    if timings:
        print('initialize {} s'.format(time.time() - start), file=stderr)
    start = time.time()
    command_table = load_command_table()
    if timings:
        print('load_command_table {} s'.format(time.time() - start), file=stderr)
    start = time.time()
    group_index = get_group_index(command_table)
    if timings:
        print('get_group_index {} s'.format(time.time() - start), file=stderr)
    start = time.time()
    snippets = get_snippets(command_table) if AUTOMATIC_SNIPPETS_ENABLED else []
    if timings:
        print('get_snippets {} s'.format(time.time() - start), file=stderr)
    while True:
        # One JSON request per line.
        line = stdin.readline()
        start = time.time()
        request = json.loads(line)
        response_data = None
        if request['data'].get('request') == 'status':
            response_data = get_status()
            if timings:
                print('get_status {} s'.format(time.time() - start), file=stderr)
        elif request['data'].get('request') == 'hover':
            response_data = get_hover_text(group_index, command_table, request['data']['command'])
            if timings:
                print('get_hover_text {} s'.format(time.time() - start), file=stderr)
        else:
            response_data = get_completions(group_index, command_table, snippets, request['data'], True)
            if timings:
                print('get_completions {} s'.format(time.time() - start), file=stderr)
        # Echo the sequence number so the client can match replies.
        response = {
            'sequence': request['sequence'],
            'data': response_data
        }
        output = json.dumps(response)
        stdout.write(output + '\n')
        stdout.flush()
        stderr.flush()
def __next__(self):
    """
    next overload.  If display is true the latest statistics are displayed.

    :return: The next number in iterator
    """
    if self.display:
        self.__restart_line()
        stdout.write(str(self))
        stdout.flush()
    if self.current >= self.end:
        raise StopIteration
    previous = self.current
    self.current += self.step
    return previous
def __restart_line():
    """
    Writes return carriage to stdout and flushes. This allows writing to
    the same line.
    :return: None
    """
    # NOTE(review): defined without *self*; presumably decorated as a
    # @staticmethod in the enclosing class -- confirm in the full file.
    stdout.write('\r')
    stdout.flush()
def monitor(s): r = s.get(url=progress_url) try: progress_data = json.loads(r.text) except ValueError: print """No JSON object could be decoded. Get progress failed to return expected data. Return code: %s """ % (r.status_code) result = ['No JSON object could be decoded\ - get progress failed to return expected data\ Return code: %s """ % (r.status_code)', False] # Timeout waiting for remote backup to complete # (since it sometimes fails) in 5s multiples global timeout timeout_count = timeout*12 # timeout x 12 = number of iterations of 5s time_left = timeout while 'fileName' not in progress_data or timeout_count > 0: # Clears the line before re-writing to avoid artifacts stdout.write("\r\x1b[2k") stdout.write("\r\x1b[2K%s. Timeout remaining: %sm" % (progress_data['alternativePercentage'], str(time_left))) stdout.flush() r = s.get(url=progress_url) progress_data = json.loads(r.text) time.sleep(5) timeout_count = timeout_count - 5 if timeout_count % 12 == 0: time_left = time_left - 1 if 'fileName' in progress_data: result = [progress_data['fileName'], True] return result
def download(s, l): filename = get_filename(s) if not filename: return False print "Filename found: %s" % filename print "Checking if url is valid" r = s.get(url=download_url + filename, stream=True) print "Status code: %s" % str(r.status_code) if int(r.status_code) == 200: print "Url returned '200', downloading file" if not create_backup_location(l): result = ['Failed to create backup location', False] return result date_time = datetime.datetime.now().strftime("%Y%m%d") with open(l + '/' + application + '-' + date_time + '.zip', 'wb') as f: file_total = 0 for chunk in r.iter_content(chunk_size=1024): if chunk: f.write(chunk) file_total = file_total + 1024 file_total_m = float(file_total)/1048576 # Clears the line before re-writing to avoid artifacts stdout.write("\r\x1b[2k") stdout.write("\r\x1b[2K%.2fMB downloaded" % file_total_m) stdout.flush() stdout.write("\n") result = ['Backup downloaded successfully', True] return result else: print "Download file not found on remote server - response code %s" % \ str(r.status_code) print "Download url: %s" % download_url + filename result = ['Download file not found on remote server', False] return result
def LoadingCallBack(j, k):
    """Progress callback: rewrite the current console line with the file
    and string counters."""
    status_line = "\r [+] Files: [{}] (strings: [{}])".format(j, k)
    stdout.write(status_line)
    stdout.flush()
def run_inversion(self):
    """Validate the file selection and launch the inversion, reporting
    failures through tkinter message boxes."""
    # Clear previous results/plots; ignore when there is nothing to clear.
    try:
        self.clear()
    except:
        pass
    self.sel_files = [str(self.open_files[i]) for i in self.text_files.curselection()]
    if len(self.sel_files) == 0:
        tkinter.messagebox.showwarning("Inversion error",
                                       "No data selected for inversion \nSelect at least one data file in the left panel", parent=self.master)
    if len(self.sel_files) >= 1:
        try:
            self.Inversion()
            stdout.flush()
        except:
            # NOTE(review): the bare except also masks programming errors
            # inside Inversion(), not just bad input files.
            tkinter.messagebox.showerror("Inversion error",
                                         "Error\nMake sure all fields are OK\nMake sure data file is correctly formatted", parent=self.master)
            return
def merge_csv_files(self):
    """Merge the per-file CSV results of the last batch into one file,
    or explain why merging is impossible."""
    if len(self.files) <= 1:
        # Nothing to merge with a single (or empty) batch.
        print("Can't merge csv files: Only 1 file inverted in last batch")
        print("=====================")
    else:
        self.sol.merge_results(self.files)
        print("=====================")
    stdout.flush()
def set_plot_par(self):
    """Load user plot parameters into matplotlib's rcParams, falling back
    to the defaults when they cannot be read."""
    # Setting up plotting parameters
    try:
        print("\nLoading plot parameters")
        rcParams.update(iR.plot_par())
        print("Plot parameters successfully loaded")
    except:
        # NOTE(review): the bare except also hides genuine errors inside
        # iR.plot_par(), not only a missing parameter file.
        print("Plot parameters not found, using default values")
    stdout.flush()
def plot_diagnostic(self, which):
    """Draw the diagnostic figure *which* for the currently selected
    result file and show it in a new plot window.

    :param which: one of "traces", "histo", "autocorr", "geweke",
        "summary", "deviance", "logp", "hexbin", "KDE"
    """
    f_n = self.var_review.get()
    sol = self.all_results[f_n]["sol"]
    try:
        if which == "traces":
            trace_plot = sol.plot_traces(save=False)
            self.plot_window(trace_plot, "Parameter traces: "+f_n)
        if which == "histo":
            histo_plot = sol.plot_histograms(save=False)
            self.plot_window(histo_plot, "Parameter histograms: "+f_n)
        if which == "autocorr":
            autocorr_plot = sol.plot_autocorrelation(save=False)
            self.plot_window(autocorr_plot, "Parameter autocorrelation: "+f_n)
        if which == "geweke":
            geweke_plot = sol.plot_scores(save=False)
            self.plot_window(geweke_plot, "Geweke scores: "+f_n)
        if which == "summary":
            summa_plot = sol.plot_summary(save=False)
            self.plot_window(summa_plot, "Parameter summary: "+f_n)
        if which == "deviance":
            devi_plot = sol.plot_model_deviance(save=False)
            self.plot_window(devi_plot, "Model deviance: "+f_n)
        if which == "logp":
            logp_plot = sol.plot_log_likelihood(save=False)
            self.plot_window(logp_plot, "Log-likelihood: "+f_n)
        if which == "hexbin":
            # Close a lingering bivariate-selection dialog, if any.
            try:
                self.top_bivar.destroy()
            except:
                pass
            hex_plot = sol.plot_hexbin(self.biv1.get(), self.biv2.get(), save=False)
            self.plot_window(hex_plot, "Hexagonal binning: "+f_n)
        if which == "KDE":
            try:
                self.top_bivar.destroy()
            except:
                pass
            kde_plot = sol.plot_KDE(self.biv1.get(), self.biv2.get(), save=False)
            self.plot_window(kde_plot, "Bivariate KDE: "+f_n)
        stdout.flush()
    except:
        # NOTE(review): bare except conflates missing results with
        # plotting bugs; the message only mentions the former.
        tkinter.messagebox.showwarning("Error analyzing results",
                                       "Error\nProblem with inversion results\nTry adding iterations", parent=self.master)
def load(self):
    """Load the saved GUI state (root_ini) from the working path, falling
    back to the defaults when the file is missing or unreadable."""
    print("\nLoading root_ini from:\n", self.working_path)
    try:
        with open(self.working_path+'root_ini') as f:
            self.root_ini = jload(f)
        print("root_ini successfully loaded")
    except:
        # Missing or corrupt file: start from built-in defaults.
        print("root_ini not found, using default values")
        self.root_ini = self.use_default_root_ini()
    stdout.flush()