The following 50 code examples, extracted from open-source Python projects, illustrate how to use sys.stderr.
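Before the extracted examples, here is a minimal self-contained sketch (written for this page, not taken from any of the projects below) showing the two patterns that recur throughout them: passing file=sys.stderr to print() in Python 3, and calling sys.stderr.write() directly.

import sys

def warn(message):
    # print() with file=sys.stderr keeps diagnostics out of stdout,
    # so they are not mixed into output that may be piped elsewhere.
    print("WARNING:", message, file=sys.stderr)

def report_progress(done, total):
    # Writing to the stream directly; write() expects a single str and
    # does not append a newline for you.
    sys.stderr.write("processed %d/%d items\n" % (done, total))
    sys.stderr.flush()

if __name__ == "__main__":
    warn("something unexpected happened")
    report_progress(3, 10)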
def get_named_set(lang_codes, feature_set):
    if feature_set == 'id':
        return get_id_set(lang_codes)

    if feature_set not in FEATURE_SETS:
        print("ERROR: Invalid feature set " + feature_set, file=sys.stderr)
        sys.exit()

    filename, source, prefix = FEATURE_SETS[feature_set]
    feature_database = np.load(filename)
    lang_codes = [get_language_code(l, feature_database) for l in lang_codes]
    lang_indices = [get_language_index(l, feature_database) for l in lang_codes]
    feature_names = get_feature_names(prefix, feature_database)
    feature_indices = [get_feature_index(f, feature_database) for f in feature_names]
    source_index = get_source_index(source, feature_database)
    feature_values = feature_database["data"][lang_indices,:,:][:,feature_indices,:][:,:,source_index]
    feature_values = feature_values.squeeze(axis=2)
    return feature_names, feature_values
def convert_image(inpath, outpath, size):
    """Convert an image file using `sips`.

    Args:
        inpath (str): Path of source file.
        outpath (str): Path to destination file.
        size (int): Width and height of destination image in pixels.

    Raises:
        RuntimeError: Raised if `sips` exits with non-zero status.

    """
    cmd = [
        b'sips',
        b'-z', b'{0}'.format(size), b'{0}'.format(size),
        inpath,
        b'--out', outpath]
    # log().debug(cmd)
    with open(os.devnull, 'w') as pipe:
        retcode = subprocess.call(cmd, stdout=pipe, stderr=subprocess.STDOUT)

    if retcode != 0:
        raise RuntimeError('sips exited with {0}'.format(retcode))
def print_exception(etype, value, tb, limit=None, file=None):
    """Print exception up to 'limit' stack trace entries from 'tb' to 'file'.

    This differs from print_tb() in the following ways: (1) if
    traceback is not None, it prints a header "Traceback (most recent
    call last):"; (2) it prints the exception type and value after the
    stack trace; (3) if type is SyntaxError and value has the
    appropriate format, it prints the line where the syntax error
    occurred with a caret on the next line indicating the approximate
    position of the error.
    """
    if file is None:
        file = sys.stderr
    if tb:
        _print(file, 'Traceback (most recent call last):')
        print_tb(tb, limit, file)
    lines = format_exception_only(etype, value)
    for line in lines:
        _print(file, line, '')
def build(self, file):
    if self.built:
        raise PermissionError("You cannot build multiple times!")
    if not self.loaded:
        self.load(file)
    old = os.getcwd()
    sys.path.append(os.path.dirname(os.path.abspath(file)))  # for module imports that aren't "include" calls
    try:
        content = open(file, "rb").read()
        os.chdir(os.path.dirname(os.path.abspath(file)))  # set the current working directory, for open() etc.
        exec(compile(content, file, 'exec'), self.user_functions)
    except Exception as err:
        print("An exception occurred while building: ", file=sys.stderr)
        lines = traceback.format_exc(None, err).splitlines()
        print(" " + lines[-1], file=sys.stderr)
        for l in lines[3:-1]:
            print(l, file=sys.stderr)
        exit(1)
    os.chdir(old)
    sys.path.remove(os.path.dirname(os.path.abspath(file)))
    self.built = True
def load(self, file):
    if self.loaded:
        return
    sys.path.append(os.path.dirname(os.path.abspath(file)))  # for module imports that aren't "include" calls
    old = os.getcwd()
    try:
        content = open(file, "rb").read()
        os.chdir(os.path.dirname(os.path.abspath(file)))  # set the current working directory, for open() etc.
        exec(compile(content, file, 'exec'), self.user_functions)
    except Exception as err:
        print("An exception occurred while loading: ", file=sys.stderr)
        lines = traceback.format_exc(None, err).splitlines()
        print(" " + lines[-1], file=sys.stderr)
        for l in lines[3:-1]:
            print(l, file=sys.stderr)
        exit(1)
    os.chdir(old)
    sys.path.remove(os.path.dirname(os.path.abspath(file)))
    self.loaded = True
    self.mem_offset = 0
def log(message, level=None):
    """Write a message to the juju log"""
    command = ['juju-log']
    if level:
        command += ['-l', level]
    if not isinstance(message, six.string_types):
        message = repr(message)
    command += [message]
    # Missing juju-log should not cause failures in unit tests
    # Send log output to stderr
    try:
        subprocess.call(command)
    except OSError as e:
        if e.errno == errno.ENOENT:
            if level:
                message = "{}: {}".format(level, message)
            message = "juju-log: {}".format(message)
            print(message, file=sys.stderr)
        else:
            raise
def generate2():
    """
    Call an external Python 2 program to retrieve the AST symbols of that
    language version
    :return:
    """
    import subprocess as sp
    import tempfile, shutil, sys, traceback

    tempdir = tempfile.mkdtemp()
    tempfile = os.path.join(tempdir, "py2_ast_code.py")
    py2_proc_out = ""

    try:
        with open(tempfile, 'w') as py2code:
            py2code.write(generate_str + WRITESYMS_CODE)
        py2_proc_out = sp.check_output(["python2", tempfile]).decode()
    finally:
        try:
            shutil.rmtree(tempdir)
        except:
            print("Warning: error trying to delete the temporary directory:", file=sys.stderr)
            print(traceback.format_exc(), file=sys.stderr)

    return set(py2_proc_out.splitlines())
def check_response(self, response):
    if type(response) is not dict:
        self.error('Bad response : ' + str(response))
    status = response.get('response_code', -1)
    if status == 204:
        self.error('VirusTotal api rate limit exceeded (Status 204).')
    if status != 200:
        self.error('Bad status : ' + str(status))
    results = response.get('results', {})
    if 'verbose_msg' in results:
        print >> sys.stderr, str(results.get('verbose_msg'))
    return results

# 0 => not found
# -2 => in queue
# 1 => ready
def _get_embedding_layer(self, embedding_file=None):
    if self.embedding_layer is None:
        if embedding_file is None:
            if not self.tune_embedding:
                print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
                self.tune_embedding = True
            embedding = None
        else:
            # Put the embedding in a list for Keras to treat it as initial weights of the embedding
            # layer.
            embedding = [self.data_processor.get_embedding_matrix(embedding_file, onto_aware=False)]
        vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
        self.embedding_layer = Embedding(input_dim=vocab_size, output_dim=self.embed_dim,
                                         weights=embedding, trainable=self.tune_embedding,
                                         mask_zero=True, name="embedding")
    return self.embedding_layer
def build(self, input_shape):
    self.input_spec = [InputSpec(shape=input_shape)]
    input_dim = input_shape[-1]
    reader_input_shape = self.get_reader_input_shape(input_shape)
    print >>sys.stderr, "NSE reader input shape:", reader_input_shape
    writer_input_shape = (input_shape[0], 1, self.output_dim * 2)  # Will process one timestep at a time
    print >>sys.stderr, "NSE writer input shape:", writer_input_shape
    composer_input_shape = self.get_composer_input_shape(input_shape)
    print >>sys.stderr, "NSE composer input shape:", composer_input_shape
    self.reader.build(reader_input_shape)
    self.writer.build(writer_input_shape)
    self.composer.build(composer_input_shape)
    # Aggregate weights of individual components for this layer.
    reader_weights = self.reader.trainable_weights
    writer_weights = self.writer.trainable_weights
    composer_weights = self.composer.trainable_weights
    self.trainable_weights = reader_weights + writer_weights + composer_weights
    if self.initial_weights is not None:
        self.set_weights(self.initial_weights)
        del self.initial_weights
def process_test_data(self, input_file, onto_aware, is_labeled=True):
    if not self.model:
        raise RuntimeError, "Model not trained yet!"
    print >>sys.stderr, "Reading test data"
    label_ind = []
    tagged_sentences = []
    for line in open(input_file):
        lnstrp = line.strip()
        if is_labeled:
            label, tagged_sentence = lnstrp.split("\t")
            if label not in self.label_map:
                self.label_map[label] = len(self.label_map)
            label_ind.append(self.label_map[label])
        else:
            tagged_sentence = lnstrp
        tagged_sentences.append(tagged_sentence)
    print >>sys.stderr, "Indexing test data"
    # Infer max sentence length if the model is trained
    input_shape = self.model.get_input_shape_at(0)[0]  # take the shape of the first of two inputs at 0.
    sentlenlimit = input_shape[1]  # (num_sentences, num_words, num_senses, num_hyps)
    test_inputs = self.data_processor.prepare_paired_input(tagged_sentences, onto_aware=onto_aware,
                                                           sentlenlimit=sentlenlimit, for_test=True)
    test_labels = self.data_processor.make_one_hot(label_ind)
    return test_inputs, test_labels
def _factor_target_indices(self, Y_inds, vocab_size=None, base=2):
    if vocab_size is None:
        vocab_size = len(self.dp.word_index)
    print >>sys.stderr, "Factoring targets of vocabulary size: %d" % (vocab_size)
    num_vecs = int(math.ceil(math.log(vocab_size) / math.log(base))) + 1
    base_inds = []
    div_Y_inds = Y_inds
    print >>sys.stderr, "Number of factors: %d" % num_vecs
    for i in range(num_vecs):
        new_inds = div_Y_inds % base
        if i == num_vecs - 1:
            if new_inds.sum() == 0:
                # Most significant "digit" is a zero. Omit it.
                break
        base_inds.append(new_inds)
        div_Y_inds = numpy.copy(div_Y_inds / base)
    base_vecs = [self._make_one_hot(base_inds_i, base) for base_inds_i in base_inds]
    return base_vecs
def get_attention(self, C_ind):
    if not self.model:
        raise RuntimeError, "Model not trained!"
    model_embedding = None
    model_lstm = None
    for layer in self.model.layers:
        if layer.name.lower() == "embedding":
            model_embedding = layer
        if layer.name.lower() == "sent_lstm":
            model_lstm = layer
    if model_embedding is None or model_lstm is None:
        raise RuntimeError, "Did not find expected layers"
    lstm_weights = model_lstm.get_weights()
    embedding_weights = model_embedding.get_weights()
    embed_in_dim, embed_out_dim = embedding_weights[0].shape
    att_embedding = HigherOrderEmbedding(input_dim=embed_in_dim, output_dim=embed_out_dim,
                                         weights=embedding_weights)
    onto_lstm = OntoAttentionLSTM(input_dim=embed_out_dim, output_dim=embed_out_dim,
                                  input_length=model_lstm.input_length, num_senses=self.num_senses,
                                  num_hyps=self.num_hyps, use_attention=True, return_attention=True,
                                  weights=lstm_weights)
    att_input = Input(shape=C_ind.shape[1:], dtype='int32')
    att_sent_rep = att_embedding(att_input)
    att_output = onto_lstm(att_sent_rep)
    att_model = Model(input=att_input, output=att_output)
    att_model.compile(optimizer='adam', loss='mse')  # optimizer and loss are not needed since we are not going to train this model.
    C_att = att_model.predict(C_ind)
    print >>sys.stderr, "Got attention values. Input, output shapes:", C_ind.shape, C_att.shape
    return C_att
def read_preposition_senses(self):
    num_senses_per_prep = []
    for filename in os.listdir(self.prep_senses_dir):
        if '.defs.xml' in filename:
            prep_str = filename.replace('.defs.xml', '')
            xml_root = ElementTree.parse("%s/%s" % (self.prep_senses_dir, filename)).getroot()
            senses = []
            for child_el in xml_root.getchildren():
                sense_id = child_el.findtext('senseid')
                if sense_id is not None:
                    # This will add strings like 'into-1(1)'
                    senses.append("%s-%s" % (prep_str, sense_id))
            num_senses_per_prep.append(len(senses))
            self.prep_senses[prep_str] = senses
    num_preps = len(self.prep_senses)
    print >>sys.stderr, "Read senses for %d prepositions." % num_preps
    print >>sys.stderr, "Senses per preposition: %f" % (float(sum(num_senses_per_prep)) / num_preps)

# TODO: Take a coarse-grained mapping file and implement the following function.
def get_environ(self):
    env = {}
    # The following code snippet does not follow PEP8 conventions
    # but it's formatted the way it is for demonstration purposes
    # to emphasize the required variables and their values
    #
    # Required WSGI variables
    env['wsgi.version']      = (1, 0)
    env['wsgi.url_scheme']   = 'http'
    env['wsgi.input']        = StringIO(self.request_data)
    env['wsgi.errors']       = sys.stderr
    env['wsgi.multithread']  = False
    env['wsgi.multiprocess'] = False
    env['wsgi.run_once']     = False
    # Required CGI variables
    env['REQUEST_METHOD']    = self.request_method    # GET
    env['PATH_INFO']         = self.path              # /hello
    env['SERVER_NAME']       = self.server_name       # localhost
    env['SERVER_PORT']       = str(self.server_port)  # 8888
    return env
def main():
    """Small main program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error as msg:
        sys.stdout = sys.stderr
        print(msg)
        print("""usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'""" % sys.argv[0])
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test(); return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout.buffer)
    else:
        func(sys.stdin.buffer, sys.stdout.buffer)
def debug(msg, level=1, protId=0):
    if not DEBUG:
        return
    if level <= DEBUG:
        out = '[%s] DEBUG: ' % time.strftime('%H:%M:%S')
        if protId:
            out += 'ID: %d ; ' % protId
        out += msg
        print(out, file=sys.stderr)
# }}}

# Response Constants {{{
#
# Constants for responses back to the MTA. You should use these actions
# at the end of each callback. If none of these are specified,
# CONTINUE is used as the default
#
def create_smcog_hit(cur, feature, gene_id):
    '''Create an smCOG hit entry'''
    try:
        smcog_name, smcog_score, smcog_evalue = parse_smcog(feature)
        smcog_score = float(smcog_score)
        smcog_evalue = float(smcog_evalue)
        smcog_id = get_smcog_id(cur, smcog_name)
        cur.execute("SELECT gene_id, smcog_id FROM antismash.smcog_hits WHERE smcog_id = %s AND gene_id = %s",
                    (smcog_id, gene_id))
        ret = cur.fetchone()
        if ret is None:
            cur.execute("INSERT INTO antismash.smcog_hits (score, evalue, smcog_id, gene_id) VALUES (%s, %s, %s, %s)",
                        (smcog_score, smcog_evalue, smcog_id, gene_id))
    except ValueError as e:
        # no smcog qualifier is an expected error, don't log that
        err_msg = str(e)
        if not (err_msg.startswith('No smcog qualifier') or err_msg.startswith('No note qualifier')):
            print(e, file=sys.stderr)
def main(cb, args):
    username = args.get("username")
    password = args.get("password")
    output = args.get("output")
    extensions = args.get("extensions").split(",")

    listener = ExtensionFileWatcherAndGrabber(args.get('server_url'), cb, username, password, extensions, output)

    try:
        print "Extension File Watcher and Grabber -- started. Watching for:", extensions
        listener.process()
    except KeyboardInterrupt:
        print >> sys.stderr, "Caught Ctrl-C"
        listener.stop()
    print "Extension File Watcher and Grabber -- stopped."
def test_main(args):
    """ """
    parser = argparse.ArgumentParser(description=description())
    parser.add_argument('-f', '--image_file', default='../examples/GodRoss.jpg', type=str,
                        help='The file path of the image to test')
    parser.add_argument('test_name', type=str, help='The name of the test to perform')
    args = parser.parse_args(args)
    try:
        if args.test_name == "test_many_random":
            test_many_random(args.image_file, 5, 5)
        elif args.test_name == "test_multi_origin":
            test_multi_origin(args.image_file, 4)
        else:
            print("Error: Test function {} doesn't exist".format(args.test_name), file=sys.stderr)
    except OSError:
        print("Error: File: {} doesn't exist".format(args.image_file), file=sys.stderr)
def notify(self, *args, **kwargs):
    """
    Call all listener callbacks. The wrapped function is not executed.
    """
    for listener in self.nm_listeners:
        # Ensure that all callbacks get called, and no exceptions escape to
        # the caller.
        try:
            listener(*args, **kwargs)
        except:
            # If the target is a notify_callback--which should always be
            # the case as long as the proper API is used--show the actual
            # function
            target = getattr(listener, 'nc_func', listener)
            print >>sys.stderr, 'Exception in notification %s:' % (repr(target),)
            traceback.print_exception(*sys.exc_info())
def redirectSTDOUT(filename):
    if _DEBUG == True:
        print "redirectSTDOUT(): redirecting stdout/stderr to filename " + str(filename)
    if type(filename) == str:
        dirname = os.path.dirname(filename)
        if len(dirname) == 0 or \
           (len(dirname) > 0 and os.path.isdir(dirname)):
            try:
                f = open(filename, 'w')
                # Send stdout and stderr to provided filename
                sys.stdout = f
                sys.stderr = f
            except Exception, e:
                print "redirectSTDOUT(): ERROR - Unable to open file " + str(filename) + " for writing stdout and stderr " + str(e)
    elif type(filename) == cStringIO.OutputType:
        sys.stdout = filename
        sys.stderr = filename
    else:
        print 'redirectSTDOUT(): failed to redirect stdout/stderr to ' + str(filename)
        print 'redirectSTDOUT(): argument must be: string filename, cStringIO.StringIO object'
def _install_modules(command_table):
    for cmd in command_table:
        command_table[cmd].load_arguments()
    try:
        mods_ns_pkg = import_module('azure.cli.command_modules')
        installed_command_modules = [modname for _, modname, _ in
                                     pkgutil.iter_modules(mods_ns_pkg.__path__)
                                     if modname not in BLACKLISTED_MODS]
    except ImportError:
        pass
    for mod in installed_command_modules:
        try:
            mod = import_module('azure.cli.command_modules.' + mod)
            mod.load_params(mod)
            mod.load_commands()
        except Exception:  # pylint: disable=broad-except
            print("Error loading: {}".format(mod), file=stderr)
            traceback.print_exc(file=stderr)
    _update_command_definitions(command_table)
def _run(self):
    with tf.Session() as session:
        self.io.restore_session(session)
        inputs = sys.stdin
        singsen = SingleSentenceData()
        scounter = SpeedCounter().start()
        while True:
            senlen = singsen.read_from_file(sys.stdin, self.io.w2id)
            if senlen is None:
                break
            if senlen < 2:
                print(-9999)
                continue
            o = run_epoch(session, self.test_model, singsen)
            scounter.next()
            if self.params.progress and scounter.val % 20 == 0:
                print("\rLoglikes per secs: %f" % scounter.speed, end="", file=sys.stderr)
            print("%f" % o)
def _restoreStdout(self):
    if self.buffer:
        if self._mirrorOutput:
            output = sys.stdout.getvalue()
            error = sys.stderr.getvalue()
            if output:
                if not output.endswith('\n'):
                    output += '\n'
                self._original_stdout.write(STDOUT_LINE % output)
            if error:
                if not error.endswith('\n'):
                    error += '\n'
                self._original_stderr.write(STDERR_LINE % error)

        sys.stdout = self._original_stdout
        sys.stderr = self._original_stderr
        self._stdout_buffer.seek(0)
        self._stdout_buffer.truncate()
        self._stderr_buffer.seek(0)
        self._stderr_buffer.truncate()
def s_unload(self, *args):
    """Unload the module.

    Removes it from the restricted environment's sys.modules dictionary.

    This method is implicitly called by code executing in the
    restricted environment.  Overriding this method in a subclass is
    used to change the policies enforced by a restricted environment.

    Similar to the r_unload() method, but has access to restricted
    versions of the standard I/O streams sys.stdin, sys.stderr, and
    sys.stdout.
    """
    return self.s_apply(self.r_unload, args)

# Restricted open(...)
def data(self, msg):
    """SMTP 'DATA' command -- sends message data to server.

    Automatically quotes lines beginning with a period per rfc821.
    Raises SMTPDataError if there is an unexpected reply to the
    DATA command; the return value from this method is the final
    response code received when the all data is sent.
    """
    self.putcmd("data")
    (code, repl) = self.getreply()
    if self.debuglevel > 0:
        print>>stderr, "data:", (code, repl)
    if code != 354:
        raise SMTPDataError(code, repl)
    else:
        q = quotedata(msg)
        if q[-2:] != CRLF:
            q = q + CRLF
        q = q + "." + CRLF
        self.send(q)
        (code, msg) = self.getreply()
        if self.debuglevel > 0:
            print>>stderr, "data:", (code, msg)
        return (code, msg)
def connect(self, host='localhost', port=0):
    """Connect to the LMTP daemon, on either a Unix or a TCP socket."""
    if host[0] != '/':
        return SMTP.connect(self, host, port)

    # Handle Unix-domain sockets.
    try:
        self.sock = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
        self.sock.connect(host)
    except socket.error, msg:
        if self.debuglevel > 0:
            print>>stderr, 'connect fail:', host
        if self.sock:
            self.sock.close()
        self.sock = None
        raise socket.error, msg
    (code, msg) = self.getreply()
    if self.debuglevel > 0:
        print>>stderr, "connect:", msg
    return (code, msg)

# Test the sendmail method, which tests most of the others.
# Note: This always sends to localhost.
def _raw_input(prompt="", stream=None, input=None):
    # A raw_input() replacement that doesn't save the string in the
    # GNU readline history.
    if not stream:
        stream = sys.stderr
    if not input:
        input = sys.stdin
    prompt = str(prompt)
    if prompt:
        stream.write(prompt)
        stream.flush()
    # NOTE: The Python C API calls flockfile() (and unlock) during readline.
    line = input.readline()
    if not line:
        raise EOFError
    if line[-1] == '\n':
        line = line[:-1]
    return line
def handleError(self, record):
    """
    Handle errors which occur during an emit() call.

    This method should be called from handlers when an exception is
    encountered during an emit() call. If raiseExceptions is false,
    exceptions get silently ignored. This is what is mostly wanted
    for a logging system - most users will not care about errors in
    the logging system, they are more interested in application errors.
    You could, however, replace this with a custom handler if you wish.
    The record which was being processed is passed in to this method.
    """
    if raiseExceptions:
        ei = sys.exc_info()
        try:
            traceback.print_exception(ei[0], ei[1], ei[2], None, sys.stderr)
            sys.stderr.write('Logged from file %s, line %s\n' % (
                             record.filename, record.lineno))
        except IOError:
            pass  # see issue 5971
        finally:
            del ei
def callHandlers(self, record):
    """
    Pass a record to all relevant handlers.

    Loop through all handlers for this logger and its parents in the
    logger hierarchy. If no handler was found, output a one-off error
    message to sys.stderr. Stop searching up the hierarchy whenever a
    logger with the "propagate" attribute set to zero is found - that
    will be the last logger whose handlers are called.
    """
    c = self
    found = 0
    while c:
        for hdlr in c.handlers:
            found = found + 1
            if record.levelno >= hdlr.level:
                hdlr.handle(record)
        if not c.propagate:
            c = None  # break out
        else:
            c = c.parent
    if (found == 0) and raiseExceptions and not self.manager.emittedNoHandlerWarning:
        sys.stderr.write("No handlers could be found for logger"
                         " \"%s\"\n" % self.name)
        self.manager.emittedNoHandlerWarning = 1
def __init__(self, group=None, target=None, name=None,
             args=(), kwargs=None, verbose=None):
    assert group is None, "group argument must be None for now"
    _Verbose.__init__(self, verbose)
    if kwargs is None:
        kwargs = {}
    self.__target = target
    self.__name = str(name or _newname())
    self.__args = args
    self.__kwargs = kwargs
    self.__daemonic = self._set_daemon()
    self.__ident = None
    self.__started = Event()
    self.__stopped = False
    self.__block = Condition(Lock())
    self.__initialized = True
    # sys.stderr is not stored in the class like
    # sys.exc_info since it can be changed between instances
    self.__stderr = _sys.stderr
def _run_exitfuncs():
    """run any registered exit functions

    _exithandlers is traversed in reverse order so functions are executed
    last in, first out.
    """

    exc_info = None
    while _exithandlers:
        func, targs, kargs = _exithandlers.pop()
        try:
            func(*targs, **kargs)
        except SystemExit:
            exc_info = sys.exc_info()
        except:
            import traceback
            print >> sys.stderr, "Error in atexit._run_exitfuncs:"
            traceback.print_exc()
            exc_info = sys.exc_info()

    if exc_info is not None:
        raise exc_info[0], exc_info[1], exc_info[2]
def test():
    """Small test program"""
    import sys, getopt
    try:
        opts, args = getopt.getopt(sys.argv[1:], 'deut')
    except getopt.error, msg:
        sys.stdout = sys.stderr
        print msg
        print """usage: %s [-d|-e|-u|-t] [file|-]
        -d, -u: decode
        -e: encode (default)
        -t: encode and decode string 'Aladdin:open sesame'""" % sys.argv[0]
        sys.exit(2)
    func = encode
    for o, a in opts:
        if o == '-e': func = encode
        if o == '-d': func = decode
        if o == '-u': func = decode
        if o == '-t': test1(); return
    if args and args[0] != '-':
        with open(args[0], 'rb') as f:
            func(f, sys.stdout)
    else:
        func(sys.stdin, sys.stdout)
def __init__(self, counts=None, calledfuncs=None, infile=None,
             callers=None, outfile=None):
    self.counts = counts
    if self.counts is None:
        self.counts = {}
    self.counter = self.counts.copy()  # map (filename, lineno) to count
    self.calledfuncs = calledfuncs
    if self.calledfuncs is None:
        self.calledfuncs = {}
    self.calledfuncs = self.calledfuncs.copy()
    self.callers = callers
    if self.callers is None:
        self.callers = {}
    self.callers = self.callers.copy()
    self.infile = infile
    self.outfile = outfile
    if self.infile:
        # Try to merge existing counts file.
        try:
            counts, calledfuncs, callers = \
                pickle.load(open(self.infile, 'rb'))
            self.update(self.__class__(counts, calledfuncs, callers))
        except (IOError, EOFError, ValueError), err:
            print >> sys.stderr, ("Skipping counts file %r: %s"
                                  % (self.infile, err))
def add_to_vcs(self, summary):
    if (
        self.git_add
        and (SyncStatus.DELETED in summary or SyncStatus.ADDED in summary)
        and not self.dry_run
        and self.confirm(
            question=(
                'Do you want to add created and removed files to GIT?'
            )
        )
    ):
        output, errors = subprocess.Popen(
            ['git', '-C', app_settings.SYNC_DIRECTORY,
             'add', '-A', app_settings.SYNC_DIRECTORY],
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE
        ).communicate()
        if errors:
            raise self.error('Adding file changes to GIT failed!')
def reporterrors(self, job, jobres):
    # type: (ExeCall, ExeResult) -> None
    if not self.should_report_error(job, jobres):
        return
    category = INFO_PROCERRORS
    if jobres.error is not None:
        iprint(category, red("Error: calling %s caused this error: %s" % (job.exe, jobres.error)))
    else:
        iprint(category, red("Error: %s returned code %s" % (job.exe, jobres.returncode)))
    iprint(category, " for these arguments: %s" % colored_cmdargs(job.cmdargs, RED))
    if jobres.stderr:
        text = jobres.stderr
        try:
            text = unistr(text)
        except UnicodeDecodeError:
            pass
        iprint(INFO_PROCERRORS, 'formatter stderr:"""\\\n%s"""' % red(text))
def debug(self, fh=sys.stderr):
    self.cursor.execute('select * from kv')
    pprint.pprint(self.cursor.fetchall(), stream=fh)
    self.cursor.execute('select * from kv_revisions')
    pprint.pprint(self.cursor.fetchall(), stream=fh)
def get_language_code(lang_code, feature_database):
    # first, normalize to an ISO 639-3 code
    if lang_code in LETTER_CODES:
        lang_code = LETTER_CODES[lang_code]
    if lang_code not in feature_database["langs"]:
        print("ERROR: Language " + lang_code + " not found.", file=sys.stderr)
        sys.exit(2)
    return lang_code
def __init__(self):
    self.__set_encoding()

    # Prepare in/out/err streams
    self.fperror = sys.stderr
    self.fpinput = sys.stdin
    self.fpoutput = sys.stdout

    # Load input
    self.__input = json.load(self.fpinput)

    # Set parameters
    self.data_type = self.get_param('dataType', None, 'Missing dataType field')
    self.tlp = self.get_param('tlp', 2)
    self.enable_check_tlp = self.get_param('config.check_tlp', False)
    self.max_tlp = self.get_param('config.max_tlp', 2)

    # Set proxy configuration if available
    self.http_proxy = self.get_param('config.proxy.http')
    self.https_proxy = self.get_param('config.proxy.https')
    self.__set_proxies()

    # Finally run check tlp
    if not (self.__check_tlp()):
        self.error('TLP is higher than allowed.')

    # Not breaking compatibility
    self.artifact = self.__input

    # Check for auto extraction config
    self.auto_extract = self.get_param('config.auto_extract', True)

# Not breaking compatibility
def __set_encoding(self):
    try:
        if sys.stdout.encoding != 'UTF-8':
            if sys.version_info[0] == 3:
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout.buffer, 'strict')
            else:
                sys.stdout = codecs.getwriter('utf-8')(sys.stdout, 'strict')
        if sys.stderr.encoding != 'UTF-8':
            if sys.version_info[0] == 3:
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr.buffer, 'strict')
            else:
                sys.stderr = codecs.getwriter('utf-8')(sys.stderr, 'strict')
    except:
        pass
def failureMessage(message):
    """ Displaying a message."""
    printLine(message, "\n", sys.stderr)
def CheckForUpdates(fileServerPort, debug):
    """
    Check for updates.

    Channel options are stable, beta & alpha
    Patches are only created & applied on the stable channel
    """
    assert CLIENT_CONFIG.PUBLIC_KEY is not None
    client = Client(CLIENT_CONFIG, refresh=True)
    appUpdate = client.update_check(CLIENT_CONFIG.APP_NAME,
                                    wxupdatedemo.__version__,
                                    channel='stable')
    if appUpdate:
        if hasattr(sys, "frozen"):
            downloaded = appUpdate.download()
            if downloaded:
                status = UpdateStatus.EXTRACTING_UPDATE_AND_RESTARTING
                if 'WXUPDATEDEMO_TESTING_FROZEN' in os.environ:
                    sys.stderr.write("Exiting with status: %s\n"
                                     % UPDATE_STATUS_STR[status])
                    ShutDownFileServer(fileServerPort)
                    sys.exit(0)
                ShutDownFileServer(fileServerPort)
                if debug:
                    logger.debug('Extracting update and restarting...')
                time.sleep(10)
                appUpdate.extract_restart()
            else:
                status = UpdateStatus.UPDATE_DOWNLOAD_FAILED
        else:
            status = UpdateStatus.UPDATE_AVAILABLE_BUT_APP_NOT_FROZEN
    else:
        status = UpdateStatus.NO_AVAILABLE_UPDATES
    return status
def Run(argv, clientConfig=None):
    """
    The main entry point.
    """
    args = ParseArgs(argv)
    if args.version:
        DisplayVersionAndExit()
    InitializeLogging(args.debug)
    fileServerDir = os.environ.get('PYUPDATER_FILESERVER_DIR')
    fileServerPort = StartFileServer(fileServerDir)
    if fileServerPort:
        UpdatePyUpdaterClientConfig(clientConfig, fileServerPort)
        status = CheckForUpdates(fileServerPort, args.debug)
    else:
        status = UpdateStatus.COULDNT_CHECK_FOR_UPDATES
    if 'WXUPDATEDEMO_TESTING_FROZEN' in os.environ:
        sys.stderr.write("Exiting with status: %s\n"
                         % UPDATE_STATUS_STR[status])
        ShutDownFileServer(fileServerPort)
        sys.exit(0)
    mainLoop = (argv[0] != 'RunTester')
    if not 'WXUPDATEDEMO_TESTING_FROZEN' in os.environ:
        return PyUpdaterWxDemoApp.Run(
            fileServerPort, UPDATE_STATUS_STR[status], mainLoop)
    else:
        return None
def _get_embedding_layer(self, embedding_file=None):
    if self.embedding_layer is None:
        word_vocab_size = self.data_processor.get_vocab_size(onto_aware=False)
        synset_vocab_size = self.data_processor.get_vocab_size(onto_aware=True)
        if embedding_file is None:
            if not self.tune_embedding:
                print >>sys.stderr, "Pretrained embedding is not given. Setting tune_embedding to True."
                self.tune_embedding = True
            embedding_weights = None
        else:
            # TODO: Other sources for prior initialization
            embedding = self.data_processor.get_embedding_matrix(embedding_file, onto_aware=True)
            # Put the embedding in a list for Keras to treat it as weights of the embedding layer.
            embedding_weights = [embedding]
            if self.set_sense_priors:
                initial_sense_prior_parameters = numpy.random.uniform(low=0.01, high=0.99,
                                                                      size=(word_vocab_size, 1))
                # While setting weights, Keras wants trainable weights first, and then the non trainable
                # weights. If we are not tuning the embedding, we need to keep the sense priors first.
                if not self.tune_embedding:
                    embedding_weights = [initial_sense_prior_parameters] + embedding_weights
                else:
                    embedding_weights.append(initial_sense_prior_parameters)
        self.embedding_layer = OntoAwareEmbedding(word_vocab_size, synset_vocab_size, self.embed_dim,
                                                  weights=embedding_weights, mask_zero=True,
                                                  set_sense_priors=self.set_sense_priors,
                                                  tune_embedding=self.tune_embedding, name="embedding")
    return self.embedding_layer
def process_data(self, input_file, onto_aware, for_test=False):
    '''
    Reads an input file and makes input for training or testing.
    '''
    dataset_type = "test" if for_test else "training"
    print >>sys.stderr, "Reading %s data" % dataset_type
    label_ind = []
    tagged_sentences = []
    max_sentence_length = 0
    all_sentence_lengths = []
    for line in open(input_file):
        lnstrp = line.strip()
        label, tagged_sentence = lnstrp.split("\t")
        sentence_length = len(tagged_sentence.split())
        all_sentence_lengths.append(sentence_length)
        if sentence_length > max_sentence_length:
            max_sentence_length = sentence_length
        label_ind.append(int(label))
        tagged_sentences.append(tagged_sentence)
    if for_test:
        if not self.model:
            raise RuntimeError("Model not trained yet!")
        input_shape = self.model.get_input_shape_at(0)  # (num_sentences, num_words, ...)
        sentlenlimit = input_shape[1]
    else:
        sentlenlimit = max_sentence_length
    # We need to readjust the labels because padding would affect the sentence indices.
    for i in range(len(label_ind)):
        length = all_sentence_lengths[i]
        label_ind[i] += sentlenlimit - length
    if not for_test:
        # Shuffling so that when Keras does validation split, it is not always at the end.
        sentences_and_labels = zip(tagged_sentences, label_ind)
        random.shuffle(sentences_and_labels)
        tagged_sentences, label_ind = zip(*sentences_and_labels)
    print >>sys.stderr, "Indexing %s data" % dataset_type
    inputs = self.data_processor.prepare_input(tagged_sentences, onto_aware=onto_aware,
                                               sentlenlimit=sentlenlimit, for_test=for_test,
                                               remove_singletons=False)
    labels = self.data_processor.make_one_hot(label_ind)
    return inputs, labels
def define_attention_model(self):
    '''
    Take necessary parts out of the model to get OntoLSTM attention.
    '''
    if not self.model:
        raise RuntimeError("Model not trained yet!")
    input_shape = self.model.get_input_shape_at(0)
    input_layer = Input(input_shape[1:], dtype='int32')  # removing batch size
    embedding_layer = None
    encoder_layer = None
    for layer in self.model.layers:
        if layer.name == "embedding":
            embedding_layer = layer
        elif layer.name == "onto_lstm":
            # We need to redefine the OntoLSTM layer with the learned weights and set return attention to True.
            # Assuming we'll want attention values for all words (return_sequences = True)
            if isinstance(layer, Bidirectional):
                onto_lstm = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                              num_senses=self.num_senses, num_hyps=self.num_hyps,
                                              use_attention=True, return_attention=True,
                                              return_sequences=True, consume_less='gpu')
                encoder_layer = Bidirectional(onto_lstm, weights=layer.get_weights())
            else:
                encoder_layer = OntoAttentionLSTM(input_dim=self.embed_dim, output_dim=self.embed_dim,
                                                  num_senses=self.num_senses, num_hyps=self.num_hyps,
                                                  use_attention=True, return_attention=True,
                                                  return_sequences=True, consume_less='gpu',
                                                  weights=layer.get_weights())
            break
    if not embedding_layer or not encoder_layer:
        raise RuntimeError("Required layers not found!")
    attention_output = encoder_layer(embedding_layer(input_layer))
    self.attention_model = Model(inputs=input_layer, outputs=attention_output)
    print >>sys.stderr, "Attention model summary:"
    self.attention_model.summary()
    self.attention_model.compile(loss="mse", optimizer="sgd")  # Loss and optimizer do not matter!