我们从Python开源项目中,提取了以下34个代码示例,用于说明如何使用ntpath.dirname()。
def browse_picture(self):
    """Ask the user for an image, record its name/location, and show it in the anomaly preview."""
    self.image = QtGui.QFileDialog.getOpenFileName(None, 'OpenFile', 'c:\\', "Image file(*.png *.jpg)")
    #self.progressBar.setValue(0)
    self.image = str(self.image)
    self.labeLAnomaly.setStyleSheet("QLabel {background-color:red;color:white;}")
    # Remember the extension-less file name and the containing directory.
    self.file_name, _extension = os.path.splitext(ntpath.basename(self.image))
    self.file_path = ntpath.dirname(self.image)
    self.write_path = ntpath.expanduser('~\\Documents\\Document Analysis')
    # Make sure the output directory exists before anything writes there.
    if not os.path.exists(self.write_path):
        os.makedirs(self.write_path)
    if self.image:
        preview = QtGui.QPixmap(self.image)
        preview = preview.scaled(self.imgPreview.width(), self.imgPreview.height())
        self.imgPreview.setPixmap(preview)
def getValue(self, keyValue):
    """Resolve *keyValue* (full registry path) and return a (ValueType, ValueData) tuple, or None."""
    parentKey = ntpath.dirname(keyValue)
    valueName = ntpath.basename(keyValue)

    key = self.findKey(parentKey)
    if key is None:
        return None

    if key['NumValues'] > 0:
        for entry in self.__getValueBlocks(key['OffsetValueList'], key['NumValues'] + 1):
            # Match either by explicit name, or the unnamed "default" value (Flag <= 0).
            if entry['Name'] == valueName or (valueName == 'default' and entry['Flag'] <= 0):
                return entry['ValueType'], self.__getValueData(entry)

    return None
def runIndexedSearch(dbfilenameFullPath, search_space, options):
    """Run an indexed literal search against the Entries_FilePaths table.

    Writes hits to options.outputFile (text) and a sibling .mmd markdown file.
    Returns a (numHits, 0, []) tuple; (0, 0, []) when nothing matched.
    """
    # todo: Handle duplicate hit suppression
    logger.info("Performing indexed search")
    DB = appDB.DBClass(dbfilenameFullPath, True, settings.__version__)
    DB.appInitDB()
    DB.appConnectDB()

    searchTerm = options.searchLiteral[0]
    numHits = 0
    # Run actual indexed query.
    # NOTE(review): SQL built by string interpolation; searchTerm should be bound
    # as a query parameter if DB.Query supports it (search_space is a column name
    # and cannot be parameterized).
    data = DB.Query("SELECT RowID FROM Entries_FilePaths WHERE %s == '%s';" % (search_space, searchTerm))
    if data:
        # BUG FIX: ntpath.splitext() keeps the directory component, so the old
        # os.path.join(ntpath.dirname(out), ntpath.splitext(out)[0] + ".mmd")
        # doubled the directory for relative output paths.
        markdown_name = ntpath.splitext(options.outputFile)[0] + ".mmd"
        with open(options.outputFile, "w") as text_file:
            with open(markdown_name, "w") as markdown_file:
                for row in data:
                    record = retrieveSearchData(row[0], DB, search_space)
                    saveSearchData(record, None, None, text_file, markdown_file)
                    numHits += 1
        return (numHits, 0, [])
    else:
        return (0, 0, [])
def processFile(self, file_fullpath, hostID, instanceID, rowsData):
    """Parse an AmCache hive dump and append one EntriesFields row per entry to *rowsData* (mutated in place)."""
    rowNumber = 0
    file_object = loadFile(file_fullpath)
    rows = _processAmCacheFile_StringIO(file_object)
    file_object.close()
    for r in rows:
        # Split the full path into directory/file parts; both stay None when no path was recorded.
        namedrow = settings.EntriesFields(HostID = hostID, EntryType = settings.__AMCACHE__, RowNumber = rowNumber,
            FilePath = (None if r.path == None else ntpath.dirname(r.path)),
            FileName = (None if r.path == None else ntpath.basename(r.path)),
            Size = r.size,
            ExecFlag = 'True',  # AmCache entries are treated as evidence of execution
            # r.sha1[4:] drops the first four characters — presumably a hash-type prefix; TODO confirm
            SHA1 = (None if r.sha1 == None else r.sha1[4:]),
            FileDescription = r.file_description, FirstRun = r.first_run,
            Created = r.created_timestamp, Modified1 = r.modified_timestamp,
            Modified2 = r.modified_timestamp2, LinkerTS = r.linker_timestamp,
            Product = r.product, Company = r.company,
            PE_sizeofimage = r.pe_sizeofimage, Version_number = r.version_number,
            Version = r.version, Language = r.language,
            Header_hash = r.header_hash, PE_checksum = r.pe_checksum,
            SwitchBackContext = r.switchbackcontext, InstanceID = instanceID)
        rowsData.append(namedrow)
        rowNumber += 1
def browse_picture(self):
    """Let the user choose an image, store its name/path, and display it in the input label."""
    picked = QtGui.QFileDialog.getOpenFileName(None, 'OpenFile', 'c:\\', "Image file(*.png *.jpg)")
    self.progressBar.setValue(0)
    picked = str(picked)
    print(picked)
    # Extension-less file name plus containing directory.
    self.file_name, _extension = os.path.splitext(ntpath.basename(picked))
    self.file_path = ntpath.dirname(picked)
    self.write_path = ntpath.expanduser('~\\Documents\\Document Analysis')
    # Create the output directory on first use.
    if not os.path.exists(self.write_path):
        os.makedirs(self.write_path)
    if picked:
        shown = QtGui.QPixmap(picked).scaled(self.labelInputImage.width(), self.labelInputImage.height())
        self.labelInputImage.setPixmap(shown)
def browse_picture(self):
    """Pick an image, record its name/path, and preview it downscaled to a fifth of its size."""
    self.image = QtGui.QFileDialog.getOpenFileName(None, 'OpenFile', 'c:\\', "Image file(*.png *.jpg)")
    self.progressBar.setValue(0)
    self.image = str(self.image)
    print(self.image)
    # Extension-less file name plus containing directory.
    self.file_name, _extension = os.path.splitext(ntpath.basename(self.image))
    self.file_path = ntpath.dirname(self.image)
    self.write_path = ntpath.expanduser('~\\Documents\\Document Analysis')
    # Create the output directory on first use.
    if not os.path.exists(self.write_path):
        os.makedirs(self.write_path)
    if self.image:
        pixmap = QtGui.QPixmap(self.image)
        # Shrink to 1/5 of native size, keeping aspect ratio, for the preview label.
        pixmap = pixmap.scaled(pixmap.width()/5, pixmap.height()/5,
                               QtCore.Qt.KeepAspectRatio, QtCore.Qt.SmoothTransformation)
        self.labelInputImage.setPixmap(pixmap)
def extract_fbank_htk(self, scriptfile):
    '''
    Run HTK's HCopy over a script file and return the output feature-file paths.

    :param scriptfile: path to the HCopy's script file
    :return: list of path to feature files
    '''
    with open(scriptfile, 'r') as scpf:
        lines = scpf.readlines()
    # Second space-separated column of each script line is the output feature file.
    featfiles = [line.split(' ')[1].replace('\n', '') for line in lines]
    for featfile in featfiles:
        # Hoisted: dirname was previously computed twice per file.
        outdir = ntpath.dirname(featfile)
        if not os.path.exists(outdir):
            os.makedirs(outdir)
    # NOTE(review): command built by concatenation — paths containing spaces or
    # shell metacharacters will break (or abuse) os.system; consider
    # subprocess.run([...], shell=False).
    cmd = self.HCopyExe + ' -C ' + self.HConfigFile + ' -S ' + scriptfile
    os.system(cmd)
    return featfiles
def getClass(reg, className):
    # Print the class data stored at *className* (a full registry path) as a hexdump.
    # NOTE(review): Python 2 print statements — this snippet predates Python 3.
    regKey = ntpath.dirname(className)      # parent key path
    regClass = ntpath.basename(className)   # class name component
    value = reg.getClass(className)
    if value is None:
        # Nothing stored for this class; print nothing.
        return
    print "[%s]" % regKey
    print "Value for Class %s: \n" % regClass, winregistry.hexdump(value,' ')
def getValue(reg, keyValue):
    # Look up *keyValue* (full registry path) and pretty-print its value.
    # NOTE(review): Python 2 print statements — this snippet predates Python 3.
    regKey = ntpath.dirname(keyValue)       # parent key path
    regValue = ntpath.basename(keyValue)    # value name component
    value = reg.getValue(keyValue)
    print "[%s]\n" % regKey
    if value is None:
        # Key header already printed; nothing more to show.
        return
    # value is a (type, data) pair; printValue renders it per its registry type.
    print "Value for %s:\n " % regValue, reg.printValue(value[0],value[1])
def listPath(self, shareName, path, password = None):
    """List directory entries matching *path* on *shareName*; returns a list of smb.SharedFile.

    NOTE(review): Python 2 syntax (`except SessionError, e`) — predates Python 3.
    """
    # ToDo: Handle situations where share is password protected
    # Normalize to backslash separators and strip any leading one.
    path = string.replace(path,'/', '\\')
    path = ntpath.normpath(path)
    if len(path) > 0 and path[0] == '\\':
        path = path[1:]

    treeId = self.connectTree(shareName)

    fileId = None
    try:
        # ToDo, we're assuming it's a directory, we should check what the file type is
        # Open the parent directory; the basename is used below as the search pattern.
        fileId = self.create(treeId, ntpath.dirname(path),
                             FILE_READ_ATTRIBUTES | FILE_READ_DATA,
                             FILE_SHARE_READ | FILE_SHARE_WRITE | FILE_SHARE_DELETE,
                             FILE_DIRECTORY_FILE | FILE_SYNCHRONOUS_IO_NONALERT,
                             FILE_OPEN, 0)
        res = ''
        files = []
        from impacket import smb
        while True:
            try:
                # Each queryDirectory call returns a buffer of packed
                # FILE_FULL_DIRECTORY_INFORMATION records.
                res = self.queryDirectory( treeId, fileId, ntpath.basename(path),
                                           maxBufferSize = 65535,
                                           informationClass = FILE_FULL_DIRECTORY_INFORMATION )
                nextOffset = 1
                while nextOffset != 0:
                    fileInfo = smb.SMBFindFileFullDirectoryInfo(smb.SMB.FLAGS2_UNICODE)
                    fileInfo.fromString(res)
                    files.append(smb.SharedFile(fileInfo['CreationTime'], fileInfo['LastAccessTime'],
                                                fileInfo['LastChangeTime'], fileInfo['EndOfFile'],
                                                fileInfo['AllocationSize'], fileInfo['ExtFileAttributes'],
                                                fileInfo['FileName'].decode('utf-16le'),
                                                fileInfo['FileName'].decode('utf-16le')))
                    # NextEntryOffset == 0 marks the last record of this buffer.
                    nextOffset = fileInfo['NextEntryOffset']
                    res = res[nextOffset:]
            except SessionError, e:
                # STATUS_NO_MORE_FILES terminates the enumeration; anything else is real.
                if (e.get_error_code()) != STATUS_NO_MORE_FILES:
                    raise
                break
    finally:
        # Always release the open handle and tree connection.
        if fileId is not None:
            self.close(treeId, fileId)
        self.disconnectTree(treeId)

    return files
def adjustDcfFileRef(self,dcf,basedir):
    """Rebase every fileRef path in *dcf* onto *basedir*, in place.

    On POSIX hosts (cygwin/Linux) the Windows-style separators are converted
    to forward slashes so the paths are usable locally.
    """
    for elem in dcf['fileRef']:
        basename = ntpath.basename(elem['path'])
        dirname=ntpath.normpath(ntpath.join(basedir,ntpath.dirname(elem['path'])))
        elem['path']=ntpath.join(dirname,basename)
        if os.path.sep == '/': #are we running in cygwin/Linux?
            # BUG FIX: the old replace(r'\\','/') looked for TWO consecutive
            # backslashes, but ntpath.join emits single-backslash separators,
            # so the conversion was a no-op. Replace each single backslash.
            elem['path'] = elem['path'].replace('\\','/')
def readFile(self,filename):
    """Parse *filename* as a DCF document and return it with its file
    references rebased onto the file's own directory."""
    directory = ntpath.dirname(filename)
    dcf = self.parseDCF(self.parseXML(filename))
    self.adjustDcfFileRef(dcf, directory)
    return dcf
def _adjustFileRef(self,fileRef,basedir): basename = ntpath.basename(fileRef['path']) dirname=ntpath.normpath(ntpath.join(basedir,ntpath.dirname(fileRef['path']))) retval=ntpath.join(dirname,basename) if os.path.sep == '/': #are we running in cygwin/Linux? retval = retval.replace(r'\\','/') return retval
def _loadPackageInternal(self, result, xmlPackage, packagename, role):
    """Load one XML package when its SHORT-NAME matches *packagename* ('*' matches any).

    Creates (and registers) the package object on first sight, parses its XML
    content, and assigns *role* on an exact name match. Returns True when the
    package was processed, False otherwise.
    """
    name = xmlPackage.find("./SHORT-NAME").text
    if packagename != '*' and packagename != name:
        return False
    package = self.find(name)
    if package is None:
        package = autosar.package.Package(name, parent=self)
        self.packages.append(package)
        result.append(package)
    self.packageParser.loadXML(package, xmlPackage)
    # Roles are only assigned on an exact name match, never via the '*' wildcard.
    if (packagename == name) and (role is not None):
        self.setRole(package.ref, role)
    return True
    # (a long commented-out loadJSON draft previously lived here; removed as dead code)
def KnownBadRegexCount(file_full_path):
    """Count known-bad regexes in the base file plus every '<name>-*<ext>' sibling file.

    Returns the combined count across the base file and all extras matching the glob.
    """
    file_name, file_extension = os.path.splitext(file_full_path)
    # Load base file
    total_regex_count = KnownBadRegexCountFile(file_full_path)
    # Load extra files (e.g. 'rules-custom.txt' next to 'rules.txt')
    for filename in glob.iglob(file_name + '-*' + file_extension):
        total_regex_count += KnownBadRegexCountFile(filename)
    return total_regex_count
    # FIX: removed unused local `file_path = ntpath.dirname(file_full_path)`
def directoryPath(filepath):
    """
    Returns directory path for a given filepath

    >>> directoryPath('/var/log/apache.log')
    '/var/log'
    """
    if not filepath:
        # Empty/None input is passed straight through.
        return filepath
    # Pick the flavor matching the path style (Windows drive letter vs POSIX).
    splitter = ntpath.dirname if isWindowsDriveLetterPath(filepath) else posixpath.dirname
    return splitter(filepath)
def getRemoteTempPath(self):
    """Resolve (once) the remote temporary-files directory, cache it in conf.tmpPath, and return it.

    For MSSQL the error-log directory is preferred (writable by the service);
    otherwise an OS-dependent default is used. The result is normalized to
    POSIX slashes and persisted in the session hashdb.
    """
    if not conf.tmpPath and Backend.isDbms(DBMS.MSSQL):
        debugMsg = "identifying Microsoft SQL Server error log directory "
        debugMsg += "that sqlmap will use to store temporary files with "
        debugMsg += "commands' output"
        logger.debug(debugMsg)

        # SERVERPROPERTY('ErrorLogFileName') yields the full log path; its
        # directory is used as the temp location.
        _ = unArrayizeValue(inject.getValue("SELECT SERVERPROPERTY('ErrorLogFileName')", safeCharEncode=False))

        if _:
            conf.tmpPath = ntpath.dirname(_)

    if not conf.tmpPath:
        if Backend.isOs(OS.WINDOWS):
            if conf.direct:
                conf.tmpPath = "%TEMP%"
            else:
                # Need the exact Windows version to pick the right temp folder.
                self.checkDbmsOs(detailed=True)

                if Backend.getOsVersion() in ("2000", "NT"):
                    conf.tmpPath = "C:/WINNT/Temp"
                elif Backend.isOs("XP"):
                    conf.tmpPath = "C:/Documents and Settings/All Users/Application Data/Temp"
                else:
                    conf.tmpPath = "C:/Windows/Temp"
        else:
            conf.tmpPath = "/tmp"

    # A leading drive letter ("C:\...") reveals a Windows back-end.
    if re.search(r"\A[\w]:[\/\\]+", conf.tmpPath, re.I):
        Backend.setOs(OS.WINDOWS)

    conf.tmpPath = normalizePath(conf.tmpPath)
    conf.tmpPath = ntToPosixSlashes(conf.tmpPath)

    singleTimeDebugMessage("going to use '%s' as temporary files directory" % conf.tmpPath)

    hashDBWrite(HASHDB_KEYS.CONF_TMP_PATH, conf.tmpPath)

    return conf.tmpPath
def browse_picture(self):
    """Let the user choose an image, store its name/path, and display it in the input label."""
    chosen = QtGui.QFileDialog.getOpenFileName(None, 'OpenFile', 'c:\\', "Image file(*.png *.jpg)")
    chosen = str(chosen)
    print(chosen)
    # Extension-less file name plus containing directory.
    self.file_name, _extension = os.path.splitext(ntpath.basename(chosen))
    self.file_path = ntpath.dirname(chosen)
    self.write_path = ntpath.expanduser('~\\Documents\\Document Analysis')
    # Create the output directory on first use.
    if not os.path.exists(self.write_path):
        os.makedirs(self.write_path)
    if chosen:
        shown = QtGui.QPixmap(chosen).scaled(self.labelInputImage.width(), self.labelInputImage.height())
        self.labelInputImage.setPixmap(shown)
def getRemoteTempPath(self):
    """Resolve (once) the remote temporary-files directory, cache it in conf.tmpPath, and return it.

    For MSSQL the error-log directory is preferred (writable by the service);
    otherwise an OS-dependent default is used. The result is normalized to
    POSIX slashes and persisted in the session hashdb.
    """
    if not conf.tmpPath and Backend.isDbms(DBMS.MSSQL):
        debugMsg = "identifying Microsoft SQL Server error log directory "
        debugMsg += "that sqlmap will use to store temporary files with "
        debugMsg += "commands' output"
        logger.debug(debugMsg)

        # SERVERPROPERTY('ErrorLogFileName') yields the full log path; its
        # directory is used as the temp location.
        _ = unArrayizeValue(inject.getValue("SELECT SERVERPROPERTY('ErrorLogFileName')", safeCharEncode=False))

        if _:
            conf.tmpPath = ntpath.dirname(_)

    if not conf.tmpPath:
        if Backend.isOs(OS.WINDOWS):
            if conf.direct:
                conf.tmpPath = "%TEMP%"
            else:
                # Need the exact Windows version to pick the right temp folder.
                self.checkDbmsOs(detailed=True)

                if Backend.getOsVersion() in ("2000", "NT"):
                    conf.tmpPath = "C:/WINNT/Temp"
                elif Backend.isOs("XP"):
                    conf.tmpPath = "C:/Documents and Settings/All Users/Application Data/Temp"
                else:
                    conf.tmpPath = "C:/Windows/Temp"
        else:
            conf.tmpPath = "/tmp"

    # A leading drive letter ("C:\...") reveals a Windows back-end.
    if re.search(r"\A[\w]:[\/\\]+", conf.tmpPath, re.I):
        Backend.setOs(OS.WINDOWS)

    conf.tmpPath = normalizePath(conf.tmpPath)
    conf.tmpPath = ntToPosixSlashes(conf.tmpPath)

    singleTimeDebugMessage("going to use %s as temporary files directory" % conf.tmpPath)

    hashDBWrite(HASHDB_KEYS.CONF_TMP_PATH, conf.tmpPath)

    return conf.tmpPath
def open_value(self, path):
    """Split *path* into its parent key and value name, then open that value."""
    return self.open_key(ntpath.dirname(path)).open_value(ntpath.basename(path))
def search(self, results, media, lang, manual):
    """Dispatch a metadata search to the site matching the media's file name/path.

    NOTE(review): Python 2 (`urllib.unquote`); Log/matcher come from the plugin framework.
    """
    fn = urllib.unquote(media.filename)
    filename = ntpath.basename(fn)
    path = ntpath.dirname(fn)
    # Pick the site handler whose patterns match this file name/location.
    website = self.matcher.Find(filename, path, self.websites)
    if website is None:
        Log("No matching site found!")
        return
    # Delegate; the handler appends candidates into *results*.
    website.search(results, filename, media, lang, manual)
    results.Sort('score', descending=True)
def processFile(self, file_fullpath, hostID, instanceID, rowsData):
    """Stream-parse a PersistenceItem XML report and append EntriesFields rows to *rowsData* (in place).

    NOTE(review): Python 2 (`print e.message`) — predates Python 3.
    """
    rowNumber = 0
    # Tags that must be present and non-None for an entry to be usable.
    check_tags = ['LastModified', 'FilePath']
    # the 'end' event signifies when the end of the XML node has been reached,
    # and therefore when all values can be parsed
    try:
        xml_data = loadFile(file_fullpath)
        for event, element in etree.iterparse(xml_data, events=("end",)):
            skip_entry = False
            tag_dict = {}
            if element.tag == "PersistenceItem":
                self._processElement(element, tag_dict)

                # Check we have everything we need and ignore entries with critical XML errors on them
                for tag in check_tags:
                    if tag in tag_dict:
                        if tag_dict[tag] is None:
                            if 'AppCompatPath' in tag_dict:
                                logger.warning("Malformed tag [%s: %s] in %s, entry: %s (skipping entry)" % (tag, tag_dict[tag], tag_dict['AppCompatPath'], file_fullpath))
                            else:
                                logger.warning("Malformed tag [%s: %s] in %s, entry: Unknown (skipping entry)" % (tag, tag_dict[tag], file_fullpath))
                            skip_entry = True
                            break

                # If the entry is valid do some housekeeping:
                if not skip_entry:
                    # Normalize '1'/'0' to booleans; anything else is kept verbatim.
                    if tag_dict['ExecutionFlag'] == '1':
                        tmpExecFlag = True
                    elif tag_dict['ExecutionFlag'] == '0':
                        tmpExecFlag = False
                    else:
                        tmpExecFlag = tag_dict['ExecutionFlag']
                    # Timestamps arrive ISO-like ('...T...Z'); strip markers, default to a sentinel date.
                    namedrow = settings.EntriesFields(HostID=hostID, EntryType=settings.__APPCOMPAT__, RowNumber=rowNumber, InstanceID=instanceID,
                        LastModified=(tag_dict['LastModified'].replace("T"," ").replace("Z","") if 'LastModified' in tag_dict else '0001-01-01 00:00:00'),
                        LastUpdate=(tag_dict['LastUpdate'].replace("T"," ").replace("Z","") if 'LastUpdate' in tag_dict else '0001-01-01 00:00:00'),
                        FileName=ntpath.basename(tag_dict['FilePath']),
                        FilePath=ntpath.dirname(tag_dict['FilePath']),
                        Size=(tag_dict['Size'] if 'Size' in tag_dict else 'N/A'),
                        ExecFlag=tmpExecFlag)
                    rowsData.append(namedrow)
                    rowNumber += 1
            else:
                pass
            # Free the element to keep iterparse memory bounded.
            element.clear()
        xml_data.close()
    except Exception as e:
        print e.message
        print traceback.format_exc()
        pass
def processFile(self, file_fullpath, hostID, instanceID, rowsData):
    """Stream-parse an AppCompatItemExtended XML report and append EntriesFields rows to *rowsData* (in place).

    NOTE(review): Python 2 (`print e.message`) — predates Python 3.
    """
    rowNumber = 0
    # Tags that must be present and non-None for an entry to be usable.
    check_tags = ['LastModified', 'AppCompatPath']
    try:
        xml_data = loadFile(file_fullpath)
        for event, element in etree.iterparse(xml_data, events=("end",)):
            skip_entry = False
            tag_dict = {}
            if element.tag == "AppCompatItemExtended":
                self._processElement(element, tag_dict)

                # From time to time we get some entries with no real data on them for some unknown reason, skip for now
                if 'AppCompatPath' in tag_dict:
                    if tag_dict['AppCompatPath'] == 'N/A':
                        logger.debug("ShimCache entry with no AppCompatPath [ControlSetSeq: %s], entry: %s. (skipping entry)" % (tag_dict['ControlSetSeq'], file_fullpath))
                        break

                # Check we have everything we need and ignore entries with critical XML errors on them
                for tag in check_tags:
                    if tag not in tag_dict or tag_dict[tag] is None:
                        # Distinguish a missing tag from a present-but-None tag in the log message.
                        if tag not in tag_dict:
                            if 'AppCompatPath' in tag_dict:
                                logger.warning("Missing tag [%s] in %s, entry: %s (skipping entry)" % (tag, tag_dict['AppCompatPath'], file_fullpath))
                            else:
                                logger.warning("Malformed tag [%s] in %s, entry: Unknown (skipping entry)" % (tag, file_fullpath))
                            skip_entry = True
                            break
                        if tag_dict[tag] is None:
                            if 'AppCompatPath' in tag_dict:
                                logger.warning("Malformed tag [%s: %s] in %s, entry: %s (skipping entry)" % (tag, tag_dict[tag], tag_dict['AppCompatPath'], file_fullpath))
                            else:
                                logger.warning("Malformed tag [%s: %s] in %s, entry: Unknown (skipping entry)" % (tag, tag_dict[tag], file_fullpath))
                            skip_entry = True
                            break

                # If the entry is valid do some housekeeping:
                if not skip_entry:
                    # Normalize '1'/'0' to booleans; anything else is kept verbatim.
                    if tag_dict['ExecutionFlag'] == '1':
                        tmpExecFlag = True
                    elif tag_dict['ExecutionFlag'] == '0':
                        tmpExecFlag = False
                    else:
                        tmpExecFlag = tag_dict['ExecutionFlag']
                    # Timestamps arrive ISO-like ('...T...Z'); strip markers, default to a sentinel date.
                    namedrow = settings.EntriesFields(HostID=hostID, EntryType=settings.__APPCOMPAT__, RowNumber=rowNumber, InstanceID=instanceID,
                        LastModified=(tag_dict['LastModified'].replace("T"," ").replace("Z","") if 'LastModified' in tag_dict else '0001-01-01 00:00:00'),
                        LastUpdate=(tag_dict['LastUpdate'].replace("T"," ").replace("Z","") if 'LastUpdate' in tag_dict else '0001-01-01 00:00:00'),
                        FileName=ntpath.basename(tag_dict['AppCompatPath']),
                        FilePath=ntpath.dirname(tag_dict['AppCompatPath']),
                        Size=(tag_dict['Size'] if 'Size' in tag_dict else 'N/A'),
                        ExecFlag=tmpExecFlag)
                    rowsData.append(namedrow)
                    rowNumber += 1
            else:
                pass
            # Free the element to keep iterparse memory bounded.
            element.clear()
        xml_data.close()
    except Exception as e:
        print e.message
        print traceback.format_exc()
        pass
def processFile(self, file_fullpath, hostID, instanceID, rowsData):
    """Stream-parse a ShimCacheItem XML report and append EntriesFields rows to *rowsData* (in place).

    NOTE(review): Python 2 (`print e.message`) — predates Python 3.
    """
    rowNumber = 0
    # Tags that must be present and non-None for an entry to be usable.
    check_tags = ['LastModified', 'AppCompatPath']
    try:
        xml_data = loadFile(file_fullpath)
        for event, element in etree.iterparse(xml_data, events=("end",)):
            skip_entry = False
            tag_dict = {}
            if element.tag == "ShimCacheItem":
                self._processElement(element, tag_dict)

                # Check we have everything we need and ignore entries with critical XML errors on them
                for tag in check_tags:
                    if tag not in tag_dict or tag_dict[tag] is None:
                        if 'AppCompatPath' in tag_dict:
                            logger.warning("Malformed tag [%s] in %s, entry: %s (skipping entry)" % (tag, tag_dict['AppCompatPath'], file_fullpath))
                        else:
                            logger.warning("Malformed tag [%s: %s] in %s, entry: Unknown (skipping entry)" % (tag, tag_dict[tag], file_fullpath))
                        skip_entry = True
                        break

                # If the entry is valid do some housekeeping:
                if not skip_entry:
                    if 'ExecutionFlag' in tag_dict:
                        tmpExexFlag = tag_dict['ExecutionFlag']
                    else:
                        # Note that Shim Shady does not extract ExecFlag on some platforms (at least Windows 10).
                        tmpExexFlag = 'unk'
                    # Timestamps arrive ISO-like ('...T...Z'); strip markers, default to a sentinel date.
                    namedrow = settings.EntriesFields(HostID=hostID, EntryType=settings.__APPCOMPAT__, RowNumber=rowNumber, InstanceID=instanceID,
                        LastModified=(tag_dict['LastModified'].replace("T"," ").replace("Z","") if 'LastModified' in tag_dict else '0001-01-01 00:00:00'),
                        LastUpdate=(tag_dict['LastUpdate'].replace("T"," ").replace("Z","") if 'LastUpdate' in tag_dict else '0001-01-01 00:00:00'),
                        FileName=ntpath.basename(tag_dict['AppCompatPath']),
                        FilePath=ntpath.dirname(tag_dict['AppCompatPath']),
                        Size=(tag_dict['Size'] if 'Size' in tag_dict else 'N/A'),
                        ExecFlag=tmpExexFlag)
                    rowsData.append(namedrow)
                    rowNumber += 1
            else:
                pass
            # Free the element to keep iterparse memory bounded.
            element.clear()
        xml_data.close()
    except Exception as e:
        print e.message
        print traceback.format_exc()
        pass