The following 16 code examples, extracted from open-source Python projects, illustrate how to use wx.ProgressDialog().
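Before the extracted examples, here is a minimal, self-contained sketch of the usual wx.ProgressDialog pattern: create the dialog, advance it with Update() inside the work loop, then Destroy() it. It assumes wxPython Phoenix (wxPython 4), where Update() returns a (continue, skip) tuple; the time.sleep() call is only a placeholder for real work.

import time
import wx

app = wx.App(False)

# Modal progress dialog that can be cancelled and shows elapsed time.
dialog = wx.ProgressDialog("Working", "Starting...", maximum=100,
                           style=wx.PD_APP_MODAL | wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME)

for i in range(1, 101):
    time.sleep(0.02)  # placeholder for one unit of real work
    # Update() returns (continue, skip); continue becomes False once the user presses Cancel.
    keep_going, _ = dialog.Update(i, "Step %d of 100" % i)
    if not keep_going:
        break

dialog.Destroy()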
def on_ok(self, event):
    songs = yt_extract(self.audio_links.GetValue().split(','))
    if not songs:
        error = wx.MessageDialog(parent=self, message="Invalid/Unsupported URL!",
                                 caption="Error!", style=wx.OK | wx.ICON_WARNING)
        error.ShowModal()
        error.Destroy()
        return

    self.num_songs = len(songs)
    self.progress_dialog = wx.ProgressDialog(title="Download", message="Downloading songs...",
                                             maximum=self.num_songs * 100, parent=self, style=PD_STYLE)
    self.downloader = DownloaderThread(self, songs, self.out_dir.GetPath())
    self.downloader.start()
def on_ok(self, event):
    if not os.path.exists(self.out_dir.GetPath()):
        os.makedirs(self.out_dir.GetPath())

    self.num_songs = len(self.in_files)
    if self.num_songs <= 0:
        alert = wx.MessageDialog(self, "No songs selected!", "pyjam", wx.ICON_EXCLAMATION)
        alert.ShowModal()
        alert.Destroy()
        return

    self.progress_dialog = wx.ProgressDialog(title="Conversion", message="Converting songs...",
                                             maximum=self.num_songs * 2, parent=self, style=PD_STYLE)
    self.converter = FFmpegConvertThread(parent=self, dest=self.out_dir.GetPath(),
                                         rate=self.game_rate.GetValue(), vol=self.volume.GetValue(),
                                         songs=self.in_files)
    self.converter.start()
def start(self):
    """Start the download."""
    self._shouldCancel = False
    # Use a timer because timers aren't re-entrant.
    self._guiExecTimer = wx.PyTimer(self._guiExecNotify)
    gui.mainFrame.prePopup()
    # Translators: The title of the dialog displayed while downloading add-on update.
    self._progressDialog = wx.ProgressDialog(_("Downloading Add-on Update"),
        # Translators: The progress message indicating that a connection is being established.
        _("Connecting"),
        # PD_AUTO_HIDE is required because ProgressDialog.Update blocks at 100%
        # and waits for the user to press the Close button.
        style=wx.PD_CAN_ABORT | wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME | wx.PD_AUTO_HIDE,
        parent=gui.mainFrame)
    self._progressDialog.Raise()
    t = threading.Thread(target=self._bg)
    t.daemon = True
    t.start()
def __init__(self, title, message, maximum=100, parent=None,
             style=wx.PD_AUTO_HIDE | wx.PD_APP_MODAL):
    wx.ProgressDialog.__init__(self, title, message, maximum, parent, style)
    self.SetBackgroundColour(backgroundColour)
def __init__(self, title, maxval):
    import wx
    self.maxval = maxval
    self.last = 0
    self._pbar = wx.ProgressDialog("Working...", title, maximum=maxval,
                                   style=wx.PD_REMAINING_TIME | wx.PD_ELAPSED_TIME | wx.PD_APP_MODAL)
def trainTDENet(self, segs):
    trainData = [seg.data for seg in segs]

    convs = ((25,7),) # first session
    maxIter = 400
    nHidden = None
    poolSize = 1
    poolMethod = 'average'

    self.stand = ml.ClassSegStandardizer(trainData)
    trainDataStd = self.stand.apply(trainData)

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=maxIter+2,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    def progressCB(optable, iteration, *args, **kwargs):
        dialog.Update(iteration, 'Iteration: %d/%d' % (iteration, maxIter))

    self.classifier = ml.CNA(trainDataStd, convs=convs, nHidden=nHidden,
                             maxIter=maxIter, optimFunc=ml.optim.scg,
                             accuracy=0.0, precision=0.0,
                             poolSize=poolSize, poolMethod=poolMethod,
                             verbose=False, callback=progressCB)

    trainCA = self.classifier.ca(trainDataStd)
    trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))

    dialog.Destroy()

    resultText = (('Final Training CA: %f\n' % trainCA) +
                  ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                  ('Choices: ' + str(self.choices)))

    wx.MessageBox(message=resultText,
                  caption='Training Completed!',
                  style=wx.OK | wx.ICON_INFORMATION)

    self.saveResultText(resultText)
def trainConvNet(self, segs):
    trainData = [seg.data for seg in segs]

    #convs = ((16,9), (8,9))
    #maxIter = 400 # 49
    convs = ((8,3), (8,3), (8,3))
    maxIter = 1000
    nHidden = None
    poolSize = 1
    poolMethod = 'average'

    self.stand = ml.ClassSegStandardizer(trainData)
    trainDataStd = self.stand.apply(trainData)

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=maxIter+2,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    def progressCB(optable, iteration, *args, **kwargs):
        dialog.Update(iteration, 'Iteration: %d/%d' % (iteration, maxIter))

    self.classifier = ml.CNA(trainDataStd, convs=convs, nHidden=nHidden,
                             maxIter=maxIter, optimFunc=ml.optim.scg,
                             accuracy=0.0, precision=0.0,
                             poolSize=poolSize, poolMethod=poolMethod,
                             verbose=False, callback=progressCB)

    trainCA = self.classifier.ca(trainDataStd)
    trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))

    dialog.Destroy()

    resultText = (('Final Training CA: %f\n' % trainCA) +
                  ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                  ('Choices: ' + str(self.choices)))

    wx.MessageBox(message=resultText,
                  caption='Training Completed!',
                  style=wx.OK | wx.ICON_INFORMATION)

    self.saveResultText(resultText)
def trainClassifier(self):
    if self.trainCap is None:
        raise Exception('No data available for training.')

    nFold = self.nTrainTrial

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=nFold+1,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    segmented = self.trainCap.segment(start=0.0, end=self.trainTrialSecs)

    segs = [segmented.select(matchFunc=lambda mark: self.markToStim(mark) == choice)
            for choice in self.choices]

    assert segs[0].getNSeg() == self.nTrainTrial
    assert segs[1].getNSeg() == self.nTrainTrial

    # split segments
    segs = [cls.split(self.width, self.overlap) for cls in segs]
    ##print 'nSplit segs: ', segs[0].getNSeg()

    if self.method == 'Welch Power':
        self.trainWelch(segs, dialog)
    elif self.method == 'Autoregressive':
        self.trainAutoreg(segs, dialog)
    else:
        raise Exception('Invalid method: %s.' % str(self.method))

    if self.gameActive:
        self.plotPanel.showPong()
    else:
        self.plotPanel.showPieMenu()
def trainClassifier(self):
    if self.trainCap is None:
        raise Exception('No data available for training.')

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=self.nFold+1,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    segmented = self.trainCap.segment(start=0.0, end=self.trialSecs)

    segs = [segmented.select(matchFunc=lambda mark: self.markToStim(mark) == choice)
            for choice in self.choices]

    assert segs[0].getNSeg() == self.nTrainTrial

    ##print 'nSegs:'
    ##for sg in segs:
    ##    print sg.getNSeg(), sg.data.shape

    freqs, trainData = self.powerize(segs)

    self.plotPanel.plotFeatures(trainData, freqs, self.choices, self.trainCap.getChanNames())

    ##print trainData[0].mean(axis=0)
    ##print trainData[0].mean(axis=0).shape
    ##print trainData[1].mean(axis=0)
    ##print trainData[1].mean(axis=0).shape

    if self.classifierKind == 'lda':
        self.trainLDA(trainData, dialog)
    elif self.classifierKind == 'nn':
        self.trainNN(trainData, dialog)
    else:
        raise Exception('Invalid classifier kind: %s.' % str(self.classifierKind))

    self.plotPanel.showPieMenu()
def updateFilter(self):
    if self.trainCap is not None:
        self.dialog = wx.ProgressDialog('Training ICA', 'Training',
                                        maximum=101,
                                        style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

        STrans.updateFilter(self)

        #self.dialog.Update(101, 'Reason: %s' % self.stransFilter.reason)
        self.dialog.Destroy()
    else:
        STrans.updateFilter(self)
def __init__(self, title, entries):
    self.progressBar = wx.ProgressDialog(title, "Time remaining", entries,
                                         style=wx.PD_ELAPSED_TIME | wx.PD_REMAINING_TIME |
                                               wx.PD_AUTO_HIDE | wx.PD_CAN_ABORT | wx.PD_APP_MODAL)
def trainWelchLDA(self, trainData):
    self.stand = ml.ClassStandardizer(trainData)
    trainDataStd = self.stand.apply(trainData)

    #penalties = np.insert(np.power(10.0, np.linspace(-3.0, 0.0, 50)), 0, 0.0)
    penalties = np.linspace(0.0, 1.0, 51)

    nFold = self.nTrainTrial

    trnCA = np.zeros((nFold, penalties.size))
    valCA = np.zeros((nFold, penalties.size))

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=nFold+1,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    for fold, trnData, valData in ml.part.classStratified(trainDataStd, nFold=nFold):
        dialog.Update(fold, 'Validation Fold: %d' % fold)

        for i, penalty in enumerate(penalties):
            classifier = ml.LDA(trnData, shrinkage=penalty)

            trnCA[fold,i] = classifier.ca(trnData)
            valCA[fold,i] = classifier.ca(valData)

    dialog.Update(nFold, 'Training Final Classifier')

    meanTrnCA = np.mean(trnCA, axis=0)
    meanValCA = np.mean(valCA, axis=0)

    bestPenaltyIndex = np.argmax(meanValCA)
    bestPenalty = penalties[bestPenaltyIndex]

    bestMeanTrnCA = meanTrnCA[bestPenaltyIndex]
    bestMeanValCA = meanValCA[bestPenaltyIndex]

    self.classifier = ml.LDA(trainDataStd, shrinkage=bestPenalty)
    trainCA = self.classifier.ca(trainDataStd)
    trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))

    dialog.Destroy()

    resultText = (('Best Shrinkage: %f\n' % bestPenalty) +
                  ('Best Mean Training CA: %f\n' % bestMeanTrnCA) +
                  ('Best Mean Validation CA: %f\n' % bestMeanValCA) +
                  ('Final Training CA: %f\n' % trainCA) +
                  ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                  ('Choices: ' + str(self.choices)))

    wx.MessageBox(message=resultText,
                  caption='Training Completed!',
                  style=wx.OK | wx.ICON_INFORMATION)

    self.saveResultText(resultText)
def trainAutoregRR(self, trainData):
    self.stand = ml.ClassSegStandardizer(trainData)
    trainDataStd = self.stand.apply(trainData)

    orders = np.arange(2,30)

    nFold = self.nTrainTrial

    trnCA = np.zeros((nFold, orders.size))
    valCA = np.zeros((nFold, orders.size))

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=nFold+1,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    for fold, trnData, valData in ml.part.classStratified(trainDataStd, nFold=nFold):
        dialog.Update(fold, 'Validation Fold: %d' % fold)

        for i, order in enumerate(orders):
            classifier = ml.ARC(trnData, order=order)

            trnCA[fold,i] = classifier.ca(trnData)
            valCA[fold,i] = classifier.ca(valData)

    dialog.Update(nFold, 'Training Final Classifier')

    meanTrnCA = np.mean(trnCA, axis=0)
    meanValCA = np.mean(valCA, axis=0)

    bestOrderIndex = np.argmax(meanValCA)
    bestOrder = orders[bestOrderIndex]

    bestMeanTrnCA = meanTrnCA[bestOrderIndex]
    bestMeanValCA = meanValCA[bestOrderIndex]

    self.classifier = ml.ARC(trainDataStd, order=bestOrder)
    trainCA = self.classifier.ca(trainDataStd)
    trainConfusion = np.round(100*self.classifier.confusion(trainDataStd))

    dialog.Destroy()

    resultText = (('Best Order: %f\n' % bestOrder) +
                  ('Best Mean Training CA: %f\n' % bestMeanTrnCA) +
                  ('Best Mean Validation CA: %f\n' % bestMeanValCA) +
                  ('Final Training CA: %f\n' % trainCA) +
                  ('Confusion Matrix:\n' + str(trainConfusion) + '\n') +
                  ('Choices: ' + str(self.choices)))

    wx.MessageBox(message=resultText,
                  caption='Training Completed!',
                  style=wx.OK | wx.ICON_INFORMATION)

    self.saveResultText(resultText)
def trainClassifier(self):
    if self.trainCap is None:
        raise Exception('No data available for training.')

    self.plotPanel.plotERP(self.trainCap)

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=self.nFold+1,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    cap = self.bandpass(self.trainCap)

    seg = cap.segment(start=self.windowStart, end=self.windowEnd)
    seg = self.downsample(seg)
    print 'nSeg: ', seg.getNSeg()

    targ = seg.select(matchFunc=lambda mark: self.markToStim(mark) == 'Play')
    nTarg = targ.getNSeg()
    #print 'nTarg: ', nTarg

    #nonTarg = seg.select(matchFunc=lambda mark: self.markToStim(mark) == 'Backward')
    nonTarg = seg.select(matchFunc=lambda mark: self.markToStim(mark) != 'Play')
    nNonTarg = nonTarg.getNSeg()
    #print 'nNonTarg: ', nNonTarg

    classData = [targ.chanEmbed(), nonTarg.chanEmbed()]

    self.standardizer = ml.Standardizer(np.vstack(classData))
    classData = [self.standardizer.apply(cls) for cls in classData]
    #print 'classData shape', [cls.shape for cls in classData]

    if self.classifierKind == 'Linear Discriminant':
        self.trainLDA(classData, dialog)
    elif self.classifierKind == 'K-Nearest Euclidean':
        self.trainKNN(classData, dialog, metric='euclidean')
    elif self.classifierKind == 'K-Nearest Cosine':
        self.trainKNN(classData, dialog, metric='cosine')
    elif self.classifierKind == 'Linear Logistic':
        self.trainLGR(classData, dialog)
    elif self.classifierKind == 'Neural Network':
        self.trainNN(classData, dialog)
    else:
        raise Exception('Invalid classifier kind: %s.' % str(self.classifierKind))

    self.plotPanel.showMPlayer()
def trainClassifier(self):
    if self.trainCap is None:
        raise Exception('No data available for training.')

    self.plotPanel.plotERP(self.trainCap)

    dialog = wx.ProgressDialog('Training Classifier',
                               'Featurizing', maximum=self.nFold+1,
                               style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

    cap = self.decimate(self.trainCap)

    seg = cap.segment(start=self.windowStart, end=self.windowEnd)
    #print 'nSeg: ', seg.getNSeg()

    targ = seg.select(matchFunc=lambda mark: self.markToStim(mark) == self.targStr)
    nTarg = targ.getNSeg()
    #print 'nTarg: ', nTarg

    #nonTarg = seg.select(matchFunc=lambda mark: self.markToStim(mark) == self.nonTargStr)
    nonTarg = seg.select(matchFunc=lambda mark: self.markToStim(mark) != self.targStr)
    nNonTarg = nonTarg.getNSeg()
    #print 'nNonTarg: ', nNonTarg

    classData = [targ.chanEmbed(), nonTarg.chanEmbed()]

    self.standardizer = ml.Standardizer(np.vstack(classData))
    classData = [self.standardizer.apply(cls) for cls in classData]
    #print 'classData shape', [cls.shape for cls in classData]

    if self.classifierKind == 'LDA':
        self.trainLDA(classData, dialog)
    elif self.classifierKind == 'KNNE':
        self.trainKNN(classData, dialog, metric='euclidean')
    elif self.classifierKind == 'KNNC':
        self.trainKNN(classData, dialog, metric='cosine')
    elif self.classifierKind == 'LGR':
        self.trainLGR(classData, dialog)
    elif self.classifierKind == 'NN':
        self.trainNN(classData, dialog)
    else:
        raise Exception('Invalid classifier kind: %s.' % str(self.classifierKind))

    self.plotPanel.showPieMenu()
def updatePlot(self, event=None):
    """Draw the BMU plot."""
    # get EEG data from current source
    cap = self.src.getEEGSecs(self.width, copy=False)
    data = cap.timeEmbed(lags=self.lags)

    if (time.time() - self.lastTrainTime) > self.retrainDelay:
        progressDialog = wx.ProgressDialog('Training Classifier',
                                           'Training',
                                           maximum=self.maxIter // 50 + 1,
                                           style=wx.PD_ELAPSED_TIME | wx.PD_SMOOTH)

        def updateProgressDialog(iteration, weights, learningRate, radius):
            if not (iteration % 50):
                progressDialog.Update(updateProgressDialog.i, 'Training')
                updateProgressDialog.i += 1
        updateProgressDialog.i = 0

        if self.som is None:
            self.som = ml.SOM(data, latticeSize=self.latticeSize, maxIter=self.maxIter,
                              distMetric=self.distMetric,
                              learningRate=self.learningRate, learningRateFinal=self.learningRateFinal,
                              radius=self.radius, radiusFinal=self.radiusFinal,
                              callback=updateProgressDialog, verbose=False)
        else:
            self.som.callback = updateProgressDialog
            self.som.train(data)

        progressDialog.Destroy()

        self.lastTrainTime = time.time()

    if self.som is not None:
        points = self.som.getBMUIndices(data[-self.nPoints:,:])
    else:
        pointsX = np.round(np.random.uniform(0, self.latticeSize[0]-1, size=self.nPoints))
        pointsY = np.round(np.random.uniform(0, self.latticeSize[1]-1, size=self.nPoints))
        points = np.vstack((pointsX, pointsY)).T

    self.plot.draw(points, latticeSize=self.latticeSize)