The following 48 code examples, extracted from open source Python projects, illustrate how to use matplotlib.dates.date2num().
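Before the examples, here is a minimal, self-contained sketch of what date2num() does: it converts datetime objects (or sequences of them) to matplotlib's float-day representation, which is the format the examples below feed into plotting calls and date arithmetic. Note that the epoch of the float representation depends on the matplotlib version (days since 0001-01-01 plus one in older releases; days since 1970-01-01 by default since 3.3), so the printed numbers are version-dependent.

import datetime as dt
from matplotlib.dates import date2num, num2date

now = dt.datetime(2020, 1, 2, 12, 0)   # noon on 2020-01-02
x = date2num(now)                      # float days; the epoch depends on the matplotlib version
print(x, num2date(x))                  # num2date() inverts the conversion

# Sequences convert element-wise, which is why the examples below pass
# whole date columns through date2num() before plotting.
xs = date2num([now, now + dt.timedelta(hours=6)])
print(xs[1] - xs[0])                   # 0.25 -> a quarter of a day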
def __loadTicksFromMongo(self, host, port, dbName, symbolName, startDatetimeStr, endDatetimeStr):
    """mid Load tick datetimes and last prices from MongoDB between the given start and end datetimes."""
    mongoConnection = MongoClient(host=host, port=port)
    collection = mongoConnection[dbName][symbolName]

    startDate = dt.datetime.strptime(startDatetimeStr, '%Y-%m-%d %H:%M:%S')
    endDate = dt.datetime.strptime(endDatetimeStr, '%Y-%m-%d %H:%M:%S')
    cx = collection.find({'datetime': {'$gte': startDate, '$lte': endDate}})

    tickDatetimeNums = []
    tickPrices = []
    for d in cx:
        tickDatetimeNums.append(mpd.date2num(d['datetime']))
        tickPrices.append(d['lastPrice'])
    return tickDatetimeNums, tickPrices

#----------------------------------------------------------------------
def handler_all_price_csv(txtPath: str, pictureRoot: str):
    """Read the all-prices CSV and generate labelled K-line (candlestick) chart images.

    :param txtPath: path of the input txt/csv file
    :param pictureRoot: output directory for the images
    """
    if not os.path.exists(pictureRoot):
        os.mkdir(pictureRoot)
    DATA = pd.read_csv(txtPath, low_memory=False)
    # Convert the date column to matplotlib.dates time format.
    # NOTE: the original Chinese column names were garbled in the source;
    # 'date' and 'name' are descriptive placeholders.
    DATA['date'] = DATA['date'].map(
        lambda x: mdates.date2num(datetime.datetime.strptime(x, '%Y/%m/%d'))
        if re.match(r"[0-9]{4}/[0-9]{1,2}/[0-9]{1,2}", str(x)) else x)
    # Collect the distinct stock names.
    stocknames = list(set(DATA['name'].tolist()))
    n = 0
    while n < 11000:
        stockname = random.choice(stocknames)
        data = DATA[DATA['name'] == stockname]  # all rows for this stock
        length = len(data)  # number of data points for this stock
        if length < 360:
            continue  # need at least 120 + 240 data points
        idx = random.randint(360, length - 1)
        df = data.iloc[idx - 120:idx]  # the 120 rows before index idx
        # print(len(df))
        kplot(df, os.path.join(pictureRoot, "%s-%s" % (stockname, str(idx).zfill(6))))
        n += 1
def extract_subset(self, start_date, end_date):
    """Extract a shorter time series.

    Given start and end dates, return another ChannelList object
    holding a subset of the original.
    """
    templist = ChannelList()
    tsub = [t for t in self.t
            if date2num(t) >= date2num(start_date) and date2num(t) <= date2num(end_date)]
    indexes = [self.t.index(t) for t in tsub]
    indexes.sort()
    templist.line = [self.line[ind] for ind in indexes]
    templist.station = [self.station[ind] for ind in indexes]
    templist.alt = [self.alt[ind] for ind in indexes]
    templist.grav = [self.grav[ind] for ind in indexes]
    templist.sd = [self.sd[ind] for ind in indexes]
    templist.tiltx = [self.tiltx[ind] for ind in indexes]
    templist.tilty = [self.tilty[ind] for ind in indexes]
    templist.temp = [self.temp[ind] for ind in indexes]
    templist.etc = [self.etc[ind] for ind in indexes]
    templist.dur = [self.dur[ind] for ind in indexes]
    templist.rej = [self.rej[ind] for ind in indexes]
    templist.t = [self.t[ind] for ind in indexes]
    templist.keepdata = [self.keepdata[ind] for ind in indexes]
    return templist
def timevect(d_StartDate, d_EndDate, c_TimeFreq, DT=None):
    f_Time = []
    d_Time = []
    while d_StartDate <= d_EndDate:
        d_Time.append(d_StartDate)
        f_Time.append(date2num(d_StartDate))
        f_Date_aux = date2num(d_StartDate)
        if c_TimeFreq == 'Monthly':
            DT_aux = monthrange(num2date(f_Date_aux).year, num2date(f_Date_aux).month)[1]
            DT = dt.timedelta(days=DT_aux)
        elif c_TimeFreq == 'Yearly':
            # finding out if it is a leap-year
            if isleap(d_StartDate.year + 1):
                DT = dt.timedelta(days=366)
            else:
                DT = dt.timedelta(days=365)
        d_StartDate += DT
    return f_Time, d_Time
def load_symbol(self):
    start = parser.parse(str(self.ui_controller.dateStartEdit.text()))
    end = parser.parse(str(self.ui_controller.dateEndEdit.text()))
    symbol = str(self.ui_controller.symbolLineEdit.text())
    if not symbol:
        return
    data = _load_raw_yahoo_data(stocks=[symbol], indexes={}, start=start, end=end)
    self.df = data[symbol]
    self.df.columns = [col.lower() for col in self.df.columns]
    self.df['datetime'] = self.df.index
    self.df['datetime'] = self.df.apply(
        lambda row: mdates.date2num(row['datetime']), axis=1)
    if 'adj close' in self.df.columns:
        self.df['close'] = self.df['adj close']
    self.ui_controller.matplotlibWidget.set_data(self.df)
    self.ui_controller.matplotlibWidget.draw_data()
    self.ui_controller.symbolLineEdit.setText('')
def _candlestick_ax(df, ax):
    quotes = df.reset_index()
    quotes.loc[:, 'datetime'] = mdates.date2num(quotes.loc[:, 'datetime'].astype(dt.date))
    fplt.candlestick_ohlc(ax, quotes.values, width=0.4, colorup='red', colordown='green')
def add_cal_coefficients_to_axes(ax, table):
    """Plots calibration coefficients on the CO plot."""
    _xlim = ax.get_xlim()
    _ylim = ax.get_ylim()
    for line in table:
        # print(line)
        if ((date2num(datetime.datetime.strptime(line[0], '%Y-%m-%d %H:%M:%S')) > _xlim[0]) &
                (date2num(datetime.datetime.strptime(line[1], '%Y-%m-%d %H:%M:%S')) < _xlim[1])):
            x = (date2num(datetime.datetime.strptime(line[0], '%Y-%m-%d %H:%M:%S')) +
                 date2num(datetime.datetime.strptime(line[1], '%Y-%m-%d %H:%M:%S'))) / 2.0
            y = 0.8 * _ylim[1]
            # http://stackoverflow.com/questions/17086847/box-around-text-in-matplotlib
            ax.text(x, y, '\n'.join([i.strip() for i in line[2:]]),
                    horizontalalignment='center',
                    verticalalignment='top',
                    fontsize='small',
                    color='black',
                    bbox=dict(facecolor='wheat', edgecolor='black', boxstyle='round,pad=0.6'))
def plot_day_summary_oclh(ax, quotes, ticksize=3, colorup='r', colordown='g'):
    """Plots day summary

    Represent the time, open, close, high, low as a vertical line
    ranging from low to high. The left tick is the open and the right
    tick is the close.

    Parameters
    ----------
    ax : `Axes`
        an `Axes` instance to plot to
    quotes : sequence of (time, open, close, high, low, ...) sequences
        data to plot. time must be in float date format - see date2num
    ticksize : int
        open/close tick marker in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open

    Returns
    -------
    lines : list
        list of tuples of the lines added (one tuple per quote)
    """
    return _plot_day_summary(ax, quotes, ticksize=ticksize,
                             colorup=colorup, colordown=colordown,
                             ochl=True)
def plot_day_summary_ohlc(ax, quotes, ticksize=3, colorup='r', colordown='g'):
    """Plots day summary

    Represent the time, open, high, low, close as a vertical line
    ranging from low to high. The left tick is the open and the right
    tick is the close.

    Parameters
    ----------
    ax : `Axes`
        an `Axes` instance to plot to
    quotes : sequence of (time, open, high, low, close, ...) sequences
        data to plot. time must be in float date format - see date2num
    ticksize : int
        open/close tick marker in points
    colorup : color
        the color of the lines where close >= open
    colordown : color
        the color of the lines where close < open

    Returns
    -------
    lines : list
        list of tuples of the lines added (one tuple per quote)
    """
    return _plot_day_summary(ax, quotes, ticksize=ticksize,
                             colorup=colorup, colordown=colordown,
                             ochl=False)
def candlestick_ochl(ax, quotes, width=0.2, colorup='r', colordown='g', alpha=1.0):
    """
    Plot the time, open, close, high, low as a vertical line ranging
    from low to high.  Use a rectangular bar to represent the
    open-close span.  If close >= open, use colorup to color the bar,
    otherwise use colordown

    Parameters
    ----------
    ax : `Axes`
        an Axes instance to plot to
    quotes : sequence of (time, open, close, high, low, ...) sequences
        As long as the first 5 elements are these values,
        the record can be as long as you want (e.g., it may store volume).
        time must be in float days format - see date2num
    width : float
        fraction of a day for the rectangle width
    colorup : color
        the color of the rectangle where close >= open
    colordown : color
        the color of the rectangle where close < open
    alpha : float
        the rectangle alpha level

    Returns
    -------
    ret : tuple
        returns (lines, patches) where lines is a list of lines
        added and patches is a list of the rectangle patches added
    """
    return _candlestick(ax, quotes, width=width, colorup=colorup,
                        colordown=colordown, alpha=alpha, ochl=True)
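As a usage note for the two day-summary functions and candlestick_ochl above: the quotes argument is a sequence of per-record tuples whose first element is a date2num() float. Below is a hedged sketch of how such quotes might be built and plotted; the prices are made up, and candlestick_ochl is assumed to be in scope (it lived in the legacy matplotlib.finance module, later the mpl_finance package) or defined as above.

import datetime as dt
import matplotlib.pyplot as plt
from matplotlib.dates import date2num

# (time as float days, open, close, high, low) -- hypothetical prices
quotes = [
    (date2num(dt.datetime(2020, 1, 2)), 10.0, 10.5, 10.8, 9.9),
    (date2num(dt.datetime(2020, 1, 3)), 10.5, 10.2, 10.9, 10.1),
]
fig, ax = plt.subplots()
candlestick_ochl(ax, quotes, width=0.4, colorup='r', colordown='g')
ax.xaxis_date()  # tell matplotlib the x values are date2num floats
plt.show()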
def __getTickDatetimeByXPosition(self, xAxis):
    """mid Map an x-axis position (a datetimeNum) back to a tick datetime.

    The x value read from the view is the tick datetime in date2num
    form; if it falls in the valid range, convert it back to a
    datetime, otherwise return the raw value.

    return: str
    """
    tickDatetimeRet = xAxis
    minYearDatetimeNum = mpd.date2num(dt.datetime(1900, 1, 1))
    if xAxis > minYearDatetimeNum:
        tickDatetime = mpd.num2date(xAxis).astimezone(pytz.timezone('utc'))
        if tickDatetime.year >= 1900:
            tickDatetimeRet = tickDatetime
    return tickDatetimeRet

#----------------------------------------------------------------------
def getTickDatetimeByXPosition(self, xAxis):
    """mid Convert an x-axis position back to the corresponding tick datetime."""
    tickDatetimeRet = xAxis
    minYearDatetimeNum = mpd.date2num(dt.datetime(1900, 1, 1))
    if xAxis > minYearDatetimeNum:
        tickDatetime = mpd.num2date(xAxis).astimezone(pytz.timezone('utc'))
        if tickDatetime.year >= 1900:
            tickDatetimeRet = tickDatetime
    return tickDatetimeRet
def initHistoricalData(self, symbol):
    """Initialize the historical data."""
    d = {}
    cx = self.mainEngine.dbQuery(MINUTE_DB_NAME, symbol, d)

    if cx:
        for data in cx:
            date = datetime.strptime(data['date'], "%Y%m%d")
            n = date2num(date)
            o = data['open']  # OHLC
            h = data['high']
            l = data['low']
            c = data['close']
            oi = data['openInterest']

            self.listBar.append((n, o, c, l, h))
            self.listOpen.append(o)
            self.listClose.append(c)
            self.listHigh.append(h)
            self.listLow.append(l)
            self.listOpenInterest.append(oi)

    self.initCompleted = True  # data loading completed
    print "initCompleted!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!"
    self.plotKline()  # plot the K-line chart

#----------------------------------------------------------------------
def interpolateOnGivenTimes(self, t):
    """Interpolate the time series on the user-supplied time vector,
    overlaying the previous t and d fields.
    """
    tord = [date2num(tmp) for tmp in self.t]
    # tord = date2num(self.t)  # create np.array
    f = interp1d(tord, self.d, kind='linear', bounds_error=False)
    self.d = f([date2num(tmp) for tmp in t])
    # self.d = f(date2num(t))  # create np.array...
    self.t = t
def createArrayData(self, ChannelList_obj):
    """Create the np array data for table display, and update the
    ChannelList_obj. This function can be called from outside to update
    the table display.
    """
    self.ChannelList_obj = ChannelList_obj
    self.arraydata = np.concatenate(
        (ChannelList_obj.station,
         np.array(ChannelList_obj.grav) * 1000,
         np.array(ChannelList_obj.sd) * 1000,
         ChannelList_obj.tiltx,
         ChannelList_obj.tilty,
         ChannelList_obj.temp,
         ChannelList_obj.dur,
         ChannelList_obj.rej,
         (date2num(ChannelList_obj.t) - date2num(ChannelList_obj.t[0])) * 24 * 60,
         np.array(ChannelList_obj.t))).reshape(len(ChannelList_obj.t), 10, order='F')
def read_verticalperiodfile(c_Model, c_Var, planes=False):
    # Routine to read a variable from the hourly (HH) file
    c_Files = ops.find(IncF.c_ModelDir, 'PP_' + c_Model + '-3D*')
    if c_Files is None:
        return None
    c_File = c_Files[0]
    logging.info('Reading File: %s', c_File.split(os.sep)[-1])
    if planes:
        assert IncF.f_Make_Cuts  # To make sure this option was filled. This doesn't check format
        new_data = ncfile.read_ncfile3D(c_File, c_Var, IncF.f_Make_Cuts)
    else:
        new_data = ncfile.read_ncfile3D(c_File, c_Var)
    if new_data is not None:
        f_Data_aux, f_lat, f_lon, f_elev_aux, d_Time_aux, c_Units = new_data
    else:
        return None

    # Checking for the time frequency between time steps
    c_TimeFreq, Dt = delta_time_freq(d_Time_aux)
    f_Time_aux = map(float, date2num(d_Time_aux))
    f_date_i = date2num(dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y'))
    f_date_f = date2num(dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y'))
    if planes:
        f_Data, d_Time = time_crop(f_date_i, f_date_f, Dt, f_Time_aux, f_Data_aux, multiple=True)
        f_elev, _ = time_crop(f_date_i, f_date_f, Dt, f_Time_aux, f_elev_aux, multiple=True)
    else:
        f_Data, d_Time = time_crop(f_date_i, f_date_f, Dt, f_Time_aux, f_Data_aux)
        f_elev, _ = time_crop(f_date_i, f_date_f, Dt, f_Time_aux, f_elev_aux)
    return f_Data, f_lat, f_lon, f_elev, d_Time, c_TimeFreq, c_Units
def read_periodfile(c_Model, c_Var):
    # Routine to read a variable from the hourly (HH) file
    c_Files = ops.find(IncF.c_ModelDir, 'PP_' + c_Model + '-2D*')
    if c_Files is None:
        return None
    c_FileName = c_Files[0]
    c_ModelFile = c_FileName.split(os.sep)[-1]
    logging.info('Reading File: %s', c_ModelFile)
    new_data = ncfile.read_ncfile(c_FileName, c_Var)
    if new_data is not None:
        f_Data_aux, f_lat, f_lon, d_Time_aux, c_Units = new_data
    else:
        return None

    # Checking for the time frequency between time steps
    c_TimeFreq, Dt = delta_time_freq(d_Time_aux)
    f_Time_aux = map(float, date2num(d_Time_aux))
    f_date_i = date2num(dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y'))
    f_date_f = date2num(dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y'))
    f_Data, d_Time = time_crop(f_date_i, f_date_f, Dt, f_Time_aux, f_Data_aux)
    return f_Data, f_lat, f_lon, d_Time, c_ModelFile, c_TimeFreq, c_Units

# Auxiliary functions
def perform_read(model_name, var_name, dest_dict, read_routine):
    """Retrieve data via read_routine and format it into dest_dict.

    :param model_name: String. The name of the model.
    :param var_name: String. The name of the variable.
    :param dest_dict: Dictionary. Where data will be stored.
    :param read_routine: callable. Lambda that wraps the read routine so it
        can be used as an argument to this function.
    :return: Performs the given read_routine and updates the data of dest_dict.
    """
    if model_name not in dest_dict:
        new_data = read_routine((model_name, var_name))
        if new_data is not None:
            f_data, f_lat, f_lon, d_time, c_ModelFile, c_TimeFreq, c_Units = new_data
        else:
            logging.warning('No data for model %s and variable %s', model_name, var_name)
            return  # Just to stop execution.
        t_temp = {'f_Lat': f_lat, 'f_Lon': f_lon,
                  'd_Time': np.array(d_time),
                  'f_Time': date2num(d_time),
                  'c_TimeFreq': c_TimeFreq}
        dest_dict[model_name] = dict(t_temp)
        dest_dict[model_name]['t_Units'] = dict()
    else:
        # TODO: Handle if new_data is None, same as above
        f_data, _, _, _, _, _, c_Units = read_routine((model_name, var_name))
    dest_dict[model_name][var_name] = f_data
    dest_dict[model_name]['t_Units'][var_name] = c_Units
def read_mixedlayer(self, c_Network):
    c_ObsNetDirs = IncDir.c_ObsNetDir
    c_ObsNetName = IncDir.c_ObsNetName
    idx_Net = c_ObsNetName.index(c_Network)
    c_Files = os.listdir(c_ObsNetDirs[idx_Net])
    logging.info('Data Directory: %s' % c_ObsNetDirs[idx_Net])
    i_count = 0
    if 'PBLHmunoz.txt' in c_Files:
        c_FileName = 'PBLHmunoz.txt'
        logging.info('Reading File: %s' % c_FileName)
        f_AuxData = []
        d_AuxDate = []
        with open(c_ObsNetDirs[idx_Net] + c_FileName, 'r') as f:
            for line in (row.split(',') for row in f):
                if i_count > 0:
                    f_AuxData.append(float(line[3]))
                    d_AuxDate.append(dt.datetime.strptime(line[0], '%d-%m-%Y_%H:%M'))
                i_count += 1
        f_AuxDate = date2num(d_AuxDate)
        f_date_i = date2num(dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y'))
        f_date_f = date2num(dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y'))
        f_Stntime = []
        d_Stntime = []
        f_date_aux = f_date_i
        d_date_aux = num2date(f_date_i)
        while f_date_aux <= f_date_f + 23 / 24.:
            f_Stntime.append(date2num(d_date_aux))
            d_Stntime.append(d_date_aux)
            d_date_aux = d_date_aux + dt.timedelta(hours=1)
            f_date_aux = date2num(d_date_aux)
        f_Data = np.empty(len(f_Stntime))
        f_Data.fill(IncF.f_FillValue)
        f_Data[np.in1d(f_Stntime, f_AuxDate)] = f_AuxData
        return f_Data, f_Stntime
def read_mcpheedata(self, c_FileName, i_ncol):
    f_data_aux = []
    f_date_aux = []
    d_date_aux = []
    f_date_i = date2num(dt.datetime.strptime(IncF.c_Start_Date[0], '%d-%m-%Y'))
    f_date_f = date2num(dt.datetime.strptime(IncF.c_Last_Date[0], '%d-%m-%Y'))
    f_Stntime = []
    d_Stntime = []
    f_time_aux = f_date_i
    d_time_aux = num2date(f_date_i)
    while f_time_aux <= f_date_f + 23 / 24.:
        f_Stntime.append(date2num(d_time_aux))
        d_Stntime.append(d_time_aux)
        d_time_aux = d_time_aux + dt.timedelta(hours=1)
        f_time_aux = date2num(d_time_aux)
    with open(c_FileName, 'r') as f:
        file_data = csv.reader(f, delimiter=',')
        for f_row in list(file_data)[4::]:
            c_Value = f_row[i_ncol].replace(',', '.')
            if c_Value == '':
                f_data_aux.append(IncF.f_FillValue)
            else:
                f_data_aux.append(float(c_Value))
            d_date_utc = dt.datetime.strptime(f_row[0], '%d-%m-%Y %H:%M') + dt.timedelta(hours=4)
            d_date_local = d_date_utc + dt.timedelta(hours=IncF.i_TimeZone)
            d_date_aux.append(d_date_local)
            f_date_aux.append(date2num(d_date_local))
    f.close()
    i_start = np.where(np.array(f_date_aux) >= f_date_i)[0][0]
    i_end = np.where(np.array(f_date_aux) <= f_date_f)[-1][-1]
    f_VarData = np.empty(len(f_Stntime))
    f_VarData.fill(IncF.f_FillValue)
    f_VarData[np.in1d(f_Stntime, f_date_aux)] = np.array(f_data_aux)[np.in1d(f_date_aux, f_Stntime)]
    return f_VarData, f_Stntime, d_Stntime
def setxlim(self, size):
    if self.main_x is None or self.main_y is None:
        return
    xmax = max(self.main_x)
    date = mdates.num2date(xmax).date()
    if size == WindowSize.ONEDAY:
        return  # requires per-minute quotes
    elif size == WindowSize.FIVEDAY:
        return  # requires per-minute quotes
    elif size == WindowSize.ONEMONTH:
        xmin = mdates.date2num(date - timedelta(days=30))
    elif size == WindowSize.THREEMONTH:
        xmin = mdates.date2num(date - timedelta(days=90))
    elif size == WindowSize.SIXMONTH:
        xmin = mdates.date2num(date - timedelta(days=180))
    elif size == WindowSize.ONEYEAR:
        xmin = mdates.date2num(date - timedelta(days=365))
    elif size == WindowSize.TWOYEAR:
        xmin = mdates.date2num(date - timedelta(days=365 * 2))
    elif size == WindowSize.FIVEYEAR:
        xmin = mdates.date2num(date - timedelta(days=365 * 5))
    elif size == WindowSize.MAX:
        xmin = min(self.main_x)
    self.axes.set_xlim([xmin, xmax])
    self.adjust_ylim(xmin, xmax)
    self.fig.canvas.draw()
def on_loadQuoteClicked(self):
    logger.info('load quote')
    fileName = QtGui.QFileDialog.getOpenFileName(
        self, self.tr("Open Quote Data"), data_path,
        self.tr("Quote Files (*.csv)"))
    logger.info("Filename %s" % fileName)
    if os.path.isfile(fileName):
        df = pd.read_csv(unicode(fileName))
        df.columns = [col.lower() for col in df.columns]
        if 'datetime' in df.columns:
            df = df.sort(['datetime'])
            df['datetime'] = df.apply(
                lambda row: mdates.date2num(parser.parse(row['datetime'])),
                axis=1)
        elif 'date' in df.columns:
            df = df.sort(['date'])
            df['datetime'] = df.apply(
                lambda row: mdates.date2num(parser.parse(row['date'])),
                axis=1)
        if 'datetime' in df.columns and not df['datetime'].empty:
            self.ui_controller.matplotlibWidget.set_data(df)
            self.ui_controller.matplotlibWidget.draw_data()
            self.df = df
def plot_tick_range_normalised(tick_path, range_start, range_end):
    if not os.path.exists(tick_path):
        print(tick_path + " file doesn't exist")
        quit()

    date_cols = ['RateDateTime']
    df = pd.read_csv(tick_path, usecols=['RateDateTime', 'RateBid', 'RateAsk'])

    start_index = tfh.find_index_closest_date(range_start, tick_path)
    end_index = tfh.find_index_closest_date(range_end, tick_path)

    # Don't proceed if we didn't find the indices.
    if start_index is None or end_index is None:
        print('start_index or end_index was None')
        quit()

    ticks_s = df.iloc[start_index:end_index]
    ticks = (ticks_s['RateAsk'] + ticks_s['RateBid']) / 2.0
    ticks_norm = (ticks - ticks.min()) / (ticks.max() - ticks.min())
    dates_dt = [dt.datetime.strptime(str.split(x, '.')[0], '%Y-%m-%d %H:%M:%S')
                for x in ticks_s['RateDateTime'].values]
    dates = mdates.date2num(dates_dt)
    plt.plot_date(dates, ticks_norm, 'b-')
def getRDT(self):
    """
    a.RDT or a.RDT()

    convert dtype data into Rata Die (lat.) Time (days since 1/1/0001)

    Returns
    ========
    out : numpy array
        elapsed days since 1/1/1

    Examples
    ========
    >>> a = Ticktock('2002-02-02T12:00:00', 'ISO')
    >>> a.RDT
    array([ 730883.5])

    See Also
    =========
    getUTC, getUNX, getISO, getJD, getMJD, getCDF, getTAI, getDOY, geteDOY
    """
    from matplotlib.dates import date2num, num2date
    # import matplotlib.dates as mpd

    # nTAI = len(self.data)
    UTC = self.UTC
    # RDT = np.zeros(nTAI)
    RDT = datamodel.dmarray(date2num(UTC))
    # for i in np.arange(nTAI):
    #     RDT[i] = UTC[i].toordinal() + UTC[i].hour/24. + UTC[i].minute/1440. + \
    #         UTC[i].second/86400. + UTC[i].microsecond/86400000000.
    self.RDT = RDT
    return self.RDT

# -----------------------------------------------
def _dt_to_float_ordinal(dt):
    """
    Convert :mod:`datetime` to the Gregorian date as UTC float days,
    preserving hours, minutes, seconds and microseconds. Return value
    is a :func:`float`.
    """
    if (isinstance(dt, (np.ndarray, Index, Series)) and
            com.is_datetime64_ns_dtype(dt)):
        base = dates.epoch2num(dt.asi8 / 1.0E9)
    else:
        base = dates.date2num(dt)
    return base

# Datetime Conversion
def convert(values, unit, axis):
    def try_parse(values):
        try:
            return _dt_to_float_ordinal(tools.to_datetime(values))
        except Exception:
            return values

    if isinstance(values, (datetime, pydt.date)):
        return _dt_to_float_ordinal(values)
    elif isinstance(values, np.datetime64):
        return _dt_to_float_ordinal(lib.Timestamp(values))
    elif isinstance(values, pydt.time):
        return dates.date2num(values)
    elif com.is_integer(values) or com.is_float(values):
        return values
    elif isinstance(values, compat.string_types):
        return try_parse(values)
    elif isinstance(values, (list, tuple, np.ndarray, Index)):
        if isinstance(values, Index):
            values = values.values
        if not isinstance(values, np.ndarray):
            values = com._asarray_tuplesafe(values)
        if com.is_integer_dtype(values) or com.is_float_dtype(values):
            return values
        try:
            values = tools.to_datetime(values)
            if isinstance(values, Index):
                values = values.map(_dt_to_float_ordinal)
            else:
                values = [_dt_to_float_ordinal(x) for x in values]
        except Exception:
            pass
    return values
def autoscale(self):
    """
    Set the view limits to include the data range.
    """
    dmin, dmax = self.datalim_to_dt()
    if dmin > dmax:
        dmax, dmin = dmin, dmax

    # We need to cap at the endpoints of valid datetime
    # TODO(wesm): unused?
    # delta = relativedelta(dmax, dmin)
    # try:
    #     start = dmin - delta
    # except ValueError:
    #     start = _from_ordinal(1.0)
    # try:
    #     stop = dmax + delta
    # except ValueError:
    #     # The magic number!
    #     stop = _from_ordinal(3652059.9999999)

    dmin, dmax = self.datalim_to_dt()

    vmin = dates.date2num(dmin)
    vmax = dates.date2num(dmax)

    return self.nonsingular(vmin, vmax)
def get_mpl_time(ds, *freq):
    """Return a float which is usable for plt.plot_date from matplotlib.

    :param ds: core_faam dataset
    :type param: netCDF4.Dataset
    :param freq: frequency of the time stamp; default=1; if freq > 1 a
        multidimensional array is returned
    :return: array containing the matplotlib timestamps
    :rtype: numpy.array

    >>> ds = netCDF4.Dataset('core_faam_20130403_v004_r0_b768.nc', 'r')
    >>> t_1hz = get_mpl_time(ds)
    >>> t_1hz.shape
    Out[1]: (37137,)
    >>> t_32hz = get_mpl_time(ds, 32)
    >>> t_32hz.shape
    Out[1]: (37137, 32)
    >>> plot_date(t_32hz.ravel(), ds.variables['U_C'][:].ravel(), 'b-')
    >>>
    """
    if 'Time' in ds.variables.keys():
        vtime = ds.variables['Time'][:]
    elif 'time' in ds.variables.keys():
        vtime = ds.variables['time'][:]
    elif 'TIME' in ds.variables.keys():
        vtime = ds.variables['TIME'][:]
    # in old core files the 'Time' variable was called PARA0515
    elif 'PARA0515' in ds.variables.keys():
        vtime = ds.variables['PARA0515'][:]
    else:
        return None
    vtime = np.array(vtime)
    if freq:
        rows = len(vtime)
        vtime = vtime.repeat(freq[0]).reshape((rows, freq[0])) + \
            np.array(range(freq[0]), dtype=np.float64) / freq[0]
    result = np.float64(vtime / 86400.) + np.float64(date2num(get_base_time(ds)))
    return result
def get_mpl_time(ds, basetime=None, freq=1):
    """Return a matplotlib-usable time format from the faam core netCDF4.

    >>> ds = netCDF4.Dataset('core_faam_20130403_v004_r0_b768.nc', 'r')
    >>> t_1hz = get_mpl_time(ds)
    >>> t_1hz.shape
    Out[1]: (37137,)
    >>> t_32hz = get_mpl_time(ds, 32)
    >>> t_32hz.shape
    Out[1]: (37137, 32)
    >>> plot_date(t_32hz.ravel(), ds.variables['U_C'][:].ravel(), 'b-')
    >>>
    """
    if hasattr(ds, 'variables'):
        if 'Time' in ds.variables.keys():
            vtime = ds.variables['Time'][:]
        elif 'time' in ds.variables.keys():
            vtime = ds.variables['time'][:]
        elif 'TIME' in ds.variables.keys():
            vtime = ds.variables['TIME'][:]
        # in old core files the 'Time' variable was called PARA0515
        elif 'PARA0515' in ds.variables.keys():
            vtime = ds.variables['PARA0515'][:]
    elif isinstance(ds, dict):
        if ds.has_key('Time'):
            vtime = ds['Time']
    else:
        return None
    import numpy as np
    rows = len(vtime)
    vtime = vtime.repeat(freq).reshape((rows, freq)) + \
        np.array(range(freq), dtype=np.float64) / freq
    if not basetime:
        basetime = get_base_time(ds)
    result = np.float64(vtime / 86400.) + np.float64(date2num(basetime))
    return result
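The conversion at the heart of both get_mpl_time variants is simply seconds-since-midnight divided by 86400 (seconds per day), added to the date2num() float of the base date. A standalone sketch with made-up values (the base date here is hypothetical, standing in for what get_base_time() would return):

import datetime
import numpy as np
from matplotlib.dates import date2num, num2date

base = datetime.datetime(2013, 4, 3)      # hypothetical flight base date
vtime = np.array([0., 3600., 7200.])      # seconds since midnight
t = np.float64(vtime / 86400.) + np.float64(date2num(base))
print(num2date(t[1]))                     # one hour after the base date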
def _format_date(self, tdoy):
    '''Convert to a matplotlib date.'''
    mpl_date = mdates.date2num(tdoy.dtobject)
    return mpl_date
def to_xaxis(self, value):
    if self.axis_has_datelocator(self.axes.xaxis):
        return date2num(value)
    else:
        return value
def __call__(self, x):
    """
    Format a sequence of inputs

    Parameters
    ----------
    x : array
        Input

    Return
    ------
    out : list
        List of strings.
    """
    # Formatter timezone
    if self.tz is None and len(x):
        tz = self.formatter.tz = x[0].tzinfo

        if not all(value.tzinfo == tz for value in x):
            msg = ("Dates have different time zones. "
                   "Chosen `{}`, the time zone of the first date. "
                   "To use a different time zone, create a "
                   "formatter and pass the time zone.")
            warn(msg.format(tz.zone))

    # The formatter is tied to axes and takes
    # breaks in ordinal format.
    x = [date2num(val) for val in x]
    return _format(self.formatter, x)
def transform(x):
    """
    Transform from date to a numerical format
    """
    try:
        x = date2num(x)
    except AttributeError:
        # numpy datetime64
        # This is not ideal because the operations do not
        # preserve the np.datetime64 type. May need a
        # datetime64_trans
        x = [pd.Timestamp(item) for item in x]
        x = date2num(x)
    return x
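A small demonstration of why the AttributeError branch exists, assuming transform() as defined above is in scope: older matplotlib releases could not feed raw numpy.datetime64 values through date2num(), so the function falls back to pandas Timestamps, which convert cleanly.

import numpy as np

# Raw datetime64 values; on older matplotlib these trip the
# AttributeError branch and take the pd.Timestamp detour.
d64 = np.array(['2020-01-02', '2020-01-03'], dtype='datetime64[D]')
print(transform(d64))  # two float-day values, one day apart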
def make_efficiency_date(
        total_data, avg_data, f_name, title=None,
        x_label=None, y_label=None, x_ticks=None, y_ticks=None):
    fig = plt.figure()
    if title is not None:
        plt.title(title, fontsize=16)
    if x_label is not None:
        plt.ylabel(x_label)
    if y_label is not None:
        plt.xlabel(y_label)

    v_date = []
    v_val = []

    for data in total_data:
        dates = dt.date2num(datetime.datetime.strptime(data[0], '%H:%M'))
        to_int = round(float(data[1]))
        plt.plot_date(dates, data[1], color=plt.cm.brg(to_int))

    for data in avg_data:
        dates = dt.date2num(datetime.datetime.strptime(data[0], '%H:%M'))
        v_date.append(dates)
        v_val.append(data[1])

    plt.plot_date(v_date, v_val, "^y-", label='Average')
    plt.legend()
    plt.savefig(f_name)
    plt.close(fig)
def __init__(self, ax, x, y, ttype, ith=0, formatter=fmt):
    try:
        x = np.asarray(x, dtype='float')
    except (TypeError, ValueError):
        x = np.asarray(mdates.date2num(x), dtype='float')
    y = np.asarray(y, dtype='float')
    mask = ~(np.isnan(x) | np.isnan(y))
    x = x[mask]
    y = y[mask]
    self._points = np.column_stack((x, y))
    # All plots use the same pointer now
    # if(ith==0):
    self.offsets = (-20, 20)
    # else:
    #     self.offsets = (-20 - 10 * ith, 20 + 25 * ith)
    self.type = ttype
    y = y[np.abs(y - y.mean()) <= 3 * y.std()]
    self.scale = x.ptp()
    self.scale = y.ptp() / self.scale if self.scale else 1
    self.tree = spatial.cKDTree(self.scaled(self._points))
    self.formatter = formatter
    self.ax = ax
    self.fig = ax.figure
    self.ax.xaxis.set_label_position('top')
    self.dot = ax.scatter(
        [x.min()], [y.min()], s=130, color='green', alpha=0.7)
    self.annotation = self.setup_annotation()
    plt.connect('motion_notify_event', self)
def plot_states_and_var(data, hidden_states, cmap=None, columns=None, by='Activity'):
    """
    Make a plot of the data and the states

    Parameters
    ----------
    data : pandas DataFrame
        Data to plot
    hidden_states : iterable
        the hidden states corresponding to the timesteps
    columns : list, optional
        Which columns to plot
    by : str
        The column to group on
    """
    fig, ax = plt.subplots(figsize=(15, 5))
    if columns is None:
        columns = data.columns
    df = data[columns].copy()
    stateseq = np.array(hidden_states)
    stateseq_norep, durations = rle(stateseq)
    datamin, datamax = np.array(df).min(), np.array(df).max()
    y = np.array([datamin, datamax])
    maxstate = stateseq.max() + 1
    x = np.hstack(([0], durations.cumsum()[:-1], [len(df.index) - 1]))
    C = np.array([[float(state) / maxstate] for state in stateseq_norep]).transpose()
    ax.set_xlim((min(x), max(x)))
    if cmap is None:
        num_states = max(hidden_states) + 1
        colormap, cmap = get_color_map(num_states)
    pc = ax.pcolorfast(x, y, C, vmin=0, vmax=1, alpha=0.3, cmap=cmap)
    plt.plot(df.as_matrix())
    locator = AutoDateLocator()
    locator.create_dummy_axis()
    num_index = pd.Index(df.index.map(date2num))
    ticks_num = locator.tick_values(min(df.index), max(df.index))
    ticks = [num_index.get_loc(t) for t in ticks_num]
    plt.xticks(ticks, df.index.strftime('%H:%M')[ticks], rotation='vertical')
    cb = plt.colorbar(pc)
    cb.set_ticks(np.arange(1. / (2 * cmap.N), 1, 1. / cmap.N))
    cb.set_ticklabels(np.arange(0, cmap.N))
    # Plot the activities
    if by is not None:
        actseq = np.array(data[by])
        sca = ax.scatter(
            np.arange(len(hidden_states)),  # data.index,
            np.ones_like(hidden_states) * datamax,
            c=actseq,
            edgecolors='none'
        )
    plt.show()
    return fig, ax
def plot_resid(d, savename='resfig1.png'):
    """
    Plots the residual frequency after the first wipe using the TLE velocity.
    """
    flim = [-2.e3, 2.e3]
    t = d['tvec']

    dates = [dt.datetime.fromtimestamp(ts) for ts in t]
    datenums = md.date2num(dates)
    xfmt = md.DateFormatter('%Y-%m-%d %H:%M:%S')

    fig1 = plt.figure(figsize=(7, 9))
    doppler_residual = sp.interpolate.interp1d(d['tvec'], d['dopfit'])
    fvec = d["fvec"]
    res0 = d["res0"]
    res1 = d["res1"]
    plt.subplot(211)
    mesh = plt.pcolormesh(datenums, fvec, sp.transpose(10. * sp.log10(res0 + 1e-12)), vmin=-5, vmax=25)
    plt.plot(datenums, (150.0 / 400.0) * doppler_residual(t), "r--", label="doppler resid")
    ax = plt.gca()
    ax.xaxis.set_major_formatter(xfmt)
    plt.ylim(flim)
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=25)
    plt.xlabel("UTC")
    plt.ylabel("Frequency (Hz)")
    plt.title("Power ch0 (dB) %1.2f MHz" % (150.012))
    plt.legend()
    plt.colorbar(mesh, ax=ax)

    # quicklook spectra of residuals along with the measured Doppler
    # residual from the second channel.
    plt.subplot(212)
    mesh = plt.pcolormesh(datenums, fvec, sp.transpose(10. * sp.log10(res1 + 1e-12)), vmin=-5, vmax=25)
    plt.plot(datenums, doppler_residual(t), "r--", label="doppler resid")
    ax = plt.gca()
    ax.xaxis.set_major_formatter(xfmt)
    plt.ylim(flim)
    plt.xlabel("UTC")
    plt.ylabel("Frequency (Hz)")
    plt.title("Power ch1 (dB), %1.2f MHz" % (400.032))
    plt.subplots_adjust(bottom=0.2)
    plt.xticks(rotation=25)
    plt.legend()
    plt.colorbar(mesh, ax=ax)

    plt.tight_layout()
    print('Saving residual plots: ' + savename)
    plt.savefig(savename, dpi=300)
    plt.close(fig1)
def parse_yahoo_historical_ochl(fh, adjusted=True, asobject=False):
    """Parse the historical data in file handle fh from yahoo finance.

    Parameters
    ----------
    adjusted : bool
        If True (default) replace open, close, high, low prices with
        their adjusted values. The adjustment is by a scale factor,
        S = adjusted_close/close. Adjusted prices are actual prices
        multiplied by S.

        Volume is not adjusted as it is already backward split adjusted
        by Yahoo. If you want to compute dollars traded, multiply volume
        by the adjusted close, regardless of whether you choose adjusted
        = True|False.

    asobject : bool or None
        If False (default for compatibility with earlier versions)
        return a list of tuples containing

            d, open, close, high, low, volume

        If None (preferred alternative to False), return
        a 2-D ndarray corresponding to the list of tuples.

        Otherwise return a numpy recarray with

            date, year, month, day, d, open, close, high, low,
            volume, adjusted_close

        where d is a floating point representation of date,
        as returned by date2num, and date is a python standard
        library datetime.date instance.

        The name of this kwarg is a historical artifact.  Formerly,
        True returned a cbook Bunch
        holding 1-D ndarrays.  The behavior of a numpy recarray is
        very similar to the Bunch.
    """
    return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
                                   ochl=True)
def parse_yahoo_historical_ohlc(fh, adjusted=True, asobject=False):
    """Parse the historical data in file handle fh from yahoo finance.

    Parameters
    ----------
    adjusted : bool
        If True (default) replace open, high, low, close prices with
        their adjusted values. The adjustment is by a scale factor,
        S = adjusted_close/close. Adjusted prices are actual prices
        multiplied by S.

        Volume is not adjusted as it is already backward split adjusted
        by Yahoo. If you want to compute dollars traded, multiply volume
        by the adjusted close, regardless of whether you choose adjusted
        = True|False.

    asobject : bool or None
        If False (default for compatibility with earlier versions)
        return a list of tuples containing

            d, open, high, low, close, volume

        If None (preferred alternative to False), return
        a 2-D ndarray corresponding to the list of tuples.

        Otherwise return a numpy recarray with

            date, year, month, day, d, open, high, low, close,
            volume, adjusted_close

        where d is a floating point representation of date,
        as returned by date2num, and date is a python standard
        library datetime.date instance.

        The name of this kwarg is a historical artifact.  Formerly,
        True returned a cbook Bunch
        holding 1-D ndarrays.  The behavior of a numpy recarray is
        very similar to the Bunch.
    """
    return _parse_yahoo_historical(fh, adjusted=adjusted, asobject=asobject,
                                   ochl=False)
def time_crop(f_init_date, f_final_date, delta, f_time_array, data_array, multiple=False):
    """Crop the data_array between f_init_date and f_final_date.

    :param f_init_date: Float. Initial date
    :param f_final_date: Float. Final date
    :param delta: a datetime instance to step in dates
    :param f_time_array: Float array. All dates of data_array
    :param data_array: The data to be cropped. Its shape must be of the form (time, ...)
    :param multiple: False, just one data_array. True, a list of data_arrays.
    :return: Cropped data and the corresponding datetime list.
    """
    i_start = np.where(np.array(f_time_array) >= f_init_date)[0][0]
    i_end = np.where(np.array(f_time_array) <= f_final_date + 23 / 24.)[0][-1]
    # TODO: Refactor this while. You can transform delta and operate only on
    # f_dates and then convert the entire list.
    d_date = num2date(f_init_date).replace(minute=0)
    d_Time = []
    f_Time = []
    while f_init_date <= f_final_date + 23 / 24.:
        d_Time.append(d_date)
        f_Time.append(date2num(d_date))
        d_date = d_date + delta
        f_init_date = date2num(d_date)
    if multiple:
        all_cropped_data = []
        for data in data_array:
            new_shape = [len(d_Time)]
            new_shape.extend(list(data.shape[1:]))
            new_shape = tuple(new_shape)
            cropped_data = np.empty(new_shape)
            cropped_data.fill(np.nan)
            # TODO: Use find_nearest
            cropped_data[np.in1d(f_Time, f_time_array[i_start:i_end + 1])] = data[i_start:i_end + 1]
            all_cropped_data.append(cropped_data)
        return all_cropped_data, d_Time
    else:
        new_shape = [len(d_Time)]
        new_shape.extend(list(data_array.shape[1:]))
        new_shape = tuple(new_shape)
        cropped_data = np.empty(new_shape)
        cropped_data.fill(np.nan)
        cropped_data[np.in1d(f_Time, f_time_array[i_start:i_end + 1])] = data_array[i_start:i_end + 1]
        return cropped_data, d_Time
def __generate_figure(self, user, user_name):
    # Set up figure
    fig = plt.figure(figsize=(8, 6), dpi=150)
    fig.suptitle('{}\'s activity'.format(user_name), fontsize=20)
    ax1 = fig.add_subplot(221)
    ax2 = fig.add_subplot(222)
    ax3 = fig.add_subplot(212)

    # Plot 24 hour participation data, accumulated over all time
    t = [x for x in range(24)]
    y = [user['average_day_cycle'][x] for x in t]
    ax1.plot(t, y)
    y = [user['recent_day_cycle'][x] for x in t]
    ax1.plot(t, y)
    y = [user['weekly_day_cycle'][x] for x in t]
    ax1.plot(t, y)
    ax1.set_xlim([0, 24])
    ax1.grid()
    ax1.set_title('Daily Activity')
    ax1.set_xlabel('Hour (UTC)')
    ax1.set_ylabel('Message Count per Hour')
    ax1.legend(['Average', 'Last Day', 'Last Week'])

    # Create pie chart of the most active channels
    top5 = sorted(user['participation_per_channel'],
                  key=user['participation_per_channel'].get,
                  reverse=True)[:5]
    labels = top5
    sizes = [user['participation_per_channel'][x] for x in top5]
    explode = [0] * len(top5)
    explode[0] = 0.1
    ax2.pie(sizes, explode=explode, labels=labels, autopct='%1.1f%%', shadow=True)

    # Create overall activity
    dates, values = zip(*sorted(user['participation_per_day'].items(),
                                key=lambda dv: dv[0]))
    dates = [datetime.fromtimestamp(float(x)) for x in dates]
    dates = date2num(dates)
    if len(values) > 80:
        ax3.bar(dates, values, width=1)
    else:
        ax3.bar(dates, values)
    ax3.xaxis_date()
    ax3.set_title('Total Activity')
    ax3.set_xlim([dates[0], dates[-1]])
    ax3.set_ylabel('Message Count per Day')
    ax3.grid()
    spacing = 2
    for label in ax3.xaxis.get_ticklabels()[::spacing]:
        label.set_visible(False)

    image_file_name = path.join(self.cache_dir, user_name + '.png')
    fig.savefig(image_file_name)
    return image_file_name
def plot_tick_range(tick_path, range_start, range_end):
    if not os.path.exists(tick_path):
        print(tick_path + " file doesn't exist")
        quit()

    date_cols = ['RateDateTime']
    df = pd.read_csv(tick_path, usecols=['RateDateTime', 'RateBid', 'RateAsk'])

    start_index = tfh.find_index_closest_date(range_start, tick_path)
    end_index = tfh.find_index_closest_date(range_end, tick_path)

    # Don't proceed if we didn't find the indices.
    if start_index is None or end_index is None:
        print('start_index or end_index was None')
        quit()

    ticks_s = df.iloc[start_index:end_index]
    ticks = (ticks_s['RateAsk'] + ticks_s['RateBid']) / 2.0
    dates_dt = [dt.datetime.strptime(str.split(x, '.')[0], '%Y-%m-%d %H:%M:%S')
                for x in ticks_s['RateDateTime'].values]
    dates = mdates.date2num(dates_dt)

    # fig = plt.figure()
    # ax1 = plt.subplot2grid((1,1), (0,0))
    plt.plot_date(dates, ticks, 'b-')
    # candlestick_ohlc(ax1, ohlc, width=0.0004, colorup='#77d879', colordown='#db3f3f')
    # for label in ax1.xaxis.get_ticklabels():
    #     label.set_rotation(45)
    # ax1.xaxis.set_major_formatter(mdates.DateFormatter('%m-%d %H:%M'))
    # ax1.xaxis.set_major_locator(mticker.MaxNLocator(10))
    # ax1.grid(True)
    # plt.xlabel('Date')
    # plt.ylabel('Price')
    # plt.title(ohlc_path)
    # plt.legend()
    # plt.subplots_adjust(left=0.09, bottom=0.20, right=0.94, top=0.90, wspace=0.2, hspace=0)
    # plt.show()

# plot_ohlc_range
def randomDate(dt1, dt2, N=1, tzinfo=False, sorted=False):
    """
    Return one (or many) random datetimes between two given dates.
    This is done under the convention dt1 <= rand < dt2.

    Parameters
    ==========
    dt1 : datetime.datetime
        start date for the random date
    dt2 : datetime.datetime
        stop date for the random date

    Other Parameters
    ================
    N : int (optional)
        the number of random dates to generate (default=1)
    tzinfo : bool (optional)
        maintain the tzinfo of the input datetimes (default=False)
    sorted : bool (optional)
        return the times sorted (default=False)

    Returns
    =======
    out : datetime.datetime or numpy.ndarray of datetime.datetime
        the new time for the next call to EventTimer

    Examples
    ========
    """
    from matplotlib.dates import date2num, num2date

    if dt1.tzinfo != dt2.tzinfo:
        raise ValueError('tzinfo for the input and output datetimes must match')
    dt1n = date2num(dt1)
    dt2n = date2num(dt2)
    rnd_tn = np.random.uniform(dt1n, dt2n, size=N)
    rnd_t = num2date(rnd_tn)
    if not tzinfo:
        tzinfo = None
    else:
        tzinfo = dt1.tzinfo
    rnd_t = np.asarray([val.replace(tzinfo=tzinfo) for val in rnd_t])
    if sorted:
        rnd_t.sort()
    return rnd_t
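A usage sketch for randomDate(), under the assumption that the enclosing module imports numpy as np (as the body requires): the draws are uniform in date2num() float-day space, so time intervals of equal length are equally likely.

import datetime

# Three sorted random datetimes within calendar year 2000.
times = randomDate(datetime.datetime(2000, 1, 1),
                   datetime.datetime(2001, 1, 1),
                   N=3, sorted=True)
print(times)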
def logspace(min, max, num, **kwargs):
    """
    Returns log-spaced bins. Same as numpy.logspace except the min and max
    are the min and max, not log10(min) and log10(max).

    Parameters
    ==========
    min : float
        minimum value
    max : float
        maximum value
    num : integer
        number of log spaced bins

    Other Parameters
    ================
    kwargs : dict
        additional keywords passed into numpy.logspace

    Returns
    =======
    out : array
        log-spaced bins from min to max in a numpy array

    Notes
    =====
    This function works on both numbers and datetime objects

    Examples
    ========
    >>> import spacepy.toolbox as tb
    >>> tb.logspace(1, 100, 5)
    array([   1.        ,    3.16227766,   10.        ,   31.6227766 ,  100.        ])

    See Also
    ========
    geomspace
    linspace
    """
    if isinstance(min, datetime.datetime):
        from matplotlib.dates import date2num, num2date
        ans = num2date(np.logspace(np.log10(date2num(min)), np.log10(date2num(max)), num, **kwargs))
        ans = spt.no_tzinfo(ans)
        return np.array(ans)
    else:
        return np.logspace(np.log10(min), np.log10(max), num, **kwargs)
def linspace(min, max, num, **kwargs):
    """
    Returns linear-spaced bins. Same as numpy.linspace except it works with
    datetime and is faster.

    Parameters
    ==========
    min : float, datetime
        minimum value
    max : float, datetime
        maximum value
    num : integer
        number of linear spaced bins

    Other Parameters
    ================
    kwargs : dict
        additional keywords passed into numpy.linspace

    Returns
    =======
    out : array
        linear-spaced bins from min to max in a numpy array

    Notes
    =====
    This function works on both numbers and datetime objects

    Examples
    ========
    >>> import spacepy.toolbox as tb
    >>> tb.linspace(1, 10, 4)
    array([  1.,   4.,   7.,  10.])

    See Also
    ========
    geomspace
    logspace
    """
    # 'is ()' compares identity with a literal and never matches; use equality
    if hasattr(min, 'shape') and min.shape == ():
        min = min.item()
    if hasattr(max, 'shape') and max.shape == ():
        max = max.item()
    if isinstance(min, datetime.datetime):
        from matplotlib.dates import date2num, num2date
        ans = num2date(np.linspace(date2num(min), date2num(max), num, **kwargs))
        ans = spt.no_tzinfo(ans)
        return np.array(ans)
    else:
        return np.linspace(min, max, num, **kwargs)
def __call__(self):
    # if no data have been set, this will tank with a ValueError
    try:
        dmin, dmax = self.viewlim_to_dt()
    except ValueError:
        return []

    if dmin > dmax:
        dmax, dmin = dmin, dmax

    # We need to cap at the endpoints of valid datetime
    # TODO(wesm) unused?
    # delta = relativedelta(dmax, dmin)
    # try:
    #     start = dmin - delta
    # except ValueError:
    #     start = _from_ordinal(1.0)
    # try:
    #     stop = dmax + delta
    # except ValueError:
    #     # The magic number!
    #     stop = _from_ordinal(3652059.9999999)

    nmax, nmin = dates.date2num((dmax, dmin))

    num = (nmax - nmin) * 86400 * 1000
    max_millis_ticks = 6
    for interval in [1, 10, 50, 100, 200, 500]:
        if num <= interval * (max_millis_ticks - 1):
            self._interval = interval
            break
    else:
        # We went through the whole loop without breaking, default to 1
        self._interval = 1000.

    estimate = (nmax - nmin) / (self._get_unit() * self._get_interval())

    if estimate > self.MAXTICKS * 2:
        raise RuntimeError(('MillisecondLocator estimated to generate %d '
                            'ticks from %s to %s: exceeds Locator.MAXTICKS'
                            '* 2 (%d) ') %
                           (estimate, dmin, dmax, self.MAXTICKS * 2))

    freq = '%dL' % self._get_interval()
    tz = self.tz.tzname(None)
    st = _from_ordinal(dates.date2num(dmin))  # strip tz
    ed = _from_ordinal(dates.date2num(dmax))
    all_dates = date_range(start=st, end=ed, freq=freq, tz=tz).asobject

    try:
        if len(all_dates) > 0:
            locs = self.raise_if_exceeds(dates.date2num(all_dates))
            return locs
    except Exception:  # pragma: no cover
        pass

    lims = dates.date2num([dmin, dmax])
    return lims
def initializeLines(self, timestamp):
    print "initializing %s" % self.name

    anomalyRange = (0.0, 1.0)
    self.dates = deque([timestamp] * WINDOW, maxlen=WINDOW)
    self.convertedDates = deque(
        [date2num(date) for date in self.dates], maxlen=WINDOW
    )
    self.value = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.rawValue = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.predicted = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyScore = deque([0.0] * WINDOW, maxlen=WINDOW)
    self.anomalyLikelihood = deque([0.0] * WINDOW, maxlen=WINDOW)

    actualPlot, = self._mainGraph.plot(self.dates, self.value)
    self.actualLine = actualPlot
    rawPlot, = self._mainGraph.plot(self.dates, self.rawValue)
    self.rawLine = rawPlot
    predictedPlot, = self._mainGraph.plot(self.dates, self.predicted)
    self.predictedLine = predictedPlot
    self._mainGraph.legend(tuple(['actual', 'raw', 'predicted']), loc=3)

    anomalyScorePlot, = self._anomalyGraph.plot(
        self.dates, self.anomalyScore, 'm'
    )
    anomalyScorePlot.axes.set_ylim(anomalyRange)
    self.anomalyScoreLine = anomalyScorePlot

    anomalyLikelihoodPlot, = self._anomalyGraph.plot(
        self.dates, self.anomalyScore, 'r'
    )
    anomalyLikelihoodPlot.axes.set_ylim(anomalyRange)
    self.anomalyLikelihoodLine = anomalyLikelihoodPlot
    self._anomalyGraph.legend(
        tuple(['anomaly score', 'anomaly likelihood']), loc=3
    )

    dateFormatter = DateFormatter('%H:%M:%S.%f')
    self._mainGraph.xaxis.set_major_formatter(dateFormatter)
    self._anomalyGraph.xaxis.set_major_formatter(dateFormatter)

    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, True, True)

    self.linesInitialized = True
def write(self, timestamp, value, predicted, anomalyScore, rawValue):
    # We need the first timestamp to initialize the lines at the right X value,
    # so do that check first.
    if not self.linesInitialized:
        self.initializeLines(timestamp)

    anomalyLikelihood = self.anomalyLikelihoodHelper.anomalyProbability(
        value, anomalyScore, timestamp
    )

    self.dates.append(timestamp)
    self.convertedDates.append(date2num(timestamp))
    self.value.append(value)
    self.rawValue.append(rawValue)
    self.allValues.append(value)
    self.allRawValues.append(rawValue)
    self.predicted.append(predicted)
    self.anomalyScore.append(anomalyScore)
    self.anomalyLikelihood.append(anomalyLikelihood)

    # Update main chart data
    self.actualLine.set_xdata(self.convertedDates)
    self.actualLine.set_ydata(self.value)
    self.rawLine.set_xdata(self.convertedDates)
    self.rawLine.set_ydata(self.rawValue)
    self.predictedLine.set_xdata(self.convertedDates)
    self.predictedLine.set_ydata(self.predicted)
    # Update anomaly chart data
    self.anomalyScoreLine.set_xdata(self.convertedDates)
    self.anomalyScoreLine.set_ydata(self.anomalyScore)
    self.anomalyLikelihoodLine.set_xdata(self.convertedDates)
    self.anomalyLikelihoodLine.set_ydata(self.anomalyLikelihood)

    # Remove previous highlighted regions
    for poly in self._chartHighlights:
        poly.remove()
    self._chartHighlights = []

    # weekends = extractWeekendHighlights(self.dates)
    anomalies = extractAnomalyIndices(self.anomalyLikelihood)

    # Highlight weekends in main chart
    # self.highlightChart(weekends, self._mainGraph)

    # Highlight anomalies in anomaly chart
    self.highlightChart(anomalies, self._anomalyGraph)

    maxValue = max(max(self.allValues), max(self.allRawValues))
    self._mainGraph.relim()
    self._mainGraph.axes.set_ylim(0, maxValue + (maxValue * 0.02))

    self._mainGraph.relim()
    self._mainGraph.autoscale_view(True, scaley=False)
    self._anomalyGraph.relim()
    self._anomalyGraph.autoscale_view(True, True, True)

    plt.draw()
    plt.pause(0.00000000001)