The following 49 code examples, extracted from open-source Python projects, demonstrate how to use timeit.Timer().
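Before the extracted examples, here is a minimal, self-contained sketch of the basic timeit.Timer() workflow. The statement, setup string, and iteration counts are illustrative choices made up for this sketch, not taken from any of the projects below:

import timeit

# Illustrative statement and setup only; the examples below use statements
# from real projects instead.
setup = "data = list(range(1000))"
stmt = "sorted(data, reverse=True)"

# Timer(stmt, setup) compiles both snippets; timeit(number) runs the setup
# once, executes the statement `number` times, and returns total seconds.
timer = timeit.Timer(stmt=stmt, setup=setup)
total = timer.timeit(number=10000)
print("avg per call: %.3g s" % (total / 10000))

# repeat(repeat, number) returns a list of `repeat` such totals; taking the
# minimum is the conventional way to suppress noise from other processes.
best = min(timer.repeat(repeat=5, number=10000))
print("best of 5:    %.3g s per call" % (best / 10000))

Most of the examples that follow use the same pattern: build a Timer, call timeit() or repeat(), and report the minimum measured time.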
def time_call(func, setup=None, maxtime=1, bestof=10):
    """
    timeit() wrapper which tries to get as accurate a measurement as
    possible w/in maxtime seconds.

    :returns:
        ``(avg_seconds_per_call, log10_number_of_repetitions)``
    """
    from timeit import Timer
    from math import log
    timer = Timer(func, setup=setup or '')
    number = 1
    end = tick() + maxtime
    while True:
        delta = min(timer.repeat(bestof, number))
        if tick() >= end:
            return delta / number, int(log(number, 10))
        number *= 10
def timer(s, v='', nloop=500, nrep=3):
    units = ["s", "ms", "µs", "ns"]
    scaling = [1, 1e3, 1e6, 1e9]
    print("%s : %-50s : " % (v, s), end=' ')
    varnames = ["%ss,nm%ss,%sl,nm%sl" % tuple(x*4) for x in 'xyz']
    setup = 'from __main__ import numpy, ma, %s' % ','.join(varnames)
    Timer = timeit.Timer(stmt=s, setup=setup)
    best = min(Timer.repeat(nrep, nloop)) / nloop
    if best > 0.0:
        order = min(-int(numpy.floor(numpy.log10(best)) // 3), 3)
    else:
        order = 3
    print("%d loops, best of %d: %.*g %s per loop" % (nloop, nrep, 3,
                                                      best * scaling[order],
                                                      units[order]))
def benchmark_tracer_trace():
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # testcase
    def trace(tracer):
        # explicit vars
        with tracer.trace("a", service="s", resource="r", span_type="t") as s:
            s.set_tag("a", "b")
            s.set_tag("b", 1)

            with tracer.trace("another.thing"):
                pass
            with tracer.trace("another.thing"):
                pass

    # benchmark
    print("## tracer.trace() benchmark: {} loops ##".format(NUMBER))
    timer = timeit.Timer(lambda: trace(tracer))
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- trace execution time: {:8.6f}".format(min(result)))
def timed(func, setup="pass", limit=None): """Adaptively measure execution time of a function. """ timer = timeit.Timer(func, setup=setup) repeat, number = 3, 1 for i in range(1, 10): if timer.timeit(number) >= 0.2: break elif limit is not None and number >= limit: break else: number *= 10 time = min(timer.repeat(repeat, number)) / number if time > 0.0: order = min(-int(math.floor(math.log10(time)) // 3), 3) else: order = 3 return (number, time, time*_scales[order], _units[order]) # Code for doing inline timings of recursive algorithms.
def bench(name, cleanup=lambda: None, *, seconds=1, repeat=3):
    """Bench the given statement as many times as necessary until total
    executions take one second."""
    stmt = "__import__({!r})".format(name)
    timer = timeit.Timer(stmt)
    for x in range(repeat):
        total_time = 0
        count = 0
        while total_time < seconds:
            try:
                total_time += timer.timeit(1)
            finally:
                cleanup()
                count += 1
        else:
            # One execution too far
            if total_time > seconds:
                count -= 1
        yield count // seconds
def timeit(self, stmt, setup, number=None):
    self.fake_timer = FakeTimer()
    t = timeit.Timer(stmt=stmt, setup=setup, timer=self.fake_timer)
    kwargs = {}
    if number is None:
        number = DEFAULT_NUMBER
    else:
        kwargs['number'] = number
    delta_time = t.timeit(**kwargs)
    self.assertEqual(self.fake_timer.setup_calls, 1)
    self.assertEqual(self.fake_timer.count, number)
    self.assertEqual(delta_time, number)

# Takes too long to run in debug build.
#def test_timeit_default_iters(self):
#    self.timeit(self.fake_stmt, self.fake_setup)
def repeat(self, stmt, setup, repeat=None, number=None):
    self.fake_timer = FakeTimer()
    t = timeit.Timer(stmt=stmt, setup=setup, timer=self.fake_timer)
    kwargs = {}
    if repeat is None:
        repeat = DEFAULT_REPEAT
    else:
        kwargs['repeat'] = repeat
    if number is None:
        number = DEFAULT_NUMBER
    else:
        kwargs['number'] = number
    delta_times = t.repeat(**kwargs)
    self.assertEqual(self.fake_timer.setup_calls, repeat)
    self.assertEqual(self.fake_timer.count, repeat * number)
    self.assertEqual(delta_times, repeat * [float(number)])

# Takes too long to run in debug build.
#def test_repeat_default(self):
#    self.repeat(self.fake_stmt, self.fake_setup)
def test_multifc(n=5, b=100, d_in=500, d_out=1000, n_trials=1000):
    x = [Output(T.matrix(), shape=(b, d_in)) for _ in xrange(n)]
    x_in = [xi.value for xi in x]
    x_sample = [np.asarray(np.random.rand(*xi.shape), dtype=xi.value.dtype)
                for xi in x]
    # method A: concat then multiply
    N_a = Net(name='A')
    x_cat = N_a.Concat(*x, axis=1)
    y_a = N_a.FC(x_cat, nout=d_out)
    f_a = theano.function(x_in, y_a.value)
    time_a = Timer(partial(f_a, *x_sample))
    # method B: multiply each one then sum results
    N_b = Net(name='B')
    ys = [N_b.FC(xi, nout=d_out) for xi in x]
    y_b = N_b.EltwiseSum(*ys)
    f_b = theano.function(x_in, y_b.value)
    time_b = Timer(partial(f_b, *x_sample))
    # time them
    print 'Time A:', time_a.timeit(number=n_trials)
    print 'Time B:', time_b.timeit(number=n_trials)
def timeit_2vector_theano(init, nb_element=1e6, nb_repeat=3, nb_call=int(1e2),
                          expr="a**2 + b**2 + 2*a*b"):
    t3 = timeit.Timer("tf(av,bv)",
                      """
import theano
import theano.tensor as T
import numexpr as ne
from theano.tensor import exp
%(init)s
av=a
bv=b
a=T.dvector()
b=T.dvector()
tf= theano.function([a,b],%(expr)s)
""" % locals()
                      )
    ret = t3.repeat(nb_repeat, nb_call)
    return np.asarray(ret)
def main(): fp = open('testkeys.txt', 'w') fp.write(repr(keys)) fp.close() print ("Nodes: %d" % len(keys)) t = Timer("avl_build()", setup_AVLTree) print_result(t.timeit(COUNT), 'AVLTree build only') t = Timer("cavl_build()", setup_FastAVLTree) print_result(t.timeit(COUNT), 'FastAVLTree build only') t = Timer("avl_build_delete()", setup_AVLTree) print_result(t.timeit(COUNT), 'AVLTree build & delete') t = Timer("cavl_build_delete()", setup_FastAVLTree) print_result(t.timeit(COUNT), 'FastAVLTree build & delete') # shuffle search keys shuffle(keys) t = Timer("avl_search()", setup_AVLTree) print_result(t.timeit(COUNT), 'AVLTree search') t = Timer("cavl_search()", setup_FastAVLTree) print_result(t.timeit(COUNT), 'FastAVLTree search')
def main(): print("Nodes: %d" % len(keys)) t = Timer("dict_build()", setup_dict) print_result(t.timeit(COUNT), 'dict build only') t = Timer("crb_build()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree build only') t = Timer("dict_build_delete()", setup_dict) print_result(t.timeit(COUNT), 'dict build & delete') t = Timer("crb_build_delete()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree build & delete') # shuffle search keys shuffle(keys) t = Timer("dict_search()", setup_dict) print_result(t.timeit(COUNT), 'dict search') t = Timer("crb_search()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree search')
def main(): fp = open('testkeys.txt', 'w') fp.write(repr(keys)) fp.close() print("Nodes: %d" % len(keys)) shuffle(keys) t = Timer("rb_pop_min()", setup_RBTree) print_result(t.timeit(COUNT), 'RBTree pop_min') t = Timer("rb_pop_max()", setup_RBTree) print_result(t.timeit(COUNT), 'RBTree pop_max') t = Timer("crb_pop_min()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree pop_min') t = Timer("crb_pop_max()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree pop_max')
def main(): fp = open('testkeys.txt', 'w') fp.write(repr(keys)) fp.close() print ("Nodes: %d" % len(keys)) t = Timer("rb_build()", setup_RBTree) print_result(t.timeit(COUNT), 'RBTree build only') t = Timer("crb_build()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree build only') t = Timer("rb_build_delete()", setup_RBTree) print_result(t.timeit(COUNT), 'RBTree build & delete') t = Timer("crb_build_delete()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree build & delete') # shuffle search keys shuffle(keys) t = Timer("rb_search()", setup_RBTree) print_result(t.timeit(COUNT), 'RBTree search') t = Timer("crb_search()", setup_FastRBTree) print_result(t.timeit(COUNT), 'FastRBTree search')
def measure_best(repeat, iters,
                 common_setup='pass',
                 common_cleanup='pass',
                 *funcs):
    funcs = list(funcs)
    results = dict([(f, []) for f in funcs])

    for i in six.moves.range(repeat):
        random.shuffle(funcs)
        for func in funcs:
            gc.collect()
            t = timeit.Timer(func, setup=common_setup)
            results[func].append(t.timeit(iters))
            common_cleanup()

    best_results = {}
    for func, times in six.iteritems(results):
        best_results[func] = min(times)
    return best_results
def easy_timer(code_to_benchmark, *, repeat=3, number=1000):
    """
    Wrap timeit.Timer().repeat() to catch locals.

    Rather than put our setup statement in a string for
    :py:func:`timeit.timeit`, we can just pull locals and globals
    from the calling stack frame.

    Args:
        code_to_benchmark(str): A string containing the Python code that
            we want to benchmark.
        repeat(int): Number of times to repeat the timer trial.
        number(int): Number of iterations **per** trial.

    Returns:
        (float): The best measured time of ``repeat`` times.
    """
    timer = timeit.Timer(stmt=code_to_benchmark, globals=copy_environment(2))
    best_time = min(timer.repeat(repeat=repeat, number=number))
    return best_time
def print_easy_timer(code_to_benchmark, *, repeat=3, number=1000):
    """
    Repeatedly time code and print results.

    Args:
        code_to_benchmark(str): A string containing the Python code that
            we want to benchmark.
        repeat(int): Number of times to repeat the timer trial.
        number(int): Number of iterations **per** trial.

    Returns:
        (float): The best measured time of ``repeat`` times.
    """
    timer = timeit.Timer(stmt=code_to_benchmark, globals=copy_environment(2))
    best_time = min(timer.repeat(repeat=repeat, number=number))
    print(":\t\t".join((
        code_to_benchmark,
        str(best_time)
    )))
    return best_time
def timetest(command, info, info2='2 floats', num=100, numt=1, mem=16384):
    initct(mem)
    print " "
    print info
    print "Timing over", num*num, "calls to tiles,", numt, "tiling each for", info2
    t = timeit.Timer(command + '('+str(num)+','+str(mem)+','+str(numt)+')',
                     'from __main__ import ' + command)
    print "With no collision table", t.timeit(1), "seconds"
    t = timeit.Timer(command + '('+str(num)+', ctu'+','+str(numt)+')',
                     'from __main__ import ctu, ' + command)
    print "With unsafe collision table", t.timeit(1), "seconds"
    print ctu
    t = timeit.Timer(command + '('+str(num)+', cts'+','+str(numt)+')',
                     'from __main__ import cts, ' + command)
    print "With safe collision table", t.timeit(1), "seconds"
    print cts
    t = timeit.Timer(command + '('+str(num)+', ctss'+','+str(numt)+')',
                     'from __main__ import ctss, ' + command)
    print "With super safe collision table", t.timeit(1), "seconds"
    print ctss
    print " "
    #print "Timing over", num*num, "calls to tiles, 16 tilings each for", info2
    #t = timeit.Timer(command + '('+str(num)+', 16384, 16)', 'from __main__ import ' + command)
    #print "With no collision table", t.timeit(1), "seconds"
def main():
    parse_command_line()
    t = Timer(e1)
    results = t.timeit(options.num) / options.num
    print('engine: %0.3f ms per iteration' % (results * 1000))
    t = Timer(c1)
    results = t.timeit(options.num) / options.num
    print('coroutine: %0.3f ms per iteration' % (results * 1000))
def time_stmt(stmt='pass', setup='pass', number=0, repeat=3):
    """Timer function with the same behaviour as running `python -m timeit`
    in the command line.

    :return: elapsed time in seconds or NaN if the command failed.
    :rtype: float
    """
    t = Timer(stmt, setup)

    if not number:
        # determine number so that 0.2 <= total time < 2.0
        for i in range(1, 10):
            number = 10**i
            try:
                x = t.timeit(number)
            except:
                print(t.print_exc())
                return float('NaN')
            if x >= 0.2:
                break

    try:
        r = t.repeat(repeat, number)
    except:
        print(t.print_exc())
        return float('NaN')

    best = min(r)
    return best / number
def benchmark(stmt, n=1000, r=3):
    setup = (
        'from ansimarkup import parse;'
        'from colorama import Style as S, Fore as F;'
        'from termcolor import colored;'
        'from colr import color;'
        'from plumbum import colors;'
        'from pastel import colorize'
    )

    timer = Timer(stmt, setup=setup)
    best = min(timer.repeat(r, n))
    usec = best * 1e6 / n
    return usec
def get_timeit(self, setup):
    return min(timeit.Timer(
        'for n in range(64, 10000): _sample_n_k(n, 64)',
        setup=setup).repeat(repeat=10, number=1))
def measure_pattern_time_v2(iteration_number, size, pattern):
    gw = execnet.makegateway("popen//python=python2.7")
    channel = gw.remote_exec("""
from nltk.corpus import brown
words = brown.words()[:%s]
text = ' '.join(words)

from pattern.en import parsetree
text_tree = parsetree(text,
                      tokenize=True,     # Split punctuation marks from words?
                      tags=True,         # Parse part-of-speech tags? (NN, JJ, ...)
                      chunks=False,      # Parse chunks? (NP, VP, PNP, ...)
                      relations=False,   # Parse chunk relations? (-SBJ, -OBJ, ...)
                      lemmata=False,     # Parse lemmata? (ate => eat)
                      encoding='utf-8',  # Input string encoding.
                      tagset=None)       # Penn Treebank II (default) or UNIVERSAL.

from pattern.search import search

def measure_pattern_search():
    global pattern_search_result  # Make measure_me able to modify the value
    pattern_search_result = search("%s", text_tree)

from timeit import Timer
pattern_search_time = Timer(measure_pattern_search)

def pattern_search_timeit():
    runtimes = [pattern_search_time.timeit(number=1) for i in range(0, %s)]
    average = sum(runtimes)/len(runtimes)
    # return ''.join(['timit: #runs=', str(%s), ' ; average=', str(average),' ; min=', str(min(runtimes))])
    return [runtimes, average, min(runtimes)]

channel.send(pattern_search_timeit())
""" % (size, pattern, iteration_number, iteration_number))
    channel.send([])
    return channel.receive()
def measure_time(Function, iteration_number):
    function_time = Timer(Function)
    runtimes = [function_time.timeit(number=1) for i in range(0, iteration_number)]
    average = sum(runtimes)/len(runtimes)
    return runtimes, average, min(runtimes)
def func_27():
    jointimer = timeit.Timer('join_test()', 'from __main__ import join_test')
    print(jointimer.timeit(number=100))
    plustimer = timeit.Timer('plus_test()', 'from __main__ import plus_test')
    print(plustimer.timeit(number=100))
def benchmark_tracer_wrap():
    tracer = Tracer()
    tracer.writer = DummyWriter()

    # testcase
    class Foo(object):
        @staticmethod
        @tracer.wrap()
        def s():
            return 0

        @classmethod
        @tracer.wrap()
        def c(cls):
            return 0

        @tracer.wrap()
        def m(self):
            return 0

    f = Foo()

    # benchmark
    print("## tracer.trace() wrapper benchmark: {} loops ##".format(NUMBER))
    timer = timeit.Timer(f.s)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- staticmethod execution time: {:8.6f}".format(min(result)))
    timer = timeit.Timer(f.c)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- classmethod execution time: {:8.6f}".format(min(result)))
    timer = timeit.Timer(f.m)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("- method execution time: {:8.6f}".format(min(result)))
def benchmark_getpid():
    timer = timeit.Timer(getpid)
    result = timer.repeat(repeat=REPEAT, number=NUMBER)
    print("## getpid wrapper benchmark: {} loops ##".format(NUMBER))
    print("- getpid execution time: {:8.6f}".format(min(result)))
def __init__(self, stmt, setup='pass', timer=timeit.default_timer,
             globals=globals()):
    # copy of timeit.Timer.__init__
    # similarity index 95%
    self.timer = timer
    stmt = timeit.reindent(stmt, 8)
    setup = timeit.reindent(setup, 4)
    src = timeit.template % {'stmt': stmt, 'setup': setup}
    self.src = src  # Save for traceback display
    code = compile(src, timeit.dummy_src_name, "exec")
    ns = {}
    #exec code in globals(), ns  -- original timeit code
    exec_(code, globals, ns)  # -- we use caller-provided globals instead
    self.inner = ns["inner"]
def test_timer_invalid_stmt(self):
    self.assertRaises(ValueError, timeit.Timer, stmt=None)
def test_print_exc(self):
    s = io.StringIO()
    t = timeit.Timer("1/0")
    try:
        t.timeit()
    except:
        t.print_exc(s)
    self.assert_exc_string(s.getvalue(), 'ZeroDivisionError')
def time_regex_test_case(compiled_regex, test_case, iterations):
    """
    Execute and time a single regex on a single test case

    :param compiled_regex:
    :param test_case:
    :param iterations:
    :return:
    """
    try:
        repeats = 10
        search_string = test_case.search_string

        def wrap():
            # Timing bug, lazy eval defers computation if we don't
            # force (convert to list evals result here)
            # https://swizec.com/blog/python-and-lazy-evaluation/swizec/5148
            return list(compiled_regex.finditer(search_string))

        t = timeit.Timer(wrap)
        repeat_iterations = t.repeat(repeat=repeats, number=iterations)
        best_run = list(repeat_iterations[0])
        for repeated_timeit in repeat_iterations:
            if best_run[0] > list(repeated_timeit)[0]:
                best_run = list(repeated_timeit)
        return_vals = list(best_run)
        return_vals.append(iterations)
        return_vals.append(test_case)
    except:
        traceback.print_exc()
    return return_vals
def main(sys_argv):
    args = sys_argv[1:]
    count = int(args[0])

    print "Benchmarking: %sx" % count
    print

    for example in examples:
        test = make_test_function(example)
        t = Timer(test)
        print min(t.repeat(repeat=3, number=count))

    print "Done"