The following 11 code examples, extracted from open-source Python projects, illustrate how to use rq.get_current_job().
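Before the project examples, a minimal sketch of the basic pattern may help: inside a function that an RQ worker is executing, get_current_job() returns the running Job instance, while outside a worker it returns None. The report_progress function below is made up for illustration and does not come from any of the projects listed here.

from redis import Redis
from rq import Queue, get_current_job


def report_progress():
    # inside a worker this returns the Job being executed;
    # called directly (outside a worker) it returns None
    job = get_current_job()
    if job is not None:
        job.meta['progress'] = 100   # attach arbitrary metadata to the job
        job.save_meta()
        return job.id
    return None


# enqueue the function; a worker started with `rq worker` will run it
queue = Queue(connection=Redis())
job = queue.enqueue(report_progress)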
def unregister_all_dirty(self, decrement=1):
    """Unregister current TreeItem and all parent paths as dirty
    (should be called from RQ job procedure after cache is updated)
    """
    r_con = get_connection()
    job = get_current_job()
    for p in self.all_pootle_paths():
        if job:
            logger.debug('UNREGISTER %s (-%s) where job_id=%s',
                         p, decrement, job.id)
        else:
            logger.debug('UNREGISTER %s (-%s)', p, decrement)
        r_con.zincrby(POOTLE_DIRTY_TREEITEMS, p, 0 - decrement)
def unregister_dirty(self, decrement=1):
    """Unregister current TreeItem as dirty
    (should be called from RQ job procedure after cache is updated)
    """
    r_con = get_connection()
    job = get_current_job()
    if job:
        logger.debug('UNREGISTER %s (-%s) where job_id=%s',
                     self.cache_key, decrement, job.id)
    else:
        logger.debug('UNREGISTER %s (-%s)', self.cache_key, decrement)
    r_con.zincrby(POOTLE_DIRTY_TREEITEMS, self.cache_key, 0 - decrement)
def update_cache_job(instance):
    """RQ job"""
    job = get_current_job()
    job_wrapper = JobWrapper(job.id, job.connection)
    keys, decrement = job_wrapper.get_job_params()
    # close unusable and obsolete connections before and after the job
    # Note: setting CONN_MAX_AGE parameter can have negative side-effects
    # CONN_MAX_AGE value should be lower than DB wait_timeout
    connection.close_if_unusable_or_obsolete()
    instance._update_cache_job(keys, decrement)
    connection.close_if_unusable_or_obsolete()
    job_wrapper.clear_job_params()
def access_self():
    return get_current_job().id
def test_get_current_job(self):
    """
    Ensure that functions using RQ's ``get_current_job`` don't fail
    when run from rqworker (the job id is not in the failed queue).
    """
    queue = get_queue()
    job = queue.enqueue(access_self)
    call_command('rqworker', '--burst')
    failed_queue = Queue(name='failed', connection=queue.connection)
    self.assertFalse(job.id in failed_queue.job_ids)
    job.delete()
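The test above relies on django-rq's rqworker management command; the --burst flag makes the worker exit as soon as the queue is empty, so the enqueued access_self job runs to completion within the test. Outside Django, the equivalent is running `rq worker --burst` against the same queue.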
def _cancel_by_status(ticket):
    """
        Action cancelled because of ticket status
    """
    current_job = get_current_job()

    Logger.error(unicode('Ticket %d is %s, Skipping...' % (ticket.id, ticket.status)))
    ServiceActionJob.objects.filter(
        asynchronousJobId=current_job.id
    ).update(
        status='cancelled',
        comment='ticket is %s' % (ticket.status)
    )
def process_document(path, options, meta):
    current_task = get_current_job()
    with Office(app.config["LIBREOFFICE_PATH"]) as office:  # acquire libreoffice lock
        with office.documentLoad(path) as original_document:  # open original document
            with TemporaryDirectory() as tmp_dir:  # create temp dir where output'll be stored
                for fmt in options["formats"]:  # iterate over requested formats
                    current_format = app.config["SUPPORTED_FORMATS"][fmt]
                    output_path = os.path.join(tmp_dir, current_format["path"])
                    original_document.saveAs(output_path, fmt=current_format["fmt"])
                if options.get("thumbnails", None):
                    is_created = False
                    if meta["mimetype"] == "application/pdf":
                        pdf_path = path
                    elif "pdf" in options["formats"]:
                        pdf_path = os.path.join(tmp_dir, "pdf")
                    else:
                        pdf_tmp_file = NamedTemporaryFile()
                        pdf_path = pdf_tmp_file.name
                        original_document.saveAs(pdf_tmp_file.name, fmt="pdf")
                        is_created = True
                    image = Image(filename=pdf_path, resolution=app.config["THUMBNAILS_DPI"])
                    if is_created:
                        pdf_tmp_file.close()
                    thumbnails = make_thumbnails(image, tmp_dir, options["thumbnails"]["size"])
                result_path, result_url = make_zip_archive(current_task.id, tmp_dir)
    remove_file.schedule(
        datetime.timedelta(seconds=app.config["RESULT_FILE_TTL"]),
        result_path
    )
    return result_url
def reducer():
    current_job = get_current_job(redis_conn)
    words = current_job.dependency.result

    # we should generate sorted lists which are then merged,
    # but to keep things simple, we use dicts
    word_count = {}
    for word, count in words:
        if word not in word_count:
            word_count[word] = 0
        word_count[word] += count
    # print('reducer: %s to %s' % (len(words), len(word_count)))
    return word_count
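The reducer above reads current_job.dependency.result, which only works because the job was enqueued with depends_on pointing at a mapper job whose return value becomes available once that job has finished. A rough sketch of that wiring, assuming a hypothetical mapper function that emits (word, count) pairs:

from redis import Redis
from rq import Queue

redis_conn = Redis()
queue = Queue(connection=redis_conn)


def mapper():
    # hypothetical mapper: emit (word, count) pairs for the reducer to sum
    return [('spam', 1), ('eggs', 2), ('spam', 3)]


map_job = queue.enqueue(mapper)
# the reducer runs only after map_job finishes; inside it,
# get_current_job(redis_conn).dependency.result yields mapper's return value
reduce_job = queue.enqueue(reducer, depends_on=map_job)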
def apply_action(ticket_id=None, action_id=None, ip_addr=None, user_id=None):
    """
        Apply given action on customer service

        :param int ticket_id: The id of the Cerberus `Ticket`
        :param int action_id: The id of the Cerberus `ServiceAction`
        :param int user_id: The id of the Cerberus `User`
        :rtype: bool
        :return: if action has been applied
    """
    current_job = get_current_job()

    ticket = Ticket.objects.get(id=ticket_id)
    user = User.objects.get(id=user_id)
    if ticket.status in ('Closed', 'Answered'):
        _cancel_by_status(ticket)
        common.set_ticket_status(
            ticket,
            'ActionError',
            user=user
        )
        return False

    # Call action service
    try:
        result = implementations.instance.get_singleton_of(
            'ActionServiceBase'
        ).apply_action_on_service(
            ticket_id,
            action_id,
            ip_addr,
            user.id
        )
        _update_job(
            current_job.id,
            todo_id=result.todo_id,
            status=result.status,
            comment=result.comment
        )
        return True
    except ActionServiceException as ex:
        _update_job(current_job.id, status='actionError', comment=str(ex))
        common.set_ticket_status(
            ticket,
            'ActionError',
            user=user
        )
        return False
def xloader_data_into_datastore(input):
    '''This is the func that is queued. It is a wrapper for
    xloader_data_into_datastore, and makes sure it finishes by calling
    xloader_hook to update the task_status with the result.

    Errors are stored in task_status and job log and this method returns
    'error' to let RQ know too. Should the task_status update fail, then we
    also return 'error'.
    '''
    # First flag that this task is running, to indicate the job is not
    # stillborn, for when xloader_submit is deciding whether another job would
    # be a duplicate or not
    job_dict = dict(metadata=input['metadata'],
                    status='running')
    callback_xloader_hook(result_url=input['result_url'],
                          api_key=input['api_key'],
                          job_dict=job_dict)

    job_id = get_current_job().id
    errored = False
    try:
        xloader_data_into_datastore_(input, job_dict)
        job_dict['status'] = 'complete'
        db.mark_job_as_completed(job_id, job_dict)
    except JobError as e:
        db.mark_job_as_errored(job_id, str(e))
        job_dict['status'] = 'error'
        job_dict['error'] = str(e)
        log = logging.getLogger(__name__)
        log.error('xloader error: {}'.format(e))
        errored = True
    except Exception as e:
        db.mark_job_as_errored(
            job_id, traceback.format_tb(sys.exc_traceback)[-1] + repr(e))
        job_dict['status'] = 'error'
        job_dict['error'] = str(e)
        log = logging.getLogger(__name__)
        log.error('xloader error: {}'.format(e))
        errored = True
    finally:
        # job_dict is defined in xloader_hook's docstring
        is_saved_ok = callback_xloader_hook(result_url=input['result_url'],
                                            api_key=input['api_key'],
                                            job_dict=job_dict)
        errored = errored or not is_saved_ok

    return 'error' if errored else None
def start_automated_run(path, automated_run_id):
    """Starts automated run. This will automatically create
    base learners until the run finishes or errors out.

    Args:
        path (str): Path to Xcessiv notebook

        automated_run_id (str): Automated Run ID
    """
    with functions.DBContextManager(path) as session:
        automated_run = session.query(models.AutomatedRun).filter_by(id=automated_run_id).first()
        if not automated_run:
            raise exceptions.UserError('Automated run {} '
                                       'does not exist'.format(automated_run_id))
        automated_run.job_id = get_current_job().id
        automated_run.job_status = 'started'
        session.add(automated_run)
        session.commit()

        try:
            if automated_run.category == 'bayes':
                automatedruns.start_naive_bayes(automated_run, session, path)
            elif automated_run.category == 'tpot':
                automatedruns.start_tpot(automated_run, session, path)
            elif automated_run.category == 'greedy_ensemble_search':
                automatedruns.start_greedy_ensemble_search(automated_run, session, path)
            else:
                raise Exception('Something went wrong. Invalid category for automated run')

            automated_run.job_status = 'finished'
            session.add(automated_run)
            session.commit()
        except:
            session.rollback()
            automated_run.job_status = 'errored'
            automated_run.description['error_type'] = repr(sys.exc_info()[0])
            automated_run.description['error_value'] = repr(sys.exc_info()[1])
            automated_run.description['error_traceback'] = \
                traceback.format_exception(*sys.exc_info())
            session.add(automated_run)
            session.commit()
            raise