The following 35 code examples, extracted from open-source Python projects, illustrate how to use django.db.transaction.commit().
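Most of these examples target Django's pre-1.6 manual transaction API (transaction.commit_manually, transaction.commit_on_success, savepoints), which transaction.atomic replaced in Django 1.6. A minimal sketch of the recurring "commit to get a fresh view, commit again on the way out" pattern, assuming a legacy Django project (MyModel is a hypothetical model):

from django.db import transaction
from myapp.models import MyModel  # hypothetical model

@transaction.commit_manually
def fresh_read():
    # Commit first: under REPEATABLE-READ isolation this discards the
    # current snapshot, so the query below sees other transactions' writes.
    transaction.commit()
    try:
        rows = list(MyModel.objects.all())
    finally:
        # Leave no transaction open when the function exits.
        transaction.commit()
    return rows
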
def dehydrate_stats(self, bundle):
    from chroma_core.models import SimpleHistoStoreTime
    from chroma_core.models import SimpleHistoStoreBin

    stats = {}
    for s in StorageResourceStatistic.objects.filter(storage_resource = bundle.obj):
        from django.db import transaction
        stat_props = s.storage_resource.get_statistic_properties(s.name)
        if isinstance(stat_props, statistics.BytesHistogram):
            with transaction.commit_manually():
                transaction.commit()
                try:
                    time = SimpleHistoStoreTime.objects.filter(storage_resource_statistic = s).latest('time')
                    bins = SimpleHistoStoreBin.objects.filter(histo_store_time = time).order_by('bin_idx')
                finally:
                    transaction.commit()
            type_name = 'histogram'
            # Composite type
            data = {
                'bin_labels': [u'\u2264%s' % (bin[1:] or '') for bin in stat_props.bins],
                'values': [bin.value for bin in bins],
            }
        else:
            type_name = 'timeseries'
            # Go get the data from <resource>/metrics/
            data = None

        label = stat_props.label
        if not label:
            label = s.name

        stat_data = {'name': s.name,
                     'label': label,
                     'type': type_name,
                     'unit_name': stat_props.get_unit_name(),
                     'data': data}
        stats[s.name] = stat_data

    return stats

def tearDown(self):
    super(TestAgentRpc, self).tearDown()

    try:
        with transaction.commit_manually():
            transaction.commit()
        host = ManagedHost.objects.get(fqdn = self.CLIENT_NAME)
        for host_contact_alert in HostContactAlert.filter_by_item(host):
            AlertEmail.objects.filter(alerts__in=[host_contact_alert]).delete()
            host_contact_alert.delete()
        host.mark_deleted()
    except ManagedHost.DoesNotExist:
        pass

def schedule_changed(self):
    try:
        # If MySQL is running with transaction isolation level
        # REPEATABLE-READ (default), then we won't see changes done by
        # other transactions until the current transaction is
        # committed (Issue #41).
        try:
            transaction.commit()
        except transaction.TransactionManagementError:
            pass  # not in transaction management.

        last, ts = self._last_timestamp, self.Changes.last_change()
    except DatabaseError as exc:
        logger.exception('Database gave error: %r', exc)
        return False
    try:
        if ts and ts > (last if last else ts):
            return True
    finally:
        self._last_timestamp = ts
    return False

def _get_command(self, command_id):
    with transaction.commit_manually():
        transaction.commit()
    return Command.objects.get(pk = command_id)

def save(self, *args, **kwargs):
    sid = transaction.savepoint()
    if self.pk is None:
        i = 1
        while self.pk is None:
            # Protection from infinite loop
            if i > 20:
                raise IntegrityError('Too many iterations while generating unique Invoice number.')
            self.payment_no = random.randint(1, 2147483646)
            try:
                super(Invoice, self).save(*args, **kwargs)
            except IntegrityError:
                transaction.savepoint_rollback(sid)
                i += 1
    else:
        super(Invoice, self).save(*args, **kwargs)
    transaction.savepoint_commit(sid)
    transaction.commit()

def purge_trans(options):
    days_opt = options['days']
    days_default = False
    if days_opt == -1:
        days_opt = 0
        days_default = True
    t = Transmission.objects.filter(start_datetime__lt=datetime.now() - timedelta(days=days_opt))
    print('Pruning %s transmissions older than %s days.' % (t.count(), days_opt))
    t.delete()
    print('Pruning complete')

    if 'sqlite' in db_engine:
        def vacuum_db(using='default'):
            cursor = connections[using].cursor()
            cursor.execute("VACUUM")
            transaction.commit()

        print("Vacuuming database...")
        before = os.stat(db_name).st_size
        print("Size before: %s bytes" % before)
        vacuum_db()
        after = os.stat(db_name).st_size
        print("Size after: %s bytes" % after)
        print("Reclaimed: %s bytes" % (before - after))

def forwards(self, orm):
    from sentry.utils.query import RangeQuerySetWrapperWithProgressBar

    Organization = orm['sentry.Organization']
    Team = orm['sentry.Team']

    queryset = Team.objects.filter(
        organization__isnull=True,
    )

    user_orgs = {}
    for team in RangeQuerySetWrapperWithProgressBar(queryset):
        if team.owner_id not in user_orgs:
            user_orgs[team.owner_id] = Organization.objects.create(
                name=team.name.strip() or 'Default',
                owner_id=team.owner_id,
            )
        team.organization = user_orgs[team.owner_id]
        team.save()

    transaction.commit()

def forwards(self, orm):
    from sentry.db.models import create_or_update
    from sentry.utils.query import RangeQuerySetWrapperWithProgressBar

    OrganizationMember = orm['sentry.OrganizationMember']
    Team = orm['sentry.Team']

    queryset = Team.objects.all()

    for team in RangeQuerySetWrapperWithProgressBar(queryset):
        sid = transaction.savepoint()
        try:
            OrganizationMember.objects.create(
                organization_id=team.organization_id,
                user_id=team.owner_id,
                type=0,  # OWNER
            )
        except IntegrityError:
            transaction.savepoint_rollback(sid)
        else:
            transaction.savepoint_commit(sid)

    transaction.commit()

def managed_transaction(func):
    """
    This decorator wraps a function so that all SQL executions in the function are atomic.
    It's used instead of django.db.transaction.commit_on_success in cases where reporting
    exceptions is necessary, as commit_on_success swallows exceptions.
    """
    @wraps(func)
    @transaction.commit_manually
    def _inner(*args, **kwargs):
        try:
            ret = func(*args, **kwargs)
        except Exception:
            transaction.rollback()
            raise
        else:
            transaction.commit()
        return ret
    return _inner

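For illustration, a minimal (hypothetical) use of the decorator above; any exception rolls back the whole function's writes and still propagates to the caller:

@managed_transaction
def import_rows(rows):
    # Either every row is committed, or none are.
    for data in rows:
        MyRow.objects.create(**data)  # MyRow is a hypothetical model
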
def tearDown(self):
    super(TestHttpAgent, self).tearDown()

    try:
        with transaction.commit_manually():
            transaction.commit()
        host = ManagedHost.objects.get(fqdn = self.CLIENT_NAME)
        HostContactAlert.filter_by_item(host).delete()
        host.mark_deleted()
    except ManagedHost.DoesNotExist:
        pass

def root_resource_ids(self, plugin):
    """Return the PK of all StorageResourceRecords for 'plugin' which have no parents"""
    from chroma_core.lib.storage_plugin.manager import storage_plugin_manager

    # We will be polling, so we need to commit to see new data
    with transaction.commit_manually():
        transaction.commit()
        ids = storage_plugin_manager.get_scannable_resource_ids(plugin)
        transaction.commit()
    return ids

def global_remove_resource(self, resource_id):
    with self._instance_lock:
        with transaction.commit_manually():
            # Be extra-sure to see a fresh view (HYD-1301)
            transaction.commit()
        with transaction.commit_on_success():
            log.debug("global_remove_resource: %s" % resource_id)
            try:
                record = StorageResourceRecord.objects.get(pk = resource_id)
            except StorageResourceRecord.DoesNotExist:
                log.error("ResourceManager received invalid request to remove non-existent resource %s" % resource_id)
                return

            self._delete_resource(record)

def _refresh_power_devices(self):
    # Ensure that we have a fresh view of the DB
    with transaction.commit_manually():
        transaction.commit()

    with self._lock:
        for device in PowerControlDevice.objects.all():
            if device.sockaddr not in self._power_devices:
                self._power_devices[device.sockaddr] = device

def on_data(self, fqdn, data):
    with transaction.commit_manually():
        transaction.commit()
    try:
        host = ManagedHost.objects.get(fqdn = fqdn)
        UpdateScan().run(host.id, data)
    except Exception:
        log.error("Error handling lustre message: %s", '\n'.join(traceback.format_exception(*(sys.exc_info()))))

def complete_job(self, job_id, errored):
    if django.db.connection.connection and django.db.connection.connection != DISABLED_CONNECTION:
        log.info("Job %d: open DB connection during completion" % job_id)
        # Ensure that any changes made by this thread are visible to other threads before
        # we ask job_scheduler to advance
        with transaction.commit_manually():
            transaction.commit()

    self.put(('complete_job', (job_id, errored), {}))

def parse(self, fqdn, message):
    hit = find_one_in_many(message['message'], self.selectors.keys())
    if hit:
        h = self.get_host(fqdn)
        if h is None:
            return
        fn = self.selectors[hit]
        with transaction.commit_manually():
            try:
                fn(message['message'], h)
            except Exception as e:
                syslog_events_log.error("Failed to parse log line '%s' using handler %s: %s" % (message['message'], fn, e))
                transaction.rollback()
            else:
                transaction.commit()

def process_response(self, request, response):
    if transaction.is_managed():
        if transaction.is_dirty():
            successful = not isinstance(response, http.HttpApplicationError)
            if successful:
                transaction.commit()
            else:
                transaction.rollback()
        transaction.leave_transaction_management()
    return response

def manage_subtask():
    _tasklist = Task.objects.filter(status='WAITTING')
    for _task in _tasklist:
        attack_type = _task.attack_type
        attack_target = _task.attack_target
        if attack_type == 'fnascan':
            attack_target_list = get_ip_list(attack_target)
            print ">>>>>>>attack_target_list", attack_target_list
            size = 10  # split the target IPs into chunks of this size
            lol = lambda lst, sz: [lst[i:i+sz] for i in range(0, len(lst), sz)]
            for i in lol(attack_target_list, size):
                i = ','.join(i)
                _subtask = SubTask(attack_target = i, attack_type = attack_type, task_name = '', status = 'WAITTING', parameter = '')
                _subtask.save()
            # mark the main task as RUNNING
            _maintask = Task.objects.get(id = _task.id)
            _maintask.status = 'RUNNING'
            _maintask.save()
            transaction.commit()
        if attack_type == 'bugscan':
            attack_target_list = [attack_target, ]
            print ">>>bugscan>>>>attack_target_list", attack_target_list
            _t = run_bugscan.delay(attack_target_list)
            _maintask = Task.objects.get(id = _task.id)
            _maintask.status = 'RUNNING'
            _maintask.save()
            transaction.commit()

def run_subtask(task_id):
    _task = SubTask.objects.get(id=task_id)
    attack_type = _task.attack_type
    attack_target = _task.attack_target
    if attack_type == 'fnascan':
        _t = run_fnascan.delay(attack_target)  # e.g. 221.226.15.243-221.226.15.245 , 221.226.15.243,221.226.15.2
        _task.status = 'RUNNING'
        _task.start_time = timezone.now()
    if attack_type == 'subdomainbrute':
        _t = run_subdomainbrute.delay(attack_target)
        _task.status = 'RUNNING'
        _task.start_time = timezone.now()
    if attack_type == 'test':
        _t = add.delay(attack_target)
        _task.status = 'RUNNING'
        _task.start_time = timezone.now()
    # attack_type values prefixed with ATK_ are multi-module attacks, handled elsewhere
    if attack_type == 'ATK_K0':
        pass
    # if attack_type was not recognized above, _t is unbound
    try:
        _t
    except:
        _task.except_message = 'Can not identify scantype'
        _task.save()
        transaction.commit()
        return
    _task.task_id = _t.id
    _task.save()
    transaction.commit()
    RUNNING_TASK[task_id] = _t
    # also record the attack_type, keyed by task_id plus a trailing underscore
    RUNNING_TASK[str(task_id) + '_'] = attack_type

def result_2_db(task_id, attack_type, task_obj):
    # task_obj == RUNNING_TASK[key]
    # fetch the finished task's result
    _templist = task_obj.get()
    if "The" in _templist[0][:5]:
        _templist[0] = _templist[1]
        _templist[1] = _templist[2]
    _simple_dic = eval(_templist[0])
    _detail_dic = eval(_templist[1])
    #_templist[0] = {'221.226.15.246': ['443', '80 web \xe5\x8d\x97\xe7\x91\x9e\xe7\xbb\xa7\xe4\xbf\x9dVPN\xe7\x99\xbb\xe9\x99\x86'], '221.226.15.249': ['8081 web Apache Tomcat/7.0.57'], '221.226.15.250': ['80'], '221.226.15.243': ['80', '9200 Elasticsearch(default)', '8000 web']}
    #print _simple_dic,_detail_dic
    for _ip in _simple_dic.keys():
        for service_name in _simple_dic[_ip]:
            _port = service_name.split(' ')[0]
            ip_port = '%s:%s' % (_ip, _port)
            web_title = ''
            if len(service_name.split(' ')) > 2:
                web_title = service_name.split(' ')[-1]
            detail = _get_detail(_detail_dic, _ip, _port)
            # save the scan result
            _result = FnascanResult(task_id=task_id, ip=_ip, port = _port, service_name = service_name, service_detail = detail, web_title = web_title)
            _result.save()
    if attack_type == 'subdomainbrute':
        print _templist
    transaction.commit()

def check_task():
    for key in RUNNING_TASK.keys():
        # keys ending in '_' hold the attack_type, not a task handle
        if str(key)[-1] != '_':
            if RUNNING_TASK[key].ready():
                # the task has finished
                _end_task = Task.objects.get(id=key)
                if RUNNING_TASK[key].failed():
                    _end_task.status = 'FAILURE'
                else:
                    _end_task.status = 'SUCCESS'
                    attack_type = RUNNING_TASK[str(key)+'_']
                    task_id = key
                    result_2_db(task_id, attack_type, RUNNING_TASK[key])
                _end_task.end_time = timezone.now()
                _tmp_task = Result(task_id = _end_task.task_id, detail = RUNNING_TASK[key].get())
                _end_task.save()
                _tmp_task.save()
                transaction.commit()
                del RUNNING_TASK[key]
    # tasks marked RUNNING in the DB but missing from RUNNING_TASK should be failed:
    '''
    RUNNING_TASK_IN_DB = Task.objects.filter(Q(status = 'RUNNING'))
    for _i in RUNNING_TASK_IN_DB:
        if _i.id not in RUNNING_TASK.keys():
            _i.status = 'FAILURE'
            _i.except_message = 'Something error when check_task()'
            #_i.end_time = timezone.now()
            _i.save()
    #RUNNING_TASK_IN_DB.save()
    transaction.commit()
    '''

def task_sched():
    while True:
        time.sleep(CHECK_TIME)
        tasklist = SubTask.objects.filter(status='WAITTING')  # tasks waiting to be scheduled
        #print '>>>>>>RUNNING_TASK', RUNNING_TASK
        for _task in tasklist:
            #_task.status='RUNNING'
            #_task.start_time = timezone.now()
            #_task.save()
            #transaction.commit()
            run_subtask(_task.id)
        manage_subtask()
        check_task()

def atk_add(request):
    '''Create a multi-module (ATK) attack task.'''
    #print 100*"b"
    if request.method == 'GET':
        form = TaskAddForm()
    else:
        form = TaskAddForm(request.POST)
    # end if
    if not form.is_valid():
        #print 1000*'A'
        return render(
            request, 'task_add.html',
            context_instance = RequestContext(request, {
                'form': form,
            })
        )
    attack_route = form.cleaned_data.get('attack_route', '')
    target = form.cleaned_data.get('target', '')
    task_name = form.cleaned_data.get('task_name', '')
    target = target.strip()
    #print 100*'A'
    # ATK attack routes are numbered ATK_K0, ATK_K1, ...
    task = Task(attack_target = target, attack_type = 'ATK_' + str(attack_route), task_name = task_name, status = 'WAITTING')
    task.save()
    transaction.commit()
    print ">>>>>>>Starting Multi Module ATTACK %s<<<<<<<" % str(target)
    html = '<!doctype html><html><head><script>confirm("%s"); window.location = "/";</script></head><body> </html>'
    if len('cc') > 1:  # always true
        ret = html % 'Task %s submitted' % str(target)
    else:
        ret = html % 'Task %i submitted' % str(target)
    # end if
    return response(ret)
# end def atk_add

def task_operate(request):
    if request.method == 'GET':
        operate_type = request.GET.get('type', '')
        id = request.GET.get('task_id', '')
        id = int(id)
    # end if
    _task = Task.objects.get(id = id)
    _result = Result.objects.get(task_id = _task.task_id)
    if operate_type == 'stop':
        pass
    # end if
    if operate_type == 'delete':
        _task.delete()
        _result.delete()
        pass
    # end if
    if operate_type == 'restart':
        pass
    # end if
    _task.save()
    _result.save()
    transaction.commit()
    announcement = 'TASK %d has %s' % (id, operate_type)
    return response(announcement)
# end def task_operate

def _get_icp(dt, icp_code):
    # split the raw page text into one record per '详细' ("details") marker
    _tmp_list = dt.split(u'\u8be6\u7ec6\r\n')
    for one_record in _tmp_list:
        _tmp_record = {}
        one_record = one_record.split('\t')
        _tmp_domainlist = one_record[5].split('\r\n')
        for _tmp_domain in _tmp_domainlist:
            if len(_tmp_domain.strip()) > 3:
                _tmp_record['domain_name'] = _tmp_domain       # domain name
                _tmp_record['co_name'] = one_record[1]         # registrant / company name
                _tmp_record['domain_type'] = one_record[2]     # record type
                _tmp_record['domain_title'] = one_record[4]    # site title
                _tmp_record['check_time'] = one_record[6]      # review time
                #icp_code = models.TextField(null=True, blank=True)
                _tmp_record['icp_code_2'] = one_record[3]      # ICP licence number
                #beian_id = one_record[]
                #insert_time = models.DateTimeField(null=True, blank=True)
                #print _tmp_record
                _t = ICPCheck(domain_name = _tmp_record['domain_name'], co_name = _tmp_record['co_name'],
                              domain_type = _tmp_record['domain_type'], domain_title = _tmp_record['domain_title'],
                              check_time = _tmp_record['check_time'], icp_code = icp_code,
                              icp_code_2 = _tmp_record['icp_code_2'], byhand = 1)
                _t.save()
            # end if
        # end for
    # end for
    transaction.commit()
# end def _get_icp

def forwards(self, orm):
    from sentry.constants import RESERVED_ORGANIZATION_SLUGS
    from sentry.db.models.utils import slugify_instance
    from sentry.utils.query import RangeQuerySetWrapperWithProgressBar

    Organization = orm['sentry.Organization']

    queryset = Organization.objects.filter(slug__isnull=True)
    for org in RangeQuerySetWrapperWithProgressBar(queryset):
        slugify_instance(org, org.name, RESERVED_ORGANIZATION_SLUGS)
        org.save()

    transaction.commit()

def forwards(self, orm):
    Organization = orm['sentry.Organization']
    OrganizationMember = orm['sentry.OrganizationMember']
    OrganizationMemberTeam = orm['sentry.OrganizationMemberTeam']
    Team = orm['sentry.Team']

    for org in Organization.objects.all():
        members = OrganizationMember.objects.filter(
            organization=org,
            has_global_access=True,
        )
        teams = Team.objects.filter(organization=org)

        for member in members:
            for team in teams:
                # XXX(dcramer): South doesnt like us using transactions here
                try:
                    sid = transaction.savepoint()
                    OrganizationMemberTeam.objects.create(
                        team=team,
                        organizationmember=member,
                        is_active=True,
                    )
                except IntegrityError:
                    transaction.savepoint_rollback(sid)
                else:
                    transaction.savepoint_commit(sid)

    transaction.commit()

def test_created_bracketed_by_before_and_after_time(self):
    before = now()
    obj = TimestampedModelTestModel()
    obj.save()
    transaction.commit()
    after = now()
    self.assertLessEqual(before, obj.created)
    self.assertGreaterEqual(after, obj.created)

def test_updated_is_updated_when_object_saved(self):
    obj = TimestampedModelTestModel()
    obj.save()
    old_updated = obj.updated
    transaction.commit()
    obj.save()
    self.assertLessEqual(old_updated, obj.updated)

def test_now_returns_transaction_time(self):
    date_now = now()
    # Perform a write database operation.
    obj = TimestampedModelTestModel()
    obj.save()
    transaction.commit()
    self.assertLessEqual(date_now, now())

def handle(self, **options):
    '''
    In this example command, the values that come from the user input are added
    up and the result is printed in the screen.
    '''
    stack = options['path'][0]
    output_vector_file = '/Users/agutierrez/%s.gpkg' % get_basename(stack)
    segmentation = bis.Model()
    shapes, transform, meta = segmentation.predict(stack)
    # Vectorize
    #shapes = features.shapes(segments.astype(np.uint16), transform=transform)
    start_time = time.time()
    print 'about to start query'
    query = persist_database(shapes, meta)
    print 'done'
    print time.time() - start_time
    start_time = time.time()
    with connection.cursor() as cursor:
        cursor.execute(query)
        transaction.commit()
    print time.time() - start_time
    #persist_file(shapes, output_vector_file, meta)

def test_timeout(self):
    """Test that when a session is established, then left idle for the timeout period,
    the http_agent service emits a termination message on the RX channel."""
    session_id = self._open_session()

    # No alert to begin with
    alerts = HostContactAlert.filter_by_item(self.host)
    self.assertEqual(alerts.count(), 0)

    time.sleep(HostState.CONTACT_TIMEOUT + HostStatePoller.POLL_INTERVAL + RABBITMQ_GRACE_PERIOD)

    # Should be one SESSION_TERMINATE message to AMQP with a matching session ID
    message = self._receive_one_amqp()
    self.assertDictEqual(message, {
        'fqdn': self.CLIENT_NAME,
        'type': 'SESSION_TERMINATE',
        'plugin': self.PLUGIN,
        'session_seq': None,
        'session_id': session_id,
        'body': None
    })

    with transaction.commit_manually():
        transaction.commit()
    alerts = HostContactAlert.filter_by_item(self.host)
    self.assertEqual(alerts.count(), 1)

    # Should be a message waiting for the agent telling it that its session was terminated
    # (timing out doesn't mean the agent is gone, it could just be experiencing network difficulties)
    # What's more, the agent doesn't necessarily *know* that it had network difficulties, e.g. if it
    # just got real slow and waited too long between GETs.
    # This has to cut both ways to be reliable:
    #  * We have to tell the agent that we thought it went away, by sending a TERMINATE for sessions
    #  * If the agent finds that a GET fails then it has to assume that we might have put session
    #    messages in that GET, and terminate all its sessions in case one of those GET messages
    #    was really a TERMINATE
    response = self._get()
    self.assertResponseOk(response)
    forwarded_messages = response.json()['messages']
    self.assertEqual(len(forwarded_messages), 1)
    self.assertDictEqual(forwarded_messages[0], {
        'fqdn': self.CLIENT_NAME,
        'type': 'SESSION_TERMINATE',
        'plugin': self.PLUGIN,
        'session_seq': None,
        'session_id': None,
        'body': None
    })

def _call(self, fn_name, *args, **kwargs):
    with transaction.commit_manually():
        transaction.commit()

    # If the caller specified rpc_timeout then fetch it from the kwargs and remove.
    rpc_timeout = kwargs.pop('rpc_timeout', RESPONSE_TIMEOUT)

    request_id = uuid.uuid4().__str__()
    request = {
        'method': fn_name,
        'args': args,
        'kwargs': kwargs,
        'request_id': request_id}

    log.debug("Starting rpc: %s, id: %s " % (fn_name, request_id))
    log.debug("_call: %s %s %s %s" % (request_id, fn_name, args, kwargs))

    rpc_client = RpcClientFactory.get_client(self.__class__.__name__)

    result = rpc_client.call(request, rpc_timeout)

    if result['exception']:
        log.error("ServiceRpcInterface._call: exception %s: %s \ttraceback: %s" % (result['exception'], result['exception_type'], result.get('traceback')))
        raise RpcError(result['exception'], result.get('exception_type'), traceback=result.get('traceback'))
    else:
        # NB: 'result' can be very large, and almost cripple the various logs where
        # rpcs are run: http.log, job_scheduler.log, etc.
        # If you want to see response result data from rpcs at the INFO level, consider writing
        # log messages into the JobSchedulerClient calls. Leaving this in for DEBUG.
        if log.getEffectiveLevel() != logging.DEBUG:
            # Truncate message
            result100 = str(result)[:100]
            if str(result) != result100:
                result100 += "..."
            result_str = result100
        else:
            result_str = result

        log.debug("Completed rpc: %s, id: %s, result: %s" % (fn_name, request_id, result_str))

    return result['result']

def task_add(request):
    '''Create a single-module scan task.'''
    if request.method == 'GET':
        form = TaskAddForm()
    else:
        form = TaskAddForm(request.POST)
    # end if
    if not form.is_valid():
        #print 1000*'A'
        return render(
            request, 'task_add.html',
            context_instance = RequestContext(request, {
                'form': form,
            })
        )
    target = form.cleaned_data.get('target', '')
    # single-module tasks are submitted via /task/add
    if str(request.path).lower() == '/task/add':
        attack_type = form.cleaned_data.get('attack_type', '')
        task_name = form.cleaned_data.get('task_name', '')
        target = target.strip()
        target = target.strip().split('\r\n')
        if len(target) > 1:
            target = ','.join(target)  # fnascan accepts comma-separated targets
        elif len(target) == 1:
            target = target[0]
        # end if
        param = dict(form.data)
        for k in param.keys():
            param[k] = param[k][0]
        # end for
        param[u'ip_range'] = target
        param_str = json.dumps(param)
        print ">>>>>>>Starting Single Module ATTACK %s<<<<<<<" % target
        task = Task(attack_target = target, attack_type = attack_type, task_name = task_name, status = 'WAITTING', parameter = param_str)
        task.save()
        transaction.commit()
    # end if
    html = '<!doctype html><html><head><script>confirm("%s"); window.location = "/";</script></head><body> </html>'
    if len('cc') > 1:  # always true
        ret = html % 'Task %s submitted' % str(target)
    else:
        ret = html % 'Task %i submitted' % str(target)
    # end if
    return response(ret)
# end def task_add

def create_instance(username, xml_file, media_files,
                    status=u'submitted_via_web', uuid=None,
                    date_created_override=None, request=None):
    """
    I used to check if this file had been submitted already, I've
    taken this out because it was too slow. Now we're going to create
    a way for an admin to mark duplicate instances. This should
    simplify things a bit.
    Submission cases:
        If there is a username and no uuid, submitting an old ODK form.
        If there is a username and a uuid, submitting a new ODK form.
    """
    try:
        instance = None
        submitted_by = request.user \
            if request and request.user.is_authenticated() else None

        if username:
            username = username.lower()

        xml = xml_file.read()
        xform = get_xform_from_submission(xml, username, uuid)
        check_submission_permissions(request, xform)

        existing_instance_count = Instance.objects.filter(
            xml=xml, xform__user=xform.user).count()

        if existing_instance_count > 0:
            existing_instance = Instance.objects.filter(
                xml=xml, xform__user=xform.user)[0]
            if not existing_instance.xform or\
                    existing_instance.xform.has_start_time:
                # Ignore submission as a duplicate IFF
                # * a submission's XForm collects start time
                # * the submitted XML is an exact match with one that
                #   has already been submitted for that user.
                raise DuplicateInstance()

        # get new and deprecated uuid's
        new_uuid = get_uuid_from_xml(xml)
        duplicate_instances = Instance.objects.filter(uuid=new_uuid)

        if duplicate_instances:
            for f in media_files:
                Attachment.objects.get_or_create(
                    instance=duplicate_instances[0],
                    media_file=f, mimetype=f.content_type)
            # ensure we have saved the extra attachments
            transaction.commit()
            raise DuplicateInstance()

        instance = save_submission(xform, xml, media_files, new_uuid,
                                   submitted_by, status,
                                   date_created_override)
        # commit all changes
        transaction.commit()

        return instance
    except Exception:
        transaction.rollback()
        raise