Python django.db.connection module: cursor() usage examples

The following 49 code examples, extracted from open-source Python projects, illustrate how to use django.db.connection.cursor().
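
All of the examples follow the same basic pattern: obtain a cursor from the default connection, execute parameterized SQL, and fetch the results. Here is a minimal sketch of that pattern; the table and column names are hypothetical, chosen only for illustration.

from django.db import connection

def count_failed_logins(threshold):
    # Open the cursor as a context manager so it is always closed, and pass
    # values through %s placeholders (Django uses %s on every backend) so the
    # driver handles quoting; never interpolate values with % or .format().
    with connection.cursor() as cursor:
        cursor.execute(
            "SELECT COUNT(*) FROM app_login_event WHERE failures >= %s",
            [threshold],
        )
        return cursor.fetchone()[0]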

Project: pfb-network-connectivity    Author: azavea    | Project source | File source
def get(self, request, format=None, *args, **kwargs):
        query = """
        SELECT row_to_json(fc)
        FROM (
            SELECT 'FeatureCollection' AS type,
                array_to_json(array_agg(f)) AS features
            FROM (SELECT 'Feature' AS type,
                  ST_AsGeoJSON(g.geom_simple)::json AS geometry,
                  g.uuid AS id,
                  row_to_json((SELECT p FROM (
                    SELECT uuid AS id, name, label, state_abbrev, organization_id) AS p))
                    AS properties
            FROM pfb_analysis_neighborhood AS g WHERE g.visibility <> %s) AS f) AS fc;
        """

        with connection.cursor() as cursor:
            cursor.execute(query, [Neighborhood.Visibility.HIDDEN])
            json = cursor.fetchone()
            if not json or not len(json):
                return Response({})

        return Response(json[0])
Project: wizard    Author: honor100    | Project source | File source
def getWorkChartsByPercent():
    cursor = connection.cursor()
    sql = "select sum(notes->'$.insert') `insert`,sum(notes->'$.delete') `delete`,sum(notes->'$.update') `update`," \
          "sum(notes->'$.create') `create`,sum(notes->'$.alter') `alter` from %s" % TABLE
    cursor.execute(sql)
    field_names = [item[0] for item in cursor.description]
    rawData = cursor.fetchall()
    #
    result = []
    for row in rawData:
        objDict = {}
        # Pair each column name from cursor.description with its row value to build a dict
        for index, value in enumerate(row):
            objDict[field_names[index]] = value
        result.append(objDict)
    return result

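The loop above rebuilds each row as a dict by pairing the column names from cursor.description with the row tuple. Django's documentation packages the same idea as a small dictfetchall helper; here is a sketch of that recipe, which is presumably also the shape of the _dictfetchall helper referenced but not shown in the vaultier excerpts further down.

def dictfetchall(cursor):
    # Return all rows from a cursor as a list of dicts keyed by column name.
    columns = [col[0] for col in cursor.description]
    return [dict(zip(columns, row)) for row in cursor.fetchall()]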
Project: handelsregister    Author: Amsterdam    | Project source | File source
def health(request):
    # check database
    try:
        with connection.cursor() as cursor:
            cursor.execute("select 1")
            assert cursor.fetchone()
    except Exception:
        log.exception("Database connectivity failed")
        return HttpResponse(
            "Database connectivity failed",
            content_type="text/plain", status=500)

    # check debug
    if settings.DEBUG:
        log.exception("Debug mode not allowed in production")
        return HttpResponse(
            "Debug mode not allowed in production",
            content_type="text/plain", status=500)

    return HttpResponse("Health OK", content_type='text/plain', status=200)
Project: DCPanel    Author: vladgr    | Project source | File source
def validate_server_db1():
    """Server may have only single database of particular type"""
    cursor = connection.cursor()

    query = """
        SELECT MAX(counted) FROM
        (
            SELECT COUNT(*) AS counted
            FROM api_db
            WHERE type = "S"
            GROUP BY server_id,type_db
        ) AS counts;
        """

    cursor.execute(query)
    res = cursor.fetchone()[0]
    if res > 1:
        return [
            'Server may have only single database of particular type!']
    return []
Project: pfb-network-connectivity    Author: azavea    | Project source | File source
def get(self, request, format=None, *args, **kwargs):
        """
        Uses a raw query to fetch the GeoJSON because it is much faster to let
        PostGIS generate it than to build it with a Django serializer.
        """
        query = """
        SELECT row_to_json(fc)
        FROM (
            SELECT 'FeatureCollection' AS type,
                array_to_json(array_agg(f)) AS features
            FROM (SELECT 'Feature' AS type, ST_AsGeoJSON(g.geom_pt)::json AS geometry, g.uuid AS id,
                  row_to_json((SELECT p FROM (
                    SELECT uuid AS id, name, label, state_abbrev, organization_id) AS p))
                    AS properties
        FROM pfb_analysis_neighborhood AS g WHERE g.visibility <> %s) AS f) AS fc;
        """

        with connection.cursor() as cursor:
            cursor.execute(query, [Neighborhood.Visibility.HIDDEN])
            json = cursor.fetchone()
            if not json or not len(json):
                return Response({})

        return Response(json[0])
Project: django-calaccess-processed-data    Author: california-civic-data-coalition    | Project source | File source
def load_raw_data(self):
        """
        Load the model by executing its raw sql load query.

        Temporarily drops any constraints or indexes on the model.
        """
        try:
            self.drop_constraints_and_indexes()
        except ValueError as e:
            print(e)
            print('Constrained fields: %s' % self.constrained_fields)
            print('Indexed fields: %s' % self.indexed_fields)
            dropped = False
        else:
            dropped = True

        c = connection.cursor()
        try:
            c.execute(self.raw_data_load_query)
        finally:
            c.close()
            if dropped:
                self.add_constraints_and_indexes()
Project: django-scrub-pii    Author: MatthewWilkes    | Project source | File source
def test_query_sanitises_date_data(self):
        first_purchase = Purchase.objects.get(book__title="Baking things")
        second_purchase = Purchase.objects.get(book__title="Treaty Negotiations")
        clive = Person.objects.get(first_name="Clive")

        assert first_purchase.purchased_at > second_purchase.purchased_at
        assert clive.date_of_birth == datetime.date(1920, 1, 9)

        cursor = connection.cursor()
        cursor.execute(get_updates_for_model(Purchase))
        cursor.execute(get_updates_for_model(Person))

        first_purchase = Purchase.objects.get(book__title="Baking things")
        second_purchase = Purchase.objects.get(book__title="Treaty Negotiations")
        clive = Person.objects.get(pk=clive.pk)

        assert first_purchase.purchased_at == second_purchase.purchased_at
        assert clive.date_of_birth != datetime.date(1920, 1, 9)
Project: PrivacyScore    Author: PrivacyScore    | Project source | File source
def faq(request: HttpRequest):
    num_scans = Site.objects.filter(scans__isnull=False).count()
    num_scanning_sites = Scan.objects.filter(end__isnull=True).count()

    query = '''SELECT
        COUNT(jsonb_array_length("result"->'leaks'))
        FROM backend_scanresult
        WHERE backend_scanresult.scan_id IN (
            SELECT backend_site.last_scan_id
            FROM backend_site
            WHERE backend_site.last_scan_id IS NOT NULL)
        AND jsonb_array_length("result"->'leaks') > 0'''

    with connection.cursor() as cursor:
        cursor.execute(query)
        num_sites_failing_serverleak = cursor.fetchone()[0]

    return render(request, 'frontend/faq.html', {
        'num_scanning_sites': num_scanning_sites,
        'num_scans': num_scans,
        'num_sites': Site.objects.count(),
        'num_sites_failing_serverleak': num_sites_failing_serverleak
    })
Project: DCRM    Author: 82Flex    | Project source | File source
def db_status():
    cursor = connection.cursor()
    status = collections.OrderedDict()
    query = ['Uptime','Queries','Threads_running','Slow_queries','Flush_commands','Open_tables']

    for key in query:
        sql = ("SHOW STATUS LIKE '%s'") % key
        cursor.execute(sql)
        for (Variable_name, Value) in cursor:
            status[Variable_name] = int(Value)
    try:
        status['QPS'] = round(status['Queries']/status['Uptime'],2)
    except (KeyError, ZeroDivisionError):
        status['QPS'] = 0

    return status
Project: healthchecks_asgards    Author: andela    | Project source | File source
def _pg(cursor):
    cursor.execute("""
    CREATE OR REPLACE FUNCTION update_alert_after()
    RETURNS trigger AS $update_alert_after$
        BEGIN
            IF NEW.last_ping IS NOT NULL THEN
                NEW.alert_after := NEW.last_ping + NEW.timeout + NEW.grace;
            END IF;
            RETURN NEW;
        END;
    $update_alert_after$ LANGUAGE plpgsql;

    DROP TRIGGER IF EXISTS update_alert_after ON api_check;

    CREATE TRIGGER update_alert_after
    BEFORE INSERT OR UPDATE OF last_ping, timeout, grace  ON api_check
    FOR EACH ROW EXECUTE PROCEDURE update_alert_after();
    """)
Project: healthchecks_asgards    Author: andela    | Project source | File source
def _sqlite(cursor):
    cursor.execute("""
    DROP TRIGGER IF EXISTS update_alert_after;
    """)

    cursor.execute("""
    CREATE TRIGGER update_alert_after
    AFTER UPDATE OF last_ping, timeout, grace ON api_check
    FOR EACH ROW BEGIN
        UPDATE api_check
        SET alert_after =
            datetime(strftime('%s', last_ping) +
            timeout/1000000 + grace/1000000, 'unixepoch')
        WHERE id = OLD.id;
    END;
    """)
Project: micromasters    Author: mitodl    | Project source | File source
from contextlib import contextmanager

@contextmanager
def remove_delete_protection(*models):
    """
    Temporarily removes delete protection on any number of models

    Args:
        *models: One or more models whose tables will have delete protection temporarily removed
    """
    table_names = [model._meta.db_table for model in models]
    with connection.cursor() as cursor:
        for table_name in table_names:
            cursor.execute("DROP RULE delete_protect ON {}".format(table_name))
        try:
            yield
        finally:
            for table_name in reversed(table_names):
                cursor.execute("CREATE RULE delete_protect AS ON DELETE TO {} DO INSTEAD NOTHING".format(table_name))
Project: micromasters    Author: mitodl    | Project source | File source
@pytest.fixture(scope='session')
def django_db_setup(django_db_setup, django_db_blocker, database_loader):
    """
    Fixture provided by pytest-django to allow for custom Django database config.
    'django_db_setup' exists in the arguments because we want to perform the normal pytest-django
    database setup before applying our own changes.
    """
    with django_db_blocker.unblock():
        with connection.cursor() as cur:
            load_from_existing_db = should_load_from_existing_db(database_loader, cur)
            if not load_from_existing_db:
                # Drop a wagtail table due to a bug: https://github.com/wagtail/wagtail/issues/1824
                cur.execute('DROP TABLE IF EXISTS wagtailsearch_editorspick CASCADE;')
                # Create the initial post-migration database backup to be restored before each test case
                database_loader.create_backup(db_cursor=cur)
    if load_from_existing_db:
        with django_db_blocker.unblock():
            terminate_db_connections()
        database_loader.load_backup()
Project: micromasters    Author: mitodl    | Project source | File source
def terminate_db_connections():
    """Terminates active connections to the database being used by Django"""
    kill_connection_sql = \
        "SELECT pg_terminate_backend(pg_stat_activity.pid) FROM pg_stat_activity WHERE pid <> pg_backend_pid();"
    with connection.cursor() as cur:
        cur.execute(kill_connection_sql)
    connection.close()
Project: micromasters    Author: mitodl    | Project source | File source
def should_load_from_existing_db(database_loader, cursor):
    """
    Helper method to determine whether or not a backup database should be loaded to begin
    test execution. A backup db should be used if that backup exists, and if the pytest config
    options don't indicate that the database should be freshly created to start the test
    suite execution.

    Args:
        database_loader (DatabaseLoader): A DatabaseLoader instance
        cursor (django.db.connection.cursor): A database cursor

    Returns:
        bool: Whether or not a backup database should be loaded to begin test execution
    """
    # We should load a db backup to start the test suite if that backup exists,
    # and if the config options don't indicate that the database should be freshly
    # created to start the test suite execution
    return (
        pytest.config.option.reuse_db and
        not pytest.config.option.create_db and
        database_loader.has_backup(db_cursor=cursor)
    )
Project: wizard    Author: honor100    | Project source | File source
def updateStatistics():
    cursor = connection.cursor()
    sql = "select id, sql_content, notes from %s" % TABLE
    cursor.execute(sql)
    result = cursor.fetchall()
    #
    for id, sql_content, notes in result:
        dxl = {'insert':0, 'delete':0, 'update':0, 'create':0, 'alter':0}
        sqlList = sql_content.lower().split(';')
        for sql in sqlList:
            for key in dxl.keys():
                if sql.strip().startswith(key):
                    dxl[key] += 1
        #
        notes = json.loads(notes)
        notes.update(dxl)
        update_sql = """update %s set notes='%s' where id=%d""" % (TABLE, json.dumps(notes), id)
        cursor.execute(update_sql)
Project: wizard    Author: honor100    | Project source | File source
def getDbsFromClusterName(request):
    if request.is_ajax():
        cluster_name = request.POST.get("cluster_name")
    else:
        cluster_name = request.POST['cluster_name']

    # cursor = connection.cursor()
    # cursor.execute("select distinct table_schema from dbconfig_mysql_cluster_metadata where cluster_name=%s", (cluster_name,))
    # dbs = [db[0] for db in cursor.fetchall()]
    # cursor.close()

    dictConn = getSlaveConnStr(cluster_name)
    if not dictConn:
        dictConn = getMasterConnStr(cluster_name)
    Host = dictConn['Host']
    Port = dictConn['Port']
    User = dictConn['User']
    Password = dictConn['Password']
    dbs = getMySQLClusterDbs(Host, Port, User, Password)

    result = {'status':0, 'msg':'ok', 'data':dbs}
    return HttpResponse(json.dumps(result), content_type='application/json')
Project: django-mpathy    Author: craigds    | Project source | File source
def post_migrate_mpathnode(model):
    # Note: model *isn't* a subclass of MPathNode, because django migrations are Weird.
    # if not issubclass(model, MPathNode):
    # Hence the following workaround:
    try:
        ltree_field = model._meta.get_field('ltree')
        if not isinstance(ltree_field, LTreeField):
            return
    except FieldDoesNotExist:
        return

    names = {
        "table": quote_ident(model._meta.db_table, connection.connection),
        "check_constraint": quote_ident('%s__check_ltree' % model._meta.db_table, connection.connection),
    }

    cur = connection.cursor()
    # Check that the ltree is always consistent with being a child of _parent
    cur.execute('''
        ALTER TABLE %(table)s ADD CONSTRAINT %(check_constraint)s CHECK (
            (parent_id IS NOT NULL AND ltree ~ (parent_id::text || '.*{1}')::lquery)
            OR (parent_id IS NULL AND ltree ~ '*{1}'::lquery)
        )
    ''' % names)
Project: autoreg    Author: pbeyssac    | Project source | File source
def domainundelete(request, fqdn):
  if request.method != "POST":
    raise SuspiciousOperation
  if not request.user.is_authenticated() or not request.user.is_active:
    raise PermissionDenied
  if fqdn != fqdn.lower():
    raise PermissionDenied
  if not check_handle_domain_auth(connection.cursor(),
                                  request.user.username, fqdn):
    return HttpResponseForbidden(_("Unauthorized"))

  dbh = psycopg2.connect(autoreg.conf.dbstring)
  dd = autoreg.dns.db.db(dbh)
  dd.login('autoreg')

  dd.undelete(fqdn, None)

  return HttpResponseRedirect(reverse(domainedit, args=[fqdn]))
Project: autoreg    Author: pbeyssac    | Project source | File source
def _gen_checksoa_log(domain, handle, nsiplist=None, doit=False,
                      newdomain=False, form=None, dnsdb=None,
                      level=autoreg.dns.check.LEVEL_NS):
  """Same as _gen_checksoa(), and keep a log of the output."""
  soac = autoreg.dns.check.SOAChecker(domain, {}, {})
  soac.set_level(level)
  rec = []
  dbc = connection.cursor()
  contact = Contacts.objects.get(handle=handle.upper())
  for line in _gen_checksoa(domain, nsiplist, doit, dnsdb, soac, contact,
                            newdomain, form):
    rec.append(line)
    yield line
  dbc.execute("INSERT INTO requests_log"
              " (fqdn, contact_id, output, errors, warnings)"
              " VALUES (%s, %s, %s, %s, %s)",
              (domain, contact.id, ''.join(rec), soac.errs, soac.warns))
  assert dbc.rowcount == 1
Project: autoreg    Author: pbeyssac    | Project source | File source
def rqdom(request, domain):
  if request.method != "GET":
    raise SuspiciousOperation
  if not request.user.is_authenticated() or not request.user.is_active:
    return HttpResponseRedirect(URILOGIN + '?next=%s' % request.path)
  login = admin_login(connection.cursor(), request.user.username)
  if not login:
    raise PermissionDenied
  if domain.upper() != domain:
    return HttpResponseRedirect(reverse(rqlistdom, args=[domain.upper()]))

  rlist = _rq_list_dom(domain)
  i = 1
  for r in rlist:
    _rq1(request, r)
    r.suffix = i
    i += 1
  vars = { 'rlist': rlist,
           'goto': request.GET.get('page', '') }
  return render(request, 'requests/rqdisplay.html', vars)
Project: autoreg    Author: pbeyssac    | Project source | File source
def rqdisplaychecked(request):
  if request.method != "GET":
    raise SuspiciousOperation
  if not request.user.is_authenticated() or not request.user.is_active:
    return HttpResponseRedirect(URILOGIN + '?next=%s' % request.path)
  login = admin_login(connection.cursor(), request.user.username)
  if not login:
    raise PermissionDenied

  # 'domain' is undefined in this excerpt; presumably it should come from the
  # query string, as in rqlistdom() below.
  domain = request.GET.get('domain', '').upper()
  rlist = _rq_list_dom(domain)
  i = 1
  for r in rlist:
    _rq1(request, r)
    r.suffix = i
    i += 1
  vars = { 'rlist': rlist,
           'goto': request.GET.get('page', '') }
  return render(request, 'requests/rqdisplay.html', vars)
Project: autoreg    Author: pbeyssac    | Project source | File source
def rqlistdom(request, domain=None):
  if request.method != "GET":
    raise SuspiciousOperation
  if not request.user.is_authenticated() or not request.user.is_active:
    return HttpResponseRedirect(URILOGIN + '?next=%s' % request.path)
  login = admin_login(connection.cursor(), request.user.username)
  if not login:
    raise PermissionDenied
  if domain is None:
    # domain not in URL, provided by "?domain=..." argument (search form)
    domain = request.GET.get('domain', '').upper()
  elif domain.upper() != domain:
    return HttpResponseRedirect(reverse(rqlistdom, args=[domain.upper()]))

  z = autoreg.zauth.ZAuth(connection.cursor())

  rlist = _rq_list_dom(domain)
  for r in rlist:
    if not z.checkparent(r.fqdn, login):
      continue
    _rq_decorate(r)

  vars = { 'rlist': rlist, 'fqdn': domain,
           'goto': request.GET.get('page', '') }
  return render(request, 'requests/rqlistdom.html', vars)
Project: autoreg    Author: pbeyssac    | Project source | File source
def rq_run(out):
  import autoreg.dns.db
  dd = autoreg.dns.db.db(dbc=connection.cursor())
  dd.login('autoreg')
  whoisdb = autoreg.whois.db.Main(dbc=connection.cursor())

  rl = Requests.objects.exclude(pending_state=None).order_by('id')

  for r in rl:
    with transaction.atomic():
      r2 = Requests.objects.select_for_update().get(id=r.id)
      try:
        r2.do_pending_exc(out, dd, whoisdb)
        ok = True
      except IntegrityError as e:
        print(six.text_type(e), file=out)
        ok = False
      if ok:
        print(_("Status: committed"), file=out)
      else:
        print(_("Status: cancelled"), file=out)
Project: prestashop-sync    Author: dragoon    | Project source | File source
def handle_noargs(self, **opts):
        from django.db import connection

        c = connection.cursor()

        for article in Article.objects.all():
            c.execute("""SELECT c.slug
FROM articles_article_categories aac
JOIN articles_category c
ON aac.category_id = c.id
WHERE aac.article_id=%s""", (article.id,))

            names = [row[0] for row in c.fetchall()]
            tags = [Tag.objects.get_or_create(name=t)[0] for t in names]
            article.tags = tags
            article.save()
Project: automated-reporting    Author: Waikato    | Project source | File source
def get_max_years():
    """
    Determines the maximum number of available years to go back.

    :return: the number of years
    :rtype: int
    """

    cursor = connection.cursor()
    cursor.execute("""
        select extract(year from min(start_date))
        from %s
        where start_date > '1900-01-01'
        """ % StudentDates._meta.db_table)
    min_year = None
    max_years = None
    for row in cursor.fetchall():
        min_year = row[0]
    if min_year is not None:
        max_years = date.today().year - min_year

    return int(max_years)
Project: automated-reporting    Author: Waikato    | Project source | File source
def get_schools():
    """
    Retrieves a sorted list of all the schools.

    :return: the list of schools
    :rtype: list
    """

    cursor = connection.cursor()
    cursor.execute("""
        SELECT DISTINCT(school)
        FROM %s
        WHERE char_length(school) > 0
        ORDER BY school ASC
        """ % StudentDates._meta.db_table)
    result = []
    for row in cursor.fetchall():
        result.append(row[0])
    return result
Project: automated-reporting    Author: Waikato    | Project source | File source
def get_departments(schools):
    """
    Retrieves a sorted list of all the departments.

    :param schools: the list of schools to list the departments for.
    :type schools: list
    :return: the list of departments
    :rtype: list
    """

    sql = """
        select distinct(owning_department_clevel)
        from %s
        where owning_school_clevel in ('%s')
        order by owning_department_clevel
        """ % (GradeResults._meta.db_table, "','".join(schools))
    cursor = connection.cursor()
    cursor.execute(sql)
    result = []
    for row in cursor.fetchall():
        result.append(row[0])
    return result
Project: automated-reporting    Author: Waikato    | Project source | File source
def get_papers(paper):
    """
    Retrieves a sorted list of all the papers that match the search (e.g. COMP900 or PSYC9%).

    :param paper: the paper search string
    :type paper: str
    :return: the list of papers
    :rtype: list
    """

    sql = """
        select distinct(paper_master_code)
        from %s
        where paper_master_code like '%s'
        order by paper_master_code
        """ % (GradeResults._meta.db_table, paper)
    cursor = connection.cursor()
    cursor.execute(sql)
    result = []
    for row in cursor.fetchall():
        result.append(row[0])
    return result
Project: automated-reporting    Author: Waikato    | Project source | File source
def get_scholarships():
    """
    Retrieves a sorted list of all scholarships available.

    :return: the list of scholarships
    :rtype: list
    """
    cursor = connection.cursor()
    cursor.execute("""
        SELECT DISTINCT(name)
        FROM %s
        WHERE char_length(name) > 0
        ORDER BY name ASC
        """ % Scholarship._meta.db_table)
    result = []
    for row in cursor.fetchall():
        result.append(row[0])
    return result
Project: automated-reporting    Author: Waikato    | Project source | File source
def query_to_csv(cursor, cols, outname):
    """
    Turns the query into a CSV file for the GPA calculation.

    :param cursor: the database cursor
    :type cursor: DictCursor
    :param cols: the header names
    :type cols: list
    :param outname: the CSV output filename
    :type outname: str
    """

    logger.info("Generating CSV: {0}".format(outname))
    with open(outname, 'w') as outfile:
        writer = csv.writer(outfile, quoting=csv.QUOTE_NONNUMERIC)
        writer.writerow(cols.split(","))
        for row in cursor.fetchall():
            writer.writerow(row)
        outfile.flush()
    logger.info("Generated CSV ({0}) exists: ".format(outname, os.path.isfile(outname)))
Project: automated-reporting    Author: Waikato    | Project source | File source
def index(request):
    # get all schools
    cursor = connection.cursor()
    cursor.execute("""
        SELECT DISTINCT(owning_school_clevel)
        FROM %s
        ORDER BY owning_school_clevel ASC
        """ % GradeResults._meta.db_table)
    schools = list()
    for row in cursor.fetchall():
        schools.append(row[0])
    # configure template
    template = loader.get_template('leave/index.html')
    context = applist.template_context('leave')
    context['schools'] = schools
    context['minimum'] = MINIMUM_DAYS
    return HttpResponse(template.render(context, request))
Project: open-ledger    Author: creativecommons    | Project source | File source
def server_cursor_query(queryset, cursor_id=0, chunk_size=DEFAULT_CHUNK_SIZE):
    connection.cursor()  # ensure the underlying database connection is open before using connection.connection below

    compiler = queryset.query.get_compiler(using=queryset.db)
    sql, params = compiler.as_sql()

    model = compiler.klass_info['model']
    select_fields = compiler.klass_info['select_fields']
    fields = [field[0].target.attname
              for field in compiler.select[select_fields[0]:select_fields[-1] + 1]]

    cursor_name = 'cursor-large-%d' % cursor_id
    cursor = connection.connection.cursor(name=cursor_name)
    with transaction.atomic(savepoint=False):
        cursor.execute(sql, params)

        # Build the row namedtuple class once rather than once per row.
        DBObj = namedtuple('DBObj', fields)
        while True:
            rows = cursor.fetchmany(chunk_size)
            if not rows:
                break
            for row in rows:
                yield DBObj(*row[select_fields[0]:select_fields[-1] + 1])
Project: cocreate    Author: ngageoint    | Project source | File source
def unreadForUser(self, user_id):
        from django.db import connection
        cursor = connection.cursor()
        cursor.execute("SELECT n.id, n.added_at, n.owner_id, n.read, n.msg, n.related_model_type, n.related_model_id, n.progress, n.object_name, n.error FROM cocreate_notification n WHERE n.owner_id = ? and n.read = 0", (user_id,))
        result_list = []

        for row in cursor.fetchall():
            p = self.model(
                id = row[0],
                added_at = row[1],
                owner_id = row[2],
                read = row[3],
                msg = row[4],
                related_model_type = row[5],
                related_model_id = row[6],
                progress = row[7],
                object_name = row[8],
                error = row[9]
            )
            result_list.append(p)
        return result_list
Project: morango    Author: learningequality    | Project source | File source
def _dequeuing_delete_rmcb_records(cursor, transfersession_id):
    # delete all RMCBs which are a reverse FF (store version newer than buffer version)
    delete_rmcb_records = """DELETE FROM {rmcb}
                             WHERE model_uuid IN
                             (SELECT rmcb.model_uuid FROM {store} as store, {buffer} as buffer, {rmc} as rmc, {rmcb} as rmcb
                             /*Scope to a single record*/
                             WHERE store.id = buffer.model_uuid
                             AND  store.id = rmc.store_model_id
                             AND  store.id = rmcb.model_uuid
                             /*Checks whether LSB of buffer or less is in RMC of store*/
                             AND buffer.last_saved_instance = rmc.instance_id
                             AND buffer.last_saved_counter <= rmc.counter
                             AND rmcb.transfer_session_id = '{transfer_session_id}'
                             AND buffer.transfer_session_id = '{transfer_session_id}')
                              """.format(buffer=Buffer._meta.db_table,
                                         store=Store._meta.db_table,
                                         rmc=RecordMaxCounter._meta.db_table,
                                         rmcb=RecordMaxCounterBuffer._meta.db_table,
                                         transfer_session_id=transfersession_id)

    cursor.execute(delete_rmcb_records)
Project: morango    Author: learningequality    | Project source | File source
def _dequeuing_delete_buffered_records(cursor, transfersession_id):
    # delete all buffer records which are a reverse FF (store version newer than buffer version)
    delete_buffered_records = """DELETE FROM {buffer}
                                 WHERE model_uuid in
                                 (SELECT buffer.model_uuid FROM {store} as store, {buffer} as buffer, {rmc} as rmc
                                 /*Scope to a single record*/
                                 WHERE store.id = buffer.model_uuid
                                 AND rmc.store_model_id = buffer.model_uuid
                                 /*Checks whether LSB of buffer or less is in RMC of store*/
                                 AND buffer.last_saved_instance = rmc.instance_id
                                 AND buffer.last_saved_counter <= rmc.counter
                                 AND buffer.transfer_session_id = '{transfer_session_id}')
                              """.format(buffer=Buffer._meta.db_table,
                                         store=Store._meta.db_table,
                                         rmc=RecordMaxCounter._meta.db_table,
                                         rmcb=RecordMaxCounterBuffer._meta.db_table,
                                         transfer_session_id=transfersession_id)
    cursor.execute(delete_buffered_records)
Project: morango    Author: learningequality    | Project source | File source
def _dequeuing_merge_conflict_rmcb(cursor, transfersession_id):
    # transfer record max counters for records with merge conflicts + perform max
    merge_conflict_rmc = """REPLACE INTO {rmc} (instance_id, counter, store_model_id)
                                SELECT rmcb.instance_id, rmcb.counter, rmcb.model_uuid
                                FROM {rmcb} AS rmcb, {store} AS store, {rmc} AS rmc, {buffer} AS buffer
                                /*Scope to a single record.*/
                                WHERE store.id = rmcb.model_uuid
                                AND store.id = rmc.store_model_id
                                AND store.id = buffer.model_uuid
                                /*Where buffer rmc is greater than store rmc*/
                                AND rmcb.instance_id = rmc.instance_id
                                AND rmcb.counter > rmc.counter
                                AND rmcb.transfer_session_id = '{transfer_session_id}'
                                /*Exclude fast-forwards*/
                                AND NOT EXISTS (SELECT 1 FROM {rmcb} AS rmcb2 WHERE store.id = rmcb2.model_uuid
                                                                              AND store.last_saved_instance = rmcb2.instance_id
                                                                              AND store.last_saved_counter <= rmcb2.counter
                                                                              AND rmcb2.transfer_session_id = '{transfer_session_id}')
                           """.format(buffer=Buffer._meta.db_table,
                                      store=Store._meta.db_table,
                                      rmc=RecordMaxCounter._meta.db_table,
                                      rmcb=RecordMaxCounterBuffer._meta.db_table,
                                      transfer_session_id=transfersession_id)
    cursor.execute(merge_conflict_rmc)
Project: morango    Author: learningequality    | Project source | File source
def _dequeuing_merge_conflict_buffer(cursor, current_id, transfersession_id):
    # transfer buffer serialized into conflicting store
    merge_conflict_store = """REPLACE INTO {store} (id, serialized, deleted, last_saved_instance, last_saved_counter, model_name,
                                                    profile, partition, source_id, conflicting_serialized_data, dirty_bit, _self_ref_fk)
                                        SELECT store.id, store.serialized, store.deleted OR buffer.deleted, '{current_instance_id}',
                                               {current_instance_counter}, store.model_name, store.profile, store.partition, store.source_id,
                                               buffer.serialized || '\n' || store.conflicting_serialized_data, 1, store._self_ref_fk
                                        FROM {buffer} AS buffer, {store} AS store
                                        /*Scope to a single record.*/
                                        WHERE store.id = buffer.model_uuid
                                        AND buffer.transfer_session_id = '{transfer_session_id}'
                                        /*Exclude fast-forwards*/
                                        AND NOT EXISTS (SELECT 1 FROM {rmcb} AS rmcb2 WHERE store.id = rmcb2.model_uuid
                                                                                      AND store.last_saved_instance = rmcb2.instance_id
                                                                                      AND store.last_saved_counter <= rmcb2.counter
                                                                                      AND rmcb2.transfer_session_id = '{transfer_session_id}')
                                  """.format(buffer=Buffer._meta.db_table,
                                             rmcb=RecordMaxCounterBuffer._meta.db_table,
                                             store=Store._meta.db_table,
                                             rmc=RecordMaxCounter._meta.db_table,
                                             transfer_session_id=transfersession_id,
                                             current_instance_id=current_id.id,
                                             current_instance_counter=current_id.counter)
    cursor.execute(merge_conflict_store)
Project: morango    Author: learningequality    | Project source | File source
def _dequeuing_delete_mc_buffer(cursor, transfersession_id):
    # delete records with merge conflicts from buffer
    delete_mc_buffer = """DELETE FROM {buffer}
                                WHERE EXISTS
                                (SELECT 1 FROM {store} AS store, {buffer} AS buffer
                                /*Scope to a single record.*/
                                WHERE store.id = {buffer}.model_uuid
                                AND {buffer}.transfer_session_id = '{transfer_session_id}'
                                /*Exclude fast-forwards*/
                                AND NOT EXISTS (SELECT 1 FROM {rmcb} AS rmcb2 WHERE store.id = rmcb2.model_uuid
                                                                              AND store.last_saved_instance = rmcb2.instance_id
                                                                              AND store.last_saved_counter <= rmcb2.counter
                                                                              AND rmcb2.transfer_session_id = '{transfer_session_id}'))
                           """.format(buffer=Buffer._meta.db_table,
                                      store=Store._meta.db_table,
                                      rmc=RecordMaxCounter._meta.db_table,
                                      rmcb=RecordMaxCounterBuffer._meta.db_table,
                                      transfer_session_id=transfersession_id)
    cursor.execute(delete_mc_buffer)
Project: morango    Author: learningequality    | Project source | File source
def _dequeuing_delete_mc_rmcb(cursor, transfersession_id):
    # delete rmcb records with merge conflicts
    delete_mc_rmc = """DELETE FROM {rmcb}
                                WHERE EXISTS
                                (SELECT 1 FROM {store} AS store, {rmc} AS rmc
                                /*Scope to a single record.*/
                                WHERE store.id = {rmcb}.model_uuid
                                AND store.id = rmc.store_model_id
                                /*Where buffer rmc is greater than store rmc*/
                                AND {rmcb}.instance_id = rmc.instance_id
                                AND {rmcb}.transfer_session_id = '{transfer_session_id}'
                                /*Exclude fast-forwards*/
                                AND NOT EXISTS (SELECT 1 FROM {rmcb} AS rmcb2 WHERE store.id = rmcb2.model_uuid
                                                                              AND store.last_saved_instance = rmcb2.instance_id
                                                                              AND store.last_saved_counter <= rmcb2.counter
                                                                              AND rmcb2.transfer_session_id = '{transfer_session_id}'))
                           """.format(buffer=Buffer._meta.db_table,
                                      store=Store._meta.db_table,
                                      rmc=RecordMaxCounter._meta.db_table,
                                      rmcb=RecordMaxCounterBuffer._meta.db_table,
                                      transfer_session_id=transfersession_id)
    cursor.execute(delete_mc_rmc)
Project: morango    Author: learningequality    | Project source | File source
def _dequeue_into_store(transfersession):
    """
    Takes data from the buffers and merges into the store and record max counters.
    """
    with connection.cursor() as cursor:
        _dequeuing_delete_rmcb_records(cursor, transfersession.id)
        _dequeuing_delete_buffered_records(cursor, transfersession.id)
        current_id = InstanceIDModel.get_current_instance_and_increment_counter()
        _dequeuing_merge_conflict_buffer(cursor, current_id, transfersession.id)
        _dequeuing_merge_conflict_rmcb(cursor, transfersession.id)
        _dequeuing_update_rmcs_last_saved_by(cursor, current_id, transfersession.id)
        _dequeuing_delete_mc_rmcb(cursor, transfersession.id)
        _dequeuing_delete_mc_buffer(cursor, transfersession.id)
        _dequeuing_insert_remaining_buffer(cursor, transfersession.id)
        _dequeuing_insert_remaining_rmcb(cursor, transfersession.id)
        _dequeuing_delete_remaining_rmcb(cursor, transfersession.id)
        _dequeuing_delete_remaining_buffer(cursor, transfersession.id)
    if getattr(settings, 'MORANGO_DESERIALIZE_AFTER_DEQUEUING', True):
        _deserialize_from_store(transfersession.sync_session.profile)
Project: morango    Author: learningequality    | Project source | File source
def calculate_filter_max_counters(cls, filters):

        # create string of prefixes to place into sql statement
        condition = " UNION ".join(["SELECT '{}' AS a".format(prefix) for prefix in filters])

        filter_max_calculation = """
        SELECT PMC.instance, MIN(PMC.counter)
        FROM
            (
            SELECT dmc.instance_id as instance, MAX(dmc.counter) as counter, filter as filter_partition
            FROM {dmc_table} as dmc, (SELECT T.a as filter FROM ({filter_list}) as T)
            WHERE filter LIKE dmc.partition || '%'
            GROUP BY instance, filter_partition
            ) as PMC
        GROUP BY PMC.instance
        HAVING {count} = COUNT(PMC.filter_partition)
        """.format(dmc_table=cls._meta.db_table,
                   filter_list=condition,
                   count=len(filters))

        with connection.cursor() as cursor:
            cursor.execute(filter_max_calculation)
            return dict(cursor.fetchall())
Project: kolibri    Author: learningequality    | Project source | File source
def activate_pragmas_per_connection(sender, connection, **kwargs):
        """
        Activate SQLite3 PRAGMAs that apply on a per-connection basis (currently
        just the WAL autocheckpoint tweak below).
        """

        if connection.vendor == "sqlite":
            cursor = connection.cursor()

            # Shorten the default WAL autocheckpoint from 1000 pages to 500
            cursor.execute(CONNECTION_PRAGMAS)

            # We don't turn on the following pragmas, because they have negligible
            # performance impact. For reference, here's what we've tested:

            # Don't ensure that the OS has fully flushed
            # our data to disk.
            # cursor.execute("PRAGMA synchronous=OFF;")

            # Store cross-database JOINs in memory.
            # cursor.execute("PRAGMA temp_store=MEMORY;")
Project: kolibri    Author: learningequality    | Project source | File source
def activate_pragmas_on_start():
        """
        Activate a set of PRAGMAs that apply to the database itself,
        and not on a per connection basis.
        :return:
        """
        from django.db import connection

        if connection.vendor == "sqlite":
            cursor = connection.cursor()

            # http://www.sqlite.org/wal.html
            # WAL's main advantage allows simultaneous reads
            # and writes (vs. the default exclusive write lock)
            # at the cost of a slight penalty to all reads.
            cursor.execute(START_PRAGMAS)
Project: intel-manager-for-lustre    Author: intel-hpdd    | Project source | File source
def get_stats_size(self):
        stats_size = LazyStruct()
        from django.db import connection
        cursor = connection.cursor()
        if 'postgres' in connection.settings_dict['ENGINE']:
            stats_size.row_count = stats_size.data = stats_size.index = 0

            for model in Stats:
                cursor.execute("select count(id) as rows, pg_relation_size('{0}') as data_length, pg_total_relation_size('{0}') - pg_relation_size('{0}') as index_length from {0}".format(model._meta.db_table))
                rows, data, index = cursor.fetchone()
                stats_size.row_count += rows
                stats_size.data += data
                stats_size.index += index
        else:
            raise RuntimeError("Unsupported DB: %s" % connection.settings_dict['ENGINE'])
        return stats_size
Project: intel-manager-for-lustre    Author: intel-hpdd    | Project source | File source
def _get_table(table, silence_columns = []):
    "Returns all rows from a cursor as a dict"

    cursor = connection.cursor()
    cursor.execute("SELECT * FROM %s;" % table)
    desc = cursor.description

    select_columns = []
    columns = [col[0] for col in desc]
    for i, col_name in enumerate(columns):
        if col_name not in silence_columns:
            select_columns.append(i)

    return {
        'columns': [columns[i] for i in select_columns],
        'rows': [
            [row[i] for i in select_columns] for row in cursor.fetchall()
        ]
    }
Project: vaultier    Author: Movile    | Project source | File source
def _migrate_cards(vault_node):
    cursor = connection.cursor()
    cursor.execute("SELECT * FROM vaultier_card WHERE vault_id = %s",
                   [vault_node._vault])

    for c in _dictfetchall(cursor):
        node = Node(
            name=c['name'],
            meta=json.dumps({'description': c['description']}),
            created_by_id=c['created_by_id'],
            parent=vault_node,
            type=1
        )
        node._card = c['id']
        node.save(force_insert=True)
        _migrate_secret(node)
        _migrate_acl('card', c['id'], node)
Project: vaultier    Author: Movile    | Project source | File source
def migrate_from_workspaces(apps, schema_editor):

    if not workspace_exists():
        return

    cursor = connection.cursor()
    cursor.execute("ALTER TABLE public.vaultier_member ALTER COLUMN workspace_id DROP NOT NULL;")
    cursor.execute("SELECT * FROM vaultier_workspace")
    nodes = []
    for w in _dictfetchall(cursor):
        node = Node(
            name=w['name'],
            meta=json.dumps({'description': w['description']}),
            created_by_id=w['created_by_id'],
            type=1
        )
        node.save()
        node.acl.initialize_node()
        node._workspace = w['id']
        nodes.append(node)
        _migrate_members(node)
        _migrate_vaults(node)
        _migrate_acl('workspace', w['id'], node)
Project: django-icekit    Author: ic-labs    | Project source | File source
def update_site(sender, **kwargs):
    """
    Update `Site` object matching `SITE_ID` setting with `SITE_DOMAIN` and
    `SITE_PORT` settings.
    """
    Site = apps.get_model('sites', 'Site')
    domain = settings.SITE_DOMAIN
    if settings.SITE_PORT:
        domain += ':%s' % settings.SITE_PORT
    Site.objects.update_or_create(
        pk=settings.SITE_ID,
        defaults=dict(
            domain=domain,
            name=settings.SITE_NAME))

    # We set an explicit pk instead of relying on auto-incrementation,
    # so we need to reset the database sequence.
    sequence_sql = connection.ops.sequence_reset_sql(no_style(), [Site])
    if sequence_sql:
        cursor = connection.cursor()
        for command in sequence_sql:
            cursor.execute(command)