The following 50 code examples, extracted from open-source Python projects, illustrate how to use bson.json_util.default().
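Most of the examples below share the same round-trip pattern: serialize a MongoDB document with json.dumps(..., default=json_util.default) and restore it with json.loads(..., object_hook=json_util.object_hook). A minimal sketch of that pattern (the sample document is hypothetical):

import json
import datetime
from bson import ObjectId, json_util

doc = {'_id': ObjectId(), 'created': datetime.datetime.utcnow()}

# default=json_util.default handles BSON types (ObjectId, datetime, ...)
# that the standard json encoder would reject with a TypeError
text = json.dumps(doc, default=json_util.default)

# object_hook=json_util.object_hook turns the encoded values
# back into their BSON types on load
restored = json.loads(text, object_hook=json_util.object_hook)
assert restored['_id'] == doc['_id']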
def response(status, data):
    hr = HttpResponse()
    hr['Access-Control-Allow-Origin'] = '*'
    hr['Content-Type'] = 'application/json'
    hr['charset'] = 'utf-8'
    hr.status_code = status
    if type(data) == str or type(data) == unicode:
        data = {
            'message': data,
        }
    try:
        hr.write(json.dumps(data))
    except:
        try:
            hr.write(json.dumps(data, default=json_util.default))
        except:
            hr.status_code = 500
            hr.write(json.dumps({
                'error': "json serialize failed",
            }))
    return hr
def execQuery(tree, query, aggcode=None):
    hosts = check_source(tree, query['name'])
    if send_source(hosts, tree, query['name']) == False:
        return []
    if aggcode:
        hosts = check_source(tree, aggcode['name'])
        if send_source(hosts, tree, aggcode['name']) == False:
            return []
    req = buildReq('execQuery', tree, query, aggcode)
    resp, content = r.get(controller, json.dumps(req, default=json_util.default), "pathdump")
    if resp['status'] != '200':
        return []
    else:
        return json.loads(content, object_hook=json_util.object_hook)
def registerQuery(filepath):
    filename = os.path.basename(filepath)
    try:
        with open(filepath, 'r') as f:
            filedata = f.read()
    except EnvironmentError:
        return [False]
    req = {'api': 'registerQuery'}
    req.update({'name': filename})
    req.update({'data': filedata})
    resp, content = r.get(controller, json.dumps(req, default=json_util.default), "pathdump")
    if resp['status'] != '200':
        return []
    else:
        return json.loads(content, object_hook=json_util.object_hook)
def execRequest(req, url):
    global results
    workers = []
    tree = req['tree']
    for child in tree['controller']['child']:
        t = Thread(target=wrapper, args=(httpcmd, (child, req, url), results))
        workers.append(t)
    for worker in workers:
        worker.start()
    for worker in workers:
        worker.join()
    data = []
    for res in results:
        resp, content = res
        if resp['status'] == '200':
            data += json.loads(content, object_hook=json_util.object_hook)
    results = []
    return json.dumps(data, default=json_util.default)
def main():
    questions = db.test_questions.find({"is_bound": True})
    table_key_set = set()
    question_str_list = list()
    for question in questions:
        table_key_set.add(get_table_key(question["table_loc"]))
        question_str_list.append(json.dumps(question, default=json_util.default))
    save("test_questions.txt", question_str_list)
    query_table(table_key_set)
    print("Questions: %d" % questions.count())
    print("Tables: %d" % len(table_key_set))
def on_get(self, request):
    """ Various getters. """
    what = request.form['what']
    resp = {}
    if what == 'results':
        dbid = request.form['dbid']
        resp['results'] = list(self.storage.get_results(dbid))
    elif what == 'dorks':
        dorks = list(self.storage.get_dorks())
        resp['categories'] = list(set([d['category'] for d in dorks]))
        resp['dorks'] = dorks
    elif what == 'blacklist':
        resp['blacklist'] = {'url': [], 'text': []}
        for bl in self.storage.get_blacklist():
            resp['blacklist'][bl['type']].append(bl['term'])
    else:
        resp['error'] = 'Unknown'
    return Response(json.dumps(resp, default=json_util.default), mimetype='application/json')
def TRAC_OPTIONS(func):  # pylint: disable=invalid-name
    @click.option(
        '--trac-uri',
        default='http://localhost/xmlrpc',
        show_default=True,
        help='uri of the Trac instance XMLRpc endpoint',
    )
    @click.option(
        '--ssl-verify / --no-ssl-verify',
        default=True,
        show_default=True,
        help='Enable/disable SSL certificate verification'
    )
    @functools.wraps(func)
    def wrapper(*args, **kwargs):
        return func(*args, **kwargs)
    return wrapper
def handle_scan(self, scan, action, tags, notes, store=False):
    try:
        nmap = self._parseNMap(scan)
        enhanced = self._enhance(nmap)
        if store:
            self._store_in_db(nmap, tags=tags, notes=notes)
        if action == "json":
            returndata = json.dumps(enhanced, indent=2, default=json_util.default)
        elif action == "pdf":
            returndata = str(base64.b64encode(self._generatePDF(enhanced)), "utf-8")
        elif action == "webview":
            app = Flask(__name__, template_folder=os.path.join(callLocation, "templates"))
            with app.test_request_context("/"):
                returndata = render_template(self.html, scan=enhanced)
        return returndata
    except Exception as e:
        traceback.print_exc()
def to_json(self, obj):
    return json.dumps(obj, default=json_util.default)
def serialize(value):
    return dumps(value, default=json_util.default)
def httpcmd(node, req):
    return restapi.post(node, json.dumps(req, default=json_util.default), "pathdump")
def getpathdumppost():
    if not request.json or 'api' not in request.json:
        abort(404)
    else:
        output = handleRequest(request.json)
        return json.dumps(output, default=json_util.default)
def getpathdumpget():
    if not request.json or 'api' not in request.json:
        abort(404)
    else:
        output = handleRequest(request.json)
        return json.dumps(output, default=json_util.default)
def installQuery(tree, query, interval):
    hosts = check_source(tree, query['name'])
    if send_source(hosts, tree, query['name']) == False:
        return []
    req = buildReq('installQuery', tree, query, None, interval)
    resp, content = r.get(controller, json.dumps(req, default=json_util.default), "pathdump")
    if resp['status'] != '200':
        return []
    else:
        return json.loads(content, object_hook=json_util.object_hook)
def check_source(tree, filename):
    req = {'api': 'check_source'}
    req.update({'tree': tree})
    req.update({'name': filename})
    resp, content = r.post(controller, json.dumps(req, default=json_util.default), "pathdump")
    return json.loads(content, object_hook=json_util.object_hook)
def send_source(hosts, tree, filename):
    if source_available_at(hosts):
        return True
    # need to send a copy of source to hosts which don't have it
    send_tree = remove_hosts_from_tree(hosts, tree)
    req = {'api': 'send_source'}
    req.update({'tree': send_tree})
    req.update({'name': filename})
    resp, content = r.post(controller, json.dumps(req, default=json_util.default), "pathdump")
    return source_available_at(json.loads(content, object_hook=json_util.object_hook))
def getAggTree(groupnodes):
    req = {'api': 'getAggTree'}
    req.update({'groupnodes': groupnodes})
    resp, content = r.get(controller, json.dumps(req, default=json_util.default), "pathdump")
    if resp['status'] != '200':
        return {}
    else:
        return json.loads(content, object_hook=json_util.object_hook)[0]
def getFlowCollectionDir():
    req = {'api': 'getFlowCollDir'}
    resp, content = r.get(controller, json.dumps(req, default=json_util.default), "pathdump")
    if resp['status'] != '200':
        return ''
    else:
        return json.loads(content, object_hook=json_util.object_hook)[0]
def dump_json(obj):
    """Dump Python object as JSON string."""
    return simplejson.dumps(obj, ignore_nan=True, default=json_util.default)
def db2json(db_obj):
    return json.loads(json.dumps(db_obj, indent=4, default=json_util.default))

# return a dictionary of spent (txids, vout) => transaction when spent
# TODO: add vout to this
def _object_schema_to_line_tuples(cls, object_schema, columns_to_get, field_prefix):
    """ Get the list of tuples describing lines in object_schema

    - Sort fields by count
    - Add the tuples describing each field in object
    - Recursively add tuples for nested objects

    :param object_schema: dict
    :param columns_to_get: iterable
        columns to create for each field
    :param field_prefix: str, default ''
        allows to create full name.
        '.' is the separator for object subfields
        ':' is the separator for list of objects subfields
    :return line_tuples: list of tuples describing lines
    """
    line_tuples = []
    sorted_fields = sorted(list(object_schema.items()),
                           key=lambda x: (-x[1]['count'], x[0]) if 'count' in x[1] else x[0])
    for field, field_schema in sorted_fields:
        line_columns = cls._field_schema_to_columns(
            field, field_schema, field_prefix, columns_to_get)
        line_tuples.append(line_columns)

        types = field_schema.get('types_count', [field_schema['type']])
        if 'object' in field_schema:
            if 'ARRAY' in types:
                current_prefix = field_prefix + field + ':'
            elif 'OBJECT' in types:
                current_prefix = field_prefix + field + '.'
            else:
                logger.warning('Field {} has key "object" but has types {} while should have '
                               '"OBJECT" or "ARRAY"'.format(field, types))
                continue
            line_tuples += cls._object_schema_to_line_tuples(
                field_schema['object'], columns_to_get, field_prefix=current_prefix)
    return line_tuples
def _field_schema_to_columns(cls, field_name, field_schema, field_prefix, columns_to_get):
    """ Given fields information, returns a tuple representing columns_to_get.

    :param field_name:
    :param field_schema:
    :param field_prefix: str, default ''
    :param columns_to_get: iterable
        columns to create for each field
    :return field_columns: tuple
    """
    field_columns = list()
    for column in columns_to_get:
        field_columns.append(cls.make_column_value(column, field_schema, field_name, field_prefix))
    return tuple(field_columns)
def _format_types_count(types_count, array_types_count=None):
    """ Format types_count to a readable string.

    >>> format_types_count({'integer': 10, 'boolean': 5, 'null': 3, })
    'integer : 10, boolean : 5, null : 3'

    >>> format_types_count({'ARRAY': 10, 'null': 3, }, {'float': 4})
    'ARRAY(float : 4) : 10, null : 3'

    :param types_count: dict
    :param array_types_count: dict, default None
    :return types_count_string: str
    """
    if types_count is None:
        return str(None)

    types_count = sorted(types_count.items(), key=lambda x: x[1], reverse=True)
    type_count_list = list()
    for type_name, count in types_count:
        if type_name == 'ARRAY':
            array_type_name = _SchemaPreProcessing._format_types_count(array_types_count)
            type_count_list.append('ARRAY(' + array_type_name + ') : ' + str(count))
        else:
            type_count_list.append(str(type_name) + ' : ' + str(count))
    types_count_string = ', '.join(type_count_list)
    return types_count_string
def __init__(self, data, category='schema', without_counts=False, **kwargs):
    """
    :param data: json like structure - schema, mapping, ...
    :param without_counts: bool - default False, remove all count fields in output if True
    :param kwargs: unused - exists for a unified interface with other subclasses of BaseOutput
    """
    data_processor = OutputPreProcessing(category)
    if without_counts:
        self.data = data_processor.filter_data(data)
    else:
        self.data = data
def get_default_columns(cls):
    """List default columns by category"""
    return {
        'schema': cls._default_columns.get('schema', _SchemaPreProcessing.default_columns),
        'mapping': cls._default_columns.get('mapping', _MappingPreProcessing.default_columns),
        'diff': cls._default_columns.get('diff', _DiffPreProcessing.default_columns)}
def write_data(self, file_descr):
    """Use json module dump function to write into file_descr (opened with opener)."""
    json.dump(self.data, file_descr, indent=4, ensure_ascii=False,
              default=json_util.default, sort_keys=True)
def transform_data_to_file(data, formats, output=None, category='schema', **kwargs):
    """ Transform data into each of output_formats and write result to output_filename or stdout.

    :param data: dict (schema, mapping or diff)
    :param formats: list of str - extensions of output desired among:
        'json', 'yaml' (hierarchical formats)
        'tsv', 'html', 'md' or 'xlsx' (list like formats)
    :param output: str
        full path to file where formatted output will be saved (default is stdout)
    :param category: string in 'schema', 'mapping', 'diff' - describes input data
    :param kwargs: may contain additional specific arguments
        columns: list of columns to display in the output for list like formats
        without_counts: bool to display count fields in output for hierarchical formats
    """
    wrong_formats = set(formats) - {'tsv', 'xlsx', 'json', 'yaml', 'html', 'md'}
    if wrong_formats:
        raise ValueError("Output format should be tsv, xlsx, html, md, json or yaml. "
                         "{} is/are not supported".format(wrong_formats))
    for output_format in formats:
        output_maker = rec_find_right_subclass(output_format)(
            data, category=category, columns_to_get=kwargs.get('columns'),
            without_counts=kwargs.get('without_counts'))
        with output_maker.open(output) as file_descr:
            output_maker.write_data(file_descr)
def process_table(table):
    columns = group_table_by_column(table["rows"])
    clean_columns = list()
    column_name = list()
    for c in columns:
        column_name.append(c.pop(0))
        clean_columns.append(list(set(c)))
    column_type = list()
    for c in clean_columns:
        data_type = list()
        for value in c:
            data_type.append(check_value_type(value))
        column_type.append(check_column_type(data_type))
    """
    print(table["_id"])
    print("column_name: ", column_name)
    print("column_type: ", column_type)
    print("columns: ", clean_columns)
    """
    table_info = {
        "columns": clean_columns,
        "column_name": column_name,
        "column_type": column_type,
        "table_name": process_value(table["title"]),
        "map_id": table["map_id"],
        "_id": table["_id"]
    }
    return json.dumps(table_info, default=json_util.default)
def query_table(table_key_set):
    tables = db.tables.find({"map_id": {"$in": list(table_key_set)}})
    table_str_list = list()
    for table in tables:
        table_str_list.append(json.dumps(table, default=json_util.default))
    save("test_tables.txt", table_str_list)
def fetch_all_table():
    tables = db.tables.find()
    table_str_list = list()
    for table in tables:
        table_str_list.append(json.dumps(table, default=json_util.default))
    save("all_tables.txt", table_str_list)
def default(self, obj):
    if isinstance(obj, (datetime.datetime, datetime.date)):
        return obj.isoformat()
    elif isinstance(obj, ObjectId):
        return unicode(obj)
    return json.JSONEncoder.default(self, obj)
def jsonify(*args, **kwargs):
    """ jsonify with support for MongoDB ObjectId
    See https://gist.github.com/akhenakh/2954605
    """
    return Response(json.dumps(dict(*args, **kwargs),
                               default=json_util.default, indent=2,
                               cls=MongoJsonEncoder),
                    mimetype='application/json')
def get_conf(must_have_keys=DEAFULT_CONF_REQUIRED_KEYS,
             config_file=DEFAULT_CONF_FILE,
             env_config_file=DEFAULT_ENV_CONF_FILE,
             with_pardir_fallback=True):
    ''' Read a configuration file, ensure all the `must_have_keys` are present
    and return a config dict.

    The file is read from `config_file` and if it's not there and it's a
    default request, `conf/app_server.yaml` is used.
    '''
    if os.path.exists(config_file):
        # this will load either the default conf file if it exists or the given config_file parameter
        fh = open(config_file)
    elif os.environ.get("BH_ENV") and os.path.exists(env_config_file.format(BH_ENV=os.environ["BH_ENV"])):
        # environment is set in BH_ENV and corresponding conf file exists
        fh = open(env_config_file.format(BH_ENV=os.environ["BH_ENV"]))
    elif with_pardir_fallback and os.path.exists(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'conf', 'app_server.yaml')):
        # fallback to local file for development
        fh = open(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'conf', 'app_server.yaml'))
    else:
        raise Exception("Could not find a conf file")
    conf = yaml.load(fh)
    if not conf:
        raise ValueError('Empty config file')
    # Check that all the must_have_keys are present
    config_keys = set(conf.keys())
    missing_keys = list(must_have_keys.difference(config_keys))
    if missing_keys != []:
        keys_message = gen_missing_keys_error(missing_keys)
        error_message = 'Invalid configuration file: ' + keys_message
        raise ValueError(error_message)
    return Struct(**conf)  # Enables dot access
def humanify(obj, status_code=200):
    """ Gets an obj and possibly a status code and returns a flask Response
    with a jsonified obj, not suitable for humans

    >>> humanify({"a": 1})
    <Response 8 bytes [200 OK]>
    >>> humanify({"a": 1}, 404)
    <Response 8 bytes [404 NOT FOUND]>
    >>> humanify({"a": 1}).get_data()
    '{"a": 1}'
    >>> humanify([1,2,3]).get_data()
    '[1, 2, 3]'
    """
    # TODO: refactor the name to `response`
    # jsonify function doesn't work with lists
    if type(obj) == list:
        data = json.dumps(obj, default=json_util.default)
    elif type(obj) == pymongo.cursor.Cursor:
        rv = []
        for doc in obj:
            doc['_id'] = str(doc['_id'])
            rv.append(dumps(doc))
        data = '[' + ',\n'.join(rv) + ']' + '\n'
    else:
        data = dumps(obj, default=json_util.default, cls=MongoJsonEncoder)
    resp = Response(data, mimetype='application/json')
    resp.status_code = status_code
    return resp
def toJson(data):
    return json.dumps(data, default=json_util.default, ensure_ascii=False)
def MyResponse(response, status, mimetype="application/json", **kwargs):
    # pass the mimetype parameter through instead of repeating the literal
    return Response(json.dumps(response, default=json_util.default),
                    status=status, mimetype=mimetype, **kwargs)
def to_json(l):
    return json.dumps(list(l), indent=2, default=json_util.default)
def keywords():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collection = connection[DBS_NAME][COLLECTION_NAME0]
    projects = collection.find(projection=FIELDS0)
    json_projects = []
    for project in projects:
        json_projects.append(project)
    json_projects = json.dumps(json_projects, default=json_util.default)
    connection.close()
    return json_projects
def hashtags():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collection = connection[DBS_NAME][COLLECTION_NAME1]
    projects = collection.find(projection=FIELDS1)
    json_projects = []
    for project in projects:
        json_projects.append(project)
    json_projects = json.dumps(json_projects, default=json_util.default)
    connection.close()
    return json_projects
def counts():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collection = connection[DBS_NAME][COLLECTION_NAME2]
    projects = collection.find(projection=FIELDS2)
    json_projects = []
    for project in projects:
        json_projects.append(project)
    json_projects = json.dumps(json_projects, default=json_util.default)
    connection.close()
    return json_projects
def ratio():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collection = connection[DBS_NAME][COLLECTION_NAME3]
    projects = collection.find(projection=FIELDS3)
    json_projects = []
    for project in projects:
        json_projects.append(project)
    json_projects = json.dumps(json_projects, default=json_util.default)
    connection.close()
    return json_projects
def tracking_word():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collection = connection[DBS_NAME][COLLECTION_NAME4]
    projects = collection.find(projection=FIELDS4)
    json_projects = []
    for project in projects:
        json_projects.append(project)
    json_projects = json.dumps(json_projects, default=json_util.default)
    connection.close()
    return json_projects
def time():
    connection = MongoClient(MONGODB_HOST, MONGODB_PORT)
    collection = connection[DBS_NAME][COLLECTION_NAME6]
    projects = collection.find(projection=FIELDS6)
    json_projects = []
    for project in projects:
        json_projects.append(project)
    json_projects = json.dumps(json_projects, default=json_util.default)
    connection.close()
    return json_projects
def get(self, type):
    parser = reqparse.RequestParser()
    parser.add_argument('user', type=str, default=None)
    parser.add_argument('name', type=str, default=None)
    parser.add_argument('status', type=str, default=[], action='append')
    args = parser.parse_args()

    query = {}
    if type == 'steps':
        query = {
            'single_step': True,
        }
    elif type == 'pipelines':
        query = {
            'single_step': {'$nin': [True]},
            'name': args['name']
        }
    query['user'] = args['user']

    arg_status = args.get('status', [])
    if arg_status:
        query['status'] = {"$in": arg_status}

    # filter None values from query
    query = {k: v for k, v in query.items() if v is not None}

    return db.pipelines.find(query).count()
def get(self):
    """ Get the number of runs (useful for pagination) """
    parser = reqparse.RequestParser()
    parser.add_argument('user', type=str, default=None)
    args = parser.parse_args()

    # stats = {'total': db.pipelines.find({}).count()}
    stats = {'pipelines': [], 'totals': {'stats': {}}}
    for pipeline in pipelines:
        stats['pipelines'].append(dbmodel.get_stats({'name': pipeline['name']}))

    # Get user stat
    if args['user']:
        stats['user'] = dbmodel.get_stats({'user': args['user']}, 'user')

    # compute totals
    tottot = 0
    for pipeline in stats['pipelines']:
        for stat, value in pipeline['stats'].iteritems():
            if stat in stats['totals']['stats']:
                stats['totals']['stats'][stat] += value
                tottot += value
            else:
                stats['totals']['stats'][stat] = value
                tottot += value
    stats['totals']['total'] = tottot
    return stats
def default(self, o):
    if isinstance(o, decimal.Decimal):
        if o % 1 > 0:
            return float(o)
        else:
            return int(o)
    return super(DecimalEncoder, self).default(o)
def get_expiration(url, expiration_rules):
    exp = expiration_rules['default']
    sw = expiration_rules['starts_with']
    prefixes = sw.keys()
    for prefix in prefixes:
        if url.startswith(prefix):
            exp = sw[prefix]
    return exp
def process_one(url, s3, expiration_rules, headers):
    tld = tldextract.extract(url)
    if tld.subdomain != '' and tld.subdomain != 'www':
        tld = tld.subdomain + '.' + tld.domain + '.' + tld.suffix
    else:
        tld = tld.domain + '.' + tld.suffix
    i = url.find(tld)
    s3key = tld + url[i + len(tld):]
    exp = get_expiration(url, expiration_rules)
    try:
        o = s3.ObjectSummary(bucket, s3key)
        lm = o.last_modified
        now = datetime.datetime.utcnow()
        diff = exp - now
        expires_on = now - diff
        if lm.replace(tzinfo=None) < expires_on:
            exists = False
        else:
            exists = True
    except botocore.exceptions.ClientError as e:
        exists = False
    if not exists:
        logger.info('Processing: ' + url)
        crawl = crawl_one(url, expiration_rules, headers)
        contents = json.dumps(crawl, default=json_util.default)
        fake_handle = StringIO(contents)
        b = s3.create_bucket(Bucket=bucket)
        res = b.put_object(Key=s3key, Body=fake_handle)
        # TODO: check for errors
        dt = datetime.datetime.today().strftime('%Y-%m-%d')
        trackStats(tld, dt, True)
        summaryKey = dt
        trackStats(summaryKey, dt, True)
        summaryKey = tld + "|" + dt
        trackStats(summaryKey, dt, True)
        return True
    return False
def __call__(self, value, system):
    from bson import json_util
    request = system.get('request')
    if request is not None:
        if hasattr(request, 'response'):
            request.response.content_type = 'application/json'
    return json.dumps(value, default=json_util.default)

# https://gist.github.com/kamalgill/b1f682dbdc6d6df4d052