我们从Python开源项目中,提取了以下50个代码示例,用于说明如何使用oslo_serialization.jsonutils.loads()。
def watch(self, path):
    """Stream watch events for *path*, yielding each decoded JSON line."""
    url = self._base_url + path
    headers = {}
    if self.token:
        headers['Authorization'] = 'Bearer %s' % self.token
    # TODO(ivc): handle connection errors and retry on failure
    while True:
        response = requests.get(url, params={'watch': 'true'}, stream=True,
                                cert=self.cert, verify=self.verify_server,
                                headers=headers)
        with contextlib.closing(response):
            if not response.ok:
                raise exc.K8sClientException(response.text)
            for raw_line in response.iter_lines(delimiter='\n'):
                stripped = raw_line.strip()
                if stripped:
                    yield jsonutils.loads(stripped)
def test_run_add(self, m_k8s_add):
    """ADD dispatches to the k8s handler and writes the CNI result JSON."""
    vif = fake._fake_vif()
    m_k8s_add.return_value = vif
    stdin = StringIO()
    stdout = StringIO()
    env = {
        'CNI_COMMAND': 'ADD',
        'CNI_ARGS': 'foo=bar',
    }
    self.runner.run(env, stdin, stdout)
    self.assertTrue(m_k8s_add.called)
    self.assertEqual('foo=bar', m_k8s_add.call_args[0][0].CNI_ARGS)
    expected = {
        "cniVersion": "0.3.0",
        "dns": {"nameservers": ["192.168.0.1"]},
        "ip4": {"gateway": "192.168.0.1", "ip": "192.168.0.2/24"},
    }
    self.assertDictEqual(expected, jsonutils.loads(stdout.getvalue()))
def action_peek_json(body):
    """Determine action to invoke.

    The body must be a JSON object with exactly one top-level key,
    which names the action.
    """
    try:
        decoded = jsonutils.loads(body)
    except ValueError:
        raise exception.MalformedRequestBody(
            reason=_("cannot understand JSON"))

    # Make sure there's exactly one key...
    if len(decoded) != 1:
        raise exception.MalformedRequestBody(reason=_("too many body keys"))

    # Return the action and the decoded body...
    return list(decoded.keys())[0]
def test_user_session_get(self, kc):
    """Session endpoint exposes whitelisted attributes and hides the rest."""
    request = self.mock_rest_request()
    request.user = mock.Mock(
        services_region='some region',
        super_secret_thing='not here',
        is_authenticated=lambda: True,
        spec=['services_region', 'super_secret_thing'],
    )
    response = keystone.UserSession().get(request)
    self.assertStatusCode(response, 200)
    payload = jsonutils.loads(response.content)
    self.assertEqual(payload['services_region'], 'some region')
    self.assertNotIn('super_secret_thing', payload)


#
# Services
#
def _test_and_check_delete(self, volumes, tenant_id):
    """Create each volume, then fetch and delete it when creation succeeds."""
    for test_vol in volumes:
        if not test_vol.get('volume'):
            continue
        response = self.app.post_json(
            '/v2/' + tenant_id + '/volumes',
            dict(volume=test_vol['volume']),
            expect_errors=True)
        self.assertEqual(response.status_int, test_vol['expected_error'])
        if response.status_int != 202:
            continue
        json_body = jsonutils.loads(response.body)
        _id = json_body.get('volume')['id']
        vol_url = '/v2/' + tenant_id + '/volumes/' + _id
        self.assertEqual(self.app.get(vol_url).status_int, 200)
        self.assertEqual(self.app.delete(vol_url).status_int, 202)
def action_peek_json(body):
    """Determine action to invoke.

    The body must be a JSON object with exactly one top-level key,
    which names the action.
    """
    try:
        decoded = jsonutils.loads(body)
    except ValueError:
        raise exception.MalformedRequestBody(reason="cannot understand JSON")

    # Make sure there's exactly one key...
    if len(decoded) != 1:
        raise exception.MalformedRequestBody(reason="too many body keys")

    # Return the action and the decoded body...
    return list(decoded.keys())[0]
def __init__(self, message=None, code=None):
    """Record the server error payload, falling back to the class doc.

    :param message: raw response body, expected to be a JSON object
        containing an 'error' key.
    :param code: HTTP status code applied when the class default
        is "N/A".
    """
    super(HTTPException, self).__init__(message)
    try:
        self.error = jsonutils.loads(message)
        if 'error' not in self.error:
            # Fixed grammar of the error message ('not exists' ->
            # 'does not exist.') to match the sibling HTTPException
            # implementation elsewhere in the codebase.
            raise KeyError(_('Key "error" does not exist.'))
    except KeyError:
        # NOTE(jianingy): If key 'error' happens not exist,
        # self.message becomes no sense. In this case, we
        # return doc of current exception class instead.
        self.error = {'error': {'message': self.__class__.__doc__}}
    except Exception:
        self.error = {'error':
                      {'message': self.message or self.__class__.__doc__}}
    # NOTE(review): assumes the class defines a 'code' default of
    # "N/A" and (on Python 3) a 'message' attribute -- confirm.
    if self.code == "N/A" and code is not None:
        self.code = code
def update_image(self, image_uuid, values):
    """Merge *values* into the stored image record; UUID may not change."""
    if 'uuid' in values:
        raise exception.InvalidParameterValue(
            err=_('Cannot overwrite UUID for an existing image.'))
    try:
        target = self.client.read('/images/' + image_uuid)
        merged = json.loads(target.value)
        merged.update(values)
        target.value = json.dump_as_bytes(merged)
        self.client.update(target)
    except etcd.EtcdKeyNotFound:
        raise exception.ImageNotFound(image=image_uuid)
    except Exception as e:
        LOG.error('Error occurred while updating image: %s',
                  six.text_type(e))
        raise
    return translate_etcd_result(target, 'image')
def update_compute_node(self, context, node_uuid, values):
    """Merge *values* into the stored compute node; UUID may not change.

    :param context: request context (unused here, kept for API parity).
    :param node_uuid: UUID of the node to update.
    :param values: dict of fields to overwrite.
    :raises exception.InvalidParameterValue: if *values* contains 'uuid'.
    :raises exception.ComputeNodeNotFound: if the etcd key is missing.
    """
    if 'uuid' in values:
        msg = _('Cannot overwrite UUID for an existing node.')
        raise exception.InvalidParameterValue(err=msg)
    try:
        target = self.client.read('/compute_nodes/' + node_uuid)
        target_value = json.loads(target.value)
        target_value.update(values)
        # Serialize with dump_as_bytes for consistency with the other
        # etcd update_* methods in this module (was json.dumps).
        # NOTE(review): assumes 'json' is oslo_serialization.jsonutils,
        # as the sibling methods' use of dump_as_bytes implies -- confirm.
        target.value = json.dump_as_bytes(target_value)
        self.client.update(target)
    except etcd.EtcdKeyNotFound:
        raise exception.ComputeNodeNotFound(compute_node=node_uuid)
    except Exception as e:
        LOG.error(
            'Error occurred while updating compute node: %s',
            six.text_type(e))
        raise
    return translate_etcd_result(target, 'compute_node')
def update_capsule(self, context, capsule_id, values):
    """Merge *values* into the stored capsule; UUID may not change."""
    if 'uuid' in values:
        raise exception.InvalidParameterValue(
            err=_("Cannot overwrite UUID for an existing Capsule."))
    try:
        capsule_uuid = self.get_capsule_by_uuid(context, capsule_id).uuid
        target = self.client.read('/capsules/' + capsule_uuid)
        merged = json.loads(target.value)
        merged.update(values)
        target.value = json.dump_as_bytes(merged)
        self.client.update(target)
    except etcd.EtcdKeyNotFound:
        raise exception.CapsuleNotFound(capsule=capsule_id)
    except Exception as e:
        LOG.error('Error occurred while updating capsule: %s',
                  six.text_type(e))
        raise
    return translate_etcd_result(target, 'capsule')
def update_pci_device(self, node_id, address, values):
    """Merge *values* into a pci device record, creating it if absent."""
    try:
        pci_device = self.get_pci_device_by_addr(node_id, address)
        target = self.client.read('/pcidevices/' + pci_device.uuid)
        merged = json.loads(target.value)
        merged.update(values)
        target.value = json.dump_as_bytes(merged)
        self.client.update(target)
    except exception.PciDeviceNotFound:
        # Unknown device: fall back to creating a fresh record.
        values.update({'compute_node_uuid': node_id, 'address': address})
        return self._create_pci_device(values)
    except Exception as e:
        LOG.error('Error occurred while updating pci device: %s',
                  six.text_type(e))
        raise
    return translate_etcd_result(target, 'pcidevice')
def update_volume_mapping(self, context, volume_mapping_uuid, values):
    """Merge *values* into the stored volume mapping; UUID may not change.

    :raises exception.InvalidParameterValue: if *values* contains 'uuid'.
    :raises exception.VolumeMappingNotFound: if the etcd key is missing.
    """
    if 'uuid' in values:
        msg = _('Cannot overwrite UUID for an existing VolumeMapping.')
        raise exception.InvalidParameterValue(err=msg)
    try:
        target_uuid = self.get_volume_mapping_by_uuid(
            context, volume_mapping_uuid).uuid
        target = self.client.read('/volume_mapping/' + target_uuid)
        target_value = json.loads(target.value)
        target_value.update(values)
        target.value = json.dump_as_bytes(target_value)
        self.client.update(target)
    except etcd.EtcdKeyNotFound:
        raise exception.VolumeMappingNotFound(
            volume_mapping=volume_mapping_uuid)
    except Exception as e:
        # Fixed typo in log message: "mappping" -> "mapping".
        LOG.error('Error occurred while updating volume mapping: %s',
                  six.text_type(e))
        raise
    return translate_etcd_result(target, 'volume_mapping')
def detach_volume(self, volume):
    """Detach a cinder volume, rolling back 'detaching' state on failure."""
    volume_id = volume.volume_id
    cinder_api = cinder.CinderAPI(self.context)
    try:
        cinder_api.begin_detaching(volume_id)
    except cinder_exception.BadRequest as e:
        raise exception.Invalid(_("Invalid volume: %s") % str(e))
    connection = jsonutils.loads(volume.connection_info)
    try:
        self._disconnect_volume(connection)
    except Exception:
        with excutils.save_and_reraise_exception():
            LOG.exception('Failed to disconnect volume %(volume_id)s',
                          {'volume_id': volume_id})
            cinder_api.roll_detaching(volume_id)
    cinder_api.terminate_connection(
        volume_id, get_volume_connector_properties())
    cinder_api.detach(volume_id)
def from_pci_stats(pci_stats):
    """Create and return a PciDevicePoolList from the data stored in the db,
    which can be either the serialized object, or, prior to the creation of
    the device pool objects, a simple dict or a list of such dicts.
    """
    if isinstance(pci_stats, six.string_types):
        try:
            pci_stats = jsonutils.loads(pci_stats)
        except (ValueError, TypeError):
            pci_stats = None
    if not pci_stats:
        return PciDevicePoolList(objects=[])
    # Check for object-ness, or old-style storage format.
    if 'zun_object.namespace' in pci_stats:
        return PciDevicePoolList.obj_from_primitive(pci_stats)
    # This can be either a dict or a list of dicts.
    if isinstance(pci_stats, list):
        pools = [PciDevicePool.from_dict(stat) for stat in pci_stats]
    else:
        pools = [PciDevicePool.from_dict(pci_stats)]
    return PciDevicePoolList(objects=pools)
def update_devices_from_compute_resources(self, devices_json):
    """Sync the pci device tracker with compute node information.

    To support pci device hot plug, we sync with the compute node
    periodically, fetching all devices information from compute node,
    update the tracker and sync the DB information.

    Devices should not be hot-plugged when assigned to a container,
    but possibly the compute node has no such guarantee. The best we
    can do is to give a warning if a device is changed or removed
    while assigned.

    :param devices_json: The JSON-ified string of device information
                         that is returned from the compute node.
    """
    # Filter to assignable devices in a single comprehension
    # (was a manual append loop).
    devices = [dev for dev in jsonutils.loads(devices_json)
               if self.dev_filter.device_assignable(dev)]
    self._set_hvdevs(devices)
def process_response(self, req, resp, resource):
    """Converts responses to ``application/x-yaml`` content type."""
    if resp.status != '204 No Content':
        resp.set_header('Content-Type', 'application/x-yaml')
    for attr in ('body', 'data'):
        if not hasattr(resp, attr):
            continue
        value = getattr(resp, attr)
        # Decode JSON payloads when possible; non-JSON passes through.
        try:
            value = json.loads(value)
        except (TypeError, ValueError):
            pass
        if isinstance(value, dict):
            setattr(resp, attr, yaml.safe_dump(value))
        elif isinstance(value, (list, tuple)):
            setattr(resp, attr, yaml.safe_dump_all(value))
def test_volumedriver_create(self):
    """Creating a new volume succeeds with an empty Err field."""
    self.volume_providers_setup(['cinder'])
    fake_request = {
        u'Name': u'test-vol',
        u'Opts': {u'size': u'1'},
    }
    for provider in app.volume_providers.values():
        provider.check_exist = mock.MagicMock(return_value=False)
        provider.create = mock.MagicMock()
    response = self.app.post('/VolumeDriver.Create',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    self.assertEqual(200, response.status_code)
    self.assertEqual({u'Err': u''}, jsonutils.loads(response.data))
def test_volumedriver_create_invalid_volume_provider(self):
    """An unknown volume_provider option yields a non-empty Err."""
    self.volume_providers_setup(['cinder'])
    fake_request = {
        u'Name': u'test-vol',
        u'Opts': {u'size': u'1', u'volume_provider': u'provider'},
    }
    for provider in app.volume_providers.values():
        provider.check_exist = mock.MagicMock(return_value=False)
        provider.create = mock.MagicMock()
    # NOTE: the URL intentionally lacks a leading slash, as in the
    # original test.
    response = self.app.post('VolumeDriver.Create',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    self.assertEqual(200, response.status_code)
    self.assertNotEqual({u'Err': u''}, jsonutils.loads(response.data))
def test_volumedriver_remove_with_volume_not_exist(self):
    """Remove reports an empty Err even when delete finds no volume."""
    self.volume_providers_setup(['cinder'])
    fake_request = {u'Name': u'test-vol'}
    for provider in app.volume_providers.values():
        provider.delete = mock.MagicMock(return_value=False)
    response = self.app.post('/VolumeDriver.Remove',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    self.assertEqual(200, response.status_code)
    self.assertEqual({u'Err': u''}, jsonutils.loads(response.data))
def test_volumedriver_mount(self):
    """Mounting an existing volume returns its mountpoint."""
    self.volume_providers_setup(['cinder'])
    fake_name = u'test-vol'
    fake_request = {u'Name': fake_name}
    for provider in app.volume_providers.values():
        provider.check_exist = mock.MagicMock(return_value=True)
        provider.mount = mock.MagicMock(
            return_value=fake_mountpoint(fake_name))
    response = self.app.post('/VolumeDriver.Mount',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    expected = {
        u'Mountpoint': fake_mountpoint(fake_name),
        u'Err': u'',
    }
    self.assertEqual(expected, jsonutils.loads(response.data))
def test_volumedriver_mount_with_volume_not_exist(self):
    """Mounting a missing volume must not return a successful response."""
    self.volume_providers_setup(['cinder'])
    fake_name = u'test-vol'
    fake_request = {u'Name': fake_name}
    for provider in app.volume_providers.values():
        # NOTE(review): 'check_exit' looks like a typo for 'check_exist',
        # so the real existence check is NOT mocked here -- the assertion
        # still passes, but confirm the intent upstream before changing.
        provider.check_exit = mock.MagicMock(return_value=False)
    response = self.app.post('/VolumeDriver.Mount',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    unexpected = {
        u'Mountpoint': fake_mountpoint(fake_name),
        u'Err': u'',
    }
    self.assertEqual(200, response.status_code)
    self.assertNotEqual(unexpected, jsonutils.loads(response.data))
def test_volumedriver_path(self):
    """Path returns the mountpoint of a known volume with empty Err."""
    self.volume_providers_setup(['cinder'])
    fake_name = u'test-vol'
    fake_request = {u'Name': fake_name}
    for provider in app.volume_providers.values():
        provider.show = mock.MagicMock(return_value=fake_volume(fake_name))
    response = self.app.post('/VolumeDriver.Path',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    expected = {
        u'Mountpoint': fake_mountpoint(fake_name),
        u'Err': u'',
    }
    self.assertEqual(expected, jsonutils.loads(response.data))
def test_volumedriver_path_with_volume_not_exist(self):
    """Path reports 'Mountpoint Not Found' for an unknown volume."""
    self.volume_providers_setup(['cinder'])
    fake_docker_volume_name = u'test-vol'
    fake_request = {u'Name': fake_docker_volume_name}
    for provider in app.volume_providers.values():
        provider.show = mock.MagicMock(side_effect=exceptions.NotFound)
    response = self.app.post('/VolumeDriver.Path',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    self.assertEqual(200, response.status_code)
    self.assertEqual({u'Err': u'Mountpoint Not Found'},
                     jsonutils.loads(response.data))
def test_volumedriver_get(self):
    """Get returns the volume's name and mountpoint with empty Err."""
    self.volume_providers_setup(['cinder'])
    fake_name = u'test-vol'
    fake_request = {u'Name': fake_name}
    for provider in app.volume_providers.values():
        provider.show = mock.MagicMock(return_value=fake_volume(fake_name))
    response = self.app.post('/VolumeDriver.Get',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    expected = {
        u'Volume': {u'Name': fake_name,
                    u'Mountpoint': fake_mountpoint(fake_name)},
        u'Err': u'',
    }
    self.assertEqual(200, response.status_code)
    self.assertEqual(expected, jsonutils.loads(response.data))
def test_volumedriver_get_with_volume_not_exist(self):
    """Get reports 'Volume Not Found' for an unknown volume."""
    self.volume_providers_setup(['cinder'])
    fake_docker_volume_name = u'test-vol'
    fake_request = {u'Name': fake_docker_volume_name}
    for provider in app.volume_providers.values():
        provider.show = mock.MagicMock(side_effect=exceptions.NotFound())
    response = self.app.post('/VolumeDriver.Get',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    self.assertEqual(200, response.status_code)
    self.assertEqual({u'Err': u'Volume Not Found'},
                     jsonutils.loads(response.data))
def test_ipam_driver_request_pool_with_default_v6pool(
        self, mock_list_subnetpools):
    """RequestPool with the V6 flag returns the default v6 pool id."""
    fake_kuryr_subnetpool_id = uuidutils.generate_uuid()
    fake_name = 'kuryr6'
    kuryr_subnetpools = self._get_fake_v6_subnetpools(
        fake_kuryr_subnetpool_id, prefixes=['fe80::/64'])
    mock_list_subnetpools.return_value = {
        'subnetpools': kuryr_subnetpools['subnetpools']}
    fake_request = {
        'AddressSpace': '',
        'Pool': '',
        'SubPool': '',  # In the case --ip-range is not given
        'Options': {},
        'V6': True,
    }
    response = self.app.post('/IpamDriver.RequestPool',
                             content_type='application/json',
                             data=jsonutils.dumps(fake_request))
    self.assertEqual(200, response.status_code)
    mock_list_subnetpools.assert_called_with(name=fake_name)
    decoded = jsonutils.loads(response.data)
    self.assertEqual(fake_kuryr_subnetpool_id, decoded['PoolID'])
def test_network_driver_endpoint_operational_info_with_no_port(self):
    """EndpointOperInfo returns an empty Value when no port matches."""
    docker_network_id = lib_utils.get_hash()
    docker_endpoint_id = lib_utils.get_hash()
    fake_port_response = {"ports": []}
    with mock.patch.object(app.neutron, 'list_ports') as mock_list_ports:
        mock_list_ports.return_value = fake_port_response
        payload = {
            'NetworkID': docker_network_id,
            'EndpointID': docker_endpoint_id,
        }
        response = self.app.post('/NetworkDriver.EndpointOperInfo',
                                 content_type='application/json',
                                 data=jsonutils.dumps(payload))
        decoded = jsonutils.loads(response.data)
        self.assertEqual(200, response.status_code)
        port_name = utils.get_neutron_port_name(docker_endpoint_id)
        mock_list_ports.assert_called_once_with(name=port_name)
        self.assertEqual({}, decoded['Value'])
def test_network_driver_allocate_network(self):
    """AllocateNetwork echoes back an empty Options mapping."""
    docker_network_id = lib_utils.get_hash()
    allocate_network_request = {
        'NetworkID': docker_network_id,
        'IPv4Data': [{
            'AddressSpace': 'foo',
            'Pool': '192.168.42.0/24',
            'Gateway': '192.168.42.1/24',
        }],
        'IPv6Data': [],
        'Options': {},
    }
    response = self.app.post(
        '/NetworkDriver.AllocateNetwork',
        content_type='application/json',
        data=jsonutils.dumps(allocate_network_request))
    self.assertEqual(200, response.status_code)
    self.assertEqual({'Options': {}}, jsonutils.loads(response.data))
def test_join_bad_request(self):
    """Join with a non-hex endpoint id fails with a descriptive Err."""
    fake_docker_network_id = lib_utils.get_hash()
    invalid_docker_endpoint_id = 'id-should-be-hexdigits'
    fake_container_id = lib_utils.get_hash()
    response = self._invoke_join_request(
        fake_docker_network_id, invalid_docker_endpoint_id,
        fake_container_id)
    self.assertEqual(
        w_exceptions.BadRequest.code, response.status_code)
    decoded = jsonutils.loads(response.data)
    self.assertIn('Err', decoded)
    # TODO(tfukushima): Add the better error message validation.
    self.assertIn(invalid_docker_endpoint_id, decoded['Err'])
    self.assertIn('EndpointID', decoded['Err'])
def handle_request(self, request):
    """Set the provider header when the resource name carries an '@' tag."""
    if self._is_targeted(request.headers):
        return
    body = jsonutils.loads(request.body)
    if request.service == 'image':
        if request.version == 'v1':
            name = request.headers.get('X-IMAGE-META-NAME', '')
        else:
            name = body.get('name', '')
    elif request.service == 'volume':
        name = body['volume'].get('name', '')
    # A name of the form '<real-name>@<provider>' selects a provider;
    # anything else is left untouched.
    parts = name.split('@')
    if len(parts) == 2:
        request.headers['MM-SERVICE-PROVIDER'] = parts[1]
def take_action(self, args):
    """Return (keys, values) columns of a host's facts, sorted by key."""
    try:
        if args.playbook:
            host = (models.Host.query
                    .filter_by(playbook_id=args.playbook)
                    .filter((models.Host.id == args.host) |
                            (models.Host.name == args.host)).one())
        else:
            host = models.Host.query.filter_by(id=args.host).one()
    except (models.NoResultFound, models.MultipleResultsFound):
        raise RuntimeError('Host %s could not be found' % args.host)

    if not host.facts:
        raise RuntimeError('No facts available for host %s' % args.host)

    fact_items = jsonutils.loads(host.facts.values)
    # Keep every fact unless an explicit --fact filter was given.
    facts = ((k, v) for k, v in six.iteritems(fact_items)
             if not args.fact or k in args.fact)
    return six.moves.zip(*sorted(facts))
def test_playbook_treeview(self):
    """The treeview mirrors the two fake playbook files on disk."""
    ctx = ansible_run()
    treeview = jsonutils.loads(u.playbook_treeview(ctx['playbook'].id))
    # ansible_run provides two fake files:
    # /some/path/main.yml and /playbook.yml
    for node in treeview:
        if node['text'] == 'some':
            self.assertEqual(node['text'], 'some')
            child = node['nodes'][0]
            self.assertEqual(child['text'], 'path')
            child = child['nodes'][0]
            self.assertEqual(child['text'], 'main.yml')
            self.assertEqual(child['dataAttr']['load'],
                             ctx['task_file'].id)
        else:
            self.assertEqual(node['text'], 'playbook.yml')
            self.assertEqual(node['dataAttr']['load'], ctx['pb_file'].id)
def get_local_disk_property(self, instance):
    """Return the instance's local-disk spec as a dict, or None.

    Prefers the cached JSON blob in ``system_metadata``; otherwise parses
    the ``quota:local_disk`` extra spec, which has the colon-separated
    form ``type:count:size:safe_format``. Returns None when no complete
    property can be derived.
    """
    # Look the metadata key up once instead of twice.
    cached = instance.system_metadata.get('local_disk_property')
    if cached:
        return jsonutils.loads(cached)

    result = {}
    extra_specs = self.get_instance_extra_specs(instance)
    if extra_specs:
        spec = extra_specs.get('quota:local_disk')
        if spec:
            parts = spec.split(':')
            result['type'] = parts[0]
            result['count'] = int(parts[1])
            result['size'] = int(parts[2])
            result['safe_format'] = parts[3]
    # Only a fully-populated spec is returned; otherwise fall through
    # to an implicit None, as before.
    if len(result) == 4:
        return result
def simple_parse(in_str):
    """Parse *in_str* as JSON, falling back to YAML; result must be a dict."""
    try:
        out_dict = jsonutils.loads(in_str)
    except ValueError:
        try:
            out_dict = yaml.load(in_str, Loader=YamlLoader)
        except yaml.YAMLError as yea:
            raise ValueError(_('Error parsing input: %s')
                             % six.text_type(yea))
        # An empty YAML document parses to None; normalize to {}.
        if out_dict is None:
            out_dict = {}
    if not isinstance(out_dict, dict):
        raise ValueError(_('The input is not a JSON object or YAML mapping.'))
    return out_dict
def __init__(self, message=None):
    """Parse the server error body into ``self.error``.

    Adjutant may return a list under 'errors'; that list is folded into
    the standard ``{'error': {'message': ...}}`` shape. Any parse
    failure falls back to this exception class's docstring.
    """
    super(HTTPException, self).__init__(message)
    try:
        self.error = jsonutils.loads(message)
        # Adjutant client: mangle the 'errors' return list into
        # standard 'error' format
        if 'errors' in self.error:
            self.error['error'] = {
                "message": ', '.join(self.error['errors']),
            }
        if 'error' not in self.error:
            raise KeyError(_('Key "error" does not exist.'))
    except KeyError:
        # NOTE(jianingy): If key 'error' happens not exist,
        # self.message becomes no sense. In this case, we
        # return doc of current exception class instead.
        self.error = {'error': {'message': self.__class__.__doc__}}
    except Exception:
        # Non-JSON body (or loads(None)): keep the raw message if any.
        # NOTE(review): assumes the class defines a 'message' attribute
        # on Python 3 -- confirm.
        self.error = {'error':
                      {'message': self.message or self.__class__.__doc__}}
def test_detail_servers(self, mock_get_all):
    """Every server in the detail listing carries the preemptible flag."""
    # NOTE(danms): Orphan these fakes (no context) so that we
    # are sure that the API is requesting what it needs without
    # having to lazy-load.
    meta = {"preemptible": True}
    mock_get_all.return_value = objects.InstanceList(objects=[
        os_api_fakes.stub_instance_obj(ctxt=None, id=1,
                                       system_metadata=meta),
        os_api_fakes.stub_instance_obj(ctxt=None, id=2,
                                       system_metadata=meta),
    ])
    req = os_api_fakes.HTTPRequest.blank(self.base_url + 'servers/detail')
    res = req.get_response(self.app)
    servers = jsonutils.loads(res.body)['servers']
    self.assertNotEqual(len(servers), 0)
    for server in servers:
        self.assertIn('preemptible', server)
def json_request(self, method, conn_url, **kwargs):
    """Issue an HTTP request with JSON headers and decode a JSON reply.

    :param method: HTTP verb.
    :param conn_url: URL passed to the underlying ``_http_request``.
    :returns: tuple of (response, decoded body); the body is an empty
        list for 204/205 or missing Content-Type, None for non-JSON
        content, and the raw string if JSON decoding fails.
    """
    kwargs.setdefault('headers', {})
    kwargs['headers'].setdefault('Content-Type', 'application/json')
    kwargs['headers'].setdefault('Accept', 'application/json')
    if 'body' in kwargs:
        kwargs['body'] = jsonutils.dump_as_bytes(kwargs['body'])
    resp, body_iter = self._http_request(conn_url, method, **kwargs)
    content_type = resp.headers.get('Content-Type')
    if (resp.status_code in (http_client.NO_CONTENT,
                             http_client.RESET_CONTENT)
            or content_type is None):
        return resp, list()
    if 'application/json' in content_type:
        # Join the chunk iterator directly -- the intermediate list
        # comprehension was unnecessary.
        body = ''.join(body_iter)
        try:
            body = jsonutils.loads(body)
        except ValueError:
            LOG.error(_LE('Could not decode response body as JSON'))
    else:
        body = None
    return resp, body
def create(self, code, runtime=None, package=None, **kwargs):
    """Create a function resource from *code*, optionally with a package."""
    data = {
        'runtime_id': runtime,
        'code': jsonutils.dumps(code),
    }
    # Only forward keyword arguments that were actually supplied.
    for key, value in kwargs.items():
        if value is not None:
            data[key] = value
    params = {"data": data}
    if package:
        params["files"] = {'package': package}
    response = self.http_client.request(
        '/v1/functions', 'POST', **params
    )
    body = jsonutils.loads(response.text)
    return self.resource_class(self, body)
def update(self, id, code=None, package=None, **kwargs):
    """Update a function, merging *code* into the payload when given."""
    if code:
        kwargs.update(code)
    params = {"data": kwargs}
    if package:
        params["files"] = {'package': package}
    response = self.http_client.request(
        '/v1/functions/%s' % id, 'PUT', **params
    )
    return self.resource_class(self, jsonutils.loads(response.text))
def json_request(self, url, method, **kwargs):
    """Send a request with JSON-encoded 'data', decoding a JSON reply."""
    headers = kwargs.setdefault('headers', {})
    headers['Content-Type'] = kwargs.pop('content_type',
                                         'application/json')
    if 'data' in kwargs:
        kwargs['data'] = jsonutils.dumps(kwargs['data'])
    # NOTE(starodubcevna): We need to prove that json field is empty,
    # or it will be modified by keystone adapter.
    kwargs['json'] = None
    resp = self.request(url, method, **kwargs)
    body = resp.text
    if body:
        # Fall back to the raw text when the body is not valid JSON.
        try:
            body = jsonutils.loads(body)
        except ValueError:
            pass
    return resp, body
def extract_resources(args):
    """Parse ``args.resources`` into a list of resource dicts.

    Each comma-separated entry has the form ``id=type=name[=extra_info]``
    where ``extra_info``, when present, is a JSON document.

    :param args: parsed CLI arguments exposing a ``resources`` string.
    :returns: list of dicts with keys id, type, name and (optionally)
        extra_info decoded from JSON.
    :raises exceptions.CommandError: when an entry does not contain
        exactly 3 or 4 '='-separated fields.
    """
    resources = []
    for data in args.resources.split(','):
        # Split once instead of re-splitting for the membership test,
        # the zip, and the error check.
        fields = data.split('=')
        if len(fields) not in (3, 4):
            raise exceptions.CommandError(
                "Unable to parse parameter resources. "
                "The keys of resource are id , type, name and "
                "extra_info. The extra_info field is optional.")
        resource = dict(zip(['id', 'type', 'name', 'extra_info'], fields))
        if resource.get('extra_info'):
            resource['extra_info'] = jsonutils.loads(
                resource.get('extra_info'))
        resources.append(resource)
    return resources
def json_request(self, method, url, **kwargs):
    """Send a JSON request, accepting deprecated 'body' as an alias of 'data'."""
    headers = kwargs.setdefault('headers', {})
    headers['Content-Type'] = kwargs.pop('content_type',
                                         'application/json')

    if 'body' in kwargs:
        if 'data' in kwargs:
            raise ValueError("Can't provide both 'data' and "
                             "'body' to a request")
        LOG.warning("Use of 'body' is deprecated; use 'data' instead")
        kwargs['data'] = kwargs.pop('body')
    if 'data' in kwargs:
        kwargs['data'] = jsonutils.dumps(kwargs['data'])

    # NOTE(starodubcevna): We need to prove that json field is empty,
    # or it will be modified by keystone adapter.
    kwargs['json'] = None
    resp, body = self.request(url, method, **kwargs)
    if body:
        # Fall back to the raw body when it is not valid JSON.
        try:
            body = jsonutils.loads(body)
        except ValueError:
            pass
    return resp, body
def added(self, router_key, value):
    """Handle an added router-interface or floating-ip etcd key.

    :param router_key: etcd key whose parsed tokens select the branch.
    :param value: JSON document describing the router port or
        floating ip.
    """
    token1, token2 = self.parse_key(router_key)
    if not (token1 and token2):
        return
    # Both branches decode the same JSON payload; do it once up front
    # (the original called jsonutils.loads in each branch).
    payload = jsonutils.loads(value)
    if token1 != 'floatingip':
        port_id = token2
        self.data.vppf.ensure_router_interface_on_host(port_id, payload)
        if payload.get('net_type') == 'vxlan':
            self.data.add_gpe_remote_mapping(
                payload['segmentation_id'],
                payload['loopback_mac'],
                payload['gateway_ip'])
    else:
        floating_ip = token2
        self.data.vppf.associate_floatingip(floating_ip, payload)
def _get_vif(self, pod):
    """Return the VIF recorded in the pod's kuryr annotation, or None."""
    # TODO(ivc): same as VIFHandler._get_vif
    try:
        vif_annotation = pod['metadata']['annotations'][
            k_const.K8S_ANNOTATION_VIF]
    except KeyError:
        return None
    vif = obj_vif.vif.VIFBase.obj_from_primitive(
        jsonutils.loads(vif_annotation))
    LOG.debug("Got VIF from annotation: %r", vif)
    return vif
def test_run_write_version(self, *args):
    """VERSION command exits 0 and reports supported CNI versions."""
    stdin = StringIO()
    stdout = StringIO()
    env = {'CNI_COMMAND': 'VERSION', 'CNI_ARGS': 'foo=bar'}
    code = self.runner.run(env, stdin, stdout)
    self.assertEqual(0, code)
    result = jsonutils.loads(stdout.getvalue())
    self.assertEqual(api.CNIRunner.SUPPORTED_VERSIONS,
                     result['supportedVersions'])
    self.assertEqual(api.CNIRunner.VERSION, result['cniVersion'])
def _get_in_use_ports(self):
    """Collect neutron port ids referenced by running pods' VIF annotations."""
    kubernetes = clients.get_kubernetes_client()
    in_use_ports = []
    running_pods = kubernetes.get(constants.K8S_API_BASE + '/pods')
    for pod in running_pods['items']:
        try:
            vif = jsonutils.loads(
                pod['metadata']['annotations'][
                    constants.K8S_ANNOTATION_VIF])
        except KeyError:
            LOG.debug("Skipping pod without kuryr VIF annotation: %s",
                      pod)
        else:
            in_use_ports.append(vif['versioned_object.data']['id'])
    return in_use_ports