Example usages of the Java class com.hp.hpl.jena.rdf.model.impl.ResourceImpl (instance source code)

Project: gerbil    File: SimpleSubClassInferencer.java
@Override
public void inferSubClasses(String classURI, ClassSet hierarchy, ClassNodeFactory<? extends ClassNode> factory) {
    // Register the class itself as a node of the hierarchy.
    Resource clazz = new ResourceImpl(classURI);
    Set<String> seenUris = new HashSet<String>();
    addOrUpdateUri(clazz, hierarchy, factory, seenUris);

    // Nothing more to infer if the model does not contain this class.
    if (!classModel.containsResource(clazz)) {
        return;
    }

    // Every statement "?s rdfs:subClassOf <classURI>" contributes a sub class.
    StmtIterator subClassStmts = classModel.listStatements(null, RDFS.subClassOf, clazz);
    while (subClassStmts.hasNext()) {
        Resource subClass = subClassStmts.next().getSubject();
        if (!seenUris.contains(subClass.getURI())) {
            addOrUpdateUri(subClass, hierarchy, factory, seenUris);
        }
    }
}
Project: parkjam    File: BaseXMLWriter.java
/**
 * Replaces the currently blocked syntax rules and returns the previously
 * blocked ones so that callers can restore them later.
 *
 * @param o either a {@code Resource[]} of rules to block, or a comma/space
 *          separated {@code String} of rule fragment names (the special
 *          name {@code "daml:collection"} maps to the DAML rule, any other
 *          fragment is resolved against the RDF syntax namespace)
 * @return the rules that were blocked before this call
 */
Resource[] setBlockRules(Object o) {
    Resource[] previouslyBlocked = blockedRules;
    unblockAll();
    if (o instanceof Resource[]) {
        blockedRules = (Resource[]) o;
    } else {
        // Parse a comma/space separated list of rule fragment names.
        StringTokenizer tkn = new StringTokenizer((String) o, ", ");
        Vector<Resource> rules = new Vector<Resource>();
        // Use hasMoreTokens() to match the nextToken() call below instead of
        // mixing the Enumeration API (hasMoreElements) with the tokenizer API.
        while (tkn.hasMoreTokens()) {
            String frag = tkn.nextToken();
            if (frag.equals("daml:collection"))
                rules.add(DAML_OIL.collection);
            else
                rules.add(new ResourceImpl(RDFSyntax.getURI() + frag));
        }
        // toArray replaces the manual new-array + copyInto dance.
        blockedRules = rules.toArray(new Resource[rules.size()]);
    }
    for (int i = 0; i < blockedRules.length; i++)
        blockRule(blockedRules[i]);
    return previouslyBlocked;
}
Project: semanticoctopus    File: ResultSetFormatterProviderTest.java
/**
 * Builds a minimal one-triple model and evaluates the test query over it.
 *
 * @return the result set produced by executing {@code QUERY} on the model
 */
private ResultSet createResultSet() {
    final Model model = ModelFactory.createDefaultModel();
    model.add(new ResourceImpl(SUBJECT),
            ResourceFactory.createProperty(PREDICATE),
            new ResourceImpl(OBJECT));
    // Compile and execute the SELECT query directly over the in-memory model.
    return QueryExecutionFactory.create(QueryFactory.create(QUERY), model).execSelect();
}
Project: OntoMapper    File: TestOntologyMapper.java
// Builds a single pattern/replacement pair that rewrites the cd#year
// predicate to the new#year predicate; subject and object are fresh blank
// node resources and only the predicate position is matched concretely.
private List<StatementsPair> fakeMapping(){
    StatementPattern pattern = new StatementPattern(
            new ResourceImpl(),
            new PropertyImpl("http://www.recshop.fake/cd#year"),
            new ResourceImpl(),
            true, false, true);

    StatementPattern replacement = new StatementPattern(
            new ResourceImpl(),
            new PropertyImpl("http://www.recshop.fake/new#year"),
            new ResourceImpl(),
            true, false, true);

    return asList(new StatementsPair(pattern, replacement));
}
Project: Tapioca    File: ESEngine.java
/**
 * Maps the RDF meta data model to the Elasticsearch server: (re)creates the
 * index with a Title/URI/Description mapping and indexes one document per
 * subject of the meta data model.
 *
 * @throws IOException if building the JSON mapping or a document fails
 * @throws InterruptedException if an Elasticsearch request is interrupted
 */
public void MappingRDFToESServer() throws IOException, InterruptedException{
    String indexName = INDEX_NAME;
    String documentType = DOCUMENT_TYPE;

    // Delete a pre-existing index so indexing always starts from a clean state.
    IndicesExistsResponse resource = transportClient.admin().indices().prepareExists(indexName).execute().actionGet();
    if (resource.isExists()) {
        DeleteIndexRequestBuilder deleteIndex = transportClient.admin().indices().prepareDelete(indexName);
        deleteIndex.execute().actionGet();
        LOGGER.info("Index already exists, creating new clean index...");
    }
    CreateIndexRequestBuilder createIndexRequestBuilder = transportClient.admin().indices().prepareCreate(indexName);

    // Declare the meta data fields of the document mapping.
    XContentBuilder builder = jsonBuilder()
            .startObject()
                .startObject(documentType)
                    .startObject("_meta")
                        .field("Title", "string")
                        .field("URI", "uri")
                        .field("Description", "string")
                    .endObject()
                .endObject()
            .endObject();
    createIndexRequestBuilder.addMapping(documentType, builder);
    createIndexRequestBuilder.execute().actionGet();

    ResIterator listResources = rdfMetaDataModel.listSubjects();
    long documentID = 0;
    while (listResources.hasNext())
    {
        String uri = listResources.next().toString();

        // Guard each lookup: calling next() on an empty statement iterator
        // would throw NoSuchElementException for any subject that has no
        // rdfs:label or rdfs:comment.
        String titel = "";
        if (rdfMetaDataModel.contains(new ResourceImpl(uri), RDFS.label)) {
            titel = rdfMetaDataModel.listStatements(new ResourceImpl(uri), RDFS.label, (RDFNode) null)
                    .next().getObject().toString();
        }
        String description = "";
        if (rdfMetaDataModel.contains(new ResourceImpl(uri), RDFS.comment)) {
            description = rdfMetaDataModel.listStatements(new ResourceImpl(uri), RDFS.comment, (RDFNode) null)
                    .next().getObject().toString();
        }

        documentID++;
        // Index each subject as its own document, keyed by a running id.
        IndexRequestBuilder indexRequestBuilder = transportClient.prepareIndex(indexName, documentType, "" + documentID);
        // build json object
        final XContentBuilder contentBuilder = jsonBuilder().startObject().prettyPrint();

        contentBuilder.field("Title", titel)
                          .field("URI", uri)
                          .field("Description", description);
        indexRequestBuilder.setSource(contentBuilder);
        indexRequestBuilder.execute().actionGet();
    }
    LOGGER.info( "RDF metadatamodel has been successfully mapped to elasticserach server.");
}
Project: Tapioca    File: SearchEngineBean.java
/**
 * Executes a keyword search against the Elasticsearch index and enriches
 * every hit with title/description taken from the RDF meta data model.
 *
 * @param inputText the keyword query entered by the user
 * @return search results, most similar dataset first
 * @throws InterruptedException if the Elasticsearch request is interrupted
 * @throws IOException if the Elasticsearch request fails
 */
public List<SearchResult> run(String inputText) throws IOException, InterruptedException {
    TopDoubleObjectCollection<String> mostSimilarDatasets = elasticsearchEngine
            .searchKeyWords(elasticsearchEngine.getTransportClient(),
                            elasticsearchEngine.getIndexName(), 
                            elasticsearchEngine.getType(),
                            inputText);

    // initialize
    List<SearchResult> searchResults  = new ArrayList<SearchResult>();
    Model metaDataModel = topicmodelEngine.getRDFMetaModel();

    // Iterate from the highest to the lowest similarity score.
    for (int i = mostSimilarDatasets.values.length - 1; i >= 0; i--) {
        String uri = (String) mostSimilarDatasets.objects[i];
        String titel = "";
        String description = "";
        double similarity = mostSimilarDatasets.values[i];

        // Guard each property lookup individually: containsResource() only
        // proves the resource occurs somewhere in the model, not that it has
        // a label or comment, so an unguarded next() could throw
        // NoSuchElementException.
        if (metaDataModel.contains(new ResourceImpl(uri), RDFS.label)) {
            titel = metaDataModel.listStatements(new ResourceImpl(uri), RDFS.label, (RDFNode) null)
                    .next().getObject().toString();
        }
        if (metaDataModel.contains(new ResourceImpl(uri), RDFS.comment)) {
            description = metaDataModel.listStatements(new ResourceImpl(uri), RDFS.comment, (RDFNode) null)
                    .next().getObject().toString();
        }

        if (titel.equals("")) {
            titel = uri; // fall back to the URI when no label is available
        }

        if (description.equals("")) {
            description = "No description.";
        }

        // create datasets
        searchResults.add(new SearchResult(new Dataset(titel, uri, description), similarity));
    }

    // transmit to bean
    return searchResults;
}
Project: Tapioca    File: SearchEngineBean.java
/**
 * Executes a topic-model similarity search and enriches every hit with
 * title/description taken from the RDF meta data model.
 *
 * @param inputModel the model describing the dataset to compare
 * @return search results, most similar dataset first
 */
public List<SearchResult> run(Model inputModel) {
    // NOTE(review): the parameter inputModel is never used here; the
    // similarity search runs on inputDocument (presumably a field set
    // elsewhere) — confirm this is intentional.
    TopDoubleObjectCollection<String> mostSimilarDatasets = topicmodelEngine.retrieveSimilarDatasets(inputDocument);

    // initialize
    List<SearchResult> searchResults  = new ArrayList<SearchResult>();
    Corpus corpus = topicmodelEngine.getCorpus();
    Model metaDataModel = topicmodelEngine.getRDFMetaModel();

    // Iterate from the highest to the lowest similarity score.
    mostSimilarDatasets = toAscendingOrder(mostSimilarDatasets);
    for (int i = mostSimilarDatasets.values.length - 1; i >= 0; i--) {
        String uri = "";
        String titel = "";
        String description = "";
        double similarity = mostSimilarDatasets.values[i];

        // Find the corpus document whose URI matches this search hit.
        for (int j = 0; j < corpus.getNumberOfDocuments(); j++) {
            Document document = corpus.getDocument(j);
            if (document.getProperty(DocumentURI.class).getStringValue()
                    .equals((String) mostSimilarDatasets.objects[i])) {
                uri = document.getProperty(DocumentURI.class).getStringValue();

                // Guard each property lookup: containsResource() does not
                // guarantee the resource has a label/comment, so an
                // unguarded next() could throw NoSuchElementException.
                if (metaDataModel.contains(new ResourceImpl(uri), RDFS.label)) {
                    titel = metaDataModel.listStatements(new ResourceImpl(uri), RDFS.label, (RDFNode) null)
                            .next().getObject().toString();
                }
                if (metaDataModel.contains(new ResourceImpl(uri), RDFS.comment)) {
                    description = metaDataModel.listStatements(new ResourceImpl(uri), RDFS.comment, (RDFNode) null)
                            .next().getObject().toString();
                }

                if (titel.equals("")) {
                    titel = "(ID: " + document.getDocumentId() + ") " + document.getProperty(DocumentName.class).getName();
                }

                if (description.equals("")) {
                    description = "No description.";
                }
                break;
            }
        }

        // create datasets
        searchResults.add(new SearchResult(new Dataset(titel, uri, description ), similarity));
    }
    // transmit to bean
    return searchResults;
}