Java class com.mongodb.client.gridfs.GridFSBuckets example source code

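Before the project examples, here is a minimal, self-contained sketch of the API these snippets revolve around: GridFSBuckets.create obtains a GridFSBucket for a database (optionally with a custom bucket name), and the bucket then handles uploads and downloads. The sketch assumes the synchronous driver and legacy MongoClient used by the examples below; the database name, bucket name, file name and contents are placeholders.

import java.io.ByteArrayInputStream;
import java.io.ByteArrayOutputStream;
import java.nio.charset.StandardCharsets;

import org.bson.types.ObjectId;

import com.mongodb.MongoClient;
import com.mongodb.client.MongoDatabase;
import com.mongodb.client.gridfs.GridFSBucket;
import com.mongodb.client.gridfs.GridFSBuckets;

public class GridFSBucketsSketch {
    public static void main(String[] args) {
        try (MongoClient client = new MongoClient()) {
            MongoDatabase db = client.getDatabase("exampleDb");

            // "exampleBucket" is a placeholder; omit the name to use the default "fs" bucket
            GridFSBucket bucket = GridFSBuckets.create(db, "exampleBucket");

            // upload from any InputStream; the returned ObjectId identifies the stored file
            byte[] data = "hello gridfs".getBytes(StandardCharsets.UTF_8);
            ObjectId fileId = bucket.uploadFromStream("hello.txt", new ByteArrayInputStream(data));

            // download again, either by id (as here) or by filename
            ByteArrayOutputStream out = new ByteArrayOutputStream();
            bucket.downloadToStream(fileId, out);
            System.out.println(new String(out.toByteArray(), StandardCharsets.UTF_8));
        }
    }
}
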
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * Create a file in GridFS with the given filename and write
 * some generated test data to it.
 * @param filename the name of the file to create
 * @param size the number of bytes to write
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called when the file
 * has been written
 */
private void prepareData(String filename, int size, Vertx vertx,
    Handler<AsyncResult<String>> handler) {
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      try (GridFSUploadStream os = gridFS.openUploadStream(filename)) {
        for (int i = 0; i < size; ++i) {
          os.write((byte)(i & 0xFF));
        }
      }
    }
    f.complete(filename);
  }, handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreAdd(TestContext context, Vertx vertx,
    String path, Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();

      GridFSFile file = files.first();
      ByteArrayOutputStream baos = new ByteArrayOutputStream();
      gridFS.downloadToStream(file.getFilename(), baos);
      String contents = new String(baos.toByteArray(), StandardCharsets.UTF_8);
      context.assertEquals(CHUNK_CONTENT, contents);
    }
    f.complete();
  }, handler);
}
Project: reactive-hamster    File: ResumeTest.java
@Test
public void testWriteReplacePage() throws IOException, ClassNotFoundException {
    HamsterPage page = createTestPage(testUIEngine);
    Document mo = testCollection.createNew();
    TestComponent c = new TestComponent();
    c.m = mo;
    c.test = "test";
    page.addComponent(c);
    testUIEngine.initDB(db, GridFSBuckets.create(db));
    testUIEngine.persistPage(page);
    HamsterPage p2 = testUIEngine.resumePage(page.getId());
    assertNotNull(p2);
    TestComponent t = (TestComponent) p2.components.get(0);
    assertNotNull(t);
    assertEquals("test", t.test);
    assertNotNull(t.m);
    assertTrue(t.m == mo);
}
Project: opensearchserver    File: MongoDbCrawlCache.java
@Override
public void init(String configString) throws IOException {
    rwl.w.lock();
    try {
        closeNoLock();
        final MongoClientURI connectionString = new MongoClientURI(configString);
        mongoClient = new MongoClient(connectionString);
        final MongoDatabase database = mongoClient.getDatabase(
                connectionString.getDatabase() == null ? DEFAULT_DATABASE : connectionString.getDatabase());
        metaCollection = database.getCollection(META_COLLECTION);
        metaCollection.createIndex(Document.parse("{\"uri\":1}"));
        indexedCollection = database.getCollection(INDEXED_COLLECTION);
        indexedCollection.createIndex(Document.parse("{\"uri\":1}"));
        contentGrid = GridFSBuckets.create(database);
    } finally {
        rwl.w.unlock();
    }
}
Project: awplab-core    File: LogAdminProvider.java
private void deleteLog(Date olderThan) {
    MongoCollection<Log> logCollection = mongoService.getMongoClient().getDatabase(database).getCollection(collection, Log.class);
    Bson filter = Filters.lt("timeStamp", olderThan);
    logCollection.find(filter).forEach((Block<? super Log>) log -> {
        log.getLogFiles().forEach(logFile -> {
            GridFSBucket gridFSBucket = GridFSBuckets.create(mongoService.getMongoClient().getDatabase(database), logFile.getBucket());
            gridFSBucket.delete(logFile.getFileObjectId());
        });
    });
    DeleteResult deleteResult = logCollection.deleteMany(filter);
}
Project: awplab-core    File: BucketStreamResource.java
public BucketStreamResource(MongoDatabase database, String bucket, ObjectId objectId) {
    super((StreamSource) () -> {
        GridFSBucket gridFSBucket = GridFSBuckets.create(database, bucket);
        return gridFSBucket.openDownloadStream(objectId);
    }, gridFSFile(database, bucket, objectId).getFilename());

    this.database = database;
    this.bucket = bucket;
    this.objectId = objectId;
}
Project: awplab-core    File: LogFile.java
public void save(MongoDatabase database) throws IOException {
    if (temporaryFile == null) return;

    try (FileInputStream fileInputStream = new FileInputStream(temporaryFile)) {
        ObjectId objectId = GridFSBuckets.create(database, bucket).uploadFromStream(temporaryFile.getName(), fileInputStream);
        this.setFileObjectId(objectId);
        this.setBucket(bucket);
    }
    finally {
        // remove the temporary file once its contents have been uploaded
        temporaryFile.delete();
    }

    temporaryFile = null;

}
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * Connect to MongoDB and get the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param handler a handler that will be called with the chunk size
 */
private void getChunkSize(Vertx vertx, Handler<AsyncResult<Integer>> handler) {
  vertx.<Integer>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      f.complete(gridFS.getChunkSizeBytes());
    }
  }, handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void prepareData(TestContext context, Vertx vertx, String path,
    Handler<AsyncResult<String>> handler) {
  String filename = PathUtils.join(path, ID);
  vertx.<String>executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);
      byte[] contents = CHUNK_CONTENT.getBytes(StandardCharsets.UTF_8);
      gridFS.uploadFromStream(filename, new ByteArrayInputStream(contents));
      f.complete(filename);
    }
  }, handler);
}
Project: georocket    File: MongoDBStoreTest.java
@Override
protected void validateAfterStoreDelete(TestContext context, Vertx vertx,
    String path, Handler<AsyncResult<Void>> handler) {
  vertx.executeBlocking(f -> {
    try (MongoClient client = new MongoClient(mongoConnector.serverAddress)) {
      MongoDatabase db = client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
      GridFSBucket gridFS = GridFSBuckets.create(db);

      GridFSFindIterable files = gridFS.find();
      context.assertTrue(Iterables.isEmpty(files));
    }
    f.complete();
  }, handler);
}
Project: reactive-hamster    File: UITest.java
@Override
public UIEngine createTestEngine() {
    if (testEngine != null) {
        testEngine.destroy();
    }
    loader = new HamsterLoader();
    UIEngine engine = createUITestEngine();
    engine.initDB(db, GridFSBuckets.create(db));
    loader.setEngine(engine);
    testEngine = engine;
    CometProcessor.setEngine(engine);
    return engine;
}
Project: reactive-hamster    File: ExampleChatEngine.java
protected void initDB(String dbName) {
    //set up the persistence layer
    //Connect to the local MongoDB instance
    MongoClient m = new MongoClient();
    //get the DB with the given Name
    MongoDatabase chatDB = m.getDatabase(dbName);
    //initialize our collections
    DocumentCollections.init(this, chatDB);
    //set up GridFS for storing files
    GridFSBucket fs = GridFSBuckets.create(chatDB, "persistedPages");
    //the base class UIEngine needs the GridFS bucket for persisting sessions
    super.initDB(chatDB, fs);
}
Project: mandrel    File: MongoBlobStore.java
public MongoBlobStore(TaskContext context, MongoClient mongoClient, String databaseName, String bucketName, int batchSize) {
    super(context);
    this.mongoClient = mongoClient;
    this.databaseName = databaseName;
    this.bucket = GridFSBuckets.create(mongoClient.getDatabase(databaseName), bucketName);
    this.batchSize = batchSize;
    this.mapper = new ObjectMapper();
}
Project: restheart    File: GridFsDAO.java
@Override
public OperationResult deleteFile(
        final Database db,
        final String dbName,
        final String bucketName,
        final BsonValue fileId,
        final String requestEtag,
        final boolean checkEtag) {
    final String bucket = extractBucketName(bucketName);

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            db.getDatabase(dbName),
            bucket);

    GridFSFile file = gridFSBucket
            .find(eq("_id", fileId))
            .limit(1).iterator().tryNext();

    if (file == null) {
        return new OperationResult(HttpStatus.SC_NOT_FOUND);
    } else if (checkEtag) {
        Object oldEtag = file.getMetadata().get("_etag");

        if (oldEtag != null) {
            if (requestEtag == null) {
                return new OperationResult(HttpStatus.SC_CONFLICT, oldEtag);
            } else if (!Objects.equals(oldEtag.toString(), requestEtag)) {
                return new OperationResult(
                        HttpStatus.SC_PRECONDITION_FAILED, oldEtag);
            }
        }
    }

    gridFSBucket.delete(fileId);

    return new OperationResult(HttpStatus.SC_NO_CONTENT);
}
Project: restheart    File: GetFileBinaryHandler.java
@Override
public void handleRequest(
        HttpServerExchange exchange,
        RequestContext context)
        throws Exception {
    if (context.isInError()) {
        next(exchange, context);
        return;
    }

    LOGGER.trace("GET " + exchange.getRequestURL());
    final String bucket = extractBucketName(context.getCollectionName());

    GridFSBucket gridFSBucket = GridFSBuckets.create(
            MongoDBClientSingleton.getInstance().getClient()
                    .getDatabase(context.getDBName()),
            bucket);

    GridFSFile dbsfile = gridFSBucket
            .find(eq("_id", context.getDocumentId()))
            .limit(1).iterator().tryNext();

    if (dbsfile == null) {
        fileNotFound(context, exchange);
    } else if (!checkEtag(exchange, dbsfile)) {
        sendBinaryContent(context, gridFSBucket, dbsfile, exchange);
    }

    next(exchange, context);
}
Project: otus-api    File: FileStoreBucket.java
@PostConstruct
public void setUp() {
    fileStore = GridFSBuckets.create(db, FILESTORE);
}
Project: mongo-obj-framework    File: Smof.java
public void loadBucket(String bucketName) {
    final GridFSBucket bucket = GridFSBuckets.create(database, bucketName);
    dispatcher.put(bucketName, bucket);
}
Project: awplab-core    File: LogFile.java
@BeanCodecKey(ignore = true)
public GridFSFile getGridFSFile(MongoDatabase mongoDatabase) {
    return GridFSBuckets.create(mongoDatabase, bucket).find(Filters.eq("_id", fileObjectId)).first();
}
Project: georocket    File: MongoDBChunkReadStreamTest.java
/**
 * The actual test method. Creates a test file in GridFS, writes
 * <code>size</code> bytes of generated data to it and reads it again through
 * {@link MongoDBChunkReadStream}. Finally, checks if the file has been read correctly.
 * @param size the number of bytes to write/read
 * @param chunkSize the GridFS chunk size
 * @param vertx the Vert.x instance
 * @param context the current test context
 */
private void doRead(int size, int chunkSize, Vertx vertx, TestContext context) {
  Async async = context.async();

  // create a test file in GridFS
  prepareData("test_" + size + ".bin", size, vertx, context.asyncAssertSuccess(filename -> {
    // connect to GridFS
    com.mongodb.async.client.MongoClient client = createAsyncClient();
    com.mongodb.async.client.MongoDatabase db =
        client.getDatabase(MongoDBTestConnector.MONGODB_DBNAME);
    com.mongodb.async.client.gridfs.GridFSBucket gridfs =
        com.mongodb.async.client.gridfs.GridFSBuckets.create(db);

    // open the test file
    GridFSDownloadStream is = gridfs.openDownloadStream(filename);
    MongoDBChunkReadStream rs = new MongoDBChunkReadStream(is, size, chunkSize,
        vertx.getOrCreateContext());

    // read from the test file
    rs.exceptionHandler(context::fail);

    int[] pos = { 0 };

    rs.endHandler(v -> {
      // the file has been completely read
      rs.close();
      context.assertEquals(size, pos[0]);
      async.complete();
    });

    rs.handler(buf -> {
      // check number of read bytes
      if (size - pos[0] > chunkSize) {
        context.assertEquals(chunkSize, buf.length());
      } else {
        context.assertEquals(size - pos[0], buf.length());
      }

      // check file contents
      for (int i = pos[0]; i < pos[0] + buf.length(); ++i) {
        context.assertEquals((byte)(i & 0xFF), buf.getByte(i - pos[0]));
      }

      pos[0] += buf.length();
    });
  }));
}
Project: eds-starter6-mongodb    File: MongoDb.java
public GridFSBucket createBucket(String bucketName) {
    return GridFSBuckets.create(this.getMongoDatabase(), bucketName);
}
Project: lumongo    File: MongoDocumentStorage.java
private GridFSBucket createGridFSConnection() {
    MongoDatabase db = mongoClient.getDatabase(database);
    return GridFSBuckets.create(db, ASSOCIATED_FILES);
}
Project: awplab-core    File: BucketStreamResource.java
public static GridFSFile gridFSFile(MongoDatabase database, String bucket, ObjectId objectId) {
    return GridFSBuckets.create(database, bucket).find(Filters.eq("_id", objectId)).first();
}