/** * @see io.vertx.core.AbstractVerticle#start(io.vertx.core.Future) */ @Override public void start(final Future<Void> startFuture) throws Exception { this.logger.trace("Client for SFDC Platform Events starting"); this.router = Router.router(this.getVertx()); // Load config and verticles and signal back that we are done final Future<Void> verticleLoad = Future.future(); verticleLoad.setHandler(ar -> { if (ar.succeeded()) { this.startWebServer(startFuture); } else { startFuture.fail(ar.cause()); } }); this.loadAppConfig(verticleLoad); }
/**
 * Boots the music store verticle: configures the JDBC datasource, the
 * Couchbase cluster/bucket used for album comments, and the FreeMarker
 * engine, then — once both data stores are ready — wires the auth provider
 * and web server, reporting the outcome through startFuture.
 *
 * @param startFuture completed when startup finishes, failed on any error
 */
@Override
public void start(Future<Void> startFuture) throws Exception {
  datasourceConfig = new DatasourceConfig(config().getJsonObject("datasource", new JsonObject()));
  dbClient = JDBCClient.createShared(vertx, datasourceConfig.toJson(), "MusicStoreDS");
  templateEngine = FreeMarkerTemplateEngine.create();
  // Couchbase pipeline: connect -> open bucket -> load queries -> setup bucket.
  // Each doOnSuccess caches the intermediate result in a field for later use.
  Completable couchbase = createCouchbaseClient().doOnSuccess(cluster -> couchbaseCluster = cluster)
      .flatMap(v -> openBucket()).doOnSuccess(bucket -> albumCommentsBucket = bucket)
      .flatMap(v -> loadProperties("couchbase/queries.xml")).doOnSuccess(props -> couchbaseQueries = props)
      .toCompletable()
      // defer so setupBucket() is only invoked after the chain above completes
      .andThen(Completable.defer(() -> setupBucket()));
  // Relational pipeline: run schema update, then load the SQL query catalog.
  Completable database = updateDB()
      .andThen(loadProperties("db/queries.xml")).doOnSuccess(props -> dbQueries = props)
      .toCompletable();
  // Run both pipelines concurrently; only when both finish, set up auth and
  // the HTTP server, and bridge the result into the Vert.x start future.
  couchbase.mergeWith(database)
      .andThen(Completable.fromAction(() -> setupAuthProvider()))
      .andThen(Completable.defer(() -> setupWebServer()))
      .subscribe(CompletableHelper.toObserver(startFuture));
}
@Override public void start(Future<Void> startFuture) throws Exception { Future<String> dbVerticleDeployment = Future.future(); // <1> vertx.deployVerticle(new WikiDatabaseVerticle(), dbVerticleDeployment.completer()); // <2> dbVerticleDeployment.compose(id -> { // <3> Future<String> httpVerticleDeployment = Future.future(); vertx.deployVerticle( "io.vertx.guides.wiki.HttpServerVerticle", // <4> new DeploymentOptions().setInstances(2), // <5> httpVerticleDeployment.completer()); return httpVerticleDeployment; // <6> }).setHandler(ar -> { // <7> if (ar.succeeded()) { startFuture.complete(); } else { startFuture.fail(ar.cause()); } }); }
/**
 * Creates the service and asynchronously prepares the backing database:
 * opens a connection, runs the CREATE_PAGES_TABLE statement, then reports
 * readiness (or the failure cause) through readyHandler.
 *
 * @param dbClient shared JDBC client used for all queries
 * @param sqlQueries map from SqlQuery keys to the SQL text to execute
 * @param readyHandler invoked with this service once the schema is ready,
 *        or with the connection/preparation failure
 */
WikiDatabaseServiceImpl(JDBCClient dbClient, HashMap<SqlQuery, String> sqlQueries, Handler<AsyncResult<WikiDatabaseService>> readyHandler) {
  this.dbClient = dbClient;
  this.sqlQueries = sqlQueries;
  dbClient.getConnection(ar -> {
    if (ar.failed()) {
      LOGGER.error("Could not open a database connection", ar.cause());
      readyHandler.handle(Future.failedFuture(ar.cause()));
    } else {
      SQLConnection connection = ar.result();
      connection.execute(sqlQueries.get(SqlQuery.CREATE_PAGES_TABLE), create -> {
        // Release the connection regardless of the statement outcome.
        connection.close();
        if (create.failed()) {
          LOGGER.error("Database preparation error", create.cause());
          readyHandler.handle(Future.failedFuture(create.cause()));
        } else {
          readyHandler.handle(Future.succeededFuture(this));
        }
      });
    }
  });
}
/**
 * Fetches the authentication info over the event bus if it is required and
 * not yet cached; otherwise resolves immediately with the cached value
 * (which may be null when no auth name is configured).
 *
 * @return a future that resolves once the AuthInfo is available
 */
private Future<AuthInfo> getAuthInfo() {
  final String authName = this.getConsumerConfig().getAuthName();
  if ((this.authInfo != null) || (authName == null)) {
    // Already cached, or authentication not configured — nothing to fetch.
    return Future.succeededFuture(this.authInfo);
  }
  final Future<AuthInfo> result = Future.future();
  final EventBus eb = this.getVertx().eventBus();
  final String address = Constants.BUS_AUTHREQUEST + authName;
  eb.send(address, null, reply -> {
    if (reply.failed()) {
      result.fail(reply.cause());
    } else {
      // Cache the reply so subsequent calls resolve without a bus round-trip.
      this.authInfo = (AuthInfo) reply.result().body();
      result.complete(this.authInfo);
    }
  });
  return result;
}
/**
 * Deploys the database verticle, then two instances of the HTTP server
 * verticle; startFuture reflects the combined deployment outcome.
 */
@Override
public void start(Future<Void> startFuture) throws Exception {
  Future<String> databaseDeployment = Future.future();
  vertx.deployVerticle(new WikiDatabaseVerticle(), databaseDeployment.completer());
  databaseDeployment
      .compose(id -> {
        Future<String> serverDeployment = Future.future();
        vertx.deployVerticle(
            "io.vertx.guides.wiki.http.HttpServerVerticle",
            new DeploymentOptions().setInstances(2),
            serverDeployment.completer());
        return serverDeployment;
      })
      .setHandler(done -> {
        if (done.failed()) {
          startFuture.fail(done.cause());
        } else {
          startFuture.complete();
        }
      });
}
/**
 * Creates the shared JDBC client, initializes the database service, and
 * registers it as an event-bus service proxy once it is ready.
 */
@Override
public void start(Future<Void> startFuture) throws Exception {
  HashMap<SqlQuery, String> sqlQueries = loadSqlQueries();
  // Connection settings come from the verticle config, with HSQLDB defaults.
  JsonObject jdbcConfig = new JsonObject()
      .put("url", config().getString(CONFIG_WIKIDB_JDBC_URL, "jdbc:hsqldb:file:db/wiki"))
      .put("driver_class", config().getString(CONFIG_WIKIDB_JDBC_DRIVER_CLASS, "org.hsqldb.jdbcDriver"))
      .put("max_pool_size", config().getInteger(CONFIG_WIKIDB_JDBC_MAX_POOL_SIZE, 30));
  JDBCClient dbClient = JDBCClient.createShared(vertx, jdbcConfig);
  WikiDatabaseService.create(dbClient, sqlQueries, ready -> {
    if (ready.failed()) {
      startFuture.fail(ready.cause());
    } else {
      // Expose the service on the event bus under the configured queue address.
      ProxyHelper.registerService(WikiDatabaseService.class, vertx, ready.result(), CONFIG_WIKIDB_QUEUE);
      startFuture.complete();
    }
  });
}
/**
 * Sends the block's transactions to the transaction-import consumer.
 * The consumer replies with a Throwable body on failure and null on success.
 *
 * @param block        the block the transactions belong to
 * @param transactions the transactions payload to import
 * @return a future completed on success (or when tx import is disabled),
 *         failed with the send error or the consumer-reported error
 */
private Future<Void> importTx(StorableBlock block, JsonObject transactions) {
  Future<Void> future = Future.future();
  if (!config.isTxImport()) {
    // Transaction import disabled: treat as a no-op success.
    future.complete();
    return future;
  }
  DeliveryOptions delivery = new DeliveryOptions().setSendTimeout(ONE_MINUTE);
  context.bus().send(TX_ADDR, transactions, delivery, done -> {
    if (done.failed()) {
      future.fail(done.cause());
      return;
    }
    Throwable result = (Throwable) done.result().body();
    if (result != null) {
      future.fail(result);
    } else {
      // Fix: the progress listener was previously notified before the
      // outcome was checked, i.e. even for failed sends/imports; only
      // report progress once the import actually succeeded.
      listener.onImported(block.getHash(), block.getNumber());
      future.complete();
    }
  });
  return future;
}
/**
 * Renders the named Handlebars template with the routing context's data,
 * caching compiled templates, and delivers the rendered buffer (or the
 * error) to the handler.
 *
 * @param context          the routing context supplying Vertx and template data
 * @param templateFileName name of the template to compile/render
 * @param handler          receives the rendered Buffer or the failure
 */
@Override
public void render(RoutingContext context, String templateFileName, Handler<AsyncResult<Buffer>> handler) {
  try {
    Template template = cache.get(templateFileName);
    if (template == null) {
      synchronized (this) {
        // Fix: re-check under the lock — another thread may have compiled
        // and cached the template between the unsynchronized lookup and
        // acquiring the monitor (check-then-act race causing redundant,
        // potentially expensive recompilation).
        template = cache.get(templateFileName);
        if (template == null) {
          loader.setVertx(context.vertx());
          template = handlebars.compile(templateFileName);
          cache.put(templateFileName, template);
        }
      }
    }
    Context engineContext = Context.newBuilder(context.data()).resolver(getResolvers()).build();
    handler.handle(Future.succeededFuture(Buffer.buffer(template.apply(engineContext))));
  } catch (Exception ex) {
    handler.handle(Future.failedFuture(ex));
  }
}
/**
 * Looks up a single wiki page by name. The result JSON contains
 * "found": false when no row matched, otherwise "found": true plus the
 * page "id" and "rawContent".
 */
@Override
public WikiDatabaseService fetchPage(String name, Handler<AsyncResult<JsonObject>> resultHandler) {
  JsonArray params = new JsonArray().add(name);
  dbClient.queryWithParams(sqlQueries.get(SqlQuery.GET_PAGE), params, fetch -> {
    if (fetch.failed()) {
      LOGGER.error("Database query error", fetch.cause());
      resultHandler.handle(Future.failedFuture(fetch.cause()));
      return;
    }
    ResultSet resultSet = fetch.result();
    JsonObject response = new JsonObject();
    if (resultSet.getNumRows() == 0) {
      response.put("found", false);
    } else {
      JsonArray firstRow = resultSet.getResults().get(0);
      response.put("found", true);
      response.put("id", firstRow.getInteger(0));
      response.put("rawContent", firstRow.getString(1));
    }
    resultHandler.handle(Future.succeededFuture(response));
  });
  return this;
}
/**
 * Waits for whichever import futures are enabled in the config; on success
 * finishes the import, otherwise cancels it, shows the error, and returns
 * to the settings scene.
 */
private void setupCompletionListeners() {
  List<Future> pending = new ArrayList<>();
  if (config.isTxImport()) {
    pending.add(txImport);
  }
  if (config.isBlockImport()) {
    pending.add(blockImport);
  }
  CompositeFuture.all(pending).setHandler(outcome -> {
    if (outcome.failed()) {
      cancelImport(null);
      Form.showAlertFromError(outcome.cause());
      Async.setScene(SETTINGS_FXML);
    } else {
      finishWithSuccess();
    }
  });
}
/**
 * Runs the blocking handler on a worker thread and retries until it yields
 * a non-null result. A null result retries after 3s; an
 * AcmeRetryAfterException retries at the server-provided instant; any other
 * failure fails {@code done} immediately.
 *
 * @param logger          logger for retry diagnostics
 * @param blockingHandler the blocking computation to retry
 * @param done            completed with the first non-null result, or failed
 */
<T> void fetchWithRetry(Logger logger, Callable<T> blockingHandler, Future<T> done) {
  vertx.executeBlocking((Future<T> fut) -> {
    try {
      fut.complete(blockingHandler.call());
    } catch (Exception e) {
      fut.fail(e);
    }
  }, ar -> {
    if (ar.failed() && !(ar.cause() instanceof AcmeRetryAfterException)) {
      done.fail(ar.cause());
      return;
    }
    if (ar.succeeded() && ar.result() != null) {
      done.complete(ar.result());
      return;
    }
    long nextSleep;
    if (ar.succeeded()) {
      // Null result: poll again shortly.
      nextSleep = 3000;
    } else {
      nextSleep = ((AcmeRetryAfterException) ar.cause()).getRetryAfter().getTime() - currentTimeMillis();
      // Fix: the Retry-After instant may already be in the past, yielding a
      // non-positive delay that Vertx.setTimer rejects — clamp to 1ms.
      if (nextSleep < 1) {
        nextSleep = 1;
      }
    }
    logger.info("Recheck in {}ms @ {}", nextSleep, new Date(currentTimeMillis() + nextSleep));
    vertx.setTimer(nextSleep, timerId -> fetchWithRetry(logger, blockingHandler, done));
  });
}
/**
 * Fetches the names of all wiki pages, sorted alphabetically, and delivers
 * them as a JsonArray of strings.
 */
@Override
public WikiDatabaseService fetchAllPages(Handler<AsyncResult<JsonArray>> resultHandler) {
  dbClient.query(sqlQueries.get(SqlQuery.ALL_PAGES), res -> {
    if (res.failed()) {
      LOGGER.error("Database query error", res.cause());
      resultHandler.handle(Future.failedFuture(res.cause()));
    } else {
      // Each row is a single-column JsonArray; column 0 holds the page name.
      JsonArray pages = new JsonArray(res.result()
          .getResults()
          .stream()
          .map(row -> row.getString(0))
          .sorted()
          .collect(Collectors.toList()));
      resultHandler.handle(Future.succeededFuture(pages));
    }
  });
  return this;
}
/**
 * Deploys the Bittrex order book verticle.
 *
 * @param options deployment options forwarded to Vert.x
 * @return a future completed with the deployment id on success, failed with
 *         the deployment error otherwise
 */
private Future<String> deployOrderBookVerticle(DeploymentOptions options) {
  Future<String> future = Future.future();
  vertx.deployVerticle(new BittrexOrderBookVerticle(), options, res -> {
    if (res.succeeded()) {
      o_id = res.result();
      System.out.println("OrderBookVerticle Deployment id is: " + res.result());
      // Fix: the future is typed Future<String> but was completed with
      // null; complete it with the deployment id so callers can use it.
      future.complete(res.result());
    } else {
      System.err.println("OrderBookVerticle failed: " + res.cause().getMessage());
      future.fail(res.cause());
    }
  });
  return future;
}
/**
 * Undeploys the websocket verticle if one is deployed, clearing the cached
 * deployment id on success; completes immediately when nothing is deployed.
 *
 * @return a future reflecting the undeployment outcome
 */
private Future<String> undeployWebSocketVerticle() {
  Future<String> future = Future.future();
  if (ws_id == null) {
    // Nothing deployed — nothing to undeploy.
    future.complete();
    return future;
  }
  vertx.undeploy(ws_id, res -> {
    if (res.failed()) {
      System.err.println("WebsocketVerticle Undeployment failed: " + res.cause().getMessage());
      future.fail(res.cause());
    } else {
      ws_id = null;
      System.out.println("WebsocketVerticle undeployed");
      future.complete();
    }
  });
  return future;
}
/**
 * Parses a FastDFS response packet header, validating its length, command
 * byte, and status byte in that order.
 *
 * @param headerBuffer       the raw header bytes
 * @param expectedCommand    the command byte the caller expects
 * @param expectedBodyLength expected body length, or a non-positive value to
 *                           skip the body-length check
 * @return a succeeded future holding the body length, or a failed future
 *         describing the first validation that did not pass
 */
public static Future<Long> parseHeader(Buffer headerBuffer, byte expectedCommand, long expectedBodyLength) {
  if (headerBuffer.length() != HEADER_BYTE_LENGTH) {
    return Future.failedFuture(new FdfsException("receive packet size" + headerBuffer.length()
        + " is not equal to the expected header size: " + HEADER_BYTE_LENGTH));
  }
  byte cmd = headerBuffer.getByte(PROTO_HEADER_CMD_INDEX);
  if (cmd != expectedCommand) {
    return Future.failedFuture(new FdfsException(
        "receive command: " + cmd + " is not equal to the expected command: " + expectedCommand));
  }
  byte statusByte = headerBuffer.getByte(PROTO_HEADER_STATUS_INDEX);
  if (statusByte != HEADER_STATUS_SUCCESS) {
    return Future.failedFuture(new FdfsException("receive packet errno is: " + statusByte));
  }
  // Body length occupies the first 8 bytes of the header.
  long length = headerBuffer.getLong(0);
  if (expectedBodyLength > 0 && length != expectedBodyLength) {
    return Future.failedFuture(new FdfsException("receive packet body length: " + length
        + " is not equal to the expected: " + expectedBodyLength));
  }
  return Future.succeededFuture(length);
}
/**
 * Appends the contents of a local file to the remote appender file
 * identified by fileId, closing the local file before reporting the result.
 */
@Override
public FdfsStorage append(String fileFullPathName, FdfsFileId fileId, Handler<AsyncResult<Void>> handler) {
  LocalFile.readFile(vertx.fileSystem(), fileFullPathName).setHandler(read -> {
    if (read.failed()) {
      handler.handle(Future.failedFuture(read.cause()));
      return;
    }
    LocalFile localFile = read.result();
    append(localFile.getFile(), localFile.getSize(), fileId, result -> {
      // Release the local file handle before propagating the outcome.
      localFile.closeFile();
      handler.handle(result);
    });
  });
  return this;
}
/**
 * Deploys the list of local and/or remote components
 *
 * @param future Vert.x Future object to update the status of deployment
 * @see <a href="http://vertx.io/docs/apidocs/io/vertx/core/Future.html" target="_blank">Future</a>
 */
public void deployAll(Future<Void> future) {
  if (null == this.vertx) {
    String errMesg = "Not setup yet! Call 'setup' method first.";
    logger.error(errMesg);
    future.fail(new Exception(errMesg));
    return;
  }
  JsonArray compList = this.deployConfig.getJsonArray("components");
  // Fix: a configuration without a "components" array previously caused an
  // unhandled NullPointerException; fail the future with a clear message.
  if (null == compList) {
    String errMesg = "Deploy configuration has no 'components' array.";
    logger.error(errMesg);
    future.fail(new Exception(errMesg));
    return;
  }
  List<DeployComponent> components = new ArrayList<>();
  for (int idx = 0; idx < compList.size(); idx++) {
    components.add(this.setupComponent(compList.getJsonObject(idx)));
  }
  this.deployRecords.deployAll(components, future);
}
/**
 * Downloads (a range of) the remote file into a local file, creating the
 * target if necessary and closing it before reporting the outcome.
 */
@Override
public FdfsStorage download(FdfsFileId fileId, String fileFullPathName, long offset, long bytes, Handler<AsyncResult<Void>> handler) {
  OpenOptions openOptions = new OpenOptions().setCreate(true).setWrite(true);
  vertx.fileSystem().open(fileFullPathName, openOptions, opened -> {
    if (opened.failed()) {
      handler.handle(Future.failedFuture(opened.cause()));
      return;
    }
    AsyncFile file = opened.result();
    download(fileId, file, offset, bytes, result -> {
      // Close the target file before propagating the download result.
      file.close();
      handler.handle(result);
    });
  });
  return this;
}
protected void install(Future<Gateway> future) { // Install Gateway Gateway gateway = (Gateway) new Gateway() .setProfile(gatewayOptionsHolder.getOptions().getProfile()) .setOwner(gatewayOptionsHolder.getOptions().getOwner()) .setSerialKey(gatewayOptionsHolder.getOptions().getSerialKey()) .setVersion(VERSION) .setBuild(getBuild()) .setPrivateKey(UUID.randomUUID().toString()); // TODO feature validate serialKey on hub, for now all of project are // open source persistance.persistGateway(gateway).setHandler(res -> { if (res.succeeded() && res.result() != null) { this.currentGateway = gateway; future.complete(gateway); } else { future.fail(res.cause()); } }); }
/**
 * Deletes the given file: resolves a tracker, asks it for an update-capable
 * storage node, and forwards the delete result to the handler.
 */
@Override
public FdfsClient delete(FdfsFileId fileId, Handler<AsyncResult<Void>> handler) {
  getTracker().setHandler(tracker -> {
    if (tracker.failed()) {
      handler.handle(Future.failedFuture(tracker.cause()));
      return;
    }
    tracker.result().getUpdateStorage(fileId, storage -> {
      if (storage.failed()) {
        handler.handle(Future.failedFuture(storage.cause()));
        return;
      }
      // The storage result is passed straight through to the caller.
      storage.result().delete(fileId, handler);
    });
  });
  return this;
}
/**
 * Starts the shopping gateway: configures the circuit breaker and router,
 * discovers the shopping-backend and pricer-service web clients, then
 * starts the HTTP server on port 8080.
 *
 * @param future completed once the HTTP server is listening, failed on
 *               discovery or bind errors
 */
@Override
public void start(Future<Void> future) throws Exception {
  Router router = Router.router(vertx);
  circuit = CircuitBreaker.create("circuit-breaker", vertx, new CircuitBreakerOptions()
      .setFallbackOnFailure(true)
      .setMaxFailures(3)
      .setResetTimeout(5000)
      .setTimeout(1000)
  );
  router.get("/").handler(this::getShoppingList);
  ServiceDiscovery.create(vertx, discovery -> {
    Single<WebClient> s1 = HttpEndpoint.rxGetWebClient(discovery, rec -> rec.getName().equals("shopping-backend"));
    Single<WebClient> s2 = HttpEndpoint.rxGetWebClient(discovery, rec -> rec.getName().equals("pricer-service"));
    Single.zip(s1, s2, (x, y) -> {
      shopping = x;
      pricer = y;
      return vertx.createHttpServer()
          .requestHandler(router::accept)
          // Fix: the start future was never completed, so the verticle
          // deployment could hang; signal listen success/failure.
          .listen(8080, server -> {
            if (server.failed()) {
              future.fail(server.cause());
            } else {
              future.complete();
            }
          });
    }).subscribe(
        server -> { /* completion is signaled by the listen handler above */ },
        // Fix: discovery/lookup errors were silently dropped by the bare
        // subscribe(); propagate them to the start future.
        future::fail);
  });
}
/**
 * Releases the Couchbase cluster on a worker thread during verticle
 * shutdown, then signals stopFuture. Release errors are printed and then
 * swallowed on purpose so a failing release never blocks shutdown.
 */
@Override
public void stop(Future<Void> stopFuture) throws Exception {
  vertx.rxExecuteBlocking(fut -> {
    if (couchbaseCluster != null) {
      couchbaseCluster.release();
    }
    // Any non-null value works here; only completion matters downstream.
    fut.complete(/* RxJava2 does not want null */ CouchbaseAsyncCluster.class);
  }).toCompletable()
      .doOnError(Throwable::printStackTrace)
      .onErrorComplete()
      .subscribe(CompletableHelper.toObserver(stopFuture));
}
/**
 * Performs the blocking read on a worker thread, completing the future with
 * the populated ReadResult or failing it with whatever was thrown.
 * Synchronized so only one read runs at a time on this instance.
 */
private synchronized void readInWorker(Future<ReadResult> future) {
  try {
    final ReadResult result = new ReadResult();
    result.doRead();
    future.complete(result);
  } catch (Throwable t) {
    future.fail(t);
  }
}
/**
 * Deploys the generator, analytics, and web verticles in parallel, each
 * with its own config section, and reports the combined outcome.
 *
 * @param startFuture completed when all three deployments succeed, failed
 *                    with the first deployment error otherwise
 */
@Override
public void start(Future<Void> startFuture) throws Exception {
  Future<String> generatorFuture = future();
  vertx.deployVerticle(
      GeneratorVerticle.class.getName(),
      new DeploymentOptions().setConfig(config().getJsonObject("generator")),
      generatorFuture
  );
  Future<String> analyticsFuture = future();
  vertx.deployVerticle(
      AnalyticsVerticle.class.getName(),
      new DeploymentOptions().setConfig(config().getJsonObject("analytics")),
      analyticsFuture
  );
  Future<String> webFuture = future();
  vertx.deployVerticle(
      WebVerticle.class.getName(),
      new DeploymentOptions().setConfig(config().getJsonObject("web")),
      webFuture
  );
  CompositeFuture.all(asList(generatorFuture, analyticsFuture, webFuture)).setHandler(ar -> {
    if (ar.failed()) {
      log.error("Vertx starter failed to start: {}", ar.cause().getMessage());
      ar.cause().printStackTrace();
      // Fix: the start future was never failed on error, leaving the
      // deployment hanging; propagate the cause to Vert.x.
      startFuture.fail(ar.cause());
    } else {
      log.info("\n----------------------------------------------------------\n\t" +
          "{} is running!\n" +
          "----------------------------------------------------------", MainVerticle.class.getSimpleName());
      startFuture.complete();
    }
  });
}
/**
 * Updates a page's markdown content by id and reports the outcome.
 */
@Override
public WikiDatabaseService savePage(int id, String markdown, Handler<AsyncResult<Void>> resultHandler) {
  // Parameter order must match the SAVE_PAGE statement: content, then id.
  JsonArray params = new JsonArray().add(markdown).add(id);
  dbClient.updateWithParams(sqlQueries.get(SqlQuery.SAVE_PAGE), params, update -> {
    if (update.failed()) {
      LOGGER.error("Database query error", update.cause());
      resultHandler.handle(Future.failedFuture(update.cause()));
    } else {
      resultHandler.handle(Future.succeededFuture());
    }
  });
  return this;
}
/**
 * Undeploys a gdh verticle and registers a handler for the result.
 *
 * @param gdh      the GDHVertex to be undeployed
 * @param aHandler receives the deployment id on success, or the
 *                 undeployment failure cause
 */
public void kill(GDHVertex gdh, Handler<AsyncResult<String>> aHandler) {
  vertx.undeploy(gdh.deploymentID(), undeployment -> {
    if (undeployment.succeeded()) {
      aHandler.handle(Future.succeededFuture(gdh.deploymentID()));
    } else {
      // Fix: the real failure cause was discarded and replaced with a
      // generic "Undeployment Failure!" string; preserve it so callers
      // can diagnose why the undeployment failed.
      aHandler.handle(Future.failedFuture(undeployment.cause()));
    }
  });
}
@Override public void start(Future<Void> startFuture) throws Exception { final JsonObject mongoConfig = config().getJsonObject(MONGO); this.client = MongoClient.createShared(vertx, mongoConfig); this.collectionName = mongoConfig.getString("col_name"); this.address = config().getString(ADDRESS, "/query"); vertx.eventBus().consumer(address, this::queryTimeSeries); }
/**
 * Closes the server socket, logging the outcome and signaling the stop
 * future accordingly.
 */
@Override
public void stop(Future<Void> future) throws Exception {
  server.close(closed -> {
    if (closed.failed()) {
      future.fail(closed.cause());
      conf.getLogger().info(getNode().toString() + " stoppage failure: " + conf.getPort() + closed.cause().getMessage());
    } else {
      future.complete();
      conf.getLogger().info(getNode().toString() + " stopped listening on: " + conf.getPort());
    }
  });
}
@Override public void start(Future<Void> startFuture) throws Exception { final JsonObject mongoConfig = config().getJsonObject(MONGO); this.client = MongoClient.createShared(vertx, mongoConfig); this.collectionName = mongoConfig.getString("col_name"); this.address = config().getString(ADDRESS, "/search"); vertx.eventBus().consumer(this.address, this::searchLabels); }
/**
 * Checks whether this user holds the given authority, answering from the
 * permission cache when possible and otherwise delegating to the provider
 * (caching positive answers).
 */
public User isAuthorised(GateAuthProvider authProvider,String authority, Handler<AsyncResult<Boolean>> resultHandler) {
  if (cachedPermissions.contains(authority)) {
    // Cache hit: answer immediately without consulting the provider.
    resultHandler.handle(Future.succeededFuture(true));
    return this;
  }
  doIsPermitted(authProvider, authority, res -> {
    if (res.succeeded() && res.result()) {
      cachedPermissions.add(authority);
    }
    resultHandler.handle(res);
  });
  return this;
}
/**
 * Starts the HTTP server on the configured port; the server-start handler
 * is responsible for completing or failing the start future.
 */
@Override
public void start(final Future<Void> startFuture) throws Exception {
  vertx
      .createHttpServer()
      .requestHandler(requestHandler())
      .listen(httpPort, serverStartHandler(startFuture));
}
/**
 * Persists the gateway inside a managed JDBC session and resolves with the
 * same entity once the session completes.
 */
@Override
public Future<Gateway> persistGateway(Gateway gateway) {
  return JDBCExecutor.create().execute(session -> {
    session.persist(gateway);
    // Hand the entity back so callers can chain on the persisted instance.
    return gateway;
  });
}
/**
 * Uploads a stream as an appender file: validates the extension length,
 * resolves a tracker and a store storage node, and forwards the upload
 * result to the handler.
 */
@Override
public FdfsClient uploadAppender(ReadStream<Buffer> stream, long size, String ext, Handler<AsyncResult<FdfsFileId>> handler) {
  // Reject extensions whose encoded byte length exceeds the protocol limit.
  int extByteLength = Buffer.buffer(ext, options.getCharset()).length();
  if (extByteLength > FdfsProtocol.FDFS_FILE_EXT_NAME_MAX_LEN) {
    handler.handle(Future
        .failedFuture("ext is too long ( greater than " + FdfsProtocol.FDFS_FILE_EXT_NAME_MAX_LEN + ")"));
    return this;
  }
  getTracker().setHandler(tracker -> {
    if (tracker.failed()) {
      handler.handle(Future.failedFuture(tracker.cause()));
      return;
    }
    tracker.result().getStoreStorage(storage -> {
      if (storage.failed()) {
        handler.handle(Future.failedFuture(storage.cause()));
        return;
      }
      // The upload result is passed straight through to the caller.
      storage.result().uploadAppender(stream, size, ext, handler);
    });
  });
  return this;
}
/**
 * Ensures the underlying TCP connection is established and then hands this
 * FdfsConnection to the handler (which may be null for fire-and-forget).
 *
 * Only the caller that wins the DISCONNECTED -> CONNECTING CAS actually
 * dials; every other caller is answered immediately from the else branch.
 *
 * NOTE(review): the else branch is reached for CONNECTING as well as
 * CONNECTED, so a caller racing an in-flight connect is told "success"
 * before the socket is actually established — confirm that callers
 * serialize connect attempts or tolerate this.
 */
private FdfsConnection connect(Handler<AsyncResult<FdfsConnection>> handler) {
  if (this.state.compareAndSet(State.DISCONNECTED, State.CONNECTING)) {
    client.connect(address, ar -> {
      if (ar.succeeded()) {
        // Drop back to DISCONNECTED when the peer closes, so a later
        // connect() call re-dials.
        this.socket = ar.result().closeHandler(v -> {
          this.state.set(State.DISCONNECTED);
        });
        this.state.set(State.CONNECTED);
        if (handler != null) {
          handler.handle(Future.succeededFuture(this));
        }
      } else {
        if (handler != null) {
          handler.handle(Future.failedFuture(ar.cause()));
        }
      }
    });
  } else {
    if (handler != null) {
      handler.handle(Future.succeededFuture(this));
    }
  }
  return this;
}