public ApacheThriftMethodInvoker(
        ListeningExecutorService executorService,
        ListeningScheduledExecutorService delayService,
        TTransportFactory transportFactory,
        TProtocolFactory protocolFactory,
        Duration connectTimeout,
        Duration requestTimeout,
        Optional<HostAndPort> socksProxy,
        Optional<SSLContext> sslContext)
{
    this.executorService = requireNonNull(executorService, "executorService is null");
    this.delayService = requireNonNull(delayService, "delayService is null");
    this.transportFactory = requireNonNull(transportFactory, "transportFactory is null");
    this.protocolFactory = requireNonNull(protocolFactory, "protocolFactory is null");
    this.connectTimeoutMillis = Ints.saturatedCast(requireNonNull(connectTimeout, "connectTimeout is null").toMillis());
    this.requestTimeoutMillis = Ints.saturatedCast(requireNonNull(requestTimeout, "requestTimeout is null").toMillis());
    this.socksProxy = requireNonNull(socksProxy, "socksProxy is null");
    this.sslContext = requireNonNull(sslContext, "sslContext is null");
}
private static int logThrift(HostAndPort address, List<LogEntry> messages)
{
    try {
        TSocket socket = new TSocket(address.getHost(), address.getPort());
        socket.open();
        try {
            TBinaryProtocol tp = new TBinaryProtocol(new TFramedTransport(socket));
            assertEquals(new scribe.Client(tp).Log(messages), ResultCode.OK);
        }
        finally {
            socket.close();
        }
    }
    catch (TException e) {
        throw new RuntimeException(e);
    }
    return 1;
}
@Override
public Future<Channel> getConnection(HostAndPort address)
{
    Future<Channel> future;
    synchronized (this) {
        if (closed) {
            return group.next().newFailedFuture(new TTransportException("Connection pool is closed"));
        }
        try {
            future = cachedConnections.get(address);
        }
        catch (ExecutionException e) {
            throw new RuntimeException(e);
        }
    }

    // check if connection is failed
    if (isFailed(future)) {
        // remove failed connection
        cachedConnections.asMap().remove(address, future);
    }
    return future;
}
@Override
public Future<Channel> getConnection(HostAndPort address)
{
    try {
        Bootstrap bootstrap = new Bootstrap()
                .group(group)
                .channel(NioSocketChannel.class)
                .option(CONNECT_TIMEOUT_MILLIS, saturatedCast(connectTimeout.toMillis()))
                .handler(new ThriftClientInitializer(
                        messageFraming,
                        messageEncoding,
                        requestTimeout,
                        socksProxy,
                        sslContextSupplier));

        Promise<Channel> promise = group.next().newPromise();
        bootstrap.connect(new InetSocketAddress(address.getHost(), address.getPort()))
                .addListener((ChannelFutureListener) future -> notifyConnect(future, promise));
        return promise;
    }
    catch (Throwable e) {
        return group.next().newFailedFuture(new TTransportException(e));
    }
}
@Test public void testExplicitPropertyMappings() { Map<String, String> properties = new ImmutableMap.Builder<String, String>() .put("thrift.client.thread-count", "99") .put("thrift.client.ssl-context.refresh-time", "33m") .put("thrift.client.socks-proxy", "example.com:9876") .build(); DriftNettyConnectionFactoryConfig expected = new DriftNettyConnectionFactoryConfig() .setThreadCount(99) .setSslContextRefreshTime(new Duration(33, MINUTES)) .setSocksProxy(HostAndPort.fromParts("example.com", 9876)); assertFullMapping(properties, expected); }
@Override
public Optional<Address> selectAddress(Optional<String> addressSelectionContext)
{
    checkArgument(!addressSelectionContext.isPresent(), "addressSelectionContext should not be set");

    List<HostAndPort> result = new ArrayList<>();
    for (HostAndPort address : addresses) {
        try {
            for (InetAddress ip : InetAddress.getAllByName(address.getHost())) {
                result.add(HostAndPort.fromParts(ip.getHostAddress(), address.getPort()));
            }
        }
        catch (UnknownHostException ignored) {
        }
    }
    if (result.isEmpty()) {
        return Optional.empty();
    }
    HostAndPort hostAndPort = result.get(ThreadLocalRandom.current().nextInt(result.size()));
    return Optional.of(() -> hostAndPort);
}
private static int testApacheServer(List<MethodInvocationFilter> filters)
        throws Exception
{
    ScribeService scribeService = new ScribeService();
    TProcessor processor = new scribe.Processor<>(scribeService);

    int invocationCount = 0;
    for (boolean secure : ImmutableList.of(true, false)) {
        for (Transport transport : Transport.values()) {
            for (Protocol protocol : Protocol.values()) {
                invocationCount += testApacheServer(secure, transport, protocol, processor, ImmutableList.<ToIntFunction<HostAndPort>>builder()
                        .addAll(legacyApacheThriftTestClients(filters, transport, protocol, secure))
                        .addAll(driftNettyTestClients(filters, transport, protocol, secure))
                        .addAll(apacheThriftTestClients(filters, transport, protocol, secure))
                        .build());
            }
        }
    }

    assertEquals(scribeService.getMessages(), newArrayList(concat(nCopies(invocationCount, MESSAGES))));

    return invocationCount;
}
private static TSocket createClientSocket(boolean secure, HostAndPort address)
        throws TTransportException
{
    if (!secure) {
        return new TSocket(address.getHost(), address.getPort());
    }

    try {
        SSLContext serverSslContext = ClientTestUtils.getClientSslContext();
        SSLSocket clientSocket = (SSLSocket) serverSslContext.getSocketFactory().createSocket(address.getHost(), address.getPort());
        // clientSocket.setSoTimeout(timeout);
        return new TSocket(clientSocket);
    }
    catch (Exception e) {
        throw new TTransportException("Error initializing secure socket", e);
    }
}
@Override
public final SafariBrowser launch(DesiredCapabilities caps) throws BrowserException {
    String udid = (String) caps.getCapability("uuid");
    SafariBrowser browser = udid == null ? launch() : launch(udid);

    @SuppressWarnings("unchecked")
    Map<String, String> proxyDict = (Map<String, String>) caps.getCapability("proxy");
    if (proxyDict != null) {
        HostAndPort proxy = HostAndPort.fromString(proxyDict.get("httpProxy"));
        browser.setHttpProxy(proxy);
    }

    @SuppressWarnings("unchecked")
    Map<String, String> cert = (Map<String, String>) caps.getCapability("httpsCert");
    if (cert != null) {
        browser.installHttpsCert(cert.get("certName"), cert.get("certContentBase64"));
    }

    return browser;
}
private GelfConfiguration getGelfConfiguration(Configuration config) {
    final Integer queueCapacity = config.getInt("graylog2.appender.queue-size", 512);
    final Long reconnectInterval = config.getMilliseconds("graylog2.appender.reconnect-interval", 500L);
    final Long connectTimeout = config.getMilliseconds("graylog2.appender.connect-timeout", 1000L);
    final Boolean isTcpNoDelay = config.getBoolean("graylog2.appender.tcp-nodelay", false);
    final String hostString = config.getString("graylog2.appender.host", "127.0.0.1:12201");
    final String protocol = config.getString("graylog2.appender.protocol", "udp");
    final HostAndPort hostAndPort = HostAndPort.fromString(hostString);
    GelfTransports gelfTransport = GelfTransports.valueOf(protocol.toUpperCase());
    final Integer sendBufferSize = config.getInt("graylog2.appender.sendbuffersize", 0); // causes the socket default to be used

    return new GelfConfiguration(hostAndPort.getHost(), hostAndPort.getPort())
            .transport(gelfTransport)
            .reconnectDelay(reconnectInterval.intValue())
            .queueSize(queueCapacity)
            .connectTimeout(connectTimeout.intValue())
            .tcpNoDelay(isTcpNoDelay)
            .sendBufferSize(sendBufferSize);
}
private boolean createLeaderWrapper(String leaderUrlStr) {
    try {
        URL tURL = new URL(leaderUrlStr);
        HostAndPort newLeader = HostAndPort.fromParts(tURL.getHost(), tURL.getPort());
        leaderUrlStr = newLeader.toString();
        if (leaderWrapper != null && leaderUrlStr.equals(leaderWrapper.getLeaderInfo())) {
            return true;
        }

        // create new Leader
        ManagedChannel clientChannel = session.getChannel(leaderUrlStr);
        leaderWrapper =
            new LeaderWrapper(
                leaderUrlStr,
                PDGrpc.newBlockingStub(clientChannel),
                PDGrpc.newStub(clientChannel),
                System.nanoTime());
    } catch (MalformedURLException e) {
        logger.error("Error updating leader.", e);
        return false;
    }
    logger.info(String.format("Switched to new leader: %s", leaderWrapper));
    return true;
}
private void initCluster() {
    GetMembersResponse resp = null;
    List<HostAndPort> pdAddrs = getSession().getConf().getPdAddrs();
    for (HostAndPort u : pdAddrs) {
        resp = getMembers(u);
        if (resp != null) {
            break;
        }
    }
    checkNotNull(resp, "Failed to init client for PD cluster.");
    long clusterId = resp.getHeader().getClusterId();
    header = RequestHeader.newBuilder().setClusterId(clusterId).build();
    tsoReq = TsoRequest.newBuilder().setHeader(header).build();
    this.pdAddrs = pdAddrs;
    createLeaderWrapper(resp.getLeader().getClientUrls(0));
    service = Executors.newSingleThreadScheduledExecutor(new ThreadFactoryBuilder().setDaemon(true).build());
    service.scheduleAtFixedRate(this::updateLeader, 1, 1, TimeUnit.MINUTES);
}
public synchronized ManagedChannel getChannel(String addressStr) {
    ManagedChannel channel = connPool.get(addressStr);
    if (channel == null) {
        HostAndPort address;
        try {
            address = HostAndPort.fromString(addressStr);
        } catch (Exception e) {
            // preserve the offending address and the original cause for diagnosis
            throw new IllegalArgumentException("failed to form address " + addressStr, e);
        }
        // Channel should be lazy without actual connection until first call
        // So a coarse grain lock is ok here
        channel = ManagedChannelBuilder.forAddress(address.getHostText(), address.getPort())
                .maxInboundMessageSize(conf.getMaxFrameSize())
                .usePlaintext(true)
                .idleTimeout(60, TimeUnit.SECONDS)
                .build();
        connPool.put(addressStr, channel);
    }
    return channel;
}
public static void startOstrichService(String tsdbHostPort, int ostrichPort) {
    final int TSDB_METRICS_PUSH_INTERVAL_IN_MILLISECONDS = 10 * 1000;
    OstrichAdminService ostrichService = new OstrichAdminService(ostrichPort);
    ostrichService.startAdminHttpService();
    if (tsdbHostPort != null) {
        LOG.info("Starting the OpenTsdb metrics pusher");
        try {
            HostAndPort pushHostPort = HostAndPort.fromString(tsdbHostPort);
            MetricsPusher metricsPusher = new MetricsPusher(
                pushHostPort.getHostText(),
                pushHostPort.getPort(),
                new OpenTsdbMetricConverter("KafkaOperator", HostName),
                TSDB_METRICS_PUSH_INTERVAL_IN_MILLISECONDS);
            metricsPusher.start();
            LOG.info("OpenTsdb metrics pusher started!");
        } catch (Throwable t) {
            // a pusher failure is OK; log the error and keep the service running
            LOG.error("Exception when starting stats pusher: ", t);
        }
    }
}
private void updateSeeds(ClusterConfig config) throws SyncException {
    List<String> hosts = new ArrayList<String>();
    for (Node n : config.getNodes()) {
        if (!config.getNode().equals(n)) {
            HostAndPort h = HostAndPort.fromParts(n.getHostname(), n.getPort());
            hosts.add(h.toString());
        }
    }
    Collections.sort(hosts);
    String seeds = Joiner.on(',').join(hosts);
    while (true) {
        try {
            Versioned<String> sv = unsyncStoreClient.get(SEEDS);
            if (sv.getValue() == null || !sv.getValue().equals(seeds)) {
                if (logger.isDebugEnabled()) {
                    logger.debug("[{}] Updating seeds to \"{}\" from \"{}\"",
                                 new Object[]{config.getNode().getNodeId(), seeds, sv.getValue()});
                }
                unsyncStoreClient.put(SEEDS, seeds);
            }
            break;
        } catch (ObsoleteVersionException e) {
            // value was updated concurrently; retry the read-modify-write
        }
    }
}
public boolean bootstrap(HostAndPort seed, Node localNode) throws SyncException { this.localNode = localNode; succeeded = false; SocketAddress sa = new InetSocketAddress(seed.getHostText(), seed.getPort()); ChannelFuture future = bootstrap.connect(sa); future.awaitUninterruptibly(); if (!future.isSuccess()) { logger.debug("Could not connect to " + seed, future.cause()); return false; } Channel channel = future.channel(); logger.debug("[{}] Connected to {}", localNode != null ? localNode.getNodeId() : null, seed); try { channel.closeFuture().await(); } catch (InterruptedException e) { logger.debug("Interrupted while waiting for bootstrap"); return succeeded; } return succeeded; }
public boolean bootstrap(HostAndPort seed, Node localNode) throws SyncException { this.localNode = localNode; succeeded = false; SocketAddress sa = new InetSocketAddress(seed.getHostText(), seed.getPort()); ChannelFuture future = bootstrap.connect(sa); future.awaitUninterruptibly(); if (!future.isSuccess()) { logger.debug("Could not connect to " + seed, future.getCause()); return false; } Channel channel = future.getChannel(); logger.debug("[{}] Connected to {}", localNode != null ? localNode.getNodeId() : null, seed); try { channel.getCloseFuture().await(); } catch (InterruptedException e) { logger.debug("Interrupted while waiting for bootstrap"); return succeeded; } return succeeded; }
/**
 * Gets the optional HTTP endpoint that should be redirected to in the event that this
 * scheduler is not the leader.
 *
 * @return Optional redirect target.
 */
@VisibleForTesting
Optional<HostAndPort> getRedirect() {
    Optional<HostAndPort> leaderHttp = getLeaderHttp();
    Optional<HostAndPort> localHttp = getLocalHttp();

    if (leaderHttp.isPresent()) {
        if (leaderHttp.equals(localHttp)) {
            return Optional.absent();
        } else {
            return leaderHttp;
        }
    } else {
        LOG.info("No leader found, not redirecting.");
        return Optional.absent();
    }
}
/**
 * Gets the current status of the elected leader.
 *
 * @return a {@code LeaderStatus} indicating whether there is an elected leader (and if so, if
 *         this instance is the leader).
 */
LeaderStatus getLeaderStatus() {
    Optional<ServiceInstance> leadingScheduler = getLeader();

    if (!leadingScheduler.isPresent()) {
        return LeaderStatus.NO_LEADER;
    }

    if (!leadingScheduler.get().isSetServiceEndpoint()) {
        LOG.warn("Leader service instance seems to be incomplete: " + leadingScheduler);
        return LeaderStatus.NO_LEADER;
    }

    Optional<HostAndPort> leaderHttp = getLeaderHttp();
    Optional<HostAndPort> localHttp = getLocalHttp();

    if (leaderHttp.isPresent() && leaderHttp.equals(localHttp)) {
        return LeaderStatus.LEADING;
    }

    return LeaderStatus.NOT_LEADING;
}
public HostAndPort getRandomReplicaAddress(byte partition) throws ReplicaManagerException {
    try {
        String path = BASEPATH + "/" + Byte.toString(partition);
        List<String> replicas = this.zk.getChildren(path, false);
        if (replicas.size() < 1) {
            return null;
        } else {
            String rep = replicas.get(this.random.nextInt(replicas.size()));
            path += "/" + rep;
            byte[] data = zk.getData(path, false, null);
            return HostAndPort.fromString(new String(data));
        }
    } catch (KeeperException | InterruptedException e) {
        throw new ReplicaManagerException(e);
    }
}
private ResourceTestRule setupResourceTestRule() {
    _authIdentityManager = new InMemoryAuthIdentityManager<>();
    _authIdentityManager.createIdentity(UAC_ALL_API_KEY, new ApiKeyModification().addRoles("uac-all"));

    final EmoPermissionResolver permissionResolver = new EmoPermissionResolver(mock(DataStore.class), mock(BlobStore.class));
    final InMemoryPermissionManager permissionManager = new InMemoryPermissionManager(permissionResolver);
    _roleManager = new InMemoryRoleManager(permissionManager);

    LocalSubjectUserAccessControl uac = new LocalSubjectUserAccessControl(_roleManager, permissionResolver,
            _authIdentityManager, HostAndPort.fromParts("localhost", 8080), new MetricRegistry());

    createRole(_roleManager, null, "uac-all", ImmutableSet.of("role|*", "apikey|*"));
    createRole(_roleManager, null, "uac-none", ImmutableSet.of());

    return setupResourceTestRule(
            ImmutableList.of(
                    new UserAccessControlResource1(
                            new RoleResource1(uac),
                            new ApiKeyResource1(uac)),
                    new UserAccessControlRequestMessageBodyReader()),
            _authIdentityManager,
            permissionManager);
}
/** Create an SOA DedupQueue client for forwarding non-partition-aware clients to the right server. */
@Provides
@Singleton
@PartitionAwareClient
DedupQueueServiceAuthenticator provideDedupQueueClient(MultiThreadedServiceFactory<AuthDedupQueueService> serviceFactory,
                                                       @DedupQueueHostDiscovery HostDiscovery hostDiscovery,
                                                       DedupQueueService databus, @SelfHostAndPort HostAndPort self,
                                                       HealthCheckRegistry healthCheckRegistry, MetricRegistry metricRegistry) {
    AuthDedupQueueService client = ServicePoolBuilder.create(AuthDedupQueueService.class)
            .withHostDiscovery(hostDiscovery)
            .withServiceFactory(new PartitionAwareServiceFactory<>(
                    AuthDedupQueueService.class, serviceFactory, new TrustedDedupQueueService(databus), self,
                    healthCheckRegistry, metricRegistry))
            .withMetricRegistry(_environment.metrics())
            .withCachingPolicy(ServiceCachingPolicyBuilder.getMultiThreadedClientPolicy())
            .buildProxy(new ExponentialBackoffRetry(5, 50, 1000, TimeUnit.MILLISECONDS));
    _environment.lifecycle().manage(new ManagedServicePoolProxy(client));
    return DedupQueueServiceAuthenticator.proxied(client);
}
public static void main(String[] args) {
    if (args.length != 1) {
        usage();
        throw new RuntimeException("Incorrect arguments");
    }
    HillviewLogger.initialize("worker", "hillview.log");
    try {
        final IDataSet<Empty> dataSet = new LocalDataSet<Empty>(Empty.getInstance());
        final String hostnameAndPort = args[0];
        final HillviewServer server = new HillviewServer(HostAndPort.fromString(hostnameAndPort), dataSet);
        HillviewLogger.instance.info("Created HillviewServer");
        Thread.currentThread().join();
    } catch (Exception ex) {
        HillviewLogger.instance.error("Caught exception", ex);
    }
}
@Test
public void remoteDataSetTest() throws IOException {
    final int numCols = 3;
    final int size = 1000, resolution = 20;
    final SmallTable randTable = TestTables.getIntTable(size, numCols);
    RecordOrder cso = new RecordOrder();
    for (String colName : randTable.getSchema().getColumnNames()) {
        cso.append(new ColumnSortOrientation(randTable.getSchema().getDescription(colName), true));
    }
    final SampleQuantileSketch sqSketch = new SampleQuantileSketch(cso, resolution, size, 0);
    final HostAndPort h1 = HostAndPort.fromParts("127.0.0.1", 1234);
    final HillviewServer server1 = new HillviewServer(h1, new LocalDataSet<ITable>(randTable));
    try {
        final RemoteDataSet<ITable> rds1 = new RemoteDataSet<>(h1);
        final SampleList sl = rds1.blockingSketch(sqSketch);
        IndexComparator comp = cso.getComparator(sl.table);
        for (int i = 0; i < (sl.table.getNumOfRows() - 1); i++)
            assertTrue(comp.compare(i, i + 1) <= 0);
    } finally {
        server1.shutdown();
    }
}
@Override
public InternalElasticsearchClient create(String allIndices) {
    Settings settings = Settings.builder()
        .put("cluster.name", cluster)
        .put("lazyClient.transport.sniff", true)
        .build();
    TransportClient client = TransportClient.builder()
        .settings(settings)
        .build();
    for (String host : hosts.get()) {
        HostAndPort hostAndPort = HostAndPort.fromString(host).withDefaultPort(9300);
        try {
            client.addTransportAddress(new InetSocketTransportAddress(InetAddress.getByName(
                hostAndPort.getHostText()), hostAndPort.getPort()));
        } catch (UnknownHostException e) {
            // Hosts may be down transiently, we should still try to connect. If all of them happen
            // to be down we will fail later when trying to use the client when checking the index
            // template.
            continue;
        }
    }
    return new NativeClient(client, flushOnWrites);
}
public static String getTSOHostAndPort(TSOServerConfig config) throws SocketException, UnknownHostException {
    // Build TSO host:port string and validate it
    final String tsoNetIfaceName = config.getNetworkIfaceName();
    InetAddress addr = getIPAddressFromNetworkInterface(tsoNetIfaceName);
    final int tsoPort = config.getPort();

    String tsoHostAndPortAsString = "N/A";
    try {
        tsoHostAndPortAsString = HostAndPort.fromParts(addr.getHostAddress(), tsoPort).toString();
    } catch (IllegalArgumentException e) {
        LOG.error("Cannot parse TSO host:port string {}", tsoHostAndPortAsString);
        throw e;
    }
    return tsoHostAndPortAsString;
}
/** Binds server socket, incrementing port on {@link BindException} if {@code tryAlternativePortsOnFailure}. */
ServerSocket createServerSocket(HostAndPort bind, boolean tryAlternativePortsOnFailure) throws IOException {
    InetAddress host = InetAddress.getByName(bind.getHost());
    int port = bind.getPort();
    BindException bindException = null;
    for (int n = 0; n < MAX_BIND_ATTEMPTS; n++) {
        try {
            return serverSocketFactory.createServerSocket(port, CONNECTION_BACKLOG, host);
        } catch (BindException e) {
            if (port == 0 || !tryAlternativePortsOnFailure) {
                throw e;
            }
            if (bindException == null) {
                bindException = e;
            } else if (!e.equals(bindException)) {
                bindException.addSuppressed(e);
            }
            port++;
        }
    }
    throw bindException;
}
@Override
public void nodeChanged() throws Exception {
    String tsoInfo = getCurrentTSOInfoFoundInZK(zkCurrentTsoPath);
    // TSO info includes the new TSO host:port address and epoch
    String[] currentTSOAndEpochArray = tsoInfo.split("#");
    HostAndPort hp = HostAndPort.fromString(currentTSOAndEpochArray[0]);
    setTSOAddress(hp.getHostText(), hp.getPort());
    epoch = Long.parseLong(currentTSOAndEpochArray[1]);
    LOG.info("CurrentTSO ZNode changed. New TSO Host & Port {}/Epoch {}", hp, getEpoch());
    if (currentChannel != null && currentChannel.isConnected()) {
        LOG.info("\tClosing channel with previous TSO {}", currentChannel);
        currentChannel.close();
    }
}
@Inject
public HintsPollerManager(LifeCycleRegistry lifeCycle,
                          @HintsConsistencyTimeValues final Map<String, ValueStore<Long>> timestampCache,
                          @GlobalFullConsistencyZooKeeper CuratorFramework curator,
                          @SelfHostAndPort HostAndPort self,
                          @CQLSessionForHintsPollerMap final Map<String, HintsPollerCQLSession> cqlSessionForHintsPollerMap,
                          final ClusterHintsPoller clusterHintsPoller,
                          LeaderServiceTask dropwizardTask,
                          final MetricRegistry metricRegistry) {
    _lifeCycle = lifeCycle;
    _timestampCache = checkNotNull(timestampCache, "timestampCache");
    _curator = checkNotNull(curator, "curator");
    _self = checkNotNull(self, "self");
    _cqlSessionForHintsPollerMap = checkNotNull(cqlSessionForHintsPollerMap, "cqlSessionForHintsPollerMap");
    _clusterHintsPoller = checkNotNull(clusterHintsPoller, "clusterHintsPoller");
    _dropwizardTask = checkNotNull(dropwizardTask, "dropwizardTask");
    _metricRegistry = checkNotNull(metricRegistry, "metricRegistry");

    _lifeCycle.manage(this);
}
@Inject
public DefaultFanoutManager(final EventStore eventStore, final SubscriptionDAO subscriptionDao,
                            SubscriptionEvaluator subscriptionEvaluator, DataCenters dataCenters,
                            @DatabusZooKeeper CuratorFramework curator, @SelfHostAndPort HostAndPort self,
                            LeaderServiceTask dropwizardTask, RateLimitedLogFactory logFactory,
                            MetricRegistry metricRegistry, Clock clock) {
    _eventStore = checkNotNull(eventStore, "eventStore");
    _subscriptionDao = checkNotNull(subscriptionDao, "subscriptionDao");
    _subscriptionEvaluator = checkNotNull(subscriptionEvaluator, "subscriptionEvaluator");
    _dataCenters = checkNotNull(dataCenters, "dataCenters");
    _curator = checkNotNull(curator, "curator");
    _selfId = checkNotNull(self, "self").toString();
    _dropwizardTask = checkNotNull(dropwizardTask, "dropwizardTask");
    _logFactory = checkNotNull(logFactory, "logFactory");
    _metricRegistry = metricRegistry;
    _clock = clock;
}
private void configureServerAddresses(final Map<String, Object> properties) {
    final String port = (String) properties.get(KUNDERA_PORT);
    final String hosts = (String) properties.get(KUNDERA_NODES);
    final String[] hostList = hosts != null ? hosts.split(",") : new String[] {};
    final Integer defaultPort = port != null ? Integer.valueOf(port) : 27017;

    serverAddresses = new ArrayList<>();
    for (int i = 0; i < hostList.length; i++) {
        final HostAndPort hostAndPort = HostAndPort.fromString(hostList[i].trim());
        serverAddresses.add(new ServerAddress(hostAndPort.getHost(), hostAndPort.getPortOrDefault(defaultPort)));
    }
    if (serverAddresses.isEmpty()) {
        serverAddresses.add(new ServerAddress());
    }
}
@Nonnull
public static Replica fromString(@Nonnull String info) {
    try {
        checkNotNull(info);
        HostAndPort hostAndPort = HostAndPort.fromString(info);
        InetAddress addr = InetAddress.getByName(hostAndPort.getHostText());
        InetSocketAddress saddr = new InetSocketAddress(addr, hostAndPort.getPort());
        return new Replica(saddr);
    } catch (UnknownHostException e) {
        throw Throwables.propagate(e);
    }
}
@Test public void testExplicitPropertyMappings() { Map<String, String> properties = new ImmutableMap.Builder<String, String>() .put("thrift.client.transport", "HEADER") .put("thrift.client.protocol", "COMPACT") .put("thrift.client.connect-timeout", "99ms") .put("thrift.client.request-timeout", "33m") .put("thrift.client.socks-proxy", "localhost:11") .put("thrift.client.max-frame-size", "55MB") .put("thrift.client.max-string-size", "66MB") .put("thrift.client.ssl.enabled", "true") .put("thrift.client.ssl.trust-certificate", "trust") .put("thrift.client.ssl.key", "key") .put("thrift.client.ssl.key-password", "key_password") .build(); ApacheThriftClientConfig expected = new ApacheThriftClientConfig() .setTransport(HEADER) .setProtocol(COMPACT) .setConnectTimeout(new Duration(99, MILLISECONDS)) .setRequestTimeout(new Duration(33, MINUTES)) .setSocksProxy(HostAndPort.fromParts("localhost", 11)) .setMaxFrameSize(new DataSize(55, MEGABYTE)) .setMaxStringSize(new DataSize(66, MEGABYTE)) .setSslEnabled(true) .setTrustCertificate(new File("trust")) .setKey(new File("key")) .setKeyPassword("key_password"); assertFullMapping(properties, expected); }
private static int testProcessor(TProcessor processor, List<ToIntFunction<HostAndPort>> clients)
        throws Exception
{
    try (TServerSocket serverTransport = new TServerSocket(0)) {
        TProtocolFactory protocolFactory = new Factory();
        TTransportFactory transportFactory = new TFramedTransport.Factory();

        TServer server = new TSimpleServer(new Args(serverTransport)
                .protocolFactory(protocolFactory)
                .transportFactory(transportFactory)
                .processor(processor));

        Thread serverThread = new Thread(server::serve);
        try {
            serverThread.start();

            int localPort = serverTransport.getServerSocket().getLocalPort();
            HostAndPort address = HostAndPort.fromParts("localhost", localPort);

            int sum = 0;
            for (ToIntFunction<HostAndPort> client : clients) {
                sum += client.applyAsInt(address);
            }
            return sum;
        }
        finally {
            server.stop();
            serverThread.interrupt();
        }
    }
}
private static int logApacheThriftInvocationHandler(HostAndPort address, List<io.airlift.drift.transport.apache.scribe.drift.LogEntry> entries)
{
    ApacheThriftClientConfig config = new ApacheThriftClientConfig();
    ApacheThriftConnectionFactoryConfig factoryConfig = new ApacheThriftConnectionFactoryConfig();
    try (ApacheThriftMethodInvokerFactory<Void> methodInvokerFactory = new ApacheThriftMethodInvokerFactory<>(factoryConfig, clientIdentity -> config)) {
        MethodInvoker methodInvoker = methodInvokerFactory.createMethodInvoker(null);

        ParameterMetadata parameter = new ParameterMetadata(
                (short) 1,
                "messages",
                (ThriftCodec<Object>) codecManager.getCodec(list(codecManager.getCodec(io.airlift.drift.transport.apache.scribe.drift.LogEntry.class).getType())));

        MethodMetadata methodMetadata = new MethodMetadata(
                "Log",
                ImmutableList.of(parameter),
                (ThriftCodec<Object>) (Object) codecManager.getCodec(io.airlift.drift.transport.apache.scribe.drift.ResultCode.class),
                ImmutableMap.of(),
                false);

        ListenableFuture<Object> future = methodInvoker.invoke(new InvokeRequest(methodMetadata, () -> address, ImmutableMap.of(), ImmutableList.of(entries)));
        assertEquals(future.get(), DRIFT_OK);
        return 1;
    }
    catch (Exception e) {
        throw new RuntimeException(e);
    }
}