@Override
public Connector create(final String connectorId, Map<String, String> requiredConfig, ConnectorContext context)
{
    // Creates a Kudu connector instance by bootstrapping a dedicated Guice
    // injector wired with the engine-provided NodeManager and the Kudu module.
    requireNonNull(requiredConfig, "config is null");
    try {
        Bootstrap app = new Bootstrap(
                binder -> binder.bind(NodeManager.class).toInstance(context.getNodeManager()),
                new KuduModule(connectorId));
        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(requiredConfig)
                .initialize();
        return injector.getInstance(KuduConnector.class);
    }
    catch (Exception e) {
        // Throwables.propagate(e) is deprecated: rethrow unchecked exceptions
        // as-is and wrap checked ones, preserving the original as the cause.
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new RuntimeException(e);
    }
}
/**
 * Creates Raptor connectors. Validates and stores all collaborators;
 * map arguments are defensively copied so later caller mutation cannot
 * affect this factory.
 */
public RaptorConnectorFactory(
        String name,
        Module metadataModule,
        Map<String, Module> backupProviders,
        Map<String, String> optionalConfig,
        NodeManager nodeManager,
        PageSorter pageSorter,
        BlockEncodingSerde blockEncodingSerde,
        TypeManager typeManager)
{
    checkArgument(!isNullOrEmpty(name), "name is null or empty");
    this.name = name;
    this.metadataModule = requireNonNull(metadataModule, "metadataModule is null");
    this.backupProviders = ImmutableMap.copyOf(requireNonNull(backupProviders, "backupProviders is null"));
    // Defensive copy for consistency with backupProviders; previously the
    // caller's mutable map was retained directly.
    this.optionalConfig = ImmutableMap.copyOf(requireNonNull(optionalConfig, "optionalConfig is null"));
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.pageSorter = requireNonNull(pageSorter, "pageSorter is null");
    this.blockEncodingSerde = requireNonNull(blockEncodingSerde, "blockEncodingSerde is null");
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
}
// Guice entry point: derives this node's identity and coordinator status from
// the NodeManager, then delegates to the full constructor with the individual
// settings unpacked from ShardCleanerConfig. No intermediate locals are
// possible because this(...) must be the first statement.
@Inject
public ShardCleaner(
        @ForMetadata IDBI dbi,
        NodeManager nodeManager,
        StorageService storageService,
        Optional<BackupStore> backupStore,
        ShardCleanerConfig config)
{
    this(dbi,
            // identifier of the node this cleaner runs on
            nodeManager.getCurrentNode().getNodeIdentifier(),
            // whether the current node is a coordinator
            nodeManager.getCoordinators().contains(nodeManager.getCurrentNode()),
            storageService,
            backupStore,
            config.getMaxTransactionAge(),
            config.getLocalCleanerInterval(),
            config.getLocalCleanTime(),
            config.getLocalPurgeTime(),
            config.getBackupCleanerInterval(),
            config.getBackupCleanTime(),
            config.getBackupPurgeTime(),
            config.getBackupDeletionThreads());
}
// Recovers shards missing from local storage by restoring them from the
// backup store; missing shards are discovered on the given interval.
public ShardRecoveryManager(
        StorageService storageService,
        Optional<BackupStore> backupStore,
        NodeManager nodeManager,
        ShardManager shardManager,
        Duration missingShardDiscoveryInterval,
        int recoveryThreads)
{
    this.storageService = requireNonNull(storageService, "storageService is null");
    this.backupStore = requireNonNull(backupStore, "backupStore is null");
    // only the current node's identifier is retained, not the manager itself
    this.nodeIdentifier = requireNonNull(nodeManager, "nodeManager is null").getCurrentNode().getNodeIdentifier();
    this.shardManager = requireNonNull(shardManager, "shardManager is null");
    this.missingShardDiscoveryInterval = requireNonNull(missingShardDiscoveryInterval, "missingShardDiscoveryInterval is null");
    // recoveryThreads bounds concurrent recoveries; executorService is a
    // field defined elsewhere in this class
    this.shardQueue = new MissingShardsQueue(new PrioritizedFifoExecutor<>(executorService, recoveryThreads, new MissingShardComparator()));
    this.stats = new ShardRecoveryStats();
}
// Guice entry point: unpacks the ejection interval from StorageManagerConfig
// and converts the connector id to its string form before delegating to the
// full constructor.
@Inject
public ShardEjector(
        NodeManager nodeManager,
        ShardManager shardManager,
        StorageService storageService,
        StorageManagerConfig config,
        Optional<BackupStore> backupStore,
        RaptorConnectorId connectorId)
{
    this(nodeManager,
            shardManager,
            storageService,
            config.getShardEjectorInterval(),
            backupStore,
            connectorId.toString());
}
/**
 * Coordinator-side manager of cluster-wide query memory: tracks per-node
 * memory info and distributes memory-pool assignments.
 */
@Inject
public ClusterMemoryManager(
        @ForMemoryManager HttpClient httpClient,
        NodeManager nodeManager,
        LocationFactory locationFactory,
        MBeanExporter exporter,
        JsonCodec<MemoryInfo> memoryInfoCodec,
        JsonCodec<MemoryPoolAssignmentsRequest> assignmentsRequestJsonCodec,
        QueryIdGenerator queryIdGenerator,
        ServerConfig serverConfig,
        MemoryManagerConfig config)
{
    requireNonNull(config, "config is null");
    // previously dereferenced without checks, producing bare NPEs on null
    requireNonNull(queryIdGenerator, "queryIdGenerator is null");
    requireNonNull(serverConfig, "serverConfig is null");
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.locationFactory = requireNonNull(locationFactory, "locationFactory is null");
    this.httpClient = requireNonNull(httpClient, "httpClient is null");
    this.exporter = requireNonNull(exporter, "exporter is null");
    this.memoryInfoCodec = requireNonNull(memoryInfoCodec, "memoryInfoCodec is null");
    this.assignmentsRequestJsonCodec = requireNonNull(assignmentsRequestJsonCodec, "assignmentsRequestJsonCodec is null");
    this.maxQueryMemory = config.getMaxQueryMemory();
    this.coordinatorId = queryIdGenerator.getCoordinatorId();
    // memory management is active only on coordinators
    this.enabled = serverConfig.isCoordinator();
    this.killOnOutOfMemoryDelay = config.getKillOnOutOfMemoryDelay();
    this.killOnOutOfMemory = config.isKillOnOutOfMemory();
}
// Central registry that wires a connector's services (metadata, splits, page
// sources/sinks, indexes, handles) into the engine's managers.
// NOTE(review): unlike most injected constructors in this codebase, none of
// these arguments are null-checked here — consider requireNonNull for
// fail-fast behavior.
@Inject
public ConnectorManager(MetadataManager metadataManager,
        AccessControlManager accessControlManager,
        SplitManager splitManager,
        PageSourceManager pageSourceManager,
        IndexManager indexManager,
        PageSinkManager pageSinkManager,
        HandleResolver handleResolver,
        NodeManager nodeManager,
        TransactionManager transactionManager)
{
    this.metadataManager = metadataManager;
    this.accessControlManager = accessControlManager;
    this.splitManager = splitManager;
    this.pageSourceManager = pageSourceManager;
    this.indexManager = indexManager;
    this.pageSinkManager = pageSinkManager;
    this.handleResolver = handleResolver;
    this.nodeManager = nodeManager;
    this.transactionManager = transactionManager;
}
/**
 * Internal connector exposing the built-in system tables.
 */
public SystemConnector(
        String connectorId,
        NodeManager nodeManager,
        Set<SystemTable> tables,
        Function<TransactionId, ConnectorTransactionHandle> transactionHandleFunction)
{
    // validation order matches the parameter order
    this.connectorId = requireNonNull(connectorId, "connectorId is null");
    requireNonNull(nodeManager, "nodeManager is null");
    requireNonNull(tables, "tables is null");
    this.transactionHandleFunction = requireNonNull(transactionHandleFunction, "transactionHandleFunction is null");
    // the remaining collaborators are built from the validated arguments
    this.metadata = new SystemTablesMetadata(connectorId, tables);
    this.splitManager = new SystemSplitManager(nodeManager, tables);
    this.recordSetProvider = new SystemRecordSetProvider(tables);
}
/**
 * Node selector that accounts for network topology when assigning splits.
 */
public TopologyAwareNodeSelector(
        NodeManager nodeManager,
        NodeTaskMap nodeTaskMap,
        boolean includeCoordinator,
        boolean doubleScheduling,
        Supplier<NodeMap> nodeMap,
        int minCandidates,
        int maxSplitsPerNode,
        int maxSplitsPerNodePerTaskWhenFull,
        List<CounterStat> topologicalSplitCounters,
        List<String> networkLocationSegmentNames,
        NetworkLocationCache networkLocationCache)
{
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null");
    // scheduling knobs (primitives, no null checks needed)
    this.includeCoordinator = includeCoordinator;
    this.doubleScheduling = doubleScheduling;
    this.minCandidates = minCandidates;
    this.maxSplitsPerNode = maxSplitsPerNode;
    this.maxSplitsPerNodePerTaskWhenFull = maxSplitsPerNodePerTaskWhenFull;
    // wrapped so the node map supplier can be swapped atomically later
    this.nodeMap = new AtomicReference<>(nodeMap);
    this.topologicalSplitCounters = requireNonNull(topologicalSplitCounters, "topologicalSplitCounters is null");
    this.networkLocationSegmentNames = requireNonNull(networkLocationSegmentNames, "networkLocationSegmentNames is null");
    this.networkLocationCache = requireNonNull(networkLocationCache, "networkLocationCache is null");
}
/**
 * Topology-unaware node selector that balances splits across nodes.
 */
public SimpleNodeSelector(
        NodeManager nodeManager,
        NodeTaskMap nodeTaskMap,
        boolean includeCoordinator,
        boolean doubleScheduling,
        Supplier<NodeMap> nodeMap,
        int minCandidates,
        int maxSplitsPerNode,
        int maxSplitsPerNodePerTaskWhenFull)
{
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null");
    // scheduling knobs (primitives, no null checks needed)
    this.includeCoordinator = includeCoordinator;
    this.doubleScheduling = doubleScheduling;
    this.minCandidates = minCandidates;
    this.maxSplitsPerNode = maxSplitsPerNode;
    this.maxSplitsPerNodePerTaskWhenFull = maxSplitsPerNodePerTaskWhenFull;
    // wrapped so the node map supplier can be swapped atomically later
    this.nodeMap = new AtomicReference<>(nodeMap);
}
@Override
public Connector create(String s, Map<String, String> config, ConnectorContext context)
{
    // Builds a REST connector backed by a Rest implementation created from
    // the raw config map; the connector id argument is not used here.
    return new RestConnector(context.getNodeManager(), restFactory.create(config));
}
@Override public Connector create(String connectorId, Map<String, String> config, ConnectorContext context) { requireNonNull(connectorId, "connectorId is null"); requireNonNull(config, "config is null"); try { Bootstrap app = new Bootstrap( // new JsonModule(), new EthereumConnectorModule(), binder -> { binder.bind(EthereumConnectorId.class).toInstance(new EthereumConnectorId(connectorId)); binder.bind(TypeManager.class).toInstance(context.getTypeManager()); binder.bind(NodeManager.class).toInstance(context.getNodeManager()); } ); Injector injector = app.strictConfig() .doNotInitializeLogging() .setRequiredConfigurationProperties(config) .initialize(); return injector.getInstance(EthereumConnector.class); } catch (Exception e) { throw Throwables.propagate(e); } }
/**
 * Maintains a cache of Kafka SimpleConsumer instances with timeouts and
 * buffer sizes taken from the connector configuration.
 */
@Inject
public KafkaSimpleConsumerManager(
        KafkaConnectorId connectorId,
        KafkaConnectorConfig kafkaConnectorConfig,
        NodeManager nodeManager)
{
    this.connectorId = requireNonNull(connectorId, "connectorId is null").toString();
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    // fixed message: previously said "kafkaConfig is null", which did not
    // match the parameter name
    requireNonNull(kafkaConnectorConfig, "kafkaConnectorConfig is null");
    // checkedCast fails fast if a configured duration/size overflows an int
    this.connectTimeoutMillis = Ints.checkedCast(kafkaConnectorConfig.getKafkaConnectTimeout().toMillis());
    this.bufferSizeBytes = Ints.checkedCast(kafkaConnectorConfig.getKafkaBufferSize().toBytes());
    this.consumerCache = CacheBuilder.newBuilder().build(new SimpleConsumerCacheLoader());
}
// Factory state shared by every Kafka connector instance created here.
// When tableDescriptionSupplier is empty, a supplier is bound by the
// connector module at create() time instead.
KafkaConnectorFactory(TypeManager typeManager,
        NodeManager nodeManager,
        Optional<Supplier<Map<SchemaTableName, KafkaTopicDescription>>> tableDescriptionSupplier,
        Map<String, String> optionalConfig)
{
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.optionalConfig = requireNonNull(optionalConfig, "optionalConfig is null");
    this.tableDescriptionSupplier = requireNonNull(tableDescriptionSupplier, "tableDescriptionSupplier is null");
}
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    // Creates a Kafka connector instance; binds either the explicitly
    // supplied table-description supplier or the default file-based one.
    requireNonNull(connectorId, "connectorId is null");
    requireNonNull(config, "config is null");
    try {
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new KafkaConnectorModule(),
                binder -> {
                    binder.bind(KafkaConnectorId.class).toInstance(new KafkaConnectorId(connectorId));
                    binder.bind(TypeManager.class).toInstance(typeManager);
                    binder.bind(NodeManager.class).toInstance(nodeManager);
                    if (tableDescriptionSupplier.isPresent()) {
                        binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, KafkaTopicDescription>>>() {}).toInstance(tableDescriptionSupplier.get());
                    }
                    else {
                        binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, KafkaTopicDescription>>>() {}).to(KafkaTableDescriptionSupplier.class).in(Scopes.SINGLETON);
                    }
                });
        Injector injector = app.strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(KafkaConnector.class);
    }
    catch (Exception e) {
        // Throwables.propagate(e) is deprecated: rethrow unchecked exceptions
        // as-is and wrap checked ones, preserving the original as the cause.
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new RuntimeException(e);
    }
}
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    // Creates a Raptor connector instance: wires the metadata, backup, and
    // storage modules together with engine-provided services.
    try {
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new MBeanModule(),
                binder -> {
                    CurrentNodeId currentNodeId = new CurrentNodeId(nodeManager.getCurrentNode().getNodeIdentifier());
                    // wrap the platform server — presumably to tolerate
                    // duplicate registrations across connector instances
                    MBeanServer mbeanServer = new RebindSafeMBeanServer(getPlatformMBeanServer());
                    binder.bind(MBeanServer.class).toInstance(mbeanServer);
                    binder.bind(CurrentNodeId.class).toInstance(currentNodeId);
                    binder.bind(NodeManager.class).toInstance(nodeManager);
                    binder.bind(PageSorter.class).toInstance(pageSorter);
                    binder.bind(BlockEncodingSerde.class).toInstance(blockEncodingSerde);
                    binder.bind(TypeManager.class).toInstance(typeManager);
                },
                metadataModule,
                new BackupModule(backupProviders),
                new StorageModule(connectorId),
                new RaptorModule(connectorId));
        Injector injector = app
                .strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(RaptorConnector.class);
    }
    catch (Exception e) {
        // Throwables.propagate(e) is deprecated: rethrow unchecked exceptions
        // as-is and wrap checked ones, preserving the original as the cause.
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new RuntimeException(e);
    }
}
// Guice entry point: unpacks the missing-shard discovery interval and the
// recovery thread count from StorageManagerConfig and delegates to the full
// constructor.
@Inject
public ShardRecoveryManager(
        StorageService storageService,
        Optional<BackupStore> backupStore,
        NodeManager nodeManager,
        ShardManager shardManager,
        StorageManagerConfig config)
{
    this(storageService,
            backupStore,
            nodeManager,
            shardManager,
            config.getMissingShardDiscoveryInterval(),
            config.getRecoveryThreads());
}
/**
 * Periodically ejects shards from this node on the given interval, using a
 * single-threaded daemon scheduler named after the connector.
 */
public ShardEjector(
        NodeManager nodeManager,
        ShardManager shardManager,
        StorageService storageService,
        Duration interval,
        Optional<BackupStore> backupStore,
        String connectorId)
{
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.shardManager = requireNonNull(shardManager, "shardManager is null");
    this.storageService = requireNonNull(storageService, "storageService is null");
    this.interval = requireNonNull(interval, "interval is null");
    this.backupStore = requireNonNull(backupStore, "backupStore is null");
    // previously unchecked: a null connectorId silently produced the thread
    // name "shard-ejector-null"
    requireNonNull(connectorId, "connectorId is null");
    this.executor = newScheduledThreadPool(1, daemonThreadsNamed("shard-ejector-" + connectorId));
}
// Guice entry point: compaction runs on behalf of the current node, with all
// tuning parameters unpacked from StorageManagerConfig.
@Inject
public ShardCompactionManager(@ForMetadata IDBI dbi,
        NodeManager nodeManager,
        ShardManager shardManager,
        ShardCompactor compactor,
        StorageManagerConfig config)
{
    this(dbi,
            // only the current node's identifier is needed downstream
            nodeManager.getCurrentNode().getNodeIdentifier(),
            shardManager,
            compactor,
            config.getCompactionInterval(),
            config.getMaxShardSize(),
            config.getMaxShardRows(),
            config.getCompactionThreads(),
            config.isCompactionEnabled());
}
/**
 * Builds a Raptor connector factory backed by an H2 metadata database and
 * uncompressed local file storage rooted at {@code cacheDir}.
 */
private static ConnectorFactory createRaptorConnectorFactory(String cacheDir, NodeManager nodeManager)
{
    try {
        File dataDir = new File(cacheDir);
        File databaseDir = new File(dataDir, "db");
        Map<String, String> config = ImmutableMap.<String, String>builder()
                .put("metadata.db.type", "h2")
                .put("metadata.db.filename", databaseDir.getAbsolutePath())
                .put("storage.data-directory", dataDir.getAbsolutePath())
                .put("storage.compress", "false")
                .build();
        TypeManager typeManager = new TypeRegistry();
        BlockEncodingSerde blockEncodingSerde = new BlockEncodingManager(typeManager);
        RaptorPlugin plugin = new RaptorPlugin();
        plugin.setOptionalConfig(config);
        plugin.setNodeManager(nodeManager);
        plugin.setBlockEncodingSerde(blockEncodingSerde);
        plugin.setTypeManager(typeManager);
        // the plugin is expected to expose exactly one ConnectorFactory
        return getOnlyElement(plugin.getServices(ConnectorFactory.class));
    }
    catch (Exception e) {
        // Throwables.propagate(e) is deprecated: rethrow unchecked exceptions
        // as-is and wrap checked ones, preserving the original as the cause.
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new RuntimeException(e);
    }
}
// Test helper: builds a NodeManager containing the current node plus one
// TestingNode for each additional identifier.
private static NodeManager createNodeManager(String current, String... others)
{
    Node currentNode = new TestingNode(current);
    ImmutableSet.Builder<Node> builder = ImmutableSet.builder();
    builder.add(currentNode);
    for (String identifier : others) {
        builder.add(new TestingNode(identifier));
    }
    return new TestingNodeManager(builder.build(), currentNode);
}
// Maintains a cache of JedisPool instances for connecting to Redis.
// NOTE(review): the injected nodeManager is accepted but never stored or
// used here — confirm whether the dependency can be dropped. Also note the
// null-check message says "redisConfig" rather than the parameter name.
@Inject
RedisJedisManager(
        RedisConnectorConfig redisConnectorConfig,
        NodeManager nodeManager)
{
    this.redisConnectorConfig = requireNonNull(redisConnectorConfig, "redisConfig is null");
    this.jedisPoolCache = CacheBuilder.newBuilder().build(new JedisPoolCacheLoader());
    this.jedisPoolConfig = new JedisPoolConfig();
}
// Factory state shared by every Redis connector instance created here.
// When tableDescriptionSupplier is empty, a supplier is bound by the
// connector module at create() time instead.
RedisConnectorFactory(
        TypeManager typeManager,
        NodeManager nodeManager,
        Optional<Supplier<Map<SchemaTableName, RedisTableDescription>>> tableDescriptionSupplier,
        Map<String, String> optionalConfig)
{
    this.typeManager = requireNonNull(typeManager, "typeManager is null");
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.optionalConfig = requireNonNull(optionalConfig, "optionalConfig is null");
    this.tableDescriptionSupplier = requireNonNull(tableDescriptionSupplier, "tableDescriptionSupplier is null");
}
@Override
public Connector create(String connectorId, Map<String, String> config)
{
    // Creates a Redis connector instance; binds either the explicitly
    // supplied table-description supplier or the default file-based one.
    requireNonNull(connectorId, "connectorId is null");
    requireNonNull(config, "config is null");
    try {
        Bootstrap app = new Bootstrap(
                new JsonModule(),
                new RedisConnectorModule(),
                binder -> {
                    binder.bind(RedisConnectorId.class).toInstance(new RedisConnectorId(connectorId));
                    binder.bind(TypeManager.class).toInstance(typeManager);
                    binder.bind(NodeManager.class).toInstance(nodeManager);
                    if (tableDescriptionSupplier.isPresent()) {
                        binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, RedisTableDescription>>>() {}).toInstance(tableDescriptionSupplier.get());
                    }
                    else {
                        binder.bind(new TypeLiteral<Supplier<Map<SchemaTableName, RedisTableDescription>>>() {})
                                .to(RedisTableDescriptionSupplier.class)
                                .in(Scopes.SINGLETON);
                    }
                });
        Injector injector = app.strictConfig()
                .doNotInitializeLogging()
                .setRequiredConfigurationProperties(config)
                .setOptionalConfigurationProperties(optionalConfig)
                .initialize();
        return injector.getInstance(RedisConnector.class);
    }
    catch (Exception e) {
        // Throwables.propagate(e) is deprecated: rethrow unchecked exceptions
        // as-is and wrap checked ones, preserving the original as the cause.
        if (e instanceof RuntimeException) {
            throw (RuntimeException) e;
        }
        throw new RuntimeException(e);
    }
}
/**
 * Internal connector serving a catalog's information_schema tables.
 */
public InformationSchemaConnector(String catalogName, NodeManager nodeManager, Metadata metadata)
{
    // validate everything up front, then build the collaborators
    requireNonNull(catalogName, "catalogName is null");
    requireNonNull(nodeManager, "nodeManager is null");
    requireNonNull(metadata, "metadata is null");
    this.pageSourceProvider = new InformationSchemaPageSourceProvider(metadata);
    this.splitManager = new InformationSchemaSplitManager(nodeManager);
    this.metadata = new InformationSchemaMetadata(catalogName);
}
// Builds node selectors for split scheduling. Reads all tuning knobs from
// NodeSchedulerConfig and, when a non-legacy network topology is configured,
// pre-creates one counter per topology segment depth (plus one for the
// "no match" case).
public NodeScheduler(
        NetworkLocationCache networkLocationCache,
        NetworkTopology networkTopology,
        NodeManager nodeManager,
        NodeSchedulerConfig config,
        NodeTaskMap nodeTaskMap)
{
    this.networkLocationCache = networkLocationCache;
    this.nodeManager = nodeManager;
    this.minCandidates = config.getMinCandidates();
    this.includeCoordinator = config.isIncludeCoordinator();
    this.doubleScheduling = config.isMultipleTasksPerNodeEnabled();
    this.maxSplitsPerNode = config.getMaxSplitsPerNode();
    this.maxSplitsPerNodePerTaskWhenFull = config.getMaxPendingSplitsPerNodePerTask();
    this.nodeTaskMap = requireNonNull(nodeTaskMap, "nodeTaskMap is null");
    // per-node limit must exceed the per-task-when-full limit for the
    // back-pressure logic to make progress
    checkArgument(maxSplitsPerNode > maxSplitsPerNodePerTaskWhenFull, "maxSplitsPerNode must be > maxSplitsPerNodePerTaskWhenFull");
    this.useNetworkTopology = !config.getNetworkTopology().equals(LEGACY_NETWORK_TOPOLOGY);
    ImmutableList.Builder<CounterStat> builder = ImmutableList.builder();
    if (useNetworkTopology) {
        networkLocationSegmentNames = ImmutableList.copyOf(networkTopology.getLocationSegmentNames());
        // one counter per segment depth, plus one extra for splits placed
        // with no topological match
        for (int i = 0; i < networkLocationSegmentNames.size() + 1; i++) {
            builder.add(new CounterStat());
        }
    }
    else {
        networkLocationSegmentNames = ImmutableList.of();
    }
    topologicalSplitCounters = builder.build();
}
// Test helper: wires a SourcePartitionedScheduler over the plan's split
// source using a fixed, coordinator-excluding node scheduling policy.
private static SourcePartitionedScheduler getSourcePartitionedScheduler(
        StageExecutionPlan plan,
        SqlStageExecution stage,
        NodeManager nodeManager,
        NodeTaskMap nodeTaskMap,
        int splitBatchSize)
{
    NodeSchedulerConfig schedulerConfig = new NodeSchedulerConfig()
            .setIncludeCoordinator(false)
            .setMaxSplitsPerNode(20)
            .setMaxPendingSplitsPerNodePerTask(0);
    NodeScheduler scheduler = new NodeScheduler(new LegacyNetworkTopology(), nodeManager, schedulerConfig, nodeTaskMap);
    SplitSource source = plan.getDataSource().get();
    SplitPlacementPolicy policy = new SplitPlacementPolicy(scheduler.createNodeSelector(source.getDataSourceName()), stage::getAllTasks);
    return new SourcePartitionedScheduler(stage, source, policy, splitBatchSize);
}
@Test
public void testGetCoordinators()
        throws Exception
{
    // a discovery-backed manager should report exactly the known coordinator
    NodeManager nodeManager = new DiscoveryNodeManager(selector, nodeInfo, new NoOpFailureDetector(), expectedVersion, testHttpClient);
    assertEquals(nodeManager.getCoordinators(), ImmutableSet.of(coordinator));
}
// Produces splitsPerNode splits per node for TPC-H table scans.
public TpchSplitManager(String connectorId, NodeManager nodeManager, int splitsPerNode)
{
    // validate first; a non-positive split count is a configuration error
    checkArgument(splitsPerNode > 0, "splitsPerNode must be at least 1");
    this.splitsPerNode = splitsPerNode;
    this.connectorId = connectorId;
    this.nodeManager = nodeManager;
}
// Thin connector wrapper around a Rest implementation.
public RestConnector(NodeManager nodeManager, Rest rest)
{
    this.rest = rest;
    this.nodeManager = nodeManager;
}
// Split manager for the REST connector; retains the NodeManager for later
// split generation (usage not visible in this snippet).
// NOTE(review): stored without a null check — confirm callers never pass null.
public RestSplitManager(NodeManager nodeManager)
{
    this.nodeManager = nodeManager;
}
// Split manager for Kudu table scans; validates and stores its collaborators.
@Inject
public KuduSplitManager(NodeManager nodeManager, KuduClientManager kuduClientManager)
{
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.kuduClientManager = requireNonNull(kuduClientManager, "kuduClientManager is null");
}
// Manages Kudu client state configured from KuduConfig.
// NOTE(review): the injected nodeManager is accepted but never stored or
// used, and kuduConfig is stored without a null check — confirm both are
// intentional.
@Inject
KuduClientManager(KuduConfig kuduConfig, NodeManager nodeManager)
{
    this.kuduConfig = kuduConfig;
}
/**
 * Setter injection of the engine's NodeManager.
 *
 * <p>Synchronized — presumably so the injected reference is safely published
 * to threads other than the injector's; confirm against the reading code.
 *
 * @throws NullPointerException if {@code nodeManager} is null
 */
@Inject
public synchronized void setNodeManager(NodeManager nodeManager)
{
    // fixed message: previously said "node is null", which did not match the
    // parameter name
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
}
// Setter injection of the engine's NodeManager.
// NOTE(review): no null check here, unlike similar injected setters
// elsewhere — consider failing fast on a null argument.
@Inject
public void setNodeManager(NodeManager nodeManager)
{
    this.nodeManager = nodeManager;
}
// Supplies Raptor node information for this connector; only the string form
// of the connector id is retained.
@Inject
public RaptorNodeSupplier(NodeManager nodeManager, RaptorConnectorId connectorId)
{
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.connectorId = requireNonNull(connectorId, "connectorId is null").toString();
}
// Factory for the indexed TPC-H test connector.
public IndexedTpchConnectorFactory(NodeManager nodeManager, TpchIndexSpec indexSpec, int defaultSplitsPerNode)
{
    // primitive stored directly; reference arguments are null-checked
    this.defaultSplitsPerNode = defaultSplitsPerNode;
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
    this.indexSpec = requireNonNull(indexSpec, "indexSpec is null");
}
// Factory for the JMX connector; validates and stores the MBean server and
// node manager it will hand to created connectors.
public JmxConnectorFactory(MBeanServer mbeanServer, NodeManager nodeManager)
{
    this.mbeanServer = requireNonNull(mbeanServer, "mbeanServer is null");
    this.nodeManager = requireNonNull(nodeManager, "nodeManager is null");
}