/**
 * Very transient.
 *
 * @param csapApp
 * @param timeOutSeconds
 * @param outputWriter
 */
public TransferManager( Application csapApp, int timeOutSeconds, BufferedWriter outputWriter ) {

    this.csapApp = csapApp;
    logger.debug( "Number of workers: {}", csapApp.lifeCycleSettings().getNumberWorkerThreads() );

    this.timeOutSeconds = timeOutSeconds;
    osCommandRunner = new OsCommandRunner( timeOutSeconds, 1, "TransferMgr" );
    this.globalWriterForResults = outputWriter;

    updateProgress( "\nExecuting distribution using : "
            + csapApp.lifeCycleSettings().getNumberWorkerThreads() + " threads.\n\n" );

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapFileTransfer-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    fileTransferService = Executors.newFixedThreadPool(
            csapApp.lifeCycleSettings().getNumberWorkerThreads(), schedFactory );

    fileTransferComplete = new ExecutorCompletionService<String>( fileTransferService );
}
public CsapEventClient( ) {

    BasicThreadFactory eventThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapEventPost-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY + 1 )
            .build();

    eventPostQueue = new ArrayBlockingQueue<>( MAX_EVENT_BACKLOG );

    // Use a single thread to sequence and post
    // eventPostPool = Executors.newFixedThreadPool(1, schedFactory, queue);
    // really only needs to be 1 - adding the others for lt scenario
    eventPostPool = new ThreadPoolExecutor( 1, 1, 30, TimeUnit.SECONDS,
            eventPostQueue, eventThreadFactory );

    eventPostCompletionService = new ExecutorCompletionService<String>( eventPostPool );
}
protected void scheduleCollection( Runnable collector ) {

    // Thread commandThread = new Thread( this );
    // commandThread.start();

    String scheduleName = collector.getClass().getSimpleName() + "_" + collectionIntervalSeconds;

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( scheduleName + "-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    // Single collection thread
    scheduledExecutorService = Executors.newScheduledThreadPool( 1, schedFactory );

    int initialSleep = 10;

    if ( this.collectionIntervalSeconds >= 60 ) {
        initialSleep += 30 + rg.nextInt( 30 );
    }

    scheduledExecutorService.scheduleAtFixedRate( collector, initialSleep,
            collectionIntervalSeconds, TimeUnit.SECONDS );

    logger.info( "Adding Job: {}", scheduleName );
}
@PostConstruct
public void init() {
    scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
            new BasicThreadFactory.Builder().namingPattern("SendNodeServerInfo-schedule-pool-%d").daemon(true).build());
    scheduledExecutorService.scheduleAtFixedRate(() -> {
        // Publish the node load information to ZK
        if (!CollectionUtils.isEmpty(dataCenterChannelStore.getAllChannels())) {
            dataCenterChannelStore.getAllChannels().stream().forEach(e -> {
                log.info("channel id:{}, {}", e.id(), e);
            });
        }
        applicationEventPublisher.publishEvent(
                NodeServerInfoEvent.builder()
                        .name(goPushNodeServerConfig.getName())
                        .nodeServerInfo(watch())
                        .build());
        // Already written to ZK, so sending NodeInfoReq is not strictly necessary
        nodeSender.send(NodeInfoReq.builder().build());
    }, delay, delay, TimeUnit.MILLISECONDS);
}
public GremlinExecutor create() {
    final BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("gremlin-executor-default-%d").build();

    final AtomicBoolean poolCreatedByBuilder = new AtomicBoolean();
    final AtomicBoolean suppliedExecutor = new AtomicBoolean(true);
    final AtomicBoolean suppliedScheduledExecutor = new AtomicBoolean(true);

    final ExecutorService es = Optional.ofNullable(executorService).orElseGet(() -> {
        poolCreatedByBuilder.set(true);
        suppliedExecutor.set(false);
        return Executors.newScheduledThreadPool(4, threadFactory);
    });
    executorService = es;

    final ScheduledExecutorService ses = Optional.ofNullable(scheduledExecutorService).orElseGet(() -> {
        // if the pool is created by the builder and we need another just re-use it, otherwise create
        // a new one of those guys
        suppliedScheduledExecutor.set(false);
        return (poolCreatedByBuilder.get()) ?
                (ScheduledExecutorService) es : Executors.newScheduledThreadPool(4, threadFactory);
    });
    scheduledExecutorService = ses;

    return new GremlinExecutor(this, suppliedExecutor.get(), suppliedScheduledExecutor.get());
}
/**
 * Initialize application data.
 */
private static void initDbData(final MyTvData data) {
    final TvService tvService = new TvServiceImpl();
    makeCache(tvService);

    // Start the crawl task
    ExecutorService executorService = Executors
            .newSingleThreadExecutor(new BasicThreadFactory.Builder()
                    .namingPattern("Mytv_Crawl_Task_%d").build());
    executorService.execute(new Runnable() {

        @Override
        public void run() {
            runCrawlTask(data, tvService);
        }
    });
    executorService.shutdown();

    // Start the daily scheduled task
    logger.info("create everyday crawl task.");
    createEverydayCron(data, tvService);
}
/**
 * Constructor sets up the scheduled executor service that runs a background task to
 * calculate non-accumulating gauge readings at periodic intervals.
 *
 * @param garbageCollectorMetricSet a metric set that collects counts and times of garbage collections
 * @param interval                  the time interval over which to calculate non-accumulating gauge readings
 *                                  for all the gauges in {@code garbageCollectorMetricSet}
 * @param scheduledExecutorService  scheduled executor service that runs the task to calculate
 *                                  non-accumulating gauge readings at a frequency determined by
 *                                  {@code interval}
 */
public NonAccumulatingGarbageCollectorMetricSet(
        GarbageCollectorMetricSet garbageCollectorMetricSet, long interval,
        ScheduledExecutorService scheduledExecutorService) {
    this.garbageCollectorMetricSet = garbageCollectorMetricSet;
    this.interval = interval;
    previousValues = new HashMap<String, Long>();
    nonAccumulatingValues = new ConcurrentHashMap<String, Long>();

    if (scheduledExecutorService == null) {
        BasicThreadFactory basicThreadFactory = new BasicThreadFactory.Builder()
                .namingPattern("metrics-gc-stats-update-%d")
                .daemon(false)
                .priority(Thread.NORM_PRIORITY)
                .build();
        this.scheduledExecutorService = Executors.newSingleThreadScheduledExecutor(basicThreadFactory);
    } else {
        this.scheduledExecutorService = scheduledExecutorService;
    }

    scheduleBackgroundCollectionOfNonAccumulatingValues();
}
@Override
public ExecutorService createConsumerThreadPool(int numberOfThreads, int queueCapacity) {
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("msb-consumer-thread-%d")
            .build();

    BlockingQueue<Runnable> queue;
    if (queueCapacity == QUEUE_SIZE_UNLIMITED) {
        queue = new LinkedBlockingQueue<>();
    } else {
        queue = new ArrayBlockingQueue<>(queueCapacity);
    }

    return new ThreadPoolExecutor(numberOfThreads, numberOfThreads, 0L, TimeUnit.MILLISECONDS, queue, threadFactory);
}
public FrontierContainer(Job job, Accumulators accumulators, MandrelClient client) {
    super(accumulators, job, client);
    context.setDefinition(job);

    // Init stores
    MetadataStore metadatastore = job.getDefinition().getStores().getMetadataStore().build(context);
    metadatastore.init();
    MetadataStores.add(job.getId(), metadatastore);

    // Init frontier
    frontier = job.getDefinition().getFrontier().build(context);

    // Revisitor
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("frontier-" + job.getId() + "-%d").daemon(true)
            .priority(Thread.MAX_PRIORITY).build();
    executor = Executors.newFixedThreadPool(1, threadFactory);
    revisitor = new Revisitor(frontier, metadatastore);
    executor.submit(revisitor);

    current.set(ContainerStatus.INITIATED);
}
public Fetcher( E environment, Class<I> inputType, Class<U> unitType ) {
    this.environment = environment;
    handlerCreator = new SourceHandlerCreator<>( inputType, unitType );
    this.inputType = inputType;
    this.unitType = unitType;
    this.optimizer = ThreadCountOptimizer.withDefaultStrategies( environment );
    this.sourceWatchdogInterval = ( new DurationParser() )
            .parse( environment.getConfiguration().getJSONObject( "fetcher" ).getString( "source-watchdog-interval" ) );

    ThreadFactory threads = new BasicThreadFactory.Builder().namingPattern( "FetchThread[initial]" ).build();
    fetchPool = new ThreadPoolExecutor(
            1, 1, // thread count is set to the real initial value on the first run()
            0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(),
            threads );
}
private void startQueryExpirer() {
    ThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("QueryExpirer-%d")
            .daemon(true)
            .priority(Thread.NORM_PRIORITY)
            .build();
    queryExpirer = Executors.newSingleThreadScheduledExecutor(factory);

    long expiryRunInterval = conf.getLong(QUERY_EXPIRY_INTERVAL_MILLIS, DEFAULT_QUERY_EXPIRY_INTERVAL_MILLIS);
    queryExpirer.scheduleWithFixedDelay(new Runnable() {
        @Override
        public void run() {
            try {
                expireQueries();
            } catch (Exception e) {
                incrCounter(QUERY_EXPIRY_FAILURE_COUNTER);
                log.error("Unable to expire queries", e);
            }
        }
    }, expiryRunInterval, expiryRunInterval, TimeUnit.MILLISECONDS);

    log.info("Enabled periodic expiry of queries at {} millis interval", expiryRunInterval);
}
public ZkOffsetStorageImpl(LogInfoStorage logInfoStorage, AsyncCuratorFramework asyncCuratorFramework) {
    this.logInfoStorage = logInfoStorage;
    this.asyncCuratorFramework = asyncCuratorFramework;
    offsetThreadPool = Executors.newSingleThreadExecutor(
            new BasicThreadFactory.Builder()
                    .uncaughtExceptionHandler((t, e) -> logger
                            .error("Uncaught exception of thread: " + t.getName(), e))
                    .build());
}
public HostStatusManager( Application csapApplication, int numberOfThreads, ArrayList<String> hostsToQuery ) {

    this.csapApp = csapApplication;

    csapApp.loadCacheFromDisk( getAlertHistory(), this.getClass().getSimpleName() );

    alertThrottleTimer = CsapSimpleCache.builder(
            csapApplication.getCsapCoreService().getAlerts().getThrottle().getFrequency(),
            CsapSimpleCache.parseTimeUnit(
                    csapApplication.getCsapCoreService().getAlerts().getThrottle().getTimeUnit(),
                    TimeUnit.HOURS ),
            HostStatusManager.class,
            "Global Alert Throttle" );

    logger.warn( "Constructed with thread count: {}, connectionTimeout: {} Host Count: {}, \n Hosts: {}, \n Alert: {}",
            numberOfThreads, this.connectionTimeoutSeconds, hostsToQuery.size(), hostsToQuery,
            csapApplication.getCsapCoreService().getAlerts() );

    BasicThreadFactory statusFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapHostStatus-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    hostStatusWorkers = Executors.newFixedThreadPool( numberOfThreads, statusFactory );

    hostStatusService = new ExecutorCompletionService<AgentStatus>( hostStatusWorkers );

    hostList = new CopyOnWriteArrayList<String>( hostsToQuery );

    initialize_refresh_worker();

    restartHostRefreshTimer( 3 );
}
private void initialize_refresh_worker() {

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapHostJobsScheduler-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    hostStatusScheduler = Executors.newScheduledThreadPool( 1, schedFactory );
}
@PostConstruct
public void init() {
    scheduledExecutorService = new ScheduledThreadPoolExecutor(1,
            new BasicThreadFactory.Builder().namingPattern("SendDataCenterInfo-schedule-pool-%d").daemon(true).build());
    scheduledExecutorService.scheduleAtFixedRate(() -> applicationEventPublisher.publishEvent(DataCenterInfoEvent.builder()
            .name(goPushDataCenterConfig.getName())
            .dataCenterInfo(watch())
            .build()), delay, delay, TimeUnit.MILLISECONDS);
}
public GenerateFullPbf(OsmMerger osmMerger, String inputDirectoryPath, String outputDirectoryPath,
                       String outputFileName, int nbThreads) {
    this.osmMerger = osmMerger;
    this.inputDirectoryPath = inputDirectoryPath;
    this.outputDirectoryPath = outputDirectoryPath;
    this.outputFileName = outputFileName;

    BasicThreadFactory threadFactory = new Builder().namingPattern("mappy-GenerateFullPbf-%d").daemon(false).build();
    executorService = new ThreadPoolExecutor(nbThreads, nbThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<>(), threadFactory);
}
@Autowired
public ConversationIndexer(SolrCoreContainer solrServer, StoreService storeService) {
    this.solrServer = solrServer;
    this.storeService = storeService;

    this.indexerPool = Executors.newSingleThreadExecutor(
            new BasicThreadFactory.Builder().namingPattern("conversation-indexing-thread-%d").daemon(true).build());
}
public ExecutorService createExecuterService() {
    return Executors.newFixedThreadPool(numThreads <= 0 ? DEFAULT_NUM_THREADS : numThreads,
            new BasicThreadFactory.Builder()
                    .daemon(true)
                    .namingPattern(THREAD_NAME)
                    .build());
}
public static void start() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("stallion-health-tracker-thread-%d")
            .build();
    instance().timedChecker = new ScheduledThreadPoolExecutor(2, factory);
    instance().timedChecker.scheduleAtFixedRate(instance().metrics, 0, 1, TimeUnit.MINUTES);
    instance().timedChecker.scheduleAtFixedRate(instance().dailyMetrics, 0, 24 * 60, TimeUnit.MINUTES);
}
protected AsyncCoordinator() {
    threads = new ArrayList<>();
    int poolSize = 4;
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("stallion-async-task-runnable-%d")
            .build();
    // Create a fixed-size thread pool for running async tasks
    pool = Executors.newFixedThreadPool(poolSize, factory);
}
private JobCoordinator() {
    queue = new PriorityBlockingQueue<>();
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("stallion-job-execution-thread-%d")
            .build();
    // Create a fixed-size thread pool for job execution
    pool = Executors.newFixedThreadPool(25, factory);
    registeredJobs = new HashSet<>();
}
public static void main(String[] args) throws ExecutionException, InterruptedException {
    List<Future<?>> randomTasks = new LinkedList<>();

    ExecutorService executorService = Executors.newCachedThreadPool(
            new BasicThreadFactory.Builder()
                    .daemon(true)
                    .namingPattern("ExampleThread")
                    .build()
    );

    try {
        Future<String> result = executorService.submit(new HelloCallable());
        System.out.printf("Result %s\n", result.get()); // will wait until the submitted job is done

        for (int count = 0; count < 10; count++) {
            randomTasks.add(executorService.submit(new RandomTask(count)));
        }
        waitForTheFuture(randomTasks);
    } finally {
        executorService.shutdown(); // make sure you shut it down
    }
}
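The example above refers to HelloCallable, RandomTask, and waitForTheFuture, which are not part of this listing. A minimal sketch of what they might look like follows; these are illustrative stand-ins rather than the original classes, and waitForTheFuture is assumed to live in the same class as main:

import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.Future;

// Illustrative stand-in: returns a greeting that the caller waits on via Future.get().
class HelloCallable implements Callable<String> {
    @Override
    public String call() {
        return "Hello from " + Thread.currentThread().getName();
    }
}

// Illustrative stand-in: a small unit of work identified by an id.
class RandomTask implements Runnable {
    private final int id;

    RandomTask(int id) {
        this.id = id;
    }

    @Override
    public void run() {
        System.out.printf("Task %d ran on %s%n", id, Thread.currentThread().getName());
    }
}

// Illustrative stand-in, assumed to sit in the same class as main: blocks until every task completes.
static void waitForTheFuture(List<Future<?>> futures) throws ExecutionException, InterruptedException {
    for (Future<?> future : futures) {
        future.get();
    }
}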
private void initialize() {
    BasicThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("download-slave-%d")
            .build();
    executor = Executors.newFixedThreadPool(SLAVE_POOL_SIZE, factory);

    statistics.setDownloadFolderLocation(DOWNLOAD_DIRECTORY_LOCATION);
    safekeeper.loadIDsFromFile();
    stopWatch.start();
}
private Manager(final Builder builder) {
    this.loadBalancingStrategy = builder.loadBalancingStrategy;
    this.authProps = builder.authProps;
    this.contactPoints = builder.getContactPoints();

    connectionPoolSettings = new Settings.ConnectionPoolSettings();
    connectionPoolSettings.maxInProcessPerConnection = builder.maxInProcessPerConnection;
    connectionPoolSettings.minInProcessPerConnection = builder.minInProcessPerConnection;
    connectionPoolSettings.maxSimultaneousUsagePerConnection = builder.maxSimultaneousUsagePerConnection;
    connectionPoolSettings.minSimultaneousUsagePerConnection = builder.minSimultaneousUsagePerConnection;
    connectionPoolSettings.maxSize = builder.maxConnectionPoolSize;
    connectionPoolSettings.minSize = builder.minConnectionPoolSize;
    connectionPoolSettings.maxWaitForConnection = builder.maxWaitForConnection;
    connectionPoolSettings.maxWaitForSessionClose = builder.maxWaitForSessionClose;
    connectionPoolSettings.maxContentLength = builder.maxContentLength;
    connectionPoolSettings.reconnectInitialDelay = builder.reconnectInitialDelay;
    connectionPoolSettings.reconnectInterval = builder.reconnectInterval;
    connectionPoolSettings.resultIterationBatchSize = builder.resultIterationBatchSize;
    connectionPoolSettings.enableSsl = builder.enableSsl;
    connectionPoolSettings.trustCertChainFile = builder.trustCertChainFile;
    connectionPoolSettings.keyCertChainFile = builder.keyCertChainFile;
    connectionPoolSettings.keyFile = builder.keyFile;
    connectionPoolSettings.keyPassword = builder.keyPassword;
    connectionPoolSettings.channelizer = builder.channelizer;

    sslContextOptional = Optional.ofNullable(builder.sslContext);

    this.factory = new Factory(builder.nioPoolSize);
    this.serializer = builder.serializer;
    this.executor = Executors.newScheduledThreadPool(builder.workerPoolSize,
            new BasicThreadFactory.Builder().namingPattern("gremlin-driver-worker-%d").build());
}
protected void initAsyncJobExecutionThreadPool() {
    if (threadPoolQueue == null) {
        LOGGER.info("Creating thread pool queue of size {}", queueSize);
        threadPoolQueue = new ArrayBlockingQueue<>(queueSize);
    }

    if (executorService == null) {
        LOGGER.info("Creating executor service with corePoolSize {}, maxPoolSize {} and keepAliveTime {}",
                corePoolSize, maxPoolSize, keepAliveTime);

        BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
                .namingPattern("flowable-async-job-executor-thread-%d").build();
        executorService = new ThreadPoolExecutor(corePoolSize, maxPoolSize, keepAliveTime,
                TimeUnit.MILLISECONDS, threadPoolQueue, threadFactory);
    }
}
private RunOnShutdownScheduledExecutorDecorator createTimeoutExecutorDecorator(int threadPoolSize) {
    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder()
            .namingPattern("timer-provider-thread-%d")
            .build();
    return new RunOnShutdownScheduledExecutorDecorator("timeout manager", threadPoolSize, threadFactory);
}
private Manager(final Builder builder) {
    validateBuilder(builder);

    this.loadBalancingStrategy = builder.loadBalancingStrategy;
    this.authProps = builder.authProps;
    this.contactPoints = builder.getContactPoints();

    connectionPoolSettings = new Settings.ConnectionPoolSettings();
    connectionPoolSettings.maxInProcessPerConnection = builder.maxInProcessPerConnection;
    connectionPoolSettings.minInProcessPerConnection = builder.minInProcessPerConnection;
    connectionPoolSettings.maxSimultaneousUsagePerConnection = builder.maxSimultaneousUsagePerConnection;
    connectionPoolSettings.minSimultaneousUsagePerConnection = builder.minSimultaneousUsagePerConnection;
    connectionPoolSettings.maxSize = builder.maxConnectionPoolSize;
    connectionPoolSettings.minSize = builder.minConnectionPoolSize;
    connectionPoolSettings.maxWaitForConnection = builder.maxWaitForConnection;
    connectionPoolSettings.maxWaitForSessionClose = builder.maxWaitForSessionClose;
    connectionPoolSettings.maxContentLength = builder.maxContentLength;
    connectionPoolSettings.reconnectInterval = builder.reconnectInterval;
    connectionPoolSettings.resultIterationBatchSize = builder.resultIterationBatchSize;
    connectionPoolSettings.enableSsl = builder.enableSsl;
    connectionPoolSettings.trustCertChainFile = builder.trustCertChainFile;
    connectionPoolSettings.keyCertChainFile = builder.keyCertChainFile;
    connectionPoolSettings.keyFile = builder.keyFile;
    connectionPoolSettings.keyPassword = builder.keyPassword;
    connectionPoolSettings.keepAliveInterval = builder.keepAliveInterval;
    connectionPoolSettings.channelizer = builder.channelizer;

    sslContextOptional = Optional.ofNullable(builder.sslContext);

    nioPoolSize = builder.nioPoolSize;
    workerPoolSize = builder.workerPoolSize;
    port = builder.port;

    this.factory = new Factory(builder.nioPoolSize);
    this.serializer = builder.serializer;
    this.executor = Executors.newScheduledThreadPool(builder.workerPoolSize,
            new BasicThreadFactory.Builder().namingPattern("gremlin-driver-worker-%d").build());
}
public ServerThread( E environment, JSONObject config, ThrottleControl throttle,
        IncomingConnectionHandlerFactory<E> connectionHandlerFactory ) throws IOException {
    this.name = config.getString( "name" );
    setName( "Server-" + name );

    this.server = new ServerSocket( config.getInt( "port" ) );
    this.maximumPoolSize = config.getInt( "poolsize" );

    ThreadFactory threads = new BasicThreadFactory.Builder().namingPattern( name + "-%d" ).build();
    this.queue = new LinkedBlockingQueue<Runnable>();
    threadPool = new ThreadPoolExecutor(
            1, 1, // thread count is set to the real initial value on the first run()
            0L, TimeUnit.MILLISECONDS,
            queue,
            threads );

    this.throttle = throttle;
    this.connectionHandlerFactory = connectionHandlerFactory;

    JSONObject monitor = config.optJSONObject( "monitor" );
    if ( monitor != null ) {
        long warnLimit = JSONUtils.getTime( "warnlimit", monitor, TIME_UNIT.SECOND );
        long frequency = JSONUtils.getTime( "frequency", monitor, TIME_UNIT.SECOND );
        environment.taskRunner().runRepeated( new CommandMonitor( name, warnLimit ), "Command Monitor",
                Duration.ZERO, Duration.ofSeconds( frequency ), false );
    }

    SimpleProgramStateTracker.forContext( CLIENT_TIMING + name )
            .storingIn( "Server.ClientTiming." + name )
            .build();
}
@Bean
public ScheduledExecutorService jobExecutorService() {
    return Executors.newScheduledThreadPool(
            ontrackConfigProperties.getJobs().getPoolSize(),
            new BasicThreadFactory.Builder()
                    .daemon(true)
                    .namingPattern("job-%s")
                    .build()
    );
}
public BoundedBuffer(KhronusConfig config) {
    this.measures = new LinkedBlockingQueue<Measure>(config.getMaximumMeasures());
    this.sender = new Sender(config);
    this.jsonSerializer = new JsonSerializer(config.getSendIntervalMillis(), config.getApplicationName());

    BasicThreadFactory threadFactory = new BasicThreadFactory.Builder().namingPattern("KhronusClientSender").build();
    this.executor = Executors.newScheduledThreadPool(1, threadFactory);
    this.executor.scheduleWithFixedDelay(send(), config.getSendIntervalMillis(),
            config.getSendIntervalMillis(), TimeUnit.MILLISECONDS);

    LOG.debug("Buffer to store metrics created [MaximumMeasures: {}; SendIntervalMillis: {}]",
            config.getMaximumMeasures(), config.getSendIntervalMillis());
}
public synchronized void start() {
    if (getServiceState() != STATE.STARTED) {
        super.start();
    }
    if (!isServerStatePersistenceEnabled) {
        log.info("Server restart is not enabled. Not persisting lens server state");
    } else {
        ThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("Lens-server-snapshotter-Thread-%d")
                .daemon(true)
                .priority(Thread.NORM_PRIORITY)
                .build();
        serverSnapshotScheduler = Executors.newSingleThreadScheduledExecutor(factory);
        serverSnapshotScheduler.scheduleWithFixedDelay(new Runnable() {
            @Override
            public void run() {
                try {
                    final String runId = UUID.randomUUID().toString();
                    logSegregationContext.setLogSegregationId(runId);
                    persistLensServiceState();
                    log.info("SnapShot of Lens Services created");
                } catch (Exception e) {
                    incrCounter(SERVER_STATE_PERSISTENCE_ERRORS);
                    log.error("Unable to persist lens server state", e);
                }
            }
        }, serverStatePersistenceInterval, serverStatePersistenceInterval, TimeUnit.MILLISECONDS);
        log.info("Enabled periodic persistence of lens server state at {} millis interval",
                serverStatePersistenceInterval);
    }
}
private void startQueryCancellationPool() {
    ThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("query-cancellation-pool-Thread-%d")
            .priority(Thread.NORM_PRIORITY)
            .build();
    // Using fixed values for the pool: corePoolSize = maximumPoolSize = 3 and keepAliveTime = 60 secs
    queryCancellationPool = new ThreadPoolExecutor(3, 3, 60, TimeUnit.SECONDS,
            new LinkedBlockingQueue<Runnable>(), factory);
}
@Override
public synchronized void init(HiveConf hiveConf) {
    int numProcs = Runtime.getRuntime().availableProcessors();
    ThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern("Event_Service_Thread-%d")
            .daemon(false)
            .priority(Thread.NORM_PRIORITY)
            .build();
    eventHandlerPool = Executors.newFixedThreadPool(
            hiveConf.getInt(LensConfConstants.EVENT_SERVICE_THREAD_POOL_SIZE, numProcs), factory);
    super.init(hiveConf);
}
/**
 * Create an asynchronous event listener which uses a thread pool to process events.
 *
 * @param poolSize       size of the event processing pool
 * @param timeOutSeconds timeout in seconds after which an idle thread is destroyed
 * @param isDaemon       whether the threads used for processing should be daemon threads;
 *                       if false, the implementation should call stop() to stop the thread pool
 */
public AsyncEventListener(int poolSize, long timeOutSeconds, final boolean isDaemon) {
    eventQueue = new LinkedBlockingQueue<>();

    ThreadFactory factory = new BasicThreadFactory.Builder()
            .namingPattern(getName() + "_AsyncThread-%d")
            .daemon(isDaemon)
            .priority(Thread.NORM_PRIORITY)
            .build();

    // fixed pool with min and max equal to poolSize
    processor = new ThreadPoolExecutor(poolSize, poolSize, timeOutSeconds, TimeUnit.SECONDS,
            eventQueue, factory);
    processor.allowCoreThreadTimeOut(true);
}
public BlockingThreadPoolExecutor(int corePoolSize, String poolName, BlockingQueue<Runnable> workQueue) {
    super(corePoolSize, corePoolSize, 0, TimeUnit.SECONDS, workQueue,
            new BasicThreadFactory.Builder()
                    .namingPattern(poolName + "-%d")
                    .priority(Thread.MAX_PRIORITY).build(),
            defaultHandler);
}
public ServiceJobRunner( Application csapApplication ) {
    this.csapApplication = csapApplication;

    long initialDelay = 5;
    long interval = 60;
    TimeUnit logRotationTimeUnit = TimeUnit.MINUTES;

    if ( Application.isRunningOnDesktop() ) {
        logger.warn( "Setting DESKTOP to seconds" );
        logRotationTimeUnit = TimeUnit.SECONDS;
    }

    logger.warn( "Creating job schedule thread, invoked: {} {}.", interval, logRotationTimeUnit );

    BasicThreadFactory schedFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapLogRotation-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY )
            .build();

    jobTimerService = Executors.newScheduledThreadPool( 1, schedFactory );

    ScheduledFuture<?> jobHandle = jobTimerService
            .scheduleAtFixedRate( () -> findAndRunActiveJobs(), initialDelay, interval, logRotationTimeUnit );

    logger.warn( "Creating job runner thread pool: {} threads. Maximum jobs queued: {}",
            MAX_JOBS_CONCURRENT, MAX_JOBS_QUEUED );

    BasicThreadFactory jobRunnerThreadFactory = new BasicThreadFactory.Builder()
            .namingPattern( "CsapServiceJobRunner-%d" )
            .daemon( true )
            .priority( Thread.NORM_PRIORITY + 1 )
            .build();

    // jobRunnerQueue = new ArrayBlockingQueue<>( MAX_JOBS_QUEUED );
    jobRunnerService = new ThreadPoolExecutor( MAX_JOBS_CONCURRENT, MAX_JOBS_CONCURRENT,
            30, TimeUnit.SECONDS,
            jobRunnerQueue, jobRunnerThreadFactory );
}
public ExecutorServiceObject(final String namingPattern, final int threadSize) {
    workQueue = new LinkedBlockingQueue<>();
    threadPoolExecutor = new ThreadPoolExecutor(threadSize, threadSize, 5L, TimeUnit.MINUTES, workQueue,
            new BasicThreadFactory.Builder().namingPattern(Joiner.on("-").join(namingPattern, "%s")).build());
    threadPoolExecutor.allowCoreThreadTimeOut(true);
}
protected BasicThreadFactory createThreadFactory(Object named) {
    return new BasicThreadFactory.Builder().namingPattern(named.toString() + "-%d").build();
}
public static ExecutorService newFixedThreadPool(int nThreads, String name) {
    BasicThreadFactory factory = new BasicThreadFactory.Builder().namingPattern(name + "-%d")
            .daemon(true).priority(Thread.MAX_PRIORITY).build();
    return new MdcThreadPoolExecutor(nThreads, nThreads, 0L, TimeUnit.MILLISECONDS,
            new LinkedBlockingQueue<Runnable>(), factory);
}
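Across all of these examples the pattern is the same: build a BasicThreadFactory with a naming pattern (and usually a daemon flag and priority), pass it to an Executors factory method or a ThreadPoolExecutor constructor, and shut the pool down when it is no longer needed. Below is a minimal, self-contained sketch of that pattern; the class name, thread name, and schedule are illustrative and not taken from any of the projects above.

import java.util.concurrent.Executors;
import java.util.concurrent.ScheduledExecutorService;
import java.util.concurrent.TimeUnit;

import org.apache.commons.lang3.concurrent.BasicThreadFactory;

public class NamedSchedulerExample {

    public static void main(String[] args) throws InterruptedException {
        // Daemon threads named "example-worker-0", "example-worker-1", ...
        BasicThreadFactory factory = new BasicThreadFactory.Builder()
                .namingPattern("example-worker-%d")
                .daemon(true)
                .priority(Thread.NORM_PRIORITY)
                .build();

        ScheduledExecutorService scheduler = Executors.newScheduledThreadPool(1, factory);

        // The thread name printed here comes from the naming pattern above.
        scheduler.scheduleAtFixedRate(
                () -> System.out.println("tick from " + Thread.currentThread().getName()),
                0, 1, TimeUnit.SECONDS);

        TimeUnit.SECONDS.sleep(3);

        // Shut the pool down explicitly; marking the threads as daemons only keeps them
        // from blocking JVM exit, it does not stop the scheduled task on its own.
        scheduler.shutdown();
    }
}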