void interruptAndJoin() throws InterruptedException {
  Daemon daemonCopy = null;
  synchronized (this) {
    if (isRunning()) {
      daemon.interrupt();
      daemonCopy = daemon;
    }
  }
  if (daemonCopy != null) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Wait for lease checker to terminate");
    }
    daemonCopy.join();
  }
}
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }

  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        PeerCache.this.run();
      } catch (InterruptedException e) {
        // noop
      } finally {
        PeerCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(PeerCache.this);
    }
  });
  daemon.start();
}
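/*
 * A minimal, self-contained sketch of the start/interrupt/join lifecycle
 * shown by startExpiryDaemon and interruptAndJoin above, assuming
 * org.apache.hadoop.util.Daemon. The "ExpiringCache" class and its
 * eviction loop are hypothetical stand-ins, not HDFS code; they only
 * illustrate guarding the daemon handle with the owner's monitor and
 * joining on an unsynchronized copy.
 */
import org.apache.hadoop.util.Daemon;

class ExpiringCache {
  private Daemon daemon; // guarded by "this"

  synchronized void start() {
    if (daemon != null) {
      return; // already started
    }
    daemon = new Daemon(new Runnable() {
      @Override
      public void run() {
        try {
          while (!Thread.currentThread().isInterrupted()) {
            evictExpired();     // hypothetical eviction step
            Thread.sleep(1000); // poll once a second
          }
        } catch (InterruptedException e) {
          // interrupt is the shutdown signal; exit the loop
        }
      }
    });
    daemon.start();
  }

  void stop() throws InterruptedException {
    Daemon copy;
    synchronized (this) {
      copy = daemon;
      if (copy != null) {
        copy.interrupt();
      }
    }
    if (copy != null) {
      copy.join(); // join outside the lock so the worker can finish
    }
  }

  private void evictExpired() {
    // hypothetical: drop expired entries here
  }
}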
public Daemon recoverBlocks(
    final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          recoverBlock(b);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
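/*
 * A sketch of the fire-and-forget pattern above: the method starts the
 * recovery daemon and returns the Daemon handle so callers (tests in
 * particular, see testZeroLenReplicas below) can join() and wait for the
 * work to finish. The work items and per-item worker are hypothetical.
 */
import java.util.Collection;
import org.apache.hadoop.util.Daemon;

class RecoveryExample {
  Daemon recoverAll(final Collection<String> items) {
    Daemon d = new Daemon(new Runnable() {
      @Override
      public void run() {
        for (String item : items) {
          recoverOne(item); // hypothetical per-item recovery
        }
      }
    });
    d.start();
    return d; // caller may join() to wait for completion
  }

  private void recoverOne(String item) {
    // hypothetical: recover a single item, logging failures
  }
}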
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN)
      .initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
public Daemon recoverBlocks(final String who,
    final Collection<RecoveringBlock> blocks) {
  Daemon d = new Daemon(datanode.threadGroup, new Runnable() {
    @Override
    public void run() {
      for (RecoveringBlock b : blocks) {
        try {
          logRecoverBlock(who, b);
          if (b.isStriped()) {
            new RecoveryTaskStriped((RecoveringStripedBlock) b).recover();
          } else {
            new RecoveryTaskContiguous(b).recover();
          }
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED: " + b, e);
        }
      }
    }
  });
  d.start();
  return d;
}
private void initializeStripedReadThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped reads; pool threads=" + num);
  }
  STRIPED_READ_THREAD_POOL = new ThreadPoolExecutor(1, num, 60,
      TimeUnit.SECONDS, new SynchronousQueue<Runnable>(),
      new Daemon.DaemonFactory() {
        private final AtomicInteger threadIndex = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
          Thread t = super.newThread(r);
          t.setName("stripedRead-" + threadIndex.getAndIncrement());
          return t;
        }
      },
      new ThreadPoolExecutor.CallerRunsPolicy() {
        @Override
        public void rejectedExecution(Runnable runnable,
            ThreadPoolExecutor e) {
          LOG.info("Execution for striped reading rejected, "
              + "Executing in current thread");
          // will run in the current thread
          super.rejectedExecution(runnable, e);
        }
      });
  STRIPED_READ_THREAD_POOL.allowCoreThreadTimeOut(true);
}
private void initializeStripedBlkRecoveryThreadPool(int num) {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Using striped block recovery; pool threads=" + num);
  }
  STRIPED_BLK_RECOVERY_THREAD_POOL = new ThreadPoolExecutor(2, num, 60,
      TimeUnit.SECONDS, new LinkedBlockingQueue<Runnable>(),
      new Daemon.DaemonFactory() {
        private final AtomicInteger threadIdx = new AtomicInteger(0);

        @Override
        public Thread newThread(Runnable r) {
          Thread t = super.newThread(r);
          t.setName("stripedBlockRecovery-" + threadIdx.getAndIncrement());
          return t;
        }
      });
  STRIPED_BLK_RECOVERY_THREAD_POOL.allowCoreThreadTimeOut(true);
}
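/*
 * A minimal sketch of plugging Daemon.DaemonFactory into a JDK executor,
 * as the two pools above (and DirectoryScanner below) do. DaemonFactory
 * implements java.util.concurrent.ThreadFactory, so every worker it
 * creates is a daemon thread and will not keep the JVM alive. The pool
 * size and thread names here are illustrative, not values from HDFS.
 */
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.ThreadFactory;
import java.util.concurrent.atomic.AtomicInteger;
import org.apache.hadoop.util.Daemon;

class DaemonPoolExample {
  static ExecutorService newDaemonPool(int threads) {
    ThreadFactory factory = new Daemon.DaemonFactory() {
      private final AtomicInteger idx = new AtomicInteger(0);

      @Override
      public Thread newThread(Runnable r) {
        Thread t = super.newThread(r); // already a daemon thread
        t.setName("example-worker-" + idx.getAndIncrement());
        return t;
      }
    };
    return Executors.newFixedThreadPool(threads, factory);
  }
}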
/**
 * main() has some simple utility methods.
 * @param argv Command line parameters.
 * @exception Exception if the filesystem does not exist.
 */
public static void main(String[] argv) throws Exception {
  org.apache.hadoop.hdfs.DnsMonitorSecurityManager.setTheManager();
  StringUtils.startupShutdownMessage(SecondaryNameNode.class, argv, LOG);
  Configuration tconf = new Configuration();
  try {
    argv = DFSUtil.setGenericConf(argv, tconf);
  } catch (IllegalArgumentException e) {
    System.err.println(e.getMessage());
    printUsage("");
    return;
  }
  if (argv.length >= 1) {
    SecondaryNameNode secondary = new SecondaryNameNode(tconf);
    int ret = secondary.processArgs(argv);
    System.exit(ret);
  }

  // Create a never-ending daemon
  Daemon checkpointThread = new Daemon(new SecondaryNameNode(tconf));
  checkpointThread.start();
}
DirectoryScanner(DataNode dn, FSDataset dataset, Configuration conf) {
  this.datanode = dn;
  this.dataset = dataset;
  int interval = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_INTERVAL_KEY,
      DFS_DATANODE_DIRECTORYSCAN_INTERVAL_DEFAULT);
  scanPeriodMsecs = interval * 1000L; // msec
  int numThreads = conf.getInt(DFS_DATANODE_DIRECTORYSCAN_THREADS_KEY,
      DFS_DATANODE_DIRECTORYSCAN_THREADS_DEFAULT);

  reportCompileThreadPool = Executors.newFixedThreadPool(numThreads,
      new Daemon.DaemonFactory());
  threadPoolExecutor = new ScheduledThreadPoolExecutor(1,
      new Daemon.DaemonFactory());

  this.delta = new FSDatasetDelta();
  this.dataset.setDatasetDelta(delta);
}
private void initDataXceiver(Configuration conf) throws IOException {
  String address = NetUtils.getServerAddress(conf,
      "dfs.datanode.bindAddress", "dfs.datanode.port",
      FSConstants.DFS_DATANODE_ADDRESS_KEY);
  InetSocketAddress socAddr = NetUtils.createSocketAddr(address);
  // find free port
  ServerSocket ss = (socketWriteTimeout > 0)
      ? ServerSocketChannel.open().socket()
      : new ServerSocket();
  Server.bind(ss, socAddr,
      conf.getInt("dfs.datanode.xceiver.listen.queue.size", 128));
  ss.setReceiveBufferSize(DEFAULT_DATA_SOCKET_SIZE);
  // adjust machine name with the actual port
  int tmpPort = ss.getLocalPort();
  selfAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
      tmpPort);
  LOG.info("Opened info server at " + tmpPort);

  this.threadGroup = new ThreadGroup("dataXceiverServer");
  this.dataXceiverServer = new Daemon(threadGroup,
      new DataXceiverServer(ss, conf, this));
  this.threadGroup.setDaemon(true); // auto destroy when empty
}
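/*
 * A sketch of the ThreadGroup idiom used above: the acceptor daemon and
 * any per-connection threads live in one group, and marking the group
 * as a daemon group lets it destroy itself once its last thread exits.
 * The Runnable parameter is a hypothetical placeholder for
 * DataXceiverServer; only the wiring is illustrated.
 */
import org.apache.hadoop.util.Daemon;

class XceiverGroupExample {
  private ThreadGroup threadGroup;
  private Daemon server;

  void init(Runnable acceptLoop) {
    threadGroup = new ThreadGroup("dataXceiverServer");
    server = new Daemon(threadGroup, acceptLoop);
    threadGroup.setDaemon(true); // auto destroy when empty
    server.start();
  }
}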
public Daemon recoverBlocks(final int namespaceId, final Block[] blocks,
    final DatanodeInfo[][] targets, long processStartTime) {
  final long deadline = processStartTime + blkRecoveryTimeout;
  Daemon d = new Daemon(threadGroup, new Runnable() {
    /** Recover a list of blocks. It is run by the primary datanode. */
    public void run() {
      for (int i = 0; i < blocks.length; i++) {
        try {
          logRecoverBlock("NameNode", namespaceId, blocks[i], targets[i]);
          recoverBlock(namespaceId, blocks[i], false, targets[i], true,
              deadline);
        } catch (IOException e) {
          LOG.warn("recoverBlocks FAILED, blocks[" + i + "]=" + blocks[i], e);
        }
      }
    }
  });
  d.start();
  return d;
}
private synchronized void startExpiryDaemon() {
  // start daemon only if not already started
  if (isDaemonStarted()) {
    return;
  }

  daemon = new Daemon(new Runnable() {
    @Override
    public void run() {
      try {
        SocketCache.this.run();
      } catch (InterruptedException e) {
        // noop
      } finally {
        SocketCache.this.clear();
      }
    }

    @Override
    public String toString() {
      return String.valueOf(SocketCache.this);
    }
  });
  daemon.start();
}
/**
 * Start services required in active state
 *
 * @throws IOException
 */
void startActiveServices() throws IOException {
  LOG.info("Starting services required for active state");
  LOG.info("Catching up to latest edits from old active before "
      + "taking over writer role in edits logs");
  blockManager.getDatanodeManager().markAllDatanodesStale();
  if (isClusterInSafeMode()) {
    if (!isInSafeMode()
        || (isInSafeMode() && safeMode.isPopulatingReplicationQueues())) {
      LOG.info("Reprocessing replication and invalidation queues");
      blockManager.processMisReplicatedBlocks();
    }
  }
  leaseManager.startMonitor();
  startSecretManagerIfNecessary();

  // ResourceMonitor required only at ActiveNN. See HDFS-2914
  this.nnrmthread = new Daemon(new NameNodeResourceMonitor());
  nnrmthread.start();

  if (erasureCodingEnabled) {
    erasureCodingManager.activate();
  }
}
private void initDataXceiver(Configuration conf) throws IOException {
  // find free port or use privileged port provided
  ServerSocket ss;
  if (secureResources == null) {
    InetSocketAddress addr = DataNode.getStreamingAddr(conf);
    ss = (dnConf.socketWriteTimeout > 0)
        ? ServerSocketChannel.open().socket()
        : new ServerSocket();
    Server.bind(ss, addr, 0);
  } else {
    ss = secureResources.getStreamingSocket();
  }
  ss.setReceiveBufferSize(HdfsConstants.DEFAULT_DATA_SOCKET_SIZE);

  streamingAddr = new InetSocketAddress(ss.getInetAddress().getHostAddress(),
      ss.getLocalPort());
  LOG.info("Opened streaming server at " + streamingAddr);

  this.threadGroup = new ThreadGroup("dataXceiverServer");
  this.dataXceiverServer = new Daemon(threadGroup,
      new DataXceiverServer(ss, conf, this));
  this.threadGroup.setDaemon(true); // auto destroy when empty
}
/**
 * BlockRecoveryFI_05. One DN throws RecoveryInProgressException.
 *
 * @throws IOException in case of an error
 */
@Test
public void testRecoveryInProgressException()
    throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  try {
    DataNode spyDN = spy(dn);
    doThrow(
        new RecoveryInProgressException("Replica recovery is in progress"))
        .when(spyDN).initReplicaRecovery(any(RecoveringBlock.class));
    Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
    d.join();
    verify(spyDN, never())
        .syncBlock(any(RecoveringBlock.class), anyListOf(BlockRecord.class));
  } catch (Exception e) {
    e.printStackTrace();
  }
}
/**
 * Check and trigger safe mode if needed.
 */
private void checkMode() {
  if (needEnter()) {
    enter();
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // the threshold is reached
  if (!isOn()                              // safe mode is off
      || extension <= 0 || threshold <= 0) {  // don't need to wait
    this.leave(true); // leave safe mode
    return;
  }
  if (reached > 0) {
    // threshold has already been reached before
    reportStatus("STATE* Safe mode ON.", false);
    return;
  }
  // start monitor
  reached = now();
  smmthread = new Daemon(new SafeModeMonitor());
  smmthread.start();
  reportStatus("STATE* Safe mode extension entered.", true);
}
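/*
 * A minimal sketch of the monitor idiom behind SafeModeMonitor and
 * NameNodeResourceMonitor above: a Runnable that polls a condition on a
 * fixed interval and exits on its own once the condition clears. The
 * condition and interval below are hypothetical, not the HDFS safe-mode
 * logic.
 */
import org.apache.hadoop.util.Daemon;

class MonitorExample {
  private volatile boolean conditionCleared = false;

  void startMonitor() {
    Daemon monitor = new Daemon(new Runnable() {
      @Override
      public void run() {
        while (!conditionCleared) { // hypothetical exit condition
          try {
            Thread.sleep(1000);     // recheck every second
          } catch (InterruptedException e) {
            return;                 // treat interrupt as shutdown
          }
        }
        // condition cleared: perform the state transition here
      }
    });
    monitor.start();
  }
}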