public long getProtocolVersion(String protocol,
                               long clientVersion) throws IOException {
  if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID;
  } else if (protocol.equals(DatanodeProtocol.class.getName())) {
    return DatanodeProtocol.versionID;
  } else if (protocol.equals(NamenodeProtocol.class.getName())) {
    return NamenodeProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())) {
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())) {
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
    return RefreshCallQueueProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())) {
    return GetUserMappingsProtocol.versionID;
  } else if (protocol.equals(TraceAdminProtocol.class.getName())) {
    return TraceAdminProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to name node: " + protocol);
  }
}
@Override // DatanodeProtocol
public void errorReport(DatanodeRegistration nodeReg,
                        int errorCode, String msg) throws IOException {
  checkNNStartup();
  String dnName =
      (nodeReg == null) ? "Unknown DataNode" : nodeReg.toString();

  if (errorCode == DatanodeProtocol.NOTIFY) {
    LOG.info("Error report from " + dnName + ": " + msg);
    return;
  }
  verifyRequest(nodeReg);

  if (errorCode == DatanodeProtocol.DISK_ERROR) {
    LOG.warn("Disk error on " + dnName + ": " + msg);
  } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
    LOG.warn("Fatal disk error on " + dnName + ": " + msg);
    namesystem.getBlockManager().getDatanodeManager().removeDatanode(nodeReg);
  } else {
    LOG.info("Error report from " + dnName + ": " + msg);
  }
}
private void handleDiskError(String errMsgr) {
  final boolean hasEnoughResources = data.hasEnoughResource();
  LOG.warn("DataNode.handleDiskError: Keep Running: " + hasEnoughResources);

  // If we have enough active valid volumes then we do not want to
  // shutdown the DN completely.
  int dpError = hasEnoughResources ? DatanodeProtocol.DISK_ERROR
                                   : DatanodeProtocol.FATAL_DISK_ERROR;
  metrics.incrVolumeFailures();

  // inform NameNodes
  for (BPOfferService bpos : blockPoolManager.getAllNamenodeThreads()) {
    bpos.trySendErrorReport(dpError, errMsgr);
  }

  if (hasEnoughResources) {
    scheduleAllBlockReport(0);
    return; // do not shutdown
  }

  LOG.warn("DataNode is shutting down: " + errMsgr);
  shouldRun = false;
}
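For context, trySendErrorReport ultimately issues the DatanodeProtocol.errorReport RPC handled by the NameNode method shown earlier. A minimal sketch of that direct call, assuming a DatanodeProtocol proxy and a DatanodeRegistration are already available in the caller (both variable names below are illustrative, not from the source):

// Hypothetical direct disk-error report to the NameNode.
// 'namenodeProxy' (DatanodeProtocol) and 'dnRegistration' (DatanodeRegistration)
// are assumed to have been obtained during registration.
String msg = "DataNode failed volumes: /data/1/dfs/dn";
namenodeProxy.errorReport(dnRegistration, DatanodeProtocol.DISK_ERROR, msg);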
public static BlockIdCommandProto convert(BlockIdCommand cmd) {
  BlockIdCommandProto.Builder builder = BlockIdCommandProto.newBuilder()
      .setBlockPoolId(cmd.getBlockPoolId());
  switch (cmd.getAction()) {
  case DatanodeProtocol.DNA_CACHE:
    builder.setAction(BlockIdCommandProto.Action.CACHE);
    break;
  case DatanodeProtocol.DNA_UNCACHE:
    builder.setAction(BlockIdCommandProto.Action.UNCACHE);
    break;
  default:
    throw new AssertionError("Invalid action");
  }
  long[] blockIds = cmd.getBlockIds();
  for (int i = 0; i < blockIds.length; i++) {
    builder.addBlockIds(blockIds[i]);
  }
  return builder.build();
}
public static BlockIdCommand convert(BlockIdCommandProto blkIdCmd) {
  int numBlockIds = blkIdCmd.getBlockIdsCount();
  long[] blockIds = new long[numBlockIds];
  for (int i = 0; i < numBlockIds; i++) {
    blockIds[i] = blkIdCmd.getBlockIds(i);
  }
  int action = DatanodeProtocol.DNA_UNKNOWN;
  switch (blkIdCmd.getAction()) {
  case CACHE:
    action = DatanodeProtocol.DNA_CACHE;
    break;
  case UNCACHE:
    action = DatanodeProtocol.DNA_UNCACHE;
    break;
  default:
    throw new AssertionError("Unknown action type: " + blkIdCmd.getAction());
  }
  return new BlockIdCommand(action, blkIdCmd.getBlockPoolId(), blockIds);
}
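A minimal round-trip sketch of the two converters above, assuming they are exposed as static convert overloads on the same helper class; the block pool id and block ids below are illustrative values only:

// Hypothetical round trip: command -> protobuf -> command.
BlockIdCommand original = new BlockIdCommand(DatanodeProtocol.DNA_CACHE,
    "BP-example-pool", new long[] { 1L, 2L, 3L });
BlockIdCommandProto proto = convert(original);
BlockIdCommand restored = convert(proto);
// 'restored' carries the same action, block pool id, and block ids as 'original'.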
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage, false, DF_CAPACITY,
      DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = nameNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  DataNode spyDN = spy(dn);
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN)
      .initReplicaRecovery(any(RecoveringBlock.class));
  Daemon d = spyDN.recoverBlocks("fake NN", initRecoveringBlocks());
  d.join();
  DatanodeProtocol dnP = dn.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
/**
 * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNoReplicaUnderRecovery() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  dn.data.createRbw(StorageType.DEFAULT, block, false);
  try {
    dn.syncBlock(rBlock, initBlockRecords(dn));
    fail("Sync should fail");
  } catch (IOException e) {
    // Assert on the message instead of silently ignoring the startsWith result.
    assertTrue(e.getMessage().startsWith("Cannot recover "));
  }
  DatanodeProtocol namenode = dn.getActiveNamenodeForBP(POOL_ID);
  verify(namenode, never()).commitBlockSynchronization(
      any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
      anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused") // keep it for future blockReceived benchmark
int replicateBlocks() throws IOException {
  // register datanode
  StorageReport[] rep = { new StorageReport(storage, false, DF_CAPACITY,
      DF_USED, DF_CAPACITY - DF_USED, DF_USED) };
  DatanodeCommand[] cmds = dataNodeProto.sendHeartbeat(dnRegistration, rep,
      0L, 0L, 0, 0, 0, null, true).getCommands();
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets(),
            bcmd.getTargetStorageIDs());
      }
    }
  }
  return 0;
}
/**
 * BlockRecoveryFI_07. max replica length from all DNs is zero.
 *
 * @throws IOException in case of an error
 */
@Test
public void testZeroLenReplicas() throws IOException, InterruptedException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  doReturn(new ReplicaRecoveryInfo(block.getBlockId(), 0,
      block.getGenerationStamp(), ReplicaState.FINALIZED)).when(spyDN)
      .initReplicaRecovery(any(RecoveringBlock.class));

  for (RecoveringBlock rBlock : initRecoveringBlocks()) {
    BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
        recoveryWorker.new RecoveryTaskContiguous(rBlock);
    BlockRecoveryWorker.RecoveryTaskContiguous spyTask =
        spy(RecoveryTaskContiguous);
    spyTask.recover();
  }
  DatanodeProtocol dnP = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(dnP).commitBlockSynchronization(
      block, RECOVERY_ID, 0, true, true, DatanodeID.EMPTY_ARRAY, null);
}
/**
 * BlockRecoveryFI_10. DN has no ReplicaUnderRecovery.
 *
 * @throws IOException in case of an error
 */
@Test
public void testNoReplicaUnderRecovery() throws IOException {
  if (LOG.isDebugEnabled()) {
    LOG.debug("Running " + GenericTestUtils.getMethodName());
  }
  dn.data.createRbw(StorageType.DEFAULT, block, false);
  BlockRecoveryWorker.RecoveryTaskContiguous RecoveryTaskContiguous =
      recoveryWorker.new RecoveryTaskContiguous(rBlock);
  try {
    RecoveryTaskContiguous.syncBlock(initBlockRecords(dn));
    fail("Sync should fail");
  } catch (IOException e) {
    // Assert on the message instead of silently ignoring the startsWith result.
    assertTrue(e.getMessage().startsWith("Cannot recover "));
  }
  DatanodeProtocol namenode = recoveryWorker.getActiveNamenodeForBP(POOL_ID);
  verify(namenode, never()).commitBlockSynchronization(
      any(ExtendedBlock.class), anyLong(), anyLong(), anyBoolean(),
      anyBoolean(), any(DatanodeID[].class), any(String[].class));
}
public void testSinglePortStartup() throws IOException {
  Configuration conf = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster(conf, 2, true, null);
  NameNode nn = cluster.getNameNode();
  InetSocketAddress dnAddress = nn.getNameNodeDNAddress();
  InetSocketAddress clientAddress = nn.getNameNodeAddress();
  assertEquals(clientAddress, dnAddress);

  DatanodeProtocol dnProtocol = (DatanodeProtocol) RPC.waitForProxy(
      DatanodeProtocol.class, DatanodeProtocol.versionID, dnAddress, conf);
  // perform a dummy call
  dnProtocol.getProtocolVersion(DatanodeProtocol.class.getName(),
      DatanodeProtocol.versionID);
  ClientProtocol client = (ClientProtocol) RPC.waitForProxy(
      ClientProtocol.class, ClientProtocol.versionID, dnAddress, conf);
  client.getProtocolVersion(ClientProtocol.class.getName(),
      ClientProtocol.versionID);
  cluster.shutdown();
}
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(
      dnRegistration, DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED,
      DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
/**
 * Send a heartbeat to the name-node and replicate blocks if requested.
 */
@SuppressWarnings("unused")
int replicateBlocks() throws IOException {
  // register datanode
  DatanodeCommand[] cmds = nameNode.sendHeartbeat(dnRegistration,
      DF_CAPACITY, DF_USED, DF_CAPACITY - DF_USED, DF_USED, 0, 0);
  if (cmds != null) {
    for (DatanodeCommand cmd : cmds) {
      if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
        // Send a copy of a block to another datanode
        BlockCommand bcmd = (BlockCommand) cmd;
        return transferBlocks(bcmd.getBlocks(), bcmd.getTargets());
      }
    }
  }
  return 0;
}
/**
 * Offer service to the specified namenode
 */
public OfferService(AvatarDataNode anode, ServicePair servicePair,
                    DatanodeProtocol namenode, InetSocketAddress namenodeAddress,
                    AvatarProtocol avatarnode, InetSocketAddress avatarnodeAddress) {
  this.anode = anode;
  this.servicePair = servicePair;
  this.namenode = namenode;
  this.avatarnode = avatarnode;
  this.namenodeAddress = namenodeAddress;
  this.avatarnodeAddress = avatarnodeAddress;

  nsRegistration = servicePair.nsRegistration;
  data = anode.data;
  myMetrics = anode.myMetrics;
  scheduleBlockReport(anode.initialBlockReportDelay);
  backlogSize = anode.getConf().getInt(
      "dfs.datanode.blockreceived.backlog", 10000);
  fullBlockReportDelay = anode.getConf().getInt(
      "dfs.datanode.fullblockreport.delay", 5 * 60 * 1000);
  blockReceivedRetryInterval = anode.getConf().getInt(
      "dfs.datanode.blockreceived.retry.internval", 10000);
}
/** */
public void errorReport(DatanodeRegistration nodeReg,
                        int errorCode, String msg) throws IOException {
  // Log error message from datanode
  String dnName = (nodeReg == null ? "unknown DataNode" : nodeReg.getName());
  LOG.info("Error report from " + dnName + ": " + msg);
  if (errorCode == DatanodeProtocol.NOTIFY) {
    return;
  }
  verifyRequest(nodeReg);
  if (errorCode == DatanodeProtocol.DISK_ERROR) {
    LOG.warn("Volume failed on " + dnName);
  } else if (errorCode == DatanodeProtocol.FATAL_DISK_ERROR) {
    namesystem.removeDatanode(nodeReg);
  }
}
RaidTaskCommand getRaidCommand(int maxEncodingTasks, int maxDecodingTasks) {
  List<RaidTask> tasks = new ArrayList<RaidTask>();
  synchronized (raidEncodingTasks) {
    tasks.addAll(raidEncodingTasks.pollN(
        Math.min(raidEncodingTasks.size(), maxEncodingTasks)));
  }
  synchronized (raidDecodingTasks) {
    tasks.addAll(raidDecodingTasks.pollN(
        Math.min(raidDecodingTasks.size(), maxDecodingTasks)));
  }
  return (tasks.size() == 0) ? null
      : new RaidTaskCommand(DatanodeProtocol.DNA_RAIDTASK,
          tasks.toArray(new RaidTask[tasks.size()]));
}
void setupNS(Configuration conf, AbstractList<File> dataDirs)
    throws IOException {
  // get NN proxy
  DatanodeProtocol dnp = (DatanodeProtocol) RPC.waitForProxy(
      DatanodeProtocol.class, DatanodeProtocol.versionID, nnAddr, conf);
  setNameNode(dnp);

  // handshake with NN
  NamespaceInfo nsInfo = handshake();
  setNamespaceInfo(nsInfo);
  synchronized (DataNode.this) {
    setupNSStorage();
  }

  nsRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());
  nsRegistration.setInfoPort(infoServer.getPort());
}
public long getProtocolVersion(String protocol,
                               long clientVersion) throws IOException {
  if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID;
  } else if (protocol.equals(DatanodeProtocol.class.getName())) {
    return DatanodeProtocol.versionID;
  } else if (protocol.equals(NamenodeProtocol.class.getName())) {
    return NamenodeProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())) {
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())) {
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())) {
    return GetUserMappingsProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to name node: " + protocol);
  }
}