@VisibleForTesting
public static boolean doRollback(Configuration conf,
    boolean isConfirmationNeeded) throws IOException {
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);
  String namenodeId = HAUtil.getNameNodeId(conf, nsId);
  initializeGenericKeys(conf, nsId, namenodeId);

  FSNamesystem nsys = new FSNamesystem(conf, new FSImage(conf));
  System.err.print(
      "\"rollBack\" will remove the current state of the file system,\n"
      + "returning you to the state prior to initiating your recent\n"
      + "upgrade. This action is permanent and cannot be undone. If you\n"
      + "are performing a rollback in an HA environment, you should be\n"
      + "certain that no NameNode process is running on any host.");
  if (isConfirmationNeeded) {
    if (!confirmPrompt("Roll back file system state?")) {
      System.err.println("Rollback aborted.");
      return true;
    }
  }
  nsys.getFSImage().doRollback(nsys);
  return false;
}
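/*
 * A minimal sketch of driving doRollback() non-interactively, e.g. from a
 * test harness. It assumes the NameNode in question is stopped and that the
 * configuration on the classpath points at its storage directories; nothing
 * here is a working cluster setup.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

public class RollbackSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    // Pass false to skip the confirmation prompt; a return value of true
    // means the rollback was aborted rather than performed.
    boolean aborted = NameNode.doRollback(conf, false);
    System.out.println("rollback aborted: " + aborted);
  }
}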
/** Rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);

  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
public static DFSZKFailoverController create(Configuration conf) {
  Configuration localNNConf = DFSHAAdmin.addSecurityConfiguration(conf);
  String nsId = DFSUtil.getNamenodeNameServiceId(conf);

  if (!HAUtil.isHAEnabled(localNNConf, nsId)) {
    throw new HadoopIllegalArgumentException(
        "HA is not enabled for this namenode.");
  }
  String nnId = HAUtil.getNameNodeId(localNNConf, nsId);
  if (nnId == null) {
    String msg = "Could not get the namenode ID of this node. " +
        "You may be running zkfc on a node that is not a namenode.";
    throw new HadoopIllegalArgumentException(msg);
  }
  NameNode.initializeGenericKeys(localNNConf, nsId, nnId);
  DFSUtil.setGenericConf(localNNConf, nsId, nnId, ZKFC_CONF_KEYS);

  NNHAServiceTarget localTarget = new NNHAServiceTarget(
      localNNConf, nsId, nnId);
  return new DFSZKFailoverController(localNNConf, localTarget);
}
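/*
 * A hedged sketch of how create() is typically driven from a command-line
 * entry point: parse the generic options, build the controller, and hand the
 * remaining arguments to its run loop. This mirrors the usual ZKFC startup
 * flow but is not a verbatim copy of DFSZKFailoverController's own main().
 */
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSZKFailoverController;
import org.apache.hadoop.util.GenericOptionsParser;

public class ZkfcLauncherSketch {
  public static void main(String[] args) throws Exception {
    GenericOptionsParser parser =
        new GenericOptionsParser(new HdfsConfiguration(), args);
    DFSZKFailoverController zkfc =
        DFSZKFailoverController.create(parser.getConfiguration());
    // run() blocks, participating in leader election until shutdown.
    System.exit(zkfc.run(parser.getRemainingArgs()));
  }
}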
/**
 * Download the most recent fsimage from the name node, and save it to a local
 * file in the given directory.
 *
 * @param argv
 *          List of command line parameters.
 * @param idx
 *          The index of the command that is being processed.
 * @return an exit code indicating success or failure.
 * @throws IOException
 */
public int fetchImage(final String[] argv, final int idx) throws IOException {
  Configuration conf = getConf();
  final URL infoServer = DFSUtil.getInfoServer(
      HAUtil.getAddressOfActive(getDFS()), conf,
      DFSUtil.getHttpClientScheme(conf)).toURL();
  SecurityUtil.doAsCurrentUser(new PrivilegedExceptionAction<Void>() {
    @Override
    public Void run() throws Exception {
      TransferFsImage.downloadMostRecentImageToDirectory(infoServer,
          new File(argv[idx]));
      return null;
    }
  });
  return 0;
}
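/*
 * A minimal sketch of invoking the command above through the DFSAdmin tool,
 * equivalent to running "hdfs dfsadmin -fetchImage <dir>" on the command
 * line. The target directory "/tmp/fsimage-backup" is a placeholder.
 */
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class FetchImageSketch {
  public static void main(String[] args) throws Exception {
    int exitCode = ToolRunner.run(new HdfsConfiguration(), new DFSAdmin(),
        new String[] {"-fetchImage", "/tmp/fsimage-backup"});
    System.exit(exitCode);
  }
}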
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log "
        + "directory of namenode " + dfs.getUri());
  }
  return 0;
}
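/*
 * A hedged sketch of calling metaSave() directly using the (argv, idx)
 * convention shown above, where argv[idx] is the output filename. This is
 * equivalent to "hdfs dfsadmin -metasave meta.txt"; the filename is a
 * placeholder, and in an HA nameservice the file is written into the log
 * directory of every NameNode.
 */
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.tools.DFSAdmin;

public class MetaSaveSketch {
  public static void main(String[] args) throws Exception {
    DFSAdmin admin = new DFSAdmin(new HdfsConfiguration());
    String[] argv = {"-metasave", "meta.txt"};
    int exitCode = admin.metaSave(argv, 1); // idx 1 points at the filename
    System.exit(exitCode);
  }
}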
/**
 * Test to verify legacy proxy providers are correctly wrapped.
 */
@Test
public void testWrappedFailoverProxyProvider() throws Exception {
  // setup the config with the dummy provider class
  Configuration config = new HdfsConfiguration(conf);
  String logicalName = HATestUtil.getLogicalHostname(cluster);
  HATestUtil.setFailoverConfigurations(cluster, config, logicalName);
  config.set(DFS_CLIENT_FAILOVER_PROXY_PROVIDER_KEY_PREFIX + "." + logicalName,
      DummyLegacyFailoverProxyProvider.class.getName());
  Path p = new Path("hdfs://" + logicalName + "/");

  // do not use the IP address for the token service
  SecurityUtil.setTokenServiceUseIp(false);

  // Logical URI should be used.
  assertTrue("Legacy proxy providers should use logical URI.",
      HAUtil.useLogicalUri(config, p.toUri()));
}
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
@Before
public void setupCluster() throws IOException {
  conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HA_LOGROLL_PERIOD_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  HAUtil.setAllowStandbyReads(conf, true);

  MiniDFSNNTopology topology = MiniDFSNNTopology.simpleHATopology();
  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(topology)
      .numDataNodes(0)
      .build();
  cluster.waitActive();

  shutdownClusterAndRemoveSharedEditsDir();
}
@Before
public void setupCluster() throws Exception {
  Configuration conf = new Configuration();
  conf.setInt(DFSConfigKeys.DFS_HEARTBEAT_INTERVAL_KEY, 1);
  conf.setInt(DFSConfigKeys.DFS_HA_TAILEDITS_PERIOD_KEY, 1);
  conf.setLong(DFSConfigKeys.DFS_BLOCK_SIZE_KEY, BLOCK_SIZE);
  HAUtil.setAllowStandbyReads(conf, true);

  cluster = new MiniDFSCluster.Builder(conf)
      .nnTopology(MiniDFSNNTopology.simpleHATopology())
      .numDataNodes(1)
      .waitSafeMode(false)
      .build();
  cluster.waitActive();

  nn0 = cluster.getNameNode(0);
  nn1 = cluster.getNameNode(1);
  fs = HATestUtil.configureFailoverFs(cluster, conf);

  cluster.transitionToActive(0);
}
private void setUpHaCluster(boolean security) throws Exception {
  conf = new Configuration();
  conf.setBoolean(CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION,
      security);
  cluster = new MiniQJMHACluster.Builder(conf).build();
  setHAConf(conf, cluster.getDfsCluster().getNameNode(0).getHostAndPort(),
      cluster.getDfsCluster().getNameNode(1).getHostAndPort());
  admin = new DFSAdmin();
  admin.setConf(conf);
  assertTrue(HAUtil.isHAEnabled(conf, "ns1"));

  originOut = System.out;
  originErr = System.err;
  System.setOut(new PrintStream(out));
  System.setErr(new PrintStream(err));
}
public static List<RemoteNameNodeInfo> getRemoteNameNodes(Configuration conf,
    String nsId) throws IOException {
  // there is only a single NN configured (and no federation) so we don't
  // have any more NNs
  if (nsId == null) {
    return Collections.emptyList();
  }

  List<Configuration> otherNodes = HAUtil.getConfForOtherNodes(conf);
  List<RemoteNameNodeInfo> nns = new ArrayList<RemoteNameNodeInfo>();

  for (Configuration otherNode : otherNodes) {
    String otherNNId = HAUtil.getNameNodeId(otherNode, nsId);
    // don't do any validation here as in some cases, it can be overwritten later
    InetSocketAddress otherIpcAddr = NameNode.getServiceAddress(otherNode, true);

    final String scheme = DFSUtil.getHttpClientScheme(conf);
    URL otherHttpAddr = DFSUtil.getInfoServerWithDefaultHost(
        otherIpcAddr.getHostName(), otherNode, scheme).toURL();

    nns.add(new RemoteNameNodeInfo(otherNode, otherNNId, otherIpcAddr,
        otherHttpAddr));
  }
  return nns;
}
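/*
 * A minimal sketch of enumerating the peer NameNodes with the helper above,
 * assuming it is the static RemoteNameNodeInfo.getRemoteNameNodes() from the
 * HA support classes. The nameservice id "mycluster" is a placeholder, and
 * the output simply relies on the object's toString().
 */
import java.util.List;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.hdfs.server.namenode.ha.RemoteNameNodeInfo;

public class RemoteNameNodesSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    List<RemoteNameNodeInfo> peers =
        RemoteNameNodeInfo.getRemoteNameNodes(conf, "mycluster");
    for (RemoteNameNodeInfo peer : peers) {
      System.out.println("remote NN: " + peer);
    }
  }
}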
/**
 * Determine the address of the NN we are checkpointing
 * as well as our own HTTP address from the configuration.
 * @throws IOException
 */
private void setNameNodeAddresses(Configuration conf) throws IOException {
  // Look up our own address.
  myNNAddress = getHttpAddress(conf);

  // Look up the active node's address
  List<Configuration> confForActive = HAUtil.getConfForOtherNodes(conf);
  activeNNAddresses = new ArrayList<URL>(confForActive.size());
  for (Configuration activeConf : confForActive) {
    URL activeNNAddress = getHttpAddress(activeConf);

    // sanity check each possible active NN
    Preconditions.checkArgument(checkAddress(activeNNAddress),
        "Bad address for active NN: %s", activeNNAddress);
    activeNNAddresses.add(activeNNAddress);
  }

  // Sanity-check.
  Preconditions.checkArgument(checkAddress(myNNAddress),
      "Bad address for standby NN: %s", myNNAddress);
}
/** Rollback for rolling upgrade. */
private void rollingRollback(long discardSegmentTxId, long ckptId)
    throws IOException {
  // discard unnecessary editlog segments starting from the given id
  this.editLog.discardSegments(discardSegmentTxId);
  // rename the special checkpoint
  renameCheckpoint(ckptId, NameNodeFile.IMAGE_ROLLBACK, NameNodeFile.IMAGE,
      true);
  // purge all the checkpoints after the marker
  archivalManager.purgeCheckpoinsAfter(NameNodeFile.IMAGE, ckptId);
  // HDFS-7939: purge all old fsimage_rollback_* files
  archivalManager.purgeCheckpoints(NameNodeFile.IMAGE_ROLLBACK);

  String nameserviceId = DFSUtil.getNamenodeNameServiceId(conf);
  if (HAUtil.isHAEnabled(conf, nameserviceId)) {
    // close the editlog since it is currently open for write
    this.editLog.close();
    // reopen the editlog for read
    this.editLog.initSharedJournalsForRead();
  }
}
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId,
            ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log "
        + "directory of namenode " + dfs.getUri());
  }
  return 0;
}
/**
 * Clones the delegation token to each individual host behind the same
 * logical address.
 *
 * @param config the hadoop configuration
 * @throws IOException if failed to get information for the current user.
 */
public static void cloneHaNnCredentials(Configuration config) throws IOException {
  String scheme = URI.create(
      config.get(CommonConfigurationKeysPublic.FS_DEFAULT_NAME_KEY,
          CommonConfigurationKeysPublic.FS_DEFAULT_NAME_DEFAULT)).getScheme();

  // Loop through all name services. Each name service could have multiple
  // name nodes associated with it.
  for (Map.Entry<String, Map<String, InetSocketAddress>> entry :
      DFSUtil.getHaNnRpcAddresses(config).entrySet()) {
    String nsId = entry.getKey();
    Map<String, InetSocketAddress> addressesInNN = entry.getValue();
    if (!HAUtil.isHAEnabled(config, nsId)
        || addressesInNN == null || addressesInNN.isEmpty()) {
      continue;
    }

    // The client may have a delegation token set for the logical
    // URI of the cluster. Clone this token to apply to each of the
    // underlying IPC addresses so that the IPC code can find it.
    URI uri = URI.create(scheme + "://" + nsId);
    LOG.info("Cloning delegation token for uri {}", uri);
    HAUtil.cloneDelegationTokenForLogicalUri(
        UserGroupInformation.getCurrentUser(), uri, addressesInNN.values());
  }
}
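/*
 * A hedged sketch of where the helper above fits: after delegation tokens
 * have been fetched for the logical HA URI and added to the current user's
 * credentials, clone them so RPC connections to the individual NameNode
 * addresses can find a matching token. "HaCredentialsUtil" is a placeholder
 * name for whatever class hosts cloneHaNnCredentials(), and the renewer name
 * is likewise an assumption.
 */
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileSystem;
import org.apache.hadoop.hdfs.HdfsConfiguration;
import org.apache.hadoop.security.Credentials;
import org.apache.hadoop.security.UserGroupInformation;

public class CloneCredentialsSketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new HdfsConfiguration();
    FileSystem fs = FileSystem.get(conf);

    // Fetch tokens for the logical URI (e.g. hdfs://mycluster) into the
    // current user's credentials.
    Credentials creds = UserGroupInformation.getCurrentUser().getCredentials();
    fs.addDelegationTokens("yarn", creds);        // renewer name is a placeholder
    UserGroupInformation.getCurrentUser().addCredentials(creds);

    // Make the same tokens visible under each physical NameNode address.
    HaCredentialsUtil.cloneHaNnCredentials(conf); // hypothetical host class
  }
}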
@VisibleForTesting
Token<DelegationTokenIdentifier> deserializeToken(String delegation,
    String nnId) throws IOException {
  final DataNode datanode = (DataNode) context.getAttribute("datanode");
  final Configuration conf = datanode.getConf();
  final Token<DelegationTokenIdentifier> token =
      new Token<DelegationTokenIdentifier>();
  token.decodeFromUrlString(delegation);
  URI nnUri = URI.create(HdfsConstants.HDFS_URI_SCHEME + "://" + nnId);
  boolean isLogical = HAUtil.isLogicalUri(conf, nnUri);
  if (isLogical) {
    token.setService(HAUtil.buildTokenServiceForLogicalUri(nnUri,
        HdfsConstants.HDFS_URI_SCHEME));
  } else {
    token.setService(SecurityUtil.buildTokenService(nnUri));
  }
  token.setKind(DelegationTokenIdentifier.HDFS_DELEGATION_KIND);
  return token;
}
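/*
 * A minimal round-trip sketch of the URL-string encoding that
 * deserializeToken() consumes. The token bytes and the logical service
 * string "ha-hdfs:mycluster" are placeholders, not values produced by a
 * real NameNode.
 */
import org.apache.hadoop.hdfs.security.token.delegation.DelegationTokenIdentifier;
import org.apache.hadoop.io.Text;
import org.apache.hadoop.security.token.Token;

public class TokenRoundTripSketch {
  public static void main(String[] args) throws Exception {
    Token<DelegationTokenIdentifier> original =
        new Token<DelegationTokenIdentifier>(
            new byte[0], new byte[0],
            DelegationTokenIdentifier.HDFS_DELEGATION_KIND,
            new Text("ha-hdfs:mycluster"));          // placeholder logical service
    String urlString = original.encodeToUrlString(); // what arrives as "delegation"

    Token<DelegationTokenIdentifier> copy = new Token<DelegationTokenIdentifier>();
    copy.decodeFromUrlString(urlString);             // same call the method above makes
    System.out.println("kind = " + copy.getKind()
        + ", service = " + copy.getService());
  }
}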
/**
 * Derive the namenode http address from the current file system,
 * either default or as set by "-fs" in the generic options.
 * @return Returns the http address, or null on failure.
 * @throws IOException if we can't determine the active NN address
 */
private URI getCurrentNamenodeAddress() throws IOException {
  Configuration conf = getConf();

  // get the filesystem object to verify it is an HDFS system
  FileSystem fs;
  try {
    fs = FileSystem.get(conf);
  } catch (IOException ioe) {
    System.err.println("FileSystem is inaccessible due to:\n"
        + StringUtils.stringifyException(ioe));
    return null;
  }
  if (!(fs instanceof DistributedFileSystem)) {
    System.err.println("FileSystem is " + fs.getUri());
    return null;
  }

  return DFSUtil.getInfoServer(HAUtil.getAddressOfActive(fs), conf,
      DFSUtil.getHttpClientScheme(conf));
}