Java class org.apache.hadoop.hdfs.protocol.ClientProtocol instance source code examples

Project: hadoop    File: DFSClient.java
private static ClientProtocol getNNProxy(
    Token<DelegationTokenIdentifier> token, Configuration conf)
    throws IOException {
  URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME,
          token);
  if (HAUtil.isTokenForLogicalUri(token) &&
      !HAUtil.isLogicalUri(conf, uri)) {
    // If the token is for a logical nameservice, but the configuration
    // we have disagrees about that, we can't actually renew it.
    // This can be the case in MR, for example, if the RM doesn't
    // have all of the HA clusters configured in its configuration.
    throw new IOException("Unable to map logical nameservice URI '" +
        uri + "' to a NameNode. Local configuration does not have " +
        "a failover proxy provider configured.");
  }

  NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
    NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
  assert info.getDelegationTokenService().equals(token.getService()) :
    "Returned service '" + info.getDelegationTokenService().toString() +
    "' doesn't match expected service '" +
    token.getService().toString() + "'";

  return info.getProxy();
}
Project: hadoop    File: DfsServlet.java
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
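Outside a servlet, the same kind of ClientProtocol proxy can be built straight from a configuration. The following is a minimal hedged sketch, not taken from any of the projects above; the hdfs://localhost:8020 fallback is a placeholder address.

Configuration conf = new HdfsConfiguration();
URI nnUri = URI.create(conf.get("fs.defaultFS", "hdfs://localhost:8020"));
// NameNodeProxies.createProxy returns a ProxyAndInfo wrapper, as in the
// servlet code above; getProxy() unwraps the ClientProtocol stub.
ClientProtocol namenode =
    NameNodeProxies.createProxy(conf, nnUri, ClientProtocol.class).getProxy();
try {
  HdfsFileStatus root = namenode.getFileInfo("/");
  System.out.println("root mtime = " + root.getModificationTime());
} finally {
  RPC.stopProxy(namenode); // org.apache.hadoop.ipc.RPC
}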
Project: hadoop    File: NameNode.java
public long getProtocolVersion(String protocol, 
                               long clientVersion) throws IOException {
  if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID; 
  } else if (protocol.equals(DatanodeProtocol.class.getName())){
    return DatanodeProtocol.versionID;
  } else if (protocol.equals(NamenodeProtocol.class.getName())){
    return NamenodeProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
    return RefreshCallQueueProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
    return GetUserMappingsProtocol.versionID;
  } else if (protocol.equals(TraceAdminProtocol.class.getName())){
    return TraceAdminProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to name node: " + protocol);
  }
}
Project: hadoop    File: PBHelper.java
public static GetFsStatsResponseProto convert(long[] fsStats) {
  GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
      .newBuilder();
  if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
    result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
    result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
    result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
    result.setUnderReplicated(
            fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
    result.setCorruptBlocks(
        fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
    result.setMissingBlocks(
        fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
    result.setMissingReplOneBlocks(
        fsStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]);
  return result.build();
}
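PBHelper also has the inverse overload (shown further down this page) that unpacks the protobuf response back into a stats array, so the two convert methods round-trip. A hedged sketch, assuming the seven-slot array layout indexed by the ClientProtocol.GET_STATS_*_IDX constants; the literal values are arbitrary.

long[] fsStats = new long[7];
fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX] = 1L << 40; // 1 TiB, arbitrary
fsStats[ClientProtocol.GET_STATS_USED_IDX] = 1L << 30;     // 1 GiB, arbitrary
GetFsStatsResponseProto proto = PBHelper.convert(fsStats);
long[] back = PBHelper.convert(proto);
// The round trip preserves each populated slot.
assert back[ClientProtocol.GET_STATS_CAPACITY_IDX]
    == fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX];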
Project: hadoop    File: DFSAdmin.java
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: hadoop    File: HAUtil.java
/**
 * Used to ensure that at least one of the given HA NNs is currently in the
 * active state.
 * 
 * @param namenodes list of RPC proxies for each NN to check.
 * @return true if at least one NN is active, false if all are in the standby state.
 * @throws IOException in the event of error.
 */
public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
    throws IOException {
  for (ClientProtocol namenode : namenodes) {
    try {
      namenode.getFileInfo("/");
      return true;
    } catch (RemoteException re) {
      IOException cause = re.unwrapRemoteException();
      if (cause instanceof StandbyException) {
        // This is expected to happen for a standby NN.
      } else {
        throw re;
      }
    }
  }
  return false;
}
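A hedged usage sketch, not from the sources above: the proxy list can be built with HAUtil.getProxiesForAllNameNodesInNameservice, exactly as the DFSAdmin commands on this page do. Here conf and nsId (the logical URI host) are assumed to be in scope.

// Fail fast when no NameNode in the nameservice is active.
List<ProxyAndInfo<ClientProtocol>> proxies =
    HAUtil.getProxiesForAllNameNodesInNameservice(conf, nsId,
        ClientProtocol.class);
List<ClientProtocol> namenodes = new ArrayList<ClientProtocol>();
for (ProxyAndInfo<ClientProtocol> p : proxies) {
  namenodes.add(p.getProxy());
}
if (!HAUtil.isAtLeastOneActive(namenodes)) {
  throw new IOException("No active NameNode in nameservice " + nsId);
}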
Project: hadoop    File: TestRetryCacheWithHA.java
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxies.createFailoverProxyProvider(conf, 
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_BASE_DEFAULT,
          DFSConfigKeys.DFS_CLIENT_FAILOVER_SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);

  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
Project: hadoop    File: TestBalancerWithMultipleNameNodes.java
static void wait(final ClientProtocol[] clients,
    long expectedUsedSpace, long expectedTotalSpace) throws IOException {
  LOG.info("WAIT expectedUsedSpace=" + expectedUsedSpace
      + ", expectedTotalSpace=" + expectedTotalSpace);
  for(int n = 0; n < clients.length; n++) {
    int i = 0;
    for(boolean done = false; !done; ) {
      final long[] s = clients[n].getStats();
      done = s[0] == expectedTotalSpace && s[1] == expectedUsedSpace;
      if (!done) {
        sleep(100L);
        if (++i % 100 == 0) {
          LOG.warn("WAIT i=" + i + ", s=[" + s[0] + ", " + s[1] + "]");
        }
      }
    }
  }
}
Project: hadoop    File: TestEncryptionZones.java
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 0777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Project: aliyun-oss-hadoop-fs    File: DFSClient.java
private static ClientProtocol getNNProxy(
    Token<DelegationTokenIdentifier> token, Configuration conf)
    throws IOException {
  URI uri = HAUtilClient.getServiceUriFromToken(
      HdfsConstants.HDFS_URI_SCHEME, token);
  if (HAUtilClient.isTokenForLogicalUri(token) &&
      !HAUtilClient.isLogicalUri(conf, uri)) {
    // If the token is for a logical nameservice, but the configuration
    // we have disagrees about that, we can't actually renew it.
    // This can be the case in MR, for example, if the RM doesn't
    // have all of the HA clusters configured in its configuration.
    throw new IOException("Unable to map logical nameservice URI '" +
        uri + "' to a NameNode. Local configuration does not have " +
        "a failover proxy provider configured.");
  }

  ProxyAndInfo<ClientProtocol> info =
      NameNodeProxiesClient.createProxyWithClientProtocol(conf, uri, null);
  assert info.getDelegationTokenService().equals(token.getService()) :
      "Returned service '" + info.getDelegationTokenService().toString() +
          "' doesn't match expected service '" +
          token.getService().toString() + "'";

  return info.getProxy();
}
Project: aliyun-oss-hadoop-fs    File: PBHelperClient.java
public static long[] convert(GetFsStatsResponseProto res) {
  long[] result = new long[ClientProtocol.STATS_ARRAY_LENGTH];
  result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
  result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
  result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
  result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] =
      res.getUnderReplicated();
  result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] =
      res.getCorruptBlocks();
  result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] =
      res.getMissingBlocks();
  result[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
      res.getMissingReplOneBlocks();
  result[ClientProtocol.GET_STATS_BYTES_IN_FUTURE_BLOCKS_IDX] =
      res.hasBlocksInFuture() ? res.getBlocksInFuture() : 0;
  return result;
}
Project: aliyun-oss-hadoop-fs    File: DfsServlet.java
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, DFSUtilClient.getNNUri(nnAddr),
      ClientProtocol.class).getProxy();
}
Project: aliyun-oss-hadoop-fs    File: NameNode.java
public long getProtocolVersion(String protocol, 
                               long clientVersion) throws IOException {
  if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID; 
  } else if (protocol.equals(DatanodeProtocol.class.getName())){
    return DatanodeProtocol.versionID;
  } else if (protocol.equals(NamenodeProtocol.class.getName())){
    return NamenodeProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
    return RefreshCallQueueProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
    return GetUserMappingsProtocol.versionID;
  } else if (protocol.equals(TraceAdminProtocol.class.getName())){
    return TraceAdminProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to name node: " + protocol);
  }
}
Project: aliyun-oss-hadoop-fs    File: DFSAdmin.java
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtilClient.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: big-c    File: HAUtil.java
/**
 * Used to ensure that at least one of the given HA NNs is currently in the
 * active state.
 * 
 * @param namenodes list of RPC proxies for each NN to check.
 * @return true if at least one NN is active, false if all are in the standby state.
 * @throws IOException in the event of error.
 */
public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
    throws IOException {
  for (ClientProtocol namenode : namenodes) {
    try {
      namenode.getFileInfo("/");
      return true;
    } catch (RemoteException re) {
      IOException cause = re.unwrapRemoteException();
      if (cause instanceof StandbyException) {
        // This is expected to happen for a standby NN.
      } else {
        throw re;
      }
    }
  }
  return false;
}
Project: aliyun-oss-hadoop-fs    File: HAUtil.java
/**
 * Used to ensure that at least one of the given HA NNs is currently in the
 * active state.
 * 
 * @param namenodes list of RPC proxies for each NN to check.
 * @return true if at least one NN is active, false if all are in the standby state.
 * @throws IOException in the event of error.
 */
public static boolean isAtLeastOneActive(List<ClientProtocol> namenodes)
    throws IOException {
  for (ClientProtocol namenode : namenodes) {
    try {
      namenode.getFileInfo("/");
      return true;
    } catch (RemoteException re) {
      IOException cause = re.unwrapRemoteException();
      if (cause instanceof StandbyException) {
        // This is expected to happen for a standby NN.
      } else {
        throw re;
      }
    }
  }
  return false;
}
Project: big-c    File: TestEncryptionZones.java
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 0777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Project: aliyun-oss-hadoop-fs    File: TestRetryCacheWithHA.java
private DFSClient genClientWithDummyHandler() throws IOException {
  URI nnUri = dfs.getUri();
  FailoverProxyProvider<ClientProtocol> failoverProxyProvider = 
      NameNodeProxiesClient.createFailoverProxyProvider(conf,
          nnUri, ClientProtocol.class, true, null);
  InvocationHandler dummyHandler = new DummyRetryInvocationHandler(
      failoverProxyProvider, RetryPolicies
      .failoverOnNetworkException(RetryPolicies.TRY_ONCE_THEN_FAIL,
          Integer.MAX_VALUE,
          HdfsClientConfigKeys.Failover.SLEEPTIME_BASE_DEFAULT,
          HdfsClientConfigKeys.Failover.SLEEPTIME_MAX_DEFAULT));
  ClientProtocol proxy = (ClientProtocol) Proxy.newProxyInstance(
      failoverProxyProvider.getInterface().getClassLoader(),
      new Class[] { ClientProtocol.class }, dummyHandler);

  DFSClient client = new DFSClient(null, proxy, conf, null);
  return client;
}
Project: big-c    File: TestBalancerWithMultipleNameNodes.java
static void wait(final ClientProtocol[] clients,
    long expectedUsedSpace, long expectedTotalSpace) throws IOException {
  LOG.info("WAIT expectedUsedSpace=" + expectedUsedSpace
      + ", expectedTotalSpace=" + expectedTotalSpace);
  for(int n = 0; n < clients.length; n++) {
    int i = 0;
    for(boolean done = false; !done; ) {
      final long[] s = clients[n].getStats();
      done = s[0] == expectedTotalSpace && s[1] == expectedUsedSpace;
      if (!done) {
        sleep(100L);
        if (++i % 100 == 0) {
          LOG.warn("WAIT i=" + i + ", s=[" + s[0] + ", " + s[1] + "]");
        }
      }
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestBalancerWithMultipleNameNodes.java
static void wait(final ClientProtocol[] clients,
    long expectedUsedSpace, long expectedTotalSpace) throws IOException {
  LOG.info("WAIT expectedUsedSpace=" + expectedUsedSpace
      + ", expectedTotalSpace=" + expectedTotalSpace);
  for(int n = 0; n < clients.length; n++) {
    int i = 0;
    for(boolean done = false; !done; ) {
      final long[] s = clients[n].getStats();
      done = s[0] == expectedTotalSpace && s[1] == expectedUsedSpace;
      if (!done) {
        sleep(100L);
        if (++i % 100 == 0) {
          LOG.warn("WAIT i=" + i + ", s=[" + s[0] + ", " + s[1] + "]");
        }
      }
    }
  }
}
Project: aliyun-oss-hadoop-fs    File: TestEncryptionZones.java
@SuppressWarnings("unchecked")
private static void mockCreate(ClientProtocol mcp,
    CipherSuite suite, CryptoProtocolVersion version) throws Exception {
  Mockito.doReturn(
      new HdfsFileStatus(0, false, 1, 1024, 0, 0, new FsPermission(
          (short) 0777), "owner", "group", new byte[0], new byte[0],
          1010, 0, new FileEncryptionInfo(suite,
          version, new byte[suite.getAlgorithmBlockSize()],
          new byte[suite.getAlgorithmBlockSize()],
          "fakeKey", "fakeVersion"),
          (byte) 0, null))
      .when(mcp)
      .create(anyString(), (FsPermission) anyObject(), anyString(),
          (EnumSetWritable<CreateFlag>) anyObject(), anyBoolean(),
          anyShort(), anyLong(), (CryptoProtocolVersion[]) anyObject());
}
Project: big-c    File: DFSClient.java
private static ClientProtocol getNNProxy(
    Token<DelegationTokenIdentifier> token, Configuration conf)
    throws IOException {
  URI uri = HAUtil.getServiceUriFromToken(HdfsConstants.HDFS_URI_SCHEME,
          token);
  if (HAUtil.isTokenForLogicalUri(token) &&
      !HAUtil.isLogicalUri(conf, uri)) {
    // If the token is for a logical nameservice, but the configuration
    // we have disagrees about that, we can't actually renew it.
    // This can be the case in MR, for example, if the RM doesn't
    // have all of the HA clusters configured in its configuration.
    throw new IOException("Unable to map logical nameservice URI '" +
        uri + "' to a NameNode. Local configuration does not have " +
        "a failover proxy provider configured.");
  }

  NameNodeProxies.ProxyAndInfo<ClientProtocol> info =
    NameNodeProxies.createProxy(conf, uri, ClientProtocol.class);
  assert info.getDelegationTokenService().equals(token.getService()) :
    "Returned service '" + info.getDelegationTokenService().toString() +
    "' doesn't match expected service '" +
    token.getService().toString() + "'";

  return info.getProxy();
}
Project: big-c    File: DfsServlet.java
/**
 * Create a {@link NameNode} proxy from the current {@link ServletContext}. 
 */
protected ClientProtocol createNameNodeProxy() throws IOException {
  ServletContext context = getServletContext();
  // if we are running in the Name Node, use it directly rather than via 
  // rpc
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(context);
  if (nn != null) {
    return nn.getRpcServer();
  }
  InetSocketAddress nnAddr =
    NameNodeHttpServer.getNameNodeAddressFromContext(context);
  Configuration conf = new HdfsConfiguration(
      NameNodeHttpServer.getConfFromContext(context));
  return NameNodeProxies.createProxy(conf, NameNode.getUri(nnAddr),
      ClientProtocol.class).getProxy();
}
Project: big-c    File: NameNode.java
public long getProtocolVersion(String protocol, 
                               long clientVersion) throws IOException {
  if (protocol.equals(ClientProtocol.class.getName())) {
    return ClientProtocol.versionID; 
  } else if (protocol.equals(DatanodeProtocol.class.getName())){
    return DatanodeProtocol.versionID;
  } else if (protocol.equals(NamenodeProtocol.class.getName())){
    return NamenodeProtocol.versionID;
  } else if (protocol.equals(RefreshAuthorizationPolicyProtocol.class.getName())){
    return RefreshAuthorizationPolicyProtocol.versionID;
  } else if (protocol.equals(RefreshUserMappingsProtocol.class.getName())){
    return RefreshUserMappingsProtocol.versionID;
  } else if (protocol.equals(RefreshCallQueueProtocol.class.getName())) {
    return RefreshCallQueueProtocol.versionID;
  } else if (protocol.equals(GetUserMappingsProtocol.class.getName())){
    return GetUserMappingsProtocol.versionID;
  } else if (protocol.equals(TraceAdminProtocol.class.getName())){
    return TraceAdminProtocol.versionID;
  } else {
    throw new IOException("Unknown protocol to name node: " + protocol);
  }
}
Project: big-c    File: PBHelper.java
public static GetFsStatsResponseProto convert(long[] fsStats) {
  GetFsStatsResponseProto.Builder result = GetFsStatsResponseProto
      .newBuilder();
  if (fsStats.length >= ClientProtocol.GET_STATS_CAPACITY_IDX + 1)
    result.setCapacity(fsStats[ClientProtocol.GET_STATS_CAPACITY_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_USED_IDX + 1)
    result.setUsed(fsStats[ClientProtocol.GET_STATS_USED_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_REMAINING_IDX + 1)
    result.setRemaining(fsStats[ClientProtocol.GET_STATS_REMAINING_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX + 1)
    result.setUnderReplicated(
            fsStats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX + 1)
    result.setCorruptBlocks(
        fsStats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX + 1)
    result.setMissingBlocks(
        fsStats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX]);
  if (fsStats.length >= ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX + 1)
    result.setMissingReplOneBlocks(
        fsStats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX]);
  return result.build();
}
Project: big-c    File: DFSAdmin.java
/**
 * Dumps DFS data structures into specified file.
 * Usage: hdfs dfsadmin -metasave filename
 * @param argv List of command line parameters.
 * @param idx The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 */
public int metaSave(String[] argv, int idx) throws IOException {
  String pathname = argv[idx];
  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().metaSave(pathname);
      System.out.println("Created metasave file " + pathname + " in the log "
          + "directory of namenode " + proxy.getAddress());
    }
  } else {
    dfs.metaSave(pathname);
    System.out.println("Created metasave file " + pathname + " in the log " +
        "directory of namenode " + dfs.getUri());
  }
  return 0;
}
Project: hadoop    File: NameNodeProxies.java
/**
 * Creates an explicitly non-HA-enabled proxy object. Most of the time you
 * don't want to use this, and should instead use {@link NameNodeProxies#createProxy}.
 *
 * @param conf the configuration object
 * @param nnAddr address of the remote NN to connect to
 * @param xface the IPC interface which should be created
 * @param ugi the user who is making the calls on the proxy object
 * @param withRetries certain interfaces have a non-standard retry policy
 * @param fallbackToSimpleAuth - set to true or false during this method to
 *   indicate if a secure client falls back to simple auth
 * @return an object containing both the proxy and the associated
 *         delegation token service it corresponds to
 * @throws IOException
 */
@SuppressWarnings("unchecked")
public static <T> ProxyAndInfo<T> createNonHAProxy(
    Configuration conf, InetSocketAddress nnAddr, Class<T> xface,
    UserGroupInformation ugi, boolean withRetries,
    AtomicBoolean fallbackToSimpleAuth) throws IOException {
  Text dtService = SecurityUtil.buildTokenService(nnAddr);

  T proxy;
  if (xface == ClientProtocol.class) {
    proxy = (T) createNNProxyWithClientProtocol(nnAddr, conf, ugi,
        withRetries, fallbackToSimpleAuth);
  } else if (xface == JournalProtocol.class) {
    proxy = (T) createNNProxyWithJournalProtocol(nnAddr, conf, ugi);
  } else if (xface == NamenodeProtocol.class) {
    proxy = (T) createNNProxyWithNamenodeProtocol(nnAddr, conf, ugi,
        withRetries);
  } else if (xface == GetUserMappingsProtocol.class) {
    proxy = (T) createNNProxyWithGetUserMappingsProtocol(nnAddr, conf, ugi);
  } else if (xface == RefreshUserMappingsProtocol.class) {
    proxy = (T) createNNProxyWithRefreshUserMappingsProtocol(nnAddr, conf, ugi);
  } else if (xface == RefreshAuthorizationPolicyProtocol.class) {
    proxy = (T) createNNProxyWithRefreshAuthorizationPolicyProtocol(nnAddr,
        conf, ugi);
  } else if (xface == RefreshCallQueueProtocol.class) {
    proxy = (T) createNNProxyWithRefreshCallQueueProtocol(nnAddr, conf, ugi);
  } else {
    String message = "Unsupported protocol found when creating the proxy " +
        "connection to NameNode: " +
        ((xface != null) ? xface.getName() : "null");
    LOG.error(message);
    throw new IllegalStateException(message);
  }

  return new ProxyAndInfo<T>(proxy, dtService, nnAddr);
}
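A hedged calling sketch for the non-HA path; the host and port are placeholders, and passing null for fallbackToSimpleAuth simply skips the fallback signal.

// Build a non-HA ClientProtocol proxy for a fixed NameNode address,
// authenticated as the current user, with the standard retry wrapper.
InetSocketAddress nnAddr = new InetSocketAddress("nn.example.com", 8020);
ProxyAndInfo<ClientProtocol> info = NameNodeProxies.createNonHAProxy(
    conf, nnAddr, ClientProtocol.class,
    UserGroupInformation.getCurrentUser(), true /* withRetries */, null);
ClientProtocol namenode = info.getProxy();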
Project: hadoop    File: NameNodeProxies.java
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    Map<String, RetryPolicy> methodNameToPolicyMap 
               = new HashMap<String, RetryPolicy>();

    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
Project: hadoop    File: DFSClient.java
@SuppressWarnings("unchecked")
@Override
public long renew(Token<?> token, Configuration conf) throws IOException {
  Token<DelegationTokenIdentifier> delToken = 
    (Token<DelegationTokenIdentifier>) token;
  ClientProtocol nn = getNNProxy(delToken, conf);
  try {
    return nn.renewDelegationToken(delToken);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(InvalidToken.class, 
                                   AccessControlException.class);
  }
}
Project: hadoop    File: DFSClient.java
@SuppressWarnings("unchecked")
@Override
public void cancel(Token<?> token, Configuration conf) throws IOException {
  Token<DelegationTokenIdentifier> delToken = 
      (Token<DelegationTokenIdentifier>) token;
  LOG.info("Cancelling " + 
           DelegationTokenIdentifier.stringifyToken(delToken));
  ClientProtocol nn = getNNProxy(delToken, conf);
  try {
    nn.cancelDelegationToken(delToken);
  } catch (RemoteException re) {
    throw re.unwrapRemoteException(InvalidToken.class,
        AccessControlException.class);
  }
}
Project: hadoop    File: DFSClient.java
/**
 * @see ClientProtocol#getBlockLocations(String, long, long)
 */
static LocatedBlocks callGetBlockLocations(ClientProtocol namenode,
    String src, long start, long length) 
    throws IOException {
  try {
    return namenode.getBlockLocations(src, start, length);
  } catch(RemoteException re) {
    throw re.unwrapRemoteException(AccessControlException.class,
                                   FileNotFoundException.class,
                                   UnresolvedPathException.class);
  }
}
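A hedged calling sketch; the path is a placeholder. Since callGetBlockLocations is package-private, code outside the package would issue the underlying RPC directly, which is all the helper wraps.

// Locate the blocks backing the first megabyte of a file.
LocatedBlocks blocks =
    namenode.getBlockLocations("/user/alice/data.bin", 0, 1L << 20);
for (LocatedBlock b : blocks.getLocatedBlocks()) {
  System.out.println(b.getBlock() + " on "
      + java.util.Arrays.toString(b.getLocations()));
}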
Project: hadoop    File: DFSInotifyEventInputStream.java
DFSInotifyEventInputStream(Sampler traceSampler, ClientProtocol namenode,
      long lastReadTxid) throws IOException {
  this.traceSampler = traceSampler;
  this.namenode = namenode;
  this.it = Iterators.emptyIterator();
  this.lastReadTxid = lastReadTxid;
}
Project: hadoop    File: FSNamesystem.java
/** @see ClientProtocol#getStats() */
long[] getStats() {
  final long[] stats = datanodeStatistics.getStats();
  stats[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = getUnderReplicatedBlocks();
  stats[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = getCorruptReplicaBlocks();
  stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = getMissingBlocksCount();
  stats[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
      getMissingReplOneBlocksCount();
  return stats;
}
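On the client side, the same array is returned by ClientProtocol#getStats() and read back through the matching index constants. A hedged sketch, assuming namenode is a ClientProtocol proxy already in scope.

long[] stats = namenode.getStats();
long capacity = stats[ClientProtocol.GET_STATS_CAPACITY_IDX];
long used = stats[ClientProtocol.GET_STATS_USED_IDX];
long missing = stats[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX];
System.out.println("used " + used + " of " + capacity
    + " bytes; missing blocks: " + missing);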
Project: hadoop    File: FileDataServlet.java
/** Create a redirection URL */
private URL createRedirectURL(String path, String encodedPath, HdfsFileStatus status, 
    UserGroupInformation ugi, ClientProtocol nnproxy, HttpServletRequest request, String dt)
    throws IOException {
  String scheme = request.getScheme();
  final LocatedBlocks blks = nnproxy.getBlockLocations(
      status.getFullPath(new Path(path)).toUri().getPath(), 0, 1);
  final Configuration conf = NameNodeHttpServer.getConfFromContext(
      getServletContext());
  final DatanodeID host = pickSrcDatanode(blks, status, conf);
  final String hostname;
  if (host instanceof DatanodeInfo) {
    hostname = host.getHostName();
  } else {
    hostname = host.getIpAddr();
  }

  int port = "https".equals(scheme) ? host.getInfoSecurePort() : host
      .getInfoPort();

  String dtParam = "";
  if (dt != null) {
    dtParam = JspHelper.getDelegationTokenUrlParam(dt);
  }

  // Add namenode address to the url params
  NameNode nn = NameNodeHttpServer.getNameNodeFromContext(
      getServletContext());
  String addr = nn.getNameNodeAddressHostPortString();
  String addrParam = JspHelper.getUrlParam(JspHelper.NAMENODE_ADDRESS, addr);

  return new URL(scheme, hostname, port,
      "/streamFile" + encodedPath + '?' +
      "ugi=" + ServletUtil.encodeQueryValue(ugi.getShortUserName()) +
      dtParam + addrParam);
}
Project: hadoop    File: NameNodeConnector.java
public NameNodeConnector(String name, URI nameNodeUri, Path idPath,
                         List<Path> targetPaths, Configuration conf,
                         int maxNotChangedIterations)
    throws IOException {
  this.nameNodeUri = nameNodeUri;
  this.idPath = idPath;
  this.targetPaths = targetPaths == null || targetPaths.isEmpty() ? Arrays
      .asList(new Path("/")) : targetPaths;
  this.maxNotChangedIterations = maxNotChangedIterations;

  this.namenode = NameNodeProxies.createProxy(conf, nameNodeUri,
      NamenodeProtocol.class).getProxy();
  this.client = NameNodeProxies.createProxy(conf, nameNodeUri,
      ClientProtocol.class, fallbackToSimpleAuth).getProxy();
  this.fs = (DistributedFileSystem)FileSystem.get(nameNodeUri, conf);

  final NamespaceInfo namespaceinfo = namenode.versionRequest();
  this.blockpoolID = namespaceinfo.getBlockPoolID();

  final FsServerDefaults defaults = fs.getServerDefaults(new Path("/"));
  this.keyManager = new KeyManager(blockpoolID, namenode,
      defaults.getEncryptDataTransfer(), conf);
  // if it is for test, we do not create the id file
  out = checkAndMarkRunning();
  if (out == null) {
    // Exit if there is another one running.
    throw new IOException("Another " + name + " is running.");
  }
}
Project: hadoop    File: PBHelper.java
public static long[] convert(GetFsStatsResponseProto res) {
  long[] result = new long[7];
  result[ClientProtocol.GET_STATS_CAPACITY_IDX] = res.getCapacity();
  result[ClientProtocol.GET_STATS_USED_IDX] = res.getUsed();
  result[ClientProtocol.GET_STATS_REMAINING_IDX] = res.getRemaining();
  result[ClientProtocol.GET_STATS_UNDER_REPLICATED_IDX] = res.getUnderReplicated();
  result[ClientProtocol.GET_STATS_CORRUPT_BLOCKS_IDX] = res.getCorruptBlocks();
  result[ClientProtocol.GET_STATS_MISSING_BLOCKS_IDX] = res.getMissingBlocks();
  result[ClientProtocol.GET_STATS_MISSING_REPL_ONE_BLOCKS_IDX] =
      res.getMissingReplOneBlocks();
  return result;
}
Project: hadoop    File: DFSAdmin.java
private boolean waitExitSafeMode(ClientProtocol nn, boolean inSafeMode)
    throws IOException {
  while (inSafeMode) {
    try {
      Thread.sleep(5000);
    } catch (java.lang.InterruptedException e) {
      throw new IOException("Wait Interrupted");
    }
    inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
  }
  return inSafeMode;
}
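The same RPC both queries and toggles safe mode. A hedged sketch (SafeModeAction here is HdfsConstants.SafeModeAction, as in DFSAdmin); whether leaving safe mode is appropriate depends on why the NameNode entered it.

// Query safe mode and, if the NameNode is in it, ask it to leave.
boolean inSafeMode = nn.setSafeMode(SafeModeAction.SAFEMODE_GET, false);
if (inSafeMode) {
  nn.setSafeMode(SafeModeAction.SAFEMODE_LEAVE, false);
}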
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to save the namespace.
 * Usage: hdfs dfsadmin -saveNamespace
 * @exception IOException 
 * @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()
 */
public int saveNamespace() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().saveNamespace();
      System.out.println("Save namespace successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.saveNamespace();
    System.out.println("Save namespace successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to reread the hosts and excluded hosts 
 * file.
 * Usage: hdfs dfsadmin -refreshNodes
 * @exception IOException 
 */
public int refreshNodes() throws IOException {
  int exitCode = -1;

  DistributedFileSystem dfs = getDFS();
  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaEnabled = HAUtil.isLogicalUri(dfsConf, dfsUri);

  if (isHaEnabled) {
    String nsId = dfsUri.getHost();
    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy: proxies) {
      proxy.getProxy().refreshNodes();
      System.out.println("Refresh nodes successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.refreshNodes();
    System.out.println("Refresh nodes successful");
  }
  exitCode = 0;

  return exitCode;
}
Project: hadoop    File: DFSAdmin.java
/**
 * Command to ask the namenode to finalize previously performed upgrade.
 * Usage: hdfs dfsadmin -finalizeUpgrade
 * @exception IOException 
 */
public int finalizeUpgrade() throws IOException {
  DistributedFileSystem dfs = getDFS();

  Configuration dfsConf = dfs.getConf();
  URI dfsUri = dfs.getUri();
  boolean isHaAndLogicalUri = HAUtil.isLogicalUri(dfsConf, dfsUri);
  if (isHaAndLogicalUri) {
    // In the case of HA and logical URI, run finalizeUpgrade for all
    // NNs in this nameservice.
    String nsId = dfsUri.getHost();
    List<ClientProtocol> namenodes =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf, nsId);
    if (!HAUtil.isAtLeastOneActive(namenodes)) {
      throw new IOException("Cannot finalize with no NameNode active");
    }

    List<ProxyAndInfo<ClientProtocol>> proxies =
        HAUtil.getProxiesForAllNameNodesInNameservice(dfsConf,
        nsId, ClientProtocol.class);
    for (ProxyAndInfo<ClientProtocol> proxy : proxies) {
      proxy.getProxy().finalizeUpgrade();
      System.out.println("Finalize upgrade successful for " +
          proxy.getAddress());
    }
  } else {
    dfs.finalizeUpgrade();
    System.out.println("Finalize upgrade successful");
  }

  return 0;
}