Java Class org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol Example Source Code

Project: hadoop    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
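
Note that the proxy is created inside UserGroupInformation.doAs, so the RPC connection is set up under the caller's security context, and that an InterruptedException only logs a warning, leaving the method to return a null proxy. Below is a minimal, self-contained sketch of the same doAs pattern; the class name DoAsDemo is ours, not from the snippet.

import java.security.PrivilegedExceptionAction;

import org.apache.hadoop.security.UserGroupInformation;

public class DoAsDemo {
  public static void main(String[] args) throws Exception {
    UserGroupInformation ugi = UserGroupInformation.getCurrentUser();
    String user = ugi.doAs(new PrivilegedExceptionAction<String>() {
      @Override
      public String run() throws Exception {
        // Anything run here executes under ugi's security context,
        // exactly as the proxy creation does in the snippet above.
        return UserGroupInformation.getCurrentUser().getShortUserName();
      }
    });
    System.out.println("ran as " + user);
  }
}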
Project: hadoop    File: BlockStorageLocationUtil.java
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope =
      Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
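
This call() is designed to run inside an ExecutorService; the comment about handling the IOException "with the Future" refers to the standard fan-out pattern sketched below using plain java.util.concurrent. The task bodies here are placeholders for the per-datanode RPC.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.Callable;
import java.util.concurrent.ExecutionException;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
import java.util.concurrent.Future;

public class CallableFanOutDemo {
  public static void main(String[] args) throws Exception {
    ExecutorService pool = Executors.newFixedThreadPool(4);
    List<Callable<String>> tasks = new ArrayList<>();
    for (int i = 0; i < 4; i++) {
      final int id = i;
      tasks.add(() -> "metadata-from-datanode-" + id);
    }
    // invokeAll blocks until every task has finished; any IOException a
    // task re-threw surfaces here, wrapped in ExecutionException.
    for (Future<String> f : pool.invokeAll(tasks)) {
      try {
        System.out.println(f.get());
      } catch (ExecutionException e) {
        System.err.println("RPC failed: " + e.getCause());
      }
    }
    pool.shutdown();
  }
}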
Project: hadoop    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: hadoop    File: DFSAdmin.java
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
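
On the command line this corresponds to hdfs dfsadmin -shutdownDatanode <datanode_host:ipc_port> [upgrade]. A minimal sketch of driving the same code path programmatically through ToolRunner follows; the host:port is a placeholder.

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ShutdownDatanodeDemo {
  public static void main(String[] args) throws Exception {
    // Same code path as: hdfs dfsadmin -shutdownDatanode dn-host:50020 upgrade
    int exitCode = ToolRunner.run(new DFSAdmin(),
        new String[] {"-shutdownDatanode", "dn-host:50020", "upgrade"});
    System.exit(exitCode);
  }
}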
Project: aliyun-oss-hadoop-fs    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtilClient.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Project: aliyun-oss-hadoop-fs    File: DFSAdmin.java
int getReconfigurableProperties(String nodeType, String address,
    PrintStream out, PrintStream err) throws IOException {
  if ("datanode".equals(nodeType)) {
    ClientDatanodeProtocol dnProxy = getDataNodeProxy(address);
    try {
      List<String> properties =
          dnProxy.listReconfigurableProperties();
      out.println(
          "Configuration properties that are allowed to be reconfigured:");
      for (String name : properties) {
        out.println(name);
      }
    } catch (IOException e) {
      err.println("DataNode reconfiguration: " + e + ".");
      return 1;
    }
  } else {
    err.println("Node type " + nodeType +
        " does not support reconfiguration.");
    return 1;
  }
  return 0;
}
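
In branches that ship listReconfigurableProperties, this is reached from the shell as hdfs dfsadmin -reconfig datanode <host:ipc_port> properties. A sketch of the same invocation through ToolRunner, analogous to the earlier example (the address is a placeholder):

import org.apache.hadoop.hdfs.tools.DFSAdmin;
import org.apache.hadoop.util.ToolRunner;

public class ListReconfigurablesDemo {
  public static void main(String[] args) throws Exception {
    // Prints the datanode properties that may be changed without a restart.
    int exitCode = ToolRunner.run(new DFSAdmin(),
        new String[] {"-reconfig", "datanode", "dn-host:50020", "properties"});
    System.exit(exitCode);
  }
}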
Project: aliyun-oss-hadoop-fs    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtilClient.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: aliyun-oss-hadoop-fs    File: DFSAdmin.java
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
Project: aliyun-oss-hadoop-fs    File: TestBalancerBandwidth.java
private void runGetBalancerBandwidthCmd(DFSAdmin admin, String[] args,
    ClientDatanodeProtocol proxy, long expectedBandwidth) throws Exception {
  PrintStream initialStdOut = System.out;
  outContent.reset();
  try {
    System.setOut(outStream);
    int exitCode = admin.run(args);
    assertEquals("DFSAdmin should return 0", 0, exitCode);
    String bandwidthOutMsg = "Balancer bandwidth is " + expectedBandwidth
        + " bytes per second.";
    String strOut = new String(outContent.toByteArray(), UTF8);
    assertTrue("Wrong balancer bandwidth!", strOut.contains(bandwidthOutMsg));
  } finally {
    System.setOut(initialStdOut);
  }
}
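
The test swaps System.out for a buffered stream, runs the admin command, and asserts on the captured text; restoring the original stream in finally is essential. A stripped-down, self-contained sketch of that capture pattern (the printed bandwidth value is a placeholder):

import java.io.ByteArrayOutputStream;
import java.io.PrintStream;

public class StdoutCaptureDemo {
  public static void main(String[] args) {
    PrintStream initialStdOut = System.out;
    ByteArrayOutputStream outContent = new ByteArrayOutputStream();
    try {
      System.setOut(new PrintStream(outContent, true));
      System.out.println("Balancer bandwidth is 1048576 bytes per second.");
    } finally {
      System.setOut(initialStdOut); // always restore, as the test does
    }
    System.out.println("captured: " + outContent.toString().trim());
  }
}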
Project: big-c    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Project: big-c    File: BlockStorageLocationUtil.java
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  TraceScope scope =
      Trace.startSpan("getHdfsBlocksMetadata", parentSpan);
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    scope.close();
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Project: big-c    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: big-c    File: DFSAdmin.java
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Project: hadoop-2.6.0-cdh5.4.3    File: BlockStorageLocationUtil.java
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: hadoop-2.6.0-cdh5.4.3    File: DFSAdmin.java
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
Project: hadoop-EAR    File: HadoopThriftDatanodeServer.java
/**
 * Creates one RPC object if necessary.
 */
private synchronized ClientDatanodeProtocol getOrCreate(String name)
  throws IOException {
  ClientDatanodeProtocol obj =  hadoopHash.get(name);
  if (obj != null) {
    return obj;
  }
  // connection does not exist, create a new one.
  DatanodeID dn = new DatanodeID(name, "", -1, getPort(name));
  ClientDatanodeProtocol instance =
    DFSClient.createClientDatanodeProtocolProxy(dn, conf, timeout); 

  // cache connection
  hadoopHash.put(name, instance);
  return instance;
}
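
The same get-or-create caching can be written more compactly, and without synchronizing the whole method, using ConcurrentHashMap.computeIfAbsent; checked exceptions from the real factory would need wrapping. A generic sketch, with String standing in for the ClientDatanodeProtocol proxy:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ProxyCacheDemo {
  private final ConcurrentMap<String, String> cache = new ConcurrentHashMap<>();

  String getOrCreate(String name) {
    // computeIfAbsent runs the factory at most once per key, so concurrent
    // callers never create duplicate connections for the same datanode.
    return cache.computeIfAbsent(name, n -> "proxy-for-" + n);
  }

  public static void main(String[] args) {
    ProxyCacheDemo demo = new ProxyCacheDemo();
    System.out.println(demo.getOrCreate("dn1")); // created
    System.out.println(demo.getOrCreate("dn1")); // served from the cache
  }
}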
Project: hadoop-EAR    File: HadoopThriftDatanodeServer.java
public ThdfsBlock getBlockInfo(TDatanodeID datanode,
                      ThdfsNamespaceId namespaceid,
                      ThdfsBlock block)
                      throws ThriftIOException, TException { 
  Block blk = new Block(block.blockId, block.numBytes, 
                         block.generationStamp);
  // make RPC to datanode
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    Block nblk = remote.getBlockInfo(namespaceid.id, blk);
    return new ThdfsBlock(nblk.getBlockId(), nblk.getNumBytes(),
                          nblk.getGenerationStamp());
  } catch (IOException e) {
    String msg = "Error getBlockInfo datanode " + datanode.name +
                 " namespaceid " + namespaceid.id +
                 " block " + blk;
    LOG.warn(msg);
    throw new ThriftIOException(msg);
  }
}
Project: hadoop-EAR    File: HadoopThriftDatanodeServer.java
public ThdfsBlockPath getBlockPathInfo(TDatanodeID datanode,
                              ThdfsNamespaceId namespaceId,
                              ThdfsBlock block)
                              throws ThriftIOException, TException {
  Block blk = new Block(block.blockId, block.numBytes, 
                        block.generationStamp);

  // make RPC to datanode to find local pathnames of blocks
  try {
    ClientDatanodeProtocol remote = getOrCreate(datanode.name);
    BlockPathInfo pathinfo = remote.getBlockPathInfo(namespaceId.id, blk);
    return new ThdfsBlockPath(pathinfo.getBlockPath(), 
                              pathinfo.getMetaPath());
  } catch (IOException e) {
    String msg = "Error getBlockPathInfo datanode " + datanode.name +
                 " namespaceid " + namespaceId.id +
                 " block " + blk;
    LOG.warn(msg);
    throw new ThriftIOException(msg);
  }
}
Project: hadoop-EAR    File: DFSClient.java
static ProtocolProxy<ClientDatanodeProtocol> createClientDNProtocolProxy (
    DatanodeID datanodeid, Configuration conf, int socketTimeout)
    throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
    datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
  }
  UserGroupInformation ugi;
  try {
    ugi = UserGroupInformation.login(conf);
  } catch (LoginException le) {
    throw new RuntimeException("Couldn't login!", le);
  }

  return RPC.getProtocolProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf), socketTimeout);
}
Project: hadoop-EAR    File: FileFixer.java
/**
 * Setup a session with the specified datanode
 */
static ClientDatanodeProtocol createClientDatanodeProtocolProxy (
    DatanodeInfo datanodeid, Configuration conf) throws IOException {
  InetSocketAddress addr = NetUtils.createSocketAddr(
    datanodeid.getHost() + ":" + datanodeid.getIpcPort());
  if (ClientDatanodeProtocol.LOG.isDebugEnabled()) {
    ClientDatanodeProtocol.LOG.debug("ClientDatanodeProtocol addr=" + addr);
  }
  try {
    return (ClientDatanodeProtocol)RPC.getProxy(ClientDatanodeProtocol.class,
      ClientDatanodeProtocol.versionID, addr, conf);
  } catch (RPC.VersionMismatch e) {
    long clientVersion = e.getClientVersion();
    long datanodeVersion = e.getServerVersion();
    if (clientVersion > datanodeVersion &&
        !ProtocolCompatible.isCompatibleClientDatanodeProtocol(
            clientVersion, datanodeVersion)) {
      throw new RPC.VersionIncompatible(
          ClientDatanodeProtocol.class.getName(), clientVersion, datanodeVersion);
    }
    return (ClientDatanodeProtocol)e.getProxy();
  }
}
Project: hadoop-EAR    File: DFSAdmin.java
/**
 * Refresh the namenodes served by the {@link DataNode}.
 * Usage: java DFSAdmin -refreshNamenodes datanodehost:port
 * @param argv List of command line parameters.
 * @param i The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 * @return exitcode 0 on success, non-zero on failure
 */
public int refreshNamenodes(String[] argv, int i) throws IOException {
  ClientDatanodeProtocol datanode = null;
  String dnAddr = (argv.length == 2) ? argv[i] : null;
  try {
    datanode = getClientDatanodeProtocol(dnAddr);
    if (datanode != null) {
      datanode.refreshNamenodes();
      return 0;
    } else {
      return -1;
    }
  } finally {
    if (datanode != null && Proxy.isProxyClass(datanode.getClass())) {
      RPC.stopProxy(datanode);
    }
  }
}
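
The finally block only calls RPC.stopProxy when the object is a java.lang.reflect dynamic proxy, since that is what carries the RPC invocation handler to close. A self-contained sketch of what Proxy.isProxyClass distinguishes (the Service interface is ours):

import java.lang.reflect.InvocationHandler;
import java.lang.reflect.Proxy;

public class ProxyCheckDemo {
  interface Service { void ping(); }

  public static void main(String[] args) {
    InvocationHandler handler = (proxy, method, a) -> null;
    Service dynamic = (Service) Proxy.newProxyInstance(
        Service.class.getClassLoader(), new Class<?>[] {Service.class}, handler);
    Service plain = () -> { };

    System.out.println(Proxy.isProxyClass(dynamic.getClass())); // true
    System.out.println(Proxy.isProxyClass(plain.getClass()));   // false
  }
}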
Project: hadoop-EAR    File: DFSAdmin.java
private int refreshOfferService(String serviceName) throws IOException, InterruptedException {
  ClientDatanodeProtocol datanode = null;
  try {
    datanode = getClientDatanodeProtocol(null);
    if (datanode != null) {
      datanode.refreshOfferService(serviceName);
      return 0;
    } else {
      return -1;
    }
  } finally {
    if (datanode != null && Proxy.isProxyClass(datanode.getClass())) {
      RPC.stopProxy(datanode);
    }
  }
}
Project: hadoop-EAR    File: DFSAdmin.java
public int refreshDatanodeDataDirs(String [] argv, int i) throws IOException {
  ClientDatanodeProtocol datanode = null;
  String confFilePath = argv[i++];
  String dnAddr = (argv.length == 3) ? argv[i] : null;
  try {
    datanode = getClientDatanodeProtocol(dnAddr);
    if (datanode != null) {
      datanode.refreshDataDirs(confFilePath);
      return 0;
    } else {
      return -1;
    }
  } finally {
    if (datanode != null && Proxy.isProxyClass(datanode.getClass())) {
      RPC.stopProxy(datanode);
    }
  }
}
Project: hadoop-EAR    File: DFSAdmin.java
/**
 * Removes a namespace from a given {@link DataNode}. It defaults to the
 * datanode on the local machine if no datanode is given.
 * 
 * Usage: java DFSAdmin -removeNamespace nameserviceId [datanodehost:datanodeport]
 * @param argv List of command line parameters.
 * @param i The index of the command that is being processed.
 * @exception IOException if an error occurred while accessing
 *            the file or path.
 * @return exit code 0 on success, non-zero on failure
 */
public int removeNamespace(String[] argv, int i) throws IOException {
  String nameserviceId = argv[i++];
  ClientDatanodeProtocol datanode = null;
  String dnAddr = (argv.length == 3) ? argv[i] : null;

  try {
    datanode = getClientDatanodeProtocol(dnAddr);
    if (datanode != null) {
      datanode.removeNamespace(nameserviceId);
      return 0;
    } else {
      return -1;
    }
  } finally {
    if (datanode != null && Proxy.isProxyClass(datanode.getClass())) {
      RPC.stopProxy(datanode);
    }
  }
}
Project: hadoop-EAR    File: FastCopy.java
/**
 * Creates an RPC connection to a datanode if connection not already
 * cached and caches the connection if a new RPC connection is created
 *
 * @param dn
 *          the datanode to which we need to connect to
 * @param conf
 *          the configuration for this RPC
 * @param timeout
 *          the RPC timeout for this connection
 * @return the RPC protocol object we can use to make RPC calls
 * @throws IOException
 */
private ClientDatanodeProtocol getDatanodeConnection(DatanodeInfo dn,
    Configuration conf, int timeout) throws IOException {
  // This is done to improve read performance: no synchronization on the
  // map is needed for a read. We go through this method for each block.
  ClientDatanodeProtocol cdp = datanodeMap.get(dn.getName());
  if (cdp != null) {
    return cdp;
  }
  synchronized (datanodeMap) {
    cdp = datanodeMap.get(dn.getName());
    if (cdp == null) {
      LOG.debug("Creating new RPC connection to : " + dn.getName());
      cdp = DFSClient.createClientDatanodeProtocolProxy(
          dn, conf, timeout);
      datanodeMap.put(dn.getName(), cdp);
    }
  }
  return cdp;
}
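
The method takes the lock only on the slow path and re-checks the map inside it, so two threads racing on the same datanode cannot both create a connection. A minimal sketch of that check-lock-recheck shape; String stands in for the proxy, and a concurrent map keeps the unsynchronized read safe:

import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class DoubleCheckedCacheDemo {
  private final ConcurrentMap<String, String> map = new ConcurrentHashMap<>();

  String getOrCreate(String key) {
    String v = map.get(key);          // lock-free fast path, hit on most calls
    if (v != null) {
      return v;
    }
    synchronized (map) {              // slow path: serialize creation
      v = map.get(key);               // re-check: another thread may have won
      if (v == null) {
        v = "connection-to-" + key;   // stands in for the RPC proxy
        map.put(key, v);
      }
      return v;
    }
  }

  public static void main(String[] args) {
    DoubleCheckedCacheDemo demo = new DoubleCheckedCacheDemo();
    System.out.println(demo.getOrCreate("dn1"));
    System.out.println(demo.getOrCreate("dn1")); // cached
  }
}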
Project: hadoop-EAR    File: FastCopy.java
/**
 * Tears down all RPC connections; you MUST call this once you are done.
 * @throws IOException
 */
public void shutdown() throws IOException {
  // Clean up RPC connections.
  Iterator <ClientDatanodeProtocol> connections =
    datanodeMap.values().iterator();
  while (connections.hasNext()) {
    ClientDatanodeProtocol cnxn = connections.next();
    RPC.stopProxy(cnxn);
  }
  datanodeMap.clear();
  executor.shutdownNow();
  synchronized (leaseCheckers) {
    for (LeaseChecker checker : leaseCheckers.values()) {
      checker.closeRenewal();
    }
  }
}
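
Since callers "MUST call this once you are done", one way to make the contract harder to forget is an AutoCloseable wrapper used with try-with-resources. A sketch, with Object standing in for the ClientDatanodeProtocol proxies and the class name ProxyPool being ours:

import java.util.Iterator;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;

public class ProxyPool implements AutoCloseable {
  private final ConcurrentMap<String, Object> proxies = new ConcurrentHashMap<>();

  void put(String datanode, Object proxy) {
    proxies.put(datanode, proxy);
  }

  @Override
  public void close() {
    // Mirrors shutdown() above: stop every cached proxy, then clear the map.
    Iterator<Object> it = proxies.values().iterator();
    while (it.hasNext()) {
      it.next();   // RPC.stopProxy(...) would be called here in Hadoop code
      it.remove();
    }
  }

  public static void main(String[] args) {
    try (ProxyPool pool = new ProxyPool()) {
      pool.put("dn1", new Object());
    } // close() runs automatically, even on exceptions
  }
}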
Project: hadoop-plus    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Project: hadoop-plus    File: BlockReaderLocalLegacy.java
private static BlockLocalPathInfo getBlockPathInfo(UserGroupInformation ugi,
    ExtendedBlock blk, DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
    throws IOException {
  LocalDatanodeInfo localDatanodeInfo = getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo.getDatanodeProxy(ugi, node,
      conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    if (pathinfo != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
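
The pattern worth noting here is invalidation on failure: the cached datanode proxy is dropped (resetDatanodeProxy) whenever an RPC through it throws, so the next call rebuilds a fresh connection. A self-contained sketch of that shape; all names below are ours:

public class CachedLookupDemo {
  private String proxy; // stands in for the cached datanode proxy

  private String getProxy() {
    if (proxy == null) {
      proxy = "rpc-proxy"; // expensive to create, so it is cached
    }
    return proxy;
  }

  private void resetProxy() {
    proxy = null;
  }

  String lookup(boolean fail) throws Exception {
    String p = getProxy();
    try {
      if (fail) {
        throw new Exception("simulated RPC failure");
      }
      return "path-info-via-" + p;
    } catch (Exception e) {
      resetProxy(); // drop the possibly-broken proxy; next call rebuilds it
      throw e;
    }
  }

  public static void main(String[] args) throws Exception {
    CachedLookupDemo demo = new CachedLookupDemo();
    System.out.println(demo.lookup(false));
    try {
      demo.lookup(true);
    } catch (Exception expected) {
      // the cached proxy has been invalidated
    }
    System.out.println(demo.lookup(false)); // rebuilt transparently
  }
}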
Project: hadoop-plus    File: BlockStorageLocationUtil.java
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(extendedBlocks, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Project: hadoop-plus    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: FlexMap    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}
Project: FlexMap    File: BlockStorageLocationUtil.java
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  try {
    cdp = DFSUtil.createClientDatanodeProtocolProxy(datanode, configuration,
        timeout, connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(poolId, blockIds, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Project: FlexMap    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_KERBEROS_PRINCIPAL_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol =     
      DFSUtil.createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: FlexMap    File: DFSAdmin.java
private int shutdownDatanode(String[] argv, int i) throws IOException {
  final String dn = argv[i];
  ClientDatanodeProtocol dnProxy = getDataNodeProxy(dn);
  boolean upgrade = false;
  if (argv.length-1 == i+1) {
    if ("upgrade".equalsIgnoreCase(argv[i+1])) {
      upgrade = true;
    } else {
      printUsage("-shutdownDatanode");
      return -1;
    }
  }
  dnProxy.shutdownDatanode(upgrade);
  System.out.println("Submitted a shutdown request to datanode " + dn);
  return 0;
}
Project: hops    File: BlockReaderLocal.java
private static BlockLocalPathInfo getBlockPathInfo(ExtendedBlock blk,
    DatanodeInfo node, Configuration conf, int timeout,
    Token<BlockTokenIdentifier> token, boolean connectToDnViaHostname)
    throws IOException {
  LocalDatanodeInfo localDatanodeInfo =
      getLocalDatanodeInfo(node.getIpcPort());
  BlockLocalPathInfo pathinfo = null;
  ClientDatanodeProtocol proxy = localDatanodeInfo
      .getDatanodeProxy(node, conf, timeout, connectToDnViaHostname);
  try {
    // make RPC to local datanode to find local pathnames of blocks
    pathinfo = proxy.getBlockLocalPathInfo(blk, token);
    if (pathinfo != null) {
      if (LOG.isDebugEnabled()) {
        LOG.debug("Cached location of block " + blk + " as " + pathinfo);
      }
      localDatanodeInfo.setBlockLocalPathInfo(blk, pathinfo);
    }
  } catch (IOException e) {
    localDatanodeInfo.resetDatanodeProxy(); // Reset proxy on error
    throw e;
  }
  return pathinfo;
}
Project: hops    File: BlockStorageLocationUtil.java
@Override
public HdfsBlocksMetadata call() throws Exception {
  HdfsBlocksMetadata metadata = null;
  // Create the RPC proxy and make the RPC
  ClientDatanodeProtocol cdp = null;
  try {
    cdp = DFSUtil
        .createClientDatanodeProtocolProxy(datanode, configuration, timeout,
            connectToDnViaHostname);
    metadata = cdp.getHdfsBlocksMetadata(extendedBlocks, dnTokens);
  } catch (IOException e) {
    // Bubble this up to the caller, handle with the Future
    throw e;
  } finally {
    if (cdp != null) {
      RPC.stopProxy(cdp);
    }
  }
  return metadata;
}
Project: hops    File: DFSAdmin.java
private ClientDatanodeProtocol getDataNodeProxy(String datanode)
    throws IOException {
  InetSocketAddress datanodeAddr = NetUtils.createSocketAddr(datanode);
  // Get the current configuration
  Configuration conf = getConf();

  // For the datanode proxy, the server principal should be the DN's.
  conf.set(CommonConfigurationKeys.HADOOP_SECURITY_SERVICE_USER_NAME_KEY,
      conf.get(DFSConfigKeys.DFS_DATANODE_USER_NAME_KEY, ""));

  // Create the client
  ClientDatanodeProtocol dnProtocol = DFSUtil
      .createClientDatanodeProtocolProxy(datanodeAddr, getUGI(), conf,
          NetUtils.getSocketFactory(conf, ClientDatanodeProtocol.class));
  return dnProtocol;
}
Project: hadoop-TCP    File: BlockReaderLocalLegacy.java
private synchronized ClientDatanodeProtocol getDatanodeProxy(
    UserGroupInformation ugi, final DatanodeInfo node,
    final Configuration conf, final int socketTimeout,
    final boolean connectToDnViaHostname) throws IOException {
  if (proxy == null) {
    try {
      proxy = ugi.doAs(new PrivilegedExceptionAction<ClientDatanodeProtocol>() {
        @Override
        public ClientDatanodeProtocol run() throws Exception {
          return DFSUtil.createClientDatanodeProtocolProxy(node, conf,
              socketTimeout, connectToDnViaHostname);
        }
      });
    } catch (InterruptedException e) {
      LOG.warn("encountered exception ", e);
    }
  }
  return proxy;
}