Example source code for the Java class org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB

Project: hadoop    File: NameNodeProxies.java
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
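createNNProxyWithClientProtocol is private; client code normally reaches it through NameNodeProxies.createNonHAProxy, as the test snippets further down do. A minimal usage sketch, assuming a NameNode reachable at localhost:8020; the address, path, and class name are illustrative and not part of the project source:

import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.permission.FsPermission;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.ipc.RPC;
import org.apache.hadoop.security.UserGroupInformation;

public class ClientProtocolProxySketch {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    // Placeholder NameNode RPC address; substitute the real one.
    InetSocketAddress nnAddress = new InetSocketAddress("localhost", 8020);
    // withRetries = true takes the RetryProxy branch shown above.
    ClientProtocol namenode = NameNodeProxies.createNonHAProxy(
        conf, nnAddress, ClientProtocol.class,
        UserGroupInformation.getCurrentUser(), true).getProxy();
    try {
      // The returned proxy speaks ClientProtocol directly, e.g. creating a directory.
      namenode.mkdirs("/tmp/example", FsPermission.getDefault(), true);
    } finally {
      RPC.stopProxy(namenode);
    }
  }
}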
Project: aliyun-oss-hadoop-fs    File: NameNodeProxiesClient.java
public static ClientProtocol createNonHAProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class,
      ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy =
      RetryUtils.getDefaultRetryPolicy(
          conf,
          HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY,
          HdfsClientConfigKeys.Retry.POLICY_ENABLED_DEFAULT,
          HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY,
          HdfsClientConfigKeys.Retry.POLICY_SPEC_DEFAULT,
          SafeModeException.class.getName());

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<>();
    ClientProtocol translatorProxy =
        new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<>(ClientProtocol.class,
            translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
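The defaultPolicy built above is driven by two client settings read through HdfsClientConfigKeys.Retry. A minimal sketch of enabling them, assuming those constants; the spec value mirrors the shipped default, and the reading of it as alternating pause-in-milliseconds/retry-count pairs is an assumption stated in the comment, not a guarantee:

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.client.HdfsClientConfigKeys;

public class RetryPolicyConfigSketch {
  // Enables the client retry policy that RetryUtils.getDefaultRetryPolicy reads.
  // The spec string mirrors the shipped default and is assumed to be parsed as
  // alternating pause(ms)/retry-count pairs.
  static Configuration withClientRetries() {
    Configuration conf = new Configuration();
    conf.setBoolean(HdfsClientConfigKeys.Retry.POLICY_ENABLED_KEY, true);
    conf.set(HdfsClientConfigKeys.Retry.POLICY_SPEC_KEY, "10000,6,60000,10");
    return conf;
  }
}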
Project: big-c    File: NameNodeProxies.java
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
Project: hadoop-plus    File: TestIsMethodSupported.java
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientNamenodeProtocolTranslatorPB translator = 
      (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
          conf, nnAddress, ClientProtocol.class,
          UserGroupInformation.getCurrentUser(), true).getProxy();
  assertTrue(translator.isMethodSupported("mkdirs"));
}
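Beyond asserting on "mkdirs", isMethodSupported can serve as a capability probe before a client invokes a newer RPC. A small sketch built from the same calls as the test; the helper class and method name are hypothetical:

import java.io.IOException;
import java.net.InetSocketAddress;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.NameNodeProxies;
import org.apache.hadoop.hdfs.protocol.ClientProtocol;
import org.apache.hadoop.hdfs.protocolPB.ClientNamenodeProtocolTranslatorPB;
import org.apache.hadoop.security.UserGroupInformation;

public class MethodSupportProbe {
  // Asks the NameNode behind nnAddress whether it implements the named ClientProtocol RPC.
  static boolean supports(Configuration conf, InetSocketAddress nnAddress,
      String methodName) throws IOException {
    ClientNamenodeProtocolTranslatorPB translator =
        (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
            conf, nnAddress, ClientProtocol.class,
            UserGroupInformation.getCurrentUser(), true).getProxy();
    return translator.isMethodSupported(methodName);
  }
}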
Project: hops    File: TestIsMethodSupported.java
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientNamenodeProtocolTranslatorPB translator =
      (ClientNamenodeProtocolTranslatorPB) NameNodeProxies
          .createNonHAProxy(conf, nnAddress, ClientProtocol.class,
              UserGroupInformation.getCurrentUser(), true).getProxy();
  assertTrue(translator.isMethodSupported("mkdirs"));
}
Project: hadoop-TCP    File: TestIsMethodSupported.java
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientNamenodeProtocolTranslatorPB translator = 
      (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
          conf, nnAddress, ClientProtocol.class,
          UserGroupInformation.getCurrentUser(), true).getProxy();
  assertTrue(translator.isMethodSupported("mkdirs"));
}
Project: hardfs    File: TestIsMethodSupported.java
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientNamenodeProtocolTranslatorPB translator = 
      (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
          conf, nnAddress, ClientProtocol.class,
          UserGroupInformation.getCurrentUser(), true).getProxy();
  assertTrue(translator.isMethodSupported("mkdirs"));
}
Project: hadoop-on-lustre2    File: TestIsMethodSupported.java
@Test
public void testClientNamenodeProtocol() throws IOException {
  ClientNamenodeProtocolTranslatorPB translator = 
      (ClientNamenodeProtocolTranslatorPB) NameNodeProxies.createNonHAProxy(
          conf, nnAddress, ClientProtocol.class,
          UserGroupInformation.getCurrentUser(), true).getProxy();
  assertTrue(translator.isMethodSupported("mkdirs"));
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNodeProxies.java
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    RetryPolicy createPolicy = RetryPolicies
        .retryUpToMaximumCountWithFixedSleep(5,
            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);

    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
        createPolicy);

    RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
        defaultPolicy, remoteExceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

    methodNameToPolicyMap.put("create", methodPolicy);

    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}
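The retry wiring above (a per-method policy map plus a DefaultFailoverProxyProvider handed to RetryProxy.create) is not HDFS-specific. A stripped-down sketch of the same pattern against a made-up interface; KvStore and its retry limits are hypothetical placeholders:

import java.io.IOException;
import java.util.HashMap;
import java.util.Map;
import java.util.concurrent.TimeUnit;
import org.apache.hadoop.io.retry.DefaultFailoverProxyProvider;
import org.apache.hadoop.io.retry.RetryPolicies;
import org.apache.hadoop.io.retry.RetryPolicy;
import org.apache.hadoop.io.retry.RetryProxy;

public class RetryWiringSketch {
  // Hypothetical interface standing in for ClientProtocol.
  interface KvStore {
    void create(String key) throws IOException;
  }

  // Wraps target so that "create" is retried up to 5 times with a fixed 1s sleep,
  // while every other method falls through to TRY_ONCE_THEN_FAIL.
  static KvStore withRetries(KvStore target) {
    RetryPolicy createPolicy = RetryPolicies
        .retryUpToMaximumCountWithFixedSleep(5, 1, TimeUnit.SECONDS);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();
    methodNameToPolicyMap.put("create", createPolicy);

    return (KvStore) RetryProxy.create(
        KvStore.class,
        new DefaultFailoverProxyProvider<KvStore>(KvStore.class, target),
        methodNameToPolicyMap,
        RetryPolicies.TRY_ONCE_THEN_FAIL);
  }
}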
Project: FlexMap    File: NameNodeProxies.java
private static ClientProtocol createNNProxyWithClientProtocol(
    InetSocketAddress address, Configuration conf, UserGroupInformation ugi,
    boolean withRetries, AtomicBoolean fallbackToSimpleAuth)
    throws IOException {
  RPC.setProtocolEngine(conf, ClientNamenodeProtocolPB.class, ProtobufRpcEngine.class);

  final RetryPolicy defaultPolicy = 
      RetryUtils.getDefaultRetryPolicy(
          conf, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_KEY, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_ENABLED_DEFAULT, 
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_KEY,
          DFSConfigKeys.DFS_CLIENT_RETRY_POLICY_SPEC_DEFAULT,
          SafeModeException.class);

  final long version = RPC.getProtocolVersion(ClientNamenodeProtocolPB.class);
  ClientNamenodeProtocolPB proxy = RPC.getProtocolProxy(
      ClientNamenodeProtocolPB.class, version, address, ugi, conf,
      NetUtils.getDefaultSocketFactory(conf),
      org.apache.hadoop.ipc.Client.getTimeout(conf), defaultPolicy,
      fallbackToSimpleAuth).getProxy();

  if (withRetries) { // create the proxy with retries

    RetryPolicy createPolicy = RetryPolicies
        .retryUpToMaximumCountWithFixedSleep(5,
            HdfsConstants.LEASE_SOFTLIMIT_PERIOD, TimeUnit.MILLISECONDS);

    Map<Class<? extends Exception>, RetryPolicy> remoteExceptionToPolicyMap =
        new HashMap<Class<? extends Exception>, RetryPolicy>();
    remoteExceptionToPolicyMap.put(AlreadyBeingCreatedException.class,
        createPolicy);

    RetryPolicy methodPolicy = RetryPolicies.retryByRemoteException(
        defaultPolicy, remoteExceptionToPolicyMap);
    Map<String, RetryPolicy> methodNameToPolicyMap = new HashMap<String, RetryPolicy>();

    methodNameToPolicyMap.put("create", methodPolicy);

    ClientProtocol translatorProxy =
      new ClientNamenodeProtocolTranslatorPB(proxy);
    return (ClientProtocol) RetryProxy.create(
        ClientProtocol.class,
        new DefaultFailoverProxyProvider<ClientProtocol>(
            ClientProtocol.class, translatorProxy),
        methodNameToPolicyMap,
        defaultPolicy);
  } else {
    return new ClientNamenodeProtocolTranslatorPB(proxy);
  }
}