Java 类 org.apache.hadoop.hdfs.nfs.mount.RpcProgramMountd 实例源码

项目:hadoop    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Use an ephemeral port in case tests are running in parallel.
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);

    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Use an ephemeral port in case tests are running in parallel.
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);

    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:big-c    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Use an ephemeral port in case tests are running in parallel.
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);

    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Use an ephemeral port in case tests are running in parallel.
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);

    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hadoop-plus    文件:TestPortmapRegister.java   
public static void main(String[] args) throws InterruptedException {
  // Build a portmap registration entry for the MOUNT program (v1, UDP,
  // well-known mountd port) and encode it as an XDR request.
  PortmapMapping mapping = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR request = PortmapRequest.create(mapping);

  // Send the registration to the local rpcbind/portmap service.
  RegistrationClient client = new RegistrationClient("localhost",
      Nfs3Constant.SUN_RPCBIND, request);
  client.run();

  // Run the first test scenario on its own thread and wait for it to finish.
  Thread tester = new Runtest1();
  tester.start();
  tester.join();
}
项目:hadoop-plus    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .manageNameDfsDirs(false).build();
  try {
    cluster.waitActive();

    // Export the filesystem root and start the NFS gateway.
    List<String> exports = new ArrayList<String>();
    exports.add("/");
    Nfs3 nfs3 = new Nfs3(exports, config);
    nfs3.start(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hops    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  Configuration config = new Configuration();
  MiniDFSCluster cluster =
      new MiniDFSCluster.Builder(config).numDataNodes(1).build();
  try {
    cluster.waitActive();

    // Use an ephemeral port in case tests are running in parallel.
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);

    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd =
        (RpcProgramMountd) nfs3.getMountd().getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hadoop-TCP    文件:TestPortmapRegister.java   
public static void main(String[] args) throws InterruptedException {
  // Build a portmap registration entry for the MOUNT program (v1, UDP,
  // well-known mountd port) and encode it as an XDR request.
  PortmapMapping mapping = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR request = PortmapRequest.create(mapping);

  // Send the registration to the local rpcbind/portmap service.
  RegistrationClient client = new RegistrationClient("localhost",
      Nfs3Constant.SUN_RPCBIND, request);
  client.run();

  // Run the first test scenario on its own thread and wait for it to finish.
  Thread tester = new Runtest1();
  tester.start();
  tester.join();
}
项目:hadoop-TCP    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Export the filesystem root and start the NFS gateway.
    List<String> exports = new ArrayList<String>();
    exports.add("/");
    Nfs3 nfs3 = new Nfs3(exports, config);
    nfs3.start(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hardfs    文件:TestPortmapRegister.java   
public static void main(String[] args) throws InterruptedException {
  // Build a portmap registration entry for the MOUNT program (v1, UDP,
  // well-known mountd port) and encode it as an XDR request.
  PortmapMapping mapping = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR request = PortmapRequest.create(mapping);

  // Send the registration to the local rpcbind/portmap service.
  RegistrationClient client = new RegistrationClient("localhost",
      Nfs3Constant.SUN_RPCBIND, request);
  client.run();

  // Run the first test scenario on its own thread and wait for it to finish.
  Thread tester = new Runtest1();
  tester.start();
  tester.join();
}
项目:hardfs    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Export the filesystem root and start the NFS gateway.
    List<String> exports = new ArrayList<String>();
    exports.add("/");
    Nfs3 nfs3 = new Nfs3(exports, config);
    nfs3.start(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountBase()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hadoop-on-lustre2    文件:TestMountd.java   
@Test
public void testStart() throws IOException {
  // Start a single-datanode minicluster to back the NFS gateway.
  Configuration config = new Configuration();
  MiniDFSCluster cluster = new MiniDFSCluster.Builder(config).numDataNodes(1)
      .build();
  try {
    cluster.waitActive();

    // Use an ephemeral port in case tests are running in parallel.
    config.setInt("nfs3.mountd.port", 0);
    config.setInt("nfs3.server.port", 0);

    // Start nfs
    Nfs3 nfs3 = new Nfs3(config);
    nfs3.startServiceInternal(false);

    // Exercise the MOUNT program's NULL procedure (a no-op ping).
    RpcProgramMountd mountd = (RpcProgramMountd) nfs3.getMountd()
        .getRpcProgram();
    mountd.nullOp(new XDR(), 1234, InetAddress.getByName("localhost"));

    // Exercise the NFSv3 program's NULL procedure as well.
    RpcProgramNfs3 nfsd = (RpcProgramNfs3) nfs3.getRpcProgram();
    nfsd.nullProcedure();
  } finally {
    // Always tear the minicluster down, even if an RPC above fails,
    // so a failing test does not leak the cluster into later tests.
    cluster.shutdown();
  }
}
项目:hadoop    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;

  // Configure a single custom export point for the gateway.
  String exportPath = "/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPath);
  // Bind to ephemeral ports so parallel test runs do not collide.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the minicluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must expose exactly the configured export.
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd mountProgram = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(mountProgram.getExports().size() == 1);
    assertTrue(mountProgram.getExports().get(0).equals(exportPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:aliyun-oss-hadoop-fs    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;

  // Configure a single custom export point for the gateway.
  String exportPath = "/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPath);
  // Bind to ephemeral ports so parallel test runs do not collide.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the minicluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must expose exactly the configured export.
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd mountProgram = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(mountProgram.getExports().size() == 1);
    assertTrue(mountProgram.getExports().get(0).equals(exportPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:big-c    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;

  // Configure a single custom export point for the gateway.
  String exportPath = "/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPath);
  // Bind to ephemeral ports so parallel test runs do not collide.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the minicluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must expose exactly the configured export.
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd mountProgram = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(mountProgram.getExports().size() == 1);
    assertTrue(mountProgram.getExports().get(0).equals(exportPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-2.6.0-cdh5.4.3    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  NfsConfiguration config = new NfsConfiguration();
  MiniDFSCluster cluster = null;

  // Configure a single custom export point for the gateway.
  String exportPath = "/myexport1";
  config.setStrings(NfsConfigKeys.DFS_NFS_EXPORT_POINT_KEY, exportPath);
  // Bind to ephemeral ports so parallel test runs do not collide.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the minicluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must expose exactly the configured export.
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd mountProgram = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(mountProgram.getExports().size() == 1);
    assertTrue(mountProgram.getExports().get(0).equals(exportPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-plus    文件:TestPortmapRegister.java   
@Override
public void run() {
  // Build a portmap registration entry for the MOUNT program (v1, UDP,
  // well-known mountd port), encode it as XDR, and feed the same request
  // to testRequest as both the message to send and the reference value.
  PortmapMapping mapping = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR request = PortmapRequest.create(mapping);
  testRequest(request, request);
}
项目:hops    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  Configuration config = new Configuration();
  MiniDFSCluster cluster = null;

  // Configure a single custom export point for the gateway.
  String exportPath = "/myexport1";
  config.setStrings(Nfs3Constant.EXPORT_POINT, exportPath);
  // Bind to ephemeral ports so parallel test runs do not collide.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the minicluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must expose exactly the configured export.
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd mountProgram = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(mountProgram.getExports().size() == 1);
    assertTrue(mountProgram.getExports().get(0).equals(exportPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}
项目:hadoop-TCP    文件:TestPortmapRegister.java   
@Override
public void run() {
  // Build a portmap registration entry for the MOUNT program (v1, UDP,
  // well-known mountd port), encode it as XDR, and feed the same request
  // to testRequest as both the message to send and the reference value.
  PortmapMapping mapping = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR request = PortmapRequest.create(mapping);
  testRequest(request, request);
}
项目:hardfs    文件:TestPortmapRegister.java   
@Override
public void run() {
  // Build a portmap registration entry for the MOUNT program (v1, UDP,
  // well-known mountd port), encode it as XDR, and feed the same request
  // to testRequest as both the message to send and the reference value.
  PortmapMapping mapping = new PortmapMapping(RpcProgramMountd.PROGRAM,
      RpcProgramMountd.VERSION_1, PortmapMapping.TRANSPORT_UDP,
      RpcProgramMountd.PORT);
  XDR request = PortmapRequest.create(mapping);
  testRequest(request, request);
}
项目:hadoop-on-lustre2    文件:TestExportsTable.java   
@Test
public void testExportPoint() throws IOException {
  Configuration config = new Configuration();
  MiniDFSCluster cluster = null;

  // Configure a single custom export point for the gateway.
  String exportPath = "/myexport1";
  config.setStrings(Nfs3Constant.EXPORT_POINT, exportPath);
  // Bind to ephemeral ports so parallel test runs do not collide.
  config.setInt("nfs3.mountd.port", 0);
  config.setInt("nfs3.server.port", 0);

  try {
    cluster = new MiniDFSCluster.Builder(config).numDataNodes(1).build();
    cluster.waitActive();

    // Bring up the NFS gateway against the minicluster.
    final Nfs3 nfsServer = new Nfs3(config);
    nfsServer.startServiceInternal(false);

    // The mount daemon must expose exactly the configured export.
    Mountd mountd = nfsServer.getMountd();
    RpcProgramMountd mountProgram = (RpcProgramMountd) mountd.getRpcProgram();
    assertTrue(mountProgram.getExports().size() == 1);
    assertTrue(mountProgram.getExports().get(0).equals(exportPath));
  } finally {
    if (cluster != null) {
      cluster.shutdown();
    }
  }
}