Java class org.apache.hadoop.hdfs.HDFSPolicyProvider usage examples
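HDFSPolicyProvider is the PolicyProvider implementation that maps each HDFS RPC protocol to its service-level authorization ACL key (the security.*.acl entries in hadoop-policy.xml). The snippets below, collected from a number of Hadoop forks, use it in three ways: registering it as the policy provider in a test Configuration, regenerating a policy file from its service list, and passing it to refreshServiceAcl on an RPC server when hadoop.security.authorization is enabled.

As a minimal orientation sketch before the verbatim excerpts (the class name HdfsPolicyProviderDemo is ours, not from any project below; the Hadoop calls are the same ones the examples use):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.HDFSPolicyProvider;
import org.apache.hadoop.security.authorize.PolicyProvider;
import org.apache.hadoop.security.authorize.Service;

public class HdfsPolicyProviderDemo {
  public static void main(String[] args) {
    // Register HDFSPolicyProvider the same way the test setups below do.
    Configuration conf = new Configuration();
    conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
        HDFSPolicyProvider.class, PolicyProvider.class);

    // Each Service pairs an ACL key from hadoop-policy.xml with a protocol.
    for (Service service : new HDFSPolicyProvider().getServices()) {
      System.out.println(service.getServiceKey()
          + " -> " + service.getProtocol().getName());
    }
  }
}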

Project: hadoop    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
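  // Register the HDFS service-level authorization policies with the test configuration.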
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: hadoop    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

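  // Back the encryption key provider with a throwaway Java keystore under the test dir.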
  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: big-c    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: big-c    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: big-c    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hadoop-EAR    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: FlexMap    File: TestXAttrCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setBoolean(DFSConfigKeys.DFS_NAMENODE_XATTRS_ENABLED_KEY, true);
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(), 
      fs instanceof DistributedFileSystem);
}
Project: FlexMap    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: FlexMap    File: TestCryptoAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  tmpDir = new File(System.getProperty("test.build.data", "target"),
      UUID.randomUUID().toString()).getAbsoluteFile();
  final Path jksPath = new Path(tmpDir.toString(), "test.jks");
  conf.set(DFSConfigKeys.DFS_ENCRYPTION_KEY_PROVIDER_URI,
      JavaKeyStoreProvider.SCHEME_NAME + "://file" + jksPath.toUri());

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(1).build();
  dfsCluster.waitClusterUp();
  createAKey("mykey", conf);
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not an HDFS: " + fs.getUri(),
      fs instanceof DistributedFileSystem);
}
Project: hadoop-on-lustre    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hadoop-on-lustre2    File: TestCacheAdminCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(3).build();

  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");
  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: cumulus    File: DataNode.java
private void initIpcServer(Configuration conf) throws IOException {
  InetSocketAddress ipcAddr = NetUtils.createSocketAddr(
      conf.get("dfs.datanode.ipc.address"));
  ipcServer = RPC.getServer(DataNode.class, this, ipcAddr.getHostName(),
      ipcAddr.getPort(), conf.getInt("dfs.datanode.handler.count", 3), false,
      conf, blockTokenSecretManager);

  // set service-level authorization security policy
  if (conf.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    ipcServer.refreshServiceAcl(conf, new HDFSPolicyProvider());
  }

  dnRegistration.setIpcPort(ipcServer.getListenerAddress().getPort());
  LOG.info("dnRegistration = " + dnRegistration);
}
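Note: this cumulus snippet predates the builder-style RPC API; RPC.getServer and the mutable dnRegistration come from the older 0.2x-era DataNode, while the JournalNodeRpcServer examples further down show the RPC.Builder form that replaced it.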
Project: RDFS    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hadoop-0.20    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hanoi-hadoop-2.0.0-cdh    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: mapreduce-fork    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hortonworks-extension    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hadoop-gpu    File: TestServiceLevelAuthorization.java
private void rewriteHadoopPolicyFile(File policyFile) throws IOException {
  FileWriter fos = new FileWriter(policyFile);
  PolicyProvider policyProvider = new HDFSPolicyProvider();
  fos.write("<configuration>\n");
  for (Service service : policyProvider.getServices()) {
    String key = service.getServiceKey();
    String value ="*";
    if (key.equals("security.refresh.policy.protocol.acl")) {
      value = DUMMY_ACL;
    }
    fos.write("<property><name>"+ key + "</name><value>" + value + 
              "</value></property>\n");
    System.err.println("<property><name>"+ key + "</name><value>" + value + 
        "</value></property>\n");
  }
  fos.write("</configuration>\n");
  fos.close();
}
Project: hadoop    File: NameNodeRpcServer.java
@Override // RefreshAuthorizationPolicyProtocol
public void refreshServiceAcl() throws IOException {
  checkNNStartup();
  if (!serviceAuthEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }

  this.clientRpcServer.refreshServiceAcl(new Configuration(),
      new HDFSPolicyProvider());
  if (this.serviceRpcServer != null) {
    this.serviceRpcServer.refreshServiceAcl(new Configuration(),
        new HDFSPolicyProvider());
  }
}
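In operation this method is reached through hdfs dfsadmin -refreshServiceAcl via the RefreshAuthorizationPolicyProtocol noted in the @Override comment, which lets an administrator reload hadoop-policy.xml without restarting the NameNode.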
Project: hadoop    File: JournalNodeRpcServer.java
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;

  Configuration confCopy = new Configuration(conf);

  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
      true);

  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);

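  // Serve QJournalProtocol over protobuf RPC at the JournalNode's configured address.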
  this.server = new RPC.Builder(confCopy)
    .setProtocol(QJournalProtocolPB.class)
    .setInstance(service)
    .setBindAddress(addr.getHostName())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
}
Project: hadoop    File: TestHDFSCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: aliyun-oss-hadoop-fs    File: NameNodeRpcServer.java
@Override // RefreshAuthorizationPolicyProtocol
public void refreshServiceAcl() throws IOException {
  checkNNStartup();
  if (!serviceAuthEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }

  this.clientRpcServer.refreshServiceAcl(new Configuration(),
      new HDFSPolicyProvider());
  if (this.serviceRpcServer != null) {
    this.serviceRpcServer.refreshServiceAcl(new Configuration(),
        new HDFSPolicyProvider());
  }
}
Project: aliyun-oss-hadoop-fs    File: JournalNodeRpcServer.java
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;

  Configuration confCopy = new Configuration(conf);

  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
      true);

  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);

  this.server = new RPC.Builder(confCopy)
    .setProtocol(QJournalProtocolPB.class)
    .setInstance(service)
    .setBindAddress(addr.getHostName())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
  this.server.setTracer(jn.tracer);
}
Project: aliyun-oss-hadoop-fs    File: TestHDFSCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: big-c    File: NameNodeRpcServer.java
@Override // RefreshAuthorizationPolicyProtocol
public void refreshServiceAcl() throws IOException {
  checkNNStartup();
  if (!serviceAuthEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }

  this.clientRpcServer.refreshServiceAcl(new Configuration(),
      new HDFSPolicyProvider());
  if (this.serviceRpcServer != null) {
    this.serviceRpcServer.refreshServiceAcl(new Configuration(),
        new HDFSPolicyProvider());
  }
}
Project: big-c    File: JournalNodeRpcServer.java
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;

  Configuration confCopy = new Configuration(conf);

  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
      true);

  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);

  this.server = new RPC.Builder(confCopy)
    .setProtocol(QJournalProtocolPB.class)
    .setInstance(service)
    .setBindAddress(addr.getHostName())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
}
Project: big-c    File: TestHDFSCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: NameNodeRpcServer.java
@Override // RefreshAuthorizationPolicyProtocol
public void refreshServiceAcl() throws IOException {
  checkNNStartup();
  if (!serviceAuthEnabled) {
    throw new AuthorizationException("Service Level Authorization not enabled!");
  }

  this.clientRpcServer.refreshServiceAcl(new Configuration(),
      new HDFSPolicyProvider());
  if (this.serviceRpcServer != null) {
    this.serviceRpcServer.refreshServiceAcl(new Configuration(),
        new HDFSPolicyProvider());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: JournalNodeRpcServer.java
JournalNodeRpcServer(Configuration conf, JournalNode jn) throws IOException {
  this.jn = jn;

  Configuration confCopy = new Configuration(conf);

  // Ensure that nagling doesn't kick in, which could cause latency issues.
  confCopy.setBoolean(
      CommonConfigurationKeysPublic.IPC_SERVER_TCPNODELAY_KEY,
      true);

  InetSocketAddress addr = getAddress(confCopy);
  RPC.setProtocolEngine(confCopy, QJournalProtocolPB.class,
      ProtobufRpcEngine.class);
  QJournalProtocolServerSideTranslatorPB translator =
      new QJournalProtocolServerSideTranslatorPB(this);
  BlockingService service = QJournalProtocolService
      .newReflectiveBlockingService(translator);

  this.server = new RPC.Builder(confCopy)
    .setProtocol(QJournalProtocolPB.class)
    .setInstance(service)
    .setBindAddress(addr.getHostName())
    .setPort(addr.getPort())
    .setNumHandlers(HANDLER_COUNT)
    .setVerbose(false)
    .build();

  // set service-level authorization security policy
  if (confCopy.getBoolean(
      CommonConfigurationKeys.HADOOP_SECURITY_AUTHORIZATION, false)) {
    server.refreshServiceAcl(confCopy, new HDFSPolicyProvider());
  }
}
Project: hadoop-2.6.0-cdh5.4.3    File: TestHDFSCLI.java
@Before
@Override
public void setUp() throws Exception {
  super.setUp();
  conf.setClass(PolicyProvider.POLICY_PROVIDER_CONFIG,
      HDFSPolicyProvider.class, PolicyProvider.class);

  // Many of the tests expect a replication value of 1 in the output
  conf.setInt(DFSConfigKeys.DFS_REPLICATION_KEY, 1);

  // Build racks and hosts configuration to test dfsAdmin -printTopology
  String [] racks =  {"/rack1", "/rack1", "/rack2", "/rack2",
                      "/rack2", "/rack3", "/rack4", "/rack4" };
  String [] hosts = {"host1", "host2", "host3", "host4",
                     "host5", "host6", "host7", "host8" };
  dfsCluster = new MiniDFSCluster.Builder(conf).numDataNodes(8)
                                               .racks(racks)
                                               .hosts(hosts)
                                               .build();
  dfsCluster.waitClusterUp();
  namenode = conf.get(DFSConfigKeys.FS_DEFAULT_NAME_KEY, "file:///");

  username = System.getProperty("user.name");

  fs = dfsCluster.getFileSystem();
  assertTrue("Not a HDFS: "+fs.getUri(),
             fs instanceof DistributedFileSystem);
}
Project: hadoop-2.6.0-cdh5.4.3    File: HadoopPolicyProvider.java
@Override
public Service[] getServices() {
  Service[] hdfsServices = new HDFSPolicyProvider().getServices();
  Service[] mrServices = new MapReducePolicyProvider().getServices();

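  // Combined list: the HDFS ACL entries first, followed by the MapReduce ones.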
  Service[] hadoopServices = 
    new Service[hdfsServices.length + mrServices.length];
  System.arraycopy(hdfsServices, 0, hadoopServices, 0, hdfsServices.length);
  System.arraycopy(mrServices, 0, hadoopServices, hdfsServices.length, 
                   mrServices.length);

  return hadoopServices;
}