Java class org.apache.hadoop.hbase.security.token.TokenUtil example source code
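
TokenUtil issues HBase delegation tokens so that processes which cannot re-authenticate via Kerberos themselves, such as MapReduce tasks, can still talk to a secure cluster. The snippets below are collected from several open-source projects. As a point of reference, here is a minimal sketch of obtaining and caching a token, assuming the Connection-based API of HBase 1.x+ and a caller that is already Kerberos-authenticated (the class name is illustrative):

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hbase.HBaseConfiguration;
import org.apache.hadoop.hbase.client.Connection;
import org.apache.hadoop.hbase.client.ConnectionFactory;
import org.apache.hadoop.hbase.security.User;
import org.apache.hadoop.hbase.security.UserProvider;
import org.apache.hadoop.hbase.security.token.TokenUtil;

public class ObtainTokenSketch { // hypothetical example class
  public static void main(String[] args) throws Exception {
    Configuration conf = HBaseConfiguration.create();
    try (Connection conn = ConnectionFactory.createConnection(conf)) {
      // Ask the cluster's TokenProvider coprocessor for a delegation token
      // and cache it in the current user's credentials.
      User user = UserProvider.instantiate(conf).getCurrent();
      TokenUtil.obtainAndCacheToken(conn, user);
    }
  }
}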

Project: ditb    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
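
For context, a driver would call this helper before submitting the mapred job so the delegation token travels with the job credentials. A hedged sketch; the job name and the remaining job setup are placeholders:

JobConf job = new JobConf(HBaseConfiguration.create());
job.setJobName("hbase-mapred-example"); // placeholder
// ... configure input format, mapper, output, etc. ...
TableMapReduceUtil.initCredentials(job);
JobClient.runJob(job); // the token is now part of the submitted credentials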
Project: ditb    File: TableMapReduceUtil.java
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt flag instead of clearing it
    }
  }
}
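
On the caller side, the peer Configuration only needs the usual ZooKeeper settings of the other cluster; a sketch with placeholder host names:

Configuration peerConf = HBaseConfiguration.create();
peerConf.set("hbase.zookeeper.quorum", "peer-zk1,peer-zk2,peer-zk3"); // placeholders
peerConf.set("hbase.zookeeper.property.clientPort", "2181");
peerConf.set("zookeeper.znode.parent", "/hbase");
TableMapReduceUtil.initCredentialsForCluster(job, peerConf);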
Project: pbase    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
Project: pbase    File: TableMapReduceUtil.java
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * The quorumAddress is the cluster key of the peer ZK ensemble, i.e. the values of
 * hbase.zookeeper.quorum, hbase.zookeeper.property.clientPort and zookeeper.znode.parent
 *
 * @param job The job that requires the permission.
 * @param quorumAddress string that contains the three required configurations
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, String quorumAddress)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
      ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
      Connection peerConn = ConnectionFactory.createConnection(peerConf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt flag instead of clearing it
    }
  }
}
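
The quorumAddress is the colon-separated cluster key that ZKUtil.applyClusterKeyToConf() parses: quorum hosts, then client port, then znode parent. A sketch with placeholder hosts:

// <hbase.zookeeper.quorum>:<hbase.zookeeper.property.clientPort>:<zookeeper.znode.parent>
String quorumAddress = "peer-zk1,peer-zk2,peer-zk3:2181:/hbase"; // placeholders
TableMapReduceUtil.initCredentialsForCluster(job, quorumAddress);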
Project: hbase-in-action    File: TokenExample.java
public static void main(String[] args) throws IOException, InterruptedException {
  Configuration conf = HBaseConfiguration.create();
  Connection connection = ConnectionFactory.createConnection(conf);

  // vv TokenExample
  Token<AuthenticationTokenIdentifier> token =
    TokenUtil.obtainToken(connection);
  String urlString = token.encodeToUrlString();
  File temp = new File(FileUtils.getTempDirectory(), "token");
  FileUtils.writeStringToFile(temp, urlString);

  System.out.println("Encoded Token: " + urlString);

  // Read the encoded token back from the same temp file it was written to.
  String strToken = FileUtils.readFileToString(temp);
  Token<AuthenticationTokenIdentifier> token2 = new Token<>();
  token2.decodeFromUrlString(strToken);
  UserGroupInformation.getCurrentUser().addToken(token2);
  // ^^ TokenExample
  connection.close();
}
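
The example round-trips the token through a file: encodeToUrlString() serializes it into a URL-safe string, decodeFromUrlString() restores it in another (or the same) process, and addToken() attaches it to the current user's credentials. A quick sanity check one could append, illustrative and not part of the book example:

// The decoded token should advertise the HBase token kind.
System.out.println("Token kind: " + token2.getKind()); // expected: HBASE_AUTH_TOKEN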
Project: hbase    File: TableMapReduceUtil.java
public static void initCredentials(JobConf job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job);
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.set("mapreduce.job.credentials.binary", System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    Connection conn = ConnectionFactory.createConnection(job);
    try {
      // login the server principal (if using secure Hadoop)
      User user = userProvider.getCurrent();
      TokenUtil.addTokenForJob(conn, job, user);
    } catch (InterruptedException ie) {
      ie.printStackTrace();
      Thread.currentThread().interrupt();
    } finally {
      conn.close();
    }
  }
}
Project: hbase    File: TableMapReduceUtil.java
/**
 * Obtain an authentication token, for the specified cluster, on behalf of the current user
 * and add it to the credentials for the given map reduce job.
 *
 * @param job The job that requires the permission.
 * @param conf The configuration to use in connecting to the peer cluster
 * @throws IOException When the authentication token cannot be obtained.
 */
public static void initCredentialsForCluster(Job job, Configuration conf)
    throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      Connection peerConn = ConnectionFactory.createConnection(conf);
      try {
        TokenUtil.addTokenForJob(peerConn, userProvider.getCurrent(), job);
      } finally {
        peerConn.close();
      }
    } catch (InterruptedException e) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt(); // restore the interrupt flag instead of clearing it
    }
  }
}
Project: ditb    File: TableMapReduceUtil.java
public static void initCredentials(Job job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.getConfiguration().set("mapreduce.job.credentials.binary",
                                 System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // init credentials for remote cluster
      String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
      User user = userProvider.getCurrent();
      if (quorumAddress != null) {
        Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
            quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, user, job);
        } finally {
          peerConn.close();
        }
      }

      Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
      try {
        TokenUtil.addTokenForJob(conn, user, job);
      } finally {
        conn.close();
      }
    } catch (InterruptedException ie) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt();
    }
  }
}
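
The remote-cluster branch only runs when TableOutputFormat.QUORUM_ADDRESS (the "hbase.mapred.output.quorum" property) is set on the job, i.e. when the job writes to a different cluster than it reads from. An illustrative setup with placeholder hosts:

// Point the job's output at a remote cluster so initCredentials() also
// obtains a token for that cluster; the value is a cluster key as above.
job.getConfiguration().set(TableOutputFormat.QUORUM_ADDRESS,
    "remote-zk1,remote-zk2:2181:/hbase"); // placeholders
TableMapReduceUtil.initCredentials(job);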
Project: pbase    File: TableMapReduceUtil.java
public static void initCredentials(Job job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.getConfiguration().set("mapreduce.job.credentials.binary",
                                 System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // init credentials for remote cluster
      String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
      User user = userProvider.getCurrent();
      if (quorumAddress != null) {
        Configuration peerConf = HBaseConfiguration.create(job.getConfiguration());
        ZKUtil.applyClusterKeyToConf(peerConf, quorumAddress);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, user, job);
        } finally {
          peerConn.close();
        }
      }

      Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
      try {
        TokenUtil.addTokenForJob(conn, user, job);
      } finally {
        conn.close();
      }
    } catch (InterruptedException ie) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt();
    }
  }
}
Project: hbase    File: TableMapReduceUtil.java
public static void initCredentials(Job job) throws IOException {
  UserProvider userProvider = UserProvider.instantiate(job.getConfiguration());
  if (userProvider.isHadoopSecurityEnabled()) {
    // propagate delegation related props from launcher job to MR job
    if (System.getenv("HADOOP_TOKEN_FILE_LOCATION") != null) {
      job.getConfiguration().set("mapreduce.job.credentials.binary",
                                 System.getenv("HADOOP_TOKEN_FILE_LOCATION"));
    }
  }

  if (userProvider.isHBaseSecurityEnabled()) {
    try {
      // init credentials for remote cluster
      String quorumAddress = job.getConfiguration().get(TableOutputFormat.QUORUM_ADDRESS);
      User user = userProvider.getCurrent();
      if (quorumAddress != null) {
        Configuration peerConf = HBaseConfiguration.createClusterConf(job.getConfiguration(),
            quorumAddress, TableOutputFormat.OUTPUT_CONF_PREFIX);
        Connection peerConn = ConnectionFactory.createConnection(peerConf);
        try {
          TokenUtil.addTokenForJob(peerConn, user, job);
        } finally {
          peerConn.close();
        }
      }

      Connection conn = ConnectionFactory.createConnection(job.getConfiguration());
      try {
        TokenUtil.addTokenForJob(conn, user, job);
      } finally {
        conn.close();
      }
    } catch (InterruptedException ie) {
      LOG.info("Interrupted obtaining user authentication token");
      Thread.currentThread().interrupt();
    }
  }
}
Project: kylin    File: PingHBaseCLI.java
public static void main(String[] args) throws IOException {
    String hbaseTable = args[0];

    System.out.println("Hello friend.");

    Configuration hconf = HBaseConnection.getCurrentHBaseConfiguration();
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
            System.out.println("--------------Error while getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    Connection conn = null;
    Table table = null;
    ResultScanner scanner = null;
    try {
        conn = ConnectionFactory.createConnection(hconf);
        table = conn.getTable(TableName.valueOf(hbaseTable));
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        IOUtils.closeQuietly(scanner);
        IOUtils.closeQuietly(table);
        IOUtils.closeQuietly(conn);
    }

}
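
Note that the Configuration-based obtainAndCacheToken(conf, ugi) overload used here was deprecated in HBase 1.0 in favor of the Connection-based obtainAndCacheToken(conn, user) variant shown at the top of this page.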
Project: Kylin    File: PingHBaseCLI.java
public static void main(String[] args) throws IOException {
    String metadataUrl = args[0];
    String hbaseTable = args[1];

    System.out.println("Hello friend.");

    Configuration hconf = HadoopUtil.newHBaseConfiguration(metadataUrl);
    if (User.isHBaseSecurityEnabled(hconf)) {
        try {
            System.out.println("--------------Getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
            TokenUtil.obtainAndCacheToken(hconf, UserGroupInformation.getCurrentUser());
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt(); // restore the interrupt flag
            System.out.println("--------------Error while getting kerberos credential for user " + UserGroupInformation.getCurrentUser().getUserName());
        }
    }

    Scan scan = new Scan();
    int limit = 20;

    HConnection conn = null;
    HTableInterface table = null;
    ResultScanner scanner = null;
    try {
        conn = HConnectionManager.createConnection(hconf);
        table = conn.getTable(hbaseTable);
        scanner = table.getScanner(scan);
        int count = 0;
        for (Result r : scanner) {
            byte[] rowkey = r.getRow();
            System.out.println(Bytes.toStringBinary(rowkey));
            count++;
            if (count == limit)
                break;
        }
    } finally {
        if (scanner != null) {
            scanner.close();
        }
        if (table != null) {
            table.close();
        }
        if (conn != null) {
            conn.close();
        }
    }

}