/** * Write the connection header. */ private void writeConnectionHeader() throws IOException { boolean isCryptoAesEnable = false; // check if Crypto AES is enabled if (saslRpcClient != null) { boolean saslEncryptionEnabled = SaslUtil.QualityOfProtection.PRIVACY. getSaslQop().equalsIgnoreCase(saslRpcClient.getSaslQOP()); isCryptoAesEnable = saslEncryptionEnabled && conf.getBoolean( CRYPTO_AES_ENABLED_KEY, CRYPTO_AES_ENABLED_DEFAULT); } // if Crypto AES is enabled, set transformation and negotiate with server if (isCryptoAesEnable) { waitingConnectionHeaderResponse = true; } this.out.write(connectionHeaderWithLength); this.out.flush(); }
/**
 * Creates the SASL client for this connection and runs the SASL handshake over the
 * given streams.
 *
 * <p>Fix: the QoP default is lowercased with {@link java.util.Locale#ROOT} so the
 * result is locale-independent (e.g. under a Turkish default locale,
 * {@code "AUTHENTICATION".toLowerCase()} would yield a dotless-i string that does
 * not match the expected config value). This also matches the sibling
 * {@code setupSaslConnection} overload elsewhere in the codebase.
 *
 * @param in2 input stream to read SASL negotiation from
 * @param out2 output stream to write SASL negotiation to
 * @return true if the connection is set up with SASL, per
 *   {@code HBaseSaslRpcClient.saslConnect}
 * @throws IOException if the handshake fails
 */
private synchronized boolean setupSaslConnection(final InputStream in2,
    final OutputStream out2) throws IOException {
  saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal, fallbackAllowed,
      conf.get("hbase.rpc.protection",
          QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)));
  return saslRpcClient.saslConnect(in2, out2);
}
/**
 * Validates that a PRIVACY quality-of-protection is not combined with a plain
 * (non-SSL) Thrift HTTP transport, which would silently defeat the requested
 * confidentiality.
 *
 * @param qop the configured quality of protection
 * @param conf configuration to read the HTTP/SSL flags from
 * @throws IllegalArgumentException if QoP is PRIVACY while HTTP is enabled
 *   without SSL
 */
private void checkHttpSecurity(QualityOfProtection qop, Configuration conf) {
  boolean overHttp = conf.getBoolean(USE_HTTP_CONF_KEY, false);
  boolean sslEnabled = conf.getBoolean(THRIFT_SSL_ENABLED, false);
  if (qop == QualityOfProtection.PRIVACY && overHttp && !sslEnabled) {
    throw new IllegalArgumentException("Thrift HTTP Server's QoP is privacy, but "
        + THRIFT_SSL_ENABLED + " is false");
  }
}
/**
 * Creates the SASL client for this connection (including the Crypto AES opt-in
 * read from the client configuration) and runs the SASL handshake over the given
 * streams.
 *
 * @param in2 input stream to read SASL negotiation from
 * @param out2 output stream to write SASL negotiation to
 * @return true if the connection is set up with SASL, per
 *   {@code HBaseSaslRpcClient.saslConnect}
 * @throws IOException if the handshake fails
 */
private boolean setupSaslConnection(final InputStream in2, final OutputStream out2)
    throws IOException {
  // Locale.ROOT keeps the default QoP string locale-independent.
  String protection = this.rpcClient.conf.get("hbase.rpc.protection",
      QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT));
  boolean cryptoAesEnabled = this.rpcClient.conf.getBoolean(CRYPTO_AES_ENABLED_KEY,
      CRYPTO_AES_ENABLED_DEFAULT);
  saslRpcClient = new HBaseSaslRpcClient(authMethod, token, serverPrincipal,
      this.rpcClient.fallbackAllowed, protection, cryptoAesEnabled);
  return saslRpcClient.saslConnect(in2, out2);
}
/**
 * Initializes the SASL properties from the configured RPC protection level.
 *
 * <p>Fix: the QoP default is lowercased with {@link java.util.Locale#ROOT} so the
 * result is locale-independent (a Turkish default locale would otherwise produce a
 * dotless-i string that fails to match), consistent with the other call sites in
 * this codebase.
 *
 * @param conf configuration holding {@code hbase.rpc.protection}
 */
public static void init(Configuration conf) {
  SaslUtil.initSaslProperties(conf.get("hbase.rpc.protection",
      QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)));
}
public ThriftServerRunner(Configuration conf) throws IOException { UserProvider userProvider = UserProvider.instantiate(conf); // login the server principal (if using secure Hadoop) securityEnabled = userProvider.isHadoopSecurityEnabled() && userProvider.isHBaseSecurityEnabled(); if (securityEnabled) { host = Strings.domainNamePointerToHostName(DNS.getDefaultHost( conf.get("hbase.thrift.dns.interface", "default"), conf.get("hbase.thrift.dns.nameserver", "default"))); userProvider.login("hbase.thrift.keytab.file", "hbase.thrift.kerberos.principal", host); } this.conf = HBaseConfiguration.create(conf); this.listenPort = conf.getInt(PORT_CONF_KEY, DEFAULT_LISTEN_PORT); this.metrics = new ThriftMetrics(conf, ThriftMetrics.ThriftServerType.ONE); this.pauseMonitor = new JvmPauseMonitor(conf, this.metrics.getSource()); this.hbaseHandler = new HBaseHandler(conf, userProvider); this.hbaseHandler.initMetrics(metrics); this.handler = HbaseHandlerMetricsProxy.newInstance( hbaseHandler, metrics, conf); this.realUser = userProvider.getCurrent().getUGI(); String strQop = conf.get(THRIFT_QOP_KEY); if (strQop != null) { this.qop = SaslUtil.getQop(strQop); } doAsEnabled = conf.getBoolean(THRIFT_SUPPORT_PROXYUSER, false); if (doAsEnabled) { if (!conf.getBoolean(USE_HTTP_CONF_KEY, false)) { LOG.warn("Fail to enable the doAs feature. hbase.regionserver.thrift.http is not " + "configured "); } } if (qop != null) { if (qop != QualityOfProtection.AUTHENTICATION && qop != QualityOfProtection.INTEGRITY && qop != QualityOfProtection.PRIVACY) { throw new IOException(String.format("Invalide %s: It must be one of %s, %s, or %s.", THRIFT_QOP_KEY, QualityOfProtection.AUTHENTICATION.name(), QualityOfProtection.INTEGRITY.name(), QualityOfProtection.PRIVACY.name())); } checkHttpSecurity(qop, conf); if (!securityEnabled) { throw new IOException("Thrift server must" + " run in secure mode to support authentication"); } } }
/**
 * Constructs a server listening on the named port and address.
 * @param server hosting instance of {@link Server}. We will do authentications if an
 *   instance else pass null for no authentication check.
 * @param name Used keying this rpc servers' metrics and for naming the Listener thread.
 * @param services A list of services.
 * @param bindAddress Where to listen
 * @param conf configuration the server tunables (queue sizes, timeouts, TCP flags,
 *   security) are read from
 * @param scheduler scheduler that dispatches incoming calls to handler threads
 * @param reservoirEnabled Enable ByteBufferPool or not.
 */
public RpcServer(final Server server, final String name,
    final List<BlockingServiceAndInterface> services,
    final InetSocketAddress bindAddress, Configuration conf,
    RpcScheduler scheduler, boolean reservoirEnabled) throws IOException {
  if (reservoirEnabled) {
    int poolBufSize = conf.getInt(ByteBufferPool.BUFFER_SIZE_KEY,
        ByteBufferPool.DEFAULT_BUFFER_SIZE);
    // The max number of buffers to be pooled in the ByteBufferPool. The default value been
    // selected based on the #handlers configured. When it is read request, 2 MB is the max size
    // at which we will send back one RPC request. Means max we need 2 MB for creating the
    // response cell block. (Well it might be much lesser than this because in 2 MB size calc, we
    // include the heap size overhead of each cells also.) Considering 2 MB, we will need
    // (2 * 1024 * 1024) / poolBufSize buffers to make the response cell block. Pool buffer size
    // is by default 64 KB.
    // In case of read request, at the end of the handler process, we will make the response
    // cellblock and add the Call to connection's response Q and a single Responder thread takes
    // connections and responses from that one by one and do the socket write. So there is chances
    // that by the time a handler originated response is actually done writing to socket and so
    // released the BBs it used, the handler might have processed one more read req. On an avg 2x
    // we consider, and consider that also for the max buffers to pool.
    int bufsForTwoMB = (2 * 1024 * 1024) / poolBufSize;
    int maxPoolSize = conf.getInt(ByteBufferPool.MAX_POOL_SIZE_KEY,
        conf.getInt(HConstants.REGION_SERVER_HANDLER_COUNT,
            HConstants.DEFAULT_REGION_SERVER_HANDLER_COUNT) * bufsForTwoMB * 2);
    this.reservoir = new ByteBufferPool(poolBufSize, maxPoolSize);
    this.minSizeForReservoirUse = getMinSizeForReservoirUse(this.reservoir);
  } else {
    reservoir = null;
    this.minSizeForReservoirUse = Integer.MAX_VALUE;// reservoir itself not in place.
  }
  this.server = server;
  this.services = services;
  this.bindAddress = bindAddress;
  this.conf = conf;
  // See declaration above for documentation on what this size is.
  this.maxQueueSizeInBytes =
      this.conf.getLong("hbase.ipc.server.max.callqueue.size", DEFAULT_MAX_CALLQUEUE_SIZE);
  // Thresholds for logging slow / oversized responses.
  this.warnResponseTime = conf.getInt(WARN_RESPONSE_TIME, DEFAULT_WARN_RESPONSE_TIME);
  this.warnResponseSize = conf.getInt(WARN_RESPONSE_SIZE, DEFAULT_WARN_RESPONSE_SIZE);
  this.minClientRequestTimeout = conf.getInt(MIN_CLIENT_REQUEST_TIMEOUT,
      DEFAULT_MIN_CLIENT_REQUEST_TIMEOUT);
  this.maxRequestSize = conf.getInt(MAX_REQUEST_SIZE, DEFAULT_MAX_REQUEST_SIZE);
  this.metrics = new MetricsHBaseServer(name, new MetricsHBaseServerWrapperImpl(this));
  // TCP socket options for accepted connections.
  this.tcpNoDelay = conf.getBoolean("hbase.ipc.server.tcpnodelay", true);
  this.tcpKeepAlive = conf.getBoolean("hbase.ipc.server.tcpkeepalive", true);
  this.cellBlockBuilder = new CellBlockBuilder(conf);
  this.authorize = conf.getBoolean(HADOOP_SECURITY_AUTHORIZATION, false);
  this.userProvider = UserProvider.instantiate(conf);
  this.isSecurityEnabled = userProvider.isHBaseSecurityEnabled();
  if (isSecurityEnabled) {
    // Locale.ROOT keeps the lowercased default QoP string locale-independent.
    saslProps = SaslUtil.initSaslProperties(conf.get("hbase.rpc.protection",
        QualityOfProtection.AUTHENTICATION.name().toLowerCase(Locale.ROOT)));
  } else {
    saslProps = Collections.emptyMap();
  }
  this.scheduler = scheduler;
}