@Test (timeout = 800000) public void testRegionReplicationOnMidClusterWithRacks() { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L); conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec loadBalancer.setConf(conf); int numNodes = 30; int numRegions = numNodes * 30; int replication = 3; // 3 replicas per region int numRegionsPerServer = 28; int numTables = 10; int numRacks = 4; // all replicas should be on a different rack Map<ServerName, List<HRegionInfo>> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); RackManager rm = new ForTestRackManager(numRacks); testWithCluster(serverMap, rm, false, true); }
@BeforeClass public static void beforeAllTests() throws Exception { Configuration conf = HBaseConfiguration.create(); conf.setClass("hbase.util.ip.to.rack.determiner", MockMapping.class, DNSToSwitchMapping.class); loadBalancer = new MockBalancer(); loadBalancer.setConf(conf); MasterServices st = Mockito.mock(MasterServices.class); Mockito.when(st.getServerName()).thenReturn(master); loadBalancer.setMasterServices(st); // Set up the rack topologies (5 machines per rack) rackManager = Mockito.mock(RackManager.class); for (int i = 0; i < NUM_SERVERS; i++) { servers[i] = ServerName.valueOf("foo"+i+":1234",-1); if (i < 5) { Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack1"); } if (i >= 5 && i < 10) { Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack2"); } if (i >= 10) { Mockito.when(rackManager.getRack(servers[i])).thenReturn("rack3"); } } }
/**
 * Applies the balancer configuration: clamps the slop values into [0, 1],
 * resolves the tables-on-master flags, rebuilds the rack manager, and (when a
 * region finder is in use) propagates the configuration to it.
 */
@Override
public void setConf(Configuration conf) {
  this.config = conf;
  setSlop(conf);
  // Clamp both slop values into the valid [0, 1] range.
  if (slop < 0) slop = 0;
  else if (slop > 1) slop = 1;
  if (overallSlop < 0) overallSlop = 0;
  else if (overallSlop > 1) overallSlop = 1;
  this.tablesOnMaster = LoadBalancer.isTablesOnMaster(this.config);
  this.onlySystemTablesOnMaster = LoadBalancer.isSystemTablesOnlyOnMaster(this.config);
  // If system tables on master, implies tablesOnMaster = true.
  if (this.onlySystemTablesOnMaster && !this.tablesOnMaster) {
    LOG.warn("Set " + TABLES_ON_MASTER + "=true because " + SYSTEM_TABLES_ON_MASTER + "=true");
    this.tablesOnMaster = true;
  }
  this.rackManager = new RackManager(getConf());
  if (useRegionFinder) {
    regionFinder.setConf(conf);
  }
  // Print out base configs. Don't print overallSlop since it for simple balancer exclusively.
  LOG.info("slop=" + this.slop + ", tablesOnMaster=" + this.tablesOnMaster
      + ", systemTablesOnMaster=" + this.onlySystemTablesOnMaster);
}
@Test public void testRegionReplicationOnMidClusterWithRacks() { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 10000000L); conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 120 * 1000); // 120 sec loadBalancer.setConf(conf); int numNodes = 30; int numRegions = numNodes * 30; int replication = 3; // 3 replicas per region int numRegionsPerServer = 28; int numTables = 10; int numRacks = 4; // all replicas should be on a different rack Map<ServerName, List<RegionInfo>> serverMap = createServerMap(numNodes, numRegions, numRegionsPerServer, replication, numTables); RackManager rm = new ForTestRackManager(numRacks); testWithCluster(serverMap, rm, false, true); }
/**
 * Constructs a helper around the given candidate servers and rack manager,
 * initializing the rack/server lookup structures used when computing
 * favored-node placements.
 *
 * @param servers     the candidate region servers
 * @param rackManager resolves the rack for each server
 */
public FavoredNodeAssignmentHelper(final List<ServerName> servers, final RackManager rackManager) {
  this.servers = servers;
  this.rackManager = rackManager;
  // Diamond operator (Java 7+): drop the redundant explicit type arguments.
  this.rackToRegionServerMap = new HashMap<>();
  this.regionServerToRackMap = new HashMap<>();
  this.uniqueRackList = new ArrayList<>();
  this.random = new Random();
}
/**
 * Applies the configuration: delegates to the base balancer, then resets the
 * favored-nodes assignment plan and rebuilds the rack manager from the new
 * configuration.
 */
@Override
public void setConf(Configuration conf) {
  super.setConf(conf);
  globalFavoredNodesAssignmentPlan = new FavoredNodesPlan();
  this.rackManager = new RackManager(conf);
  // Fixed: a redundant second super.setConf(conf) call at the end of this
  // method was removed; the base configuration is already applied above.
}
/**
 * Convenience constructor: delegates to the primary constructor, passing
 * null for its first parameter.
 */
protected Cluster(
    Map<ServerName, List<HRegionInfo>> clusterState,
    Map<String, Deque<RegionLoad>> loads,
    RegionLocationFinder regionFinder,
    RackManager rackManager) {
  this(null, clusterState, loads, regionFinder, rackManager);
}
/**
 * Applies the balancer configuration: clamps slop into [0, 1], records the
 * tables that should live on the master, rebuilds the rack manager, and
 * propagates the configuration to the region finder.
 */
@Override
public void setConf(Configuration conf) {
  setSlop(conf);
  // Clamp slop into the valid [0, 1] range.
  if (slop < 0) slop = 0;
  else if (slop > 1) slop = 1;
  this.config = conf;
  String[] tables = getTablesOnMaster(conf);
  if (tables != null && tables.length > 0) {
    Collections.addAll(tablesOnMaster, tables);
  }
  this.rackManager = new RackManager(getConf());
  regionFinder.setConf(conf);
}
protected void testWithCluster(Map<ServerName, List<HRegionInfo>> serverMap, RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) { List<ServerAndLoad> list = convertToList(serverMap); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); loadBalancer.setRackManager(rackManager); // Run the balancer. List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap); assertNotNull(plans); // Check to see that this actually got to a stable place. if (assertFullyBalanced || assertFullyBalancedForReplicas) { // Apply the plan to the mock cluster. List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap); // Print out the cluster loads to make debugging easier. LOG.info("Mock Balance : " + printMock(balancedCluster)); if (assertFullyBalanced) { assertClusterAsBalanced(balancedCluster); List<RegionPlan> secondPlans = loadBalancer.balanceCluster(serverMap); assertNull(secondPlans); } if (assertFullyBalancedForReplicas) { assertRegionReplicaPlacement(serverMap, rackManager); } } }
/**
 * Creates a helper for computing favored-node assignments over the given
 * servers, using the supplied rack manager for rack lookups.
 *
 * @param servers     the candidate region servers
 * @param rackManager resolves the rack for each server
 */
public FavoredNodeAssignmentHelper(final List<ServerName> servers, final RackManager rackManager) {
  this.servers = servers;
  this.rackManager = rackManager;
  this.random = new Random();
  // Lookup structures populated as servers are mapped onto racks.
  this.uniqueRackList = new ArrayList<>();
  this.regionServerToRackMap = new HashMap<>();
  this.rackToRegionServerMap = new HashMap<>();
}
/**
 * Initializes the balancer: runs base initialization, pushes the current
 * configuration to the superclass, and wires up the favored-nodes manager
 * and rack manager.
 *
 * @throws HBaseIOException if base initialization fails
 */
@Override
public synchronized void initialize() throws HBaseIOException {
  super.initialize();
  super.setConf(conf);
  this.fnm = services.getFavoredNodesManager();
  this.rackManager = new RackManager(conf);
  // Fixed: a redundant second super.setConf(conf) call at the end of this
  // method was removed; the same configuration was already applied above.
}
/**
 * Creates a favored-nodes manager bound to the given master services,
 * with an empty assignment plan and empty per-position reverse indexes.
 *
 * @param masterServices master services supplying the cluster configuration
 */
public FavoredNodesManager(MasterServices masterServices) {
  this.masterServices = masterServices;
  this.rackManager = new RackManager(masterServices.getConfiguration());
  this.globalFavoredNodesAssignmentPlan = new FavoredNodesPlan();
  // One reverse index (server -> regions) per favored-node position.
  this.primaryRSToRegionMap = new HashMap<>();
  this.secondaryRSToRegionMap = new HashMap<>();
  this.teritiaryRSToRegionMap = new HashMap<>();
}
/**
 * Convenience constructor: delegates to the primary constructor, passing
 * null for its first parameter.
 */
protected Cluster(
    Map<ServerName, List<RegionInfo>> clusterState,
    Map<String, Deque<BalancerRegionLoad>> loads,
    RegionLocationFinder regionFinder,
    RackManager rackManager) {
  this(null, clusterState, loads, regionFinder, rackManager);
}
protected void testWithCluster(Map<ServerName, List<RegionInfo>> serverMap, RackManager rackManager, boolean assertFullyBalanced, boolean assertFullyBalancedForReplicas) { List<ServerAndLoad> list = convertToList(serverMap); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); loadBalancer.setRackManager(rackManager); // Run the balancer. List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap); assertNotNull(plans); // Check to see that this actually got to a stable place. if (assertFullyBalanced || assertFullyBalancedForReplicas) { // Apply the plan to the mock cluster. List<ServerAndLoad> balancedCluster = reconcile(list, plans, serverMap); // Print out the cluster loads to make debugging easier. LOG.info("Mock Balance : " + printMock(balancedCluster)); if (assertFullyBalanced) { assertClusterAsBalanced(balancedCluster); List<RegionPlan> secondPlans = loadBalancer.balanceCluster(serverMap); assertNull(secondPlans); } if (assertFullyBalancedForReplicas) { assertRegionReplicaPlacement(serverMap, rackManager); } } }
/**
 * Convenience constructor: builds a RackManager from the given configuration
 * and delegates to the primary constructor.
 *
 * @param servers the candidate region servers
 * @param conf    configuration used to construct the rack manager
 */
public FavoredNodeAssignmentHelper(final List<ServerName> servers, Configuration conf) {
  this(servers, new RackManager(conf));
}
/**
 * Sets the rack manager used for rack lookups (e.g. to inject a mock in
 * tests).
 *
 * @param rackManager the rack manager to use
 */
public void setRackManager(RackManager rackManager) {
  this.rackManager = rackManager;
}
/**
 * Applies the configuration: resets the favored-nodes assignment plan,
 * rebuilds the rack manager, and stores the configuration.
 */
@Override
public void setConf(Configuration conf) {
  globalFavoredNodesAssignmentPlan = new FavoredNodesPlan();
  this.rackManager = new RackManager(conf);
  // NOTE(review): unlike the sibling override that calls super.setConf(conf),
  // this one only stores the conf locally — confirm the superclass does not
  // also need the new configuration here.
  this.conf = conf;
}
/**
 * Returns the rack manager currently used for rack lookups.
 *
 * @return the rack manager
 */
public RackManager getRackManager() {
  return rackManager;
}
/**
 * Applies the configuration: resets the favored-nodes assignment plan,
 * rebuilds the rack manager, then delegates to the superclass.
 */
@Override
public void setConf(Configuration conf) {
  globalFavoredNodesAssignmentPlan = new FavoredNodesPlan();
  this.rackManager = new RackManager(conf);
  super.setConf(conf);
}