/**
 * Create all of the RegionPlan's needed to move from the initial cluster state to the desired
 * state.
 *
 * @param cluster The state of the cluster
 * @return List of RegionPlan's that represent the moves needed to get to desired final state.
 */
private List<RegionPlan> createRegionPlans(Cluster cluster) {
  List<RegionPlan> plans = new LinkedList<RegionPlan>();
  // Any region whose current server index differs from its initial server
  // index needs a move plan; regions already in place are skipped.
  for (int region = 0; region < cluster.regionIndexToServerIndex.length; region++) {
    int fromIndex = cluster.initialRegionIndexToServerIndex[region];
    int toIndex = cluster.regionIndexToServerIndex[region];
    if (fromIndex == toIndex) {
      continue; // region is already on its desired server
    }
    HRegionInfo regionInfo = cluster.regions[region];
    ServerName source = cluster.servers[fromIndex];
    ServerName destination = cluster.servers[toIndex];
    if (LOG.isTraceEnabled()) {
      LOG.trace("Moving Region " + regionInfo.getEncodedName() + " from server "
          + source.getHostname() + " to " + destination.getHostname());
    }
    plans.add(new RegionPlan(regionInfo, source, destination));
  }
  return plans;
}
/**
 * Test the load balancing algorithm.
 *
 * Invariant is that all servers should be hosting either floor(average) or
 * ceiling(average)
 *
 * @throws Exception
 */
@Test (timeout=60000)
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    // Build a mock cluster and capture its per-server loads.
    Map<ServerName, List<HRegionInfo>> serverMap = mockClusterServers(mockCluster);
    List<ServerAndLoad> initialLoads = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(initialLoads) + " " + printStats(initialLoads));
    // Run the balancer and apply its plans to the mock state, then verify
    // the floor/ceiling invariant on the result.
    List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap);
    List<ServerAndLoad> balanced = reconcile(initialLoads, plans, serverMap);
    LOG.info("Mock Balance : " + printMock(balanced));
    assertClusterAsBalanced(balanced);
    // Hand the pooled mock regions and servers back for the next iteration.
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
      returnRegions(entry.getValue());
      returnServer(entry.getKey());
    }
  }
}
/**
 * Test the load balancing algorithm.
 *
 * Invariant is that all servers should be hosting either floor(average) or
 * ceiling(average)
 *
 * @throws Exception
 */
@Test
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    Map<ServerName, List<HRegionInfo>> clusterServers = mockClusterServers(mockCluster);
    List<ServerAndLoad> loads = convertToList(clusterServers);
    LOG.info("Mock Cluster : " + printMock(loads) + " " + printStats(loads));
    // First pass: balancing must yield a cluster where every server hosts
    // floor(avg) or ceil(avg) regions.
    List<RegionPlan> plans = loadBalancer.balanceCluster(clusterServers);
    List<ServerAndLoad> balanced = reconcile(loads, plans, clusterServers);
    LOG.info("Mock Balance : " + printMock(balanced));
    assertClusterAsBalanced(balanced);
    // Second pass: an already-balanced cluster must produce no further plans.
    List<RegionPlan> secondPlans = loadBalancer.balanceCluster(clusterServers);
    assertNull(secondPlans);
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : clusterServers.entrySet()) {
      returnRegions(entry.getValue());
      returnServer(entry.getKey());
    }
  }
}
/**
 * Apply the given region plans to the per-server load list and to the
 * server-to-regions map (the map is mutated in place).
 * This assumes the RegionPlan HSI instances are the same ones in the map, so
 * actually no need to even pass in the map, but I think it's clearer.
 *
 * @param list current load per server
 * @param plans moves to apply; may be null, meaning no moves
 * @param servers server-to-regions map, updated in place to reflect the plans
 * @return the updated per-server loads
 */
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
    List<RegionPlan> plans, Map<ServerName, List<HRegionInfo>> servers) {
  Map<ServerName, ServerAndLoad> map =
      new HashMap<ServerName, ServerAndLoad>(list.size());
  for (ServerAndLoad sl : list) {
    map.put(sl.getServerName(), sl);
  }
  if (plans != null) {
    for (RegionPlan plan : plans) {
      ServerName source = plan.getSource();
      updateLoad(map, source, -1);
      ServerName destination = plan.getDestination();
      updateLoad(map, destination, +1);
      // Mirror the move in the server-to-regions map as well.
      servers.get(source).remove(plan.getRegionInfo());
      servers.get(destination).add(plan.getRegionInfo());
    }
  }
  // Cleanup: the original allocated `result`, immediately clear()ed it (a
  // no-op on a fresh list) and then copied into it; build it directly instead.
  return new ArrayList<ServerAndLoad>(map.values());
}
/**
 * Test the load balancing algorithm.
 *
 * Invariant is that all servers should be hosting either floor(average) or
 * ceiling(average)
 *
 * @throws Exception
 */
@Test
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    // Materialize the mock cluster and record the starting loads.
    Map<ServerName, List<HRegionInfo>> state = mockClusterServers(mockCluster);
    List<ServerAndLoad> before = convertToList(state);
    LOG.info("Mock Cluster : " + printMock(before) + " " + printStats(before));
    // Balance, apply the resulting plans, and check the invariant.
    List<RegionPlan> plans = loadBalancer.balanceCluster(state);
    List<ServerAndLoad> after = reconcile(before, plans, state);
    LOG.info("Mock Balance : " + printMock(after));
    assertClusterAsBalanced(after);
    // Return pooled fixtures so the next mock cluster can reuse them.
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : state.entrySet()) {
      returnRegions(entry.getValue());
      returnServer(entry.getKey());
    }
  }
}
/**
 * Apply the given region plans to the per-server load list and to the
 * server-to-regions map (the map is mutated in place).
 * This assumes the RegionPlan HSI instances are the same ones in the map, so
 * actually no need to even pass in the map, but I think it's clearer.
 *
 * @param list current load per server
 * @param plans moves to apply; may be null, meaning no moves
 * @param servers server-to-regions map, updated in place to reflect the plans
 * @return the updated per-server loads
 */
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
    List<RegionPlan> plans, Map<ServerName, List<HRegionInfo>> servers) {
  Map<ServerName, ServerAndLoad> map =
      new HashMap<ServerName, ServerAndLoad>(list.size());
  for (ServerAndLoad sl : list) {
    map.put(sl.getServerName(), sl);
  }
  // BUGFIX: the early `if (plans == null) return result;` returned an EMPTY
  // list, so a caller asserting balance on the result passed vacuously. The
  // other reconcile overloads return the unchanged loads when there are no
  // plans; do the same here.
  if (plans != null) {
    for (RegionPlan plan : plans) {
      ServerName source = plan.getSource();
      updateLoad(map, source, -1);
      ServerName destination = plan.getDestination();
      updateLoad(map, destination, +1);
      // Mirror the move in the server-to-regions map as well.
      servers.get(source).remove(plan.getRegionInfo());
      servers.get(destination).add(plan.getRegionInfo());
    }
  }
  return new ArrayList<ServerAndLoad>(map.values());
}
public static List<RegionPlan> makePlan(HBaseAdmin admin, List<RegionPlan> newRegionPlan) throws IOException { // snapshot current region assignment Map<HRegionInfo, ServerName> regionAssignmentMap = createRegionAssignmentMap(admin); // update with new plan for (RegionPlan regionPlan : newRegionPlan) { regionAssignmentMap.put(regionPlan.getRegionInfo(), regionPlan.getDestination()); } Map<ServerName, List<HRegionInfo>> clusterState = initializeRegionMap(admin); for (Map.Entry<HRegionInfo, ServerName> entry : regionAssignmentMap.entrySet()) clusterState.get(entry.getValue()).add(entry.getKey()); StochasticLoadBalancer balancer = new StochasticLoadBalancer(); Configuration conf = admin.getConfiguration(); conf.setFloat("hbase.regions.slop", 0.2f); balancer.setConf(conf); return balancer.balanceCluster(clusterState); }
/**
 * Run a PREVIEW balance pass and ask the operator whether to proceed.
 * With --force, skips the preview and proceeds unconditionally.
 *
 * @param regionPlanList filled in by the preview balance run
 * @param asynchronous passed through to the balance phase
 * @return true if the caller should go ahead with the real balance
 */
@SuppressWarnings("SimplifiableIfStatement")
private boolean preview(List<RegionPlan> regionPlanList, boolean asynchronous)
    throws IOException, InterruptedException {
  if (args.isForceProceed()) {
    return true;
  }
  balance(args, regionPlanList, Phase.PREVIEW, asynchronous);
  if (regionPlanList.isEmpty()) {
    System.out.println("There is no region to move.");
    return false;
  }
  System.out.println(regionPlanList.size() + " of "
      + getRegionAssignmentMap(admin, tableNameSet).size() + " region(s) will be moved.");
  warnBalanceAgain(regionPlanList);
  return Util.askProceed();
}
/**
 * Create all of the RegionPlan's needed to move from the initial cluster state to the desired
 * state.
 *
 * @param cluster The state of the cluster
 * @return List of RegionPlan's that represent the moves needed to get to desired final state.
 */
private List<RegionPlan> createRegionPlans(Cluster cluster) {
  List<RegionPlan> plans = new LinkedList<>();
  int numRegions = cluster.regionIndexToServerIndex.length;
  for (int i = 0; i < numRegions; i++) {
    int oldIndex = cluster.initialRegionIndexToServerIndex[i];
    int newIndex = cluster.regionIndexToServerIndex[i];
    // A plan is only needed when the region's server changed.
    if (oldIndex != newIndex) {
      RegionInfo hri = cluster.regions[i];
      ServerName from = cluster.servers[oldIndex];
      ServerName to = cluster.servers[newIndex];
      if (LOG.isTraceEnabled()) {
        LOG.trace("Moving Region " + hri.getEncodedName() + " from server "
            + from.getHostname() + " to " + to.getHostname());
      }
      plans.add(new RegionPlan(hri, from, to));
    }
  }
  return plans;
}
/**
 * Build a MoveRegionProcedure for the given plan. For system-table regions,
 * if the planned destination is on the excluded-server list the destination
 * is re-picked via the balancer's random assignment (best effort: failures
 * are logged and the original plan is used as-is).
 *
 * @param plan the region move plan; its destination may be rewritten
 * @return a procedure executing the (possibly adjusted) plan
 */
public MoveRegionProcedure createMoveRegionProcedure(final RegionPlan plan) {
  if (plan.getRegionInfo().getTable().isSystemTable()) {
    List<ServerName> exclude = getExcludedServersForSystemTable();
    ServerName dest = plan.getDestination();
    if (dest != null && exclude.contains(dest)) {
      try {
        LOG.info("Can not move " + plan.getRegionInfo() + " to " + dest
            + " because the server is not with highest version");
        plan.setDestination(getBalancer().randomAssignment(plan.getRegionInfo(),
            this.master.getServerManager().createDestinationServersList(exclude)));
      } catch (HBaseIOException e) {
        // Best effort only; keep the original destination on failure.
        LOG.warn(e.toString(), e);
      }
    }
  }
  return new MoveRegionProcedure(getProcedureEnvironment(), plan);
}
/** * Test the load balancing algorithm. * <p> * Invariant is that all servers should be hosting either floor(average) or ceiling(average) */ @Test public void testBalanceCluster() throws Exception { conf.setLong(StochasticLoadBalancer.MAX_STEPS_KEY, 2000000L); conf.setLong("hbase.master.balancer.stochastic.maxRunningTime", 90 * 1000); // 90 sec conf.setFloat("hbase.master.balancer.stochastic.maxMovePercent", 1.0f); loadBalancer.setConf(conf); for (int[] mockCluster : clusterStateMocks) { Map<ServerName, List<RegionInfo>> servers = mockClusterServers(mockCluster); List<ServerAndLoad> list = convertToList(servers); LOG.info("Mock Cluster : " + printMock(list) + " " + printStats(list)); List<RegionPlan> plans = loadBalancer.balanceCluster(servers); List<ServerAndLoad> balancedCluster = reconcile(list, plans, servers); LOG.info("Mock Balance : " + printMock(balancedCluster)); assertClusterAsBalanced(balancedCluster); List<RegionPlan> secondPlans = loadBalancer.balanceCluster(servers); assertNull(secondPlans); for (Map.Entry<ServerName, List<RegionInfo>> entry : servers.entrySet()) { returnRegions(entry.getValue()); returnServer(entry.getKey()); } } }
/**
 * Apply the given region plans to the per-server load list and to the
 * server-to-regions map (the map is mutated in place).
 * This assumes the RegionPlan HSI instances are the same ones in the map, so
 * actually no need to even pass in the map, but I think it's clearer.
 *
 * @param list current load per server
 * @param plans moves to apply; may be null, meaning no moves
 * @param servers server-to-regions map, updated in place to reflect the plans
 * @return the updated per-server loads
 */
protected List<ServerAndLoad> reconcile(List<ServerAndLoad> list,
    List<RegionPlan> plans, Map<ServerName, List<RegionInfo>> servers) {
  Map<ServerName, ServerAndLoad> map = new HashMap<>(list.size());
  for (ServerAndLoad sl : list) {
    map.put(sl.getServerName(), sl);
  }
  if (plans != null) {
    for (RegionPlan plan : plans) {
      ServerName source = plan.getSource();
      updateLoad(map, source, -1);
      ServerName destination = plan.getDestination();
      updateLoad(map, destination, +1);
      // Mirror the move in the server-to-regions map as well.
      servers.get(source).remove(plan.getRegionInfo());
      servers.get(destination).add(plan.getRegionInfo());
    }
  }
  // Cleanup: the original allocated `result`, immediately clear()ed it (a
  // no-op on a fresh list) and then copied into it; build it directly instead.
  return new ArrayList<>(map.values());
}
/**
 * Produce the per-rack loads that would result from applying the given plans
 * to the previous loads. The input multimap is not modified.
 *
 * @param previousLoad loads before balancing, keyed by rack/group
 * @param plans moves to apply; may be null, meaning no moves
 * @return a new multimap reflecting the plans
 */
private ArrayListMultimap<String, ServerAndLoad> reconcile(
    ArrayListMultimap<String, ServerAndLoad> previousLoad,
    List<RegionPlan> plans) {
  // Work on a copy so the caller's view of the previous load is preserved.
  ArrayListMultimap<String, ServerAndLoad> result = ArrayListMultimap.create();
  result.putAll(previousLoad);
  if (plans == null) {
    return result;
  }
  for (RegionPlan plan : plans) {
    // Each move shifts one region of load from source to destination.
    updateLoad(result, plan.getSource(), -1);
    updateLoad(result, plan.getDestination(), +1);
  }
  return result;
}
/** * Unassign all regions, so that they go through the regular region * assignment flow (in assignment manager) and are re-opened. */ @Override protected void populatePool(ExecutorService pool) { LOG.debug("Creating threads for each region server "); for (Map.Entry<ServerName, List<HRegionInfo>> e : rsToRegions .entrySet()) { final List<HRegionInfo> hris = e.getValue(); // add plans for the regions that need to be reopened Map<String, RegionPlan> plans = new HashMap<String, RegionPlan>(); for (HRegionInfo hri : hris) { RegionPlan reOpenPlan = new RegionPlan(hri, null, assignmentManager.getRegionServerOfRegion(hri)); plans.put(hri.getEncodedName(), reOpenPlan); } assignmentManager.addPlans(plans); pool.execute(new Runnable() { public void run() { assignmentManager.unassign(hris); } }); } }
/**
 * Touch timers for all regions in transition that have the passed
 * <code>sn</code> in common.
 * Call this method whenever a server checks in. Doing so helps the case where
 * a new regionserver has joined the cluster and it's been given 1k regions to
 * open. If this method is tickled every time the region reports in a
 * successful open then the 1k-th region won't be timed out just because it's
 * sitting behind the open of 999 other regions. This method is NOT used
 * as part of bulk assign -- there we have a different mechanism for extending
 * the regions in transition timer (we turn it off temporarily -- because
 * there is no regionplan involved when bulk assigning).
 * @param sn the server that just checked in
 */
private void updateTimers(final ServerName sn) {
  // This loop could be expensive.
  // First make a copy of current regionPlan rather than hold sync while
  // looping because holding sync can cause deadlock. Its ok in this loop
  // if the Map we're going against is a little stale
  Map<String, RegionPlan> copy = new HashMap<String, RegionPlan>();
  synchronized(this.regionPlans) {
    copy.putAll(this.regionPlans);
  }
  for (Map.Entry<String, RegionPlan> e: copy.entrySet()) {
    // Only plans whose destination is the server that just checked in matter.
    if (e.getValue() == null || e.getValue().getDestination() == null) continue;
    if (!e.getValue().getDestination().equals(sn)) continue;
    RegionState rs = null;
    // Read the transition state under its own lock; it is mutated concurrently.
    synchronized (this.regionsInTransition) {
      rs = this.regionsInTransition.get(e.getKey());
    }
    // The plan may be stale -- the region may have left transition already.
    if (rs == null) continue;
    // Reset this region's timeout clock so it is not expired while waiting
    // behind other opens on the same server.
    rs.updateTimestampToNow();
  }
}
/**
 * Collect the regions from META that still need assignment, registering a
 * retain-locality plan for any region whose previous server is still online.
 *
 * @param regionsInMeta regions read from META paired with their last-known location
 * @return List of regions neither in transition nor assigned.
 * @throws IOException
 */
private List<HRegionInfo> regionsToAssignWithServerName(
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  ServerManager serverManager = ((HMaster) this.server).getServerManager();
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
  RegionStates regionStates = this.assignmentManager.getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (regionStates.isRegionOffline(hri)) {
      // Prefer the region's previous server if it is still online so the
      // assignment manager can retain locality.
      if (sn != null && serverManager.isServerOnline(sn)) {
        this.assignmentManager.addPlan(hri.getEncodedName(), new RegionPlan(hri, null, sn));
      }
      regions.add(hri);
    } else {
      if (LOG.isDebugEnabled()) {
        // BUGFIX: corrected "tranition" typo in the log message.
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTable() + " because its already in transition or assigned.");
      }
    }
  }
  return regions;
}
/**
 * Create all of the RegionPlan's needed to move from the initial cluster state to the desired
 * state.
 *
 * @param initialRegionMapping Initial mapping of Region to Server
 * @param clusterState The desired mapping of ServerName to Regions
 * @return List of RegionPlan's that represent the moves needed to get to desired final state.
 */
private List<RegionPlan> createRegionPlans(Map<HRegionInfo, ServerName> initialRegionMapping,
    Map<ServerName, List<HRegionInfo>> clusterState) {
  List<RegionPlan> plans = new LinkedList<RegionPlan>();
  for (Entry<ServerName, List<HRegionInfo>> entry : clusterState.entrySet()) {
    ServerName newServer = entry.getKey();
    for (HRegionInfo region : entry.getValue()) {
      ServerName initialServer = initialRegionMapping.get(region);
      if (newServer.equals(initialServer)) {
        continue; // region is already on its desired server
      }
      // BUGFIX: guard the trace message (its string was built unconditionally)
      // and tolerate a region missing from initialRegionMapping, which
      // previously caused an NPE on initialServer.getHostname().
      if (LOG.isTraceEnabled()) {
        LOG.trace("Moving Region " + region.getEncodedName() + " from server "
            + (initialServer == null ? "(none)" : initialServer.getHostname())
            + " to " + newServer.getHostname());
      }
      plans.add(new RegionPlan(region, initialServer, newServer));
    }
  }
  return plans;
}
/**
 * Collect the regions from META that still need assignment. When retaining
 * assignment, registers a plan targeting the region's previous server if it
 * is still online.
 *
 * @param regionsInMeta regions read from META paired with their last-known location
 * @return List of regions neither in transition nor assigned.
 * @throws IOException
 */
private List<HRegionInfo> regionsToAssignWithServerName(
    final List<Pair<HRegionInfo, ServerName>> regionsInMeta) throws IOException {
  ServerManager serverManager = ((HMaster) this.server).getServerManager();
  List<HRegionInfo> regions = new ArrayList<HRegionInfo>();
  RegionStates regionStates = this.assignmentManager.getRegionStates();
  for (Pair<HRegionInfo, ServerName> regionLocation : regionsInMeta) {
    HRegionInfo hri = regionLocation.getFirst();
    ServerName sn = regionLocation.getSecond();
    if (!regionStates.isRegionInTransition(hri) && !regionStates.isRegionAssigned(hri)) {
      // When retaining assignment, prefer the previous server if still online.
      if (this.retainAssignment && sn != null && serverManager.isServerOnline(sn)) {
        this.assignmentManager.addPlan(hri.getEncodedName(), new RegionPlan(hri, null, sn));
      }
      regions.add(hri);
    } else {
      if (LOG.isDebugEnabled()) {
        // BUGFIX: corrected "tranition" typo in the log message.
        LOG.debug("Skipping assign for the region " + hri + " during enable table "
            + hri.getTableNameAsString() + " because its already in transition or assigned.");
      }
    }
  }
  return regions;
}
/**
 * Test the load balancing algorithm.
 *
 * Invariant is that all servers should be hosting either floor(average) or
 * ceiling(average)
 *
 * @throws Exception
 */
@Test
public void testBalanceCluster() throws Exception {
  for (int[] mockCluster : clusterStateMocks) {
    Map<ServerName, List<HRegionInfo>> serverMap = mockClusterServers(mockCluster);
    List<ServerAndLoad> before = convertToList(serverMap);
    LOG.info("Mock Cluster : " + printMock(before) + " " + printStats(before));
    // Balance the mock cluster, apply the plans, and verify the invariant.
    List<RegionPlan> plans = loadBalancer.balanceCluster(serverMap);
    List<ServerAndLoad> after = reconcile(before, plans);
    LOG.info("Mock Balance : " + printMock(after));
    assertClusterAsBalanced(after);
    // Hand mock servers and regions back to the pools for the next iteration.
    for (Map.Entry<ServerName, List<HRegionInfo>> entry : serverMap.entrySet()) {
      returnRegions(entry.getValue());
      returnServer(entry.getKey());
    }
  }
}