protected HTableDescriptor selectTable(ConcurrentHashMap<TableName, HTableDescriptor> tableMap) {
    // randomly select table from tableMap
    if (tableMap.isEmpty()) {
        return null;
    }
    // synchronization to prevent removal from multiple threads
    synchronized (tableMap) {
        ArrayList<TableName> tableList = new ArrayList<TableName>(tableMap.keySet());
        TableName randomKey = tableList.get(RandomUtils.nextInt(tableList.size()));
        HTableDescriptor randomHtd = tableMap.get(randomKey);
        // remove from tableMap
        tableMap.remove(randomKey);
        return randomHtd;
    }
}
@Override
public WebElement randomSelect(Element ele) {
    Select select = createSelect(ele);
    if (select != null) {
        List<WebElement> options = select.getOptions();
        if (CollectionUtils.isNotEmpty(options)) {
            int count = options.size();
            int index = RandomUtils.nextInt(count);
            index = (index == 0 ? 1 : index); // the first option is usually a placeholder, so skip it
            select.selectByIndex(index);
            return options.get(index);
        }
    }
    return null;
}
/**
 * Build worker node entity by IP and PORT
 */
private WorkerNodeEntity buildWorkerNode() {
    WorkerNodeEntity workerNodeEntity = new WorkerNodeEntity();
    if (DockerUtils.isDocker()) {
        workerNodeEntity.setType(WorkerNodeType.CONTAINER.value());
        workerNodeEntity.setHostName(DockerUtils.getDockerHost());
        workerNodeEntity.setPort(DockerUtils.getDockerPort());
    } else {
        workerNodeEntity.setType(WorkerNodeType.ACTUAL.value());
        workerNodeEntity.setHostName(NetUtils.getLocalAddress());
        workerNodeEntity.setPort(System.currentTimeMillis() + "-" + RandomUtils.nextInt(100000));
    }
    return workerNodeEntity;
}
public static void main(String[] args) {
    byte[] data = new byte[1024 * 1024];
    for (int i = 0; i < data.length; i++) {
        data[i] = (byte) RandomUtils.nextInt(255);
    }
    Test t = new Test();
    Scanner sc = new Scanner(System.in);
    sc.nextLine();
    // t.testtMap(data);
    // sc.nextLine();
    // t.testMap(data);
    // sc.nextLine();
    t.testNMap(data);
    sc.nextLine();
}
public static void main(String[] args) {
    byte[] data = new byte[1024 * 1024 * 1];
    for (int i = 0; i < data.length; i++) {
        data[i] = (byte) RandomUtils.nextInt(255);
    }
    Scanner sc = new Scanner(System.in);
    // sc.nextLine();
    // new Test().testMap(data);
    sc.nextLine();
    Test t = new Test();
    t.testNMap(data);
    sc.nextLine();
}
@Override
public Node next() throws InterruptedException {
    List<Node> nodes = getAliveNodes();
    if (nodes == null || nodes.size() == 0) {
        return null;
    }
    Long nid = ArbitrateConfigUtils.getCurrentNid();
    Node current = new Node();
    current.setId(nid);
    // check whether the local node should be returned preferentially
    boolean existLocal = nodes.remove(current);
    if (existLocal && nodes.size() == 0) { // the local node is the only one alive
        return current;
    } else if (existLocal && RandomUtils.nextInt(100) < localPercent) { // apply the local-preference percentage
        return current;
    } else {
        int index = RandomUtils.nextInt(nodes.size());
        return nodes.get(index);
    }
}
public Node next() throws InterruptedException {
    List<Node> nodes = getAliveNodes();
    if (nodes == null || nodes.size() == 0) {
        return null;
    }
    Long nid = ArbitrateConfigUtils.getCurrentNid();
    Node current = new Node();
    current.setId(nid);
    // check whether the local node should be returned preferentially
    boolean existLocal = nodes.remove(current);
    if (existLocal && nodes.size() == 0) { // the local node is the only one alive
        return current;
    } else if (existLocal && RandomUtils.nextInt(100) < localPercent) { // apply the local-preference percentage
        return current;
    } else {
        int number = round.incrementAndGet();
        if (number > MAX_ROUND) {
            number = round.getAndSet(0);
        }
        int index = (int) (number % nodes.size());
        return nodes.get(index);
    }
}
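The two next() selectors above differ only in how they pick a non-local node once the local-preference check fails: the first draws a uniformly random index, the second advances a shared round-robin counter. A minimal self-contained sketch of that local-preference pattern follows; the class name, the 30% threshold, and the use of plain JDK randomness are assumptions for illustration, not taken from the project.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;
import java.util.concurrent.atomic.AtomicInteger;

public class LocalPreferenceSelector {
    private static final int LOCAL_PERCENT = 30; // assumed preference threshold
    private final AtomicInteger round = new AtomicInteger();

    /** Returns the local node LOCAL_PERCENT% of the time; otherwise round-robins over the rest. */
    public String next(String local, List<String> alive) {
        if (alive == null || alive.isEmpty()) {
            return null;
        }
        List<String> others = new ArrayList<>(alive);
        boolean hasLocal = others.remove(local);
        if (hasLocal && others.isEmpty()) {
            return local; // the local node is the only one alive
        }
        if (hasLocal && ThreadLocalRandom.current().nextInt(100) < LOCAL_PERCENT) {
            return local; // local wins the percentage check
        }
        int index = Math.floorMod(round.incrementAndGet(), others.size());
        return others.get(index); // round-robin over the remaining nodes
    }
}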
@Test
public void test_simple() {
    int thread = 10;
    int count = 10;
    WeightController controller = new WeightController(thread);
    CountDownLatch latch = new CountDownLatch(thread);
    WeightWorkerTest[] workers = new WeightWorkerTest[thread];
    for (int i = 0; i < thread; i++) {
        int[] weights = new int[count];
        for (int j = 0; j < count; j++) {
            weights[j] = RandomUtils.nextInt(count);
        }
        workers[i] = new WeightWorkerTest(i, weights, controller, latch);
    }
    for (int i = 0; i < thread; i++) {
        workers[i].start();
    }
    try {
        latch.await();
    } catch (InterruptedException e) {
        want.fail();
    }
}
private Intent createIntent(Key key, long mac, NodeId node, Multimap<NodeId, Device> devices) {
    // choose a random device for which this node is master
    List<Device> deviceList = devices.get(node).stream().collect(Collectors.toList());
    Device device = deviceList.get(RandomUtils.nextInt(deviceList.size()));

    // FIXME we currently ignore the path length and always use the same device
    TrafficSelector selector = DefaultTrafficSelector.builder()
            .matchEthDst(MacAddress.valueOf(mac)).build();
    TrafficTreatment treatment = DefaultTrafficTreatment.emptyTreatment();
    ConnectPoint ingress = new ConnectPoint(device.id(), PortNumber.portNumber(1));
    ConnectPoint egress = new ConnectPoint(device.id(), PortNumber.portNumber(2));

    return PointToPointIntent.builder()
            .appId(appId)
            .key(key)
            .selector(selector)
            .treatment(treatment)
            .ingressPoint(ingress)
            .egressPoint(egress)
            .build();
}
@Override
protected void chore() {
    final StringBuffer whyFlush = new StringBuffer();
    for (Region r : this.server.onlineRegions.values()) {
        if (r == null) continue;
        if (((HRegion) r).shouldFlush(whyFlush)) {
            FlushRequester requester = server.getFlushRequester();
            if (requester != null) {
                long randomDelay = RandomUtils.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME;
                LOG.info(getName() + " requesting flush of "
                        + r.getRegionInfo().getRegionNameAsString() + " because "
                        + whyFlush.toString() + " after random delay " + randomDelay + "ms");
                // Throttle the flushes by putting a delay. If we don't throttle, and there
                // is a balanced write-load on the regions in a table, we might end up
                // overwhelming the filesystem with too many flushes at once.
                requester.requestDelayedFlush(r, randomDelay, false);
            }
        }
    }
}
@Override
public void perform() throws Exception {
    HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
    Admin admin = util.getHBaseAdmin();
    boolean major = RandomUtils.nextInt(100) < majorRatio;

    LOG.info("Performing action: Compact table " + tableName + ", major=" + major);
    try {
        if (major) {
            admin.majorCompact(tableName);
        } else {
            admin.compact(tableName);
        }
    } catch (Exception ex) {
        LOG.warn("Compaction failed, might be caused by other chaos: " + ex.getMessage());
    }
    if (sleepTime > 0) {
        Thread.sleep(sleepTime);
    }
}
@Override
public void run() {
    // Add some jitter.
    int jitter = RandomUtils.nextInt((int) periodMs);
    LOG.info("Sleeping for " + jitter + " to add jitter");
    Threads.sleep(jitter);

    while (!isStopped()) {
        long start = System.currentTimeMillis();
        runOneIteration();

        if (isStopped()) return;

        long sleepTime = periodMs - (System.currentTimeMillis() - start);
        if (sleepTime > 0) {
            LOG.info("Sleeping for: " + sleepTime);
            Threads.sleep(sleepTime);
        }
    }
}
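The initial random sleep in the run() loop above is the usual way to keep identically scheduled workers from all firing at the same instant. A small self-contained sketch of the same jitter-then-fixed-period loop using plain JDK facilities; the period and the doWork() body are placeholder assumptions.

import java.util.concurrent.ThreadLocalRandom;

public class JitteredLoop implements Runnable {
    private static final long PERIOD_MS = 60_000L; // assumed period
    private volatile boolean stopped;

    @Override
    public void run() {
        try {
            // Random initial delay in [0, PERIOD_MS) so concurrently started workers drift apart.
            Thread.sleep(ThreadLocalRandom.current().nextLong(PERIOD_MS));
            while (!stopped) {
                long start = System.currentTimeMillis();
                doWork();
                long remaining = PERIOD_MS - (System.currentTimeMillis() - start);
                if (remaining > 0) {
                    Thread.sleep(remaining); // keep a roughly fixed period per iteration
                }
            }
        } catch (InterruptedException e) {
            Thread.currentThread().interrupt();
        }
    }

    private void doWork() { /* placeholder for the periodic task */ }

    public void stop() { stopped = true; }
}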
/** Selects a random item from the given items with weights. */
public static <T> T selectWeightedRandomItem(List<Pair<T, Integer>> items) {
    int totalWeight = 0;
    for (Pair<T, Integer> pair : items) {
        totalWeight += pair.getSecond();
    }

    int cutoff = RandomUtils.nextInt(totalWeight);
    int cumulative = 0;
    T item = null;

    // warn: O(n)
    for (int i = 0; i < items.size(); i++) {
        int curWeight = items.get(i).getSecond();
        if (cutoff < cumulative + curWeight) {
            item = items.get(i).getFirst();
            break;
        }
        cumulative += curWeight;
    }

    return item;
}
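The helper above is roulette-wheel selection: the random cutoff lands inside exactly one weight segment, so each item is chosen with probability weight/totalWeight. A hedged, self-contained restatement of the same idea over a plain Map; the names and example weights are illustrative, not from the original project.

import java.util.LinkedHashMap;
import java.util.Map;
import java.util.concurrent.ThreadLocalRandom;

public class WeightedPick {
    /** Roulette-wheel selection: each key is chosen with probability weight / totalWeight. */
    static <T> T pick(Map<T, Integer> weights) {
        int total = weights.values().stream().mapToInt(Integer::intValue).sum();
        int cutoff = ThreadLocalRandom.current().nextInt(total); // uniform in [0, total)
        int cumulative = 0;
        for (Map.Entry<T, Integer> e : weights.entrySet()) {
            cumulative += e.getValue();
            if (cutoff < cumulative) {
                return e.getKey();
            }
        }
        throw new IllegalStateException("unreachable when total > 0");
    }

    public static void main(String[] args) {
        Map<String, Integer> actions = new LinkedHashMap<>();
        actions.put("compact", 70); // ~70% of picks
        actions.put("split", 20);   // ~20% of picks
        actions.put("merge", 10);   // ~10% of picks
        System.out.println(pick(actions));
    }
}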
@Override
public void map(LongWritable key, SamRecordWritable value, Context context)
        throws IOException, InterruptedException {
    SamRecordDatum datum = new SamRecordDatum();
    String rgID = SamRecordUtils.getReadGroup(value.get());
    GaeaSamRecord record = new GaeaSamRecord(mFileHeader, value.get());
    sampleName = mFileHeader.getReadGroup(rgID).getSample();
    if (datum.parseSam(record.getSAMString())) {
        long winNum = -1;
        winNum = datum.getPosition() / BamQualityControl.WINDOW_SIZE;
        formatKeyValue(datum, rgID, winNum, true);
        context.write(outK, outV);
        if (winNum != (datum.getEnd() / BamQualityControl.WINDOW_SIZE)) {
            winNum++;
            formatKeyValue(datum, rgID, winNum, false);
            context.write(outK, outV);
        }
    } else {
        if (unmappedReadsNum > 10000) {
            randomkey = RandomUtils.nextInt();
            unmappedReadsNum = 0;
        }
        context.write(new Text(formatKey("-1:-1", randomkey)), new Text("1"));
        unmappedReadsNum++;
    }
}
@Override
public Object nextData() {
    if (closed)
        throw new IllegalStateException("file closed..");
    int position = RandomUtils.nextInt(size - 1);
    ByteBuffer buffer = localBuffer.get();
    goNextNewLineHead(buffer, position);
    String[] obj = null;
    String line = readLine(buffer);
    if (needSplit) {
        if (lineSplit == null) {
            obj = StringUtils.split(line);
        } else {
            obj = StringUtils.split(line, lineSplit);
        }
        return obj;
    } else {
        return line;
    }
}
public Event chooseRandomEvent(HashMap<String, String> event) {
    Event e = null;
    if (event.size() > 0) {
        int index = RandomUtils.nextInt(event.size());
        String[] keys = event.keySet().toArray(new String[event.keySet().size()]);
        String randKey = keys[index];
        FactoryEvent factory = new FactoryEvent();
        e = factory.getEvent(randKey);
        if (e != null) {
            e.setServer(_server);
            e.setConfiguration(event.get(randKey));
            _server.getPluginManager().registerEvents(e, _plugin);
        }
    }
    return e;
}
private static JSONArray getArray(Swagger swagger, RandomGenerator gen, ArrayProperty property) {
    JSONArray res = new JSONArray();
    int max = 1 + RandomUtils.nextInt(10);
    if (property.getItems() instanceof RefProperty) {
        for (int i = 0; i <= max; i++) {
            res.put(getRef(swagger, gen, (RefProperty) property.getItems()));
        }
    } else {
        for (int i = 0; i <= max; i++) {
            res.put(fillProperty(swagger, gen, property.getItems()));
        }
    }
    return res;
}
@Test
public void test() {
    System.out.println(NodeEnum.SERVICE_NODE);
    NodeEnum nodeName = NodeEnum.getEnum("group_node");
    // nodeName = NodeEnum.valueOf("service_nodes");
    switch (nodeName) {
        case SERVICE_NODE:
            System.out.println(NodeEnum.SERVICE_NODE);
            break;
        case GROUP_NODE:
            System.out.println(NodeEnum.GROUP_NODE);
            break;
        default:
            break;
    }
    String namespace = "namespace";
    String pathname = NodeEnum.GROUP_NODE.toString();
    String nodePath = CocoUtils.buildPath(namespace, pathname);
    System.out.println(nodePath);
    for (int i = 0; i < 100; i++) {
        System.out.println(RandomUtils.nextInt(10));
    }
    String teString = new String("".getBytes(), Charsets.UTF_8);
    System.out.println(teString);
}
/**
 * Can be used with an XML configuration file or a properties file.
 *
 * @param datakey
 */
public void fillFromConfig(String datakey) {
    List<Object[]> set = DataProviderUtil.getDataSetAsMap(datakey);
    if (set.isEmpty()) {
        return;
    }
    int index = 0;
    if (set.size() > 1) {
        if (ApplicationProperties.BEAN_POPULATE_RANDOM.getBoolenVal(false)) {
            // get random index from 0 to size-1
            index = RandomUtils.nextInt(set.size());
        } else {
            // get next index; if index exceeds size then start with 0
            int cindex = getBundle().getInt(RetryAnalyzer.RETRY_INVOCATION_COUNT, 0);
            index = cindex % set.size();
        }
    }
    fillData(set.get(index)[0]);
}
/**
 * Checks the bucket distribution for Boolean values.
 *
 * @throws IndexParseFieldException
 */
public static RunStats delegateGetBucketForBoolean(int buckets) throws Exception {
    Long startTime = System.currentTimeMillis();
    PrimaryIndexBucketGeneratorImpl ibg = new PrimaryIndexBucketGeneratorImpl(FieldDataType.BOOLEAN, buckets);
    PrimaryIndexBucketLocatorImpl locator = new PrimaryIndexBucketLocatorImpl(null, null, null, null,
            ibg.generateBuckets(), null, null, null, null);
    logger.debug("Test to getBucket for the bucket distribution on Boolean values");
    HashMap<Long, List<Object>> hm = new HashMap<>();
    SummaryStatistics stats = new SummaryStatistics();
    for (int i = 0; i < 20000; i++) {
        Boolean randomBoolean = RandomUtils.nextBoolean();
        Long bucketId = locator.getBucket(randomBoolean, FieldDataType.BOOLEAN);
        TestUtils.calculate(hm, stats, bucketId, randomBoolean);
    }
    Long runTime = System.currentTimeMillis() - startTime;
    RunStats runStats = new RunStats("BucketForBoolean", FieldDataType.BOOLEAN, runTime, stats, hm);
    logger.debug(runStats.toString());
    return runStats;
}
/**
 * Tests for {@link TaskDao#findNextScheduledTask()}.
 */
@Test
public void testFindNextScheduledTask() {
    taskDao.create(Task.Factory.newInstance(UUID.randomUUID().toString(), true, TaskStatus.FAILED,
            new Date(System.currentTimeMillis() - 100000), "1"));
    taskDao.create(Task.Factory.newInstance(UUID.randomUUID().toString(), true, TaskStatus.RUNNING,
            new Date(System.currentTimeMillis() - 100000), "1"));
    taskDao.create(Task.Factory.newInstance(UUID.randomUUID().toString(), false, TaskStatus.PENDING,
            new Date(System.currentTimeMillis() - 100000), "1"));
    Assert.assertEquals(taskDao.findNextScheduledTask(), null);
    int e = 1000000;
    for (int i = e * (10 + RandomUtils.nextInt(32)); i > 0; i = i - e) {
        Long taskId = taskDao.create(
                Task.Factory.newInstance(UUID.randomUUID().toString(), true, TaskStatus.PENDING,
                        new Date(i), "test")).getId();
        Assert.assertEquals(taskDao.findNextScheduledTask().getId(), taskId);
    }
}
/**
 * Tests that it is possible to find all TaskExecutions for a given instance name.
 */
@Test
public void testFindTaskExecutionsByInstanceName() {
    String instanceName;
    Task task;
    TaskExecution taskExecution;
    for (int e = 1; e <= 5 + RandomUtils.nextInt(20); e++) {
        instanceName = UUID.randomUUID().toString();
        Assert.assertEquals(taskExecutionDao.findTaskExecutions(instanceName).size(), 0);
        for (int i = 1; i <= 10 + RandomUtils.nextInt(20); i++) {
            task = Task.Factory.newInstance(UUID.randomUUID().toString(), true, TaskStatus.PENDING,
                    new Date(), "com.");
            taskExecution = TaskExecution.Factory.newInstance(instanceName, task);
            taskDao.create(task);
            taskExecutionDao.create(taskExecution);
            Assert.assertEquals(taskExecutionDao.findTaskExecutions(instanceName).size(), i);
        }
    }
}
/**
 * Test for {@link BlogManagement#resetGlobalPermissions()} when a client manager calls the
 * method.
 *
 * @throws BlogAccessException
 * @throws BlogNotFoundException
 */
@Test
public void testResetGlobalPermissionsForClientManager() throws BlogNotFoundException,
        BlogAccessException {
    User user = TestUtils.createRandomUser(true);
    List<Long> blogIds = new ArrayList<Long>();
    for (int i = 10 + RandomUtils.nextInt(10); i > 0; i--) {
        blogIds.add(TestUtils.createRandomBlog(i % 2 == 0, i % 2 == 1, user).getId());
    }
    AuthenticationTestUtils.setSecurityContext(user);
    blogManagement.resetGlobalPermissions();
    for (Long blogId : blogIds) {
        Blog blog = blogManagement.getBlogById(blogId, false);
        Assert.assertFalse(blog.isAllCanRead());
        Assert.assertFalse(blog.isAllCanWrite());
    }
}
/**
 * Tests for {@link TaskManagement#getNextScheduledTask()}.
 *
 * @throws Exception
 *             in case the test failed
 */
@Test
public void testGetNextScheduledTask() throws Exception {
    TaskTO task = taskManagement.getNextScheduledTask();
    Assert.assertNull(task);
    String uniqueTaskNamePrefix = UUID.randomUUID().toString() + "_getNext_";
    String uniqueTaskName = uniqueTaskNamePrefix + "single";
    taskManagement.addTask(UUID.randomUUID().toString(), true, 0L, new Date(5000000000L),
            new HashMap<String, String>(), TestTaskHandler.class);
    Assert.assertNotNull(taskManagement.getNextScheduledTask());
    Assert.assertNull(taskManagement.getNextScheduledTask(new Date(5000000000L)));
    for (int i = 10 + RandomUtils.nextInt(32); i > 0; i--) {
        uniqueTaskName = uniqueTaskNamePrefix + i;
        taskManagement.addTask(uniqueTaskName, true, 0L, new Date(10000000 * i),
                new HashMap<String, String>(), TestTaskHandler.class);
        task = taskManagement.getNextScheduledTask();
        Assert.assertNotNull(task);
        Assert.assertEquals(task.getUniqueName(), uniqueTaskName);
    }
}
@BeforeClass
public static void populate() throws ArangoDBException, IOException {
    init(DB_NAME);
    arango.db(DB_NAME).createCollection(COLL_NAME);

    for (Integer i = 0; i < 50; i++) {
        final BaseDocument log = new BaseDocument();
        log.setKey(i.toString());
        log.addAttribute("date", new Date());
        log.addAttribute("status", STATUS[RandomUtils.nextInt(STATUS.length)]);
        log.addAttribute("content_length", RandomUtils.nextInt(2000));

        final Map<String, Object> req = new HashMap<>();
        req.put("method", METHODS[RandomUtils.nextInt(METHODS.length)]);
        req.put("url", "/zeppelin/" + UUID.randomUUID().toString());
        req.put("headers", Arrays.asList("Accept: *.*", "Host: apache.org"));
        log.addAttribute("request", req);

        arango.db(DB_NAME).collection(COLL_NAME).insertDocument(log);
    }
}
public String getInfoTextSubject(String alias) {
    alias = StringUtils.trimToEmpty(alias);
    final InfoTextQuery query = new InfoTextQuery();
    query.setOnlyActive(true);
    query.setAlias(alias);
    final List<InfoText> result = infoTextDao.search(query);
    final int size = result.size();
    if (size == 0) {
        if (StringUtils.isEmpty(alias)) {
            return messageResolver.message(INFO_TEXT_EMPTY_PROPERTY);
        } else {
            return messageResolver.message(INFO_TEXT_NOT_MATCH_PROPERTY, alias);
        }
    } else if (size == 1) {
        return result.get(0).getSubject();
    } else { // size > 1
        final int index = RandomUtils.nextInt(result.size());
        return result.get(index).getSubject();
    }
}
public InfoText loadByAliasForWebServices(String alias) {
    alias = StringUtils.trimToEmpty(alias);
    final InfoTextQuery query = new InfoTextQuery();
    query.setOnlyActive(true);
    query.setAlias(alias);
    final List<InfoText> result = infoTextDao.search(query);
    final int size = result.size();
    if (size == 0) {
        throw new EntityNotFoundException(InfoText.class);
    } else if (size == 1) {
        return result.get(0);
    } else { // size > 1
        final int index = RandomUtils.nextInt(result.size());
        return result.get(index);
    }
}
private BigInteger buildCardNumber(final String cardFormatNumber) {
    BigInteger generatedNumber;
    boolean exists = false;
    do {
        final StringBuilder sb = new StringBuilder();
        for (int i = 0; i < cardFormatNumber.length(); i++) {
            final char c = cardFormatNumber.charAt(i);
            if (Character.isDigit(c)) {
                sb.append(c);
            } else if (c == NUMERIC_CONSTANT) {
                // never generates zero for the first digit
                final int next = i == 0 ? RandomUtils.nextInt(9) + 1 : RandomUtils.nextInt(10);
                sb.append(next);
            }
        }
        generatedNumber = new BigInteger(sb.toString());
        exists = cardDao.existsNumber(generatedNumber);
    } while (exists);
    return generatedNumber;
}
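buildCardNumber fills each wildcard in a format mask with a random digit and forces the leading digit into 1-9 so the resulting BigInteger does not start with zero. A self-contained sketch of that masking step in isolation; the '#' wildcard and the method names are assumptions, and the original's uniqueness check against the DAO is omitted here.

import java.math.BigInteger;
import java.util.concurrent.ThreadLocalRandom;

public class CardNumberSketch {
    /**
     * Fills every '#' in the format with a random digit and keeps literal digits as-is.
     * The first appended digit is drawn from 1-9 so the number never starts with zero.
     */
    static BigInteger fromFormat(String format) {
        StringBuilder sb = new StringBuilder();
        for (int i = 0; i < format.length(); i++) {
            char c = format.charAt(i);
            if (Character.isDigit(c)) {
                sb.append(c);
            } else if (c == '#') {
                int digit = sb.length() == 0
                        ? ThreadLocalRandom.current().nextInt(9) + 1 // 1-9 for the leading digit
                        : ThreadLocalRandom.current().nextInt(10);   // 0-9 elsewhere
                sb.append(digit);
            }
        }
        return new BigInteger(sb.toString());
    }

    public static void main(String[] args) {
        System.out.println(fromFormat("####-####-####-####"));
    }
}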
@BeforeClass
public void startServer() throws Exception {
    int port = 49152 + RandomUtils.nextInt(65535 - 49152);
    String serverUrl = "http://localhost:" + port + "/jax";

    SelectChannelConnector connector = new SelectChannelConnector();
    connector.setPort(port);
    _jettyServer = new Server();
    _jettyServer.setConnectors(new Connector[]{connector});

    ContextHandlerCollection contexts = new ContextHandlerCollection();
    HandlerCollection handlers = new HandlerCollection();
    handlers.addHandler(contexts);

    WebAppContext ogWebAppContext = new WebAppContext("RemoteViewRunnerTest", "/");
    org.springframework.core.io.Resource resource = new ClassPathResource("web-engine");
    ogWebAppContext.setBaseResource(Resource.newResource(resource.getFile()));
    DataViewRunnerResource viewRunnerResource = new DataViewRunnerResource(new TestViewRunner());
    ComponentRepository repo = new ComponentRepository(ComponentLogger.Console.VERBOSE);
    repo.getRestComponents().publishResource(viewRunnerResource);
    repo.getRestComponents().publishHelper(new FudgeObjectBinaryConsumer());
    repo.getRestComponents().publishHelper(new FudgeObjectBinaryProducer());
    ogWebAppContext.setEventListeners(new EventListener[]{new ComponentRepositoryServletContextListener(repo)});
    handlers.addHandler(ogWebAppContext);

    _jettyServer.setHandler(handlers);
    _jettyServer.start();

    _remoteViewRunner = new RemoteViewRunner(URI.create(serverUrl));
}
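The startServer() test above picks a random port from the dynamic range (49152 and up) to reduce clashes between parallel test runs. For reference, a minimal sketch of that pattern plus a commonly used alternative, binding to port 0 so the OS assigns a free port; the method names here are illustrative and not from the original test.

import java.io.IOException;
import java.net.ServerSocket;
import java.util.concurrent.ThreadLocalRandom;

public class TestPorts {
    /** Random port from the dynamic/ephemeral range, mirroring 49152 + nextInt(65535 - 49152) above. */
    static int randomDynamicPort() {
        return 49152 + ThreadLocalRandom.current().nextInt(65535 - 49152);
    }

    /**
     * Common alternative for tests: bind to port 0 and let the OS pick a free port,
     * which removes the (small) risk that the randomly chosen port is already taken.
     */
    static int freePort() throws IOException {
        try (ServerSocket socket = new ServerSocket(0)) {
            return socket.getLocalPort();
        }
    }
}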
@Override
protected void chore() {
    for (HRegion r : this.server.onlineRegions.values()) {
        if (r == null) continue;
        if (r.shouldFlush()) {
            FlushRequester requester = server.getFlushRequester();
            if (requester != null) {
                long randomDelay = RandomUtils.nextInt(RANGE_OF_DELAY) + MIN_DELAY_TIME;
                LOG.info(getName() + " requesting flush for region "
                        + r.getRegionNameAsString() + " after a delay of " + randomDelay);
                // Throttle the flushes by putting a delay. If we don't throttle, and there
                // is a balanced write-load on the regions in a table, we might end up
                // overwhelming the filesystem with too many flushes at once.
                requester.requestDelayedFlush(r, randomDelay);
            }
        }
    }
}
@Test
public void lessThanWriteRestraintTest() throws InterruptedException {
    if (ASTATICISM_TEST) {
        return;
    }
    int executCount = 10;
    int writeCount = RandomUtils.nextInt(executCount);
    MockServer.setConfigInfo(TAtomConstants.getAppDataId(APPNAME, DBKEY_0),
            " maxPoolSize=100\r\nuserName=tddl\r\nminPoolSize=1\r\nwriteRestrictTimes="
                    + executCount + "\r\n");
    TimeUnit.SECONDS.sleep(SLEEP_TIME);
    String sql = "update normaltbl_0001 set gmt_create=? where pk=?";
    for (int i = 0; i < writeCount; i++) {
        try {
            int rs = tddlJT.update(sql, new Object[] { nextDay, RANDOM_ID });
            Assert.assertEquals(1, rs);
            executCount--;
        } catch (DataAccessException ex) {
            // exception intentionally swallowed; only successful updates decrement executCount
        }
    }
    Assert.assertTrue(executCount >= 0);
}
@Test
public void lessThanReadRestraintByDynamicTest() throws InterruptedException {
    if (ASTATICISM_TEST) {
        return;
    }
    int executCount = 10;
    int readCount = RandomUtils.nextInt(executCount);
    MockServer.setConfigInfo(TAtomConstants.getAppDataId(APPNAME, DBKEY_0),
            " maxPoolSize=100\r\nuserName=tddl\r\nminPoolSize=1\r\nreadRestrictTimes="
                    + executCount + "\r\n");
    TimeUnit.SECONDS.sleep(SLEEP_TIME);
    String sql = "select * from normaltbl_0001 where pk=?";
    for (int i = 0; i < readCount; i++) {
        try {
            Map rs = tddlJT.queryForMap(sql, new Object[] { RANDOM_ID });
            Assert.assertEquals(time, String.valueOf(rs.get("gmt_create")));
            executCount--;
        } catch (DataAccessException ex) {
            // exception intentionally swallowed; only successful queries decrement executCount
        }
    }
    Assert.assertTrue(executCount >= 0);
}
@Override
public void convert(Document doc) {
    log.debug("Converting artifacts cleanup system property to a cron expression based configuration descriptor.");

    Element rootElement = doc.getRootElement();
    Namespace namespace = rootElement.getNamespace();

    // Create cron expression element with random times from 04:00AM to 05:59AM
    Element cronExpElement = new Element("cronExp", namespace);
    int minutes = RandomUtils.nextInt(60);  // 0-59
    int hours = RandomUtils.nextInt(2) + 4; // 4-5
    cronExpElement.setText("0 " + minutes + " " + hours + " * * ?");

    Element cleanupElement = new Element("cleanupConfig", namespace);
    cleanupElement.addContent(cronExpElement);
    rootElement.addContent(cleanupElement);

    log.debug("Finished converting the artifacts cleanup system property.");
}
@Override
public void perform() throws Exception {
    HTable table = null;
    try {
        Configuration conf = context.getHBaseIntegrationTestingUtility().getConfiguration();
        table = new HTable(conf, tableName);
    } catch (IOException e) {
        LOG.debug("Error creating HTable used to get list of region locations.", e);
        return;
    }

    Collection<ServerName> serverNames = table.getRegionLocations().values();
    ServerName[] nameArray = serverNames.toArray(new ServerName[serverNames.size()]);

    restartRs(nameArray[RandomUtils.nextInt(nameArray.length)], sleepTime);
}
protected void unbalanceRegions(ClusterStatus clusterStatus,
        List<ServerName> fromServers, List<ServerName> toServers,
        double fractionOfRegions) throws Exception {
    List<byte[]> victimRegions = new LinkedList<byte[]>();
    for (ServerName server : fromServers) {
        ServerLoad serverLoad = clusterStatus.getLoad(server);
        // Ugh.
        List<byte[]> regions = new LinkedList<byte[]>(serverLoad.getRegionsLoad().keySet());
        int victimRegionCount = (int) Math.ceil(fractionOfRegions * regions.size());
        LOG.debug("Removing " + victimRegionCount + " regions from " + server.getServerName());
        for (int i = 0; i < victimRegionCount; ++i) {
            int victimIx = RandomUtils.nextInt(regions.size());
            String regionId = HRegionInfo.encodeRegionName(regions.remove(victimIx));
            victimRegions.add(Bytes.toBytes(regionId));
        }
    }

    LOG.info("Moving " + victimRegions.size() + " regions from " + fromServers.size()
            + " servers to " + toServers.size() + " different servers");
    HBaseAdmin admin = this.context.getHBaseIntegrationTestingUtility().getHBaseAdmin();
    for (byte[] victimRegion : victimRegions) {
        int targetIx = RandomUtils.nextInt(toServers.size());
        admin.move(victimRegion, Bytes.toBytes(toServers.get(targetIx).getServerName()));
    }
}
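unbalanceRegions picks its victims by repeatedly removing a random element from the candidate list, which is sampling without replacement. The same pattern as a small generic helper; this is a hedged sketch with illustrative names, and like the original it mutates the input list.

import java.util.ArrayList;
import java.util.List;
import java.util.concurrent.ThreadLocalRandom;

public class RandomSampling {
    /**
     * Removes and returns ceil(fraction * items.size()) randomly chosen elements.
     * The input list is mutated, so pass a copy if the caller still needs it intact.
     */
    static <T> List<T> takeRandomFraction(List<T> items, double fraction) {
        int count = (int) Math.ceil(fraction * items.size());
        List<T> picked = new ArrayList<>(count);
        for (int i = 0; i < count && !items.isEmpty(); i++) {
            picked.add(items.remove(ThreadLocalRandom.current().nextInt(items.size())));
        }
        return picked;
    }
}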
@Override
public void perform() throws Exception {
    HBaseTestingUtility util = context.getHBaseIntegrationTestingUtility();
    HBaseAdmin admin = util.getHBaseAdmin();

    LOG.info("Performing action: Merge random adjacent regions of table " + tableName);
    List<HRegionInfo> regions = admin.getTableRegions(tableNameBytes);
    if (regions == null || regions.size() < 2) {
        LOG.info("Table " + tableName + " doesn't have enough regions to merge");
        return;
    }

    int i = RandomUtils.nextInt(regions.size() - 1);
    HRegionInfo a = regions.get(i++);
    HRegionInfo b = regions.get(i);
    LOG.debug("Merging " + a.getRegionNameAsString() + " and " + b.getRegionNameAsString());
    try {
        admin.mergeRegions(a.getEncodedNameAsBytes(), b.getEncodedNameAsBytes(), false);
    } catch (Exception ex) {
        LOG.warn("Merge failed, might be caused by other chaos: " + ex.getMessage());
    }
    if (sleepTime > 0) {
        Thread.sleep(sleepTime);
    }
}
@Override
public void perform() throws Exception {
    ClusterStatus status = this.cluster.getClusterStatus();
    List<ServerName> victimServers = new LinkedList<ServerName>(status.getServers());
    int liveCount = (int) Math.ceil(FRC_SERVERS_THAT_HOARD_AND_LIVE * victimServers.size());
    int deadCount = (int) Math.ceil(FRC_SERVERS_THAT_HOARD_AND_DIE * victimServers.size());
    Assert.assertTrue((liveCount + deadCount) < victimServers.size());

    List<ServerName> targetServers = new ArrayList<ServerName>(liveCount);
    for (int i = 0; i < liveCount + deadCount; ++i) {
        int victimIx = RandomUtils.nextInt(victimServers.size());
        targetServers.add(victimServers.remove(victimIx));
    }

    unbalanceRegions(status, victimServers, targetServers, HOARD_FRC_OF_REGIONS);
    Thread.sleep(WAIT_FOR_UNBALANCE_MS);
    for (int i = 0; i < liveCount; ++i) {
        killRs(targetServers.get(i));
    }
    Thread.sleep(WAIT_FOR_KILLS_MS);
    forceBalancer();
    Thread.sleep(WAIT_AFTER_BALANCE_MS);
    for (int i = 0; i < liveCount; ++i) {
        startRs(targetServers.get(i));
    }
}