/**
 * Wires up the mocks before each test: a table description carrying the
 * configured write capacity, and a job configuration with the total map-task
 * count, a task attempt id, and the throughput write percentage. The calculator
 * under test overrides calculateMaxMapTasks so the concurrency ceiling is
 * deterministic rather than probed from a cluster.
 */
@Before
public void setup() {
  when(dynamoDBClient.describeTable(TABLE_NAME)).thenReturn(new TableDescription()
      .withProvisionedThroughput(new ProvisionedThroughputDescription()
          .withWriteCapacityUnits(WRITE_CAPACITY_UNITS)));

  JobConf conf = new JobConf();
  conf.setNumMapTasks(TOTAL_MAP_TASKS);
  conf.set("mapreduce.task.attempt.id", "attempt_m_1");
  conf.set(DynamoDBConstants.THROUGHPUT_WRITE_PERCENT,
      String.valueOf(THROUGHPUT_WRITE_PERCENT));
  when(jobClient.getConf()).thenReturn(conf);

  writeIopsCalculator = new WriteIopsCalculator(jobClient, dynamoDBClient, TABLE_NAME) {
    @Override
    int calculateMaxMapTasks(int totalMapTasks) {
      // Pin the ceiling so tests don't depend on environment discovery.
      return MAX_CONCURRENT_MAP_TASKS;
    }
  };
}
/**
 * Builds a TableDescription with a "hashKey" hash key of the given attribute
 * type, optionally a "rangeKey" range key, and a fixed 1000/1000 read/write
 * provisioned throughput.
 *
 * @param hashType attribute type of the hash key (e.g. "S", "N")
 * @param rangeType attribute type of the range key, or null for a hash-only schema
 */
private TableDescription getTableDescription(String hashType, String rangeType) {
  List<KeySchemaElement> schema = new ArrayList<>();
  List<AttributeDefinition> attrDefs = new ArrayList<>();

  schema.add(new KeySchemaElement().withAttributeName("hashKey")
      .withKeyType(KeyType.HASH));
  attrDefs.add(new AttributeDefinition().withAttributeName("hashKey")
      .withAttributeType(hashType));

  if (rangeType != null) {
    schema.add(new KeySchemaElement().withAttributeName("rangeKey")
        .withKeyType(KeyType.RANGE));
    attrDefs.add(new AttributeDefinition().withAttributeName("rangeKey")
        .withAttributeType(rangeType));
  }

  return new TableDescription()
      .withKeySchema(schema)
      .withAttributeDefinitions(attrDefs)
      .withProvisionedThroughput(new ProvisionedThroughputDescription()
          .withReadCapacityUnits(1000L)
          .withWriteCapacityUnits(1000L));
}
/**
 * Mock describeTable: records the incoming request, then answers based on the
 * requested table name. Unknown names raise ResourceNotFoundException, mirroring
 * the real service.
 */
@Override
public DescribeTableResult describeTable(DescribeTableRequest describeTableRequest) {
  this.describeTableRequest = describeTableRequest;
  String name = describeTableRequest.getTableName();

  if ("activeTable".equals(name)) {
    return tableWithStatus(TableStatus.ACTIVE);
  }
  // "creatibleTable" only resolves once a createTable call has been recorded.
  if ("creatibleTable".equals(name) && createTableRequest != null) {
    return tableWithStatus(TableStatus.ACTIVE);
  }
  if ("FULL_DESCRIBE_TABLE".equals(name)) {
    // Fully populated description: fixed timestamps, counts, and throughput.
    return new DescribeTableResult().withTable(new TableDescription()
        .withTableName(name)
        .withTableStatus(TableStatus.ACTIVE)
        .withCreationDateTime(new Date(NOW))
        .withItemCount(100L)
        .withKeySchema(new KeySchemaElement().withAttributeName("name"))
        .withProvisionedThroughput(new ProvisionedThroughputDescription()
            .withReadCapacityUnits(20L)
            .withWriteCapacityUnits(10L))
        .withTableSizeBytes(1000L));
  }
  throw new ResourceNotFoundException(name + " is missing");
}
/**
 * Returns the approximate number of segments a table should be broken up into
 * when parallel scanning. The estimate is driven either by read/write capacity
 * (more capacity allows a faster scan) or by table size (larger tables need more
 * segments so one worker does not finish long before the others), whichever
 * dominates.
 *
 * @param description the table to be scanned
 * @throws NullReadCapacityException
 *             if the table returns null readCapacity units.
 */
public static int getNumberOfSegments(TableDescription description)
    throws NullReadCapacityException {
  ProvisionedThroughputDescription provisionedThroughput =
      description.getProvisionedThroughput();

  // Cast before dividing: if GIGABYTE is an integral constant, long division
  // truncates the fractional gigabyte BEFORE Math.ceil can round it up,
  // undercounting segments for any table not an exact multiple of 1 GB.
  double tableSizeInGigabytes =
      Math.ceil(description.getTableSizeBytes() / (double) BootstrapConstants.GIGABYTE);

  Long readCapacity = provisionedThroughput.getReadCapacityUnits();
  Long writeCapacity = provisionedThroughput.getWriteCapacityUnits();
  if (writeCapacity == null) {
    // Tolerate a missing write capacity; reads dominate scan throughput anyway.
    writeCapacity = 1L;
  }
  if (readCapacity == null) {
    throw new NullReadCapacityException(
        "Cannot scan with a null readCapacity provisioned throughput");
  }

  // Writes weighted 3x relative to reads; 3000.0 normalizes to segment units.
  double throughput = (readCapacity + 3 * writeCapacity) / 3000.0;
  // tableSizeInGigabytes is already integral (ceil above), so the original's
  // second Math.ceil around it was a no-op and is dropped.
  return (int) (10 * Math.max(Math.ceil(throughput), tableSizeInGigabytes / 10));
}
/**
 * Wires up the mocks before each test: a table description carrying the
 * configured read capacity, and a job configuration with the throughput read
 * percentage. The calculator is constructed with fixed total/local segment
 * counts so results are deterministic.
 */
@Before
public void setup() {
  when(dynamoDBClient.describeTable(TABLE_NAME)).thenReturn(new TableDescription()
      .withProvisionedThroughput(new ProvisionedThroughputDescription()
          .withReadCapacityUnits(READ_CAPACITY_UNITS)));

  JobConf conf = new JobConf();
  conf.set(DynamoDBConstants.THROUGHPUT_READ_PERCENT,
      String.valueOf(THROUGHPUT_READ_PERCENT));
  when(jobClient.getConf()).thenReturn(conf);

  readIopsCalculator = new ReadIopsCalculator(jobClient, dynamoDBClient, TABLE_NAME,
      TOTAL_SEGMETNS, LOCAL_SEGMENTS);
}
/**
 * Executes the delete-table command and verifies that the request targeted
 * DOMAIN1 and that every attribute of the deleted table from the mock client's
 * response was copied into the exchange headers.
 */
@Test
public void testExecute() {
  command.execute();

  // The request must name the right table.
  assertEquals("DOMAIN1", ddbClient.deleteTableRequest.getTableName());

  // Each table attribute is surfaced as its own header.
  assertEquals(TableStatus.ACTIVE,
      exchange.getIn().getHeader(DdbConstants.TABLE_STATUS, TableStatus.class));
  assertEquals(new Date(AmazonDDBClientMock.NOW),
      exchange.getIn().getHeader(DdbConstants.CREATION_DATE, Date.class));
  assertEquals(Long.valueOf(10L),
      exchange.getIn().getHeader(DdbConstants.ITEM_COUNT, Long.class));
  assertEquals(new ArrayList<KeySchemaElement>(),
      exchange.getIn().getHeader(DdbConstants.KEY_SCHEMA, ArrayList.class));
  assertEquals(Long.valueOf(20L),
      exchange.getIn().getHeader(DdbConstants.TABLE_SIZE, Long.class));
  assertEquals(new ProvisionedThroughputDescription(),
      exchange.getIn().getHeader(DdbConstants.PROVISIONED_THROUGHPUT));
}
/**
 * Mock deleteTable: records the incoming request and returns a canned
 * description of the deleted table (fixed creation time, item count, size, and
 * an empty key schema).
 */
@Override
public DeleteTableResult deleteTable(DeleteTableRequest deleteTableRequest) {
  this.deleteTableRequest = deleteTableRequest;

  TableDescription deleted = new TableDescription()
      .withProvisionedThroughput(new ProvisionedThroughputDescription())
      .withTableName(deleteTableRequest.getTableName())
      .withCreationDateTime(new Date(NOW))
      .withItemCount(10L)
      .withKeySchema(new ArrayList<KeySchemaElement>())
      .withTableSizeBytes(20L)
      .withTableStatus(TableStatus.ACTIVE);
  return new DeleteTableResult().withTableDescription(deleted);
}
/**
 * Stubs mockTableDescription with a hash+range key schema, matching attribute
 * definitions, a single global secondary index, and the configured provisioned
 * throughput.
 */
@Before
public void setupBeforeTest() {
  // Key schema: hash + range.
  List<KeySchemaElement> keySchema = new ArrayList<KeySchemaElement>();
  keySchema.add(new KeySchemaElement().withAttributeName(hashKeyName)
      .withKeyType(KeyType.HASH));
  keySchema.add(new KeySchemaElement().withAttributeName(rangeKeyName)
      .withKeyType(KeyType.RANGE));
  Mockito.when(mockTableDescription.getKeySchema()).thenReturn(keySchema);

  // Attribute definitions for both key attributes.
  List<AttributeDefinition> attributeDefinitions = new ArrayList<AttributeDefinition>();
  attributeDefinitions.add(new AttributeDefinition().withAttributeName(hashKeyName)
      .withAttributeType(hashKeyType));
  attributeDefinitions.add(new AttributeDefinition().withAttributeName(rangeKeyName)
      .withAttributeType(rangeKeyType));
  Mockito.when(mockTableDescription.getAttributeDefinitions())
      .thenReturn(attributeDefinitions);

  // One global secondary index.
  List<GlobalSecondaryIndexDescription> secondaryIndexes =
      new ArrayList<GlobalSecondaryIndexDescription>();
  secondaryIndexes.add(new GlobalSecondaryIndexDescription().withIndexName(gsiName));
  Mockito.when(mockTableDescription.getGlobalSecondaryIndexes())
      .thenReturn(secondaryIndexes);

  // Provisioned throughput.
  Mockito.when(mockTableDescription.getProvisionedThroughput())
      .thenReturn(new ProvisionedThroughputDescription()
          .withReadCapacityUnits(readCapacity)
          .withWriteCapacityUnits(writeCapacity));
}
/**
 * Looks up the table's provisioned write capacity via DescribeTable.
 * NOTE(review): auto-unboxing will NPE if write capacity is null (e.g. an
 * on-demand table) — confirm callers only reach this for provisioned tables.
 */
private double getThroughput() {
  return dynamoDBClient.describeTable(tableName)
      .getProvisionedThroughput()
      .getWriteCapacityUnits();
}
/**
 * Looks up the table's provisioned read capacity via DescribeTable.
 * NOTE(review): auto-unboxing will NPE if read capacity is null (e.g. an
 * on-demand table) — confirm callers only reach this for provisioned tables.
 */
private double getThroughput() {
  return dynamoDBClient.describeTable(tableName)
      .getProvisionedThroughput()
      .getReadCapacityUnits();
}
/**
 * Converts raw key-schema, attribute-definition, and throughput data into an
 * IndexDescription. Throughput, keys, and defs are all optional; missing keys
 * or defs yield an index with only name/type/capacity populated. Keys whose
 * type resolves (case-insensitively) to HASH or RANGE become the hash/range
 * key attributes; other key types are ignored.
 */
private static IndexDescription toIndex(
    String indexName,
    IndexType indexType,
    List<KeySchemaElement> keys,
    List<AttributeDefinition> defs,
    ProvisionedThroughputDescription throughput) {
  IndexDescription index = IndexDescription.builder()
      .indexName(indexName)
      .indexType(indexType)
      .build();
  if (throughput != null) {
    index.setReadCapacity(throughput.getReadCapacityUnits());
    index.setWriteCapacity(throughput.getWriteCapacityUnits());
  }
  if (keys == null || defs == null) {
    return index;
  }

  // Index attribute types by name so each key resolves in O(1).
  Map<String, AttrType> typeByName = new HashMap<>();
  for (AttributeDefinition def : defs) {
    typeByName.put(def.getAttributeName(), toAttrType(def.getAttributeType()));
  }

  for (KeySchemaElement key : keys) {
    if (key == null || key.getKeyType() == null) {
      continue;
    }
    AttrDescription attr = AttrDescription.builder()
        .attrName(key.getAttributeName())
        .attrType(typeByName.get(key.getAttributeName()))
        .build();
    String keyType = key.getKeyType().toUpperCase();
    if ("HASH".equals(keyType)) {
      index.setHashKey(attr);
    } else if ("RANGE".equals(keyType)) {
      index.setRangeKey(attr);
    }
  }
  return index;
}
/**
 * Command-line entry point: describes the named DynamoDB table and prints its
 * name, ARN, status, item count, size, provisioned throughput, and attribute
 * definitions. Exits with status 1 on missing argument or service error.
 */
public static void main(String[] args) {
  final String usage = "\n"
      + "Usage:\n"
      + " DescribeTable <table>\n\n"
      + "Where:\n"
      + " table - the table to get information about.\n\n"
      + "Example:\n"
      + " DescribeTable HelloTable\n";

  if (args.length < 1) {
    System.out.println(usage);
    System.exit(1);
  }
  String tableName = args[0];
  System.out.format("Getting description for %s\n\n", tableName);

  final AmazonDynamoDB client = AmazonDynamoDBClientBuilder.defaultClient();
  try {
    TableDescription info = client.describeTable(tableName).getTable();
    if (info != null) {
      System.out.format("Table name : %s\n", info.getTableName());
      System.out.format("Table ARN : %s\n", info.getTableArn());
      System.out.format("Status : %s\n", info.getTableStatus());
      System.out.format("Item count : %d\n", info.getItemCount().longValue());
      System.out.format("Size (bytes): %d\n", info.getTableSizeBytes().longValue());

      ProvisionedThroughputDescription throughput = info.getProvisionedThroughput();
      System.out.println("Throughput");
      System.out.format(" Read Capacity : %d\n",
          throughput.getReadCapacityUnits().longValue());
      System.out.format(" Write Capacity: %d\n",
          throughput.getWriteCapacityUnits().longValue());

      System.out.println("Attributes");
      for (AttributeDefinition attribute : info.getAttributeDefinitions()) {
        System.out.format(" %s (%s)\n",
            attribute.getAttributeName(), attribute.getAttributeType());
      }
    }
  } catch (AmazonServiceException e) {
    System.err.println(e.getErrorMessage());
    System.exit(1);
  }
  System.out.println("\nDone!");
}