/**
 * When providing 2 unapplied upgrade steps
 * and two unapplied upgrades with with-only constraints (AddJamAmount should be applied only
 * with AddJamType, and AddJamAmountUnit only with AddJamAmount)
 * When determining the upgrade path
 * Then all 4 steps are applied in the correct order.
 */
@Test
public void testConditionalUpgradeStepIsExecuted() {
    // given
    List<Class<? extends UpgradeStep>> upgradeSteps = new ArrayList<>();
    upgradeSteps.add(AddJamType.class);
    upgradeSteps.add(AddDiameter.class);
    upgradeSteps.add(AddJamAmount.class); // to be executed only with AddJamType
    upgradeSteps.add(AddJamAmountUnit.class); // to be executed only with AddJamAmount

    UpgradePathFinder pathFinder = makeFinder(upgradeSteps, appliedSteps());
    Schema current = schema(sconeTable);
    Schema target = schema(upgradedSconeTableWithJamAmount);

    // when
    List<UpgradeStep> path = pathFinder.determinePath(current, target, Sets.<String>newHashSet()).getUpgradeSteps();

    // then
    assertEquals("Number of upgrade steps", 4, path.size());
    assertSame("First", AddDiameter.class, path.get(0).getClass());
    assertSame("Second", AddJamType.class, path.get(1).getClass());
    assertSame("Third", AddJamAmountUnit.class, path.get(2).getClass());
    assertSame("Last", AddJamAmount.class, path.get(3).getClass());
}
@Override
public Iterable<Edge> getEdges(final Iterable<String> ids, final EnumSet<FetchHint> fetchHints, final Long endTime,
                               final Authorizations authorizations) {
    return new LookAheadIterable<String, Edge>() {
        @Override
        protected boolean isIncluded(String src, Edge edge) {
            return edge != null;
        }

        @Override
        protected Edge convert(String id) {
            return getEdge(id, fetchHints, endTime, authorizations);
        }

        @Override
        protected Iterator<String> createIterator() {
            return Sets.newHashSet(ids).iterator();
        }
    };
}
public static Set<Component> discoverComponentsFor(Descriptor desc) {
    Set<Component.Type> knownTypes = Sets.difference(Component.TYPES, Collections.singleton(Component.Type.CUSTOM));
    Set<Component> components = Sets.newHashSetWithExpectedSize(knownTypes.size());
    for (Component.Type componentType : knownTypes) {
        if (componentType == Component.Type.DIGEST) {
            if (desc.digestComponent != null && new File(desc.filenameFor(desc.digestComponent)).exists())
                components.add(desc.digestComponent);
        } else {
            Component component = new Component(componentType);
            if (new File(desc.filenameFor(component)).exists())
                components.add(component);
        }
    }
    return components;
}
private void prepareModel() {
    when(sourcelist1.getIdentifier()).thenReturn(SOURCELIST_1_ID);
    when(sourcelist2.getIdentifier()).thenReturn(SOURCELIST_2_ID);
    when(client1.getDragItemId()).thenReturn(ITEM_1_ID);
    when(client2.getDragItemId()).thenReturn(ITEM_2_ID);
    when(client3.getDragItemId()).thenReturn(ITEM_3_ID);
    when(model.containsClient(CLIENT_1_ID)).thenReturn(true);
    when(model.containsClient(CLIENT_2_ID)).thenReturn(true);
    when(model.containsClient(CLIENT_3_ID)).thenReturn(true);
    when(model.getClientById(CLIENT_1_ID)).thenReturn(client1);
    when(model.getClientById(CLIENT_2_ID)).thenReturn(client2);
    when(model.getClientById(CLIENT_3_ID)).thenReturn(client3);
    when(model.getSourcelistById(SOURCELIST_1_ID)).thenReturn(sourcelist1);
    when(model.getSourcelistById(SOURCELIST_2_ID)).thenReturn(sourcelist2);
    when(model.getSourceLists()).thenReturn(Sets.newHashSet(sourcelist1, sourcelist2));
    when(model.getSourcelistByClientId(CLIENT_1_ID)).thenReturn(sourcelist1);
    when(model.getSourcelistByClientId(CLIENT_2_ID)).thenReturn(sourcelist2);
    when(model.getSourcelistByClientId(CLIENT_3_ID)).thenReturn(sourcelist2);
    when(model.getClients(sourcelist1)).thenReturn(Lists.newArrayList(client1));
    when(model.getClients(sourcelist2)).thenReturn(Lists.newArrayList(client2, client3));
}
protected PropertyInteger(String name, int min, int max) {
    super(name, Integer.class);

    if (min < 0) {
        throw new IllegalArgumentException("Min value of " + name + " must be 0 or greater");
    } else if (max <= min) {
        throw new IllegalArgumentException("Max value of " + name + " must be greater than min (" + min + ")");
    } else {
        Set<Integer> set = Sets.<Integer>newHashSet();
        for (int i = min; i <= max; ++i) {
            set.add(Integer.valueOf(i));
        }
        this.allowedValues = ImmutableSet.copyOf(set);
    }
}
public Record readRecord(RecordSchema schema, JsonNode node) {
    GenericRecord record = new GenericRecord(schema);
    Set<String> jsonFields = Sets.newHashSet(node.fieldNames());
    for (Field f : schema.getFields()) {
        try {
            if (!f.isRemoved()) {
                JsonNode fieldValue = findValue(node, f, jsonFields);
                if (fieldValue != null) {
                    Object val = getNodeData(f.getSchema(), fieldValue);
                    record.set(f.getOrd(), val);
                }
            }
        } catch (Exception e) {
            throw new RuntimeException("Exception reading field " + f.getName(), e);
        }
    }
    if (!jsonFields.isEmpty()) {
        throw new RuntimeException("Unknown fields " + jsonFields + " found in JSON");
    }
    return record;
}
@Test
@Rollback
public void testFindInstancesByIds() throws Exception {
    String someAppId = "someAppId";
    String someClusterName = "someClusterName";
    String someDataCenter = "someDataCenter";
    String someIp = "someIp";
    String anotherIp = "anotherIp";

    Instance someInstance = instanceService.createInstance(
        assembleInstance(someAppId, someClusterName, someDataCenter, someIp));
    Instance anotherInstance = instanceService.createInstance(
        assembleInstance(someAppId, someClusterName, someDataCenter, anotherIp));

    List<Instance> instances = instanceService.findInstancesByIds(
        Sets.newHashSet(someInstance.getId(), anotherInstance.getId()));
    Set<String> ips = instances.stream().map(Instance::getIp).collect(Collectors.toSet());

    assertEquals(2, instances.size());
    assertEquals(Sets.newHashSet(someIp, anotherIp), ips);
}
private void createJvmAssemblyLocalComponentMetaData(EnumMap<UsageKind, List<PublishArtifact>> artifacts,
                                                     JvmAssembly assembly,
                                                     EnumMap<UsageKind, Iterable<DependencySpec>> dependenciesPerUsage,
                                                     boolean toAssembly) {
    configureUsageMetadata(UsageKind.API, Collections.<DependencySpec>emptyList(), dependenciesPerUsage);
    configureUsageMetadata(UsageKind.RUNTIME, Collections.<DependencySpec>emptyList(), dependenciesPerUsage);
    if (toAssembly) {
        // TODO:Cedric This is an approximation: when a component wants to compile against the assembly of
        // a library (not the jar), then we should give it the *stubbed classes* instead of the raw classes. However:
        // - there's no such thing as a "stubbed classes assembly"
        // - for performance reasons only the classes that belong to the API are stubbed, so we would miss
        //   the classes that do not belong to the API
        // So this makes the UsageKind.API misleading (should this be COMPILE?).
        addArtifact(UsageKind.API, assembly.getClassDirectories(), artifacts, assembly);
        addArtifact(UsageKind.RUNTIME,
            Sets.union(assembly.getClassDirectories(), assembly.getResourceDirectories()),
            artifacts, assembly);
    }
}
@Test
public void fictitiousSwitchTest() {
    Set<String> switchIds = Sets.newHashSet("BD", "BL");

    Network network = FictitiousSwitchFactory.create();

    List<Boolean> expectedSwitchStates = getSwitchStates(network, switchIds);

    BranchTripping tripping = new BranchTripping("CJ", "C");

    Set<Switch> switchesToOpen = new HashSet<>();
    Set<Terminal> terminalsToDisconnect = new HashSet<>();
    tripping.traverse(network, null, switchesToOpen, terminalsToDisconnect);

    assertEquals(switchIds, switchesToOpen.stream().map(Switch::getId).collect(Collectors.toSet()));
    assertEquals(Collections.emptySet(), terminalsToDisconnect);

    tripping.modify(network, null);
    assertTrue(network.getSwitch("BD").isOpen());
    assertTrue(network.getSwitch("BL").isOpen());

    List<Boolean> switchStates = getSwitchStates(network, switchIds);
    assertEquals(expectedSwitchStates, switchStates);
}
public void register(DrillOperatorTable operatorTable) {
    SqlOperator op;
    for (Entry<String, Collection<DrillFuncHolder>> function : methods.asMap().entrySet()) {
        Set<Integer> argCounts = Sets.newHashSet();
        String name = function.getKey().toUpperCase();
        for (DrillFuncHolder func : function.getValue()) {
            if (argCounts.add(func.getParamCount())) {
                if (func.isAggregating()) {
                    op = new DrillSqlAggOperator(name, func.getParamCount());
                } else {
                    boolean isDeterministic;
                    // prevent Drill from folding constant functions with types that cannot be
                    // materialized into literals
                    if (DrillConstExecutor.NON_REDUCIBLE_TYPES.contains(func.getReturnType().getMinorType())) {
                        isDeterministic = false;
                    } else {
                        isDeterministic = func.isDeterministic();
                    }
                    op = new DrillSqlOperator(name, func.getParamCount(), func.getReturnType(), isDeterministic);
                }
                operatorTable.add(function.getKey(), op);
            }
        }
    }
}
@Test
public final void testGetPorts() {
    putDevice(DID1, SW1);
    putDevice(DID2, SW1);
    List<PortDescription> pds = Arrays.<PortDescription>asList(
        new DefaultPortDescription(P1, true),
        new DefaultPortDescription(P2, true)
    );
    deviceStore.updatePorts(PID, DID1, pds);

    Set<PortNumber> expectedPorts = Sets.newHashSet(P1, P2);
    List<Port> ports = deviceStore.getPorts(DID1);
    for (Port port : ports) {
        assertTrue("Port is enabled", port.isEnabled());
        assertTrue("PortNumber is one of expected", expectedPorts.remove(port.number()));
    }
    assertTrue("All expected ports appeared", expectedPorts.isEmpty());

    assertTrue("DID2 has no ports", deviceStore.getPorts(DID2).isEmpty());
}
private void expandDerivRightwards(Derivation leftChild) {
    if (parser.verbose(6))
        LogInfo.begin_track("Expanding rightward");
    Map<String, List<Rule>> rhsCategoriesToRules = parser.leftToRightSiblingMap.get(leftChild.cat);
    if (rhsCategoriesToRules != null) {
        for (int i = 1; leftChild.end + i <= numTokens; ++i) {
            Set<String> intersection = Sets.intersection(rhsCategoriesToRules.keySet(),
                chart[leftChild.end][leftChild.end + i].keySet());

            for (String rhsCategory : intersection) {
                List<Rule> compatibleRules = rhsCategoriesToRules.get(rhsCategory);
                List<Derivation> rightChildren = chart[leftChild.end][leftChild.end + i].get(rhsCategory);
                generateParentDerivations(leftChild, rightChildren, true, compatibleRules);
            }
        }
        // handle terminals
        if (leftChild.end < numTokens)
            handleTerminalExpansion(leftChild, false, rhsCategoriesToRules);
    }
    if (parser.verbose(6))
        LogInfo.end_track();
}
@Override
public Set<String> get() {
    try {
        try (InputStream inputStream = getClass().getResourceAsStream("SQL_RESERVED_WORDS.txt")) {
            if (inputStream == null) {
                throw new RuntimeException("Could not find resource: [SQL_RESERVED_WORDS.txt] near [" + getClass() + "]");
            }
            InputStreamReader streamReader = new InputStreamReader(inputStream, "UTF-8");
            HashSet<String> sqlReservedWords =
                Sets.newHashSet(Splitter.on("\r\n").split(CharStreams.toString(streamReader)));

            // temporary removal of words we currently have to allow
            sqlReservedWords.remove("TYPE");      // DB2
            sqlReservedWords.remove("OPERATION"); // DB2, SQL Server "future", Postgres
            sqlReservedWords.remove("METHOD");    // Postgres
            sqlReservedWords.remove("LANGUAGE");  // DB2, ODBC (?), SQL Server "future", Postgres
            sqlReservedWords.remove("LOCATION");  // Postgres
            sqlReservedWords.remove("YEAR");      // DB2, ODBC (?), SQL Server "future", Postgres
            sqlReservedWords.remove("DAY");       // DB2, ODBC (?), SQL Server "future", Postgres
            sqlReservedWords.remove("SECURITY");  // DB2, Postgres

            return ImmutableSet.copyOf(sqlReservedWords);
        }
    } catch (IOException e) {
        throw new RuntimeException("Failed to load [SQL_RESERVED_WORDS.txt]", e);
    }
}
@Test
public void testOldConfig() throws EventDeliveryException {
    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_URI, null);
    config.put(DatasetSinkConstants.CONFIG_KITE_REPO_URI, FILE_REPO_URI);
    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_NAME, DATASET_NAME);

    DatasetSink sink = sink(in, config);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals(
        Sets.newHashSet(expected),
        read(Datasets.load(FILE_DATASET_URI)));
    Assert.assertEquals("Should have committed", 0, remaining(in));
}
@Before
public void setUp() {
    DatastoreContext datastoreContext = DatastoreContext.newBuilder().dataStoreName(dataStoreName)
        .shardInitializationTimeout(10, TimeUnit.SECONDS).build();

    Configuration configuration = new ConfigurationImpl(new EmptyModuleShardConfigProvider()) {
        @Override
        public Collection<MemberName> getUniqueMemberNamesForAllShards() {
            return Sets.newHashSet(MemberName.forName("member-1"));
        }
    };

    DatastoreContextFactory mockContextFactory = mock(DatastoreContextFactory.class);
    Mockito.doReturn(datastoreContext).when(mockContextFactory).getBaseDatastoreContext();
    Mockito.doReturn(datastoreContext).when(mockContextFactory).getShardDatastoreContext(Mockito.anyString());

    dataStore = new DistributedDataStore(getSystem(), new MockClusterWrapper(), configuration,
        mockContextFactory, null);

    dataStore.onGlobalContextUpdated(SchemaContextHelper.entityOwners());
}
@PostConstruct
public void init() {
    // init env handler into map
    String[] beanNameByType = springContext.getBeanNameByType(EnvHandler.class);
    for (String bean : beanNameByType) {
        EnvHandler envHandler = (EnvHandler) springContext.getBean(bean);
        envHandlerMap.put(envHandler.env().name(), envHandler);
    }

    // init env list
    Set<EnvKey> keys = Sets.newHashSet(FlowEnvs.values());
    keys.addAll(Sets.newHashSet(GitEnvs.values()));
    keys.addAll(Sets.newHashSet(GitToggleEnvs.values()));
    keys.addAll(Sets.newHashSet(JobEnvs.values()));
    keys.addAll(Sets.newHashSet(AgentEnvs.values()));

    for (EnvKey key : keys) {
        envKeyMap.put(key.name(), key);

        if (key.isEditable()) {
            editableKeyMap.put(key.name(), key);
        } else {
            noneEditableKeyMap.put(key.name(), key);
        }
    }
}
protected void sendScoreboard(ServerScoreboard scoreboardIn, EntityPlayerMP playerIn) {
    Set<ScoreObjective> set = Sets.<ScoreObjective>newHashSet();

    for (ScorePlayerTeam scoreplayerteam : scoreboardIn.getTeams()) {
        playerIn.playerNetServerHandler.sendPacket(new S3EPacketTeams(scoreplayerteam, 0));
    }

    for (int i = 0; i < 19; ++i) {
        ScoreObjective scoreobjective = scoreboardIn.getObjectiveInDisplaySlot(i);

        if (scoreobjective != null && !set.contains(scoreobjective)) {
            for (Packet packet : scoreboardIn.func_96550_d(scoreobjective)) {
                playerIn.playerNetServerHandler.sendPacket(packet);
            }

            set.add(scoreobjective);
        }
    }
}
private List<CharSequence> extractAnnotationsForElement(ElementType elementType, Set<String> additionalAnnotations) {
    List<CharSequence> allAnnotations = Lists.newArrayListWithCapacity(1);

    boolean dontHaveJsonPropertyAnnotationAlready = Annotations.getAnnotationLines(element,
        Collections.singleton(Annotations.JACKSON_PROPERTY),
        false,
        elementType,
        importsResolver,
        nullability).isEmpty();

    if (dontHaveJsonPropertyAnnotationAlready) {
        allAnnotations.add(jacksonPropertyAnnotation());
    }

    allAnnotations.addAll(
        Annotations.getAnnotationLines(element,
            Sets.union(additionalAnnotations, protoclass().styles().style().additionalJsonAnnotationsNames()),
            protoclass().environment().hasJacksonLib(),
            elementType,
            importsResolver,
            nullability));

    return allAnnotations;
}
private void corrigeerVoorMutatielevering(final List<Onderzoekbundel> teLeverenOnderzoekbundels, final OnderzoekData data) {
    final Set<MetaRecord> origineleDelta = Sets.newHashSet(data.berichtgegevens.getDeltaRecords());
    final Collection<MetaObject> gewijzigdeBetrokkenheden = bepaalBetrokkenhedenInDelta(data.berichtgegevens);
    for (Onderzoekbundel onderzoekbundel : teLeverenOnderzoekbundels) {
        // If the mutation only touched the investigation, but not the actual element it points to,
        // then that element must still be added to the delta. This prevents the investigation from
        // being shown while the element itself is not.
        final Collection<MetaModel> metaModels = data.gegevensInOnderzoek.get(onderzoekbundel);
        final Set<MetaRecord> records = geefRecordsVanOnderzoekgegeven(metaModels);
        records.retainAll(data.berichtgegevens.getGeautoriseerdeRecords());
        for (final MetaRecord record : records) {
            // Show investigations on identifying elements (which are not in the delta) of an
            // involvement only if *something* on the involvement has changed.
            if (gegevenInOnderzoekZitInDelta(onderzoekbundel, data, origineleDelta)
                    || origineleDelta.contains(record)
                    || bepaalTonenIdentificerendGegeven(gewijzigdeBetrokkenheden, record)) {
                voegOnderzoekToeAanDelta(onderzoekbundel, data);
                // Keep the authorized records that are in the delta or in an identifying group,
                // by adding them to the set of delta records.
                data.berichtgegevens.addDeltaRecord(record);
            }
        }
    }
}
private <T extends IForgeRegistryEntry<T>> FMLControlledNamespacedRegistry<T> createRegistry(ResourceLocation registryName,
        Class<T> type, ResourceLocation defaultObjectKey, int minId, int maxId,
        IForgeRegistry.AddCallback<T> addCallback, IForgeRegistry.ClearCallback<T> clearCallback,
        IForgeRegistry.CreateCallback<T> createCallback, IForgeRegistry.SubstitutionCallback<T> substitutionCallback) {
    Set<Class<?>> parents = Sets.newHashSet();
    findSuperTypes(type, parents);
    SetView<Class<?>> overlappedTypes = Sets.intersection(parents, registrySuperTypes.keySet());
    if (!overlappedTypes.isEmpty()) {
        Class<?> foundType = overlappedTypes.iterator().next();
        FMLLog.severe("Found existing registry of type %1s named %2s, you cannot create a new registry (%3s) with type %4s, as %4s has a parent of that type",
            foundType, registrySuperTypes.get(foundType), registryName, type);
        throw new IllegalArgumentException("Duplicate registry parent type found - you can only have one registry for a particular super type");
    }
    FMLControlledNamespacedRegistry<T> fmlControlledNamespacedRegistry = new FMLControlledNamespacedRegistry<T>(defaultObjectKey,
        minId, maxId, type, registries, addCallback, clearCallback, createCallback, substitutionCallback);
    registries.put(registryName, fmlControlledNamespacedRegistry);
    registrySuperTypes.put(type, registryName);
    return getRegistry(registryName, type);
}
/**
 * Assemble watch keys for the given appId, cluster, namespaces, dataCenter combination
 *
 * @return a multimap with namespace as the key and watch keys as the value
 */
public Multimap<String, String> assembleAllWatchKeys(String appId, String clusterName,
                                                     Set<String> namespaces, String dataCenter) {
    Multimap<String, String> watchedKeysMap = assembleWatchKeys(appId, clusterName, namespaces, dataCenter);

    // Every app has an 'application' namespace
    if (!(namespaces.size() == 1 && namespaces.contains(ConfigConsts.NAMESPACE_APPLICATION))) {
        Set<String> namespacesBelongToAppId = namespacesBelongToAppId(appId, namespaces);
        Set<String> publicNamespaces = Sets.difference(namespaces, namespacesBelongToAppId);

        // Listen on more namespaces if it's a public namespace
        if (!publicNamespaces.isEmpty()) {
            watchedKeysMap.putAll(findPublicConfigWatchKeys(appId, clusterName, publicNamespaces, dataCenter));
        }
    }

    return watchedKeysMap;
}
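/*
 * Illustrative, self-contained sketch (all names invented, not from the original sources) of the
 * partitioning step above: the namespaces owned by the app itself are subtracted from the
 * requested set via Sets.difference, and whatever remains is treated as a public namespace that
 * needs extra watch keys.
 */
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class NamespacePartitionSketch {
    public static void main(String[] args) {
        Set<String> requested = ImmutableSet.of("application", "db-config", "shared-redis");
        Set<String> belongToApp = ImmutableSet.of("application", "db-config");

        // Whatever the app does not own itself must be a public namespace.
        Set<String> publicNamespaces = Sets.difference(requested, belongToApp);
        System.out.println(publicNamespaces); // prints [shared-redis]
    }
}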
MediaTypeClassifierImpl(Iterable<? extends MediaType> mts) {
    Table<String, String, Set<MediaType>> typeTable =
        HashBasedTable.<String, String, Set<MediaType>>create();
    for (MediaType mt : mts) {
        String type = mt.type();
        String subtype = mt.subtype();
        Set<MediaType> typeSet = typeTable.get(type, subtype);
        if (typeSet == null) {
            typeSet = Sets.newLinkedHashSet();
            typeTable.put(type, subtype, typeSet);
        }
        typeSet.add(mt);
    }

    ImmutableTable.Builder<String, String, ImmutableSet<MediaType>> b = ImmutableTable.builder();
    for (Table.Cell<String, String, Set<MediaType>> cell : typeTable.cellSet()) {
        b.put(cell.getRowKey(), cell.getColumnKey(), ImmutableSet.copyOf(cell.getValue()));
    }
    this.types = b.build();
}
public static void main(String[] args) {
    List<Integer> xlist = Lists.newArrayList();
    for (int i = 0; i < 100000; i++) {
        xlist.add(i);
    }
    Spliterator<Integer> spliterator = Iterables.concat(xlist).spliterator();
    // The mapper may run on multiple threads (the stream is parallel), so record the
    // thread names in a concurrent set rather than a plain HashSet.
    Set<String> xset = Sets.newConcurrentHashSet();
    Stream<Double> stream = StreamSupport.stream(spliterator, true).map(num -> {
        double r = num / 2.0;
        xset.add(Thread.currentThread().getName());
        return r;
    });
    Iterator<Double> iterator = stream.iterator();
    while (iterator.hasNext()) {
        System.out.println(iterator.next());
    }
    System.out.println(xset);
}
/**
 * Sets up interface expectations for all test cases.
 **/
private void setUpInterfaceService() {
    Set<InterfaceIpAddress> interfaceIpAddresses1 = Sets.newHashSet();
    interfaceIpAddresses1.add(new InterfaceIpAddress(IpAddress.valueOf("192.168.10.101"),
                                                     IpPrefix.valueOf("192.168.10.0/24")));
    Interface sw1Eth1 = new Interface(SW1_ETH1.deviceId().toString(), SW1_ETH1,
                                      interfaceIpAddresses1, MacAddress.valueOf("00:00:00:00:00:01"), VlanId.NONE);
    interfaces.add(sw1Eth1);

    Set<InterfaceIpAddress> interfaceIpAddresses2 = Sets.newHashSet();
    interfaceIpAddresses2.add(new InterfaceIpAddress(IpAddress.valueOf("192.168.20.101"),
                                                     IpPrefix.valueOf("192.168.20.0/24")));
    Interface sw1Eth2 = new Interface(SW1_ETH1.deviceId().toString(), SW1_ETH2,
                                      interfaceIpAddresses2, MacAddress.valueOf("00:00:00:00:00:02"), VlanId.NONE);
    interfaces.add(sw1Eth2);

    Set<InterfaceIpAddress> interfaceIpAddresses3 = Sets.newHashSet();
    interfaceIpAddresses3.add(new InterfaceIpAddress(IpAddress.valueOf("192.168.30.101"),
                                                     IpPrefix.valueOf("192.168.30.0/24")));
    Interface sw1Eth3 = new Interface(SW1_ETH1.deviceId().toString(), SW1_ETH3,
                                      interfaceIpAddresses3, MacAddress.valueOf("00:00:00:00:00:03"), VlanId.NONE);
    interfaces.add(sw1Eth3);
}
@Override
public void executeBatch(FlowRuleBatchOperation batch) {
    ImmutableList.Builder<FlowRule> toAdd = ImmutableList.builder();
    ImmutableList.Builder<FlowRule> toRemove = ImmutableList.builder();
    for (FlowRuleBatchEntry fbe : batch.getOperations()) {
        if (fbe.operator() == ADD || fbe.operator() == MODIFY) {
            toAdd.add(fbe.target());
        } else if (fbe.operator() == REMOVE) {
            toRemove.add(fbe.target());
        }
    }

    ImmutableList<FlowRule> rulesToAdd = toAdd.build();
    ImmutableList<FlowRule> rulesToRemove = toRemove.build();

    Collection<FlowRule> added = applyFlowRules(batch.deviceId(), rulesToAdd);
    Collection<FlowRule> removed = removeFlowRules(batch.deviceId(), rulesToRemove);

    Set<FlowRule> failedRules = Sets.union(Sets.difference(copyOf(rulesToAdd), copyOf(added)),
                                           Sets.difference(copyOf(rulesToRemove), copyOf(removed)));
    CompletedBatchOperation status = new CompletedBatchOperation(failedRules.isEmpty(), failedRules, batch.deviceId());

    providerService.batchOperationCompleted(batch.id(), status);
}
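/*
 * Minimal, self-contained sketch (invented rule names, not from the original sources) of the set
 * algebra used above to compute the failed rules: the union of (requested adds minus successful
 * adds) and (requested removes minus successful removes).
 */
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.Set;

public class FailedBatchSetAlgebraSketch {
    public static void main(String[] args) {
        Set<String> requestedAdds = ImmutableSet.of("rule-a", "rule-b");
        Set<String> addedOk = ImmutableSet.of("rule-a");
        Set<String> requestedRemoves = ImmutableSet.of("rule-c");
        Set<String> removedOk = ImmutableSet.of();

        // Everything that was requested but did not succeed, across both halves of the batch.
        Set<String> failed = Sets.union(
                Sets.difference(requestedAdds, addedOk),
                Sets.difference(requestedRemoves, removedOk));

        System.out.println(failed); // prints [rule-b, rule-c]
    }
}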
/**
 * Checks that a Hasher returns the same HashCode when given the same input, and also
 * that the collision rate looks sane.
 */
static void assertInvariants(HashFunction hashFunction) {
    int objects = 100;
    Set<HashCode> hashcodes = Sets.newHashSetWithExpectedSize(objects);
    Random random = new Random(314159);
    for (int i = 0; i < objects; i++) {
        int value = random.nextInt();
        HashCode hashcode1 = hashFunction.hashInt(value);
        HashCode hashcode2 = hashFunction.hashInt(value);
        Assert.assertEquals(hashcode1, hashcode2); // idempotent
        Assert.assertEquals(hashFunction.bits(), hashcode1.bits());
        Assert.assertEquals(hashFunction.bits(), hashcode1.asBytes().length * 8);
        hashcodes.add(hashcode1);
    }
    Assert.assertTrue(hashcodes.size() > objects * 0.95); // quite relaxed test

    assertHashBytesThrowsCorrectExceptions(hashFunction);
    assertIndependentHashers(hashFunction);
    assertShortcutsAreEquivalent(hashFunction, 512);
}
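/*
 * A self-contained mini-version (illustrative, not from the original sources) of the determinism
 * invariant checked above, run against Guava's standard murmur3_128 hash function; it throws
 * only if the invariant is violated.
 */
import com.google.common.hash.HashCode;
import com.google.common.hash.Hashing;

import java.util.Random;

public class HashDeterminismSketch {
    public static void main(String[] args) {
        Random random = new Random(314159);
        for (int i = 0; i < 100; i++) {
            int value = random.nextInt();
            // Hashing the same input twice must produce identical hash codes.
            HashCode first = Hashing.murmur3_128().hashInt(value);
            HashCode second = Hashing.murmur3_128().hashInt(value);
            if (!first.equals(second)) {
                throw new AssertionError("hashInt is not deterministic for " + value);
            }
        }
    }
}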
/**
 * Sets.cartesianProduct doesn't allow sets that contain null, but we want null to mean
 * "don't call the associated CacheBuilder method" - that is, get the default CacheBuilder
 * behavior. This method wraps the elements in the input sets (which may contain null) as
 * Optionals, calls Sets.cartesianProduct with those, then transforms the result to unwrap
 * the Optionals.
 */
private Iterable<List<Object>> buildCartesianProduct(Set<?>... sets) {
    List<Set<Optional<?>>> optionalSets = Lists.newArrayListWithExpectedSize(sets.length);
    for (Set<?> set : sets) {
        Set<Optional<?>> optionalSet = Sets.newLinkedHashSet(Iterables.transform(set, NULLABLE_TO_OPTIONAL));
        optionalSets.add(optionalSet);
    }
    Set<List<Optional<?>>> cartesianProduct = Sets.cartesianProduct(optionalSets);
    return Iterables.transform(cartesianProduct,
        new Function<List<Optional<?>>, List<Object>>() {
            @Override
            public List<Object> apply(List<Optional<?>> objs) {
                return Lists.transform(objs, OPTIONAL_TO_NULLABLE);
            }
        });
}
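/*
 * A standalone sketch (illustrative names and values, not from the original sources) of the trick
 * the javadoc above describes: Sets.cartesianProduct rejects sets containing null, so null is
 * smuggled through as Optional.absent() and unwrapped after the product is computed.
 */
import com.google.common.base.Optional;
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Sets;

import java.util.List;
import java.util.Set;

public class NullableCartesianProductSketch {
    public static void main(String[] args) {
        // absent() stands in for null, i.e. "use the default behavior".
        Set<Optional<Integer>> sizes = ImmutableSet.of(Optional.of(1), Optional.<Integer>absent());
        Set<Optional<Integer>> weights = ImmutableSet.of(Optional.of(10));

        for (List<Optional<Integer>> combo : Sets.cartesianProduct(sizes, weights)) {
            Integer size = combo.get(0).orNull();
            Integer weight = combo.get(1).orNull();
            System.out.println("size=" + size + ", weight=" + weight);
        }
        // Prints: size=1, weight=10
        //         size=null, weight=10
    }
}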
protected void sendScoreboard(ServerScoreboard scoreboardIn, EntityPlayerMP playerIn) {
    Set<ScoreObjective> set = Sets.<ScoreObjective>newHashSet();

    for (ScorePlayerTeam scoreplayerteam : scoreboardIn.getTeams()) {
        playerIn.connection.sendPacket(new SPacketTeams(scoreplayerteam, 0));
    }

    for (int i = 0; i < 19; ++i) {
        ScoreObjective scoreobjective = scoreboardIn.getObjectiveInDisplaySlot(i);

        if (scoreobjective != null && !set.contains(scoreobjective)) {
            for (Packet<?> packet : scoreboardIn.getCreatePackets(scoreobjective)) {
                playerIn.connection.sendPacket(packet);
            }

            set.add(scoreobjective);
        }
    }
}
@Test
public void testDatasetUriOverridesOldConfig() throws EventDeliveryException {
    // CONFIG_KITE_DATASET_URI is still set, otherwise this will cause an error
    config.put(DatasetSinkConstants.CONFIG_KITE_REPO_URI, "bad uri");
    config.put(DatasetSinkConstants.CONFIG_KITE_DATASET_NAME, "");

    DatasetSink sink = sink(in, config);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals(
        Sets.newHashSet(expected),
        read(Datasets.load(FILE_DATASET_URI)));
    Assert.assertEquals("Should have committed", 0, remaining(in));
}
@Override
protected void execute() {
    RouterService service = get(RouterService.class);
    try {
        List<String> routes = new ArrayList<String>();
        Router router = new DefaultRouter(
            RouterId.valueOf(id),
            routerName,
            adminStateUp,
            status == null ? Status.ACTIVE : Status.valueOf(status),
            distributed,
            null,
            VirtualPortId.portId(gatewayPortId),
            TenantId.tenantId(tenantId),
            routes);
        Set<Router> routerSet = Sets.newHashSet(router);
        service.createRouters(routerSet);
    } catch (Exception e) {
        print(null, e.getMessage());
    }
}
@Test
public void testSerializedWithIncompatibleSchemasWithSavePolicy() throws EventDeliveryException {
    if (Datasets.exists(ERROR_DATASET_URI)) {
        Datasets.delete(ERROR_DATASET_URI);
    }
    config.put(DatasetSinkConstants.CONFIG_FAILURE_POLICY, DatasetSinkConstants.SAVE_FAILURE_POLICY);
    config.put(DatasetSinkConstants.CONFIG_KITE_ERROR_DATASET_URI, ERROR_DATASET_URI);
    final DatasetSink sink = sink(in, config);

    GenericRecordBuilder builder = new GenericRecordBuilder(INCOMPATIBLE_SCHEMA);
    GenericData.Record rec = builder.set("username", "koala").build();

    // We pass in a valid schema in the header, but an incompatible schema
    // was used to serialize the record
    Event badEvent = event(rec, INCOMPATIBLE_SCHEMA, SCHEMA_FILE, true);
    putToChannel(in, badEvent);

    // run the sink
    sink.start();
    sink.process();
    sink.stop();

    Assert.assertEquals("Good records should have been written",
        Sets.newHashSet(expected),
        read(Datasets.load(FILE_DATASET_URI)));
    Assert.assertEquals("Should not have rolled back", 0, remaining(in));
    Assert.assertEquals("Should have saved the bad event",
        Sets.newHashSet(AvroFlumeEvent.newBuilder()
            .setBody(ByteBuffer.wrap(badEvent.getBody()))
            .setHeaders(toUtf8Map(badEvent.getHeaders()))
            .build()),
        read(Datasets.load(ERROR_DATASET_URI, AvroFlumeEvent.class)));
}
/**
 * Returns a collection of all bindings of the given base type
 *
 * @param injector the injector whose bindings are inspected
 * @param baseClass the base type of objects required
 * @return a set of objects returned from this injector
 */
public static Set<Binding<?>> getBindingsOf(Injector injector, Class<?> baseClass) {
    Set<Binding<?>> answer = Sets.newHashSet();
    Set<Entry<Key<?>, Binding<?>>> entries = injector.getBindings().entrySet();
    for (Entry<Key<?>, Binding<?>> entry : entries) {
        Key<?> key = entry.getKey();
        Class<?> keyType = getKeyType(key);
        if (keyType != null && baseClass.isAssignableFrom(keyType)) {
            answer.add(entry.getValue());
        }
    }
    return answer;
}
@CollectionSize.Require(SEVERAL)
public void testEquals() {
    resetContainer(
        Helpers.mapEntry(k0(), v0()),
        Helpers.mapEntry(k1(), v0()),
        Helpers.mapEntry(k0(), v3()));
    Map<K, Collection<V>> expected = Maps.newHashMap();
    expected.put(k0(), Sets.newHashSet(v0(), v3()));
    expected.put(k1(), Sets.newHashSet(v0()));
    new EqualsTester().addEqualityGroup(expected, multimap().asMap()).testEquals();
}
@Test
public void test() {
    Set<TermId> inputIds = Sets.newHashSet(id1);
    Set<TermId> outputIds =
        ImmutableSortedSet.copyOf(TermIds.augmentWithAncestors(ontology, inputIds, true));
    assertEquals(
        "[ImmutableTermId [prefix=ImmutableTermPrefix [value=HP], id=0000001], "
            + "ImmutableTermId [prefix=ImmutableTermPrefix [value=HP], id=0000002], "
            + "ImmutableTermId [prefix=ImmutableTermPrefix [value=HP], id=0000003], "
            + "ImmutableTermId [prefix=ImmutableTermPrefix [value=HP], id=0000004], "
            + "ImmutableTermId [prefix=ImmutableTermPrefix [value=HP], id=0000005]]",
        outputIds.toString());
}
/**
 * Test that when two schemas differ in the number of tables, they still match as long as the
 * extra table is on the list of excluded tables.
 */
@Test
public void testDifferingSchemasWithExcludedTablesMatch() {
    Schema schema1 = schema(appleTable, pearTable, simpleTable);
    Schema schema2 = schema(appleTable, pearTable);

    Set<String> exclusionRegex = Sets.newHashSet("MYTABLE");
    assertTrue("Schemas", schemaHomology.schemasMatch(schema1, schema2, exclusionRegex));
}
public Collection<AttributeModifier> func_111122_c() {
    Set<AttributeModifier> set = Sets.<AttributeModifier>newHashSet();

    // Gather the modifiers registered under each of the three attribute operations.
    for (int i = 0; i < 3; ++i) {
        set.addAll(this.getModifiersByOperation(i));
    }

    return set;
}
private static VectorContainer buildDoubleGlobalDictionary(List<Dictionary> dictionaries, VectorContainer existingDict,
                                                           ColumnDescriptor columnDescriptor, BufferAllocator bufferAllocator) {
    final Field field = new Field(SchemaPath.getCompoundPath(columnDescriptor.getPath()).getAsUnescapedPath(),
        true, new ArrowType.FloatingPoint(FloatingPointPrecision.DOUBLE), null);
    final VectorContainer input = new VectorContainer(bufferAllocator);
    final NullableFloat8Vector doubleVector = input.addOrGet(field);
    doubleVector.allocateNew();
    SortedSet<Double> values = Sets.newTreeSet();
    for (Dictionary dictionary : dictionaries) {
        for (int i = 0; i <= dictionary.getMaxId(); ++i) {
            values.add(dictionary.decodeToDouble(i));
        }
    }
    if (existingDict != null) {
        final NullableFloat8Vector existingDictValues =
            existingDict.getValueAccessorById(NullableFloat8Vector.class, 0).getValueVector();
        for (int i = 0; i < existingDict.getRecordCount(); ++i) {
            values.add(existingDictValues.getAccessor().get(i));
        }
    }
    final Iterator<Double> iter = values.iterator();
    int recordCount = 0;
    while (iter.hasNext()) {
        doubleVector.getMutator().setSafe(recordCount++, iter.next());
    }
    doubleVector.getMutator().setValueCount(recordCount);
    input.setRecordCount(recordCount);
    input.buildSchema(BatchSchema.SelectionVectorMode.NONE);
    return input;
}
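/*
 * Minimal sketch (invented values, not from the original sources) of the merge step above:
 * Sets.newTreeSet both de-duplicates and sorts the dictionary values from every source before
 * they are written out in order.
 */
import com.google.common.collect.Sets;

import java.util.SortedSet;

public class SortedMergeSketch {
    public static void main(String[] args) {
        SortedSet<Double> values = Sets.newTreeSet();
        for (double v : new double[] {3.0, 1.0, 2.0}) { // first "dictionary"
            values.add(v);
        }
        for (double v : new double[] {2.0, 4.0}) { // a second "dictionary", with an overlap
            values.add(v);
        }
        System.out.println(values); // prints [1.0, 2.0, 3.0, 4.0]
    }
}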
private Set<ResourceLocation> getVariantsTextureLocations() {
    Set<ResourceLocation> set = Sets.<ResourceLocation>newHashSet();
    List<ModelResourceLocation> list = Lists.newArrayList(this.variants.keySet());
    Collections.sort(list, new Comparator<ModelResourceLocation>() {
        public int compare(ModelResourceLocation p_compare_1_, ModelResourceLocation p_compare_2_) {
            return p_compare_1_.toString().compareTo(p_compare_2_.toString());
        }
    });

    for (ModelResourceLocation modelresourcelocation : list) {
        ModelBlockDefinition.Variants modelblockdefinition$variants =
            (ModelBlockDefinition.Variants) this.variants.get(modelresourcelocation);

        for (ModelBlockDefinition.Variant modelblockdefinition$variant : modelblockdefinition$variants.getVariants()) {
            ModelBlock modelblock = (ModelBlock) this.models.get(modelblockdefinition$variant.getModelLocation());

            if (modelblock == null) {
                LOGGER.warn("Missing model for: " + modelresourcelocation);
            } else {
                set.addAll(this.getTextureLocations(modelblock));
            }
        }
    }

    set.addAll(LOCATIONS_BUILTIN_TEXTURES);
    return set;
}