private static Map<String, Set<String>> diffMap(Map<String, Byte[]> oldNodeData, Map<String, Byte[]> nodeWithData) {
  Set<String> oldChildNode = oldNodeData.keySet();
  Set<String> newChildNode = nodeWithData.keySet();
  SetView<String> view1 = Sets.difference(oldChildNode, newChildNode); // nodes that went offline
  SetView<String> view2 = Sets.difference(newChildNode, oldChildNode); // nodes that came online
  Map<String, Set<String>> resultMap = new ConcurrentHashMap<String, Set<String>>();
  Set<String> offlineSet = new HashSet<String>();
  Set<String> onlineSet = new HashSet<String>();
  long before = System.currentTimeMillis();
  offlineSet = view1.copyInto(offlineSet);
  onlineSet = view2.copyInto(onlineSet);
  long after = System.currentTimeMillis();
  //Long[] longList = new Long[]{after - before, new Long(onlineSet.size()), new Long(offlineSet.size())};
  //logger.debug("SAF diff set spend time:{},online node MAP size:{} offline node MAP size:{}", longList);
  resultMap.put(PathCache.OFFLINESET, offlineSet);
  resultMap.put(PathCache.ONLINESET, onlineSet);
  return resultMap;
}
private <T extends IForgeRegistryEntry<T>> FMLControlledNamespacedRegistry<T> createRegistry(ResourceLocation registryName, Class<T> type,
    ResourceLocation defaultObjectKey, int minId, int maxId, IForgeRegistry.AddCallback<T> addCallback,
    IForgeRegistry.ClearCallback<T> clearCallback, IForgeRegistry.CreateCallback<T> createCallback,
    IForgeRegistry.SubstitutionCallback<T> substitutionCallback) {
  Set<Class<?>> parents = Sets.newHashSet();
  findSuperTypes(type, parents);
  SetView<Class<?>> overlappedTypes = Sets.intersection(parents, registrySuperTypes.keySet());
  if (!overlappedTypes.isEmpty()) {
    Class<?> foundType = overlappedTypes.iterator().next();
    FMLLog.severe("Found existing registry of type %1s named %2s, you cannot create a new registry (%3s) with type %4s, as %4s has a parent of that type",
        foundType, registrySuperTypes.get(foundType), registryName, type);
    throw new IllegalArgumentException("Duplicate registry parent type found - you can only have one registry for a particular super type");
  }
  FMLControlledNamespacedRegistry<T> fmlControlledNamespacedRegistry = new FMLControlledNamespacedRegistry<T>(defaultObjectKey, minId, maxId,
      type, registries, addCallback, clearCallback, createCallback, substitutionCallback);
  registries.put(registryName, fmlControlledNamespacedRegistry);
  registrySuperTypes.put(type, registryName);
  return getRegistry(registryName, type);
}
private synchronized void notifyGroupChange(Iterable<String> memberIds) {
  ImmutableSet<String> newMemberIds = ImmutableSortedSet.copyOf(memberIds);
  Set<String> existingMemberIds = servicesByMemberId.asMap().keySet();

  // Ignore no-op state changes except for the 1st when we've seen no group yet.
  if ((serverSet == null) || !newMemberIds.equals(existingMemberIds)) {
    SetView<String> deletedMemberIds = Sets.difference(existingMemberIds, newMemberIds);

    // Implicit removal from servicesByMemberId.
    existingMemberIds.removeAll(ImmutableSet.copyOf(deletedMemberIds));

    Iterable<ServiceInstance> serviceInstances = Iterables.filter(
        Iterables.transform(newMemberIds, MAYBE_FETCH_NODE), Predicates.notNull());

    notifyServerSetChange(ImmutableSet.copyOf(serviceInstances));
  }
}
private void logChange(ImmutableSet<ServiceInstance> newServerSet) {
  StringBuilder message = new StringBuilder("server set " + group.getPath() + " change: ");
  if (serverSet.size() != newServerSet.size()) {
    message.append("from ").append(serverSet.size())
        .append(" members to ").append(newServerSet.size());
  }

  Joiner joiner = Joiner.on("\n\t\t");

  SetView<ServiceInstance> left = Sets.difference(serverSet, newServerSet);
  if (!left.isEmpty()) {
    message.append("\n\tleft:\n\t\t").append(joiner.join(left));
  }

  SetView<ServiceInstance> joined = Sets.difference(newServerSet, serverSet);
  if (!joined.isEmpty()) {
    message.append("\n\tjoined:\n\t\t").append(joiner.join(joined));
  }

  LOG.info(message.toString());
}
private SqlCompleter createSqlCompleter(Connection jdbcConnection) {
  SqlCompleter completer = null;
  try {
    Set<String> keywordsCompletions = SqlCompleter.getSqlKeywordsCompletions(jdbcConnection);
    Set<String> dataModelCompletions = SqlCompleter.getDataModelMetadataCompletions(jdbcConnection);
    SetView<String> allCompletions = Sets.union(keywordsCompletions, dataModelCompletions);
    completer = new SqlCompleter(allCompletions, dataModelCompletions);
  } catch (IOException | SQLException e) {
    logger.error("Cannot create SQL completer", e);
  }
  return completer;
}
public void updateDataModelMetaData(Connection connection) {
  try {
    Set<String> newModelCompletions = getDataModelMetadataCompletions(connection);
    logger.debug("New model metadata is:" + Joiner.on(',').join(newModelCompletions));

    // Sets.difference(set1, set2) - returned set contains all elements that are contained by set1
    // and not contained by set2. set2 may also contain elements not present in set1; these are
    // simply ignored.
    SetView<String> removedCompletions = Sets.difference(modelCompletions, newModelCompletions);
    logger.debug("Removed Model Completions: " + Joiner.on(',').join(removedCompletions));
    this.getStrings().removeAll(removedCompletions);

    SetView<String> newCompletions = Sets.difference(newModelCompletions, modelCompletions);
    logger.debug("New Completions: " + Joiner.on(',').join(newCompletions));
    this.getStrings().addAll(newCompletions);

    modelCompletions = newModelCompletions;
  } catch (SQLException e) {
    logger.error("Failed to update the metadata completions", e);
  }
}
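The comment above quotes Guava's contract for Sets.difference. The following standalone sketch (a hypothetical demo class with invented set contents, not part of the original code) illustrates why the method copies the view's elements into getStrings() rather than holding on to the view: the returned SetView is a live, unmodifiable view backed by its input sets.

import com.google.common.collect.Sets;
import com.google.common.collect.Sets.SetView;
import java.util.Set;

// Hypothetical demo class, for illustration only.
public class DifferenceViewDemo {
  public static void main(String[] args) {
    Set<String> oldCompletions = Sets.newHashSet("select", "from", "users");
    Set<String> newCompletions = Sets.newHashSet("select", "from", "orders");

    // Elements of oldCompletions that are absent from newCompletions.
    SetView<String> removed = Sets.difference(oldCompletions, newCompletions);
    System.out.println(removed); // [users]

    // The view is backed by its inputs, so later mutations show through...
    oldCompletions.add("legacy_table");
    System.out.println(removed.size()); // now 2

    // ...so take a snapshot if the backing sets will keep changing.
    Set<String> snapshot = removed.immutableCopy();
    System.out.println(snapshot);
  }
}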
/**
 * Validates that peer clusters cannot coexist in the replication-clusters list.
 *
 * @param clusterName
 *            given cluster whose peer clusters can't be present in the replication-cluster list
 * @param replicationClusters
 *            replication-cluster list
 */
private void validatePeerClusterConflict(String clusterName, Set<String> replicationClusters) {
  try {
    ClusterData clusterData = clustersCache().get(path("clusters", clusterName)).orElseThrow(
        () -> new RestException(Status.PRECONDITION_FAILED, "Invalid replication cluster " + clusterName));
    Set<String> peerClusters = clusterData.getPeerClusterNames();
    if (peerClusters != null && !peerClusters.isEmpty()) {
      SetView<String> conflictPeerClusters = Sets.intersection(peerClusters, replicationClusters);
      if (!conflictPeerClusters.isEmpty()) {
        log.warn("[{}] {}'s peer cluster can't be part of replication clusters {}", clientAppId(),
            clusterName, conflictPeerClusters);
        throw new RestException(Status.CONFLICT,
            String.format("%s's peer-clusters %s can't be part of replication-clusters %s",
                clusterName, conflictPeerClusters, replicationClusters));
      }
    }
  } catch (RestException re) {
    throw re;
  } catch (Exception e) {
    log.warn("[{}] Failed to get cluster-data for {}", clientAppId(), clusterName, e);
  }
}
@Override
protected void doRun(boolean write, File outputFile) throws Exception {
  boolean dropExisting = getCommandLine().hasOption(DROP_EXISTING_OPTION);
  if (getCommandLine().hasOption(SCHEMA_NAME_FILTER)) {
    String[] nameArgs = getCommandLine().getOptionValues(SCHEMA_NAME_FILTER);
    Set<String> includeNames = ImmutableSet.copyOf(nameArgs);
    Set<String> allNames = getDbToolContext().getSchemaNames();
    SetView<String> filteredNames = Sets.intersection(allNames, includeNames);
    if (write) {
      System.out.println("Filtered " + allNames + " to " + filteredNames);
    }
    getDbToolContext().setSchemaNames(filteredNames);
  }
  if (getDbToolContext().getSchemaNames().isEmpty()) {
    System.out.println("No schemas specified, or all filtered out");
  } else {
    new DbCreateOperation(getDbToolContext(), write, outputFile, dropExisting).execute();
    if (write) {
      System.out.println("Database objects created successfully");
    }
  }
}
public void analyze() {
  log.info("Starting analyzing for user: " + twitterUser);

  //Create the CSVFormat object
  createFileIfNotExisting(filenameFollowerIDsDelta());
  createFileIfNotExisting(filenameFollowerIDsCurrent());

  Set<String> currentIDsFromFile = readAllIDsFromCurrentFile();
  currentIDsFromTwitter = getAllFollwerIDsFromTwitter();

  SetView<String> symmetricDifference = Sets.symmetricDifference(currentIDsFromFile, currentIDsFromTwitter);
  if (symmetricDifference.size() == 0) {
    log.info("No new followers were found.");
  } else {
    processSyncDifferences(currentIDsFromFile, symmetricDifference);
    deleteAndRecreateFile(filenameFollowerIDsCurrent());
    writeCurrentFollowerIDFile(currentIDsFromTwitter);
  }
}
private void syncListenedChannels() {
  if (m_channels.equals(m_currentListenedChannels)) {
    return;
  }

  final Set<NotificationChannel> channels = Sets.newHashSet(m_channels);
  final SetView<NotificationChannel> toUnlisten = Sets.difference(m_currentListenedChannels, channels);
  final SetView<NotificationChannel> toListen = Sets.difference(channels, m_currentListenedChannels);

  if (!toUnlisten.isEmpty()) {
    sendUnlistens(toUnlisten);
  }
  if (!toListen.isEmpty()) {
    sendListens(toListen);
  }
  if (!toUnlisten.isEmpty() || !toListen.isEmpty()) {
    m_currentListenedChannels.clear();
    m_currentListenedChannels.addAll(channels);
  }
}
public Map<String, Class<?>> defineClasses(Map<String, byte[]> newClasses) {
  SetView<String> conflicts = Sets.intersection(pendingClasses.keySet(), newClasses.keySet());
  Preconditions.checkArgument(conflicts.isEmpty(), "The classes %s have already been defined", conflicts);

  pendingClasses.putAll(newClasses);
  try {
    Map<String, Class<?>> classes = new HashMap<>();
    for (String className : newClasses.keySet()) {
      try {
        Class<?> clazz = loadClass(className);
        classes.put(className, clazz);
      } catch (ClassNotFoundException e) {
        // this should never happen
        throw Throwables.propagate(e);
      }
    }
    return classes;
  } finally {
    pendingClasses.keySet().removeAll(newClasses.keySet());
  }
}
/**
 * @author 胡天翔
 */
@Test
public void testFilterProjectMembers() {
  List<User> list1 = new ArrayList<User>(), list2 = new ArrayList<User>();
  list1.add(getASampleUser(0));
  list1.add(getASampleUser(1));
  list1.add(getASampleUser(2));
  list2.add(getASampleUser(1));
  list2.add(getASampleUser(2));
  list2.add(getASampleUser(3));
  SetView<User> intersection = Sets.intersection(new HashSet<User>(list1), new HashSet<User>(list2));

  UserServiceImpl spyUserServiceImpl = Mockito.spy(testedUserServiceImpl);
  Mockito.doReturn(list2).when(spyUserServiceImpl).getUserByProjectId(ModuleHelper.projectId);

  List<User> user = spyUserServiceImpl.filterProjectMembers(list1, ModuleHelper.projectId);

  Mockito.verify(spyUserServiceImpl).filterProjectMembers(list1, ModuleHelper.projectId);
  Mockito.verify(spyUserServiceImpl).getUserByProjectId(ModuleHelper.projectId);
  Mockito.verifyNoMoreInteractions(spyUserServiceImpl);
  assertEquals(intersection.size(), Sets.intersection(intersection, new HashSet<User>(user)).size());
}
/** Returns an error message if request is not valid. Checks basic request formatting. */
static String validateRequest(RPC.CreateOrganizationRequest request) {
  if (Strings.isNullOrEmpty(request.name)) {
    return "name cannot be empty";
  }
  if (Strings.isNullOrEmpty(request.publicKey)) {
    return "publicKey cannot be empty";
  }
  if (request.adminEncryptedKeys == null || request.adminEncryptedKeys.isEmpty()) {
    return "adminEncryptedKeys cannot be empty";
  }
  if (request.memberGroupKeys == null || request.memberGroupKeys.isEmpty()) {
    return "memberGroupKeys cannot be empty";
  }

  // ensure that every admin is also a member
  SetView<String> adminsNotInMembers = Sets.difference(
      request.adminEncryptedKeys.keySet(), request.memberGroupKeys.keySet());
  if (!adminsNotInMembers.isEmpty()) {
    return "each admin must be a member";
  }

  return null;
}
/**
 * Two instances of {@link PackageInventory} are identical if they
 * contain the same set of packages.
 */
@Override
public boolean equals(Object obj) {
  if (!(obj instanceof PackageInventory)) {
    return false;
  }
  PackageInventory ref = (PackageInventory) obj;

  // comparison of the PIDs is sufficient
  SetView<PID> symmDiff;

  // synchronized access to the underlying maps
  synchronized (packagesByIdMap) {
    synchronized (ref.packagesByIdMap) {
      symmDiff = Sets.symmetricDifference(ref.packagesByIdMap.keySet(), this.packagesByIdMap.keySet());
    }
  }

  return symmDiff.isEmpty();
}
public Builder add(Class<?> clazz, String... containerOf) {
  ImmutableSet<String> containerTyParams = ImmutableSet.copyOf(containerOf);
  HashSet<String> actualTyParams = new HashSet<>();
  for (TypeVariable<?> x : clazz.getTypeParameters()) {
    actualTyParams.add(x.getName());
  }
  SetView<String> difference = Sets.difference(containerTyParams, actualTyParams);
  if (!difference.isEmpty()) {
    throw new AssertionError(
        String.format(
            "For %s, please update the type parameter(s) from %s to %s",
            clazz, difference, actualTyParams));
  }
  mapBuilder.put(
      clazz.getName(),
      AnnotationInfo.create(clazz.getName(), ImmutableList.copyOf(containerOf)));
  return this;
}
@Deprecated
private Set<E> getSelectedEdges(Map<E, Double> filteredEdgeValues) {
  Set<E> potentialEdges = filteredEdgeValues.keySet();
  Set<E> actualEdges = Sets.newHashSet();
  for (V root : kepInstance.getRootNodes()) {
    V head = root;
    while (head != null) {
      SetView<E> outEdge = Sets.intersection(potentialEdges,
          Sets.newHashSet(kepInstance.getGraph().getOutEdges(head)));
      if (outEdge.isEmpty()) {
        head = null;
      } else {
        E inUse = outEdge.iterator().next();
        actualEdges.add(inUse);
        head = kepInstance.getGraph().getDest(inUse);
      }
    }
  }
  return actualEdges;
}
/**
 * <b>Note:</b> Depends on the collection of information through {@link #buildFamilyRelations()}.
 * Calls {@link #buildFamilyRelations()} before returning the result if any structures have been
 * added or removed after the last call to {@link #buildFamilyRelations()}; thus, calling this
 * method repeatedly after adding/removing structures is very inefficient.
 *
 * @param parent1Id
 * @param parent2Id
 * @return
 */
public GedcomFamily getFamilyOfParents(String parent1Id, String parent2Id) {
  if (structuresModified) {
    buildFamilyRelations();
  }

  if (parent1Id == null && parent2Id == null) {
    return null;
  }

  Set<GedcomFamily> families1 = getFamiliesOfParent(parent1Id);
  Set<GedcomFamily> families2 = getFamiliesOfParent(parent2Id);

  //A comment in the guava docs:
  //"I can use intersection as a Set directly, but copying it can be more
  //efficient if I use it a lot."
  SetView<GedcomFamily> view = Sets.intersection(families1, families2);

  if (view.size() > 1) {
    throw new GedcomCreatorError("The parents " + parent1Id + " and " + parent2Id
        + " have been found as parent in more than one (" + view.size() + ") family: " + view);
  }

  //Returns the first family, or null if there is none
  return Iterables.getFirst(view, null);
}
public ObserversV2(Environment env, JsonObservers jco, Set<Column> strongColumns, Set<Column> weakColumns) {
  ObserverProvider obsProvider = ObserverStoreV2.newObserverProvider(jco.getObserverProviderClass());

  ObserverProviderContextImpl ctx = new ObserverProviderContextImpl(env);

  ObserverRegistry or = new ObserverRegistry(strongColumns, weakColumns);
  obsProvider.provide(or, ctx);

  this.observers = or.observers;
  this.aliases = or.aliases;
  this.observers.forEach((k, v) -> aliases.computeIfAbsent(k, col -> Hex.encNonAscii(col, ":")));

  // the following check ensures observers are provided for all previously configured columns
  SetView<Column> diff = Sets.difference(observers.keySet(), Sets.union(strongColumns, weakColumns));
  if (diff.size() > 0) {
    throw new FluoException("ObserverProvider " + jco.getObserverProviderClass()
        + " did not provide observers for columns " + diff);
  }
}
@Override
protected void appendSuperClassGenericTypeVariables(Appendable appendable, ClassDescriptor inputClass)
    throws IOException {
  MetadataClassDescriptor mdInputClass = (MetadataClassDescriptor) inputClass;
  MetaMetadata mmd = mdInputClass.getDefiningMmd();
  Map<String, MmdGenericTypeVar> gtvs = mmd.getGenericTypeVars();
  MetaMetadata superMmd = mmd.getSuperMmd();
  Map<String, MmdGenericTypeVar> superGtvs = superMmd.getGenericTypeVars();
  List<MmdGenericTypeVar> involvedGtvs = new ArrayList<MmdGenericTypeVar>();
  if (gtvs != null && superGtvs != null) {
    SetView<String> gtvNames = Sets.intersection(gtvs.keySet(), superGtvs.keySet());
    for (String gtvName : gtvNames) {
      involvedGtvs.add(gtvs.get(gtvName));
    }
    MetaMetadataRepository repository = mmd.getRepository();
    appendGenericTypeVarParameterizations(appendable, involvedGtvs, repository);
  }
}
static void validateRegions(Map<String, Collection<String>> regionsToAdd,
    Map<String, Collection<String>> supportedRegions) {
  MapDifference<String, Collection<String>> comparison = Maps.difference(regionsToAdd, supportedRegions);
  checkArgument(comparison.entriesOnlyOnLeft().isEmpty(), "unsupported regions: %s",
      comparison.entriesOnlyOnLeft().keySet());
  for (Entry<String, Collection<String>> entry : regionsToAdd.entrySet()) {
    ImmutableSet<String> toAdd = ImmutableSet.copyOf(entry.getValue());
    SetView<String> intersection = Sets.intersection(toAdd,
        ImmutableSet.copyOf(supportedRegions.get(entry.getKey())));
    SetView<String> unsupported = Sets.difference(toAdd, intersection);
    checkArgument(unsupported.isEmpty(), "unsupported territories in %s: %s", entry.getKey(), unsupported);
  }
}
/**
 * This will mark the submitted dimensions as Cross_Join and all
 * other dimensions as Suppressed.
 *
 * @param xj - Set of Dimensions
 * @return List of Dimensions that are cross joined and can be planned
 *         by this disjointPlan. The resolver can use this to pick a plan.
 */
public List<Dimension> setResolvedCrossJoins(SetView<Dimension> xj) {
  List<Dimension> plannedXj = Lists.newArrayList();
  getPlanDimensions().stream().forEach((d) -> {
    if (xj.contains(d)) {
      disjointMap.put(d, DisjointType.CROSS_JOIN);
      plannedXj.add(d);
    } else {
      disjointMap.put(d, DisjointType.SUPPRESSED);
    }
  });
  return plannedXj;
}
@Lazy
protected ImmutableList<Node> asNodeTree() {
  ImmutableMap<String, String> childParentMap = childParentMap();
  SetView<String> roots = Sets.difference(relation().keySet(), childParentMap.keySet());
  return roots.stream()
      .map(s -> nodeOf(s, relation()))
      .collect(ImmutableList.toImmutableList());
}
public static <T> void explore(T node, DirectedGraph<T> graph, List<T> sortedResult, Set<T> visitedNodes,
    Set<T> expandedNodes) {
  // Have we been here before?
  if (visitedNodes.contains(node)) {
    // And have completed this node before
    if (expandedNodes.contains(node)) {
      // Then we're fine
      return;
    }

    FMLLog.severe("Mod Sorting failed.");
    FMLLog.severe("Visiting node %s", node);
    FMLLog.severe("Current sorted list : %s", sortedResult);
    FMLLog.severe("Visited set for this node : %s", visitedNodes);
    FMLLog.severe("Explored node set : %s", expandedNodes);
    SetView<T> cycleList = Sets.difference(visitedNodes, expandedNodes);
    FMLLog.severe("Likely cycle is in : %s", cycleList);
    throw new ModSortingException("There was a cycle detected in the input graph, sorting is not possible", node, cycleList);
  }

  // Visit this node
  visitedNodes.add(node);

  // Recursively explore inbound edges
  for (T inbound : graph.edgesFrom(node)) {
    explore(inbound, graph, sortedResult, visitedNodes, expandedNodes);
  }

  // Add ourselves now
  sortedResult.add(node);
  // And mark ourselves as explored
  expandedNodes.add(node);
}
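The interesting Guava usage here is Sets.difference(visitedNodes, expandedNodes): nodes that have been entered but not yet fully processed are exactly the candidates for the cycle. Below is a minimal, self-contained sketch of the same diagnostic using a plain adjacency map instead of FML's DirectedGraph; the class name, map-based graph representation, and exception type are invented stand-ins for illustration.

import com.google.common.collect.Sets;
import java.util.*;

// Hypothetical sketch, not FML code: same cycle diagnostic on a Map-based graph.
public class TopoSortSketch {
  static <T> void explore(T node, Map<T, List<T>> edges, List<T> sorted,
      Set<T> visited, Set<T> expanded) {
    if (visited.contains(node)) {
      if (expanded.contains(node)) {
        return; // already fully processed
      }
      // Visited but not yet expanded nodes form the suspected cycle.
      throw new IllegalStateException("Cycle suspected in: " + Sets.difference(visited, expanded));
    }
    visited.add(node);
    for (T next : edges.getOrDefault(node, Collections.emptyList())) {
      explore(next, edges, sorted, visited, expanded);
    }
    sorted.add(node);
    expanded.add(node);
  }

  public static void main(String[] args) {
    Map<String, List<String>> edges = new HashMap<>();
    edges.put("a", Arrays.asList("b"));
    edges.put("b", Arrays.asList("c"));
    edges.put("c", Arrays.asList("a")); // a -> b -> c -> a is a cycle
    explore("a", edges, new ArrayList<>(), new HashSet<>(), new HashSet<>());
  }
}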
/**
 * Returns the status of the sink.
 *
 * @return the status of the sink.
 */
public boolean isSinkHealthy() {
  SetView<String> unclosedSinks = Sets.difference(rdfData.keySet(), closedSinks);
  if (unclosedSinks.size() > 0) {
    LOGGER.error("Some sinks have not been closed: " + unclosedSinks.toString());
    healthyness = false;
  }
  return healthyness;
}
/**
 * Look for servers that may match.
 *
 * @param modules the web modules to search for
 * @param exact if true, look for exact module match
 * @return an existing server
 */
@VisibleForTesting
public Collection<IServer> findExistingServers(IModule[] modules, boolean exact, SubMonitor progress) {
  if (modules.length == 1) {
    IServer defaultServer = ServerCore.getDefaultServer(modules[0]);
    if (defaultServer != null && LocalAppEngineServerDelegate.SERVER_TYPE_ID
        .equals(defaultServer.getServerType().getId())) {
      return Collections.singletonList(defaultServer);
    }
  }
  Set<IModule> myModules = ImmutableSet.copyOf(modules);
  List<IServer> matches = new ArrayList<>();
  // Look for servers that contain these modules
  // Could prioritize servers that have *exactly* these modules,
  // or that have the smallest overlap
  for (IServer server : ServerCore.getServers()) {
    // obsolete or unavailable server definitions have serverType == null
    if (server.getServerType() == null
        || !LocalAppEngineServerDelegate.SERVER_TYPE_ID.equals(server.getServerType().getId())) {
      continue;
    }
    Set<IModule> serverModules = ImmutableSet.copyOf(server.getModules());
    SetView<IModule> overlap = Sets.intersection(myModules, serverModules);
    if (overlap.size() == myModules.size() && (!exact || overlap.size() == serverModules.size())) {
      matches.add(server);
    }
  }
  return matches;
}
@Override
public boolean equals(Object other) {
  if (other instanceof ParallelWorkflowStep) {
    ParallelWorkflowStep otherStep = (ParallelWorkflowStep) other;
    SetView<SimpleWorkflowStep> intersection = Sets.intersection(this.steps, otherStep.steps);
    return intersection.size() == this.steps.size();
  }
  return false;
}
public static void guava() {
  HashSet<Integer> setA = Sets.newHashSet(1, 2, 3, 4, 5);
  HashSet<Integer> setB = Sets.newHashSet(4, 5, 6, 7, 8);

  /**
   * Union of the two sets
   */
  SetView<Integer> union = Sets.union(setA, setB);
  System.out.println("union:" + union);

  /**
   * Difference: elements of setA that are not in setB
   */
  SetView<Integer> difference = Sets.difference(setA, setB);
  System.out.println("difference:" + difference);

  /**
   * Difference: elements of setB that are not in setA
   */
  SetView<Integer> difference1 = Sets.difference(setB, setA);
  System.out.println("difference1:" + difference1);

  /**
   * Intersection of the two sets
   */
  SetView<Integer> intersection = Sets.intersection(setA, setB);
  System.out.println("intersection:" + intersection);

  /**
   * Symmetric difference: the union minus the intersection
   */
  SetView<Integer> differences = Sets.symmetricDifference(setA, setB);
  System.out.println("differences:" + differences);
}
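For reference, with setA = {1, 2, 3, 4, 5} and setB = {4, 5, 6, 7, 8} the method above prints roughly the following (iteration order of hash-based sets is not guaranteed, so the element order may differ):

union:[1, 2, 3, 4, 5, 6, 7, 8]
difference:[1, 2, 3]
difference1:[6, 7, 8]
intersection:[4, 5]
differences:[1, 2, 3, 6, 7, 8]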
/**
 * Check whether the given invocation is done as a lazy initialization,
 * e.g. {@code if (foo == null) foo = new Foo();}.
 * <p>
 * This tries to also handle the scenario where the check is on some
 * <b>other</b> variable - e.g.
 * <pre>
 *    if (foo == null) {
 *        foo = init1();
 *        bar = new Bar();
 *    }
 * </pre>
 * or
 * <pre>
 *    if (!initialized) {
 *        initialized = true;
 *        bar = new Bar();
 *    }
 * </pre>
 */
private static boolean isLazilyInitialized(Node node) {
  Node curr = node.getParent();
  while (curr != null) {
    if (curr instanceof MethodDeclaration) {
      return false;
    } else if (curr instanceof If) {
      If ifNode = (If) curr;
      // See if the if block represents a lazy initialization:
      // compute all variable names seen in the condition
      // (e.g. for "if (foo == null || bar != foo)" the result is "foo,bar"),
      // and then compute all variables assigned to in the if body,
      // and if there is an overlap, we'll consider the whole if block
      // guarded (so lazily initialized and an allocation we won't complain
      // about.)
      List<String> assignments = new ArrayList<String>();
      AssignmentTracker visitor = new AssignmentTracker(assignments);
      ifNode.astStatement().accept(visitor);
      if (!assignments.isEmpty()) {
        List<String> references = new ArrayList<String>();
        addReferencedVariables(references, ifNode.astCondition());
        if (!references.isEmpty()) {
          SetView<String> intersection = Sets.intersection(
              new HashSet<String>(assignments),
              new HashSet<String>(references));
          return !intersection.isEmpty();
        }
      }
      return false;
    }

    curr = curr.getParent();
  }

  return false;
}
private void processSyncDifferences(Set<String> currentIDsFromFile, SetView<String> symmetricDifference) {
  File file = new File(filenameFollowerIDsDelta());
  try (CSVPrinter printer = new CSVPrinter(new FileWriter(file, true), csvFormat)) {
    for (String id : symmetricDifference) {
      User userDetails = twitterClient.getUserDetails(id);
      if (currentIDsFromFile.contains(id)) {
        log.info("UNFOLLOW: Follower " + id + " unfollowed you!");
        printer.printRecord("DROP", DateTime.now().getMillis(), id, currentIDsFromTwitter.size(),
            (userDetails != null) ? userDetails.getProfileImageURL() : "",
            (userDetails != null) ? userDetails.getScreenName() : "",
            (userDetails != null) ? userDetails.getName() : "");
      } else {
        log.info("FOLLOW: Follower " + id + " followed you!");
        printer.printRecord("ADD", DateTime.now().getMillis(), id, currentIDsFromTwitter.size(),
            (userDetails != null) ? userDetails.getProfileImageURL() : "",
            (userDetails != null) ? userDetails.getScreenName() : "",
            (userDetails != null) ? userDetails.getName() : "");
      }
    }
  } catch (IOException e) {
    log.error(e.getMessage());
  }
}
/**
 * This function enforces the type hierarchy of breakpoints.
 *
 * @param addresses The set of addresses for the breakpoints to be added.
 * @param type The type of the breakpoints to be added.
 *
 * @return The Set of breakpoints which has been set.
 */
private Set<BreakpointAddress> enforceBreakpointHierarchy(
    final Set<BreakpointAddress> addresses, final BreakpointType type) {
  final SetView<BreakpointAddress> alreadyRegularBreakpoints =
      Sets.intersection(addresses, indexedBreakpointStorage.getBreakPointAddresses());
  final SetView<BreakpointAddress> alreadySteppingBreakpoints =
      Sets.intersection(addresses, stepBreakpointStorage.getBreakPointAddresses());
  final SetView<BreakpointAddress> alreadyEchoBreakpoints =
      Sets.intersection(addresses, echoBreakpointStorage.getBreakPointAddresses());

  Set<BreakpointAddress> addressesSet = null;

  switch (type) {
    case REGULAR:
      final SetView<BreakpointAddress> notInRegularBreakpoints =
          Sets.difference(addresses, indexedBreakpointStorage.getBreakPointAddresses());
      removeBreakpoints(alreadySteppingBreakpoints, stepBreakpointStorage);
      removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
      addressesSet = notInRegularBreakpoints;
      break;
    case STEP:
      final SetView<BreakpointAddress> notInSteppingBreakpoints =
          Sets.difference(addresses, stepBreakpointStorage.getBreakPointAddresses());
      removeBreakpoints(alreadyEchoBreakpoints, echoBreakpointStorage);
      addressesSet = Sets.difference(notInSteppingBreakpoints, alreadyRegularBreakpoints);
      break;
    case ECHO:
      final SetView<BreakpointAddress> notInEchoBreakPoints =
          Sets.difference(addresses, echoBreakpointStorage.getBreakPointAddresses());
      addressesSet = Sets.difference(notInEchoBreakPoints,
          Sets.union(alreadySteppingBreakpoints, alreadyRegularBreakpoints));
      break;
    default:
      throw new IllegalStateException("IE00722: Breakpoint of invalid type");
  }
  return addressesSet;
}
public static void validateKeys(Map<String, ?> map, String specType, Set<String> allowedKeys) {
  SetView<String> unrecognizedKeys = Sets.difference(map.keySet(), allowedKeys);
  if (!unrecognizedKeys.isEmpty()) {
    throw new RuntimeException(
        "Did not recognize keys in the " + specType + " configuration: " + unrecognizedKeys);
  }
}
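A hypothetical call site, assuming the method above is in scope; the map contents, spec name, and allowed keys are invented for illustration, and the exception message shape follows from the concatenation in the method:

// Hypothetical usage, not taken from the original project.
Map<String, String> spec = ImmutableMap.of("host", "db1", "prot", "5432"); // "prot" is a deliberate typo
Set<String> allowed = ImmutableSet.of("host", "port", "user");
validateKeys(spec, "jdbc sink", allowed);
// -> RuntimeException: Did not recognize keys in the jdbc sink configuration: [prot]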
@Inject
public WorkspaceRuntimes(
    EventService eventService,
    Map<String, InternalEnvironmentFactory> envFactories,
    RuntimeInfrastructure infra,
    WorkspaceSharedPool sharedPool,
    WorkspaceDao workspaceDao,
    @SuppressWarnings("unused") DBInitializer ignored,
    ProbeScheduler probeScheduler) {
  this.probeScheduler = probeScheduler;
  this.runtimes = new ConcurrentHashMap<>();
  this.eventService = eventService;
  this.sharedPool = sharedPool;
  this.workspaceDao = workspaceDao;
  this.isStartRefused = new AtomicBoolean(false);
  this.infrastructure = infra;
  this.environmentFactories = ImmutableMap.copyOf(envFactories);
  LOG.info("Configured factories for environments: '{}'", envFactories.keySet());
  LOG.info("Registered infrastructure '{}'", infra.getName());
  SetView<String> notSupportedByInfra = Sets.difference(envFactories.keySet(), infra.getRecipeTypes());
  if (!notSupportedByInfra.isEmpty()) {
    LOG.warn(
        "Configured environment(s) are not supported by infrastructure: '{}'", notSupportedByInfra);
  }
}
@Override
public List<User> filterProjectMembers(List<User> users, int projectId) {
  List<User> members = getUserByProjectId(projectId);
  SetView<User> intersection = Sets.intersection(new HashSet<User>(users), new HashSet<User>(members));
  List<User> legalUsers = Lists.newArrayList();
  for (User user : intersection) {
    legalUsers.add(user);
  }
  return legalUsers;
}
public PropertyDiff diff(ElPaaSoLogicalTestModelCatalog catalog, String fileName)
    throws InvalidConfigServiceException {
  // Load properties from a reference properties file
  Set<String> releasePropertiesKeys = configServiceUtils.loadKeysFromFile(fileName);
  Set<String> actualPropertiesKeys = getReferencePropertiesKeys(catalog);

  // Evict all development properties from comparison
  releasePropertiesKeys.removeAll(developmentSpecificPropertiesKeys);

  SetView<String> addedProperties = Sets.difference(actualPropertiesKeys, releasePropertiesKeys);
  SetView<String> removedProperties = Sets.difference(releasePropertiesKeys, actualPropertiesKeys);

  // Return a diff properties containing new and removed properties
  return new PropertyDiff(addedProperties, removedProperties);
}
@Test
public void reference_and_hudson_keys_are_identical() throws IOException {
  Set<String> referenceKeys = PropertiesHelper.loadKeys(REFERENCE_PROPERTIES_FILE,
      this.getClass().getResourceAsStream(REFERENCE_PROPERTIES_FILE));
  Set<String> hudsonKeys = PropertiesHelper.loadKeys(HUDSON_PROPERTIES_FILE,
      new ClassPathResource(HUDSON_PROPERTIES_FILE).getInputStream());

  SetView<String> jenkinsKeysNotPresentInHudsonKeys = Sets.difference(referenceKeys, hudsonKeys);
  SetView<String> hudsonKeysNotPresentInJenkinsKeys = Sets.difference(hudsonKeys, referenceKeys);

  assertTrue(buildDifferences("jenkins", "hudson", jenkinsKeysNotPresentInHudsonKeys, hudsonKeysNotPresentInJenkinsKeys),
      jenkinsKeysNotPresentInHudsonKeys.isEmpty() && hudsonKeysNotPresentInJenkinsKeys.isEmpty());
}
private String buildDifferences(String file1, String file2, SetView<String> keysFile1NotPresentInFile2,
    SetView<String> keysFile2NotPresentInFile1) {
  String message = "there are differences:";
  if (!keysFile1NotPresentInFile2.isEmpty()) {
    message += "\n" + file1 + " keys declared for " + file2 + ": ";
    message += listValues(keysFile1NotPresentInFile2);
  }
  if (!keysFile2NotPresentInFile1.isEmpty()) {
    message += "\n" + file2 + " keys declared for " + file1 + ": ";
    message += listValues(keysFile2NotPresentInFile1);
  }
  return message;
}