public Stream<ISemiBlock> getSemiBlocks(World world, BlockPos pos) {
    Stream<ISemiBlock> stream = null;
    Chunk chunk = world.getChunkFromBlockCoords(pos);
    Map<BlockPos, List<ISemiBlock>> map = semiBlocks.get(chunk);
    if (map != null) {
        List<ISemiBlock> semiblocks = map.get(pos);
        if (semiblocks != null) {
            stream = semiblocks.stream().filter(semiBlock -> !semiBlock.isInvalid());
        }
    }

    // Semiblocks that have _just_ been added, but are not in the chunk maps yet.
    Stream<ISemiBlock> addingStream = addingBlocks.stream()
            .filter(semiBlock -> semiBlock.getWorld() == world
                    && semiBlock.getPos().equals(pos)
                    && !semiBlock.isInvalid());

    if (stream == null) {
        return addingStream;
    } else {
        return Streams.concat(stream, addingStream);
    }
}
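// A minimal, self-contained sketch of the merge pattern above: one stream may be
// absent (null), and Streams.concat splices the "pending" stream onto it only when
// it exists. The sample strings are hypothetical stand-ins for ISemiBlock values.
import com.google.common.collect.Streams;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class ConcatSketch {
    public static void main(String[] args) {
        Stream<String> indexed = Stream.of("a", "b"); // may be null in the real code
        Stream<String> pending = Stream.of("c");
        Stream<String> merged = indexed == null ? pending : Streams.concat(indexed, pending);
        List<String> out = merged.collect(Collectors.toList());
        System.out.println(out); // [a, b, c]
    }
}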
public void compute(String text, CodeArea parent) {
    Matcher matcher = getHighlightingPattern().matcher(text);
    while (matcher.find()) {
        String styleClass = getStyleClass(matcher);

        // For now, let's setStyle for the base style.
        parent.setStyle(matcher.start(), matcher.end(), Collections.singleton(styleClass));

        // Then we can grab the style from the document via subView and overlay the child styles.
        for (HighlightStyle style : getChildStyles(styleClass, getText(matcher.start(), matcher.end(), parent))) {
            StyleSpans<Collection<String>> spans = parent.getStyleSpans(matcher.start(), matcher.end())
                    .subView(style.start, style.end)
                    .overlay(
                            StyleSpans.singleton(new StyleSpan<>(
                                    Arrays.asList(style.styleClasses),
                                    style.end - style.start)),
                            (strings, strings2) -> {
                                Collection<String> coll = Streams.concat(strings.stream(), strings2.stream())
                                        .collect(Collectors.toList());
                                logger.debug(coll.toString());
                                return coll;
                            });
            parent.setStyleSpans(matcher.start(), spans); // Reset styles
        }
    }
}
protected List<String> listRuntimeDependencies(String collectionName) throws IOException, SolrServerException {
    ModifiableSolrParams params = new ModifiableSolrParams().set("file", RUNTIME_LIB_FILE_NAME);
    SolrRequest request = new QueryRequest(params);
    request.setPath("/admin/file");
    request.setResponseParser(new InputStreamResponseParser("json"));

    NamedList o = client.request(request, collectionName);

    LineIterator it = IOUtils.lineIterator((InputStream) o.get("stream"), "utf-8");
    List<String> returnValues = Streams.stream(it).collect(Collectors.toList());

    // Check whether the file exists (a little hacky...)
    if (returnValues.size() == 1 && returnValues.get(0).startsWith("{\"responseHeader\":{\"status\":404")) {
        logger.warn("Release does not yet contain runtimelib configuration file. Runtimelibs have to be installed manually.");
        return Collections.emptyList();
    }
    return returnValues;
}
@Override
public BuckarooConfig deserialize(final JsonElement jsonElement, final Type type,
        final JsonDeserializationContext context) throws JsonParseException {
    Preconditions.checkNotNull(jsonElement);
    Preconditions.checkNotNull(type);
    Preconditions.checkNotNull(context);

    final JsonObject jsonObject = jsonElement.getAsJsonObject();
    final JsonArray cookBooksElement = jsonObject.getAsJsonArray("cookbooks");

    final ImmutableList<RemoteCookbook> cookBooks = ImmutableList.copyOf(
            Streams.stream(cookBooksElement == null ? ImmutableList.of() : cookBooksElement)
                    .map(x -> (RemoteCookbook) context.deserialize(x, RemoteCookbook.class))
                    .collect(Collectors.toList()));

    final Optional<URL> analyticsServer = jsonObject.has("analytics")
            ? Optional.of(context.deserialize(jsonObject.get("analytics"), URL.class))
            : Optional.empty();

    return BuckarooConfig.of(cookBooks, analyticsServer);
}
public static Process<Event, RecipeIdentifier> selectDependency(
        final RecipeSource source, final PartialDependency dependency) {
    if (dependency.organization.isPresent()) {
        return Process.of(Single.just(RecipeIdentifier.of(
                dependency.source, dependency.organization.get(), dependency.project)));
    }
    final ImmutableList<RecipeIdentifier> candidates =
            Streams.stream(source.findCandidates(dependency))
                    .limit(5)
                    .collect(toImmutableList());
    if (candidates.isEmpty()) {
        return Process.error(PartialDependencyResolutionException.of(candidates, dependency));
    }
    if (candidates.size() > 1) {
        return Process.error(PartialDependencyResolutionException.of(candidates, dependency));
    }
    return Process.of(
            Observable.just(Notification.of(
                    "Resolved partial dependency " + dependency.encode()
                            + " to " + candidates.get(0).encode())),
            Single.just(candidates.get(0)));
}
@Test
public void testEncode() throws IOException {
    ImageWriter writer = Streams.stream(ImageIO.getImageWritersByFormatName("jpeg"))
            .filter(TurboJpegImageWriter.class::isInstance)
            .findFirst()
            .get();
    ImageWriteParam param = writer.getDefaultWriteParam();
    param.setCompressionMode(ImageWriteParam.MODE_EXPLICIT);
    param.setCompressionQuality(0.85f);

    BufferedImage in = ImageIO.read(ClassLoader.getSystemResource("crop_aligned.jpg"));
    ByteArrayOutputStream os = new ByteArrayOutputStream();
    try (ImageOutputStream ios = ImageIO.createImageOutputStream(os)) {
        writer.setOutput(ios);
        writer.write(null, new IIOImage(in, null, null), param);
    }
    os.flush();
    assertThat(os.toByteArray()).isNotEmpty();
}
@Test
public void testCanReuseWriter() throws IOException {
    ImageWriter writer = Streams.stream(ImageIO.getImageWritersByFormatName("jpeg"))
            .filter(TurboJpegImageWriter.class::isInstance)
            .findFirst()
            .get();

    BufferedImage in = ImageIO.read(ClassLoader.getSystemResource("rgb.jpg"));
    ByteArrayOutputStream rgb = new ByteArrayOutputStream();
    try (ImageOutputStream ios = ImageIO.createImageOutputStream(rgb)) {
        writer.setOutput(ios);
        writer.write(null, new IIOImage(in, null, null), null);
    }
    rgb.flush();

    in = ImageIO.read(ClassLoader.getSystemResource("crop_aligned.jpg"));
    ByteArrayOutputStream bw = new ByteArrayOutputStream();
    try (ImageOutputStream ios = ImageIO.createImageOutputStream(bw)) {
        writer.setOutput(ios);
        writer.write(null, new IIOImage(in, null, null), null);
    }
    bw.flush();

    assertThat(rgb.toByteArray()).isNotEqualTo(bw.toByteArray());
}
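// Hedged sketch of the lookup idiom shared by the two tests above: ImageIO's
// discovery methods return plain Iterators, and Guava's Streams.stream adapts
// them so a writer can be selected with ordinary stream operations. This version
// just takes the first registered PNG writer and is runnable on a stock JDK.
import com.google.common.collect.Streams;
import javax.imageio.ImageIO;
import javax.imageio.ImageWriter;

class WriterLookupSketch {
    public static void main(String[] args) {
        ImageWriter writer = Streams.stream(ImageIO.getImageWritersByFormatName("png"))
                .findFirst()
                .orElseThrow(IllegalStateException::new);
        System.out.println(writer.getClass().getName());
    }
}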
/** Try to obtain an {@link ImageReader} for a given identifier. */
private ImageReader getReader(String identifier)
        throws ResourceNotFoundException, UnsupportedFormatException, IOException {
    if (imageSecurityService != null && !imageSecurityService.isAccessAllowed(identifier)) {
        throw new ResourceNotFoundException();
    }
    Resource res;
    try {
        res = resourceService.get(identifier, ResourcePersistenceType.RESOLVED, MimeType.MIME_IMAGE);
    } catch (ResourceIOException e) {
        throw new ResourceNotFoundException();
    }
    ImageInputStream iis = ImageIO.createImageInputStream(resourceService.getInputStream(res));
    ImageReader reader = Streams.stream(ImageIO.getImageReaders(iis))
            .findFirst()
            .orElseThrow(UnsupportedFormatException::new);
    reader.setInput(iis);
    return reader;
}
@Override
public void processImage(String identifier, ImageApiSelector selector, OutputStream os)
        throws InvalidParametersException, UnsupportedOperationException,
               UnsupportedFormatException, ResourceNotFoundException, IOException {
    DecodedImage img;
    try {
        img = readImage(identifier, selector);
    } catch (IllegalArgumentException e) {
        throw new InvalidParametersException();
    }
    BufferedImage outImg = transformImage(img.img, img.targetSize, img.rotation,
            selector.getRotation().isMirror(), selector.getQuality());
    ImageWriter writer = Streams.stream(
                    ImageIO.getImageWriters(new ImageTypeSpecifier(outImg), selector.getFormat().name()))
            .findFirst()
            .orElseThrow(UnsupportedFormatException::new);
    ImageOutputStream ios = ImageIO.createImageOutputStream(os);
    writer.setOutput(ios);
    writer.write(null, new IIOImage(outImg, null, null), null);
    writer.dispose();
    ios.flush();
}
/**
 * Generates a ResultSetSchema object from the given JsonNode.
 *
 * @param schemaNode  JsonNode which contains all the columns, timezone and granularity
 *
 * @return ResultSetSchema object generated from the JsonNode
 */
private ResultSetSchema getResultSetSchema(JsonNode schemaNode) {
    DateTimeZone timezone = generateTimezone(
            schemaNode.get(SCHEMA_TIMEZONE).asText(),
            DateTimeZone.forID(
                    SYSTEM_CONFIG.getStringProperty(SYSTEM_CONFIG.getPackageVariableName("timezone"), "UTC")
            )
    );

    // Recreate the ResultSetSchema
    LinkedHashSet<Column> columns = Stream.concat(
            Streams.stream(schemaNode.get(SCHEMA_DIM_COLUMNS))
                    .map(JsonNode::asText)
                    .map(this::resolveDimensionName)
                    .map(DimensionColumn::new),
            Streams.stream(() -> schemaNode.get(SCHEMA_METRIC_COLUMNS_TYPE).fields())
                    .map(entry -> new MetricColumnWithValueType(entry.getKey(), entry.getValue().asText()))
    ).collect(Collectors.toCollection(LinkedHashSet::new));

    return new ResultSetSchema(generateGranularity(schemaNode.get(SCHEMA_GRANULARITY).asText(), timezone), columns);
}
/**
 * Creates a string array of status values.
 *
 * <p>The spec indicates that OK should be listed as "active". We use the "removed" status to
 * indicate deleted objects.
 */
private static ImmutableList<String> makeStatusValueList(
        ImmutableSet<StatusValue> statusValues, boolean isDeleted) {
    Stream<RdapStatus> stream = statusValues
            .stream()
            .map(status -> statusToRdapStatusMap.getOrDefault(status, RdapStatus.OBSCURED));
    if (isDeleted) {
        stream = Streams.concat(
                stream.filter(rdapStatus -> !Objects.equals(rdapStatus, RdapStatus.ACTIVE)),
                Stream.of(RdapStatus.REMOVED));
    }
    return stream
            .map(RdapStatus::getDisplayName)
            .collect(toImmutableSortedSet(Ordering.natural()))
            .asList();
}
/**
 * Loads all the diff keys, sorted in a transaction-consistent chronological order.
 *
 * @param lowerCheckpoint exclusive lower bound on keys in this diff, or null if no lower bound
 * @param upperCheckpoint inclusive upper bound on keys in this diff
 */
private ImmutableList<Key<CommitLogManifest>> loadAllDiffKeys(
        @Nullable final CommitLogCheckpoint lowerCheckpoint,
        final CommitLogCheckpoint upperCheckpoint) {
    // Fetch the keys (no data) between these checkpoints, and sort by timestamp. This ordering is
    // transaction-consistent by virtue of our checkpoint strategy and our customized Ofy; see
    // CommitLogCheckpointStrategy for the proof. We break ties by sorting on bucket ID to ensure
    // a deterministic order.
    return upperCheckpoint
            .getBucketTimestamps()
            .keySet()
            .stream()
            .flatMap(bucketNum ->
                    Streams.stream(loadDiffKeysFromBucket(lowerCheckpoint, upperCheckpoint, bucketNum)))
            .sorted(
                    comparingLong(Key<CommitLogManifest>::getId)
                            .thenComparingLong(a -> a.getParent().getId()))
            .collect(toImmutableList());
}
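// Runnable sketch of the two-level sort above, with plain long pairs standing in
// for CommitLogManifest keys: the primary field (timestamp) sorts first, and the
// secondary field (bucket id) breaks ties deterministically.
import static java.util.Comparator.comparingLong;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class DiffKeySortSketch {
    public static void main(String[] args) {
        List<long[]> sorted = Stream.of(new long[] {2, 1}, new long[] {1, 2}, new long[] {1, 1})
                .sorted(comparingLong((long[] a) -> a[0]).thenComparingLong(a -> a[1]))
                .collect(Collectors.toList());
        sorted.forEach(a -> System.out.println(a[0] + "," + a[1])); // 1,1  1,2  2,1
    }
}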
/**
 * Returns the names of all active registrars. Finds registrar accounts with clientIds matching
 * the format used for OT&E accounts (regname-1, regname-2, etc.) and returns just the common
 * prefixes of those accounts (in this case, regname).
 */
private ImmutableSet<String> getAllRegistrarNames() {
    return Streams.stream(Registrar.loadAll())
            .map(registrar -> {
                if (!registrar.isLive()) {
                    return null;
                }
                String name = registrar.getClientId();
                // Look for names of the form "regname-1", "regname-2", etc. and strip the -# suffix.
                String replacedName = name.replaceFirst("^(.*)-[1234]$", "$1");
                // Check if any replacement happened, and thus whether the name matches the format.
                // If it matches, provide the shortened name, and otherwise return null.
                return name.equals(replacedName) ? null : replacedName;
            })
            .filter(Objects::nonNull)
            .collect(toImmutableSet());
}
@Nullable
@Override
Registrar getOldRegistrar(final String clientId) {
    checkArgument(clientId.length() >= 3, "Client identifier (%s) is too short", clientId);
    checkArgument(clientId.length() <= 16, "Client identifier (%s) is too long", clientId);
    if (Registrar.Type.REAL.equals(registrarType)) {
        checkArgument(
                clientId.equals(normalizeClientId(clientId)),
                "Client identifier (%s) can only contain lowercase letters, numbers, and hyphens",
                clientId);
    }
    checkState(
            !Registrar.loadByClientId(clientId).isPresent(), "Registrar %s already exists", clientId);
    List<Registrar> collisions =
            Streams.stream(Registrar.loadAll())
                    .filter(registrar -> normalizeClientId(registrar.getClientId()).equals(clientId))
                    .collect(toCollection(ArrayList::new));
    if (!collisions.isEmpty()) {
        throw new IllegalArgumentException(String.format(
                "The registrar client identifier %s normalizes identically to existing registrar %s",
                clientId, collisions.get(0).getClientId()));
    }
    return null;
}
@Override
public void run() {
    checkArgument( // safety
            RegistryEnvironment.get() == RegistryEnvironment.CRASH
                    || RegistryEnvironment.get() == RegistryEnvironment.UNITTEST,
            "DO NOT RUN ANYWHERE ELSE EXCEPT CRASH OR TESTS.");
    // Create an in-memory input, assigning each bucket to its own shard for maximum
    // parallelization, with one extra shard for the CommitLogCheckpointRoot.
    Input<Key<?>> input = new InMemoryInput<>(
            Lists.partition(
                    Streams.concat(
                                    Stream.of(CommitLogCheckpointRoot.getKey()),
                                    CommitLogBucket.getAllBucketKeys().stream())
                            .collect(toImmutableList()),
                    1));
    response.sendJavaScriptRedirect(createJobPath(mrRunner
            .setJobName("Delete all commit logs")
            .setModuleName("tools")
            .runMapreduce(
                    new KillAllCommitLogsMapper(),
                    new KillAllEntitiesReducer(),
                    ImmutableList.of(input))));
}
/**
 * Returns the set of fields to return, aliased or not according to --full_field_names, and
 * with duplicates eliminated but the ordering otherwise preserved.
 */
private ImmutableSet<String> getFieldsToUse(ImmutableSet<T> objects) {
    // Get the list of fields from the received parameter.
    List<String> fieldsToUse;
    if ((fields == null) || !fields.isPresent()) {
        fieldsToUse = new ArrayList<>();
    } else {
        fieldsToUse = Splitter.on(',').splitToList(fields.get());
        // Check whether any field name is the wildcard; if so, use all fields.
        if (fieldsToUse.contains("*")) {
            fieldsToUse = getAllAvailableFields(objects);
        }
    }
    // Handle aliases according to the state of the fullFieldNames parameter.
    final ImmutableMap<String, String> nameMapping =
            ((fullFieldNames != null) && fullFieldNames.isPresent() && fullFieldNames.get())
                    ? getFieldAliases()
                    : getFieldAliases().inverse();
    return Streams.concat(getPrimaryKeyFields().stream(), fieldsToUse.stream())
            .map(field -> nameMapping.getOrDefault(field, field))
            .collect(toImmutableSet());
}
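// Sketch of the dedup-while-preserving-order trick in the final statement above:
// an ImmutableSet keeps first-insertion order, so concatenating the primary-key
// fields ahead of the requested fields pins them to the front and silently drops
// later duplicates. The field names here are made up for illustration.
import com.google.common.collect.ImmutableSet;
import com.google.common.collect.Streams;
import java.util.stream.Stream;

class FieldOrderSketch {
    public static void main(String[] args) {
        ImmutableSet<String> fields = Streams.concat(
                        Stream.of("id"),                  // primary-key fields
                        Stream.of("name", "id", "email")) // user-requested fields
                .collect(ImmutableSet.toImmutableSet());
        System.out.println(fields); // [id, name, email]
    }
}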
@Test
public void testGracePeriodsByType() {
    ImmutableSet<GracePeriod> addGracePeriods = ImmutableSet.of(
            GracePeriod.create(GracePeriodStatus.ADD, clock.nowUtc().plusDays(3), "foo", null),
            GracePeriod.create(GracePeriodStatus.ADD, clock.nowUtc().plusDays(1), "baz", null));
    ImmutableSet<GracePeriod> renewGracePeriods = ImmutableSet.of(
            GracePeriod.create(GracePeriodStatus.RENEW, clock.nowUtc().plusDays(3), "foo", null),
            GracePeriod.create(GracePeriodStatus.RENEW, clock.nowUtc().plusDays(1), "baz", null));
    domain = domain
            .asBuilder()
            .setGracePeriods(
                    Streams.concat(addGracePeriods.stream(), renewGracePeriods.stream())
                            .collect(toImmutableSet()))
            .build();
    assertThat(domain.getGracePeriodsOfType(GracePeriodStatus.ADD)).isEqualTo(addGracePeriods);
    assertThat(domain.getGracePeriodsOfType(GracePeriodStatus.RENEW)).isEqualTo(renewGracePeriods);
    assertThat(domain.getGracePeriodsOfType(GracePeriodStatus.TRANSFER)).isEmpty();
}
/** Confirms that an EppResourceIndex entity exists in Datastore for a given resource. */
protected static <T extends EppResource> void assertEppResourceIndexEntityFor(final T resource) {
    ImmutableList<EppResourceIndex> indices =
            Streams.stream(
                            ofy()
                                    .load()
                                    .type(EppResourceIndex.class)
                                    .filter("kind", Key.getKind(resource.getClass())))
                    .filter(index ->
                            Key.create(resource).equals(index.getKey())
                                    && ofy().load().key(index.getKey()).now().equals(resource))
                    .collect(toImmutableList());
    assertThat(indices).hasSize(1);
    assertThat(indices.get(0).getBucket())
            .isEqualTo(EppResourceIndexBucket.getBucketKey(Key.create(resource)));
}
private void assertHistoryEntriesDoNotContainTransferBillingEventsOrGracePeriods(
        BillingEvent.Cancellation.Builder... expectedCancellationBillingEvents) throws Exception {
    domain = reloadResourceByForeignKey();
    final HistoryEntry historyEntryTransferApproved =
            getOnlyHistoryEntryOfType(domain, DOMAIN_TRANSFER_APPROVE);
    // We expect two billing events: a closed autorenew for the losing client and an open autorenew
    // for the gaining client that begins at the new expiration time.
    assertBillingEventsForResource(
            domain,
            Streams.concat(
                            Arrays.stream(expectedCancellationBillingEvents)
                                    .map(builder -> builder.setParent(historyEntryTransferApproved).build()),
                            Stream.of(
                                    getLosingClientAutorenewEvent()
                                            .asBuilder()
                                            .setRecurrenceEndTime(clock.nowUtc())
                                            .build(),
                                    getGainingClientAutorenewEvent()
                                            .asBuilder()
                                            .setEventTime(domain.getRegistrationExpirationTime())
                                            .setParent(historyEntryTransferApproved)
                                            .build()))
                    .toArray(BillingEvent[]::new));
    // There should be no grace period.
    assertGracePeriods(domain.getGracePeriods(), ImmutableMap.of());
}
DefaultClientFactory(HttpClientFactory httpClientFactory) {
    this.httpClientFactory = httpClientFactory;

    final List<ClientFactory> availableClientFactories = new ArrayList<>();
    availableClientFactories.add(httpClientFactory);

    Streams.stream(ServiceLoader.load(ClientFactoryProvider.class,
                                      DefaultClientFactory.class.getClassLoader()))
           .map(provider -> provider.newFactory(httpClientFactory))
           .forEach(availableClientFactories::add);

    final ImmutableMap.Builder<Scheme, ClientFactory> builder = ImmutableMap.builder();
    for (ClientFactory f : availableClientFactories) {
        f.supportedSchemes().forEach(s -> builder.put(s, f));
    }

    clientFactories = builder.build();
    clientFactoriesToClose = ImmutableList.copyOf(availableClientFactories).reverse();
}
/**
 * Creates a new instance.
 */
public ServiceInfo(String name,
                   Iterable<MethodInfo> methods,
                   Iterable<HttpHeaders> exampleHttpHeaders,
                   @Nullable String docString) {
    this.name = requireNonNull(name, "name");
    requireNonNull(methods, "methods");
    this.methods = ImmutableSortedSet.copyOf(comparing(MethodInfo::name), methods);
    this.exampleHttpHeaders =
            Streams.stream(requireNonNull(exampleHttpHeaders, "exampleHttpHeaders"))
                   .map(HttpHeaders::copyOf)
                   .map(HttpHeaders::asImmutable)
                   .collect(toImmutableList());
    this.docString = Strings.emptyToNull(docString);
}
/**
 * Generates a new {@link ServiceSpecification} from the specified {@link ServiceInfo}s and
 * the factory {@link Function} that creates {@link NamedTypeInfo}s for the enum, struct or
 * exception types referred to by the specified {@link ServiceInfo}s.
 */
public static ServiceSpecification generate(
        Iterable<ServiceInfo> services,
        Function<TypeSignature, ? extends NamedTypeInfo> namedTypeInfoFactory) {

    // Collect all the named types referred to by the services.
    final Set<TypeSignature> namedTypes =
            Streams.stream(services)
                   .flatMap(s -> s.findNamedTypes().stream())
                   .collect(toImmutableSortedSet(comparing(TypeSignature::name)));

    final Map<String, EnumInfo> enums = new HashMap<>();
    final Map<String, StructInfo> structs = new HashMap<>();
    final Map<String, ExceptionInfo> exceptions = new HashMap<>();

    generateNamedTypeInfos(namedTypeInfoFactory, enums, structs, exceptions, namedTypes);

    return new ServiceSpecification(services, enums.values(), structs.values(), exceptions.values());
}
/**
 * Creates a new instance.
 */
public ServiceSpecification(Iterable<ServiceInfo> services,
                            Iterable<EnumInfo> enums,
                            Iterable<StructInfo> structs,
                            Iterable<ExceptionInfo> exceptions,
                            Iterable<HttpHeaders> exampleHttpHeaders) {
    this.services = Streams.stream(requireNonNull(services, "services"))
                           .collect(toImmutableSortedSet(comparing(ServiceInfo::name)));
    this.enums = collectNamedTypeInfo(enums, "enums");
    this.structs = collectNamedTypeInfo(structs, "structs");
    this.exceptions = collectNamedTypeInfo(exceptions, "exceptions");
    this.exampleHttpHeaders =
            Streams.stream(requireNonNull(exampleHttpHeaders, "exampleHttpHeaders"))
                   .map(headers -> HttpHeaders.copyOf(headers).asImmutable())
                   .collect(toImmutableList());
}
private Stream<ChangeNotesResult> scanReviewDb(Repository repo, ReviewDb db) throws IOException {
    // Scan IDs that might exist in ReviewDb, assuming that each change has at least one patch set
    // ref. Not all changes might exist: some patch set refs might have been written where the
    // corresponding ReviewDb write failed. These will be silently filtered out by the batch get
    // call below, which is intended.
    Set<Change.Id> ids = scanChangeIds(repo).fromPatchSetRefs();

    // A batch size of N may overload get(Iterable), so use something smaller, but still > 1.
    return Streams.stream(Iterators.partition(ids.iterator(), 30))
            .flatMap(batch -> {
                try {
                    return Streams.stream(ReviewDbUtil.unwrapDb(db).changes().get(batch))
                            .map(this::toResult)
                            .filter(Objects::nonNull);
                } catch (OrmException e) {
                    // Return this error for each Id in the input batch.
                    return batch.stream().map(id -> ChangeNotesResult.error(id, e));
                }
            });
}
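// Self-contained sketch of the batching idiom above: Iterators.partition splits a
// large id collection into fixed-size chunks, and Streams.stream turns the chunk
// iterator into a Stream whose elements can be processed (or flat-mapped) per batch.
import com.google.common.collect.Iterators;
import com.google.common.collect.Streams;
import java.util.List;
import java.util.stream.Collectors;
import java.util.stream.IntStream;

class BatchSketch {
    public static void main(String[] args) {
        List<Integer> ids = IntStream.rangeClosed(1, 7).boxed().collect(Collectors.toList());
        Streams.stream(Iterators.partition(ids.iterator(), 3))
                .forEach(System.out::println); // [1, 2, 3]  [4, 5, 6]  [7]
    }
}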
public <R extends RestResource> Iterable<UiAction.Description> from(
        DynamicMap<RestView<R>> views, R resource) {
    List<UiAction.Description> descs =
            Streams.stream(views)
                    .map(e -> describe(e, resource))
                    .filter(Objects::nonNull)
                    .collect(toList());

    List<PermissionBackendCondition> conds =
            Streams.concat(
                            descs.stream().flatMap(u -> Streams.stream(visibleCondition(u))),
                            descs.stream().flatMap(u -> Streams.stream(enabledCondition(u))))
                    .collect(toList());
    permissionBackend.bulkEvaluateTest(conds);

    return descs.stream().filter(u -> u.isVisible()).collect(toList());
}
@Override
public ImmutableSet<AccountGroup.UUID> load(Account.Id memberId) throws OrmException {
    GroupIndex groupIndex = groupIndexProvider.get();
    if (groupIndex != null && groupIndex.getSchema().hasField(GroupField.MEMBER)) {
        return groupQueryProvider
                .get()
                .byMember(memberId)
                .stream()
                .map(InternalGroup::getGroupUUID)
                .collect(toImmutableSet());
    }
    try (ReviewDb db = schema.open()) {
        return Groups.getGroupsWithMemberFromReviewDb(db, memberId)
                .map(groupCache::get)
                .flatMap(Streams::stream)
                .map(InternalGroup::getGroupUUID)
                .collect(toImmutableSet());
    }
}
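// Sketch of the Optional-flattening step used in this loader and the next one:
// when a cache lookup returns Optional, flatMap(Streams::stream) keeps the hits
// and drops the misses in one step, since Guava's Streams.stream(Optional) yields
// zero or one element. The sample strings stand in for cached group records.
import com.google.common.collect.Streams;
import java.util.Optional;
import java.util.stream.Collectors;
import java.util.stream.Stream;

class OptionalFlattenSketch {
    public static void main(String[] args) {
        Stream<Optional<String>> lookups =
                Stream.of(Optional.of("admins"), Optional.empty(), Optional.of("devs"));
        System.out.println(lookups.flatMap(Streams::stream).collect(Collectors.toList()));
        // [admins, devs]
    }
}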
@Override
public ImmutableList<AccountGroup.UUID> load(AccountGroup.UUID key) throws OrmException {
    if (groupIndexProvider.get().getSchema().hasField(GroupField.SUBGROUP)) {
        return groupQueryProvider
                .get()
                .bySubgroup(key)
                .stream()
                .map(InternalGroup::getGroupUUID)
                .collect(toImmutableList());
    }
    try (ReviewDb db = schema.open()) {
        return Groups.getParentGroupsFromReviewDb(db, key)
                .map(groupCache::get)
                .flatMap(Streams::stream)
                .map(InternalGroup::getGroupUUID)
                .collect(toImmutableList());
    }
}
@Override
public SortedMap<String, PluginInfo> apply(TopLevelResource resource) throws BadRequestException {
    Stream<Plugin> s = Streams.stream(pluginLoader.getPlugins(all));
    if (matchPrefix != null) {
        checkMatchOptions(matchSubstring == null && matchRegex == null);
        s = s.filter(p -> p.getName().startsWith(matchPrefix));
    } else if (matchSubstring != null) {
        checkMatchOptions(matchPrefix == null && matchRegex == null);
        String substring = matchSubstring.toLowerCase(Locale.US);
        s = s.filter(p -> p.getName().toLowerCase(Locale.US).contains(substring));
    } else if (matchRegex != null) {
        checkMatchOptions(matchPrefix == null && matchSubstring == null);
        Pattern pattern = Pattern.compile(matchRegex);
        s = s.filter(p -> pattern.matcher(p.getName()).matches());
    }
    s = s.sorted(comparing(Plugin::getName));
    if (start > 0) {
        s = s.skip(start);
    }
    if (limit > 0) {
        s = s.limit(limit);
    }
    return new TreeMap<>(s.collect(Collectors.toMap(p -> p.getName(), p -> toPluginInfo(p))));
}
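// Minimal sketch of the start/limit paging applied above: after sorting, skip
// drops the first `start` elements and limit caps the page size, both as plain
// Stream operations. The values and page bounds are illustrative only.
import java.util.stream.Collectors;
import java.util.stream.Stream;

class PagingSketch {
    public static void main(String[] args) {
        int start = 1;
        int limit = 2;
        System.out.println(Stream.of("d", "a", "c", "b")
                .sorted()
                .skip(start)
                .limit(limit)
                .collect(Collectors.toList())); // [b, c]
    }
}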
private String toDoc(V v) throws IOException {
    XContentBuilder builder = jsonBuilder().startObject();
    for (Values<V> values : schema.buildFields(v)) {
        String name = values.getField().getName();
        if (values.getField().isRepeatable()) {
            builder.field(
                    name,
                    Streams.stream(values.getValues())
                            .filter(e -> shouldAddElement(e))
                            .collect(toList()));
        } else {
            Object element = Iterables.getOnlyElement(values.getValues(), "");
            if (shouldAddElement(element)) {
                builder.field(name, element);
            }
        }
    }
    return builder.endObject().string();
}
/**
 * Remove expired sessions from the datastore.
 */
@Override
public void processExpires() {
    log.debug("Processing expired sessions");
    Query<Key> query = Query.newKeyQueryBuilder()
            .setKind(sessionKind)
            .setFilter(PropertyFilter.le(SessionMetadata.EXPIRATION_TIME, clock.millis()))
            .build();
    QueryResults<Key> keys = datastore.run(query);
    Stream<Key> toDelete = Streams.stream(keys)
            .parallel()
            .flatMap(key -> Streams.stream(datastore.run(Query.newKeyQueryBuilder()
                    .setKind(sessionKind)
                    .setFilter(PropertyFilter.hasAncestor(newKey(key.getName())))
                    .build())));
    datastore.delete(toDelete.toArray(Key[]::new));
}
@Override
protected ISelectable internalGetAllDescriptions(Resource resource) {
    Iterable<EObject> allContents = new Iterable<EObject>() {
        @Override
        public Iterator<EObject> iterator() {
            return EcoreUtil.getAllContents(resource, false);
        }
    };
    Iterable<IEObjectDescription> allDescriptions =
            Scopes.scopedElementsFor(allContents, this.getQualifiedNameProvider());
    Iterable<IEObjectDescription> allAliasedDescriptions = new Iterable<IEObjectDescription>() {
        @Override
        public Iterator<IEObjectDescription> iterator() {
            return Streams.stream(allDescriptions)
                    .map(CooperateImportedNamespaceAwareLocalScopeProvider::createAliasedDescription)
                    .iterator();
        }
    };
    return new MultimapBasedSelectable(allAliasedDescriptions);
}
/** Return whether the given try-tree will catch the given exception type. */
private boolean tryCatchesException(TryTree tryTree, Type exceptionToCatch, VisitorState state) {
    Types types = state.getTypes();
    return tryTree
            .getCatches()
            .stream()
            .anyMatch(
                    (CatchTree catchClause) -> {
                        Type catchesException = getType(catchClause.getParameter().getType());
                        // Examine all alternative types of a union type.
                        if (catchesException != null && catchesException.isUnion()) {
                            return Streams.stream(((UnionClassType) catchesException).getAlternativeTypes())
                                    .anyMatch(caught -> types.isSuperType(caught, exceptionToCatch));
                        }
                        // Simple type, just check the superclass.
                        return types.isSuperType(catchesException, exceptionToCatch);
                    });
}
/**
 * Check that the super-type of a {@code @ThreadSafe}-annotated type is instantiated with
 * threadsafe type arguments where required by its annotation's containerOf element, and that any
 * type arguments that correspond to containerOf type parameters on the sub-type are also in the
 * super-type's containerOf spec.
 *
 * @param containerTypeParameters the in-scope threadsafe type parameters, declared on some
 *     enclosing class.
 * @param annotation the type's {@code @ThreadSafe} info
 * @param type the type to check
 */
public Violation checkSuperInstantiation(
        Set<String> containerTypeParameters, AnnotationInfo annotation, Type type) {
    Violation info = threadSafeInstantiation(containerTypeParameters, annotation, type);
    if (info.isPresent()) {
        return info;
    }
    return Streams.zip(
                    type.asElement().getTypeParameters().stream(),
                    type.getTypeArguments().stream(),
                    (typaram, argument) -> {
                        if (containerOfSubtyping(containerTypeParameters, annotation, typaram, argument)) {
                            return Violation.of(
                                    String.format(
                                            "'%s' is not a container of '%s'", annotation.typeName(), typaram));
                        }
                        return Violation.absent();
                    })
            .filter(Violation::isPresent)
            .findFirst()
            .orElse(Violation.absent());
}
/** Checks that any thread-safe type parameters are instantiated with thread-safe types. */
public Violation checkInstantiation(
        Collection<TypeVariableSymbol> typeParameters, Collection<Type> typeArguments) {
    return Streams.zip(
                    typeParameters.stream(),
                    typeArguments.stream(),
                    (sym, type) -> {
                        if (!isThreadSafeTypeParameter(sym)) {
                            return Violation.absent();
                        }
                        Violation info =
                                isThreadSafeType(
                                        /* allowContainerTypeParameters= */ true,
                                        /* containerTypeParameters= */ ImmutableSet.of(),
                                        type);
                        if (!info.isPresent()) {
                            return Violation.absent();
                        }
                        return info.plus(String.format("instantiation of '%s' is mutable", sym));
                    })
            .filter(Violation::isPresent)
            .findFirst()
            .orElse(Violation.absent());
}
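// Minimal sketch of the Streams.zip pattern shared by the two checks above:
// pair type parameters with type arguments positionally, map each pair to a
// possible violation, and report only the first one found. The parameter and
// argument names are invented for illustration.
import com.google.common.collect.Streams;
import java.util.Objects;
import java.util.stream.Stream;

class ZipSketch {
    public static void main(String[] args) {
        String violation = Streams.zip(
                        Stream.of("T", "U"),                 // declared type parameters
                        Stream.of("String", "MutableList"),  // actual type arguments
                        (param, arg) -> arg.startsWith("Mutable")
                                ? "'" + arg + "' is not a safe argument for '" + param + "'"
                                : null)
                .filter(Objects::nonNull)
                .findFirst()
                .orElse("no violation");
        System.out.println(violation); // 'MutableList' is not a safe argument for 'U'
    }
}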
private static Multimap<TypeVariableSymbol, TypeInfo> getResolvedGenerics(
        MethodInvocationTree tree) {
    Type type = ASTHelpers.getType(tree.getMethodSelect());
    List<Type> from = new ArrayList<>();
    List<Type> to = new ArrayList<>();
    getSubst(type, from, to);
    Multimap<TypeVariableSymbol, TypeInfo> result =
            Streams.zip(
                            from.stream(),
                            to.stream(),
                            (f, t) -> new TypeInfo((TypeVariableSymbol) f.asElement(), t, tree))
                    .collect(
                            toMultimap(
                                    k -> k.sym,
                                    k -> k,
                                    MultimapBuilder.linkedHashKeys().arrayListValues()::build));
    return result;
}
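// Runnable sketch of the toMultimap collector used above, under the assumption
// that grouping words by first letter is a fair stand-in for grouping TypeInfos
// by their type-variable symbol. The supplier shape mirrors the original:
// MultimapBuilder supplies a mutable multimap that the collector fills.
import static com.google.common.collect.Multimaps.toMultimap;
import com.google.common.collect.Multimap;
import com.google.common.collect.MultimapBuilder;
import java.util.stream.Stream;

class ToMultimapSketch {
    public static void main(String[] args) {
        Multimap<Character, String> byInitial = Stream.of("ant", "ape", "bee")
                .collect(toMultimap(
                        w -> w.charAt(0),
                        w -> w,
                        MultimapBuilder.linkedHashKeys().arrayListValues()::build));
        System.out.println(byInitial); // {a=[ant, ape], b=[bee]}
    }
}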
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
    ImmutableList<Parameter> formal =
            Parameter.createListFromVarSymbols(ASTHelpers.getSymbol(tree).getParameters());
    Stream<Parameter> actual =
            Parameter.createListFromExpressionTrees(tree.getArguments()).stream();
    Changes changes =
            Changes.create(
                    formal.stream().map(f -> 1.0).collect(toImmutableList()),
                    formal.stream().map(f -> 0.0).collect(toImmutableList()),
                    Streams.zip(formal.stream(), actual, ParameterPair::create)
                            .collect(toImmutableList()));
    boolean result =
            !new NameInCommentHeuristic()
                    .isAcceptableChange(changes, tree, ASTHelpers.getSymbol(tree), state);
    return buildDescription(tree).setMessage(String.valueOf(result)).build();
}
@Override
public Description matchMethodInvocation(MethodInvocationTree tree, VisitorState state) {
    ImmutableList<Parameter> formal =
            Parameter.createListFromVarSymbols(ASTHelpers.getSymbol(tree).getParameters());
    Stream<Parameter> actual =
            Parameter.createListFromExpressionTrees(tree.getArguments()).stream();
    Changes changes =
            Changes.create(
                    formal.stream().map(f -> 1.0).collect(toImmutableList()),
                    formal.stream().map(f -> 0.0).collect(toImmutableList()),
                    Streams.zip(formal.stream(), actual, ParameterPair::create)
                            .collect(toImmutableList()));
    boolean result =
            !new CreatesDuplicateCallHeuristic()
                    .isAcceptableChange(changes, tree, ASTHelpers.getSymbol(tree), state);
    return buildDescription(tree).setMessage(String.valueOf(result)).build();
}
private static NestedSet<Artifact> getArchiveInputs(
        JavaTargetAttributes attributes,
        @Nullable Function<Artifact, Artifact> derivedJarFunction) {
    NestedSetBuilder<Artifact> inputs = NestedSetBuilder.stableOrder();
    if (derivedJarFunction != null) {
        inputs.addAll(
                Streams.stream(attributes.getRuntimeClassPathForArchive())
                        .map(derivedJarFunction)
                        .collect(toImmutableList()));
    } else {
        attributes.addRuntimeClassPathForArchiveToNestedSet(inputs);
    }
    // TODO(bazel-team): Remove? Resources not used as input to singlejar action.
    inputs.addAll(attributes.getResources().values());
    inputs.addAll(attributes.getClassPathResources());
    return inputs.build();
}
private static Iterable<ObjcProvider> getDylibObjcProviders(
        Iterable<TransitiveInfoCollection> transitiveInfoCollections) {
    // Dylibs.
    Iterable<ObjcProvider> frameworkObjcProviders =
            Streams.stream(
                            getTypedProviders(
                                    transitiveInfoCollections, AppleDynamicFrameworkInfo.SKYLARK_CONSTRUCTOR))
                    .map(frameworkProvider -> frameworkProvider.getDepsObjcProvider())
                    .collect(ImmutableList.toImmutableList());
    // Bundle loaders.
    Iterable<ObjcProvider> executableObjcProviders =
            Streams.stream(
                            getTypedProviders(
                                    transitiveInfoCollections, AppleExecutableBinaryInfo.SKYLARK_CONSTRUCTOR))
                    .map(executableProvider -> executableProvider.getDepsObjcProvider())
                    .collect(ImmutableList.toImmutableList());

    return Iterables.concat(
            frameworkObjcProviders,
            executableObjcProviders,
            getTypedProviders(transitiveInfoCollections, ObjcProvider.SKYLARK_CONSTRUCTOR));
}
public static ImmutableList<ConstraintValueInfo> validateConstraints(
        Iterable<ConstraintValueInfo> constraintValues) throws DuplicateConstraintException {
    // Collect the constraints by their settings.
    ImmutableListMultimap<ConstraintSettingInfo, ConstraintValueInfo> constraints =
            Streams.stream(constraintValues)
                    .collect(
                            toImmutableListMultimap(ConstraintValueInfo::constraint, Functions.identity()));

    // Find settings with duplicate values.
    ImmutableListMultimap<ConstraintSettingInfo, ConstraintValueInfo> duplicates =
            constraints
                    .asMap()
                    .entrySet()
                    .stream()
                    .filter(e -> e.getValue().size() > 1)
                    .collect(
                            flatteningToImmutableListMultimap(Map.Entry::getKey, e -> e.getValue().stream()));

    if (!duplicates.isEmpty()) {
        throw new DuplicateConstraintException(duplicates);
    }

    return ImmutableList.copyOf(constraints.values());
}
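// Sketch of the duplicate-detection idiom above: index values by their setting
// with toImmutableListMultimap, then keep only keys that collected more than one
// value. Plain "setting:value" strings stand in for the constraint objects.
import static com.google.common.collect.ImmutableListMultimap.toImmutableListMultimap;
import com.google.common.collect.ImmutableListMultimap;
import java.util.stream.Stream;

class DuplicateSketch {
    public static void main(String[] args) {
        ImmutableListMultimap<String, String> bySetting =
                Stream.of("cpu:x86", "cpu:arm", "os:linux")
                        .collect(toImmutableListMultimap(
                                s -> s.split(":")[0],
                                s -> s.split(":")[1]));
        bySetting.asMap().entrySet().stream()
                .filter(e -> e.getValue().size() > 1)
                .forEach(e -> System.out.println(
                        "duplicate values for " + e.getKey() + ": " + e.getValue()));
        // duplicate values for cpu: [x86, arm]
    }
}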