protected AggregationStrategy getAggregationStrategy(Exchange exchange) {
    AggregationStrategy answer = null;

    // prefer to use per Exchange aggregation strategy over a global strategy
    if (exchange != null) {
        Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
        Map<Object, AggregationStrategy> map = CastUtils.cast(property);
        if (map != null) {
            answer = map.get(this);
        }
    }
    if (answer == null) {
        // fallback to global strategy
        answer = getAggregationStrategy();
    }
    return answer;
}
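Every snippet in this section routes its unchecked casts through org.apache.camel.util.CastUtils. The following is a minimal sketch of that idea, not camel-core's actual source (which has more overloads): centralize the unchecked cast in one audited, annotated helper so call sites stay free of @SuppressWarnings.

import java.util.List;
import java.util.Map;

// Hedged sketch of the CastUtils shape assumed by the snippets below.
public final class CastUtilsSketch {

    private CastUtilsSketch() {
        // utility class
    }

    @SuppressWarnings("unchecked")
    public static <K, V> Map<K, V> cast(Map<?, ?> map) {
        return (Map<K, V>) map;
    }

    @SuppressWarnings("unchecked")
    public static <T> List<T> cast(List<?> list) {
        return (List<T>) list;
    }
}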
/**
 * Sets the given {@link org.apache.camel.processor.aggregate.AggregationStrategy} on the {@link Exchange}.
 *
 * @param exchange            the exchange
 * @param aggregationStrategy the strategy
 */
protected void setAggregationStrategyOnExchange(Exchange exchange, AggregationStrategy aggregationStrategy) {
    Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
    Map<Object, AggregationStrategy> map = CastUtils.cast(property);
    if (map == null) {
        map = new ConcurrentHashMap<Object, AggregationStrategy>();
    } else {
        // it is not safe to use the map directly as the exchange doesn't have a deep copy of its properties,
        // so we create a new copy if we need to change the map
        map = new ConcurrentHashMap<Object, AggregationStrategy>(map);
    }
    // store the strategy using this processor as the key
    // (so we can store multiple strategies on the same exchange)
    map.put(this, aggregationStrategy);
    exchange.setProperty(Exchange.AGGREGATION_STRATEGY, map);
}
protected GenericFileEndpoint<T> createEndpoint(String uri, String remaining, Map<String, Object> parameters) throws Exception {
    // create the correct endpoint based on the protocol
    final GenericFileEndpoint<T> endpoint;

    // call to subclasses to build their custom version of a GenericFileEndpoint
    endpoint = buildFileEndpoint(uri, remaining, parameters);

    // sort by using file language
    String sortBy = getAndRemoveParameter(parameters, "sortBy", String.class);
    if (isNotEmpty(sortBy) && !EndpointHelper.isReferenceParameter(sortBy)) {
        // we support nested sort groups so they should be chained
        String[] groups = sortBy.split(";");
        Iterator<String> it = CastUtils.cast(ObjectHelper.createIterator(groups));
        Comparator<Exchange> comparator = createSortByComparator(it);
        endpoint.setSortBy(comparator);
    }

    setProperties(endpoint.getConfiguration(), parameters);
    setProperties(endpoint, parameters);
    afterPropertiesSet(endpoint);
    return endpoint;
}
@Override
protected int poll() throws Exception {
    // must reset for each poll
    shutdownRunningTask = null;
    pendingExchanges = 0;

    try {
        Messages messages = null;
        LOG.trace("Receiving messages with request [messagePerPoll {}, timeout {}]...",
            getMaxMessagesPerPoll(), getEndpoint().getConfiguration().getTimeout());
        messages = this.ironQueue.reserve(getMaxMessagesPerPoll(),
            getEndpoint().getConfiguration().getTimeout(),
            getEndpoint().getConfiguration().getWait());

        LOG.trace("Received {} messages", messages.getSize());

        Queue<Exchange> exchanges = createExchanges(messages.getMessages());
        int noProcessed = processBatch(CastUtils.cast(exchanges));

        // delete all processed messages in one batch
        if (getEndpoint().getConfiguration().isBatchDelete()) {
            LOG.trace("Batch deleting {} messages", messages.getSize());
            this.ironQueue.deleteMessages(messages);
        }
        return noProcessed;
    } catch (EmptyQueueException e) {
        return 0;
    }
}
public Exchange aggregate(Exchange oldExchange, Exchange newExchange) {
    if (oldExchange == null) {
        return newExchange;
    }

    SyndFeed oldFeed = oldExchange.getIn().getBody(SyndFeed.class);
    SyndFeed newFeed = newExchange.getIn().getBody(SyndFeed.class);
    if (oldFeed != null && newFeed != null) {
        List<SyndEntryImpl> oldEntries = CastUtils.cast(oldFeed.getEntries());
        List<SyndEntryImpl> newEntries = CastUtils.cast(newFeed.getEntries());
        List<SyndEntryImpl> mergedList = new ArrayList<SyndEntryImpl>(oldEntries.size() + newEntries.size());
        mergedList.addAll(oldEntries);
        mergedList.addAll(newEntries);
        oldFeed.setEntries(mergedList);
    } else {
        log.debug("Could not merge exchanges. One body was null.");
    }
    return oldExchange;
}
@Override
protected int poll() throws Exception {
    GetRecordsRequest req = new GetRecordsRequest()
        .withShardIterator(getShardItertor())
        .withLimit(getEndpoint().getMaxResultsPerRequest());
    GetRecordsResult result = getClient().getRecords(req);

    Queue<Exchange> exchanges = createExchanges(result.getRecords());
    int processedExchangeCount = processBatch(CastUtils.cast(exchanges));

    // May cache the last successful sequence number and pass it to the
    // getRecords request. That way, on the next poll, we start from where
    // we left off; however, I don't know what happens to subsequent
    // exchanges when an earlier exchange fails.
    currentShardIterator = result.getNextShardIterator();

    return processedExchangeCount;
}
private List<MetricDatum> getMetricData(Exchange exchange) {
    Object body = exchange.getIn().getBody();
    if (body instanceof List) {
        return CastUtils.cast((List<?>) body);
    }

    if (body instanceof MetricDatum) {
        return Arrays.asList((MetricDatum) body);
    }

    MetricDatum metricDatum = new MetricDatum()
        .withMetricName(determineName(exchange))
        .withValue(determineValue(exchange))
        .withUnit(determineUnit(exchange))
        .withTimestamp(determineTimestamp(exchange));
    setDimension(metricDatum, exchange);
    return Arrays.asList(metricDatum);
}
/**
 * Removes the associated {@link org.apache.camel.processor.aggregate.AggregationStrategy} from the {@link Exchange},
 * which must be done after use.
 *
 * @param exchange the current exchange
 */
protected void removeAggregationStrategyFromExchange(Exchange exchange) {
    Map<?, ?> property = exchange.getProperty(Exchange.AGGREGATION_STRATEGY, Map.class);
    Map<Object, AggregationStrategy> map = CastUtils.cast(property);
    if (map == null) {
        return;
    }
    // remove the strategy using this processor as the key
    map.remove(this);
}
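A hypothetical sketch of how the three strategy helpers (set, look up, remove) are meant to combine; the process(...) wrapper and the choice of UseLatestAggregationStrategy are illustrative, not taken from the source.

public void process(Exchange exchange) throws Exception {
    // attach a per-exchange strategy keyed by this processor
    setAggregationStrategyOnExchange(exchange, new UseLatestAggregationStrategy());
    try {
        // resolves the per-exchange strategy first, falling back to the global one
        AggregationStrategy strategy = getAggregationStrategy(exchange);
        // ... aggregate sub-exchanges with the resolved strategy ...
    } finally {
        // per the javadoc above, the association must be removed after use
        removeAggregationStrategyFromExchange(exchange);
    }
}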
/**
 * Creates a new context with the given environment.
 *
 * @param environment the environment, must not be <tt>null</tt>
 * @return the created context.
 * @throws NamingException is thrown if creation failed.
 */
public Context getInitialContext(Hashtable<?, ?> environment) throws NamingException {
    try {
        return new JndiContext(CastUtils.cast(environment, String.class, Object.class));
    } catch (Exception e) {
        if (e instanceof NamingException) {
            throw (NamingException) e;
        }
        NamingException exception = new NamingException(e.getMessage());
        exception.initCause(e);
        throw exception;
    }
}
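A hypothetical call site for the factory method above; the factory class name and the "myBean" binding are assumptions for illustration.

Hashtable<String, Object> env = new Hashtable<String, Object>();
env.put(Context.INITIAL_CONTEXT_FACTORY, "org.apache.camel.util.jndi.CamelInitialContextFactory");
// getInitialContext narrows the raw Hashtable via CastUtils.cast(environment, String.class, Object.class)
Context context = new CamelInitialContextFactory().getInitialContext(env);
Object bean = context.lookup("myBean"); // illustrative binding name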
public NamingEnumeration<NameClassPair> list(String name) throws NamingException {
    Object o = lookup(name);
    if (o == this) {
        return CastUtils.cast(new ListEnumeration());
    } else if (o instanceof Context) {
        return ((Context) o).list("");
    } else {
        throw new NotContextException();
    }
}
public NamingEnumeration<Binding> listBindings(String name) throws NamingException {
    Object o = lookup(name);
    if (o == this) {
        return CastUtils.cast(new ListBindingEnumeration());
    } else if (o instanceof Context) {
        return ((Context) o).listBindings("");
    } else {
        throw new NotContextException();
    }
}
private CachingInjector<?> handleHasConverterAnnotation(TypeConverterRegistry registry, Class<?> type,
                                                        CachingInjector<?> injector, Method method, boolean allowNull) {
    if (isValidConverterMethod(method)) {
        int modifiers = method.getModifiers();
        if (isAbstract(modifiers) || !isPublic(modifiers)) {
            LOG.warn("Ignoring bad converter on type: " + type.getCanonicalName() + " method: " + method
                + " as a converter method is not a public and concrete method");
        } else {
            Class<?> toType = method.getReturnType();
            if (toType.equals(Void.class)) {
                LOG.warn("Ignoring bad converter on type: " + type.getCanonicalName() + " method: " + method
                    + " as a converter method returns a void method");
            } else {
                Class<?> fromType = method.getParameterTypes()[0];
                if (isStatic(modifiers)) {
                    registerTypeConverter(registry, method, toType, fromType,
                        new StaticMethodTypeConverter(method, allowNull));
                } else {
                    if (injector == null) {
                        injector = new CachingInjector<Object>(registry, CastUtils.cast(type, Object.class));
                    }
                    registerTypeConverter(registry, method, toType, fromType,
                        new InstanceMethodTypeConverter(injector, method, registry, allowNull));
                }
            }
        }
    } else {
        LOG.warn("Ignoring bad converter on type: " + type.getCanonicalName() + " method: " + method
            + " as a converter method should have one parameter");
    }
    return injector;
}
private CachingInjector<?> handleHasFallbackConverterAnnotation(TypeConverterRegistry registry, Class<?> type,
                                                                CachingInjector<?> injector, Method method, boolean allowNull) {
    if (isValidFallbackConverterMethod(method)) {
        int modifiers = method.getModifiers();
        if (isAbstract(modifiers) || !isPublic(modifiers)) {
            LOG.warn("Ignoring bad fallback converter on type: " + type.getCanonicalName() + " method: " + method
                + " as a fallback converter method is not a public and concrete method");
        } else {
            Class<?> toType = method.getReturnType();
            if (toType.equals(Void.class)) {
                LOG.warn("Ignoring bad fallback converter on type: " + type.getCanonicalName() + " method: " + method
                    + " as a fallback converter method returns a void method");
            } else {
                if (isStatic(modifiers)) {
                    registerFallbackTypeConverter(registry,
                        new StaticMethodFallbackTypeConverter(method, registry, allowNull), method);
                } else {
                    if (injector == null) {
                        injector = new CachingInjector<Object>(registry, CastUtils.cast(type, Object.class));
                    }
                    registerFallbackTypeConverter(registry,
                        new InstanceMethodFallbackTypeConverter(injector, method, registry, allowNull), method);
                }
            }
        }
    } else {
        LOG.warn("Ignoring bad fallback converter on type: " + type.getCanonicalName() + " method: " + method
            + " as a fallback converter method should have one parameter");
    }
    return injector;
}
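For context, a hypothetical converter class of the shape the two loader methods above scan for; MyOrder and the method body are invented for illustration, while the @Converter conventions follow Camel's documented annotation-based type converter mechanism.

@Converter
public final class MyOrderConverter {

    private MyOrderConverter() {
    }

    // public + static + one parameter + non-void return: accepted by
    // handleHasConverterAnnotation and registered as a StaticMethodTypeConverter
    @Converter
    public static MyOrder toMyOrder(String body) {
        return new MyOrder(body); // MyOrder is a hypothetical domain type
    }
}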
public <T> Class<T> resolveClass(String name, Class<T> type) {
    Class<T> answer = CastUtils.cast(loadClass(name, DefaultClassResolver.class.getClassLoader()));
    if (answer == null && getApplicationContextClassLoader() != null) {
        // fallback and use application context class loader
        answer = CastUtils.cast(loadClass(name, getApplicationContextClassLoader()));
    }
    return answer;
}
public Iterator<String> getPrefixes(String namespaceURI) {
    Set<String> set = new HashSet<String>();
    for (Entry<String, String> entry : map.entrySet()) {
        if (namespaceURI.equals(entry.getValue())) {
            set.add(entry.getKey());
        }
    }
    if (parent != null) {
        Iterator<String> iter = CastUtils.cast(parent.getPrefixes(namespaceURI));
        while (iter.hasNext()) {
            set.add(iter.next());
        }
    }
    return set.iterator();
}
/**
 * Add any relevant project dependencies to the classpath. Takes
 * includeProjectDependencies into consideration.
 *
 * @param path classpath of {@link java.net.URL} objects
 * @throws MojoExecutionException
 */
private void addRelevantProjectDependenciesToClasspath(Set<URL> path) throws MojoExecutionException {
    if (this.includeProjectDependencies) {
        try {
            getLog().debug("Project Dependencies will be included.");

            URL mainClasses = new File(project.getBuild().getOutputDirectory()).toURI().toURL();
            getLog().debug("Adding to classpath : " + mainClasses);
            path.add(mainClasses);

            Set<Artifact> dependencies = CastUtils.cast(project.getArtifacts());

            // system scope dependencies are not returned by maven 2.0. See MEXEC-17
            dependencies.addAll(getAllNonTestScopedDependencies());

            Iterator<Artifact> iter = dependencies.iterator();
            while (iter.hasNext()) {
                Artifact classPathElement = iter.next();
                getLog().debug("Adding project dependency artifact: " + classPathElement.getArtifactId()
                    + " to classpath");
                File file = classPathElement.getFile();
                if (file != null) {
                    path.add(file.toURI().toURL());
                }
            }
        } catch (MalformedURLException e) {
            throw new MojoExecutionException("Error during setting up classpath", e);
        }
    } else {
        getLog().debug("Project Dependencies will be excluded.");
    }
}
private Set<Artifact> resolveExecutableDependencies(Artifact executablePomArtifact, boolean ignoreFailures)
        throws MojoExecutionException {
    Set<Artifact> executableDependencies = null;
    try {
        MavenProject executableProject = this.projectBuilder.buildFromRepository(executablePomArtifact,
            this.remoteRepositories, this.localRepository);

        // get all of the dependencies for the executable project
        List<Artifact> dependencies = CastUtils.cast(executableProject.getDependencies());

        // make Artifacts of all the dependencies
        Set<Artifact> dependencyArtifacts = CastUtils.cast(MavenMetadataSource.createArtifacts(this.artifactFactory,
            dependencies, null, null, null));

        // not forgetting the Artifact of the project itself
        dependencyArtifacts.add(executableProject.getArtifact());

        // resolve runtime dependencies transitively to obtain a comprehensive list of assemblies
        ArtifactResolutionResult result = artifactResolver.resolveTransitively(dependencyArtifacts,
            executablePomArtifact, Collections.emptyMap(), this.localRepository, this.remoteRepositories,
            metadataSource, new ScopeArtifactFilter(Artifact.SCOPE_RUNTIME), Collections.emptyList());
        executableDependencies = CastUtils.cast(result.getArtifacts());
    } catch (Exception ex) {
        if (ignoreFailures) {
            getLog().debug("Ignoring maven resolving dependencies failure " + ex.getMessage());
        } else {
            throw new MojoExecutionException("Encountered problems resolving dependencies of the executable "
                + "in preparation for its execution.", ex);
        }
    }
    return executableDependencies;
}
/**
 * Add any relevant project dependencies to the classpath. Takes
 * includeProjectDependencies into consideration.
 *
 * @param path classpath of {@link java.net.URL} objects
 * @throws org.apache.maven.plugin.MojoExecutionException
 */
private void addRelevantProjectDependenciesToClasspath(List<URL> path) throws MojoExecutionException {
    if (this.includeProjectDependencies) {
        try {
            getLog().debug("Project Dependencies will be included.");

            URL mainClasses = new File(project.getBuild().getOutputDirectory()).toURI().toURL();
            getLog().debug("Adding to classpath : " + mainClasses);
            path.add(mainClasses);

            Set<Artifact> dependencies = CastUtils.cast(project.getArtifacts());

            // system scope dependencies are not returned by maven 2.0. See MEXEC-17
            dependencies.addAll(getAllNonTestScopedDependencies());

            Iterator<Artifact> iter = dependencies.iterator();
            while (iter.hasNext()) {
                Artifact classPathElement = iter.next();
                getLog().debug("Adding project dependency artifact: " + classPathElement.getArtifactId()
                    + " to classpath");
                File file = classPathElement.getFile();
                if (file != null) {
                    path.add(file.toURI().toURL());
                }
            }
        } catch (MalformedURLException e) {
            throw new MojoExecutionException("Error during setting up classpath", e);
        }
    } else {
        getLog().debug("Project Dependencies will be excluded.");
    }
}
private Set<Artifact> resolveExecutableDependencies(Artifact executablePomArtifact) throws MojoExecutionException {
    Set<Artifact> executableDependencies;
    try {
        MavenProject executableProject = this.projectBuilder.buildFromRepository(executablePomArtifact,
            this.remoteRepositories, this.localRepository);

        // get all of the dependencies for the executable project
        List<Artifact> dependencies = CastUtils.cast(executableProject.getDependencies());

        // make Artifacts of all the dependencies
        Set<Artifact> dependencyArtifacts = CastUtils.cast(MavenMetadataSource.createArtifacts(this.artifactFactory,
            dependencies, null, null, null));

        // not forgetting the Artifact of the project itself
        dependencyArtifacts.add(executableProject.getArtifact());

        // resolve all dependencies transitively to obtain a comprehensive list of assemblies
        ArtifactResolutionResult result = artifactResolver.resolveTransitively(dependencyArtifacts,
            executablePomArtifact, Collections.emptyMap(), this.localRepository, this.remoteRepositories,
            metadataSource, null, Collections.emptyList());
        executableDependencies = CastUtils.cast(result.getArtifacts());
    } catch (Exception ex) {
        throw new MojoExecutionException("Encountered problems resolving dependencies of the executable "
            + "in preparation for its execution.", ex);
    }
    return executableDependencies;
}
/**
 * Additional properties for the entity manager to use.
 */
public Map<String, Object> getEntityManagerProperties() {
    if (entityManagerProperties == null) {
        entityManagerProperties = CastUtils.cast(System.getProperties());
    }
    return entityManagerProperties;
}
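Worth noting: System.getProperties() returns a Properties (a Hashtable&lt;Object, Object&gt;), so the cast above to Map&lt;String, Object&gt; is unchecked and relies on the convention that system property keys are strings. A small illustrative use, with a standard JVM property key:

Map<String, Object> props = CastUtils.cast(System.getProperties());
Object javaVersion = props.get("java.version"); // standard JVM system property
// the cast returns the same object, so writes to 'props' go straight into the real system properties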
@Override
protected int poll() throws Exception {
    shutdownRunningTask = null;
    pendingExchanges = 0;

    int max = getMaxMessagesPerPoll() > 0 ? getMaxMessagesPerPoll() : Integer.MAX_VALUE;

    Queue<Exchange> queue = new LinkedList<Exchange>();
    Iterator<Object> keyIterator = dataStore.keyIterator();
    int index = 0;
    while (keyIterator.hasNext() && index < max) {
        Object key = keyIterator.next();
        Object value = dataStore.get(key);
        Exchange exchange = endpoint.createExchange();
        exchange.setProperty(KratiConstants.KEY, key);
        exchange.getIn().setBody(value);
        queue.add(exchange);
        index++;
    }
    // did we cap at max?
    if (index == max && keyIterator.hasNext()) {
        log.debug("Limiting to maximum messages to poll {} as there were more messages in this poll.", max);
    }
    return queue.isEmpty() ? 0 : processBatch(CastUtils.cast(queue));
}
public Enumeration<URL> findEntries(String path, String filePattern, boolean recurse) {
    if (path.equals("/org/apache/camel/core/osgi/test") && filePattern.equals("*.class")) {
        List<URL> urls = new ArrayList<URL>();
        URL url = getClass().getClassLoader().getResource("org/apache/camel/core/osgi/test/MyTypeConverter.class");
        urls.add(url);
        url = getClass().getClassLoader().getResource("org/apache/camel/core/osgi/test/MyRouteBuilder.class");
        urls.add(url);
        return new ListEnumeration<URL>(urls);
    } else {
        return CastUtils.cast(super.findEntries(path, filePattern, recurse));
    }
}
@Override
protected int poll() throws Exception {
    shutdownRunningTask = null;
    pendingExchanges = 0;

    Queue<Exchange> queue = new LinkedList<Exchange>();
    String directory = endpoint.getDirectory();
    ListContainerOptions opt = new ListContainerOptions();
    if (!Strings.isNullOrEmpty(directory)) {
        opt = opt.inDirectory(directory);
    }
    for (StorageMetadata md : blobStore.list(container, opt.maxResults(maxMessagesPerPoll).recursive())) {
        String blobName = md.getName();
        if (md.getType().equals(StorageType.BLOB)) {
            if (!Strings.isNullOrEmpty(blobName)) {
                InputStream body = JcloudsBlobStoreHelper.readBlob(blobStore, container, blobName);
                if (body != null) {
                    Exchange exchange = endpoint.createExchange();
                    CachedOutputStream cos = new CachedOutputStream(exchange);
                    IOHelper.copy(body, cos);
                    exchange.getIn().setBody(cos.newStreamCache());
                    exchange.setProperty(JcloudsConstants.BLOB_NAME, blobName);
                    queue.add(exchange);
                }
            }
        }
    }
    return queue.isEmpty() ? 0 : processBatch(CastUtils.cast(queue));
}
@Override
protected int poll() throws Exception {
    // must reset for each poll
    shutdownRunningTask = null;
    pendingExchanges = 0;

    ReceiveMessageRequest request = new ReceiveMessageRequest(getQueueUrl());
    request.setMaxNumberOfMessages(getMaxMessagesPerPoll() > 0 ? getMaxMessagesPerPoll() : null);
    request.setVisibilityTimeout(getConfiguration().getVisibilityTimeout() != null
        ? getConfiguration().getVisibilityTimeout() : null);
    request.setWaitTimeSeconds(getConfiguration().getWaitTimeSeconds() != null
        ? getConfiguration().getWaitTimeSeconds() : null);
    if (attributeNames != null) {
        request.setAttributeNames(attributeNames);
    }
    if (messageAttributeNames != null) {
        request.setMessageAttributeNames(messageAttributeNames);
    }

    LOG.trace("Receiving messages with request [{}]...", request);

    ReceiveMessageResult messageResult = null;
    try {
        messageResult = getClient().receiveMessage(request);
    } catch (QueueDoesNotExistException e) {
        LOG.info("Queue does not exist... recreating now...");
        reConnectToQueue();
        messageResult = getClient().receiveMessage(request);
    }

    if (LOG.isTraceEnabled()) {
        LOG.trace("Received {} messages", messageResult.getMessages().size());
    }

    Queue<Exchange> exchanges = createExchanges(messageResult.getMessages());
    return processBatch(CastUtils.cast(exchanges));
}
@Override
public int processBatch(Queue<Object> exchanges) throws Exception {
    int total = exchanges.size();
    int answer = total;

    for (int index = 0; index < total && isBatchAllowed(); index++) {
        // only loop if we are started (allowed to run)
        // use poll to remove the head so it does not consume memory even after we have processed it
        Exchange exchange = (Exchange) exchanges.poll();

        // add current index and total as properties
        exchange.setProperty(Exchange.BATCH_INDEX, index);
        exchange.setProperty(Exchange.BATCH_SIZE, total);
        exchange.setProperty(Exchange.BATCH_COMPLETE, index == total - 1);

        // update pending number of exchanges
        pendingExchanges = total - index - 1;

        boolean started = processExchange(exchange);

        // if we did not start processing then decrement the counter
        if (!started) {
            answer--;
        }
    }

    // drain any in progress files as we are done with this batch
    removeExcessiveInProgressTasks(CastUtils.cast((Queue<?>) exchanges, Exchange.class), 0);

    return answer;
}
public Hashtable<String, Object> getEnvironment() throws NamingException {
    return CastUtils.cast((Hashtable<?, ?>) environment.clone(), String.class, Object.class);
}
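The two-argument form used here and in getInitialContext above takes Class tokens for the key and value types; as far as these call sites show, the tokens chiefly let the compiler infer the target Hashtable&lt;String, Object&gt; without an explicit type witness. A hypothetical call site (the rawEnv variable and its entry are illustrative):

Hashtable<?, ?> rawEnv = new Hashtable<Object, Object>();
Hashtable<String, Object> typed = CastUtils.cast(rawEnv, String.class, Object.class);
typed.put("java.naming.provider.url", "localhost"); // illustrative entry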
public <T> Class<T> resolveClass(String name, Class<T> type, ClassLoader loader) {
    return CastUtils.cast(loadClass(name, loader));
}
public <T> List<T> newInstances(String key, Injector injector, Class<T> type)
        throws ClassNotFoundException, IOException {
    List<Class<T>> list = CastUtils.cast(findClasses(key));
    List<T> answer = new ArrayList<T>(list.size());
    answer.add(newInstance(key, injector, type));
    return answer;
}
public int processBatch(Queue<Object> exchanges) {
    int total = exchanges.size();
    int answer = total;

    // limit if needed
    if (maxMessagesPerPoll > 0 && total > maxMessagesPerPoll) {
        log.debug("Limiting to maximum messages to poll {} as there were {} messages in this poll.",
            maxMessagesPerPoll, total);
        total = maxMessagesPerPoll;
    }

    for (int index = 0; index < total && isBatchAllowed(); index++) {
        // only loop if we are started (allowed to run)
        // use poll to remove the head so it does not consume memory even after we have processed it
        Exchange exchange = (Exchange) exchanges.poll();

        // add current index and total as properties
        exchange.setProperty(Exchange.BATCH_INDEX, index);
        exchange.setProperty(Exchange.BATCH_SIZE, total);
        exchange.setProperty(Exchange.BATCH_COMPLETE, index == total - 1);

        // update pending number of exchanges
        pendingExchanges = total - index - 1;

        // process the current exchange
        boolean started;
        if (customProcessor != null) {
            // use a custom processor
            started = customProcessExchange(exchange, customProcessor);
        } else {
            // process the exchange regularly
            started = processExchange(exchange);
        }

        // if we did not start processing the file then decrement the counter
        if (!started) {
            answer--;
        }
    }

    // drain any in progress files as we are done with this batch
    removeExcessiveInProgressFiles(CastUtils.cast((Deque<?>) exchanges, Exchange.class), 0);

    return answer;
}
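Downstream of a batch consumer like this, the batch properties it sets can be read from any processor. A hypothetical route fragment (the endpoint URI, header name, and flush logic are invented for illustration):

from("file://inbox")
    .process(exchange -> {
        int index = exchange.getProperty(Exchange.BATCH_INDEX, Integer.class);
        int size = exchange.getProperty(Exchange.BATCH_SIZE, Integer.class);
        exchange.getIn().setHeader("progress", (index + 1) + "/" + size); // illustrative header
        if (exchange.getProperty(Exchange.BATCH_COMPLETE, Boolean.class)) {
            // e.g. flush state accumulated across the batch once the final exchange arrives
        }
    });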
@SuppressWarnings("unchecked") @Override protected void setParameters(Object[] params, Message message) { Object attachments = message.get(CxfConstants.CAMEL_CXF_ATTACHMENTS); if (attachments != null) { message.setAttachments((Collection<Attachment>) attachments); message.remove(CxfConstants.CAMEL_CXF_ATTACHMENTS); } // Don't try to reset the parameters if the parameter is not CxfPayload instance // as the setParameter will be called more than once when using the fail over feature if (DataFormat.PAYLOAD == message.get(DataFormat.class) && params[0] instanceof CxfPayload) { CxfPayload<?> payload = (CxfPayload<?>) params[0]; List<Source> elements = payload.getBodySources(); BindingOperationInfo boi = message.get(BindingOperationInfo.class); MessageContentsList content = new MessageContentsList(); int i = 0; for (MessagePartInfo partInfo : boi.getInput().getMessageParts()) { if (elements.size() > i) { if (isSkipPayloadMessagePartCheck()) { content.put(partInfo, elements.get(i++)); } else { String name = findName(elements, i); if (partInfo.getConcreteName().getLocalPart().equals(name)) { content.put(partInfo, elements.get(i++)); } } } } if (elements != null && content.size() < elements.size()) { throw new IllegalArgumentException("The PayLoad elements cannot fit with the message parts of the BindingOperation. Please check the BindingOperation and PayLoadMessage."); } message.setContent(List.class, content); // merge header list from request context with header list from CXF payload List<Object> headerListOfRequestContxt = (List<Object>)message.get(Header.HEADER_LIST); List<Object> headerListOfPayload = CastUtils.cast(payload.getHeaders()); if (headerListOfRequestContxt == headerListOfPayload) { // == is correct, we want to compare the object instances // nothing to do, this can happen when the CXF payload is already created in the from-cxf-endpoint and then forwarded to a to-cxf-endpoint } else { if (headerListOfRequestContxt == null) { message.put(Header.HEADER_LIST, payload.getHeaders()); } else { headerListOfRequestContxt.addAll(headerListOfPayload); } } } else { super.setParameters(params, message); } message.remove(DataFormat.class); }
@Override
public <T> Class<T> resolveClass(String name, Class<T> type) {
    return CastUtils.cast(resolveClass(name));
}