/**
 * Basic constructor.
 *
 * pock: For BES we use a new server for every request. We can't determine
 * this server from the serverChain --> use the server of the request as
 * parameter (Bug 5487)
 *
 * @param response
 *            The response we are wrapping
 * @param server
 *            The server that was matched
 * @param ownHostName
 *            String we are rewriting servers to
 * @param contextPath
 *            The context path of the web application
 * @param serverChain
 *            The server chain used when rewriting URLs
 * @throws IOException
 *             When there is a problem with the streams
 */
public UrlRewritingResponseWrapper(HttpServletResponse response, Server server,
        String ownHostName, String contextPath, ServerChain serverChain) throws IOException {
    super(response);
    this.server = server;
    this.ownHostName = ownHostName;
    this.contextPath = contextPath;
    this.serverChain = serverChain;
    log = LogFactory.getLog(UrlRewritingResponseWrapper.class);
    outStream = new UrlRewritingOutputStream(response.getOutputStream(), contextPath,
            response.getCharacterEncoding());
    outWriter = new PrintWriter(outStream);
    originalWriter = new PrintWriter(response.getOutputStream());
}
public void contextDestroyed(ServletContextEvent contextEvent) {
    try {
        LOGGER.info("Start shutting down ORS and releasing resources.");
        if (RoutingProfileManagerStatus.isReady())
            RoutingProfileManager.getInstance().destroy();
        LocationsDataProviderFactory.releaseProviders();
        StatisticsProviderFactory.releaseProviders();
        LogFactory.release(Thread.currentThread().getContextClassLoader());
        try {
            // Encourage the JVM to release native and finalizable resources.
            System.gc();
            System.runFinalization();
            System.gc();
            System.runFinalization();
        } catch (Throwable t) {
            LOGGER.error("Failed to perform finalization.", t);
        }
    } catch (Exception e) {
        LOGGER.error("Failed to shut down ORS cleanly.", e);
    }
}
/**
 * Spy on the Java DNS infrastructure.
 * This likely only works on Sun-derived JDKs, but uses JUnit's
 * Assume functionality so that any tests using it are skipped on
 * incompatible JDKs.
 */
private NameService spyOnNameService() {
    try {
        Field f = InetAddress.class.getDeclaredField("nameServices");
        f.setAccessible(true);
        Assume.assumeNotNull(f);
        @SuppressWarnings("unchecked")
        List<NameService> nsList = (List<NameService>) f.get(null);

        NameService ns = nsList.get(0);
        Log log = LogFactory.getLog("NameServiceSpy");
        ns = Mockito.mock(NameService.class,
                new GenericTestUtils.DelegateAnswer(log, ns));
        nsList.set(0, ns);
        return ns;
    } catch (Throwable t) {
        LOG.info("Unable to spy on DNS. Skipping test.", t);
        // In case the JDK we're testing on doesn't work like Sun's, just
        // skip the test.
        Assume.assumeNoException(t);
        throw new RuntimeException(t); // unreachable: assumeNoException always throws
    }
}
protected VersionInfo(String component) {
    info = new Properties();
    String versionInfoFile = component + "-version-info.properties";
    InputStream is = null;
    try {
        is = Thread.currentThread().getContextClassLoader()
                .getResourceAsStream(versionInfoFile);
        if (is == null) {
            throw new IOException("Resource not found");
        }
        info.load(is);
    } catch (IOException ex) {
        LogFactory.getLog(getClass()).warn("Could not read '" +
                versionInfoFile + "', " + ex.toString(), ex);
    } finally {
        IOUtils.closeStream(is);
    }
}
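// A hedged sketch (not from the original source) of how such a constructor is
// typically wired up: a subclass pins the component name so the matching
// "<component>-version-info.properties" file is read from the classpath.
// The subclass name and component name here are illustrative.
class CommonVersionInfo extends VersionInfo {
    CommonVersionInfo() {
        super("common"); // loads common-version-info.properties
    }
}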
/**
 * Sets the Via header on the response, appending this proxy's data.
 *
 * @param response
 *            The response we set the header for
 */
private void setViaHeader(HttpServletResponse response) {
    String serverHostName = "jEasyReverseProxy";
    try {
        serverHostName = InetAddress.getLocalHost().getHostName();
    } catch (UnknownHostException e) {
        LogFactory.getLog(RequestHandlerBase.class).error(
                "Couldn't get the hostname needed for header Via", e);
    }
    Header originalVia = method.getResponseHeader("via");
    StringBuffer via = new StringBuffer();
    if (originalVia != null) {
        via.append(originalVia.getValue()).append(", ");
    }
    via.append(method.getStatusLine().getHttpVersion()).append(" ").append(serverHostName);
    response.setHeader("via", via.toString());
}
private static Log doCreateLogger(Class<?> logName) {
    Log logger;
    ClassLoader ccl = Thread.currentThread().getContextClassLoader();
    // push the logger class classloader (useful when dealing with commons-logging 1.0.x)
    Thread.currentThread().setContextClassLoader(logName.getClassLoader());
    try {
        logger = LogFactory.getLog(logName);
    } catch (Throwable th) {
        logger = new SimpleLogger();
        logger.fatal(
                "logger infrastructure not properly set up. If commons-logging jar is used try switching to slf4j (see the FAQ for more info).",
                th);
    } finally {
        Thread.currentThread().setContextClassLoader(ccl);
    }
    return logger;
}
@Deprecated
public DefaultRequestDirector(
        final HttpRequestExecutor requestExec,
        final ClientConnectionManager conman,
        final ConnectionReuseStrategy reustrat,
        final ConnectionKeepAliveStrategy kastrat,
        final HttpRoutePlanner rouplan,
        final HttpProcessor httpProcessor,
        final HttpRequestRetryHandler retryHandler,
        final RedirectHandler redirectHandler,
        final AuthenticationHandler targetAuthHandler,
        final AuthenticationHandler proxyAuthHandler,
        final UserTokenHandler userTokenHandler,
        final HttpParams params) {
    this(LogFactory.getLog(DefaultRequestDirector.class),
            requestExec, conman, reustrat, kastrat, rouplan, httpProcessor, retryHandler,
            new DefaultRedirectStrategyAdaptor(redirectHandler),
            new AuthenticationStrategyAdaptor(targetAuthHandler),
            new AuthenticationStrategyAdaptor(proxyAuthHandler),
            userTokenHandler, params);
}
@Deprecated
public DefaultRequestDirector(
        final Log log,
        final HttpRequestExecutor requestExec,
        final ClientConnectionManager conman,
        final ConnectionReuseStrategy reustrat,
        final ConnectionKeepAliveStrategy kastrat,
        final HttpRoutePlanner rouplan,
        final HttpProcessor httpProcessor,
        final HttpRequestRetryHandler retryHandler,
        final RedirectStrategy redirectStrategy,
        final AuthenticationHandler targetAuthHandler,
        final AuthenticationHandler proxyAuthHandler,
        final UserTokenHandler userTokenHandler,
        final HttpParams params) {
    // Delegate using the supplied log; previously the log parameter was
    // ignored and a fresh one was created.
    this(log,
            requestExec, conman, reustrat, kastrat, rouplan, httpProcessor, retryHandler,
            redirectStrategy,
            new AuthenticationStrategyAdaptor(targetAuthHandler),
            new AuthenticationStrategyAdaptor(proxyAuthHandler),
            userTokenHandler, params);
}
/**
 * <p>Returns the <code>Log</code>, creating it lazily on first use.</p>
 */
private Log logger() {
    if (logger == null) {
        logger = LogFactory.getLog(LazyDynaBean.class);
    }
    return logger;
}
/**
 * Gets the class for the primitive type corresponding to the primitive wrapper class given.
 * For example, passing <code>Boolean.class</code> returns <code>boolean.class</code>.
 * @param wrapperType the primitive wrapper class, e.g. <code>Integer.class</code>
 * @return the primitive type class corresponding to the given wrapper class,
 * null if no match is found
 */
public static Class<?> getPrimitiveType(final Class<?> wrapperType) {
    // does anyone know a better strategy than comparing names?
    if (Boolean.class.equals(wrapperType)) {
        return boolean.class;
    } else if (Float.class.equals(wrapperType)) {
        return float.class;
    } else if (Long.class.equals(wrapperType)) {
        return long.class;
    } else if (Integer.class.equals(wrapperType)) {
        return int.class;
    } else if (Short.class.equals(wrapperType)) {
        return short.class;
    } else if (Byte.class.equals(wrapperType)) {
        return byte.class;
    } else if (Double.class.equals(wrapperType)) {
        return double.class;
    } else if (Character.class.equals(wrapperType)) {
        return char.class;
    } else {
        final Log log = LogFactory.getLog(MethodUtils.class);
        if (log.isDebugEnabled()) {
            log.debug("Not a known primitive wrapper class: " + wrapperType);
        }
        return null;
    }
}
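// A hedged usage sketch (not from the original source): exercises the
// wrapper-to-primitive mapping above; the test method name is hypothetical.
@Test
public void primitiveWrapperMapping() {
    Assert.assertEquals(int.class, getPrimitiveType(Integer.class));
    Assert.assertEquals(char.class, getPrimitiveType(Character.class));
    // Non-wrapper types yield null (plus a debug log entry) rather than throwing.
    Assert.assertNull(getPrimitiveType(String.class));
}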
protected VersionInfo(String component) {
    info = new Properties();
    String versionInfoFile = component + "-version-info.properties";
    InputStream is = null;
    try {
        is = ThreadUtil.getResourceAsStream(versionInfoFile);
        info.load(is);
    } catch (IOException ex) {
        LogFactory.getLog(getClass()).warn("Could not read '" +
                versionInfoFile + "', " + ex.toString(), ex);
    } finally {
        IOUtils.closeStream(is);
    }
}
@Test
public void testServerSaslNoClientSasl() throws Exception {
    HdfsConfiguration clusterConf = createSecureConfig(
            "authentication,integrity,privacy");
    // Set short retry timeouts so this test runs faster
    clusterConf.setInt(DFSConfigKeys.DFS_CLIENT_RETRY_WINDOW_BASE, 10);
    startCluster(clusterConf);
    HdfsConfiguration clientConf = new HdfsConfiguration(clusterConf);
    clientConf.set(DFS_DATA_TRANSFER_PROTECTION_KEY, "");

    LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
            LogFactory.getLog(DataNode.class));
    try {
        doTest(clientConf);
        Assert.fail("Should fail if SASL data transfer protection is not " +
                "configured or not supported in client");
    } catch (IOException e) {
        GenericTestUtils.assertMatches(e.getMessage(),
                "could only be replicated to 0 nodes");
    } finally {
        logs.stopCapturing();
    }

    GenericTestUtils.assertMatches(logs.getOutput(),
            "Failed to read expected SASL data transfer protection " +
            "handshake from client at");
}
@Test
public void testJar() throws Exception {
    // picking a class that is for sure in a JAR in the classpath
    String jar = JarFinder.getJar(LogFactory.class);
    Assert.assertTrue(new File(jar).exists());
}
private static Future<?> quietlyCallListener(final ProgressListener listener,
        final ProgressEvent event) {
    try {
        listener.progressChanged(event);
    } catch (Throwable t) {
        // That's right, we need to suppress all errors so as to be on par
        // with the async mode where all failures will be ignored.
        LogFactory.getLog(SDKProgressPublisher.class)
                .debug("Failure from the event listener", t);
    }
    return null;
}
public CloudControllerClientImpl(URL cloudControllerUrl, RestTemplate restTemplate,
        OauthClient oauthClient, LoggregatorClient loggregatorClient,
        CloudCredentials cloudCredentials, CloudSpace sessionSpace) {
    logger = LogFactory.getLog(getClass().getName());
    initialize(cloudControllerUrl, restTemplate, oauthClient, loggregatorClient, cloudCredentials);
    this.sessionSpace = sessionSpace;
}
public static synchronized void enableBouncyCastle() {
    if (isBouncyCastleAvailable()) {
        return;
    }
    try {
        @SuppressWarnings("unchecked")
        Class<Provider> c = (Class<Provider>) Class.forName(BC_PROVIDER_FQCN);
        Provider provider = c.newInstance();
        Security.addProvider(provider);
    } catch (Exception e) {
        LogFactory.getLog(CryptoRuntime.class).debug(
                "Bouncy Castle not available", e);
    }
}
@Override
public void addSubMeasurement(String subMeasurementName, TimingInfo ti) {
    List<TimingInfo> timings = subMeasurementsByName.get(subMeasurementName);
    if (timings == null) {
        timings = new ArrayList<TimingInfo>();
        subMeasurementsByName.put(subMeasurementName, timings);
    }
    if (ti.isEndTimeKnown()) {
        timings.add(ti);
    } else {
        LogFactory.getLog(getClass()).debug(
                "Skip submeasurement timing info with no end time for "
                        + subMeasurementName);
    }
}
/**
 * Constructor.
 */
public TransformerDebug(NodeService nodeService, MimetypeService mimetypeService,
        ContentTransformerRegistry transformerRegistry, TransformerConfig transformerConfig,
        Log transformerLog, Log transformerDebugLog) {
    this.nodeService = nodeService;
    this.mimetypeService = mimetypeService;
    this.transformerRegistry = transformerRegistry;
    this.transformerConfig = transformerConfig;

    logger = new LogTee(LogFactory.getLog(TransformerDebug.class), transformerDebugLog);
    info = new LogTee(LogFactory.getLog(TransformerLog.class), transformerLog);
}
/**
 * Instantiates a new batch processor.
 *
 * @param processName
 *            the process name
 * @param retryingTransactionHelper
 *            the retrying transaction helper
 * @param workProvider
 *            the object providing the work packets
 * @param workerThreads
 *            the number of worker threads
 * @param batchSize
 *            the number of entries we process at a time in a transaction
 * @param applicationEventPublisher
 *            the application event publisher (may be <tt>null</tt>)
 * @param logger
 *            the logger to use (may be <tt>null</tt>)
 * @param loggingInterval
 *            the number of entries to process before reporting progress
 *
 * @since 3.4
 */
public BatchProcessor(
        String processName,
        RetryingTransactionHelper retryingTransactionHelper,
        BatchProcessWorkProvider<T> workProvider,
        int workerThreads,
        int batchSize,
        ApplicationEventPublisher applicationEventPublisher,
        Log logger,
        int loggingInterval) {
    this.threadFactory = new TraceableThreadFactory();
    this.threadFactory.setNamePrefix(processName);
    this.threadFactory.setThreadDaemon(true);

    this.processName = processName;
    this.retryingTransactionHelper = retryingTransactionHelper;
    this.workProvider = workProvider;
    this.workerThreads = workerThreads;
    this.batchSize = batchSize;
    if (logger == null) {
        this.logger = LogFactory.getLog(this.getClass());
    } else {
        this.logger = logger;
    }
    this.loggingInterval = loggingInterval;

    // Let the (enterprise) monitoring side know of our presence
    if (applicationEventPublisher != null) {
        applicationEventPublisher.publishEvent(new BatchMonitorEvent(this));
    }
}
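// A hedged construction sketch (not from the original source): per the Javadoc,
// the event publisher and logger may be null, in which case logging falls back
// to a class-local log. The process name is hypothetical; the helper and work
// provider variables are assumed to exist in the calling code.
BatchProcessor<NodeRef> processor = new BatchProcessor<NodeRef>(
        "PersonCleanup",            // processName (hypothetical)
        retryingTransactionHelper,  // transaction helper from the service registry
        workProvider,               // supplies the work packets to process
        4,                          // workerThreads
        100,                        // batchSize: entries per transaction
        null,                       // applicationEventPublisher (optional)
        null,                       // logger (optional; defaults to the class log)
        1000);                      // loggingInterval: entries between progress reports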
/**
 * Public constructor.
 */
public TransactionalCache() {
    logger = LogFactory.getLog(TransactionalCache.class);
    isDebugEnabled = logger.isDebugEnabled();
    disableSharedCache = false;
    isMutable = true;
    allowEqualsChecks = false;
}
/**
 * Switches the logger for the given class to DEBUG level.
 *
 * @param clazz The class for which to switch to debug logging.
 */
public void enableDebug(Class<?> clazz) {
    Log l = LogFactory.getLog(clazz);
    if (l instanceof Log4JLogger) {
        ((Log4JLogger) l).getLogger().setLevel(org.apache.log4j.Level.DEBUG);
    } else if (l instanceof Jdk14Logger) {
        ((Jdk14Logger) l).getLogger().setLevel(java.util.logging.Level.ALL);
    }
}
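// A hedged usage sketch (not from the original source): turn on verbose logging
// for a component before a hard-to-reproduce test section. The target class and
// method name are illustrative; any class whose commons-logging Log wraps log4j
// or JDK logging works.
@Before
public void verboseLogging() {
    enableDebug(DataNode.class); // surface DEBUG output during the test run
}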
/** Disable the logs that are not very useful for snapshot related tests. */
public static void disableLogs() {
    final String[] lognames = {
        "org.apache.hadoop.hdfs.server.datanode.BlockPoolSliceScanner",
        "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetImpl",
        "org.apache.hadoop.hdfs.server.datanode.fsdataset.impl.FsDatasetAsyncDiskService",
    };
    for (String n : lognames) {
        GenericTestUtils.disableLog(LogFactory.getLog(n));
    }

    GenericTestUtils.disableLog(LogFactory.getLog(UserGroupInformation.class));
    GenericTestUtils.disableLog(LogFactory.getLog(BlockManager.class));
    GenericTestUtils.disableLog(LogFactory.getLog(FSNamesystem.class));
    GenericTestUtils.disableLog(LogFactory.getLog(DirectoryScanner.class));
    GenericTestUtils.disableLog(LogFactory.getLog(MetricsSystemImpl.class));

    GenericTestUtils.disableLog(BlockScanner.LOG);
    GenericTestUtils.disableLog(HttpServer2.LOG);
    GenericTestUtils.disableLog(DataNode.LOG);
    GenericTestUtils.disableLog(BlockPoolSliceStorage.LOG);
    GenericTestUtils.disableLog(LeaseManager.LOG);
    GenericTestUtils.disableLog(NameNode.stateChangeLog);
    GenericTestUtils.disableLog(NameNode.blockStateChangeLog);
    GenericTestUtils.disableLog(DFSClient.LOG);
    GenericTestUtils.disableLog(Server.LOG);
}
public void testSetUp() throws Throwable {
    // Check that the bean is present
    ctx.getBean("log4JHierarchyInit");
    // Make sure that the default log4j.properties is being picked up
    Log log = LogFactory.getLog("log4j.logger.org.alfresco");
    assertFalse("Expect log level ERROR for 'org.alfresco'.", log.isWarnEnabled());
}
/**
 * Initialize.
 *
 * @see javax.servlet.Filter#init(javax.servlet.FilterConfig)
 */
public void init(FilterConfig filterConfig) throws ServletException {
    log = LogFactory.getLog(RewriteFilter.class);
    String data = filterConfig.getInitParameter("dataUrl");
    if (data == null) {
        throw new ServletException("dataUrl is required.");
    } else {
        try {
            File dataFile = new File(filterConfig.getServletContext().getRealPath(data));
            ConfigParser parser = new ConfigParser(dataFile);
            serverChain = parser.getServerChain();
        } catch (Exception e) {
            throw new ServletException(e);
        }
    }
}
private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
    // Rely on default serialization, just initialize state after
    // deserialization.
    ois.defaultReadObject();

    // Initialize transient fields.
    this.logger = LogFactory.getLog(getClass());
    this.monitor = new Object();
}
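// Hedged context (field declarations assumed from the method above): the fields
// must be declared transient, since commons-logging Log implementations and plain
// lock objects are not serializable; readObject restores them after deserialization.
private transient Log logger = LogFactory.getLog(getClass());
private transient Object monitor = new Object();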
/**
 * Standard constructor only specifying the input file. The constructor will
 * parse the config and build a corresponding rule chain with the server
 * mappings included.
 *
 * @param data
 *            The config file containing the XML data structure.
 */
public ConfigParser(File data) {
    log = LogFactory.getLog(ConfigParser.class);
    try {
        LinkedList serverContainer = createServerList(data);
        if (log.isDebugEnabled()) {
            debugServers(serverContainer);
        }
        serverChain = new ServerChain(serverContainer);
    } catch (Exception e) {
        throw new RuntimeException(e);
    }
}
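// A hedged usage sketch (not from the original source): parse a proxy config
// file and obtain the resulting server chain, as the RewriteFilter.init above
// does. The file path is illustrative.
ConfigParser parser = new ConfigParser(new File("/etc/proxy/data.xml"));
ServerChain chain = parser.getServerChain();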
/**
 * Basic constructor.
 */
public ClusterContainer() {
    // Initialize the log before spawning the status checker thread.
    log = LogFactory.getLog(ClusterContainer.class);
    servers = new HashMap();
    statusChecker = new ServerStatusChecker(this, 5 * 60 * 1000);
    statusChecker.start();
}
protected void preProcessBundleContext(BundleContext platformBundleContext) throws Exception {
    // all below fail
    LogFactory.releaseAll();
    //System.setProperty("org.apache.commons.logging.LogFactory", "org.apache.commons.logging.impl.NoOpLog");
    System.setProperty("org.apache.commons.logging.Log", "org.apache.commons.logging.impl.Jdk14Logger");
    ClassLoader cl = Thread.currentThread().getContextClassLoader();
    // System.out.println("TCCL is " + cl);
    Thread.currentThread().setContextClassLoader(null);
    super.preProcessBundleContext(platformBundleContext);
}
private void testEncryptedWrite(int numDns) throws IOException {
    MiniDFSCluster cluster = null;
    try {
        Configuration conf = new Configuration();
        setEncryptionConfigKeys(conf);

        cluster = new MiniDFSCluster.Builder(conf).numDataNodes(numDns).build();

        FileSystem fs = getFileSystem(conf);

        LogCapturer logs = GenericTestUtils.LogCapturer.captureLogs(
                LogFactory.getLog(SaslDataTransferServer.class));
        LogCapturer logs1 = GenericTestUtils.LogCapturer.captureLogs(
                LogFactory.getLog(DataTransferSaslUtil.class));
        try {
            writeTestDataToFile(fs);
        } finally {
            logs.stopCapturing();
            logs1.stopCapturing();
        }
        assertEquals(PLAIN_TEXT, DFSTestUtil.readFile(fs, TEST_PATH));
        fs.close();

        if (resolverClazz == null) {
            // Test client and server negotiate cipher option
            GenericTestUtils.assertDoesNotMatch(logs.getOutput(),
                    "Server using cipher suite");
            // Check the IOStreamPair
            GenericTestUtils.assertDoesNotMatch(logs1.getOutput(),
                    "Creating IOStreamPair of CryptoInputStream and CryptoOutputStream.");
        }
    } finally {
        if (cluster != null) {
            cluster.shutdown();
        }
    }
}
private String tryDetectRegion() {
    try {
        return EC2MetadataUtils.getEC2InstanceRegion();
    } catch (AmazonClientException sce) {
        LogFactory.getLog(InstanceMetadataRegionProvider.class)
                .debug("Ignoring failure to retrieve the region: " + sce.getMessage());
        return null;
    }
}
private static void initLoggers() {
    ((Log4JLogger) NameNode.stateChangeLog).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) LogFactory.getLog(FSNamesystem.class)).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) DataNode.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) TestFiPipelines.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) DFSClient.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) FiTestUtil.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) BlockReceiverAspects.LOG).getLogger().setLevel(Level.ALL);
    ((Log4JLogger) DFSClientAspects.LOG).getLogger().setLevel(Level.ALL);
}
private void readObject(ObjectInputStream ois) throws IOException, ClassNotFoundException {
    // Rely on default serialization; just initialize state after deserialization.
    ois.defaultReadObject();

    // Initialize transient fields.
    this.logger = LogFactory.getLog(getClass());
}
public double getDurationMilli() {
    if (endNano == startNano) {
        LogFactory.getLog(getClass()).debug(
                "Likely to be a missing invocation of endTiming().");
    }
    return TimingInfo.durationMilliOf(startNano, endNano);
}
/**
 * Notified from {@link MultiFileOutputStream#fos()} when a part ready for
 * upload has been successfully created on disk. By default, this method
 * performs the following:
 * <ol>
 * <li>calls {@link #newUploadPartRequest(PartCreationEvent, File)} to
 * create an upload-part request for the newly created ciphertext file</li>
 * <li>calls {@link #appendUserAgent(AmazonWebServiceRequest, String)} to
 * append the necessary user agent string to the request</li>
 * <li>and finally submits a concurrent task, which calls the method
 * {@link #uploadPart(UploadPartRequest)}, to be performed</li>
 * </ol>
 * <p>
 * To enable parallel uploads, implementations of this method should never
 * block.
 *
 * @param event
 *            to represent the completion of a ciphertext file creation
 *            which is ready for multipart upload to S3.
 */
public void onPartCreate(PartCreationEvent event) {
    final File part = event.getPart();
    final UploadPartRequest reqUploadPart = newUploadPartRequest(event, part);
    final OnFileDelete fileDeleteObserver = event.getFileDeleteObserver();
    appendUserAgent(reqUploadPart, AmazonS3EncryptionClient.USER_AGENT);
    futures.add(es.submit(new Callable<UploadPartResult>() {
        @Override
        public UploadPartResult call() {
            // Upload the ciphertext directly via the non-encrypting
            // s3 client
            try {
                return uploadPart(reqUploadPart);
            } finally {
                // clean up part already uploaded
                if (!part.delete()) {
                    LogFactory.getLog(getClass()).debug(
                            "Ignoring failure to delete file " + part
                                    + " which has already been uploaded");
                } else {
                    if (fileDeleteObserver != null)
                        fileDeleteObserver.onFileDelete(null);
                }
            }
        }
    }));
}
private static Future<?> quietlyCallListener(
        final S3ProgressListener listener,
        final PersistableTransfer persistableTransfer) {
    try {
        listener.onPersistableTransfer(persistableTransfer);
    } catch (Throwable t) {
        // That's right, we need to suppress all errors so as to be on par
        // with the async mode where all failures will be ignored.
        LogFactory.getLog(S3ProgressPublisher.class)
                .debug("Failure from the event listener", t);
    }
    return null;
}
/**
 * Creates a new thread safe connection manager.
 *
 * @param schreg the scheme registry.
 * @param connTTL max connection lifetime, &lt;=0 implies "infinity"
 * @param connTTLTimeUnit TimeUnit of connTTL
 * @param connPerRoute mapping of maximum connections per route,
 *     provided as a dependency so it can be managed externally, e.g.
 *     for dynamic connection pool size management.
 *
 * @since 4.2
 */
public ThreadSafeClientConnManager(final SchemeRegistry schreg,
        long connTTL, TimeUnit connTTLTimeUnit, ConnPerRouteBean connPerRoute) {
    super();
    if (schreg == null) {
        throw new IllegalArgumentException("Scheme registry may not be null");
    }
    this.log = LogFactory.getLog(getClass());
    this.schemeRegistry = schreg;
    this.connPerRoute = connPerRoute;
    this.connOperator = createConnectionOperator(schreg);
    this.pool = createConnectionPool(connTTL, connTTLTimeUnit);
    this.connectionPool = this.pool;
}
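// A hedged usage sketch (not from the original source): a manager whose pooled
// connections expire after 30 seconds, capped at 20 connections per route.
// SchemeRegistryFactory.createDefault() is the stock HttpClient 4.x registry.
SchemeRegistry registry = SchemeRegistryFactory.createDefault();
ThreadSafeClientConnManager cm = new ThreadSafeClientConnManager(
        registry, 30, TimeUnit.SECONDS, new ConnPerRouteBean(20));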
/**
 * Creates a new thread safe connection manager.
 *
 * @param params the parameters for this manager.
 * @param schreg the scheme registry.
 *
 * @deprecated (4.1) use {@link ThreadSafeClientConnManager#ThreadSafeClientConnManager(SchemeRegistry)}
 */
@Deprecated
public ThreadSafeClientConnManager(HttpParams params, SchemeRegistry schreg) {
    if (schreg == null) {
        throw new IllegalArgumentException("Scheme registry may not be null");
    }
    this.log = LogFactory.getLog(getClass());
    this.schemeRegistry = schreg;
    this.connPerRoute = new ConnPerRouteBean();
    this.connOperator = createConnectionOperator(schreg);
    this.pool = (ConnPoolByRoute) createConnectionPool(params);
    this.connectionPool = this.pool;
}
/**
 * Creates a new connection pool.
 */
protected AbstractConnPool() {
    super();
    this.log = LogFactory.getLog(getClass());
    this.leasedConnections = new HashSet<BasicPoolEntry>();
    this.idleConnHandler = new IdleConnectionHandler();
    this.poolLock = new ReentrantLock();
}
/**
 * <p>Gracefully shut down this controller servlet, releasing any resources
 * that were allocated at initialization.</p>
 */
public void destroy() {
    if (log.isDebugEnabled()) {
        log.debug(internal.getMessage("finalizing"));
    }

    destroyModules();
    destroyInternal();
    getServletContext().removeAttribute(Globals.ACTION_SERVLET_KEY);

    // Release our LogFactory and Log instances (if any)
    ClassLoader classLoader = Thread.currentThread().getContextClassLoader();
    if (classLoader == null) {
        classLoader = ActionServlet.class.getClassLoader();
    }
    try {
        LogFactory.release(classLoader);
    } catch (Throwable t) {
        // Servlet container doesn't have the latest version
        // of commons-logging-api.jar installed
        // :FIXME: Why is this dependent on the container's version of commons-logging?
        // Shouldn't this depend on the version packaged with Struts?
        /*
         * Reason: LogFactory.release(classLoader); was added as an attempt
         * to investigate the OutOfMemory error reported on Bugzilla #14042.
         * It was committed for version 1.136 by craigmcc.
         */
    }

    PropertyUtils.clearDescriptors();
}