Java code examples for the org.webrtc.Logging class
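
All of these snippets report diagnostics through the static helpers on org.webrtc.Logging: Logging.d(tag, msg) for debug, Logging.w(tag, msg) for warnings, Logging.e(tag, msg) for errors, plus an e(tag, msg, Throwable) overload used in VideoSource.stop() below. As a minimal sketch of that call pattern (assuming only that the libwebrtc Android artifact is on the classpath; the class name, TAG value, and helper methods are placeholders, not part of any of the quoted projects):

import org.webrtc.Logging;

public final class LoggingSample {
  // Same tagging convention as the snippets below: one TAG constant per class.
  private static final String TAG = "LoggingSample";

  public void run() {
    Logging.d(TAG, "run started");
    if (!tryStart()) {
      Logging.w(TAG, "start rejected, falling back");
    }
    try {
      mightThrow();
    } catch (IllegalStateException e) {
      // Overload that also records the Throwable, as used in VideoSource.stop().
      Logging.e(TAG, "operation failed", e);
    }
  }

  private boolean tryStart() {
    return false; // placeholder so the sketch compiles
  }

  private void mightThrow() {
    throw new IllegalStateException("demo failure");
  }
}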

Project: react-native-webrtc    File: SurfaceViewRenderer.java
private void updateFrameDimensionsAndReportEvents(VideoRenderer.I420Frame frame) {
  synchronized (layoutLock) {
    if (frameWidth != frame.width || frameHeight != frame.height
        || frameRotation != frame.rotationDegree) {
      Logging.d(TAG, getResourceName() + "Reporting frame resolution changed to "
          + frame.width + "x" + frame.height + " with rotation " + frame.rotationDegree);
      if (rendererEvents != null) {
        rendererEvents.onFrameResolutionChanged(frame.width, frame.height, frame.rotationDegree);
      }
      frameWidth = frame.width;
      frameHeight = frame.height;
      frameRotation = frame.rotationDegree;
      post(new Runnable() {
        @Override public void run() {
          requestLayout();
        }
      });
    }
  }
}
Project: AppRTC-Android    File: WebRtcAudioTrack.java
private boolean stopPlayout() {
  Logging.d(TAG, "stopPlayout");
  assertTrue(audioThread != null);
  logUnderrunCount();
  audioThread.stopThread();

  final Thread aThread = audioThread;
  audioThread = null;
  if (aThread != null) {
    Logging.d(TAG, "Stopping the AudioTrackThread...");
    aThread.interrupt();
    if (!ThreadUtils.joinUninterruptibly(aThread, AUDIO_TRACK_THREAD_JOIN_TIMEOUT_MS)) {
      Logging.e(TAG, "Join of AudioTrackThread timed out.");
    }
    Logging.d(TAG, "AudioTrackThread has now been stopped.");
  }

  releaseAudioResources();
  return true;
}
Project: AppRTC-Android    File: WebRtcAudioManager.java
private int getNativeOutputSampleRate() {
  // Override this if we're running on an old emulator image which only
  // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
  if (WebRtcAudioUtils.runningOnEmulator()) {
    Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
    return 8000;
  }
  // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
  // If so, use that value and return here.
  if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
    Logging.d(TAG, "Default sample rate is overridden to "
            + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
    return WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  // No overrides available. Deliver best possible estimate based on default
  // Android AudioManager APIs.
  final int sampleRateHz;
  if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
    sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
  } else {
    sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
  return sampleRateHz;
}
Project: AndroidRTC    File: WebRtcAudioManager.java
private int getNativeOutputSampleRate() {
  // Override this if we're running on an old emulator image which only
  // supports 8 kHz and doesn't support PROPERTY_OUTPUT_SAMPLE_RATE.
  if (WebRtcAudioUtils.runningOnEmulator()) {
    Logging.d(TAG, "Running emulator, overriding sample rate to 8 kHz.");
    return 8000;
  }
  // Default can be overridden by WebRtcAudioUtils.setDefaultSampleRateHz().
  // If so, use that value and return here.
  if (WebRtcAudioUtils.isDefaultSampleRateOverridden()) {
    Logging.d(TAG, "Default sample rate is overridden to "
            + WebRtcAudioUtils.getDefaultSampleRateHz() + " Hz");
    return WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  // No overrides available. Deliver best possible estimate based on default
  // Android AudioManager APIs.
  final int sampleRateHz;
  if (WebRtcAudioUtils.runningOnJellyBeanMR1OrHigher()) {
    sampleRateHz = getSampleRateOnJellyBeanMR10OrHigher();
  } else {
    sampleRateHz = WebRtcAudioUtils.getDefaultSampleRateHz();
  }
  Logging.d(TAG, "Sample rate is set to " + sampleRateHz + " Hz");
  return sampleRateHz;
}
Project: AndroidRTC    File: WebRtcAudioRecord.java
private boolean startRecording() {
  Logging.d(TAG, "startRecording");
  assertTrue(audioRecord != null);
  assertTrue(audioThread == null);
  try {
    audioRecord.startRecording();
  } catch (IllegalStateException e) {
    reportWebRtcAudioRecordStartError("AudioRecord.startRecording failed: " + e.getMessage());
    return false;
  }
  if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
    reportWebRtcAudioRecordStartError("AudioRecord.startRecording failed - incorrect state :"
        + audioRecord.getRecordingState());
    return false;
  }
  audioThread = new AudioRecordThread("AudioRecordJavaThread");
  audioThread.start();
  return true;
}
Project: react-native-webrtc    File: SurfaceViewRenderer.java
@Override
public void surfaceCreated(final SurfaceHolder holder) {
  Logging.d(TAG, getResourceName() + "Surface created.");
  synchronized (layoutLock) {
    isSurfaceCreated = true;
  }
  tryCreateEglSurface();
}
Project: react-native-webrtc    File: SurfaceViewRenderer.java
@Override
public void surfaceChanged(SurfaceHolder holder, int format, int width, int height) {
  Logging.d(TAG, getResourceName() + "Surface changed: " + width + "x" + height);
  synchronized (layoutLock) {
    surfaceSize.x = width;
    surfaceSize.y = height;
  }
  // Might have a pending frame waiting for a surface of correct size.
  runOnRenderThread(renderFrameRunnable);
}
Project: AppRTC-Android    File: CallActivity.java
@Override
public synchronized void renderFrame(VideoRenderer.I420Frame frame) {
  if (target == null) {
    Logging.d(TAG, "Dropping frame in proxy because target is null.");
    VideoRenderer.renderFrameDone(frame);
    return;
  }

  target.renderFrame(frame);
}
Project: AppRTC-Android    File: CallActivity.java
@Override
public synchronized void onFrame(VideoFrame frame) {
  if (target == null) {
    Logging.d(TAG, "Dropping frame in proxy because target is null.");
    return;
  }

  target.onFrame(frame);
}
Project: AppRTC-Android    File: CallActivity.java
private void setSwappedFeeds(boolean isSwappedFeeds) {
  Logging.d(TAG, "setSwappedFeeds: " + isSwappedFeeds);
  this.isSwappedFeeds = isSwappedFeeds;
  localProxyVideoSink.setTarget(isSwappedFeeds ? fullscreenRenderer : pipRenderer);
  remoteProxyRenderer.setTarget(isSwappedFeeds ? pipRenderer : fullscreenRenderer);
  fullscreenRenderer.setMirror(isSwappedFeeds);
  pipRenderer.setMirror(!isSwappedFeeds);
}
Project: AppRTC-Android    File: WebRtcAudioTrack.java
WebRtcAudioTrack(long nativeAudioTrack) {
  Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  this.nativeAudioTrack = nativeAudioTrack;
  audioManager =
      (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
  if (DEBUG) {
    WebRtcAudioUtils.logDeviceInfo(TAG);
  }
}
Project: AndroidRTC    File: WebRtcAudioRecord.java
@TargetApi(23)
private void logMainParametersExtended() {
  if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
    Logging.d(TAG, "AudioRecord: "
            // The frame count of the native AudioRecord buffer.
            + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
  }
}
Project: VideoCRE    File: VideoSource.java
public void stop() {
    if (mVideoCapturer != null && !mVideoCapturerStopped) {
        Logging.d(TAG, "Stop video source.");
        try {
            mVideoCapturer.stopCapture();
        } catch (InterruptedException e) {
            Logging.e(TAG, "stop", e);
        }
        mVideoCapturerStopped = true;
    }
}
Project: AppRTC-Android    File: WebRtcAudioTrack.java
private void logMainParameters() {
  Logging.d(TAG, "AudioTrack: "
          + "session ID: " + audioTrack.getAudioSessionId() + ", "
          + "channels: " + audioTrack.getChannelCount() + ", "
          + "sample rate: " + audioTrack.getSampleRate() + ", "
          // Gain (>=1.0) expressed as linear multiplier on sample values.
          + "max gain: " + audioTrack.getMaxVolume());
}
Project: AppRTC-Android    File: WebRtcAudioTrack.java
@TargetApi(21)
private static AudioTrack createAudioTrackOnLollipopOrHigher(
    int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioTrackOnLollipopOrHigher");
  // TODO(henrika): use setPerformanceMode(int) with PERFORMANCE_MODE_LOW_LATENCY to control
  // performance when Android O is supported. Add some logging in the meantime.
  final int nativeOutputSampleRate =
      AudioTrack.getNativeOutputSampleRate(AudioManager.STREAM_VOICE_CALL);
  Logging.d(TAG, "nativeOutputSampleRate: " + nativeOutputSampleRate);
  if (sampleRateInHz != nativeOutputSampleRate) {
    Logging.w(TAG, "Unable to use fast mode since requested sample rate is not native");
  }
  if (usageAttribute != DEFAULT_USAGE) {
    Logging.w(TAG, "A non default usage attribute is used: " + usageAttribute);
  }
  // Create an audio track where the audio usage is for VoIP and the content type is speech.
  return new AudioTrack(
      new AudioAttributes.Builder()
          .setUsage(usageAttribute)
          .setContentType(AudioAttributes.CONTENT_TYPE_SPEECH)
          .build(),
      new AudioFormat.Builder()
          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
          .setSampleRate(sampleRateInHz)
          .setChannelMask(channelConfig)
          .build(),
      bufferSizeInBytes,
      AudioTrack.MODE_STREAM,
      AudioManager.AUDIO_SESSION_ID_GENERATE);
}
Project: AppRTC-Android    File: WebRtcAudioTrack.java
private void releaseAudioResources() {
  Logging.d(TAG, "releaseAudioResources");
  if (audioTrack != null) {
    audioTrack.release();
    audioTrack = null;
  }
}
Project: AppRTC-Android    File: WebRtcAudioEffects.java
public static boolean isAcousticEchoCancelerBlacklisted() {
  List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForAecUsage();
  boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
  if (isBlacklisted) {
    Logging.w(TAG, Build.MODEL + " is blacklisted for HW AEC usage!");
  }
  return isBlacklisted;
}
Project: AppRTC-Android    File: WebRtcAudioEffects.java
public static boolean isNoiseSuppressorBlacklisted() {
  List<String> blackListedModels = WebRtcAudioUtils.getBlackListedModelsForNsUsage();
  boolean isBlacklisted = blackListedModels.contains(Build.MODEL);
  if (isBlacklisted) {
    Logging.w(TAG, Build.MODEL + " is blacklisted for HW NS usage!");
  }
  return isBlacklisted;
}
Project: AppRTC-Android    File: WebRtcAudioEffects.java
public boolean setAEC(boolean enable) {
  Logging.d(TAG, "setAEC(" + enable + ")");
  if (!canUseAcousticEchoCanceler()) {
    Logging.w(TAG, "Platform AEC is not supported");
    shouldEnableAec = false;
    return false;
  }
  if (aec != null && (enable != shouldEnableAec)) {
    Logging.e(TAG, "Platform AEC state can't be modified while recording");
    return false;
  }
  shouldEnableAec = enable;
  return true;
}
Project: AppRTC-Android    File: WebRtcAudioEffects.java
public boolean setNS(boolean enable) {
  Logging.d(TAG, "setNS(" + enable + ")");
  if (!canUseNoiseSuppressor()) {
    Logging.w(TAG, "Platform NS is not supported");
    shouldEnableNs = false;
    return false;
  }
  if (ns != null && (enable != shouldEnableNs)) {
    Logging.e(TAG, "Platform NS state can't be modified while recording");
    return false;
  }
  shouldEnableNs = enable;
  return true;
}
Project: AppRTC-Android    File: WebRtcAudioEffects.java
public void release() {
  Logging.d(TAG, "release");
  if (aec != null) {
    aec.release();
    aec = null;
  }
  if (ns != null) {
    ns.release();
    ns = null;
  }
}
Project: AppRTC-Android    File: WebRtcAudioManager.java
public void run() {
  final int mode = audioManager.getMode();
  if (mode == AudioManager.MODE_RINGTONE) {
    Logging.d(TAG, "STREAM_RING stream volume: "
            + audioManager.getStreamVolume(AudioManager.STREAM_RING) + " (max="
            + maxRingVolume + ")");
  } else if (mode == AudioManager.MODE_IN_COMMUNICATION) {
    Logging.d(TAG, "VOICE_CALL stream volume: "
            + audioManager.getStreamVolume(AudioManager.STREAM_VOICE_CALL) + " (max="
            + maxVoiceCallVolume + ")");
  }
}
Project: AppRTC-Android    File: WebRtcAudioManager.java
WebRtcAudioManager(long nativeAudioManager) {
  Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  this.nativeAudioManager = nativeAudioManager;
  audioManager =
      (AudioManager) ContextUtils.getApplicationContext().getSystemService(Context.AUDIO_SERVICE);
  if (DEBUG) {
    WebRtcAudioUtils.logDeviceInfo(TAG);
  }
  volumeLogger = new VolumeLogger(audioManager);
  storeAudioParameters();
  nativeCacheAudioParameters(sampleRate, outputChannels, inputChannels, hardwareAEC, hardwareAGC,
      hardwareNS, lowLatencyOutput, lowLatencyInput, proAudio, outputBufferSize, inputBufferSize,
      nativeAudioManager);
}
Project: AppRTC-Android    File: WebRtcAudioManager.java
private boolean init() {
  Logging.d(TAG, "init" + WebRtcAudioUtils.getThreadInfo());
  if (initialized) {
    return true;
  }
  Logging.d(TAG, "audio mode is: " + AUDIO_MODES[audioManager.getMode()]);
  initialized = true;
  volumeLogger.start();
  return true;
}
Project: AppRTC-Android    File: WebRtcAudioManager.java
private void dispose() {
  Logging.d(TAG, "dispose" + WebRtcAudioUtils.getThreadInfo());
  if (!initialized) {
    return;
  }
  volumeLogger.stop();
}
Project: AppRTC-Android    File: WebRtcAudioManager.java
private boolean isDeviceBlacklistedForOpenSLESUsage() {
  boolean blacklisted = blacklistDeviceForOpenSLESUsageIsOverridden
      ? blacklistDeviceForOpenSLESUsage
      : WebRtcAudioUtils.deviceIsBlacklistedForOpenSLESUsage();
  if (blacklisted) {
    Logging.d(TAG, Build.MODEL + " is blacklisted for OpenSL ES usage!");
  }
  return blacklisted;
}
Project: AppRTC-Android    File: WebRtcAudioUtils.java
public static void logDeviceInfo(String tag) {
  Logging.d(tag, "Android SDK: " + Build.VERSION.SDK_INT + ", "
          + "Release: " + Build.VERSION.RELEASE + ", "
          + "Brand: " + Build.BRAND + ", "
          + "Device: " + Build.DEVICE + ", "
          + "Id: " + Build.ID + ", "
          + "Hardware: " + Build.HARDWARE + ", "
          + "Manufacturer: " + Build.MANUFACTURER + ", "
          + "Model: " + Build.MODEL + ", "
          + "Product: " + Build.PRODUCT);
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
WebRtcAudioRecord(long nativeAudioRecord) {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  this.nativeAudioRecord = nativeAudioRecord;
  if (DEBUG) {
    WebRtcAudioUtils.logDeviceInfo(TAG);
  }
  effects = WebRtcAudioEffects.create();
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
private boolean enableBuiltInAEC(boolean enable) {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "enableBuiltInAEC(" + enable + ')');
  if (effects == null) {
    Logging.e(TAG, "Built-in AEC is not supported on this platform");
    return false;
  }
  return effects.setAEC(enable);
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
private boolean enableBuiltInNS(boolean enable) {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "enableBuiltInNS(" + enable + ')');
  if (effects == null) {
    Logging.e(TAG, "Built-in NS is not supported on this platform");
    return false;
  }
  return effects.setNS(enable);
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
private boolean startRecording() {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "startRecording");
  assertTrue(audioRecord != null);
  assertTrue(audioThread == null);

  // Starts recording from the AudioRecord instance.
  try {
    audioRecord.startRecording();
  } catch (IllegalStateException e) {
    reportWebRtcAudioRecordStartError(AudioRecordStartErrorCode.AUDIO_RECORD_START_EXCEPTION,
        "AudioRecord.startRecording failed: " + e.getMessage());
    return false;
  }

  // Verify the recording state up to two times (with a sleep in between)
  // before returning false and reporting an error.
  int numberOfStateChecks = 0;
  while (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING &&
         ++numberOfStateChecks < 2) {
    threadSleep(200);
  }
  if (audioRecord.getRecordingState() != AudioRecord.RECORDSTATE_RECORDING) {
    reportWebRtcAudioRecordStartError(
        AudioRecordStartErrorCode.AUDIO_RECORD_START_STATE_MISMATCH,
        "AudioRecord.startRecording failed - incorrect state :"
        + audioRecord.getRecordingState());
    return false;
  }

  // Create and start new high-priority thread which calls AudioRecord.read()
  // and where we also call the native DataIsRecorded() callback to feed
  // WebRTC with recorded audio.
  audioThread = new AudioRecordThread("AudioRecordJavaThread");
  audioThread.start();
  return true;
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
private boolean stopRecording() {
  threadChecker.checkIsOnValidThread();
  Logging.d(TAG, "stopRecording");
  assertTrue(audioThread != null);
  audioThread.stopThread();
  if (!ThreadUtils.joinUninterruptibly(audioThread, AUDIO_RECORD_THREAD_JOIN_TIMEOUT_MS)) {
    Logging.e(TAG, "Join of AudioRecordJavaThread timed out");
  }
  audioThread = null;
  if (effects != null) {
    effects.release();
  }
  releaseAudioResources();
  return true;
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
@TargetApi(23)
private void logMainParametersExtended() {
  if (WebRtcAudioUtils.runningOnMarshmallowOrHigher()) {
    Logging.d(TAG, "AudioRecord: "
            // The frame count of the native AudioRecord buffer.
            + "buffer size in frames: " + audioRecord.getBufferSizeInFrames());
  }
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
@TargetApi(23)
private AudioRecord createAudioRecordOnMarshmallowOrHigher(
  int sampleRateInHz, int channelConfig, int bufferSizeInBytes) {
  Logging.d(TAG, "createAudioRecordOnMarshmallowOrHigher");
  return new AudioRecord.Builder()
      .setAudioSource(AudioSource.VOICE_COMMUNICATION)
      .setAudioFormat(new AudioFormat.Builder()
          .setEncoding(AudioFormat.ENCODING_PCM_16BIT)
          .setSampleRate(sampleRateInHz)
          .setChannelMask(channelConfig)
          .build())
      .setBufferSizeInBytes(bufferSizeInBytes)
      .build();
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
private void reportWebRtcAudioRecordStartError(
    AudioRecordStartErrorCode errorCode, String errorMessage) {
  Logging.e(TAG, "Start recording error: " + errorCode + ". " + errorMessage);
  if (errorCallback != null) {
    errorCallback.onWebRtcAudioRecordStartError(errorCode, errorMessage);
  }
}
Project: AppRTC-Android    File: WebRtcAudioRecord.java
private void threadSleep(long millis) {
  try {
    Thread.sleep(millis);
  } catch (InterruptedException e) {
    Logging.e(TAG, "Thread.sleep failed: " + e.getMessage());
  }
}
Project: AndroidRTC    File: WebRtcAudioTrack.java
WebRtcAudioTrack(Context context, long nativeAudioTrack) {
  Logging.d(TAG, "ctor" + WebRtcAudioUtils.getThreadInfo());
  this.context = context;
  this.nativeAudioTrack = nativeAudioTrack;
  audioManager = (AudioManager) context.getSystemService(Context.AUDIO_SERVICE);
  if (DEBUG) {
    WebRtcAudioUtils.logDeviceInfo(TAG);
  }
}
Project: AndroidRTC    File: WebRtcAudioTrack.java
private boolean startPlayout() {
  Logging.d(TAG, "startPlayout");
  assertTrue(audioTrack != null);
  assertTrue(audioThread == null);
  if (audioTrack.getState() != AudioTrack.STATE_INITIALIZED) {
    Logging.e(TAG, "AudioTrack instance is not successfully initialized.");
    return false;
  }
  audioThread = new AudioTrackThread("AudioTrackJavaThread");
  audioThread.start();
  return true;
}
Project: AndroidRTC    File: WebRtcAudioTrack.java
private boolean setStreamVolume(int volume) {
  Logging.d(TAG, "setStreamVolume(" + volume + ")");
  assertTrue(audioManager != null);
  if (isVolumeFixed()) {
    Logging.e(TAG, "The device implements a fixed volume policy.");
    return false;
  }
  audioManager.setStreamVolume(AudioManager.STREAM_VOICE_CALL, volume, 0);
  return true;
}
Project: AndroidRTC    File: WebRtcAudioTrack.java
private void logMainParameters() {
  Logging.d(TAG, "AudioTrack: "
          + "session ID: " + audioTrack.getAudioSessionId() + ", "
          + "channels: " + audioTrack.getChannelCount() + ", "
          + "sample rate: " + audioTrack.getSampleRate() + ", "
          // Gain (>=1.0) expressed as linear multiplier on sample values.
          + "max gain: " + audioTrack.getMaxVolume());
}