The following 34 code examples, extracted from open-source Python projects, illustrate how to use pyaudio.paContinue, the stream-callback return flag that tells PyAudio to keep the stream running.
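Before the project excerpts, here is a minimal, self-contained sketch of PyAudio's callback mode. It is a hedged illustration, not taken from any of the projects below: the path example.wav is a placeholder for any PCM WAV file. The callback returns (data, pyaudio.paContinue) while audio remains and pyaudio.paComplete once the source is exhausted, which is the pattern every example on this page builds on.

import time
import wave

import pyaudio

# Hypothetical input file; replace with any PCM WAV on disk.
WAV_PATH = "example.wav"

wf = wave.open(WAV_PATH, 'rb')
p = pyaudio.PyAudio()

def callback(in_data, frame_count, time_info, status):
    # Read the next block; an empty block means the file is exhausted.
    data = wf.readframes(frame_count)
    flag = pyaudio.paContinue if len(data) > 0 else pyaudio.paComplete
    return (data, flag)

stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(),
                rate=wf.getframerate(),
                output=True,
                stream_callback=callback)

stream.start_stream()
while stream.is_active():   # the callback keeps the stream alive by returning paContinue
    time.sleep(0.1)

stream.stop_stream()
stream.close()
wf.close()
p.terminate()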
def callback(in_data, frame_count, time_info, status):
    """Audio callback of the hardware.

    This function is the callback (non-blocking) of the audio function and
    its purpose is to connect the input to the output.

    Returns:
        The input data and pyaudio.paContinue, so the stream keeps
        delivering buffers.

    Todo:
        This function should be renamed in order to give a better idea of
        its purpose. It's also where the signal processing should be done.
        It would be nice in the future to be able to load whatever signal
        blocks we want and create a desired chain at startup (or even as
        hot plug).
    """
    return (in_data, pyaudio.paContinue)
def callback(in_data, frame_count, time_info, status):
    data = wf.readframes(frame_count)
    return (data, pyaudio.paContinue)

# open stream using callback (3)
def play_sound(sound):
    try:
        wf = wave.open(sound, 'rb')

        # instantiate PyAudio (1)
        p = pyaudio.PyAudio()

        # define callback (2)
        def callback(in_data, frame_count, time_info, status):
            data = wf.readframes(frame_count)
            return (data, pyaudio.paContinue)

        # open stream using callback (3)
        stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                        channels=wf.getnchannels(),
                        rate=wf.getframerate(),
                        output=True,
                        stream_callback=callback)

        # start the stream (4)
        stream.start_stream()
    except wave.Error:
        print('Warning: caught wave.Error!')
def callback(self, in_data, count, time_info, status):
    if self.pos + count > self.range[1]:
        output = self.signal[self.pos:self.range[1], :]
        status = pa.paComplete
        if self.stop_func:
            self.stop_func()
    else:
        output = self.signal[self.pos:self.pos + count, :]
        status = pa.paContinue
    self.pos += count
    return (output.flatten().astype(np.float32).tostring(), status)
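The example above streams slices of a pre-loaded NumPy signal and switches the returned status from pa.paContinue to pa.paComplete once the selected range is exhausted. Below is a standalone sketch of the same position-cursor pattern; the 440 Hz sine, the RATE constant, and the module-level pos cursor are illustrative assumptions, not part of the project above.

import time

import numpy as np
import pyaudio

RATE = 44100

# Illustrative signal: two seconds of a 440 Hz sine, mono, float32 in [-1, 1].
t = np.arange(2 * RATE) / RATE
signal = (0.2 * np.sin(2 * np.pi * 440.0 * t)).astype(np.float32)

pos = 0

def callback(in_data, frame_count, time_info, status):
    global pos
    chunk = signal[pos:pos + frame_count]
    pos += frame_count
    # Keep delivering buffers until the signal has been fully played out.
    flag = pyaudio.paContinue if pos < len(signal) else pyaudio.paComplete
    return (chunk.tobytes(), flag)

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paFloat32, channels=1, rate=RATE,
                output=True, stream_callback=callback)
stream.start_stream()
while stream.is_active():
    time.sleep(0.05)
stream.stop_stream()
stream.close()
p.terminate()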
def callback(self, in_data, count, time_info, status):
    import numpy as np
    input = np.fromstring(in_data, dtype=np.float32).astype(types.float_)
    input_frames = len(input) // self.num_channels
    input = np.reshape(input, (input_frames, self.num_channels))
    self.signal = np.vstack((self.signal, input))
    if (self.pos + count) >= self.total_frames:
        status = pa.paComplete
        print("recording done")
        if self.stop_func:
            self.stop_func()
    else:
        status = pa.paContinue
    self.pos += count
    return (None, status)
def get_callback(self):
    def callback(in_data, frame_count, time_info, status):
        self.wavefile.writeframes(in_data)
        return in_data, pyaudio.paContinue
    return callback
def callback(in_data, frame_count, time_info, status):
    return (in_data, pyaudio.paContinue)
def callback(in_data, frame_count, time_info, status):
    data = wf.readframes(frame_count)
    return (data, pyaudio.paContinue)
def wav_callback(self, in_data, frame_count, time_info, status):
    data = self.wav.readframes(frame_count)
    flag = pyaudio.paContinue
    if self.wav.getnframes() == self.wav.tell():
        data = data.ljust(
            frame_count * self.wav.getsampwidth() * self.wav.getnchannels(),
            '\x00')
        # flag = pyaudio.paComplete
        self.event.set()
    return data, flag
def raw_callback(self, in_data, frame_count, time_info, status):
    size = frame_count * self.width * self.channels
    data = self.raw[:size]
    self.raw = self.raw[size:]
    flag = pyaudio.paContinue
    if not len(self.raw):
        data = data.ljust(frame_count * self.width * self.channels, '\x00')
        # flag = pyaudio.paComplete
        self.event.set()
    return data, flag
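The two callbacks above never return paComplete themselves; instead they pad the final block with zero bytes and set an event so another thread knows playback is about to finish and can stop the stream. Below is a hedged, standalone sketch of that pattern for Python 3; the file path and the 0.2-second drain margin are assumptions for illustration only.

import threading
import time
import wave

import pyaudio

WAV_PATH = "example.wav"   # hypothetical input file

wf = wave.open(WAV_PATH, 'rb')
done = threading.Event()
frame_size = wf.getsampwidth() * wf.getnchannels()

def callback(in_data, frame_count, time_info, status):
    data = wf.readframes(frame_count)
    if wf.tell() >= wf.getnframes():
        # Pad the last block to a full buffer and signal the waiting thread,
        # while still returning paContinue so the padded block gets played.
        data = data.ljust(frame_count * frame_size, b'\x00')
        done.set()
    return (data, pyaudio.paContinue)

p = pyaudio.PyAudio()
stream = p.open(format=p.get_format_from_width(wf.getsampwidth()),
                channels=wf.getnchannels(), rate=wf.getframerate(),
                output=True, stream_callback=callback)
stream.start_stream()
done.wait()          # the callback sets the event once the file is drained
time.sleep(0.2)      # rough margin so the final padded buffer can play out
stream.stop_stream()
stream.close()
wf.close()
p.terminate()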
def _get_stream(self, start=False, callback=None):
    """ """
    if callback is None:
        def callback(in_data, frame_count, time_info, status):
            data = self.wf.readframes(frame_count)
            return (data, pyaudio.paContinue)

    self.stream = self.pa.open(format=self.pa.get_format_from_width(self.wf.getsampwidth()),
                               channels=self.wf.getnchannels(),
                               rate=self.wf.getframerate(),
                               output=True,
                               output_device_index=self.device_index,
                               start=start,
                               stream_callback=callback)
def _stream_callback(self, in_data, frame_count, time_info, status_flags):
    chunk = AudioChunk(start_time=time_info['input_buffer_adc_time'],
                       audio=in_data,
                       freq=self._rate,
                       width=2)
    self._stream_queue.sync_q.put(chunk)
    retflag = pyaudio.paContinue if self.running else pyaudio.paComplete
    return (None, retflag)
def _process_frame(self, data, frame_count, time_info, status_flag):
    data_array = np.fromstring(data, dtype=np.int16)
    freq0 = self._spectral_analyser.process_data(data_array)
    if freq0:
        # Onset detected
        print("Note detected; fundamental frequency: ", freq0)
        midi_note_value = int(hz_to_midi(freq0)[0])
        print("Midi note value: ", midi_note_value)
        note = RTNote(midi_note_value, 100, 0.5)
        self._synth.play_note(note)
    return (data, paContinue)
def _fill_buffer(self, buff, in_data, frame_count, time_info, status_flags):
    """Continuously collect data from the audio stream, into the buffer."""
    buff.put(in_data)
    return None, pyaudio.paContinue
# [START audio_stream]
def _callback(self, in_data, frame_count, time_info, status):
    """Callback function for continuous_record.

    Checks the global var recording. If true, put frames into the queue;
    another thread will pop from the queue and write to disk. If false,
    shut down the recorder (we don't want silence or sudden time shifts
    in one recording file).
    """
    if self._recording:
        self._frames.put(in_data)
        callback_flag = pyaudio.paContinue
    else:
        callback_flag = pyaudio.paComplete
    return in_data, callback_flag
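The docstring above refers to a second thread that pops frames off the queue and writes them to disk while the callback keeps returning paContinue. Below is a hedged sketch of that producer/consumer arrangement; the sample rate, channel count, chunk size, output path, and five-second duration are illustrative assumptions, not values from the project above.

import queue
import threading
import time
import wave

import pyaudio

RATE, CHANNELS, CHUNK = 16000, 1, 1024   # illustrative capture settings
frames = queue.Queue()
recording = True

def callback(in_data, frame_count, time_info, status):
    # Queue frames while recording; end the stream cleanly once switched off.
    if recording:
        frames.put(in_data)
        return in_data, pyaudio.paContinue
    return in_data, pyaudio.paComplete

def writer(path):
    # Companion thread: pop frames from the queue and write them to disk.
    wf = wave.open(path, 'wb')
    wf.setnchannels(CHANNELS)
    wf.setsampwidth(2)                   # paInt16 -> 2 bytes per sample
    wf.setframerate(RATE)
    while recording or not frames.empty():
        try:
            wf.writeframes(frames.get(timeout=0.1))
        except queue.Empty:
            pass
    wf.close()

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=CHANNELS, rate=RATE,
                input=True, frames_per_buffer=CHUNK, stream_callback=callback)
t = threading.Thread(target=writer, args=("capture.wav",))
t.start()

time.sleep(5)             # record for five seconds (arbitrary demo duration)
recording = False         # callback returns paComplete on the next buffer
t.join()
stream.stop_stream()
stream.close()
p.terminate()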
def _fill_buffer(buff, in_data, frame_count, time_info, status_flags):
    """Continuously collect data from the audio stream, into the buffer."""
    buff.put(in_data)
    return None, pyaudio.paContinue
# [START audio_stream]
def pya_callback(self, in_data, frame_count, time_info, status):
    import pyaudio
    if status != 0:
        self.junklog("pya_callback status %d\n" % (status))

    pcm = numpy.fromstring(in_data, dtype=numpy.int16)
    if self.chan == 1:
        pcm = pcm[self.chan::2]

    assert frame_count == len(pcm)

    # time of first sample in pcm[], in seconds since start.
    adc_time = time_info['input_buffer_adc_time']
    # time of last sample
    adc_end = adc_time + (len(pcm) / float(self.cardrate))

    if self.last_adc_end != None:
        expected = (adc_end - self.last_adc_end) * float(self.cardrate)
        expected = int(round(expected))
        shortfall = expected - len(pcm)
        if abs(shortfall) > 20:
            self.junklog("pya expected %d got %d" % (expected, len(pcm)))
        #if shortfall > 100:
        #    pcm = numpy.append(numpy.zeros(shortfall, dtype=pcm.dtype), pcm)

    self.last_adc_end = adc_end

    # translate time of last sample to UNIX time
    ut = time.time()
    st = self.pya_strm.get_time()
    unix_end = (adc_end - st) + ut

    self.cardlock.acquire()
    self.cardbufs.append([pcm, unix_end])
    self.cardlock.release()

    return (None, pyaudio.paContinue)
def get_frame(self, in_data, frame_count, time_info, status):
    """
    Callback function for the pyaudio stream. Don't use directly.
    """
    while self.keep_listening:
        try:
            frame = self.queue.get(False, timeout=queue_timeout)
            return (frame, pyaudio.paContinue)
        except Empty:
            pass
    return (None, pyaudio.paComplete)
def _apiai_stt(self):
    from math import log
    import audioop
    import pyaudio
    import time

    resampler = apiai.Resampler(source_samplerate=settings['RATE'])
    request = self.ai.voice_request()
    vad = apiai.VAD()

    def callback(in_data, frame_count, time_info, status):
        frames, data = resampler.resample(in_data, frame_count)
        if settings.show_decibels:
            decibel = 20 * log(audioop.rms(data, 2) + 1, 10)
            click.echo(decibel)
        state = vad.processFrame(frames)
        request.send(data)
        state_signal = pyaudio.paContinue if state == 1 else pyaudio.paComplete
        return in_data, state_signal

    p = pyaudio.PyAudio()
    stream = p.open(format=pyaudio.paInt32,
                    input=True,
                    output=False,
                    stream_callback=callback,
                    channels=settings['CHANNELS'],
                    rate=settings['RATE'],
                    frames_per_buffer=settings['CHUNK'])
    stream.start_stream()
    click.echo("Speak!")
    while stream.is_active():
        time.sleep(0.1)
    stream.stop_stream()
    stream.close()
    p.terminate()
def callback(self, in_data, frame_count, time_info, status):
    #print "callback data len: %s" % len(in_data)
    pixels = sample(in_data)
    if pixels != None:
        self.miso.put(pixels)
    time.sleep(1.0 / 30)
    #data = wf.readframes(frame_count)
    return ('', pyaudio.paContinue)
def __init__(self, decoder_model,
             resource=RESOURCE_FILE,
             sensitivity=[],
             audio_gain=1):

    def audio_callback(in_data, frame_count, time_info, status):
        self.ring_buffer.extend(in_data)
        play_data = chr(0) * len(in_data)
        return play_data, pyaudio.paContinue

    tm = type(decoder_model)
    ts = type(sensitivity)
    if tm is not list:
        decoder_model = [decoder_model]
    if ts is not list:
        sensitivity = [sensitivity]
    model_str = ",".join(decoder_model)

    self.detector = snowboydetect.SnowboyDetect(
        resource_filename=resource.encode(), model_str=model_str.encode())
    self.detector.SetAudioGain(audio_gain)
    self.num_hotwords = self.detector.NumHotwords()

    if len(decoder_model) > 1 and len(sensitivity) == 1:
        sensitivity = sensitivity * self.num_hotwords
    if len(sensitivity) != 0:
        assert self.num_hotwords == len(sensitivity), \
            "number of hotwords in decoder_model (%d) and sensitivity " \
            "(%d) does not match" % (self.num_hotwords, len(sensitivity))
    sensitivity_str = ",".join([str(t) for t in sensitivity])
    if len(sensitivity) != 0:
        self.detector.SetSensitivity(sensitivity_str.encode())

    self.ring_buffer = RingBuffer(
        self.detector.NumChannels() * self.detector.SampleRate() * 5)

    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=False,
        format=self.audio.get_format_from_width(
            self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        stream_callback=audio_callback)
def __init__(self, decoder_model,
             resource=RESOURCE_FILE,
             sensitivity=[],
             audio_gain=1):

    def audio_callback(in_data, frame_count, time_info, status):
        self.ring_buffer.extend(in_data)
        play_data = chr(0) * len(in_data)
        return play_data, pyaudio.paContinue

    tm = type(decoder_model)
    ts = type(sensitivity)
    if tm is not list:
        decoder_model = [decoder_model]
    if ts is not list:
        sensitivity = [sensitivity]
    model_str = ",".join(decoder_model)

    self.detector = snowboydetect.SnowboyDetect(
        resource_filename=resource.encode(), model_str=model_str.encode())
    self.detector.SetAudioGain(audio_gain)
    self.num_hotwords = self.detector.NumHotwords()

    if len(decoder_model) > 1 and len(sensitivity) == 1:
        sensitivity = sensitivity * self.num_hotwords
    if len(sensitivity) != 0:
        assert self.num_hotwords == len(sensitivity), \
            "number of hotwords in decoder_model (%d) and sensitivity " \
            "(%d) does not match" % (self.num_hotwords, len(sensitivity))
    sensitivity_str = ",".join([str(t) for t in sensitivity])
    if len(sensitivity) != 0:
        self.detector.SetSensitivity(sensitivity_str.encode())

    self.ring_buffer = RingBuffer(
        self.detector.NumChannels() * self.detector.SampleRate() * 5)

    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=False,
        format=self.audio.get_format_from_width(
            self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        stream_callback=audio_callback)
def __init__(self, decoder_model,
             resource=RESOURCE_FILE,
             sensitivity=[],
             audio_gain=1):

    def audio_callback(in_data, frame_count, time_info, status):
        self.ring_buffer.extend(in_data)
        play_data = chr(0) * len(in_data)
        return play_data, pyaudio.paContinue

    tm = type(decoder_model)
    ts = type(sensitivity)
    if tm is not list:
        decoder_model = [decoder_model]
    if ts is not list:
        sensitivity = [sensitivity]
    model_str = ",".join(decoder_model)

    self.detector = snowboydetect.SnowboyDetect(
        resource_filename=resource, model_str=model_str)
    self.detector.SetAudioGain(audio_gain)
    self.num_hotwords = self.detector.NumHotwords()

    if len(decoder_model) > 1 and len(sensitivity) == 1:
        sensitivity = sensitivity * self.num_hotwords
    if len(sensitivity) != 0:
        assert self.num_hotwords == len(sensitivity), \
            "number of hotwords in decoder_model (%d) and sensitivity " \
            "(%d) does not match" % (self.num_hotwords, len(sensitivity))
    sensitivity_str = ",".join([str(t) for t in sensitivity])
    if len(sensitivity) != 0:
        self.detector.SetSensitivity(sensitivity_str)

    self.ring_buffer = RingBuffer(
        self.detector.NumChannels() * self.detector.SampleRate() * 5)

    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=False,
        format=self.audio.get_format_from_width(
            self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        stream_callback=audio_callback)
def __init__(self, decoder_model,
             resource=RESOURCE_FILE,
             sensitivity=[],
             audio_gain=1,
             detected_callback=None,
             interrupt_check=lambda: False,
             sleep_time=0.03):
    super(HotwordDetector, self).__init__()
    self.detected_callback = detected_callback
    self.interrupt_check = interrupt_check
    self.sleep_time = sleep_time
    self.kill_received = False
    self.paused = False

    def audio_callback(in_data, frame_count, time_info, status):
        self.ring_buffer.extend(in_data)
        play_data = chr(0) * len(in_data)
        return play_data, pyaudio.paContinue

    tm = type(decoder_model)
    ts = type(sensitivity)
    if tm is not list:
        decoder_model = [decoder_model]
    if ts is not list:
        sensitivity = [sensitivity]
    model_str = ",".join(decoder_model)

    self.detector = snowboydetect.SnowboyDetect(
        resource_filename=resource.encode(), model_str=model_str.encode())
    self.detector.SetAudioGain(audio_gain)
    self.num_hotwords = self.detector.NumHotwords()

    if len(decoder_model) > 1 and len(sensitivity) == 1:
        sensitivity = sensitivity * self.num_hotwords
    if len(sensitivity) != 0:
        assert self.num_hotwords == len(sensitivity), \
            "number of hotwords in decoder_model (%d) and sensitivity " \
            "(%d) does not match" % (self.num_hotwords, len(sensitivity))
    sensitivity_str = ",".join([str(t) for t in sensitivity])
    if len(sensitivity) != 0:
        self.detector.SetSensitivity(sensitivity_str.encode())

    self.ring_buffer = RingBuffer(
        self.detector.NumChannels() * self.detector.SampleRate() * 5)

    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=False,
        format=self.audio.get_format_from_width(
            self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        stream_callback=audio_callback)
def audio_callback(binsim):
    """ Wrapper for callback to hand over custom data """
    assert isinstance(binsim, BinSim)

    # The pyAudio Callback
    def callback(in_data, frame_count, time_info, status):
        # print("pyAudio callback")

        current_soundfile_list = binsim.oscReceiver.get_sound_file_list()
        if current_soundfile_list:
            binsim.soundHandler.request_new_sound_file(current_soundfile_list)

        # Get sound block. At least one convolver should exist
        binsim.block[:binsim.soundHandler.get_sound_channels(), :] = binsim.soundHandler.buffer_read()

        # Update Filters and run each convolver with the current block
        for n in range(binsim.soundHandler.get_sound_channels()):

            # Get new Filter
            if binsim.oscReceiver.is_filter_update_necessary(n):
                # print('Updating Filter')
                filterValueList = binsim.oscReceiver.get_current_values(n)
                filter = binsim.filterStorage.get_filter(Pose.from_filterValueList(filterValueList))
                binsim.convolvers[n].setIR(filter, callback.config.get('enableCrossfading'))

            left, right = binsim.convolvers[n].process(binsim.block[n, :])

            # Sum results from all convolvers
            if n == 0:
                binsim.result[:, 0] = left
                binsim.result[:, 1] = right
            else:
                binsim.result[:, 0] += left
                binsim.result[:, 1] += right

        # Finally apply Headphone Filter
        if callback.config.get('useHeadphoneFilter') == 'True':
            binsim.result[:, 0], binsim.result[:, 1] = binsim.convolverHP.process(binsim.result)

        # Scale data
        binsim.result *= 1 / float((callback.config.get('maxChannels') + 1) * 2)
        binsim.result *= callback.config.get('loudnessFactor')

        # When the last block is smaller than the blockSize, this is probably the
        # end of the file. Tell pyaudio to stop after this frame (note: this line
        # rebinds the module-level pyaudio.paContinue to the paComplete value 1).
        if binsim.block.size < callback.config.get('blockSize'):
            pyaudio.paContinue = 1

        return (binsim.result[:frame_count].tostring(), pyaudio.paContinue)

    callback.config = binsim.config
    return callback
def __init__(self, decoder_model,
             resource=RESOURCE_FILE,
             sensitivity=[],
             audio_gain=1):

    def audio_callback(in_data, frame_count, time_info, status):
        self.ring_buffer.extend(in_data)
        play_data = chr(0) * len(in_data)
        return play_data, pyaudio.paContinue

    tm = type(decoder_model)
    ts = type(sensitivity)
    if tm is not list:
        decoder_model = [decoder_model]
    if ts is not list:
        sensitivity = [sensitivity]
    model_str = ",".join(decoder_model)

    self.detector = snowboy.snowboydetect.SnowboyDetect(
        resource_filename=resource.encode(), model_str=model_str.encode())
    self.detector.SetAudioGain(audio_gain)
    self.num_hotwords = self.detector.NumHotwords()

    if len(decoder_model) > 1 and len(sensitivity) == 1:
        sensitivity = sensitivity * self.num_hotwords
    if len(sensitivity) != 0:
        assert self.num_hotwords == len(sensitivity), \
            "number of hotwords in decoder_model (%d) and sensitivity " \
            "(%d) does not match" % (self.num_hotwords, len(sensitivity))
    sensitivity_str = ",".join([str(t) for t in sensitivity])
    if len(sensitivity) != 0:
        self.detector.SetSensitivity(sensitivity_str.encode())

    self.ring_buffer = RingBuffer(
        self.detector.NumChannels() * self.detector.SampleRate() * 5)

    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=False,
        format=self.audio.get_format_from_width(
            self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        stream_callback=audio_callback)
def __init__(self, decoder_model,
             resource=RESOURCE_FILE,
             sensitivity=[],
             audio_gain=1):

    def audio_callback(in_data, frame_count, time_info, status):
        self.ring_buffer.extend(in_data)
        play_data = chr(0) * len(in_data)
        return play_data, pyaudio.paContinue

    self.decoder_model = decoder_model
    tm = type(decoder_model)
    ts = type(sensitivity)
    if tm is not list:
        decoder_model = [decoder_model]
    if ts is not list:
        sensitivity = [sensitivity]
    model_str = ",".join(decoder_model)

    self.detector = snowboydetect.SnowboyDetect(
        resource_filename=resource, model_str=str(model_str))
    self.detector.SetAudioGain(audio_gain)
    self.num_hotwords = self.detector.NumHotwords()

    if len(decoder_model) > 1 and len(sensitivity) == 1:
        sensitivity = sensitivity * self.num_hotwords
    if len(sensitivity) != 0:
        assert self.num_hotwords == len(sensitivity), \
            "number of hotwords in decoder_model (%d) and sensitivity " \
            "(%d) does not match" % (self.num_hotwords, len(sensitivity))
    sensitivity_str = ",".join([str(t) for t in sensitivity])
    if len(sensitivity) != 0:
        self.detector.SetSensitivity(sensitivity_str)

    self.ring_buffer = RingBuffer(
        self.detector.NumChannels() * self.detector.SampleRate() * 5)

    self.audio = pyaudio.PyAudio()
    self.stream_in = self.audio.open(
        input=True, output=True,
        format=self.audio.get_format_from_width(self.detector.BitsPerSample() / 8),
        channels=self.detector.NumChannels(),
        rate=self.detector.SampleRate(),
        frames_per_buffer=2048,
        #input_device_index=1,
        #output_device_index=0,
        stream_callback=audio_callback)
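All of the snowboy-style constructors above share one shape: an input stream whose callback appends raw bytes to a ring buffer and returns paContinue indefinitely, while the main thread periodically drains the buffer and feeds a detector. The generic sketch below reproduces that shape only; a collections.deque stands in for the project's RingBuffer, the RATE/CHUNK/SECONDS_KEPT values are illustrative assumptions, and the detection step is elided.

import collections
import time

import pyaudio

RATE, CHUNK = 16000, 2048          # illustrative capture settings
SECONDS_KEPT = 5

# Minimal stand-in for snowboy's RingBuffer: keeps roughly the last
# SECONDS_KEPT seconds of 16-bit mono audio (2 bytes per sample).
ring = collections.deque(maxlen=RATE * 2 * SECONDS_KEPT)

def audio_callback(in_data, frame_count, time_info, status):
    ring.extend(in_data)
    # Returning paContinue keeps the capture running until the main
    # thread stops the stream; the returned silence mirrors the
    # play_data in the examples above.
    return b'\x00' * len(in_data), pyaudio.paContinue

p = pyaudio.PyAudio()
stream = p.open(format=pyaudio.paInt16, channels=1, rate=RATE,
                input=True, output=False, frames_per_buffer=CHUNK,
                stream_callback=audio_callback)

try:
    while True:
        time.sleep(0.03)
        data = bytes(ring)         # snapshot of the buffered audio
        ring.clear()
        # ...hand `data` to a hotword detector here...
except KeyboardInterrupt:
    pass

stream.stop_stream()
stream.close()
p.terminate()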