The following code examples, drawn from open-source Python projects, illustrate how to use audioop.getsample().
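Before the project examples, here is a minimal sketch of the call itself. audioop.getsample(fragment, width, index) returns the value of sample number index from a bytes-like fragment whose samples are width bytes wide, in the machine's native byte order. The sample values below are made up for illustration:

import audioop
import sys

# Four 16-bit samples in native byte order: 0, 1, -1, 32767.
values = (0, 1, -1, 32767)
fragment = b''.join(v.to_bytes(2, sys.byteorder, signed=True) for v in values)

print(audioop.getsample(fragment, 2, 0))  # 0
print(audioop.getsample(fragment, 2, 2))  # -1
print(audioop.getsample(fragment, 2, 3))  # 32767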
def test_issue7673(self):
    state = None
    for data, size in INVALID_DATA:
        size2 = size
        self.assertRaises(audioop.error, audioop.getsample, data, size, 0)
        self.assertRaises(audioop.error, audioop.max, data, size)
        self.assertRaises(audioop.error, audioop.minmax, data, size)
        self.assertRaises(audioop.error, audioop.avg, data, size)
        self.assertRaises(audioop.error, audioop.rms, data, size)
        self.assertRaises(audioop.error, audioop.avgpp, data, size)
        self.assertRaises(audioop.error, audioop.maxpp, data, size)
        self.assertRaises(audioop.error, audioop.cross, data, size)
        self.assertRaises(audioop.error, audioop.mul, data, size, 1.0)
        self.assertRaises(audioop.error, audioop.tomono, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.tostereo, data, size, 0.5, 0.5)
        self.assertRaises(audioop.error, audioop.add, data, data, size)
        self.assertRaises(audioop.error, audioop.bias, data, size, 0)
        self.assertRaises(audioop.error, audioop.reverse, data, size)
        self.assertRaises(audioop.error, audioop.lin2lin, data, size, size2)
        self.assertRaises(audioop.error, audioop.ratecv, data, size, 1, 1, 1, state)
        self.assertRaises(audioop.error, audioop.lin2ulaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2alaw, data, size)
        self.assertRaises(audioop.error, audioop.lin2adpcm, data, size, state)
def createDragData(self):
    self.drag = QtGui.QDrag(self)
    data = QtCore.QMimeData()
    wave_len = self.selection[1] + 1 - self.selection[0]
    samples = self.current_data[self.selection[0] * 256 + self.offset:(self.selection[1] + 1) * 256 + self.offset]
    data.setData('audio/wavesamples', samples)
    data.setText(self.current_source)
    path = QtGui.QPainterPath()
    sampwidth_int = self.current_sampwidth_int / 2
    path.moveTo(0, sampwidth_int - audioop.getsample(samples, self.current_sampwidth, 0))
    for s in xrange(1, len(samples) / 2):
        path.lineTo(s, sampwidth_int - audioop.getsample(samples, self.current_sampwidth, s))
    wave_size = self.main.wavetable_view.width() / 64
    pixmap = QtGui.QPixmap(wave_size * wave_len, 48)
    pixmap.fill(QtCore.Qt.transparent)
    qp = QtGui.QPainter(pixmap)
    qp.setRenderHints(qp.Antialiasing)
    qp.scale((wave_size * wave_len / path.boundingRect().width()), 48. / self.current_sampwidth_int)
    qp.drawPath(path)
    qp.end()
    self.drag.setPixmap(pixmap)
    self.drag.setMimeData(data)
    self.drag.exec_()
def test_string(self):
    data = 'abcd'
    size = 2
    self.assertRaises(TypeError, audioop.getsample, data, size, 0)
    self.assertRaises(TypeError, audioop.max, data, size)
    self.assertRaises(TypeError, audioop.minmax, data, size)
    self.assertRaises(TypeError, audioop.avg, data, size)
    self.assertRaises(TypeError, audioop.rms, data, size)
    self.assertRaises(TypeError, audioop.avgpp, data, size)
    self.assertRaises(TypeError, audioop.maxpp, data, size)
    self.assertRaises(TypeError, audioop.cross, data, size)
    self.assertRaises(TypeError, audioop.mul, data, size, 1.0)
    self.assertRaises(TypeError, audioop.tomono, data, size, 0.5, 0.5)
    self.assertRaises(TypeError, audioop.tostereo, data, size, 0.5, 0.5)
    self.assertRaises(TypeError, audioop.add, data, data, size)
    self.assertRaises(TypeError, audioop.bias, data, size, 0)
    self.assertRaises(TypeError, audioop.reverse, data, size)
    self.assertRaises(TypeError, audioop.lin2lin, data, size, size)
    self.assertRaises(TypeError, audioop.ratecv, data, size, 1, 1, 1, None)
    self.assertRaises(TypeError, audioop.lin2ulaw, data, size)
    self.assertRaises(TypeError, audioop.lin2alaw, data, size)
    self.assertRaises(TypeError, audioop.lin2adpcm, data, size, None)
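The test above encodes the Python 3 contract that audioop functions accept only bytes-like fragments: passing a str raises TypeError rather than audioop.error. A quick illustration:

import audioop

audioop.getsample(b'\x00\x00\x01\x00', 2, 0)  # bytes-like: accepted

try:
    audioop.getsample('abcd', 2, 0)  # str: rejected
except TypeError as exc:
    print('rejected:', exc)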
def fadeout(self, seconds, target_volume=0.0):
    """Fade the end of the sample out to the target volume (usually zero) in the given time."""
    assert not self.__locked
    seconds = min(seconds, self.duration)
    i = self.frame_idx(self.duration - seconds)
    begin = self.__frames[:i]
    end = self.__frames[i:]  # we fade this chunk
    numsamples = len(end) / self.__samplewidth
    decrease = 1.0 - target_volume
    _sw = self.__samplewidth  # optimization
    _getsample = audioop.getsample  # optimization
    faded = Sample.get_array(_sw, [int(_getsample(end, _sw, i) * (1.0 - i * decrease / numsamples))
                                   for i in range(int(numsamples))])
    end = faded.tobytes()
    if sys.byteorder == "big":
        end = audioop.byteswap(end, self.__samplewidth)
    self.__frames = begin + end
    return self
def fadein(self, seconds, start_volume=0.0):
    """Fade the start of the sample in from the starting volume (usually zero) in the given time."""
    assert not self.__locked
    seconds = min(seconds, self.duration)
    i = self.frame_idx(seconds)
    begin = self.__frames[:i]  # we fade this chunk
    end = self.__frames[i:]
    numsamples = len(begin) / self.__samplewidth
    increase = 1.0 - start_volume
    _sw = self.__samplewidth  # optimization
    _getsample = audioop.getsample  # optimization
    _incr = increase / numsamples  # optimization
    faded = Sample.get_array(_sw, [int(_getsample(begin, _sw, i) * (i * _incr + start_volume))
                                   for i in range(int(numsamples))])
    begin = faded.tobytes()
    if sys.byteorder == "big":
        begin = audioop.byteswap(begin, self.__samplewidth)
    self.__frames = begin + end
    return self
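Both fade methods apply a linear gain ramp one sample at a time: audioop.getsample() reads each value, the scaled results are collected into an array, and the array's bytes replace the original chunk. A standalone sketch of the same technique on a raw mono fragment, outside the Sample class (the helper name and typecode table are my own, not part of the project above):

import array
import audioop

def linear_fadeout(fragment, width, target_volume=0.0):
    # Gain ramps linearly from 1.0 at the first sample to target_volume at the last.
    numsamples = len(fragment) // width
    decrease = 1.0 - target_volume
    typecode = {1: 'b', 2: 'h', 4: 'i'}[width]  # 'i' is 4 bytes on common CPython builds
    faded = array.array(typecode, (
        int(audioop.getsample(fragment, width, i) * (1.0 - i * decrease / numsamples))
        for i in range(numsamples)))
    return faded.tobytes()  # native byte order, the same order audioop uses

For example, linear_fadeout(fragment, 2) fades a 16-bit mono fragment down to silence.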
def test_getsample(self):
    for i in range(3):
        self.assertEqual(audioop.getsample(data[0], 1, i), i)
        self.assertEqual(audioop.getsample(data[1], 2, i), i)
        self.assertEqual(audioop.getsample(data[2], 4, i), i)
def test_getsample(self):
    for w in 1, 2, 4:
        data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
        self.assertEqual(audioop.getsample(data, w, 0), 0)
        self.assertEqual(audioop.getsample(data, w, 1), 1)
        self.assertEqual(audioop.getsample(data, w, 2), -1)
        self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
        self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
def setStereoWave(self, stream):
    sampwidth = stream.getsampwidth()
    left_delta = 2 ** (8 * sampwidth)
    right_delta = left_delta * 2
    frames = stream.getnframes()
    ratio = frames / 255
    data = stream.readframes(float('inf'))
    left_data = audioop.tomono(data, sampwidth, 1, 0)
    right_data = audioop.tomono(data, sampwidth, 0, 1)
    wavepath = QtGui.QPainterPath()
    try:
        for frame_set in xrange(256):
            left_min = left_max = right_min = right_max = 0
            for frame in xrange(ratio):
                try:
                    pos = frame + frame_set * ratio
                    left_value = audioop.getsample(left_data, sampwidth, pos)
                    left_min = min(left_min, left_value)
                    left_max = max(left_max, left_value)
                    right_value = audioop.getsample(right_data, sampwidth, pos)
                    right_min = min(right_min, right_value)
                    right_max = max(right_max, right_value)
                except:
                    break
            wavepath.moveTo(frame_set, left_delta - left_min)
            wavepath.lineTo(frame_set, left_delta - left_max)
            wavepath.moveTo(frame_set, right_delta - right_min)
            wavepath.lineTo(frame_set, right_delta - right_max)
#            left_wavepath.lineTo(frame, left_sampwidth_int - left_value)
#            right_wavepath.lineTo(frame, right_sampwidth_int - right_value)
    except:
        pass
#    left_wavepath.addPath(right_wavepath)
    self.wavepath = self.scene.addPath(wavepath)
    self.wavepath.setPen(self.pen)
    self.fitInView(0, 0, 256, right_delta)
    self.centerOn(self.wavepath)
    self.setBackgroundBrush(QtCore.Qt.white)
def setMonoWave(self, stream):
    sampwidth = stream.getsampwidth()
    delta = 2 ** (8 * sampwidth)
    frames = stream.getnframes()
    ratio = frames / 255
    data = stream.readframes(float('inf'))
    wavepath = QtGui.QPainterPath()
    try:
        for frame_set in xrange(256):
            frame_min = frame_max = 0
            for frame in xrange(ratio):
                try:
                    value = audioop.getsample(data, sampwidth, frame + frame_set * ratio)
                    frame_min = min(frame_min, value)
                    frame_max = max(frame_max, value)
                except:
                    break
            wavepath.moveTo(frame_set, delta - frame_min)
            wavepath.lineTo(frame_set, delta - frame_max)
    except:
        pass
    self.wavepath = self.scene.addPath(wavepath)
    self.wavepath.setPen(self.pen)
    self.wavepath.setY(delta * .5)
    self.fitInView(0, 0, 256, delta)
    self.centerOn(self.wavepath)
    self.setBackgroundBrush(QtCore.Qt.white)
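setStereoWave() and setMonoWave() share one technique: split the fragment into fixed-width bins and reduce each bin to its minimum and maximum sample via audioop.getsample(), which is what the moveTo/lineTo pairs then draw. A Qt-free sketch of that reduction (the function name, bin count, and return format are illustrative choices):

import audioop

def peak_envelope(fragment, width, bins=256):
    # One (min, max) pair per bin; suitable input for drawing a waveform overview.
    numsamples = len(fragment) // width
    per_bin = max(1, numsamples // bins)
    envelope = []
    for b in range(bins):
        lo = hi = 0
        for i in range(b * per_bin, min((b + 1) * per_bin, numsamples)):
            value = audioop.getsample(fragment, width, i)
            lo = min(lo, value)
            hi = max(hi, value)
        envelope.append((lo, hi))
    return envelope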
def test_getsample(self):
    for w in 1, 2, 3, 4:
        data = packs[w](0, 1, -1, maxvalues[w], minvalues[w])
        self.assertEqual(audioop.getsample(data, w, 0), 0)
        self.assertEqual(audioop.getsample(bytearray(data), w, 0), 0)
        self.assertEqual(audioop.getsample(memoryview(data), w, 0), 0)
        self.assertEqual(audioop.getsample(data, w, 1), 1)
        self.assertEqual(audioop.getsample(data, w, 2), -1)
        self.assertEqual(audioop.getsample(data, w, 3), maxvalues[w])
        self.assertEqual(audioop.getsample(data, w, 4), minvalues[w])
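This test depends on module-level helpers (packs, maxvalues, minvalues) defined elsewhere in the test module. A plausible reconstruction, modeled on CPython's test suite and shown here only so the example reads standalone:

import sys

def pack(width, data):
    return b''.join(v.to_bytes(width, sys.byteorder, signed=True) for v in data)

packs = {w: (lambda *data, width=w: pack(width, data)) for w in (1, 2, 3, 4)}
maxvalues = {w: (1 << (8 * w - 1)) - 1 for w in (1, 2, 3, 4)}
minvalues = {w: -(1 << (8 * w - 1)) for w in (1, 2, 3, 4)}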
def draw_wave(self, stream, force=False):
#    print stream.getnframes()
    if self.wavepath:
        self.scene().removeItem(self.wavepath)
        self.fitInView(0, 0, 1, 1)
    self.current_sampwidth = sampwidth = stream.getsampwidth()
    self.current_sampwidth_int = delta = 2 ** (8 * sampwidth)
    if stream in self.cache and not force:
        self.current_data, wavepath = self.cache[stream]
    else:
        stream.rewind()
        frames = stream.getnframes()
        ratio = frames / 64
        if stream.getnchannels() == 2:
            data = audioop.tomono(stream.readframes(float('inf')), sampwidth, self.main.left_spin.value(), self.main.right_spin.value())
        else:
            data = stream.readframes(float('inf'))
        data = audioop.mul(data, sampwidth, self.main.gain)
        self.current_data = data
        wavepath = QtGui.QPainterPath()
        try:
            for frame_set in xrange(ratio):
                frame_min = frame_max = 0
                for frame in xrange(64):
                    try:
                        value = audioop.getsample(data, sampwidth, frame + frame_set * 64)
                        frame_min = min(frame_min, value)
                        frame_max = max(frame_max, value)
                    except:
                        break
                if frame == 0:
                    break
                wavepath.moveTo(frame_set, delta - frame_min)
                wavepath.lineTo(frame_set, delta - frame_max)
        except:
            pass
        self.cache[stream] = data, wavepath
    self.wavepath = self.scene().addPath(wavepath)
    self.wavepath.setPen(self.wave_pen)
    self.wavepath.setY(-delta * .5)
    self.wavepath.setX(self.left_margin * 2)
    self.fitInView(0, 0, self.zoom_values[self.zoom], delta)
    if not force:
        self.centerOn(self.wavepath)
    self.right_margin_item.setX(len(self.current_data) / self.current_sampwidth / 64)
    visible = self.mapToScene(self.viewport().rect()).boundingRect()
    if visible.width() > self.wavepath.boundingRect().width():
        self.scene().setSceneRect(-self.left_margin, 0, visible.width(), delta)
    else:
        self.scene().setSceneRect(-self.left_margin, 0, self.wavepath.boundingRect().width(), delta)
def dropEvent(self, event):
    items = [i for i in self.items(event.pos()) if isinstance(i, WavePlaceHolderItem)]
    if not items:
        return
    index = items[0].index
    mimedata = event.mimeData()
    if mimedata.hasFormat('audio/wavesamples'):
        data = mimedata.data('audio/wavesamples')
        slice_range = len(data) / 256
        self.dropAction.emit(DROP_WAVE, True, self.waveobj_list[index:index + slice_range], '"{}"'.format(mimedata.text()))
        for w in xrange(slice_range):
            values = []
            for s in xrange(128):
                values.append(audioop.getsample(data, 2, w * 128 + s) * 31)
            self.waveobj_list[index + w].setValues(values)
        for r in self.waveobj_list:
            r.preview_rect.setHighlight(False)
        self.dropAction.emit(DROP_WAVE, False, self.waveobj_list[index:index + slice_range], '')
        event.accept()
    elif mimedata.hasFormat('audio/waves'):
        start, end = map(int, str(mimedata.data('audio/waves')).split(':'))
        if start == index:
            event.ignore()
            return
        wave_len = end - start + 1
        if start == end:
            drop_text = 'wave {}'.format(start + 1)
        else:
            drop_text = 'wave {} to {}'.format(start + 1, end + 1)
        if mimedata.hasFormat('data/reference'):
            ref = mimedata.referenceData()
            if ref == self.main:
                source = self.waveobj_list
            else:
                source = ref.waveobj_list
            drop_text = 'from "{}" {}'.format(ref.wavetable_name, drop_text)
        else:
            source = self.waveobj_list
        data_list = []
        self.dropAction.emit(DROP_WAVE, True, self.waveobj_list[index:index + wave_len], drop_text)
        for wave_obj in source[start:end + 1]:
            data_list.append(wave_obj.values)
        for i, wave_obj in enumerate(self.waveobj_list[index:index + wave_len]):
            wave_obj.setValues(data_list[i])
        for r in self.waveobj_list:
            r.preview_rect.setHighlight(False)
        selection = tuple(xrange(index, index + wave_len))
        for i, r in enumerate(self.waveobj_list):
            r.preview_rect.setSelected(True if i in selection else False)
        self.selection = selection
        self.dropAction.emit(DROP_WAVE, False, self.waveobj_list[index:index + wave_len], '')
        event.accept()
    else:
        event.ignore()