我们从 Python 开源项目中提取了以下 36 个代码示例,用于说明如何使用 moviepy.editor.VideoClip()。
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp the index explicitly: at t == duration the computed index is
        # one past the end. The original bare ``except:`` masked that
        # IndexError but also silently swallowed every other error.
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp instead of a bare ``except:``: the only expected failure is
        # indexing one past the end at t == duration, but the bare except
        # also hid genuine bugs (e.g. a TypeError from bad input).
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
def make_frame(self, t):
    """Return the next tub image for moviepy's ``VideoClip``.

    The time argument *t* is ignored: a frame counter (``self.iRec``) is
    advanced on every call instead, so frames must be requested
    sequentially. Returns ``None`` once the counter reaches the end of the
    recorded range.
    """
    self.iRec += 1

    # Stop once we run past the available records.
    if self.iRec >= self.num_rec - 1:
        return None

    record = self.tub.get_record(self.iRec)
    # 8-bit RGB array stored under the camera key.
    return record['cam/image_array']
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp the frame index: at t == duration it would run one past the
        # end. The original bare ``except:`` hid that IndexError along with
        # every other error.
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF (quiet output).

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp the frame index rather than catching a bare ``except:``,
        # which silently swallowed every error, not just the end-of-clip
        # IndexError.
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration, verbose=False)
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF.

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp the frame index: at t == duration it would run one past the
        # end. The original bare ``except:`` masked that IndexError but also
        # hid every other error.
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration)

# A sloppy way of creating sample labels: 3 samples are generated for each
# of the 24 chord types.
def make_gif(images, fname, duration=2, true_image=False):
    """Write a sequence of image arrays to an animated GIF (quiet output).

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp the frame index instead of using a bare ``except:`` that
        # silently swallowed all errors, not just the expected end-of-clip
        # IndexError.
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    clip = mpy.VideoClip(make_frame, duration=duration)
    clip.write_gif(fname, fps=len(images) / duration, verbose=False)
def ipython_display(self, *args, **kwargs):
    """
    Fixes inheritance naming issue with moviepy's ipython_display.

    A copy of this segment is displayed as a plain ``VideoClip`` so the
    notebook renderer treats it as an ordinary video.
    """
    seg_copy = self.copy()
    # Assign the class object directly: ``VideoClip().__class__`` built a
    # throwaway clip instance only to retrieve the very same class object.
    seg_copy.__class__ = VideoClip
    return seg_copy.ipython_display(*args, **kwargs)
def write_video_clips_to_directory(self, video_clips: List[VideoClip], directory: str, *,
                                   file_extension: str = VIDEO_EXTENSION,
                                   audio: Union[str, bool] = True, **kwargs):
    """Write each clip in *video_clips* to *directory*.

    Files are named by the clip's position in the list (0, 1, 2, ...) with
    *file_extension* appended. Remaining keyword arguments are forwarded to
    ``write_video_clip_to_file``.
    """
    for clip_number, clip in enumerate(tqdm(video_clips)):
        destination = os.path.join(directory, f"{clip_number}{file_extension}")
        self.write_video_clip_to_file(clip, destination, audio=audio,
                                      verbose=False, progress_bar=False,
                                      **kwargs)
def write_video_clip_to_file(self, video_clip: VideoClip, output_path: Opt[str] = None, *,
                             audio: Union[str, bool] = True, verbose: bool = False,
                             progress_bar: bool = True, **kwargs):
    """
    Writes a video clip to file in the specified directory

    Parameters
    ----------
    video_clip

    output_path

    audio
        Audio for the video clip. Can be True to enable, False to disable,
        or an external audio file.

    verbose
        Whether output to stdout should include extra information during writing

    progress_bar
        Whether to output progress information to stdout

    kwargs
        List of other keyword arguments to pass to moviepy's write_videofile
    """
    audio_bitrate = f"{self.audio_bitrate}k"
    # The crf flag goes first, ahead of any user-supplied ffmpeg params.
    ffmpeg_params = ['-crf', str(self.crf), *self.ffmpeg_params]

    video_clip.write_videofile(output_path, audio=audio, preset=self.preset,
                               codec=self.codec, audio_codec=self.audio_codec,
                               audio_bitrate=audio_bitrate,
                               ffmpeg_params=ffmpeg_params,
                               verbose=verbose, progress_bar=progress_bar,
                               **kwargs)

    return output_path
def run(self, args):
    '''
    Load the images from a tub and create a movie from them.

    Parses command-line args, loads the drive config, then renders every
    tub record to a video file via moviepy, using ``self.make_frame`` as
    the frame source.
    '''
    import moviepy.editor as mpy

    args, parser = self.parse_args(args)

    if args.tub is None:
        parser.print_help()
        return

    conf = os.path.expanduser(args.config)

    if not os.path.exists(conf):
        print("No config file at location: %s. Add --config to specify "
              "location or run from dir containing config.py." % conf)
        return

    try:
        cfg = dk.load_config(conf)
    # Narrowed from a bare ``except:`` which also trapped SystemExit and
    # KeyboardInterrupt; report the actual error before bailing out.
    except Exception as e:
        print("Exception while loading config from", conf, ":", e)
        return

    self.tub = Tub(args.tub)
    self.num_rec = self.tub.get_num_records()
    # Frame cursor consumed by self.make_frame (sequential access).
    self.iRec = 0

    print('making movie', args.out, 'from', self.num_rec, 'images')
    clip = mpy.VideoClip(self.make_frame,
                         duration=(self.num_rec // cfg.DRIVE_LOOP_HZ) - 1)
    clip.write_videofile(args.out, fps=cfg.DRIVE_LOOP_HZ)
    print('done')
def make_gif(frames, fps=8, width=320, scale=11, filename='stencil.gif'):
    """Render *frames* frames to an animated GIF written to *filename*."""
    frame_fn = make_frames(frames, width, scale)
    total_seconds = frames / fps
    animation = mpy.VideoClip(make_frame=frame_fn, duration=total_seconds)
    animation.write_gif(filename, fps=fps)
def make_gif(images, fname, duration=2, true_image=False, salience=False, salIMGS=None):
    """Write image frames to an animated GIF, optionally as a salience mask.

    Parameters
    ----------
    images : sequence of numpy arrays
        Frames to render. When ``true_image`` is False, values are assumed
        to lie in [-1, 1] and are rescaled to 8-bit [0, 255].
    fname : str
        Output GIF path.
    duration : float
        Total clip length in seconds.
    true_image : bool
        If True, frames are only cast to uint8 without rescaling.
    salience : bool
        If True, render the salience mask built from ``salIMGS`` instead of
        the plain clip.
    salIMGS : sequence of numpy arrays, optional
        Salience frames; required when ``salience`` is True.
    """
    import moviepy.editor as mpy

    def make_frame(t):
        # Clamp the frame index: the original bare ``except:`` masked the
        # end-of-clip IndexError along with every other error.
        i = min(int(len(images) / duration * t), len(images) - 1)
        x = images[i]
        if true_image:
            return x.astype(np.uint8)
        # Rescale from [-1, 1] to [0, 255].
        return ((x + 1) / 2 * 255).astype(np.uint8)

    def make_mask(t):
        # Same clamping fix as make_frame.
        i = min(int(len(salIMGS) / duration * t), len(salIMGS) - 1)
        return salIMGS[i]

    clip = mpy.VideoClip(make_frame, duration=duration)
    if salience == True:
        mask = mpy.VideoClip(make_mask, ismask=True, duration=duration)
        clipB = clip.set_mask(mask)
        # Fixed: chain off clipB — the original wrote ``clip.set_opacity(0)``,
        # discarding the set_mask result from the line above.
        clipB = clipB.set_opacity(0)
        mask = mask.set_opacity(0.1)
        # NOTE(review): only the mask is written; clipB is prepared but
        # unused (a clipB.write_gif call was commented out upstream).
        mask.write_gif(fname, fps=len(images) / duration, verbose=False)
    else:
        clip.write_gif(fname, fps=len(images) / duration, verbose=False)
def compose(self) -> VideoClip:
    """ Composes the music video into a VideoClip """
    # Normalize every segment to the target dimensions, then apply each
    # segment's configured effects.
    segments = [segment.crop_scale(self.dimensions) for segment in self.segments]
    segments = [segment.apply_effects() for segment in segments]

    # Add buffers for crossfaded video segments: when the NEXT segment
    # crossfades in, extend the current segment with a trailing buffer of
    # the crossfade's duration so there is footage to fade over.
    buffered_video_segments = []
    for index, segment in enumerate(segments):
        buffered_video_segments.append(segment)
        # The last segment has no successor, so it never needs a buffer.
        if index == len(segments) - 1:
            continue
        next_segment = segments[index + 1]
        for effect in next_segment.effects:
            if isinstance(effect, v_effects.CrossFade):
                buffer = segment.trailing_buffer(effect.duration)
                # Fade the buffer's audio out over the crossfade window.
                if buffer.audio:
                    buffer = buffer.set_audio(buffer.audio.audio_fadeout(effect.duration))
                buffered_video_segments.append(buffer)
    segments = buffered_video_segments

    # Build composite video
    composite_video_segments = [segments[0]]
    for index, segment in enumerate(segments[1:]):
        # Start current segment where previous segment ends in composite video.
        # NOTE: index here is the offset into segments[1:], so
        # composite_video_segments[index] is the segment placed just before
        # this one.
        previous_segment = composite_video_segments[index]
        segment = segment.set_start(previous_segment.end)
        # Apply any crossfade for the current segment: pull its start back
        # by the fade duration so it overlaps the previous segment's buffer,
        # and fade video (and audio, if present) in.
        for effect in segment.effects:
            if isinstance(effect, v_effects.CrossFade):
                segment = segment.set_start(previous_segment.end - effect.duration)
                segment = segment.crossfadein(effect.duration)
                if segment.audio:
                    segment = segment.set_audio(segment.audio.audio_fadein(effect.duration))
        composite_video_segments.append(segment)

    music_video = CompositeVideoClip(composite_video_segments)

    # An external audio track, when configured, replaces the composed
    # segments' audio entirely.
    if self.audio_file:
        music_video.audio = AudioFileClip(self.audio_file)

    return music_video