The following 26 code examples, extracted from open-source Python projects, illustrate how to use moviepy.editor.VideoFileClip().
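Before the project excerpts, here is a minimal sketch of the basic VideoFileClip workflow: opening a file, reading its metadata, trimming a subclip, and re-encoding it. The file names demo.mp4 and demo_trimmed.mp4 are placeholders and do not come from any of the projects below.

# A minimal, self-contained sketch; "demo.mp4" and "demo_trimmed.mp4" are hypothetical paths.
from moviepy.editor import VideoFileClip

clip = VideoFileClip("demo.mp4")
print(clip.duration, clip.fps, clip.size)          # basic metadata: seconds, frames/s, (width, height)

trimmed = clip.subclip(0, min(5, clip.duration))   # keep at most the first 5 seconds
trimmed.write_videofile("demo_trimmed.mp4", audio=False)

clip.close()                                       # release the underlying ffmpeg reader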
def main():
    # reading in an image
    #image = (mpimg.imread('test_images/solidWhiteRight.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidWhiteCurve.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidYellowCurve.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidYellowCurve2.jpg') * 255).astype('uint8')
    #image = (mpimg.imread('test_images/solidYellowLeft.jpg') * 255).astype('uint8')
    image = (mpimg.imread('test_images/whiteCarLaneSwitch.jpg') * 255).astype('uint8')

    processImage = process_image(image)
    plt.imshow(processImage)
    plt.show()

    # Make video
    white_output = 'white.mp4'
    clip1 = VideoFileClip("solidWhiteRight.mp4")
    white_clip = clip1.fl_image(process_image)  # NOTE: this function expects color images!!
    white_clip.write_videofile(white_output, audio=False)

    # Make video
    yellow_output = 'yellow.mp4'
    clip2 = VideoFileClip('solidYellowLeft.mp4')
    yellow_clip = clip2.fl_image(process_image)
    yellow_clip.write_videofile(yellow_output, audio=False)
def generate_thumbnail(self):
    self.thumbnail_uuid = hash_file(self.file_path) + ".jpg"
    thumbnail_loc = os.path.join(self.dir_path, self.thumbnail_uuid)

    # Grab a frame 5% into the clip to use as the thumbnail source
    clip = VideoFileClip(self.file_path)
    time_mark = clip.duration * 0.05
    clip.save_frame(thumbnail_loc, t=time_mark)

    # Crop to a centered square and resize to 256x256 with Wand/ImageMagick
    with Image(filename=thumbnail_loc) as img:
        with img.clone() as image:
            size = image.width if image.width < image.height else image.height
            image.crop(width=size, height=size, gravity='center')
            image.resize(256, 256)
            image.background_color = Color("#EEEEEE")
            image.format = 'jpeg'
            image.save(filename=thumbnail_loc)
def add_text_to_movie(movie_fol, movie_name, out_movie_name, subs, fontsize=50, txt_color='red', font='Xolonium-Bold'):
    # Should install ImageMagick
    # For centos6: https://www.vultr.com/docs/install-imagemagick-on-centos-6
    from moviepy import editor

    def annotate(clip, txt, txt_color=txt_color, fontsize=fontsize, font=font):
        """ Writes a text at the bottom of the clip. """
        txtclip = editor.TextClip(txt, fontsize=fontsize, font=font, color=txt_color)
        # txtclip = txtclip.on_color((clip.w, txtclip.h + 6), color=(0, 0, 255), pos=(6, 'center'))
        cvc = editor.CompositeVideoClip([clip, txtclip.set_pos(('center', 'bottom'))])
        return cvc.set_duration(clip.duration)

    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    annotated_clips = [annotate(video.subclip(from_t, to_t), txt) for (from_t, to_t), txt in subs]
    final_clip = editor.concatenate_videoclips(annotated_clips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name))
def mash(phrase1, phrase2, outfile=None):
    phrase1_videos = get_videos(phrase1)
    phrase2_videos = get_videos(phrase2)
    videos = phrase2_videos + phrase1_videos
    random.shuffle(videos)

    clips = []
    for video in videos:
        clip = mp.VideoFileClip(video)
        # Take a 0.3-second slice from the middle of each source video
        start = clip.duration / 2
        clip = clip.subclip(start, start + 0.3)
        clips.append(clip)

    if outfile is None:
        outfile = phrase1 + phrase2 + '.mp4'
        outfile = outfile.replace(' ', '_')

    composite = mp.concatenate_videoclips(clips, method="compose")
    composite.write_videofile(outfile, fps=24)
def randomize_video(videofile, segment_length):
    original_video = mp.VideoFileClip(videofile)
    duration = original_video.duration

    clips = []
    clip_start = 0
    while clip_start < duration:
        clip_end = clip_start + segment_length
        if clip_end > duration:
            clip_end = duration
        clip = original_video.subclip(clip_start, clip_end)
        clips.append(clip)
        clip_start = clip_end

    random.shuffle(clips)
    final_video = mp.concatenate_videoclips(clips)
    final_video.write_videofile('random.mp4', codec="libx264",
                                temp_audiofile='temp-audio.m4a',
                                remove_temp=True, audio_codec='aac')
def repeat_frames(videofile, segment_length, repeat):
    original_video = mp.VideoFileClip(videofile)
    duration = original_video.duration

    clips = []
    clip_start = 0
    while clip_start < duration:
        clip_end = clip_start + segment_length
        if clip_end > duration:
            clip_end = duration
        clip = original_video.subclip(clip_start, clip_end)
        # Append the same segment `repeat` times
        for i in range(0, repeat):
            clips.append(clip)
        clip_start = clip_end

    final_video = mp.concatenate_videoclips(clips)
    final_video.write_videofile('repeated.mp4', codec="libx264",
                                temp_audiofile='temp-audio.m4a',
                                remove_temp=True, audio_codec='aac')
def write_wav(self, video_obj, target_wav_file):
    '''
    Writes the audio stream of a video as a wav suitable as input to HTK

    Parameters
    ----------
    video_obj: a moviepy VideoFileClip
    target_wav_file: path to write the wav file to

    Returns
    -------
    None
    '''
    assert isinstance(video_obj, VideoFileClip), "video needs to be an instance of VideoFileClip"

    # Write audio stream of video to file in the desired format
    video_obj.audio.write_audiofile(target_wav_file,
                                    fps=16000,                   # Set fps to 16k
                                    codec='pcm_s16le',
                                    ffmpeg_params=['-ac', '1'])  # Convert to mono
def GetDuration(clip):
    if '.mp4' in str(clip):
        return VideoFileClip(clip).duration
    elif '.mp3' in str(clip):
        return eyed3.load('{}'.format(clip)).info.time_secs
def configureVideo(self, upload_id, video, thumbnail, caption=''):
    clip = VideoFileClip(video)
    self.uploadPhoto(photo=thumbnail, caption=caption, upload_id=upload_id)
    data = json.dumps({
        'upload_id': upload_id,
        'source_type': 3,
        'poster_frame_index': 0,
        'length': 0.00,
        'audio_muted': False,
        'filter_type': 0,
        'video_result': 'deprecated',
        'clips': {
            'length': clip.duration,
            'source_type': '3',
            'camera_position': 'back',
        },
        'extra': {
            'source_width': clip.size[0],
            'source_height': clip.size[1],
        },
        'device': self.DEVICE_SETTINTS,
        '_csrftoken': self.token,
        '_uuid': self.uuid,
        '_uid': self.username_id,
        'caption': caption,
    })
    return self.SendRequest('media/configure/?video=1', self.generateSignature(data))
def main(video_filename, fit_filename, output_filename, fit_offset=0, duration=0, strain=150, bitrate='34000000'):
    v = VideoFileClip(video_filename)
    f = FitFile(fit_filename)

    fit = list()
    lfit = 0
    altgain = 0
    lastchange = 0
    for msg in f.get_messages('record'):
        d = dict()
        d.update(msg.get_values())
        fit.append(d)
        lfit += 1
        if lfit > 1:
            try:
                gain = fit[-1].get('altitude') - fit[-2].get('altitude')
            except:
                gain = 0
            if gain > 0:
                altgain += gain
        fit[-1]['altgain'] = altgain

    ovr = OVR(v, fit, fit_offset, strain)
    if duration:
        nv = v.subclip(t_end=duration).fl_image(ovr)
    else:
        nv = v.fl_image(ovr)
    nv.write_videofile(output_filename, progress_bar=True, bitrate=bitrate)
def _make_video(subclip_paths, filename):
    clips = []
    for subclip_path in subclip_paths:
        clips.append(mpy.VideoFileClip(subclip_path))
    final_clip = mpy.concatenate_videoclips(clips, method='compose')
    final_clip.write_videofile(filename)
    for subclip_path in subclip_paths:
        os.remove(subclip_path)
def post(self, instance_id):
    if not self.is_exist(instance_id):
        abort(404)
    if not self.is_allowed(instance_id):
        abort(403)

    uploaded_file = request.files["file"]
    folder_path = thumbnail_utils.get_preview_folder_name(
        "originals", instance_id
    )
    if ".png" in uploaded_file.filename:
        thumbnail_utils.save_file(
            folder_path, instance_id, uploaded_file, size=None
        )
        thumbnail_utils.generate_preview_variants(instance_id)
        return thumbnail_utils.get_preview_url_path(instance_id), 201

    elif ".mp4" in uploaded_file.filename:
        file_name = "%s.mp4" % instance_id
        folder = thumbnail_utils.create_folder(folder_path)
        file_path = os.path.join(folder, file_name)
        picture_path = os.path.join(folder, "%s.png" % instance_id)

        uploaded_file.save(file_path + '.tmp')
        clip = VideoFileClip(file_path + '.tmp')
        clip = clip.resize(height=720)
        clip.save_frame(picture_path, round(clip.duration / 2))
        thumbnail_utils.generate_preview_variants(instance_id)
        clip.write_videofile(file_path)

        return {}, 201

    else:
        abort(400, "Wrong file format")
def cut_movie(movie_fol, movie_name, out_movie_name, subclips_times):
    from moviepy import editor
    # subclips_times [(3, 4), (6, 17), (38, 42)]
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    subclips = []
    for from_t, to_t in subclips_times:
        clip = video.subclip(from_t, to_t)
        subclips.append(clip)
    final_clip = editor.concatenate_videoclips(subclips)
    final_clip.write_videofile(op.join(movie_fol, out_movie_name))
def crop_movie(movie_fol, movie_name, out_movie_name, crop_ys=(60, 1170)):
    from moviepy import editor
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    crop_video = video.crop(y1=crop_ys[0], y2=crop_ys[1])
    crop_video.write_videofile(op.join(movie_fol, out_movie_name))
def create_animated_gif(movie_fol, movie_name, out_movie_name, fps=10):
    from moviepy import editor
    video = editor.VideoFileClip(op.join(movie_fol, movie_name))
    video.write_gif(op.join(movie_fol, out_movie_name), fps=fps)
def get_av_clips(file):
    video_clip = mp.VideoFileClip(file)
    audio_clip = mp.AudioFileClip(file)
    return video_clip, audio_clip
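Since get_av_clips returns the video and audio streams of the same file as separate clips, a typical follow-up is to recombine them (for example after processing one of them) with set_audio. The sketch below is a hypothetical usage example; input.mp4 and rebuilt.mp4 are placeholder file names, not part of the original project.

# Hypothetical usage of get_av_clips: re-attach the extracted audio track to the video.
video_clip, audio_clip = get_av_clips("input.mp4")
rebuilt = video_clip.set_audio(audio_clip)
rebuilt.write_videofile("rebuilt.mp4")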
def dump_frames(video_path, output_directory, frames_per_second):
    """Dump frames at frames_per_second from a video to output_directory.

    If frames_per_second is None, the clip's fps attribute is used instead."""
    clip = VideoFileClip(video_path)
    info_path = '{}/info.json'.format(output_directory)
    name_format = '{}/frame%04d.png'.format(output_directory)

    if frames_per_second is None:
        frames_per_second = clip.fps

    frames_already_dumped_helper = lambda: \
        frames_already_dumped(video_path, output_directory,
                              frames_per_second, info_path,
                              name_format, clip.duration)

    if frames_already_dumped_helper():
        logging.info('Frames for {} exist, skipping...'.format(video_path))
        return

    successfully_wrote_images = False
    try:
        clip.write_images_sequence(name_format, fps=frames_per_second)
        successfully_wrote_images = True
    except Exception as e:
        logging.error("Failed to dump images for %s", video_path)
        logging.error(e)

    if successfully_wrote_images:
        info = {'frames_per_second': frames_per_second,
                'input_video_path': os.path.abspath(video_path)}
        with open(info_path, 'w') as info_file:
            json.dump(info, info_file)

        if not frames_already_dumped_helper():
            logging.error(
                "Images for {} don't seem to be dumped properly!".format(
                    video_path))
def label_clip(video_path, label, start_second, end_second):
    clip = VideoFileClip(video_path)
    text_clip = TextClip(label, fontsize=40, color='white', bg_color='red')
    text_clip = text_clip.set_pos(('center', 'bottom'))
    text_clip = text_clip.set_start(start_second).set_duration(end_second - start_second)
    return CompositeVideoClip([clip, text_clip])