Python IPython.display module, Image() example source code

The following 38 code examples, extracted from open-source Python projects, illustrate how to use IPython.display.Image().
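Before the project examples, here is a minimal sketch of the Image() call patterns they build on (the file path and URL used here are hypothetical):

from IPython.display import Image, display

# Display an image from a local file (the path here is hypothetical).
display(Image(filename='example.png'))

# Display raw bytes produced in memory, e.g. by PIL or matplotlib.
with open('example.png', 'rb') as f:
    display(Image(data=f.read(), format='png'))

# Reference an image by URL instead of embedding the bytes in the notebook.
display(Image(url='https://example.com/example.png', embed=False))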

Project: zipline-chinese    Author: zhanghan1990
def display_graph(g, format='svg', include_asset_exists=False):
    """
    Display a TermGraph interactively from within IPython.
    """
    try:
        import IPython.display as display
    except ImportError:
        raise NoIPython("IPython is not installed.  Can't display graph.")

    if format == 'svg':
        display_cls = display.SVG
    elif format in ("jpeg", "png"):
        display_cls = partial(display.Image, format=format, embed=True)

    out = BytesIO()
    _render(g, out, format, include_asset_exists=include_asset_exists)
    return display_cls(data=out.getvalue())
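This snippet (and the identical one from catalyst below) depends on names defined elsewhere in the module. A minimal sketch of the assumed surrounding imports, where NoIPython and _render are the project's own helpers rather than part of IPython:

from functools import partial
from io import BytesIO  # the project may instead import BytesIO from six

# NoIPython is the project's own exception type (assumed here, not part of IPython).
class NoIPython(Exception):
    pass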
Project: catalyst    Author: enigmampc
def display_graph(g, format='svg', include_asset_exists=False):
    """
    Display a TermGraph interactively from within IPython.
    """
    try:
        import IPython.display as display
    except ImportError:
        raise NoIPython("IPython is not installed.  Can't display graph.")

    if format == 'svg':
        display_cls = display.SVG
    elif format in ("jpeg", "png"):
        display_cls = partial(display.Image, format=format, embed=True)

    out = BytesIO()
    _render(g, out, format, include_asset_exists=include_asset_exists)
    return display_cls(data=out.getvalue())
Project: LIE    Author: EmbraceLife
def classify(image_path):
    # Display the image.
    display(Image(image_path))

    # Use the Inception model to classify the image.
    pred = model.classify(image_path=image_path)

    # Print the scores and names for the top-10 predictions.
    model.print_scores(pred=pred, k=10, only_first_name=True)    


# ## Panda

# This image of a panda is included in the Inception data-file. The Inception model is quite confident that this image shows a panda, with a classification score of 89.23% and the next highest score being only 0.86% for an indri, which is another exotic animal.

# In[8]:
Project: LIE    Author: EmbraceLife
def plot_image(image):
    # Assume the pixel-values are scaled between 0 and 255.

    if False:
        # Convert the pixel-values to the range between 0.0 and 1.0
        image = np.clip(image/255.0, 0.0, 1.0)

        # Plot using matplotlib.
        plt.imshow(image, interpolation='lanczos')
        plt.show()
    else:
        # Ensure the pixel-values are between 0 and 255.
        image = np.clip(image, 0.0, 255.0)

        # Convert pixels to bytes.
        image = image.astype(np.uint8)

        # Convert to a PIL-image and display it.
        display(PIL.Image.fromarray(image))


# Normalize an image so its values are between 0.0 and 1.0. This is useful for plotting the gradient.

# In[13]:
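The normalization function itself is not included in this excerpt; a minimal min-max sketch over a numpy array would be:

import numpy as np

def normalize_image(x):
    # Shift and scale the pixel-values so they span exactly [0.0, 1.0].
    x_min = x.min()
    x_max = x.max()
    return (x - x_min) / (x_max - x_min)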
Project: tensorlight    Author: bsautermeister
def image_from_array(img_array, format='png'):
    """Creates an image object from a given numpy array.
    Parameters
    ----------
    img_array : numpy.ndarray
        The image data, which can have 1 or 3 color channels.
    Returns
    -------
    IPython.display.Image
        An image object for plots.
    """
    factor = 1
    if utils.image.is_float_image(img_array):
        factor = 255

    img_data = np.uint8(img_array * factor)
    f = StringIO()
    img_data = utils.image.to_rgb(img_data)
    arr = PIL.Image.fromarray(img_data)
    arr.save(f, format)
    return Image(data=f.getvalue())
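A hypothetical usage follows (note that StringIO above is the Python 2 class; on Python 3 the in-memory buffer would be io.BytesIO):

import numpy as np
from IPython.display import display

# A random 64x64 RGB float image in [0, 1]; is_float_image() should scale it by 255.
random_img = np.random.rand(64, 64, 3)
display(image_from_array(random_img, format='png'))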
Project: tissuelab    Author: VirtualPlants
def vtk_show(renderer, width=400, height=300):
    """
    Takes vtkRenderer instance and returns an IPython Image with the rendering.
    """
    renderWindow = vtk.vtkRenderWindow()
    renderWindow.SetOffScreenRendering(1)
    renderWindow.AddRenderer(renderer)
    renderWindow.SetSize(width, height)
    renderWindow.Render()

    windowToImageFilter = vtk.vtkWindowToImageFilter()
    windowToImageFilter.SetInput(renderWindow)
    windowToImageFilter.Update()

    writer = vtk.vtkPNGWriter()
    writer.SetWriteToMemory(1)
    writer.SetInputConnection(windowToImageFilter.GetOutputPort())
    writer.Write()
    data = str(buffer(writer.GetResult()))

    return Image(data)
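A hypothetical usage rendering a simple cone (assumes the vtk package and a notebook session; note that buffer() in vtk_show above is Python 2 only):

import vtk

# Minimal scene: a cone source piped into a mapper and an actor.
cone = vtk.vtkConeSource()
mapper = vtk.vtkPolyDataMapper()
mapper.SetInputConnection(cone.GetOutputPort())
actor = vtk.vtkActor()
actor.SetMapper(mapper)

renderer = vtk.vtkRenderer()
renderer.SetBackground(1.0, 1.0, 1.0)
renderer.AddActor(actor)

vtk_show(renderer, width=400, height=300)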
Project: menrva    Author: amirziai
def decision_tree(X, y, regression, max_depth=3):
    from sklearn.tree import export_graphviz
    from sklearn.externals.six import StringIO  
    from IPython.core.pylabtools import figsize
    from IPython.display import Image
    figsize(12.5, 6)
    import pydot

    if regression:
        clf = DecisionTreeRegressor(max_depth=max_depth)
    else:
        clf = DecisionTreeClassifier(max_depth=max_depth)

    clf.fit(X, y)
    dot_data = StringIO()  
    export_graphviz(clf, out_file=dot_data, feature_names=list(X.columns),
                    filled=True, rounded=True,)
    graph = pydot.graph_from_dot_data(dot_data.getvalue())  
    return Image(graph.create_png())
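A hypothetical usage on scikit-learn's iris data (this assumes DecisionTreeClassifier and DecisionTreeRegressor are imported at module level, as the snippet implies, and that Graphviz is installed):

import pandas as pd
from sklearn.datasets import load_iris

iris = load_iris()
X = pd.DataFrame(iris.data, columns=iris.feature_names)

# Fit a shallow classification tree and render it inline as a PNG.
decision_tree(X, iris.target, regression=False, max_depth=3)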
Project: QScode    Author: PierreHao
def Run(self, img_path, guide_image_path='', objective=0):
        """Run deep dream"""
        self.guide_path = guide_image_path
        if self.guide_path != '':
            self.Get_guide()
        self.net.blobs.keys()
        if img_path != '':
            frame = PIL.Image.open(img_path)
            frame = imresize(frame)
            frame = np.float32(frame)
        else:
            frame = self.GenerateInputImage()
        frame_i = 0
        h, w = frame.shape[:2]
        #s = 0.05 # scale coefficient
        for i in xrange(self.epoch):
            start = time.time()
            frame = self.Deepdream(frame)
            PIL.Image.fromarray(np.uint8(frame)).save("frames/%04d.jpg"%frame_i)
            #frame = nd.affine_transform(frame, [1-s,1-s,1], [h*s/2,w*s/2,0], order=1)
            frame_i += 1
            stop = time.time()
            print "Time cost for {:d}th image: {:.3f} s".format(i,stop-start)
Project: rstviewer    Author: arne-cl
def embed_rs3_image(rs3_filepath, shrink_to_fit=True):
    """Render an RST tree given the path to an .rs3 file."""
    from IPython.display import display, Image
    display(Image(rs3topng(rs3_filepath), unconfined=not(shrink_to_fit)))
Project: xarray-simlab    Author: benbovy
def _get_display_cls(format):
    """
    Get the appropriate IPython display class for `format`.

    Returns `IPython.display.SVG` if format=='svg', otherwise
    `IPython.display.Image`.

    If IPython is not importable, return dummy function that swallows its
    arguments and returns None.
    """
    dummy = lambda *args, **kwargs: None
    try:
        import IPython.display as display
    except ImportError:
        # Can't return a display object if no IPython.
        return dummy

    if format in IPYTHON_NO_DISPLAY_FORMATS:
        # IPython can't display this format natively, so just return None.
        return dummy
    elif format in IPYTHON_IMAGE_FORMATS:
        # Partially apply `format` so that `Image` and `SVG` supply a uniform
        # interface to the caller.
        return partial(display.Image, format=format)
    elif format == 'svg':
        return display.SVG
    else:
        raise ValueError("Unknown format '%s' passed to `dot_graph`" % format)
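The two format sets referenced here are module-level constants that this excerpt does not show; a plausible sketch of their contents (mirroring how dask defines them, which is an assumption) plus a usage example:

# Assumed module-level constants, not shown in this excerpt.
IPYTHON_IMAGE_FORMATS = frozenset(['jpeg', 'png'])
IPYTHON_NO_DISPLAY_FORMATS = frozenset(['dot', 'pdf'])

display_cls = _get_display_cls('png')    # behaves like partial(Image, format='png')
img = display_cls(filename='graph.png')  # hypothetical file written by the renderer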
Project: quantum-SVM    Author: JinlongHuang
def png(self):
        from IPython.display import Image
        return Image(self._repr_png_(), embed=True)
Project: sci-pype    Author: jay-johnson
def get_job_analysis(job_id, show_plots=True, debug=False):

    job_report = {}
    if job_id == None:
        boom("Failed to start a new job")
    else:
        job_res = helper_get_job_analysis(job_id)

        if job_res["status"] != "SUCCESS":
            boom("Job=" + str(job_id) + " failed with status=" + str(job_res["status"]) + " err=" + str(job_res["error"]))
        else:
            job_report = job_res["record"]
    # end of get job analysis

    if show_plots:
        if "images" in job_report:
            for img in job_report["images"]:
                anmt(img["title"])
                lg("URL: " + str(img["image"]))
                ipyDisplay(ipyImage(url=img["image"]))
                lg("---------------------------------------------------------------------------------------")
        else:
            boom("Job=" + str(job_id) + " does not have any images yet")
        # end of if images exist
    # end of downloading job plots

    return job_report
# end of get_job_analysis
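Here ipyDisplay and ipyImage are presumably aliases for the IPython helpers, while boom, anmt and lg are the project's own logging utilities. A sketch of the assumed aliasing (not shown in this excerpt):

from IPython.display import display as ipyDisplay, Image as ipyImage

# Passing url= makes the notebook load the image from the URL
# rather than embedding the bytes in the output cell.
ipyDisplay(ipyImage(url="https://example.com/plot.png"))  # hypothetical URL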
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


#ANIMAL model (default)
#Here you select the model
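The same showarray helper recurs in several of the deepdream files below and relies on Python 2's StringIO accepting bytes; a minimal Python 3 equivalent sketch would buffer the encoded image with io.BytesIO:

import numpy as np
import PIL.Image
from io import BytesIO
from IPython.display import display, Image

def showarray_py3(a, fmt='jpeg'):
    # Clip to the valid pixel range, encode in memory, and display inline.
    a = np.uint8(np.clip(a, 0, 255))
    f = BytesIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))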
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


#ANIMAL model (default)
#Here you select the model
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))



#Here you select the model
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


#ANIMAL
#PLEASE MAKE SURE TO SELECT THE RIGHT MODEL FOR THE KEYS!!!
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))



#Here you select the model
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


#ANIMAL model (default)
#Here you select the model
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))



#Here you select the model
Project: deepdream    Author: martinkaptein
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))



#Here you select the model
Project: video-pose-extractor    Author: JustinShenk
def showBGRimage(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    a[:,:,[0,2]] = a[:,:,[2,0]] # for B,G,R order
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
Project: video-pose-extractor    Author: JustinShenk
def showmap(a, fmt='png'):
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

#def checkparam(param):
#    octave = param['octave']
#    starting_range = param['starting_range']
#    ending_range = param['ending_range']
#    assert starting_range <= ending_range, 'starting ratio should <= ending ratio'
#    assert octave >= 1, 'octave should >= 1'
#    return starting_range, ending_range, octave
Project: lddmm-ot    Author: jeanfeydy
def ishow(cls, figure_or_data, format='png', width=None, height=None,
              scale=None):
        """Display a static image of the plot described by `figure_or_data`
        in an IPython Notebook.

        positional arguments:
        - figure_or_data: The figure dict-like or data list-like object that
                          describes a plotly figure.
                          Same argument used in `py.plot`, `py.iplot`,
                          see https://plot.ly/python for examples
        - format: 'png', 'svg', 'jpeg', 'pdf'
        - width: output width
        - height: output height
        - scale: Increase the resolution of the image by `scale` amount
               Only valid for PNG and JPEG images.

        example:
        import plotly.plotly as py
        fig = {'data': [{'x': [1, 2, 3], 'y': [3, 1, 5], 'type': 'bar'}]}
        py.image.ishow(fig, 'png', scale=3)
        """
        if format == 'pdf':
            raise exceptions.PlotlyError(
                "Aw, snap! "
                "It's not currently possible to embed a pdf into "
                "an IPython notebook. You can save the pdf "
                "with the `image.save_as` or you can "
                "embed an png, jpeg, or svg.")
        img = cls.get(figure_or_data, format, width, height, scale)
        from IPython.display import display, Image, SVG
        if format == 'svg':
            display(SVG(img))
        else:
            display(Image(img))

Project: LIE    Author: EmbraceLife
def load_image(filename):
    image = PIL.Image.open(filename)

    return np.float32(image)


# Save an image as a jpeg-file. The image is given as a numpy array with pixel-values between 0 and 255.

# In[11]:
Project: LIE    Author: EmbraceLife
def save_image(image, filename):
    # Ensure the pixel-values are between 0 and 255.
    image = np.clip(image, 0.0, 255.0)

    # Convert to bytes.
    image = image.astype(np.uint8)

    # Write the image-file in jpeg-format.
    with open(filename, 'wb') as file:
        PIL.Image.fromarray(image).save(file, 'jpeg')


# This function plots an image. Using matplotlib gives low-resolution images. Using PIL gives pretty pictures.

# In[12]:
Project: Deep-learning-with-cats    Author: AlexiaJM
def forward(self, input):
        # Return itself + the result of the two convolutions
        output = self.model(input) + input
        return output

# Image transformation network
Project: tensorflow-for-poets-2    Author: googlecodelabs
def show_image(image_path):
    display(Image(image_path))

    image_rel = image_path.replace(root,'')
    caption = "Image " + ' - '.join(attributions[image_rel].split(' - ')[:-1])
    display(HTML("<div>%s</div>" % caption))
Project: tensorlight    Author: bsautermeister
def display_image(image):
    """Display an image object.
    Remarks: Some RGB images might be displayed with changed colors.
    Parameters
    ----------
    image : IPython.display.Image
        The image to display.
    """
    if image is None:
        return

    display(image)
Project: yuuno    Author: Irrational-Encoding-Wizardry
def ipy_image(self) -> IPyImage:
        """
        Converts a clip to an image.
        """
        raw = self.environment.parent.output.bytes_of(self.first_frame)
        return IPyImage(
            data=raw,
            format="png",
            embed=True,
            unconfined=True,
            width=self.first_frame.width,
            height=self.first_frame.height
        )
Project: DeepArt    Author: jiriroz
def loadimg(self, filename):
        return np.float32(PIL.Image.open(filename))
Project: DeepArt    Author: jiriroz
def showarray(self, a, name, fmt='jpeg'):
        a = np.uint8(np.clip(a, 0, 255))
        #f = StringIO()
        #PIL.Image.fromarray(a).save(f, fmt)
        #PIL.Image.fromarray(a).save(name + '.' + fmt, fmt)
        #display(Image(data=f.getvalue()))
        if fmt == 'jpeg':
            outputfmt = 'jpg'
        else:
            outputfmt = fmt
        PIL.Image.fromarray(a).save(name + '.' + outputfmt, fmt)

    # a couple of utility functions for converting to and from Caffe's input image layout
Project: QScode    Author: PierreHao
def showarray(a, fmt='jpeg'):
    """Display image in windows"""
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


# Class DD: DeepDream
Project: QScode    Author: PierreHao
def Get_guide(self):
        """Generate guide image feature"""
        guide = np.float32(imresize(PIL.Image.open(self.guide_path),224))
        h,w = guide.shape[:2]
        src, dst = self.net.blobs['data'], self.net.blobs[self.end]
        src.reshape(1,3,h,w)
        src.data[0] = self.Preprocess(guide)
        self.net.forward(end=self.end)
        self.guide_features = dst.data[0].copy()
        self.flag = 1
Project: QScode    Author: PierreHao
def showarray(a, fmt='jpeg'):
    """Display image in windows"""
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))


# Class DD: DeepDream
Project: QScode    Author: PierreHao
def Get_guide(self):
        """Generate guide image feature"""
        guide = np.float32(imresize(PIL.Image.open(self.guide_path),224))
        h,w = guide.shape[:2]
        src, dst = self.net.blobs['data'], self.net.blobs[self.end]
        src.reshape(1,3,h,w)
        src.data[0] = self.Preprocess(guide)
        self.net.forward(end=self.end)
        self.guide_features = dst.data[0].copy()
        self.flag = 1
Project: crayimage    Author: yandexdataschool
def draw_to_notebook(layers, **kwargs):
    """
    Draws a network diagram in an IPython notebook
    :parameters:
        - layers : list or NeuralNet instance
            List of layers or the neural net to draw.
        - **kwargs : see the docstring of make_pydot_graph for other options
    """
    from IPython.display import Image
    layers = (layers.get_all_layers() if hasattr(layers, 'get_all_layers')
              else layers)
    dot = make_pydot_graph(layers, **kwargs)
    return Image(dot.create_png())
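A hypothetical usage with a tiny lasagne stack (make_pydot_graph comes from the same module; the layer sizes here are illustrative and assume lasagne is installed):

import lasagne.layers as L

l_in = L.InputLayer(shape=(None, 1, 28, 28))
l_hidden = L.DenseLayer(l_in, num_units=64)
l_out = L.DenseLayer(l_hidden, num_units=10)

# Render the network diagram inline as a PNG.
draw_to_notebook([l_in, l_hidden, l_out])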
Project: LIE    Author: EmbraceLife
def resize_image(image, size=None, factor=None):
    # If a rescaling-factor is provided then use it.
    if factor is not None:
        # Scale the numpy array's shape for height and width.
        size = np.array(image.shape[0:2]) * factor

        # The size is floating-point because it was scaled.
        # PIL requires the size to be integers.
        size = size.astype(int)
    else:
        # Ensure the size has length 2.
        size = size[0:2]

    # The height and width is reversed in numpy vs. PIL.
    size = tuple(reversed(size))

    # Ensure the pixel-values are between 0 and 255.
    img = np.clip(image, 0.0, 255.0)

    # Convert the pixels to 8-bit bytes.
    img = img.astype(np.uint8)

    # Create PIL-object from numpy array.
    img = PIL.Image.fromarray(img)

    # Resize the image.
    img_resized = img.resize(size, PIL.Image.LANCZOS)

    # Convert 8-bit pixel values back to floating-point.
    img_resized = np.float32(img_resized)

    return img_resized


# ## DeepDream Algorithm

# ### Gradient

# The following helper-functions calculate the gradient of an input image for use in the DeepDream algorithm. The Inception 5h model can accept images of any size, but very large images may use many gigabytes of RAM. In order to keep the RAM usage low we will split the input image into smaller tiles and calculate the gradient for each of the tiles.
# 
# However, this may result in visible lines in the final images produced by the DeepDream algorithm. We therefore choose the tiles randomly so the locations of the tiles are always different. This makes the seams between the tiles invisible in the final DeepDream image.

# This is a helper-function for determining an appropriate tile-size. The desired tile-size is e.g. 400x400 pixels, but the actual tile-size will depend on the image-dimensions.

# In[16]:
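The tile-size helper itself is not reproduced in this excerpt; a minimal sketch of such a computation, assuming the convention of rounding to a whole number of tiles and then dividing the dimension evenly, is:

import math

def get_tile_size(num_pixels, tile_size=400):
    # num_pixels: image size along one dimension; tile_size: desired tile size.
    # Use at least one tile, then spread the pixels evenly across the tiles.
    num_tiles = max(1, int(round(num_pixels / tile_size)))
    return int(math.ceil(num_pixels / num_tiles))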
Project: tissuelab    Author: VirtualPlants
def vtk_show_polydata(polydata, width=400, height=300, position=(0,0,-160), colormap_name='glasbey', **kwargs):
    """
    Takes vtkRenderer instance and returns an IPython Image with the rendering.
    """
    from tissuelab.gui.vtkviewer.colormap_utils import colormap_from_file
    from tissuelab.gui.vtkviewer.vtk_utils import define_lookuptable, get_polydata_cell_data

    point_radius = kwargs.get('point_radius',1.0)

    if (polydata.GetNumberOfCells() == 0) and (polydata.GetNumberOfPoints() > 0):
        sphere = vtk.vtkSphereSource()
        sphere.SetRadius(point_radius)
        sphere.SetThetaResolution(12)
        sphere.SetPhiResolution(12)
        glyph = vtk.vtkGlyph3D()
        glyph.SetScaleModeToDataScalingOff()
        glyph.SetColorModeToColorByScalar()
        glyph.SetSource(sphere.GetOutput())
        glyph.SetInput(polydata)
        glyph.Update()
        polydata = glyph.GetOutput()

    # colormap = colormap_from_file("/Users/gcerutti/Developpement/openalea/oalab-tissue/tissuelab/share/data/colormaps/glasbey.lut",name="glasbey")
    colormap = load_colormaps()[colormap_name]

    irange = kwargs.get('intensity_range', None)

    cell_data = get_polydata_cell_data(polydata)
    lut = define_lookuptable(cell_data,colormap_points=colormap._color_points,colormap_name=colormap.name,intensity_range=irange)

    VtkMapper = vtk.vtkPolyDataMapper()
    VtkMapper.SetInputConnection(polydata.GetProducerPort())
    VtkMapper.SetLookupTable(lut)

    VtkActor = vtk.vtkActor()
    VtkActor.SetMapper(VtkMapper)

    VtkRenderer = vtk.vtkRenderer()
    VtkRenderer.SetBackground(1.0, 1.0, 1.0)
    VtkRenderer.AddActor(VtkActor)

    VtkRenderer.GetActiveCamera().SetPosition(*position)

    return vtk_show(VtkRenderer, width=width, height=height)