This article collects typical usage examples of IPython.display.Image in Python. If you are asking how exactly display.Image is used, how to call it, or what real-world examples look like, the curated code samples below may help. You can also explore further usage examples from the module it belongs to, IPython.display.
A total of 15 code examples of the display.Image method are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
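Before the examples, here is a minimal sketch (not taken from any of the projects below) of the two most common ways to construct an Image object in a notebook; the file name and URL are placeholders:

from IPython.display import Image, display

# Embed a local file into the notebook output (placeholder path)
display(Image(filename='figures/example.png'))

# Reference a remote image by URL instead of embedding it; width is optional
Image(url='https://example.com/logo.png', width=200)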
Example 1: AddMLPModel
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def AddMLPModel(model, data):
    size = 28 * 28 * 1
    sizes = [size, size * 2, size * 2, 10]
    layer = data
    for i in range(len(sizes) - 1):
        layer = brew.fc(model, layer, 'dense_{}'.format(i), dim_in=sizes[i], dim_out=sizes[i + 1])
        layer = brew.relu(model, layer, 'relu_{}'.format(i))
    softmax = brew.softmax(model, layer, 'softmax')
    return softmax
# ### LeNet Model Definition
#
# **Note**: This is the model used when the flag *USE_LENET_MODEL=True*
#
# Below is another possible (and very powerful) architecture called LeNet. The primary difference from the MLP model is that LeNet is a Convolutional Neural Network (CNN), and therefore uses convolutional layers ([Conv](https://caffe2.ai/docs/operators-catalogue.html#conv)), max pooling layers ([MaxPool](https://caffe2.ai/docs/operators-catalogue.html#maxpool)), [ReLUs](https://caffe2.ai/docs/operators-catalogue.html#relu), *and* fully-connected ([FC](https://caffe2.ai/docs/operators-catalogue.html#fc)) layers. A full explanation of how a CNN works is beyond the scope of this tutorial but here are a few good resources for the curious reader:
#
# - [Stanford cs231 CNNs for Visual Recognition](http://cs231n.github.io/convolutional-networks/) (**Recommended**)
# - [Explanation of Kernels in Image Processing](https://en.wikipedia.org/wiki/Kernel_%28image_processing%29)
# - [Convolutional Arithmetic Tutorial](http://deeplearning.net/software/theano_versions/dev/tutorial/conv_arithmetic.html)
#
# Notice that this function also uses Brew. However, this time we add more than just FC and Softmax layers.
# In[5]:
Example 2: show
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def show(self, exec_widget=True):
    super(Viewer, self).show()
    self.viewAll()
    rec = self.app.desktop().screenGeometry()
    self.move(rec.width() - self.size().width(),
              rec.height() - self.size().height())
    if not exec_widget:
        # Close the widget shortly after showing it so the call does not block.
        timer = QtCore.QTimer()
        # timer.timeout.connect(self.close)
        timer.singleShot(20, self.close)
    self.app.exec_()
    try:
        from IPython.display import Image
        return Image(self.name)
    except ImportError as e:
        print(e)
Example 3: logoNotebook
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def logoNotebook(symbol, token='', version='', filter=''):
    '''This is a helper function, but the Google APIs URL is standardized.

    https://iexcloud.io/docs/api/#logo
    Data updated at 8am UTC daily.

    Args:
        symbol (str): Ticker to request
        token (str): Access token
        version (str): API version
        filter (str): filters: https://iexcloud.io/docs/api/#filter-results

    Returns:
        image: result
    '''
    _raiseIfNotStr(symbol)
    url = logo(symbol, token, version, filter)['url']
    # ImageI is IPython.display.Image imported under an alias (see the import hint above).
    return ImageI(url=url)
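A hypothetical call in a notebook cell might look like the line below; the ticker and token are placeholders, and the returned Image is rendered automatically as the cell output:

logoNotebook('AAPL', token='YOUR_IEX_TOKEN')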
Example 4: embed_mp4_as_gif
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def embed_mp4_as_gif(filename):
    """Make a temporary gif version of an mp4 using ffmpeg for embedding in
    IPython. Intended for use in Jupyter notebooks."""
    if not os.path.exists(filename):
        print('file does not exist.')
        return
    dirname = os.path.dirname(filename)
    basename = os.path.basename(filename)
    newfile = tempfile.NamedTemporaryFile()
    newname = newfile.name + '.gif'
    if len(dirname) != 0:
        os.chdir(dirname)
    os.system('ffmpeg -i ' + basename + ' ' + newname)
    try:
        with open(newname, 'rb') as f:
            display(Image(f.read(), format='png'))
    finally:
        os.remove(newname)
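Assuming the helper above is defined in a notebook together with its imports (os, tempfile, and Image/display from IPython.display), a call looks like the line below; the path is a placeholder. Note that the helper shells out to ffmpeg via os.system, so ffmpeg must be on the PATH and the file name should not contain spaces or shell metacharacters.

embed_mp4_as_gif('renders/rollout.mp4')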
Example 5: save_to_img
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def save_to_img(src, output_path_name, src_type="tensor", channel_order="cwd", scale=255):
    if src_type == "tensor":
        src_arr = np.asarray(src) * scale
    elif src_type == "array":
        src_arr = src * scale
    else:
        print("save tensor error, cannot parse src type.")
        return False
    if channel_order == "cwd":
        # Move the channel axis from position 0 to position 2 (the HWC layout PIL expects).
        src_arr = (np.moveaxis(src_arr, 0, 2)).astype(np.uint8)
    elif channel_order == "wdc":
        src_arr = src_arr.astype(np.uint8)
    else:
        print("save tensor error, cannot parse channel order.")
        return False
    src_img = PIL.Image.fromarray(src_arr)
    src_img.save(output_path_name)
    return True
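A small usage sketch (save_to_img itself is defined above); the output path is a placeholder:

import numpy as np
import PIL.Image

# A random 3x32x32 array in channel-first (CHW) order with values in [0, 1]
fake = np.random.rand(3, 32, 32)
save_to_img(fake, '/tmp/fake.png', src_type="array", channel_order="cwd")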
Example 6: display_graph
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def display_graph(g, format='svg', include_asset_exists=False):
    """
    Display a TermGraph interactively from within IPython.
    """
    try:
        import IPython.display as display
    except ImportError:
        raise NoIPython("IPython is not installed. Can't display graph.")

    if format == 'svg':
        display_cls = display.SVG
    elif format in ("jpeg", "png"):
        # Embed raster output directly in the notebook.
        display_cls = partial(display.Image, format=format, embed=True)

    out = BytesIO()
    _render(g, out, format, include_asset_exists=include_asset_exists)
    return display_cls(data=out.getvalue())
Example 7: _jplot
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def _jplot(*args):
    from IPython.display import Image

    with _MAGICS_LOCK:
        # Render the plot to a temporary PNG file, then embed it.
        f, tmp = tempfile.mkstemp(".png")
        os.close(f)
        base, ext = os.path.splitext(tmp)
        img = output(
            output_formats=["png"],
            output_name_first_page_number="off",
            output_name=base,
        )
        all = [img]
        all.extend(args)
        _plot(all)
        image = Image(tmp)
        os.unlink(tmp)
        return image
Example 8: draw
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def draw(self, layout='neato', **kwargs):
    """Draw the graph.

    Optional layout=['neato'|'dot'|'twopi'|'circo'|'fdp'|'nop']
    will use the specified graphviz layout method.

    :param layout: pygraphviz layout algorithm (default: 'neato')
    :type layout: str
    """
    f, filePath = tempfile.mkstemp(suffix='.png')
    self.g.layout(prog=layout)
    self.g.draw(filePath)
    i = Image(filename=filePath)
    display(i)
    os.close(f)
    os.remove(filePath)
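The method above belongs to a wrapper class whose self.g is a pygraphviz graph (the docstring refers to pygraphviz layouts). The same render-to-temp-file-and-display pattern can be reproduced standalone roughly as follows; this is a sketch, not part of the original source, and it requires Graphviz to be installed:

import os
import tempfile

import pygraphviz as pgv
from IPython.display import Image, display

g = pgv.AGraph(directed=True)
g.add_edge('input', 'conv1')
g.add_edge('conv1', 'softmax')

fd, path = tempfile.mkstemp(suffix='.png')
g.layout(prog='dot')
g.draw(path)
display(Image(filename=path))
os.close(fd)
os.remove(path)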
Example 9: forward
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def forward(self, input):
    # Return the input plus the result of the two convolutions (a residual connection).
    output = self.model(input) + input
    return output

# Image transformation network
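The forward method above implements a residual connection: the block's convolutions are applied and the original input is added back, and the trailing comment suggests such blocks are used inside an image transformation network. A minimal sketch of a module this method could belong to, assuming PyTorch; the layer sizes are illustrative and not taken from the original source:

import torch
import torch.nn as nn

class ResidualBlock(nn.Module):
    def __init__(self, channels=64):
        super(ResidualBlock, self).__init__()
        # Two 3x3 convolutions that preserve spatial size and channel count,
        # so the output can be added element-wise to the input.
        self.model = nn.Sequential(
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
            nn.ReLU(inplace=True),
            nn.Conv2d(channels, channels, kernel_size=3, padding=1),
        )

    def forward(self, input):
        return self.model(input) + input

# Shapes are preserved, so the skip connection is valid.
x = torch.randn(1, 64, 32, 32)
assert ResidualBlock()(x).shape == x.shape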
Example 10: macho_example11
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def macho_example11():
    picture = Image(filename='_static/curvas_ejemplos11.jpg')
    picture.size = (100, 100)
    return picture

# the library
Example 11: AddLeNetModel
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def AddLeNetModel(model, data):
    '''
    This part is the standard LeNet model: from data to the softmax prediction.

    For each convolutional layer we specify dim_in (the number of input channels)
    and dim_out (the number of output channels). Also, each Conv and MaxPool layer
    changes the image size. For example, a kernel of size 5 reduces each side of
    an image by 4, while a MaxPool layer with kernel and stride both equal to 2
    divides each side in half.
    '''
    # Image size: 28 x 28 -> 24 x 24
    conv1 = brew.conv(model, data, 'conv1', dim_in=1, dim_out=20, kernel=5)
    # Image size: 24 x 24 -> 12 x 12
    pool1 = brew.max_pool(model, conv1, 'pool1', kernel=2, stride=2)
    # Image size: 12 x 12 -> 8 x 8
    conv2 = brew.conv(model, pool1, 'conv2', dim_in=20, dim_out=50, kernel=5)
    # Image size: 8 x 8 -> 4 x 4
    pool2 = brew.max_pool(model, conv2, 'pool2', kernel=2, stride=2)
    # 50 * 4 * 4 is dim_out from the previous layer multiplied by the image size:
    # the data is flattened from a 50x4x4 tensor to a vector of length 50*4*4.
    fc3 = brew.fc(model, pool2, 'fc3', dim_in=50 * 4 * 4, dim_out=500)
    relu3 = brew.relu(model, fc3, 'relu3')
    # Last FC layer
    pred = brew.fc(model, relu3, 'pred', dim_in=500, dim_out=10)
    # Softmax layer
    softmax = brew.softmax(model, pred, 'softmax')
    return softmax

# The `AddModel` function below allows us to easily switch from the MLP to the LeNet model.
# Just change `USE_LENET_MODEL` at the very top of the notebook and rerun the whole thing.
# In[6]:
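The AddModel dispatcher itself is not included in this excerpt; a minimal sketch of how it plausibly looks, assuming a module-level USE_LENET_MODEL flag as described in the comment above:

USE_LENET_MODEL = True  # set at the top of the notebook

def AddModel(model, data):
    # Dispatch to the CNN or the MLP definition based on the flag.
    if USE_LENET_MODEL:
        return AddLeNetModel(model, data)
    return AddMLPModel(model, data)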
Example 12: interactive
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def interactive(animation, size=320):
    basedir = mkdtemp()
    basename = join(basedir, 'graph')
    # Render every step of the animation to a PNG and wrap each one in an Image.
    steps = [Image(path) for path in render(animation.graphs(), basename, 'png', size)]
    rmtree(basedir)
    slider = widgets.IntSlider(min=0, max=len(steps) - 1, step=1, value=0)
    return widgets.interactive(lambda n: display(steps[n]), n=slider)
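render() and the animation object come from the surrounding library and are not shown here. The widget side of the pattern can be reproduced on its own roughly as follows, assuming a list of pre-rendered PNG files (the paths are placeholders):

import ipywidgets as widgets
from IPython.display import Image, display

paths = ['frames/step0.png', 'frames/step1.png', 'frames/step2.png']
steps = [Image(filename=p) for p in paths]

slider = widgets.IntSlider(min=0, max=len(steps) - 1, step=1, value=0)
widgets.interactive(lambda n: display(steps[n]), n=slider)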
Example 13: display_upstream_structure
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def display_upstream_structure(structure_dict):
    """Displays the pipeline structure in a Jupyter notebook.

    Args:
        structure_dict (dict): dict returned by
            :func:`~steppy.base.Step.upstream_structure`.
    """
    graph = _create_graph(structure_dict)
    plt = Image(graph.create_png())
    display(plt)
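_create_graph is a private helper that builds the graph from the structure dict; the last two lines only rely on a pydot-style graph object whose create_png() returns raw PNG bytes. That final display step can be exercised standalone roughly as follows (a sketch, not taken from the original library; Graphviz must be installed):

import pydot
from IPython.display import Image, display

graph = pydot.Dot(graph_type='digraph')
graph.add_edge(pydot.Edge('input', 'feature_extraction'))
graph.add_edge(pydot.Edge('feature_extraction', 'classifier'))

# create_png() renders the graph and returns PNG bytes, which Image can embed directly.
display(Image(graph.create_png()))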
Example 14: showarray
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def showarray(a, fmt='jpeg'):
    a = np.uint8(np.clip(a, 0, 255))
    # Note: StringIO only works here on Python 2; on Python 3 use io.BytesIO instead.
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))
Example 15: showarrayHQ
# Required imports: from IPython import display [as alias]
# or: from IPython.display import Image [as alias]
def showarrayHQ(a, fmt='png'):
    # Same as showarray, but defaults to lossless PNG output.
    a = np.uint8(np.clip(a, 0, 255))
    f = StringIO()
    PIL.Image.fromarray(a).save(f, fmt)
    display(Image(data=f.getvalue()))

# a couple of utility functions for converting to and from Caffe's input image layout
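A quick usage sketch for the two helpers above with synthetic data, assuming the imports used in the examples (numpy as np, PIL.Image, StringIO, and Image/display from IPython.display):

import numpy as np

# A 64x64 RGB gradient; values outside [0, 255] would be clipped by the helpers.
a = np.linspace(0, 255, 64 * 64 * 3).reshape(64, 64, 3)
showarray(a)      # fast JPEG preview
showarrayHQ(a)    # lossless PNG version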