本文整理汇总了Python中theano.tensor.nnet.conv3d2d.conv3d函数的典型用法代码示例。如果您正苦于以下问题:Python conv3d函数的具体用法?Python conv3d怎么用?Python conv3d使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了conv3d函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: lcn_3d_input
def lcn_3d_input(data, kernel_shape, n_maps):
    """
    Local contrast normalization of a 3-d volume: subtract a Gaussian-weighted
    neighborhood mean from each voxel, then divide by the neighborhood norm.

    :param data: numeric array, [examples, depth, filters, height, width]
    :param kernel_shape: sequence of 3 ints (depth, height, width) — note it is
        indexed as kernel_shape[0..2] below, so despite the original note it is
        not a single int
    :param n_maps: int, number of feature maps (data.shape[2])
    :return: new_x: [examples, depth, filters, height, width]
    """
    # create symbolic variable for the input data
    ftensor5 = T.TensorType('float32', [False] * 5)
    x = ftensor5()
    # # determine the number of maps
    # n_maps = data.shape[2]
    # create 3d filter that spans across all channels / feature maps
    # todo: kernel is not really in 3d; need 3d implementation instead of 2d repeated across third dimension
    # todo: alternative is to keep 2d kernel and extend short range given data size in z-plane; change first kernel_sh.
    filter_shape = (1, kernel_shape[0], n_maps, kernel_shape[1], kernel_shape[2])
    filters = np.resize(gaussian_filter(kernel_shape[1]), filter_shape)
    # normalize the kernel to sum to 1 so the convolution is a weighted average
    filters = filters / np.sum(filters)
    filters = sharedX(filters)
    # convolve filter with input signal
    convolution_out = conv3d(
        signals=x,
        filters=filters,
        signals_shape=data.shape,
        filters_shape=filter_shape,
        border_mode='valid'
    )
    # for each pixel, remove mean of 9x9 neighborhood
    mid_0 = int(np.floor(kernel_shape[0] / 2.))
    mid_1 = int(np.floor(kernel_shape[1] / 2.))
    mid_2 = int(np.floor(kernel_shape[2] / 2.))
    # the single-output-channel mean is replicated across all n_maps channels
    mean = T.tile(convolution_out, (1, 1, n_maps, 1, 1))
    # the 'valid' convolution shrank the volume; re-embed the mean into a
    # zero-padded tensor of the input's shape before subtracting
    padded_mean = T.zeros_like(x)
    padded_mean = T.set_subtensor(padded_mean[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], mean)
    centered_data = data - padded_mean
    # scale down norm of 9x9 patch if norm is bigger than 1
    sum_sqr_xx = conv3d(signals=T.sqr(data), filters=filters)
    denominator = T.tile(T.sqrt(sum_sqr_xx), (1, 1, n_maps, 1, 1))
    # pad with ones so border voxels (outside the valid region) divide by ~1
    padded_denominator = T.ones_like(x)
    padded_denominator = T.set_subtensor(
        padded_denominator[:, mid_0:-mid_0, :, mid_1:-mid_1, mid_2:-mid_2], denominator
    )
    per_img_mean = padded_denominator.mean(axis=[1, 2, 3, 4])
    # divisor is at least the per-image mean norm, and never below 1 (see below)
    divisor = T.largest(
        per_img_mean.dimshuffle(0, 'x', 'x', 'x', 'x'),
        padded_denominator
    )
    new_x = centered_data / T.maximum(1., divisor)
    # compile theano function and evaluate eagerly on the numeric input
    f = theano.function([x], new_x)
    return f(data)
示例2: dot
def dot(self):
    """Return the symbolic 3-D convolution of the input with the model weights."""
    return conv3d(self.x, self.w)
示例3: convolve
def convolve(self, input, **kwargs):
    """Run a 'valid' 3-D convolution.

    conv3d expects signals as [n_images, depth, channels, height, width],
    so the depth and channel axes are swapped going in and restored coming out.
    """
    swap = (0, 2, 1, 3, 4)
    shuffled_weights = self.W.dimshuffle(*swap)
    shuffled_input = input.dimshuffle(*swap)
    result = conv3d(shuffled_input, shuffled_weights,
                    signals_shape=None, filters_shape=None,
                    border_mode='valid')
    return result.dimshuffle(*swap)
示例4: conv3d
def conv3d(x, kernel, strides=(1, 1, 1),
           border_mode='valid', dim_ordering='th',
           volume_shape=None, filter_shape=None):
    '''
    3-D convolution (Keras-style backend helper). Runs on cuDNN if available.

    x: 5-d tensor; 'th' ordering is (samples, input_depth, d1, d2, d3),
        'tf' ordering is (samples, d1, d2, d3, input_depth)
    kernel: 5-d tensor; 'th' (out_depth, input_depth, k1, k2, k3),
        'tf' (k1, k2, k3, input_depth, out_depth)
    strides: tuple of 3 ints; implemented by slicing the output, and must be
        (1, 1, 1) when border_mode == 'same'
    border_mode: string, "same" or "valid".
    volume_shape, filter_shape: optional static shapes (unused after the
        'tf' reordering; note they are not forwarded to conv3d below)
    '''
    if dim_ordering not in {'th', 'tf'}:
        raise Exception('Unknown dim_ordering ' + str(dim_ordering))
    if border_mode not in {'same', 'valid'}:
        raise Exception('Invalid border mode: ' + str(border_mode))
    if dim_ordering == 'tf':
        # TF uses the last dimension as channel dimension,
        # instead of the 2nd one.
        # TH input shape: (samples, input_depth, conv_dim1, conv_dim2, conv_dim3)
        # TF input shape: (samples, conv_dim1, conv_dim2, conv_dim3, input_depth)
        # TH kernel shape: (out_depth, input_depth, kernel_dim1, kernel_dim2, kernel_dim3)
        # TF kernel shape: (kernel_dim1, kernel_dim2, kernel_dim3, input_depth, out_depth)
        x = x.dimshuffle((0, 4, 1, 2, 3))
        kernel = kernel.dimshuffle((4, 3, 0, 1, 2))
        if volume_shape:
            volume_shape = (volume_shape[0], volume_shape[4],
                            volume_shape[1], volume_shape[2], volume_shape[3])
        if filter_shape:
            filter_shape = (filter_shape[4], filter_shape[3],
                            filter_shape[0], filter_shape[1], filter_shape[2])
    if border_mode == 'same':
        # conv3d2d only supports 'valid': emulate 'same' by zero-padding the
        # input by (kernel - 1) in each spatial dim, centered, then convolving
        # with 'valid'.
        assert(strides == (1, 1, 1))
        pad_dim1 = (kernel.shape[2] - 1)
        pad_dim2 = (kernel.shape[3] - 1)
        pad_dim3 = (kernel.shape[4] - 1)
        output_shape = (x.shape[0], x.shape[1],
                        x.shape[2] + pad_dim1,
                        x.shape[3] + pad_dim2,
                        x.shape[4] + pad_dim3)
        output = T.zeros(output_shape)
        indices = (slice(None), slice(None),
                   slice(pad_dim1 // 2, x.shape[2] + pad_dim1 // 2),
                   slice(pad_dim2 // 2, x.shape[3] + pad_dim2 // 2),
                   slice(pad_dim3 // 2, x.shape[4] + pad_dim3 // 2))
        x = T.set_subtensor(output[indices], x)
        border_mode = 'valid'
    border_mode_3d = (border_mode, border_mode, border_mode)
    # conv3d2d.conv3d wants (batch, time, channels, rows, cols); swap axes
    # 1 and 2 going in and restore them coming out
    conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                               filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                               border_mode=border_mode_3d)
    conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
    # support strides by manually slicing the output
    if strides != (1, 1, 1):
        conv_out = conv_out[:, :, ::strides[0], ::strides[1], ::strides[2]]
    if dim_ordering == 'tf':
        conv_out = conv_out.dimshuffle((0, 2, 3, 4, 1))
    return conv_out
示例5: get_output
def get_output(self, train):
    """Return the activation of the 3-D convolution of the layer input.

    'same' and 'full' border modes are emulated by zero-padding the input
    and then running a 'valid' convolution, since both conv3d2d.conv3d and
    nnet.conv3D only support 'valid'.
    """
    X = self.get_input(train)
    border_mode = self.border_mode
    # Both conv3d2d.conv3d and nnet.conv3D only support the 'valid' border mode
    if border_mode != 'valid':
        if border_mode == 'same':
            assert(self.subsample == (1, 1, 1))
            pad_z = (self.nb_depth - self.subsample[0])
            pad_x = (self.nb_row - self.subsample[1])
            pad_y = (self.nb_col - self.subsample[2])
        else:  # full
            pad_z = (self.nb_depth - 1) * 2
            pad_x = (self.nb_row - 1) * 2
            pad_y = (self.nb_col - 1) * 2
        input_shape = X.shape
        output_shape = (input_shape[0], input_shape[1],
                        input_shape[2] + pad_z,
                        input_shape[3] + pad_x,
                        input_shape[4] + pad_y)
        output = T.zeros(output_shape)
        indices = (slice(None), slice(None),
                   slice(pad_z // 2, input_shape[2] + pad_z // 2),
                   slice(pad_x // 2, input_shape[3] + pad_x // 2),
                   slice(pad_y // 2, input_shape[4] + pad_y // 2))
        X = T.set_subtensor(output[indices], X)
        border_mode = 'valid'
    if on_gpu():
        # Shuffle dimensions into conv3d2d's expected order with *local*
        # expressions. BUG FIX: the original rebound
        # self.W = self.W.dimshuffle(...) and "restored" it afterwards,
        # which replaced the shared weight variable with a derived symbolic
        # expression and silently detached the layer's trainable parameter.
        W_shape = (self.W_shape[0], self.W_shape[2], self.W_shape[1],
                   self.W_shape[3], self.W_shape[4])
        conv_out = conv3d2d.conv3d(signals=X.dimshuffle(0, 2, 1, 3, 4),
                                   filters=self.W.dimshuffle(0, 2, 1, 3, 4),
                                   filters_shape=W_shape,
                                   border_mode=border_mode)
        conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
    else:
        # nnet.conv3D wants the channel axis last; again keep self.W untouched
        conv_out = T.nnet.conv3D(V=X.dimshuffle(0, 2, 3, 4, 1),
                                 W=self.W.dimshuffle(0, 2, 3, 4, 1),
                                 b=self.b, d=self.subsample)
        conv_out = conv_out.dimshuffle(0, 4, 1, 2, 3)
    output = self.activation(conv_out + self.b.dimshuffle('x', 0, 'x', 'x', 'x'))
    return output
示例6: __init__
def __init__(self,
             filters,
             signal_shape,
             filter_shape,
             input_axes=('b', 0, 1, 't', 'c'),
             batch_size=None,
             output_axes=('b', 0, 1, 't', 'c'),
             kernel_stride=(1, 1, 1),
             pad=0,
             message='',
             partial_sum=None):
    """3-D convolution op wrapper restricted to unit strides.

    :param filters: GPU shared variable holding the filter bank
    :param kernel_stride: length-3 sequence; only (1, 1, 1) is supported
    :raises ValueError: for unsupported kernel_stride values
    :raises NotImplementedError: for unsupported message/batch_size/axes
    """
    # BUG FIX: the old default was the mutable list [1, 1, 1], and the
    # supported-stride check below compared it against the *tuple*
    # (1, 1, 1) -- a list never equals a tuple, so the constructor raised
    # ValueError even for the default stride. Normalizing to a tuple fixes
    # both the comparison and the mutable-default pitfall.
    kernel_stride = tuple(kernel_stride)
    if len(kernel_stride) != 3:
        raise ValueError("kernel_stride must have length 3")
    elif kernel_stride[0] != kernel_stride[1]:
        raise ValueError("only values of kernel_stride with both "
                         "elements equal are supported currently")
    if message != '':
        raise NotImplementedError()
    if batch_size is not None:
        raise NotImplementedError()
    if input_axes != ('b', 0, 1, 't', 'c'):
        raise NotImplementedError()
    print(kernel_stride)
    if kernel_stride != (1, 1, 1):
        raise ValueError("only values of kernel_stride with value of 1 "
                         " are supported currently")
    self.input_axes = input_axes
    self.output_axes = output_axes
    # NOTE(review): the commented-out line suggests this was meant to be the
    # Conv3D *op*; conv3d() called with no arguments looks wrong -- confirm.
    #self.conv3d_op = Conv3D()
    self.conv3d_op = conv3d()
    # filters should be a GPU shared variable.
    # I guess you could GpuFromHost them every time,
    # but if you're using this class you probably care
    # about performance and want to be at least warned
    # that this is happening
    assert hasattr(filters, 'get_value')
    assert 'Cuda' in str(type(filters))
    self._filters = filters
    self.pad = pad
    self.partial_sum = partial_sum
    self.kernel_stride = kernel_stride
    self.signal_shape = signal_shape
    self.filter_shape = filter_shape
    ## Add a dummy b for interface issue
    self.b = sharedX(np.zeros((filter_shape[0])))
示例7: compute_output
def compute_output(self, network, in_vw):
    """Create the weight variable and the 3-D convolution output for this node."""
    # gather hyperparameters
    n_filters = network.find_hyperparameter(["num_filters"])
    f_size = network.find_hyperparameter(["filter_size"])
    conv_stride = network.find_hyperparameter(["conv_stride", "stride"],
                                              (1, 1, 1))
    conv_pad = network.find_hyperparameter(["conv_pad", "pad"], "valid")
    init_list = list(toolz.concat(network.find_hyperparameters(
        ["inits"],
        [])))
    assert len(f_size) == 3
    assert conv_pad == "valid"
    assert conv_stride == (1, 1, 1)
    # weight tensor, stored in the layout shared with the other convolutions
    in_channels = in_vw.shape[1]
    w_shape = (n_filters, in_channels) + tuple(f_size)
    W = network.create_vw(
        name="weight",
        is_shared=True,
        shape=w_shape,
        tags={"parameter", "weight"},
        inits=init_list,
    ).variable
    from theano.tensor.nnet.conv3d2d import conv3d
    # conv3d consumes signals as (batch, time, channels, row, column) and
    # filters as (out channel, time, in channels, row, column), so shuffle
    # both into that order here rather than storing W differently
    axes_order = (0, 2, 1, 3, 4)
    out_var = conv3d(signals=in_vw.variable.dimshuffle(*axes_order),
                     filters=W.dimshuffle(*axes_order),
                     signals_shape=[in_vw.shape[a] for a in axes_order],
                     filters_shape=[w_shape[a] for a in axes_order],
                     # HACK as of 20150916, conv3d does a check
                     # if isinstance(border_mode, str), so we manually
                     # cast as a string
                     border_mode=str("valid"))
    out_shape = conv_output_shape(input_shape=in_vw.shape,
                                  num_filters=n_filters,
                                  axes=(2, 3, 4),
                                  conv_shape=f_size,
                                  strides=conv_stride,
                                  pads=conv_parse_pad(f_size, conv_pad))
    network.create_vw(
        "default",
        variable=out_var,
        shape=out_shape,
        tags={"output"},
    )
示例8: symb_forward
def symb_forward(self, symb_input):
    """symb_input shape: (n_input, depth, channels, height, width)"""
    if symb_input.ndim < 5:
        raise NotImplementedError("3D convolution requires a dimension >= 5")
    out = conv3d2d.conv3d(symb_input, self.weight,
                          filters_shape=self.w_shape,
                          border_mode=self.border_mode)
    if not self.with_bias:
        return out
    # broadcast the per-channel bias over batch, depth, height and width
    return out + self.bias.dimshuffle("x", "x", 0, "x", "x")
示例9: apply
def apply(self, graph):
    """Read conv hyperparameters from the graph, build a 3-D convolution,
    and write the resulting variable wrapper back under the "output" key."""
    in_vw = graph.read_key(key="input")
    n_filters = graph.read_key(key="num_filters")
    f_size = graph.read_key(key="filter_size")
    conv_stride = graph.read_key_with_default(key="stride", default=(1, 1, 1))
    conv_pad = graph.read_key_with_default(key="pad", default="valid")
    assert len(f_size) == 3
    assert conv_pad == "valid"
    assert conv_stride == (1, 1, 1)
    # weight tensor, stored in the layout shared with the other convolutions
    in_channels = in_vw.shape[1]
    w_shape = (n_filters, in_channels) + tuple(f_size)
    W = th_utils.read_key_with_state_default(
        graph=graph,
        key="weight",
        tags={"weight": True,
              "linear_weight": True,
              "in_axes": (1,),
              "out_axes": (0,),
              "shape": w_shape,
              "dtype": fX},
        state_tags={"parameter": True,
                    "state": True}
    ).var
    from theano.tensor.nnet.conv3d2d import conv3d
    # conv3d consumes signals as (batch, time, channels, row, column) and
    # filters as (out channel, time, in channels, row, column), so shuffle
    # both into that order here rather than storing W differently
    axes_order = (0, 2, 1, 3, 4)
    out_var = conv3d(signals=in_vw.variable.dimshuffle(*axes_order),
                     filters=W.dimshuffle(*axes_order),
                     signals_shape=[in_vw.shape[a] for a in axes_order],
                     filters_shape=[w_shape[a] for a in axes_order],
                     # HACK as of 20150916, conv3d does a check
                     # if isinstance(border_mode, str), so we manually
                     # cast as a string
                     border_mode=str("valid"))
    out_shape = conv_output_shape(input_shape=in_vw.shape,
                                  num_filters=n_filters,
                                  axes=(2, 3, 4),
                                  conv_shape=f_size,
                                  strides=conv_stride,
                                  pads=conv_parse_pad(f_size, conv_pad))
    graph.write_key(key="output", value=VariableWrapper(out_var, out_shape))
示例10: kernel_3d_center_surround_filter
def kernel_3d_center_surround_filter(symbolic_input,model=None,name=None,config=None):
    """
    Function to be used to initialize a `Node`.
    Comparable to the VirtualRetina OPL layer, this node computes a center-surround signal.
    To do this it creates a big composit kernel.

    :param symbolic_input: symbolic 3-d input variable
    :param model: retina model against which the numeric filters are built
    :param name: optional node name; a fresh uuid-based name is generated per
        call when omitted
    :param config: optional dict of filter parameters (see the `config.get`
        calls below for recognized keys and their defaults)
    """
    # BUG FIX: the old signature used `name=uuid.uuid4()` -- evaluated once at
    # definition time, so every call without an explicit name shared the same
    # id, and a UUID object breaks the `name + '_kernel'` concatenation below.
    # `config={}` was also a mutable default. Both are now None-sentinels.
    if name is None:
        name = str(uuid.uuid4())
    if config is None:
        config = {}
    _kernel = dtensor5(name+'_kernel')
    output_variable = conv3d(symbolic_input,_kernel)
    output_variable.name = name+'_output'
    parameter_variables = [_kernel]
    node_type = '3d Kernel Filter Node'
    # NOTE(review): `node_type` above is never used -- the returned dict
    # hard-codes '2d Gauss Filter Node'; one of the two is probably stale.
    epsilon = float(config.get('epsilon',0.000000001))
    num_E_n_C = m_en_filter(int(config.get('center-n__uint',0)),float(config.get('center-tau__sec',0.0001)),
                            normalize=True,retina=model)
    num_G_C = m_g_filter(float(config.get('center-sigma__deg',0.05)),float(config.get('center-sigma__deg',0.05)),
                         retina=model,normalize=True,even=False)
    num_TwuTu_C = m_t_filter(float(config.get('undershoot',{}).get('tau__sec',0.001)),
                             float(config.get('undershoot',{}).get('relative-weight',1.0)),
                             normalize=True,retina=model,epsilon=0.0000000000001)
    num_E_S = m_e_filter(float(config.get('surround-tau__sec',0.001)),retina=model,normalize=True)
    num_G_S = m_g_filter(float(config.get('surround-sigma__deg',0.15)),float(config.get('surround-sigma__deg',0.15)),
                         retina=model,normalize=True,even=False)
    num_Reshape_C_S = fake_filter(num_G_S,num_E_S)
    num_lambda_OPL = config.get('opl-amplification',0.25) / model.config.get('input-luminosity-range',255.0)
    num_w_OPL = config.get('opl-relative-weight',0.7)
    # composite kernel: center path minus weighted surround path
    center_filter = retina_base.conv(retina_base.conv(num_E_n_C,num_TwuTu_C),
                                     num_G_C)
    num_kernel = retina_base.minimize_filter(
        num_lambda_OPL*(
            retina_base.conv(center_filter,num_Reshape_C_S)
            - num_w_OPL * retina_base.conv(retina_base.conv(center_filter,num_E_S),num_G_S)),
        filter_epsilon = epsilon)
    node_description = lambda: 'Convolution '+str(num_kernel.shape)
    def get_num_inputs(num_input_variable):
        # map the kernel parameter variable to its numeric value
        return dict(zip(parameter_variables,[num_kernel]))
    return {
        'output_variable': output_variable,
        'accept_dimensions': [3],
        'parameter_variables': parameter_variables,
        'state_variables': [],
        'inital_states': [],
        'updated_state_variables': [],
        'node_type': '2d Gauss Filter Node',
        'node_description': lambda: 'Recursive Filtering',
        'get_num_inputs': get_num_inputs
    }
示例11: __init__
def __init__(self,config,kernel_center=None,kernel_surround=None,name=None):
    """Build the symbolic OPL center-surround pipeline and its compute function.

    :param config: dict-like configuration ('name', 'amplification',
        'input-luminosity-range', 'relative-weight' keys are read)
    :param kernel_center: optional numeric center kernel (default: 1x1x1x1x1 ones)
    :param kernel_surround: optional numeric surround kernel (default: same)
    :param name: optional base name for the symbolic variables
    """
    self.config = config
    if name is None:
        name = str(uuid.uuid4())
    self.kernel_center = kernel_center if kernel_center is not None else np.ones((1,1,1,1,1))
    self.kernel_surround = kernel_surround if kernel_surround is not None else np.ones((1,1,1,1,1))
    self.name = self.config.get('name',name)
    self._I = dtensor5(name+'_I')
    self._kernel_C = dtensor5(name+'_k_C')
    self._kernel_S = dtensor5(name+'_k_S')
    # center signal, and surround signal derived from the center
    self._C = conv3d(self._I,self._kernel_C)
    self._S = conv3d(self._C,self._kernel_S)
    self._Reshape_C_S = dtensor5(name+'_Reshape_C_S')
    self._lambda_OPL = T.dscalar(name+'_lambda_OPL')
    # BUG FIX: was name+'_lambda_OPL' (copy-paste), giving two distinct
    # scalars the same debug name and making graphs impossible to tell apart
    self._w_OPL = T.dscalar(name+'_w_OPL')
    # I_OPL = lambda * (reshaped center - w * surround)
    self._I_OPL = self._lambda_OPL * (conv3d(self._C,self._Reshape_C_S) - self._w_OPL * self._S)
    self.input_variables = [self._I]
    self.internal_variables = [self._kernel_C,self._kernel_S,self._Reshape_C_S, self._lambda_OPL,self._w_OPL]
    self.output_variable = self._I_OPL
    self.compute_function= theano.function(self.input_variables + self.internal_variables, self.output_variable)
    self.num_Reshape_C_S = fake_filter(self.kernel_center)
    self.num_lambda_OPL = self.config.get('amplification',0.25) / self.config.get('input-luminosity-range',255.0)
    self.num_w_OPL = self.config.get('relative-weight',0.7)
    self.state = None
示例12: conv3d
def conv3d(x, kernel, strides=(1, 1, 1), border_mode='valid'):
    '''
    3-D convolution helper. Runs on cuDNN/GPU if available.

    x: 5-d tensor; padded along dims 2-4 below, so presumably
        (samples, channels, dim1, dim2, dim3) -- TODO confirm against callers
    kernel: 5-d tensor whose dims 2-4 are the spatial kernel sizes
    strides: tuple of 3 ints; must be (1, 1, 1) for 'same' mode and on the
        GPU path (asserted below)
    border_mode: string, "same" or "valid".
    '''
    # Both conv3d2d.conv3d and nnet.conv3D only support the 'valid' border mode
    if border_mode != 'valid':
        # emulate 'same'/'full' by zero-padding the input, then convolving
        # with 'valid'
        if border_mode == 'same':
            assert(strides == (1, 1, 1))
            pad_z = (kernel.shape[2] - strides[0])
            pad_x = (kernel.shape[3] - strides[1])
            pad_y = (kernel.shape[4] - strides[2])
        else: #full
            pad_z = (kernel.shape[2] - 1) * 2
            pad_x = (kernel.shape[3] - 1) * 2
            pad_y = (kernel.shape[4] - 1) * 2
        input_shape = x.shape
        output_shape = (input_shape[0], input_shape[1],
                        input_shape[2] + pad_z,
                        input_shape[3] + pad_x,
                        input_shape[4] + pad_y)
        output = T.zeros(output_shape)
        # center the original signal inside the zero padding
        indices = (slice(None), slice(None),
                   slice(pad_z//2, input_shape[2] + pad_z//2),
                   slice(pad_x//2, input_shape[3] + pad_x//2),
                   slice(pad_y//2, input_shape[4] + pad_y//2))
        x = T.set_subtensor(output[indices], x)
        border_mode = 'valid'
    if _on_gpu():
        assert(strides == (1, 1, 1))
        # Shuffle the dimensions as per the input parameter order, restore it once done
        conv_out = conv3d2d.conv3d(signals=x.dimshuffle(0, 2, 1, 3, 4),
                                   filters=kernel.dimshuffle(0, 2, 1, 3, 4),
                                   border_mode=border_mode)
        conv_out = conv_out.dimshuffle(0, 2, 1, 3, 4)
    else:
        # CPU path: nnet.conv3D wants channels last and supports strides via d
        # Shuffle the dimensions as per the input parameter order, restore it once done
        conv_out = T.nnet.conv3D(V=x.dimshuffle(0, 2, 3, 4, 1),
                                 W=kernel.dimshuffle(0, 2, 3, 4, 1),
                                 b=None, d=strides)
        conv_out = conv_out.dimshuffle(0, 4, 1, 2, 3)
    return conv_out
示例13: forward
def forward(self, x, batch_size, run_time):
    """Convolve one batch of inputs, 3-D max-pool, add bias and activate."""
    batched_shape = (batch_size,) + self.image_shape
    signal = x.reshape(batched_shape)
    # Convolve input feature maps with filters
    conv_out = conv3d2d.conv3d(signals=signal,
                               filters=self.w,
                               signals_shape=batched_shape,
                               filters_shape=self.filter_shape,
                               border_mode='valid')
    # the pooling function's prototype requires the axes in a swapped order
    axis_swap = [0, 2, 1, 3, 4]
    pooled = max_pool_3d(conv_out.dimshuffle(axis_swap), self.poolsize,
                         ignore_border=True)
    biased = pooled.dimshuffle(axis_swap) + self.b.dimshuffle('x', 'x', 0, 'x', 'x')
    return self.neuron_type.activation_function(biased).flatten(2)
示例14: get_output_for
def get_output_for(self, input, *args, **kwargs):
    """ input is bct01
    based on
    https://github.com/lpigou/Theano-3D-ConvNet/blob/master/convnet3d/convnet3d.py
    released as public domain.
    """
    input_shape = self.input_layer.get_output_shape()
    t, h, w = input_shape[2], input_shape[3], input_shape[4]
    input_c = input_shape[1]
    batch_size = input_shape[0]
    filter_t, filter_h, filter_w = self.filter_size
    input_btc01 = input.dimshuffle([0,2,1,3,4]) # bct01 -> btc01
    # BUG FIX: the original passed the undefined name `btc01` as signals,
    # which raises NameError at graph-construction time; the shuffled input
    # is `input_btc01`.
    out_btc01 = conv3d2d.conv3d(signals=input_btc01, filters=self.W,
                                signals_shape=(batch_size, t, input_c, h, w),
                                filters_shape=(self.num_filters, filter_t, input_c, filter_h, filter_w),
                                border_mode='valid')
    out_bct01 = out_btc01.dimshuffle([0,2,1,3,4]) # btc01 -> bct01
    if self.b is not None:
        out_bct01 = out_bct01 + self.b.dimshuffle('x',0,'x','x','x')
    return self.nonlinearity(out_bct01)
示例15: get_reconstructed_input
def get_reconstructed_input(self):
    """ Computes the reconstructed input given the values of the hidden layer.

    Deconvolves the hidden activations with the reconstruction weights,
    re-embeds the result into a (heavily negative) padded volume, then
    upsamples by index-repetition to undo the pooling before applying a
    sigmoid.
    """
    # deconvolve hidden activations with the reconstruction weights W_prime
    repeated_conv = conv3d(
        self.hidden,
        self.W_prime,
    )
    # half-width of the border lost to the 'valid' convolution.
    # NOTE(review): relies on Python-2 integer '/'; under Python 3 this is a
    # float and the slices below would fail -- confirm the target runtime.
    bp=(self.filter_shape[1]-1)/2
    repeated_conv=repeated_conv.dimshuffle(0,2,1,3,4)
    # padded volume initialized to -100 so padding saturates the sigmoid to ~0
    zeropad=T.zeros((self.image_shape[0],
                     1,
                     self.image_shape[1]/self.poolsize[0],
                     self.image_shape[3]/self.poolsize[1],
                     self.image_shape[4]/self.poolsize[2]))-100
    repeated_conv=T.set_subtensor(zeropad[:,:,bp:-bp,bp:-bp,bp:-bp],repeated_conv)
    #repeated_conv=repeated_conv.dimshuffle(0,2,1,3,4)
    #multiple_conv_out = [repeated_conv.flatten()] * np.prod(self.poolsize)
    #stacked_conv_neibs = T.stack(*multiple_conv_out).T
    #newshape=()
    #stretch_unpooling_out = T.nnet.neighbours.neibs2images(stacked_conv_neibs,
    #self.poolsize, self.x1.shape)
    z=repeated_conv ### now zp is (n_batch, 1, n/2, n/2, n/2)
    shp=z.shape
    # collapse batch and channel axes before upsampling
    zp= z.reshape((shp[0]*shp[1],shp[2],shp[3],shp[4])) ### (50,16,16,16)
    # repeated index vector: unpool by duplicating each pooled entry
    # poolsize times along each spatial axis.
    # NOTE(review): iid uses self.x1.shape[3] and poolsize[1] for *all three*
    # axes -- presumably pooling is uniform and the volume is cubic; confirm.
    iid = [T.arange(self.x1.shape[3])//self.poolsize[1]]
    c=zp[:,:,:,iid]
    c=c[:,:,iid]
    c=c[:,iid].reshape(self.x1.shape)
    #c = ((zp[T.arange(z.shape[0]*z.shape[1]*z.shape[2])//self.poolsize[0]].T)[T.arange(self.x1.shape[3])//self.poolsize[1]].T).reshape(self.x1.shape)
    z=T.nnet.sigmoid(c + self.b_prime.dimshuffle('x', 'x', 0, 'x', 'x'))
    return z