當前位置: 首頁>>代碼示例>>Python>>正文


Python ZoomableAttentionWindow.nn2att方法代碼示例

本文整理匯總了Python中attention.ZoomableAttentionWindow.nn2att方法的典型用法代碼示例。如果您正苦於以下問題:Python ZoomableAttentionWindow.nn2att方法的具體用法?Python ZoomableAttentionWindow.nn2att怎麽用?Python ZoomableAttentionWindow.nn2att使用的例子?那麽, 這裏精選的方法代碼示例或許可以為您提供幫助。您也可以進一步了解該方法所在attention.ZoomableAttentionWindow的用法示例。


在下文中一共展示了ZoomableAttentionWindow.nn2att方法的7個代碼示例,這些例子默認根據受歡迎程度排序。您可以為喜歡或者感覺有用的代碼點讚,您的評價將有助於係統推薦出更棒的Python代碼示例。

示例1: AttentionWriter

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
class AttentionWriter(Initializable):
    """Write a gamma-attenuated N x N patch into the canvas.

    The hidden state *h* is mapped through two linear bricks: ``z_trafo``
    yields the 5 raw attention parameters and ``w_trafo`` yields the patch
    contents; ``ZoomableAttentionWindow`` performs the actual write.
    """
    def __init__(self, input_dim, output_dim, channels, width, height, N, **kwargs):
        super(AttentionWriter, self).__init__(name="writer", **kwargs)

        self.channels = channels
        self.img_width = width
        self.img_height = height
        self.N = N
        self.input_dim = input_dim
        self.output_dim = output_dim

        # The flattened canvas must match the window geometry exactly.
        assert output_dim == channels * width * height

        self.zoomer = ZoomableAttentionWindow(channels, height, width, N)

        # h -> 5 raw attention parameters (decoded later by nn2att).
        self.z_trafo = Linear(name=self.name + '_ztrafo',
                              input_dim=input_dim,
                              output_dim=5,
                              weights_init=self.weights_init,
                              biases_init=self.biases_init,
                              use_bias=True)

        # h -> flattened per-channel N x N patch to be written.
        self.w_trafo = Linear(name=self.name + '_wtrafo',
                              input_dim=input_dim,
                              output_dim=channels * N * N,
                              weights_init=self.weights_init,
                              biases_init=self.biases_init,
                              use_bias=True)

        self.children = [self.z_trafo, self.w_trafo]

    @application(inputs=['h'], outputs=['c_update'])
    def apply(self, h):
        """Return the additive canvas update for hidden state *h*."""
        patch = self.w_trafo.apply(h)
        raw_params = self.z_trafo.apply(h)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        # Writes are scaled by 1/gamma (the inverse of the read intensity).
        return 1. / gamma * self.zoomer.write(patch, center_y, center_x, delta, sigma)

    @application(inputs=['h'], outputs=['c_update', 'center_y', 'center_x', 'delta'])
    def apply_detailed(self, h):
        """Like :meth:`apply`, but also expose the window location/zoom."""
        patch = self.w_trafo.apply(h)
        raw_params = self.z_trafo.apply(h)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        c_update = 1. / gamma * self.zoomer.write(patch, center_y, center_x, delta, sigma)

        return c_update, center_y, center_x, delta

    @application(inputs=['x', 'h'], outputs=['c_update', 'center_y', 'center_x', 'delta'])
    def apply_circular(self, x, h):
        """Write externally supplied content *x* (bypassing w_trafo)."""
        raw_params = self.z_trafo.apply(h)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        c_update = 1. / gamma * self.zoomer.write(x, center_y, center_x, delta, sigma)

        return c_update, center_y, center_x, delta
開發者ID:drewlinsley,項目名稱:draw_classify,代碼行數:62,代碼來源:draw.py

示例2: AttentionReader

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
class AttentionReader(Initializable):
    """Read gamma-scaled attention glimpses from the input and error images.

    A readout MLP maps the decoder state to the 5 raw attention parameters;
    ``ZoomableAttentionWindow`` extracts the N x N patches.
    """
    def __init__(self, x_dim, dec_dim, channels, height, width, N, **kwargs):
        super(AttentionReader, self).__init__(name="reader", **kwargs)

        self.img_height = height
        self.img_width = width
        self.N = N
        self.x_dim = x_dim
        self.dec_dim = dec_dim
        # Glimpses from x and x_hat are concatenated on the feature axis.
        self.output_dim = 2 * channels * N * N

        self.zoomer = ZoomableAttentionWindow(channels, height, width, N)
        self.readout = MLP(activations=[Identity()], dims=[dec_dim, 5], **kwargs)

        self.children = [self.readout]

    def get_dim(self, name):
        """Return the dimensionality of the named role."""
        dims = {'input': self.dec_dim,
                'x_dim': self.x_dim,
                'output': self.output_dim}
        if name not in dims:
            raise ValueError
        return dims[name]

    @application(inputs=['x', 'x_hat', 'h_dec'], outputs=['r'])
    def apply(self, x, x_hat, h_dec):
        """Glimpse both *x* and the error image *x_hat*, concatenated."""
        raw_params = self.readout.apply(h_dec)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        glimpse = gamma * self.zoomer.read(x, center_y, center_x, delta, sigma)
        glimpse_hat = gamma * self.zoomer.read(x_hat, center_y, center_x, delta, sigma)

        return T.concatenate([glimpse, glimpse_hat], axis=1)

    @application(inputs=['x', 'x_hat', 'h_dec'], outputs=['r', 'center_y', 'center_x', 'delta'])
    def apply_detailed(self, x, x_hat, h_dec):
        """Like :meth:`apply`, but also expose the window location/zoom."""
        raw_params = self.readout.apply(h_dec)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        glimpse = gamma * self.zoomer.read(x, center_y, center_x, delta, sigma)
        glimpse_hat = gamma * self.zoomer.read(x_hat, center_y, center_x, delta, sigma)

        return T.concatenate([glimpse, glimpse_hat], axis=1), center_y, center_x, delta

    @application(inputs=['x', 'h_dec'], outputs=['r', 'center_y', 'center_x', 'delta'])
    def apply_simple(self, x, h_dec):
        """Glimpse only *x*, returning the patch and window location/zoom."""
        raw_params = self.readout.apply(h_dec)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        glimpse = gamma * self.zoomer.read(x, center_y, center_x, delta, sigma)

        return glimpse, center_y, center_x, delta
開發者ID:drewlinsley,項目名稱:draw_classify,代碼行數:60,代碼來源:draw_CL_WORKING.py

示例3: AttentionWriter

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
class AttentionWriter(Initializable):
    """Single-channel attention writer: emit an N x N patch onto the canvas.

    ``z_trafo`` maps *h* to the 5 raw attention parameters and ``w_trafo``
    maps *h* to the patch contents; the write itself is delegated to
    ``ZoomableAttentionWindow``.
    """
    def __init__(self, input_dim, output_dim, width, height, N, **kwargs):
        super(AttentionWriter, self).__init__(name="writer", **kwargs)

        self.img_width = width
        self.img_height = height
        self.N = N
        self.input_dim = input_dim
        self.output_dim = output_dim

        # The flattened canvas must match the image geometry exactly.
        assert output_dim == width * height

        self.zoomer = ZoomableAttentionWindow(height, width, N)

        # h -> 5 raw attention parameters (decoded later by nn2att).
        self.z_trafo = Linear(name=self.name + "_ztrafo",
                              input_dim=input_dim,
                              output_dim=5,
                              weights_init=self.weights_init,
                              biases_init=self.biases_init,
                              use_bias=True)

        # h -> flattened N x N patch to be written.
        self.w_trafo = Linear(name=self.name + "_wtrafo",
                              input_dim=input_dim,
                              output_dim=N * N,
                              weights_init=self.weights_init,
                              biases_init=self.biases_init,
                              use_bias=True)

        self.children = [self.z_trafo, self.w_trafo]

    @application(inputs=["h"], outputs=["c_update"])
    def apply(self, h):
        """Return the additive canvas update for hidden state *h*."""
        patch = self.w_trafo.apply(h)
        raw_params = self.z_trafo.apply(h)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        # Writes are scaled by 1/gamma (the inverse of the read intensity).
        return 1.0 / gamma * self.zoomer.write(patch, center_y, center_x, delta, sigma)

    @application(inputs=["h"], outputs=["c_update", "center_y", "center_x", "delta"])
    def apply_detailed(self, h):
        """Like :meth:`apply`, but also expose the window location/zoom."""
        patch = self.w_trafo.apply(h)
        raw_params = self.z_trafo.apply(h)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        c_update = 1.0 / gamma * self.zoomer.write(patch, center_y, center_x, delta, sigma)

        return c_update, center_y, center_x, delta
開發者ID:zan2434,項目名稱:draw,代碼行數:56,代碼來源:draw.py

示例4: LocatorReader

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
class LocatorReader(Initializable):
    """Attention reader with an anisotropic (7-parameter) window.

    Unlike the 5-parameter DRAW reader, the readout MLP here emits separate
    zoom (deltaY/deltaX) and variance (sigmaY/sigmaX) values per axis.
    """
    def __init__(self, x_dim, dec_dim, channels, height, width, N, **kwargs):
        super(LocatorReader, self).__init__(name="reader", **kwargs)

        self.img_height = height
        self.img_width = width
        self.N = N
        self.x_dim = x_dim
        self.dec_dim = dec_dim
        self.output_dim = channels * N * N

        self.zoomer = ZoomableAttentionWindow(channels, height, width, N)
        # 7 outputs: center_y, center_x, deltaY, deltaX, sigmaY, sigmaX, gamma.
        self.readout = MLP(activations=[Identity()], dims=[dec_dim, 7], **kwargs)

        self.children = [self.readout]

    def get_dim(self, name):
        """Return the dimensionality of the named role."""
        dims = {'input': self.dec_dim,
                'x_dim': self.x_dim,
                'output': self.output_dim}
        if name not in dims:
            raise ValueError
        return dims[name]

    @application(inputs=['x', 'h_dec'], outputs=['r', 'l'])
    def apply(self, x, h_dec):
        """Read a gamma-scaled glimpse; also return the raw parameters."""
        raw_params = self.readout.apply(h_dec)

        center_y, center_x, deltaY, deltaX, sigmaY, sigmaX, gamma = self.zoomer.nn2att(raw_params)

        glimpse = gamma * self.zoomer.read(x, center_y, center_x, deltaY, deltaX, sigmaY, sigmaX)

        return glimpse, raw_params

    @application(inputs=['h_dec'], outputs=['center_y', 'center_x', 'deltaY', 'deltaX'])
    def apply_l(self, h_dec):
        """Decode only the window location/zoom (no gamma/sigma) from h_dec."""
        raw_params = self.readout.apply(h_dec)

        return self.zoomer.nn2att_wn(raw_params)
開發者ID:ablavatski,項目名稱:draw,代碼行數:45,代碼來源:model.py

示例5: LocatorWriter

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
class LocatorWriter(Initializable):
    """Minimal attention 'writer' that only predicts a window location.

    Holds a single linear brick mapping *h* to the 5 raw attention
    parameters; no patch contents are produced.
    """
    def __init__(self, input_dim, output_dim, channels, width, height, N, **kwargs):
        super(LocatorWriter, self).__init__(name="writer", **kwargs)

        self.channels = channels
        self.img_width = width
        self.img_height = height
        self.N = N
        self.input_dim = input_dim
        self.output_dim = output_dim

        self.zoomer = ZoomableAttentionWindow(channels, height, width, N)
        # h -> 5 raw attention parameters (decoded later by nn2att).
        self.z_trafo = Linear(name=self.name + '_ztrafo',
                              input_dim=input_dim,
                              output_dim=5,
                              weights_init=self.weights_init,
                              biases_init=self.biases_init,
                              use_bias=True)

        self.children = [self.z_trafo]

    @application(inputs=['h'], outputs=['center_y', 'center_x', 'delta'])
    def apply_detailed(self, h):
        """Return the predicted window center and zoom for hidden state h."""
        raw_params = self.z_trafo.apply(h)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        return center_y, center_x, delta
開發者ID:ablavatski,項目名稱:draw,代碼行數:25,代碼來源:model.py

示例6: AttentionReader

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
class AttentionReader(Initializable):
    """Single-channel reader: glimpse the input and error images.

    A readout MLP maps the decoder state to the 5 raw attention parameters;
    ``ZoomableAttentionWindow`` extracts the N x N patches.
    """
    def __init__(self, x_dim, dec_dim, height, width, N, **kwargs):
        super(AttentionReader, self).__init__(name="reader", **kwargs)

        self.img_height = height
        self.img_width = width
        self.N = N
        self.x_dim = x_dim
        self.dec_dim = dec_dim
        # Glimpses from x and x_hat are concatenated on the feature axis.
        self.output_dim = 2 * N * N

        self.zoomer = ZoomableAttentionWindow(height, width, N)
        self.readout = MLP(activations=[Identity()], dims=[dec_dim, 5], **kwargs)

        self.children = [self.readout]

    def get_dim(self, name):
        """Return the dimensionality of the named role."""
        dims = {"input": self.dec_dim,
                "x_dim": self.x_dim,
                "output": self.output_dim}
        if name not in dims:
            raise ValueError
        return dims[name]

    @application(inputs=["x", "x_hat", "h_dec"], outputs=["r"])
    def apply(self, x, x_hat, h_dec):
        """Glimpse both *x* and the error image *x_hat*, concatenated."""
        raw_params = self.readout.apply(h_dec)

        center_y, center_x, delta, sigma, gamma = self.zoomer.nn2att(raw_params)

        glimpse = gamma * self.zoomer.read(x, center_y, center_x, delta, sigma)
        glimpse_hat = gamma * self.zoomer.read(x_hat, center_y, center_x, delta, sigma)

        return T.concatenate([glimpse, glimpse_hat], axis=1)
開發者ID:zan2434,項目名稱:draw,代碼行數:38,代碼來源:draw.py

示例7: main

# 需要導入模塊: from attention import ZoomableAttentionWindow [as 別名]
# 或者: from attention.ZoomableAttentionWindow import nn2att [as 別名]
def main(name, epochs, batch_size, learning_rate):
    if name is None:
        name = "att-rw" 

    print("\nRunning experiment %s" % name)
    print("         learning rate: %5.3f" % learning_rate) 
    print()


    #------------------------------------------------------------------------

    img_height, img_width = 28, 28
    
    read_N = 12
    write_N = 14

    inits = {
        #'weights_init': Orthogonal(),
        'weights_init': IsotropicGaussian(0.001),
        'biases_init': Constant(0.),
    }
    
    x_dim = img_height * img_width

    reader = ZoomableAttentionWindow(img_height, img_width,  read_N)
    writer = ZoomableAttentionWindow(img_height, img_width, write_N)

    # Parameterize the attention reader and writer
    mlpr = MLP(activations=[Tanh(), Identity()], 
                dims=[x_dim, 50, 5], 
                name="RMLP",
                **inits)
    mlpw = MLP(activations=[Tanh(), Identity()],
                dims=[x_dim, 50, 5],
                name="WMLP",
                **inits)

    # MLP between the reader and writer
    mlp = MLP(activations=[Tanh(), Identity()],
                dims=[read_N**2, 300, write_N**2],
                name="MLP",
                **inits)

    for brick in [mlpr, mlpw, mlp]:
        brick.allocate()
        brick.initialize()

    #------------------------------------------------------------------------
    x = tensor.matrix('features')

    hr = mlpr.apply(x)
    hw = mlpw.apply(x)

    center_y, center_x, delta, sigma, gamma = reader.nn2att(hr)
    r = reader.read(x, center_y, center_x, delta, sigma)

    h = mlp.apply(r)

    center_y, center_x, delta, sigma, gamma = writer.nn2att(hw)
    c = writer.write(h, center_y, center_x, delta, sigma) / gamma
    x_recons = T.nnet.sigmoid(c)

    cost = BinaryCrossEntropy().apply(x, x_recons)
    cost.name = "cost"

    #------------------------------------------------------------
    cg = ComputationGraph([cost])
    params = VariableFilter(roles=[PARAMETER])(cg.variables)

    algorithm = GradientDescent(
        cost=cost, 
        params=params,
        step_rule=CompositeRule([
            RemoveNotFinite(),
            Adam(learning_rate),
            StepClipping(3.), 
        ])
        #step_rule=RMSProp(learning_rate),
        #step_rule=Momentum(learning_rate=learning_rate, momentum=0.95)
    )


    #------------------------------------------------------------------------
    # Setup monitors
    monitors = [cost]
    #for v in [center_y, center_x, log_delta, log_sigma, log_gamma]:
    #    v_mean = v.mean()
    #    v_mean.name = v.name
    #    monitors += [v_mean]
    #    monitors += [aggregation.mean(v)]

    train_monitors = monitors[:]
    train_monitors += [aggregation.mean(algorithm.total_gradient_norm)]
    train_monitors += [aggregation.mean(algorithm.total_step_norm)]

    # Live plotting...
    plot_channels = [
        ["cost"],
    ]

#.........這裏部分代碼省略.........
開發者ID:Philip-Bachman,項目名稱:NN-Python,代碼行數:103,代碼來源:run-att-rw.py


注:本文中的attention.ZoomableAttentionWindow.nn2att方法示例由純淨天空整理自Github/MSDocs等開源代碼及文檔管理平台,相關代碼片段篩選自各路編程大神貢獻的開源項目,源碼版權歸原作者所有,傳播和使用請參考對應項目的License;未經允許,請勿轉載。