

Python tensorflow.conj Function Code Examples

This article collects typical usage examples of the tensorflow.conj function in Python. If you are wondering exactly what conj does, how to call it, or what it looks like in real code, the hand-picked examples below should help.


Fifteen code examples of the conj function are shown below, sorted by popularity by default.
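
Before the examples, here is a minimal sketch of what the function computes: the elementwise complex conjugate of a tensor. This snippet is not taken from the projects below; it assumes TensorFlow 1.x (the API generation these examples use), where the op is available as tf.conj; in TF 2.x the same op lives at tf.math.conj.

import tensorflow as tf

# Elementwise complex conjugate: real parts unchanged, imaginary parts negated.
x = tf.constant([1 + 2j, 3 - 4j], dtype=tf.complex64)
y = tf.conj(x)

with tf.Session() as sess:
    print(sess.run(y))  # [1.-2.j 3.+4.j]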

Example 1: transfer_op_python

def transfer_op_python(As, Bs, direction, x):
    """
    (mixed) transfer operator for a list of mps tensors

    Parameters:
    ----------------------
    As,Bs:        list of tf.Tensor
                  the mps tensors (Bs are on the conjugated side)
    direction:    int or str
                  can be (1, 'l', 'left') or (-1, 'r', 'right') for a left or right
                  operation
    x:            tf.Tensor 
                  input matrix
    Returns:
    ----------------------
    tf.Tensor:  the evolved matrix

    """

    if direction in ('l', 'left', 1):
        for n in range(len(As)):
            x = ncon([x, As[n], tf.conj(Bs[n])], [(0, 1), (0, 2, -1),
                                                       (1, 2, -2)])
    elif direction in ('r', 'right', -1):
        for n in reversed(range(len(As))):
            x = ncon([x, As[n], tf.conj(Bs[n])], [(0, 1), (-1, 2, 0),
                                                       (-2, 2, 1)])
    else:
        raise ValueError("Invalid direction: {}".format(direction))

    return x
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 31, Source: misc_mps.py
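
The following is a small numpy sketch, not part of the original project, of what a single left-moving step of this transfer operator computes, assuming the (D_left, d, D_right) index ordering implied by the ncon pattern above: x'[c, e] = sum_{a,b,s} x[a, b] * A[a, s, c] * conj(B[b, s, e]).

import numpy as np

D, d = 4, 2
rng = np.random.default_rng(0)
A = rng.normal(size=(D, d, D)) + 1j * rng.normal(size=(D, d, D))  # ket-side MPS tensor
B = rng.normal(size=(D, d, D)) + 1j * rng.normal(size=(D, d, D))  # bra-side MPS tensor
x = np.eye(D, dtype=complex)                                      # input matrix

# One 'left' step of the transfer operator, written as an einsum.
x_new = np.einsum('ab,asc,bse->ce', x, A, np.conj(B))
print(x_new.shape)  # (4, 4): the evolved matrix, as transfer_op_python returns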

Example 2: add_layer_python

def add_layer_python(B, mps_tensor, mpo, conj_mps_tensor, direction):
    """
    adds an mps-mpo-mps layer to a left or right block "E"; used in DMRG to calculate the left and right
    environments
    Parameters:
    ---------------------------
    B:               Tensor object
                     a tensor of shape (D1,D1',M1) (for direction>0) or (D2,D2',M2) (for direction<0)
    mps_tensor:      Tensor object of shape (Dl,Dr,d)
    mpo:             Tensor object of shape (Ml,Mr,d,d')
    conj_mps_tensor: Tensor object of shape (Dl',Dr',d')
                     the mps tensor on the conjugated side;
                     this tensor will be complex conjugated inside the routine, so the user will usually
                     want to pass the unconjugated tensor
    direction:       int or str
                     direction in (1,'l','left'): add a layer to the right of ```B```
                     direction in (-1,'r','right'): add a layer to the left of ```B```
    Return:
    -----------------
    Tensor of shape (Dr,Dr',Mr) for direction in (1,'l','left')
    Tensor of shape (Dl,Dl',Ml) for direction in (-1,'r','right')
    """
    if direction in ('l', 'left', 1):
        return ncon(
            [B, mps_tensor, mpo, tf.conj(conj_mps_tensor)],
            [[1, 4, 3], [1, 2, -1], [3, -3, 5, 2], [4, 5, -2]])
    
    if direction in ('r', 'right', -1):
        return ncon(
            [B, mps_tensor, mpo, tf.conj(conj_mps_tensor)],
            [[1, 4, 3], [-1, 2, 1], [-3, 3, 5, 2], [-2, 5, 4]])
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 31, Source: misc_mps.py
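
As a rough illustration, here is an assumption-laden numpy sketch, not project code, with shapes read off the 'left' ncon pattern above (MPS tensor ordered (D_left, d, D_right), MPO ordered (M_left, M_right, d, d')): adding one mps-mpo-mps layer to a left block amounts to E'[c, e, n] = sum B[a, b, m] * A[a, s, c] * W[m, n, t, s] * conj(C[b, t, e]).

import numpy as np

D, M, d = 3, 2, 2
rng = np.random.default_rng(1)
B = rng.normal(size=(D, D, M))     # left block of shape (D1, D1', M1)
A = rng.normal(size=(D, d, D))     # MPS tensor
W = rng.normal(size=(M, M, d, d))  # MPO tensor
C = rng.normal(size=(D, d, D))     # MPS tensor on the conjugated side

E_new = np.einsum('abm,asc,mnts,bte->cen', B, A, W, np.conj(C))
print(E_new.shape)  # (3, 3, 2), i.e. (Dr, Dr', Mr) as stated in the docstring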

Example 3: __init__

    def __init__(self,
                 mps,
                 mpo,
                 name='InfiniteDMRG',
                 precision=1E-12,
                 precision_canonize=1E-12,
                 nmax=1000,
                 nmax_canonize=1000,
                 ncv=40,
                 numeig=1,
                 pinv=1E-20,
                 power_method=False):

        # if not isinstance(mps, InfiniteMPSCentralGauge):
        #     raise TypeError(
        #         'in InfiniteDMRGEngine.__init__(...): mps of type InfiniteMPSCentralGauge expected, got {0}'
        #         .format(type(mps)))

        mps.restore_form(
            precision=precision_canonize,
            ncv=ncv,
            nmax=nmax_canonize,
            numeig=numeig,
            power_method=power_method,
            pinv=pinv)  # this leaves the state in left-orthogonal form

        lb, hl = misc_mps.compute_steady_state_Hamiltonian_GMRES(
            'l',
            mps,
            mpo,
            left_dominant=tf.diag(tf.ones(mps.D[-1], dtype=mps.dtype)),
            right_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                     [[-1, 1], [-2, 1]]),
            precision=precision,
            nmax=nmax)

        rmps = mps.get_right_orthogonal_imps(
            precision=precision_canonize,
            ncv=ncv,
            nmax=nmax_canonize,
            numeig=numeig,
            pinv=pinv,
            restore_form=False)

        rb, hr = misc_mps.compute_steady_state_Hamiltonian_GMRES(
            'r',
            rmps,
            mpo,
            right_dominant=tf.diag(tf.ones(mps.D[0], dtype=mps.dtype)),
            left_dominant=ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                    [[1, -1], [1, -2]]),
            precision=precision,
            nmax=nmax)

        left_dominant = ncon.ncon([mps.mat, tf.conj(mps.mat)],
                                  [[1, -1], [1, -2]])
        out = mps.unitcell_transfer_op('l', left_dominant)

        super().__init__(mps=mps, mpo=mpo, lb=lb, rb=rb, name=name)
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 59, Source: DMRG.py

Example 4: steady_state_density_matrices

def steady_state_density_matrices(nsteps, rhoAB, rhoBA, w_isometry, v_isometry, unitary, refsym):
    for n in range(nsteps):
        rhoAB_, rhoBA_ = descending_super_operator(rhoAB, rhoBA, w_isometry, v_isometry, unitary,
                                                   refsym)
        rhoAB = (0.5 * (rhoAB_ + tf.conj(tf.transpose(rhoAB_, (2, 3, 0, 1))))
                 / ncon.ncon([rhoAB_], [[1, 2, 1, 2]]))
        rhoBA = (0.5 * (rhoBA_ + tf.conj(tf.transpose(rhoBA_, (2, 3, 0, 1))))
                 / ncon.ncon([rhoBA_], [[1, 2, 1, 2]]))
        
    return rhoAB, rhoBA
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 8, Source: modified_binary_mera.py
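
The update rule has a simple structure: symmetrize the four-index density matrix by averaging it with the complex conjugate of its (2, 3, 0, 1) transpose, then divide by the trace rho[i, j, i, j]. A small numpy sketch of that rule (illustrative shapes only, not project code):

import numpy as np

chi = 3
rng = np.random.default_rng(2)
shape = (chi, chi, chi, chi)
rho = rng.normal(size=shape) + 1j * rng.normal(size=shape)

rho_herm = 0.5 * (rho + np.conj(np.transpose(rho, (2, 3, 0, 1))))
rho_herm /= np.einsum('ijij->', rho_herm)   # normalize by the trace

# Viewed as a (chi*chi) x (chi*chi) matrix it is Hermitian with unit trace.
mat = rho_herm.reshape(chi * chi, chi * chi)
print(np.allclose(mat, mat.conj().T), np.isclose(np.trace(mat), 1.0))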

Example 5: _inference

    def _inference(self, x, dropout):
        with tf.name_scope('conv1'):
            # Transform to Fourier domain
            x_2d = tf.reshape(x, [-1, 28, 28])
            x_2d = tf.complex(x_2d, 0)
            xf_2d = tf.fft2d(x_2d)
            xf = tf.reshape(xf_2d, [-1, NFEATURES])
            xf = tf.expand_dims(xf, 1)  # NSAMPLES x 1 x NFEATURES
            xf = tf.transpose(xf)  # NFEATURES x 1 x NSAMPLES
            # Filter
            Wreal = self._weight_variable([int(NFEATURES/2), self.F, 1])
            Wimg = self._weight_variable([int(NFEATURES/2), self.F, 1])
            W = tf.complex(Wreal, Wimg)
            xf = xf[:int(NFEATURES/2), :, :]
            yf = tf.matmul(W, xf)  # for each feature
            yf = tf.concat([yf, tf.conj(yf)], axis=0)
            yf = tf.transpose(yf)  # NSAMPLES x NFILTERS x NFEATURES
            yf_2d = tf.reshape(yf, [-1, 28, 28])
            # Transform back to spatial domain
            y_2d = tf.ifft2d(yf_2d)
            y_2d = tf.real(y_2d)
            y = tf.reshape(y_2d, [-1, self.F, NFEATURES])
            # Bias and non-linearity
            b = self._bias_variable([1, self.F, 1])
#            b = self._bias_variable([1, self.F, NFEATURES])
            y += b  # NSAMPLES x NFILTERS x NFEATURES
            y = tf.nn.relu(y)
        with tf.name_scope('fc1'):
            W = self._weight_variable([self.F*NFEATURES, NCLASSES])
            b = self._bias_variable([NCLASSES])
            y = tf.reshape(y, [-1, self.F*NFEATURES])
            y = tf.matmul(y, W) + b
        return y
Developer: hyzcn, Project: cnn_graph, Lines of code: 33, Source: models.py
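
The conv1 block above keeps filter weights only for the first NFEATURES/2 Fourier coefficients and rebuilds the rest with tf.conj, leaning on the fact that the spectrum of a real signal is conjugate-symmetric. A short numpy sketch of that property (illustrative only, not project code):

import numpy as np

x = np.random.default_rng(3).normal(size=8)  # real signal
X = np.fft.fft(x)
k = np.arange(1, 8)
print(np.allclose(X[8 - k], np.conj(X[k])))  # True: X[N - k] == conj(X[k])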

Example 6: sparse_dot_product0

def sparse_dot_product0(emb, tuples, use_matmul=True, output_type='real'):
    """
    Compute the dot product of complex vectors.
    It uses complex vectors but tensorflow does not optimize in the complex space (or there is a bug in the gradient
    propagation with complex numbers...)
    :param emb: embeddings
    :param tuples: indices at which we compute dot products
    :return: scores (dot products)
    """
    n_t = tuples.get_shape()[0].value
    rk = emb.get_shape()[1].value
    emb_sel_a = tf.gather(emb, tuples[:, 0])
    emb_sel_b = tf.gather(emb, tuples[:, 1])
    if use_matmul:
        pred_cplx = tf.squeeze(tf.batch_matmul(
                tf.reshape(emb_sel_a, [n_t, rk, 1]),
                tf.reshape(emb_sel_b, [n_t, rk, 1]), adj_x=True))
    else:
        pred_cplx = tf.reduce_sum(tf.mul(tf.conj(emb_sel_a), emb_sel_b), 1)
    if output_type == 'complex':
        return pred_cplx
    elif output_type == 'real':
        return tf.real(pred_cplx) + tf.imag(pred_cplx)
    elif output_type == 'abs':
        return tf.abs(pred_cplx)
    elif output_type == 'angle':
        raise NotImplementedError('No argument or inverse-tanh function for complex number in Tensorflow')
    else:
        raise NotImplementedError()
Developer: Peratham, Project: factorix, Lines of code: 29, Source: hermitian.py
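
Per tuple, the quantity computed above is the Hermitian dot product <a, b> = sum_k conj(a_k) * b_k; the 'real' branch returns its real plus imaginary parts and the 'abs' branch its modulus. A minimal numpy sketch (illustrative only, not project code):

import numpy as np

a = np.array([1 + 2j, 3 - 1j])
b = np.array([2 - 1j, 1 + 4j])
score = np.sum(np.conj(a) * b)  # per-row analogue of tf.reduce_sum(tf.mul(tf.conj(...), ...), 1)
print(score, score.real + score.imag, np.abs(score))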

Example 7: tridiag_tensorflow

def tridiag_tensorflow(vecs, alpha, beta):
    Heff = tf.contrib.distributions.tridiag(beta, alpha, tf.conj(beta))
    eta, u = tf.linalg.eigh(Heff)  # could use tridiag
    out = ncon.ncon([vecs, u], [[1, -1, -2, -3], [1, -4]])
    out = out[:, :, :, 0]
    out = tf.math.divide(out, tf.linalg.norm(out))
    return eta[0], out
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 7, Source: Lanczos.py
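
For context, the conjugated beta appears because the effective Lanczos matrix is Hermitian tridiagonal: alpha on the diagonal, beta on one off-diagonal and conj(beta) on the other. A numpy sketch of that matrix and its eigenproblem (illustrative only, not project code):

import numpy as np

alpha = np.array([1.0, 2.0, 3.0])           # diagonal (real)
beta = np.array([0.5 + 0.1j, 0.25 - 0.2j])  # sub-diagonal

H = np.diag(alpha).astype(complex)
H += np.diag(beta, k=-1) + np.diag(np.conj(beta), k=1)  # Hermitian tridiagonal

eta, u = np.linalg.eigh(H)  # same kind of eigenproblem as for Heff above
print(eta[0])               # lowest Ritz value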

Example 8: visualize_data_transformations

def visualize_data_transformations():
    records = glob.glob(os.path.join(utils.working_dir, 'train_fragment_*.tfrecords'))
    dataset = tf.data.TFRecordDataset(records)
    dataset = dataset.map(parse_tfrecord_raw)
    dataset = dataset.repeat()
    dataset = dataset.shuffle(buffer_size=10)
    dataset = dataset.prefetch(2)
    it = dataset.make_one_shot_iterator()

    data_x = tf.placeholder(tf.float32, shape=(utils.sample_rate * utils.audio_clip_len,))
    data_y = tf.placeholder(tf.float32, shape=(utils.timesteps,))
    stfts = tf.contrib.signal.stft(data_x, frame_length=utils.frame_length, frame_step=utils.frame_step,
                                   fft_length=4096)
    power_stfts = tf.real(stfts * tf.conj(stfts))
    magnitude_spectrograms = tf.abs(stfts)
    power_magnitude_spectrograms = tf.abs(power_stfts)

    num_spectrogram_bins = magnitude_spectrograms.shape[-1].value

    # scale frequency to mel scale and put into bins to reduce dimensionality
    lower_edge_hertz, upper_edge_hertz = 30.0, 17000.0
    num_mel_bins = utils.mel_bins_base * 4
    linear_to_mel_weight_matrix = tf.contrib.signal.linear_to_mel_weight_matrix(
        num_mel_bins, num_spectrogram_bins, utils.sample_rate, lower_edge_hertz,
        upper_edge_hertz)
    mel_spectrograms = tf.tensordot(magnitude_spectrograms, linear_to_mel_weight_matrix, 1)
    mel_spectrograms.set_shape(
        magnitude_spectrograms.shape[:-1].concatenate(linear_to_mel_weight_matrix.shape[-1:]))

    # log scale the mel bins to better represent human loudness perception
    log_offset = 1e-6
    log_mel_spectrograms = tf.log(mel_spectrograms + log_offset)

    # compute first order differential and concat. "It indicates a raise or reduction of the energy for each
    # frequency bin at a frame relative to its predecessor"
    first_order_diff = tf.abs(
        tf.subtract(log_mel_spectrograms, tf.manip.roll(log_mel_spectrograms, shift=1, axis=1)))
    mel_fod = tf.concat([log_mel_spectrograms, first_order_diff], 1)

    with tf.Session() as sess:
        while True:
            try:
                raw_x, raw_y = sess.run(it.get_next())
                np_stfts = sess.run(power_stfts, feed_dict={data_x: raw_x})
                np_magnitude_spectrograms = sess.run(power_magnitude_spectrograms, feed_dict={data_x: raw_x})
                np_mel_spectrograms = sess.run(mel_spectrograms, feed_dict={data_x: raw_x})
                np_log_mel_spectrograms = sess.run(log_mel_spectrograms, feed_dict={data_x: raw_x})
                np_mel_fod = sess.run(mel_fod, feed_dict={data_x: raw_x})

                utils.plot_signal_transforms(raw_x,
                                            np_stfts,
                                            np_magnitude_spectrograms,
                                            np_mel_spectrograms,
                                            np_log_mel_spectrograms,
                                            np_mel_fod)
                print('wank')

            except tf.errors.OutOfRangeError:
                break
Developer: nearlyeveryone, Project: bpm, Lines of code: 59, Source: bpm_estimator.py
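
The power spectrogram above uses tf.real(stfts * tf.conj(stfts)); multiplying a complex value by its conjugate gives its squared magnitude with zero imaginary part. A brief numpy sketch of that identity (illustrative only, not project code):

import numpy as np

X = np.fft.rfft(np.random.default_rng(4).normal(size=64))
power = np.real(X * np.conj(X))
print(np.allclose(power, np.abs(X) ** 2))  # True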

Example 9: _compareConj

 def _compareConj(self, cplx, use_gpu):
   np_ans = np.conj(cplx)
   with self.test_session(use_gpu=use_gpu):
     inx = tf.convert_to_tensor(cplx)
     tf_conj = tf.conj(inx)
     tf_ans = tf_conj.eval()
   self.assertAllEqual(np_ans, tf_ans)
   self.assertShapeEqual(np_ans, tf_conj)
Developer: adeelzaman, Project: tensorflow, Lines of code: 8, Source: cwise_ops_test.py

Example 10: gram_schmidt_step

 def gram_schmidt_step(j, basis, v):
     """Makes v orthogonal to the j'th vector in basis."""
     #v_shape = v.get_shape()
     basis_vec = basis.read(j)
     v -=  ncon.ncon([tf.reshape(tf.conj(basis_vec), [basis_vec.shape[0] * basis_vec.shape[1] * basis_vec.shape[2]]),
                      tf.reshape(v, [v.shape[0] * v.shape[1] * v.shape[2]])], [[1], [1]])* basis_vec
     #v.set_shape(v_shape)
     return j + 1, basis, v
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 8, Source: Lanczos.py
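
The flattened contraction with tf.conj(basis_vec) is just the complex inner product <basis_vec, v>, so the step subtracts the projection of v onto basis_vec. A compact numpy sketch (illustrative only, not project code), where np.vdot conjugates its first argument:

import numpy as np

rng = np.random.default_rng(5)
shape = (2, 3, 4)
basis_vec = rng.normal(size=shape) + 1j * rng.normal(size=shape)
basis_vec /= np.linalg.norm(basis_vec)
v = rng.normal(size=shape) + 1j * rng.normal(size=shape)

v -= np.vdot(basis_vec, v) * basis_vec  # subtract the projection onto basis_vec
print(abs(np.vdot(basis_vec, v)))       # ~0: v is now orthogonal to basis_vec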

Example 11: descend_state_1site_R

def descend_state_1site_R(state_1site, iso_012):  #χ^4
    """Descends a state from the top to the rightmost index of the isometry `iso`.
    Physically, if `iso` has 012 ordering, this is a descent to the right and
    if `iso` has 021 ordering, this is a descent to the left.
    """
    return tensornetwork.ncon(
        [iso_012, state_1site, tf.conj(iso_012)], [(1, 2, -1), (1, 0),
                                                   (0, 2, -2)])
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 8, Source: ttn_1d_uniform.py

Example 12: CheckUnitary

 def CheckUnitary(self, x):
   # Tests that x[...,:,:]^H * x[...,:,:] is close to the identity.
   xx = tf.matmul(tf.conj(x), x, transpose_a=True)
   identity = tf.matrix_band_part(tf.ones_like(xx), 0, 0)
   if is_single:
     tol = 1e-5
   else:
     tol = 1e-14
   self.assertAllClose(identity.eval(), xx.eval(), atol=tol)
Developer: ComeOnGetMe, Project: tensorflow, Lines of code: 9, Source: qr_op_test.py
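
The check rests on the identity x^H x = I for a matrix with orthonormal columns, which is what tf.matmul(tf.conj(x), x, transpose_a=True) evaluates. A small numpy sketch (illustrative only, not project code):

import numpy as np

rng = np.random.default_rng(6)
a = rng.normal(size=(5, 3)) + 1j * rng.normal(size=(5, 3))
q, _ = np.linalg.qr(a)             # q has orthonormal columns
xx = np.conj(q).T @ q
print(np.allclose(xx, np.eye(3)))  # True: q^H q is the identity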

Example 13: get_env_disentangler

def get_env_disentangler(hamAB,hamBA,rhoBA,w,v,u,refsym):

    indList1 = [[7,8,10,-1],[4,3,9,2],[10,-3,9],[7,5,4],[8,-2,5,6],[1,-4,2],[1,6,3]]
    indList2 = [[7,8,-1,-2],[3,6,2,5],[1,-3,2],[1,9,3],[7,8,9,10],[4,-4,5],[4,10,6]]
    indList3 = [[7,8,-2,10],[3,4,2,9],[1,-3,2],[1,5,3],[-1,7,5,6],[10,-4,9],[8,6,4]]

    uEnv = ncon.ncon([hamAB,rhoBA,w,tf.conj(w),tf.conj(u),v,tf.conj(v)],indList1)
    if refsym:
        uEnv = uEnv + tf.transpose(uEnv,(1,0,3,2))
    else:
        uEnv = uEnv + ncon.ncon([hamAB,rhoBA,w,tf.conj(w),tf.conj(u),v,tf.conj(v)],indList3)
    
    uEnv = uEnv + ncon.ncon([hamBA,rhoBA,w,tf.conj(w),tf.conj(u),v,tf.conj(v)],indList2)

    return uEnv
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 15, Source: modified_binary_mera.py

Example 14: _mpo_with_state

def _mpo_with_state(iso_012, iso_021, h_mpo_2site, state_1site):
    # contract ascended hamiltonian at level `lup` with nearest 1-site descended state
    h2L, h2R = h_mpo_2site

    envL = [
        tensornetwork.ncon(
            [state_1site, iso_021, h, tf.conj(iso_012)],
            [(0, 2), (0, -1, 1), (3, 1), (2, 3, -2)])  # one transpose required
        for h in h2L
    ]

    envR = [
        tensornetwork.ncon(
            [state_1site, iso_012, h, tf.conj(iso_021)],
            [(0, 2), (0, -1, 1), (3, 1), (2, 3, -2)])  # one transpose required
        for h in h2R
    ]

    return envL, envR
Developer: zoltanegyed, Project: TensorNetwork, Lines of code: 19, Source: ttn_1d_uniform.py

Example 15: testComplexConj

 def testComplexConj(self):
   with self.test_session():
     size = ()
     x = tf.constant(11 - 13j, dtype=tf.complex64)
     y = tf.conj(x)
     analytical, numerical = tf.test.compute_gradient(x, size, y, size)
     correct = np.array([[1, 0], [0, -1]])
     self.assertAllEqual(correct, analytical)
     self.assertAllClose(correct, numerical, rtol=3e-6)
     self.assertLess(tf.test.compute_gradient_error(x, size, y, size), 2e-5)
Developer: 0-T-0, Project: tensorflow, Lines of code: 10, Source: gradient_checker_test.py
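
The expected Jacobian [[1, 0], [0, -1]] follows from viewing z = a + bi as the real pair (a, b): conjugation maps (a, b) to (a, -b). A tiny numpy sketch of that viewpoint (illustrative only, not using tf.test.compute_gradient):

import numpy as np

J = np.array([[1.0, 0.0],
              [0.0, -1.0]])  # Jacobian of (a, b) -> (a, -b)
z = np.array([11.0, -13.0])  # the test point 11 - 13j as (real, imag)
print(J @ z)                 # [11. 13.]: the components of conj(11 - 13j)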


Note: The tensorflow.conj examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets are drawn from open-source projects contributed by their respective authors, and copyright remains with the original authors. Please consult each project's license before using or distributing the code; do not republish without permission.