

Python Space.build Method Code Examples

This article collects typical usage examples of the Python method composes.semantic_space.space.Space.build. If you are wondering how Space.build is used in Python, how to call it, or what real example code looks like, the curated snippets below should help. You can also browse further usage examples for the containing class, composes.semantic_space.space.Space.


The following presents 15 code examples of the Space.build method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
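Before the individual examples, here is a minimal sketch of the two Space.build call patterns that recur throughout this page: building a semantic space from a sparse co-occurrence file (format "sm") with explicit row/column label files, and from a dense matrix file (format "dm"). The file names used below (counts.sm, counts.rows, counts.cols, vectors.dm) are placeholders for illustration only and do not come from any of the projects quoted in the examples.

from composes.semantic_space.space import Space

# Sparse input: "word context count" triples in counts.sm.
# The cols file is required for sparse input; the rows file is optional
# (see Examples 11 and 12 below).
sparse_space = Space.build(data="counts.sm",
                           rows="counts.rows",
                           cols="counts.cols",
                           format="sm")

# Dense input: one "word v1 v2 ... vn" line per row in vectors.dm;
# row labels are taken from the first column, so no label files are needed.
dense_space = Space.build(data="vectors.dm", format="dm")

print(sparse_space.id2row)              # row labels of the sparse space
print(dense_space.cooccurrence_matrix)  # underlying DenseMatrix wrapper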

Example 1: test_simple_sparse_zipped

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_simple_sparse_zipped(self):
            
        bcs.main(["build_core_space.py", 
                  "-l", self.dir_ + "log1.txt",
                  "-i", self.dir_ + "mat1", 
                  "-o", self.dir_,
                  "--input_format", "sm",
                  "--output_format", "sm",
                  "--gz", "True"
                  ])
        
        s1 = Space.build(data=self.dir_ + "mat1.sm.gz",
                         cols= self.dir_ + "mat1.cols",
                         format = "sm")

        s2 = Space.build(data=self.dir_ + "CORE_SS.mat1.sm",
                         cols=self.dir_ + "CORE_SS.mat1.cols",
                         format="sm")
        s3 = io_utils.load(self.dir_ + "CORE_SS.mat1.pkl", Space)
        s4 = Space.build(data=self.dir_ + "mat1.sm",
                         cols= self.dir_ + "mat1.cols",
                         format = "sm")
                
        self._test_equal_spaces_sparse(s1, s2)
        self._test_equal_spaces_sparse(s1, s3)
        self._test_equal_spaces_sparse(s1, s4)
Developer: georgiana-dinu, Project: dissect, Lines: 28, Source file: bcs_pipeline_test.py

Example 2: train_from_core

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
def train_from_core(lexical_space_file, an_dn_file, pn_file, sv_file, vo_file, output_file_prefix):
    
    if (not exists(lexical_space_file) or not exists(pn_file) or not exists(sv_file)
        or not exists(vo_file) or not exists(an_dn_file)):
        print "some file doesn't exist"
        print lexical_space_file, an_dn_file, pn_file, sv_file, vo_file
    
    print "load core"
    core_space = Space.build(data=lexical_space_file, format="dm")
    print "load an dn"
    
    an_dn_space = Space.build(data=an_dn_file, format="dm")
    print "load pn"
    pn_space = Space.build(data=pn_file, format="dm")
    print "load sv"
    sv_space = Space.build(data=sv_file, format="dm")
    print "load vo"
    vo_space = Space.build(data=vo_file, format="dm")
    
    print "start training"
    all_mat_space_normed = train_all_spaces(core_space, an_dn_space, 
                                     pn_space, sv_space, vo_space)
    print "exporting trained file"
    all_mat_space_normed.export(output_file_prefix, format="dm")
    del all_mat_space_normed
    print "DONE"
Developer: thenghiapham, Project: p_tree_kernel, Lines: 28, Source file: train_plf.py

Example 3: test_build_data

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_build_data(self):

        test_cases = [("data1",["red", "blue"], ["car", "man"],
                       np.mat([[3,5],[0,10]]), np.mat([[3,5],[0,10]])),
                      ("data2",["red"], ["car"],
                       np.mat([[3]]), np.mat([[3]])),
                      ("data3",["red", "blue"], ["car", "man"],
                       np.mat([[15,0],[0,6]]), np.mat([[5,0],[0,6]])),
                      ("data7",["red"], ["car"], np.mat([[0]]), np.mat([[0]])),
                      ("data9",["man"], ["car"], np.mat([[4]]), None),
                      ]
        for data_file, rows, cols, smat, dmat in test_cases:
            data_file1 = self.dir_ + data_file + ".sparse"

            sp = Space.build(data=data_file1,
                             cols= self.dir_ + data_file + ".cols",
                             format="sm")
            self.assertListEqual(rows, sp.id2row)
            self.assertListEqual(cols, sp.id2column)

            self.assertIsInstance(sp.cooccurrence_matrix, SparseMatrix)
            np.testing.assert_array_equal(smat,
                                          sp.cooccurrence_matrix.mat.todense())

            data_file2 = self.dir_ + data_file + ".dense"
            if not dmat is None:
                sp = Space.build(data=data_file2, format="dm")
                self.assertListEqual(rows, sp.id2row)
                self.assertListEqual([], sp.id2column)

                self.assertIsInstance(sp.cooccurrence_matrix, DenseMatrix)
                np.testing.assert_array_equal(dmat, sp.cooccurrence_matrix.mat)
Developer: Aliases, Project: dissect, Lines: 34, Source file: space_test.py

Example 4: eval_on_file

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
def eval_on_file(path_composed_emb, path_observed_emb, save_path):
    raw_observed_space = Space.build(data=path_observed_emb, format='dm')
    observed_space = raw_observed_space.apply(RowNormalization('length'))
    observed_words = observed_space.get_id2row()
    print("Observed words, size: " + str(len(observed_words)) + ", first:")
    print(observed_words[:10])
    observed_words_set = set(observed_words)

    raw_composed_space = Space.build(data=path_composed_emb, format='dm')
    composed_space = raw_composed_space.apply(RowNormalization('length'))
    composed_words = composed_space.get_id2row()
    print("Composed words, size: " + str(len(composed_words)) + ", first:")
    print(composed_words[:10])

    # all composed words should be in the initial space
    for idx, word in enumerate(composed_words):
        assert(word in observed_words_set)

    q1, q2, q3, ranks = evaluateRank(composed_words, composed_space, observed_space)
    print("Q1: " + str(q1) + ", Q2: " + str(q2) + ", Q3: " + str(q3))

    printDictToFile(ranks, save_path + '_rankedCompounds.txt')
    
    sortedRanks = sorted(ranks.values())
    printListToFile(sortedRanks, save_path + '_ranks.txt')
    logResult(q1, q2, q3, save_path + '_quartiles.txt')

    return q1,q2,q3,ranks
Developer: corinadima, Project: gWordcomp, Lines: 30, Source file: composition_eval.py

Example 5: test_simple_dense

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_simple_dense(self):
            
        bcs.main(["build_core_space.py", 
                  "-l", self.dir_ + "log1.txt",
                  "-i", self.dir_ + "mat2", 
                  "-o", self.dir_,
                  "--input_format", "dm",
                  "--output_format", "dm"
                  ])
        
        s1 = Space.build(data = self.dir_ + "mat2.dm", format = "dm")
        s2 = Space.build(data = self.dir_ + "CORE_SS.mat2.dm", format="dm")
        s3 = io_utils.load(self.dir_ + "CORE_SS.mat2.pkl", Space)
        
        self._test_equal_spaces_dense(s1, s2)
        self._test_equal_spaces_dense(s1, s3)        
 
        bcs.main(["build_core_space.py", 
                  "-l", self.dir_ + "log1.txt",
                  "-i", self.dir_ + "CORE_SS.mat2", 
                  "-o", self.dir_,
                  "--input_format", "pkl",
                  "--output_format", "dm"
                  ])
        
        s1 = io_utils.load(self.dir_ + "CORE_SS.CORE_SS.mat2.pkl", Space)
        s3 = io_utils.load(self.dir_ + "CORE_SS.mat2.pkl", Space)
        
        self._test_equal_spaces_dense(s1, s3)  
Developer: georgiana-dinu, Project: dissect, Lines: 31, Source file: bcs_pipeline_test.py

Example 6: test_as_conversion_tool

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_as_conversion_tool(self):
        
        bcs.main(["build_core_space.py", 
                  "-i", self.dir_ + "mat3", 
                  "-o", self.dir_,
                  "--input_format", "sm",
                  "--output_format", "sm"
                  ])        
        
        s1 = Space.build(data=self.dir_ + "mat3.sm",
                         cols= self.dir_ + "mat3.cols",
                         format = "sm")
        s2 = Space.build(data=self.dir_ + "CORE_SS.mat3.sm",
                         rows=self.dir_ + "CORE_SS.mat3.rows",
                         cols=self.dir_ + "CORE_SS.mat3.cols", 
                         format="sm")
        s3 = io_utils.load(self.dir_ + "CORE_SS.mat3.pkl", Space)
        
        self._test_equal_spaces_sparse(s1, s2)
        self._test_equal_spaces_sparse(s1, s3)
        
        bcs.main(["build_core_space.py", 
                  "-i", self.dir_ + "mat3", 
                  "-o", self.dir_,
                  "--input_format", "sm",
                  "--output_format", "dm"
                  ])
        
        s1 = Space.build(data=self.dir_ + "mat3.dm",
                         cols=self.dir_ + "CORE_SS.mat3.cols",
                         format = "dm")
        s2 = Space.build(data=self.dir_ + "CORE_SS.mat3.dm",
                         rows=self.dir_ + "CORE_SS.mat3.rows",
                         cols=self.dir_ + "CORE_SS.mat3.cols",
                         format = "dm")                 
        s3 = io_utils.load(self.dir_ + "CORE_SS.mat3.pkl", Space)

        self._test_equal_spaces_dense(s1, s2)
        s3.to_dense()
        self._test_equal_spaces_dense(s1, s3)
        
        bcs.main(["build_core_space.py", 
                  "-i", self.dir_ + "mat3", 
                  "-o", self.dir_,
                  "--input_format", "dm",
                  "--output_format", "dm"
                  ])        
       
        s1 = Space.build(data=self.dir_ + "CORE_SS.mat3.dm",
                         cols=self.dir_ + "CORE_SS.mat3.cols",
                         format = "dm")                 
        s3 = io_utils.load(self.dir_ + "CORE_SS.mat3.pkl", Space)
        
        s3.to_dense()
        self._test_equal_spaces_dense(s1, s3)
Developer: georgiana-dinu, Project: dissect, Lines: 57, Source file: bcs_pipeline_test.py

Example 7: test_simple_ops

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_simple_ops(self):

        bcs.main(["build_core_space.py",
                  "-l", self.dir_ + "log1.txt",
                  "-i", self.dir_ + "mat3",
                  "-w", "raw",
                  "-s", "top_sum_3,top_length_3,top_sum_4",
                  "-r", "svd_2,svd_1",
                  "-o", self.dir_,
                  "--input_format", "dm",
                  "--output_format", "dm"
                  ])

        core_mats = ["CORE_SS.mat3.raw.top_sum_3.svd_2",
                     "CORE_SS.mat3.raw.top_sum_3.svd_1",
                     "CORE_SS.mat3.raw.top_length_3.svd_2",
                     "CORE_SS.mat3.raw.top_length_3.svd_1",
                     "CORE_SS.mat3.raw.top_sum_4.svd_2",
                     "CORE_SS.mat3.raw.top_sum_4.svd_1"
                     ]

        core_spaces = [Space.build(data=self.dir_ + suffix + ".dm", format="dm") for suffix in core_mats]

        for i, core_mat in enumerate(core_mats):
            bps.main(["build_peripheral_space.py",
                      "-l", self.dir_ + "log1.txt",
                      "-i", self.dir_ + "mat3",
                      "-o", self.dir_,
                      "-c", self.dir_ + core_mat + ".pkl",
                      "--input_format", "dm",
                      "--output_format", "dm"
                      ])

            s1 = core_spaces[i]
            data_file = self.dir_ + "PER_SS.mat3." + core_mats[i] + ".dm"
            s2 = Space.build(data=data_file, format="dm")
            self._test_equal_spaces_dense(s1, s2)

            bps.main(["build_peripheral_space.py",
                      "-l", self.dir_ + "log1.txt",
                      "-i", self.dir_ + "mat3",
                      "-o", self.dir_,
                      "-c", self.dir_ + core_mat + ".pkl",
                      "--input_format", "sm",
                      "--output_format", "dm"
                      ])

            s1 = core_spaces[i]
            data_file = self.dir_ + "PER_SS.mat3." + core_mats[i] + ".dm"
            s2 = Space.build(data=data_file, format="dm")

            self._test_equal_spaces_dense(s1, s2)
Developer: Aliases, Project: dissect, Lines: 54, Source file: bps_pipeline_test.py

Example 8: test_simple_lstsq_no_inter

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
 def test_simple_lstsq_no_inter(self):
     tc.main(["train_composition.py", 
               "-l", self.dir_ + "log1.txt",
               "-i", self.dir_ + "an_train_data.txt", 
               "-o", self.dir_,
               "-m", "lexical_func",
               "-p", self.dir_ + "CORE_SS.AN_mat.pkl",
               "-a", self.dir_ + "CORE_SS.N_mat.pkl",
               "-r", "lstsq",
               "--intercept", "False",
               "--export_params", "True"
               ]) 
     
     trained = io_utils.load(self.dir_ + "TRAINED_COMP_MODEL.lexical_func.an_train_data.txt.pkl")
     new_space = trained.function_space
     np.testing.assert_array_almost_equal(new_space.cooccurrence_matrix.mat, 
                                          np.mat([1,0,0,1]), 10)
     self.assertTupleEqual(new_space.element_shape, (2,2))
     self.assertListEqual(new_space.id2row, ["big"])
     self.assertListEqual(new_space.id2column, [])
     
     a_space = Space.build(data=self.dir_ + "TRAINED_COMP_MODEL.lexical_func.an_train_data.txt.params.dm", 
                           format="dm")
     
     self._test_equal_spaces_dense(a_space, new_space)
     
     tc.main(["train_composition.py", 
               "-l", self.dir_ + "log1.txt",
               "-i", self.dir_ + "an_train_data.txt", 
               "-o", self.dir_,
               "-m", "lexical_func",
               "-p", self.dir_ + "CORE_SS.AN_mat.pkl",
               "-a", self.dir_ + "CORE_SS.N_mat.pkl",
               "-r", "ridge",
               "--lambda", "0",
               "--crossvalidation", "False",
               "--intercept", "False",
               "--export_params", "True"
               ]) 
     
     trained = io_utils.load(self.dir_ + "TRAINED_COMP_MODEL.lexical_func.an_train_data.txt.pkl")
     new_space2 = trained.function_space
     np.testing.assert_array_almost_equal(new_space2.cooccurrence_matrix.mat, 
                                          np.mat([1,0,0,1]), 10)
     self.assertTupleEqual(new_space2.element_shape, (2,2))
     self.assertListEqual(new_space2.id2row, ["big"])
     self.assertListEqual(new_space2.id2column, [])
     
     a_space = Space.build(data=self.dir_ + "TRAINED_COMP_MODEL.lexical_func.an_train_data.txt.params.dm", 
                           format="dm")
     
     self._test_equal_spaces_dense(a_space, new_space2)
Developer: georgiana-dinu, Project: dissect, Lines: 54, Source file: tc_pipeline_test.py

Example 9: test_simple_dense

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_simple_dense(self):
        bps.main(["build_peripheral_space.py",
                  "-l", self.dir_ + "log1.txt",
                  "-i", self.dir_ + "mat2",
                  "-o", self.dir_,
                  "-c", self.dir_ + "CORE_SS.mat2.pkl",
                  "--input_format", "dm",
                  "--output_format", "dm"
                  ])
        s1 = Space.build(data=self.dir_ + "mat2.dm", format="dm")
        s2 = Space.build(data=self.dir_ + "PER_SS.mat2.CORE_SS.mat2.dm", format="dm")

        self._test_equal_spaces_dense(s1, s2)
Developer: Aliases, Project: dissect, Lines: 15, Source file: bps_pipeline_test.py

Example 10: test_to_dissect_sparse_files

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
def test_to_dissect_sparse_files(vectors_c, tmpdir):
    """

    :type vectors_c: Thesaurus
    :type tmpdir: py.path.local
    """
    from composes.semantic_space.space import Space

    prefix = str(tmpdir.join('output'))
    vectors_c.to_dissect_sparse_files(prefix)
    # check that files are there
    for suffix in ['sm', 'rows', 'cols']:
        outfile = '{}.{}'.format(prefix, suffix)
        assert os.path.exists(outfile)
        assert os.path.isfile(outfile)

    # check that reading the files in results in the same matrix
    space = Space.build(data="{}.sm".format(prefix),
                        rows="{}.rows".format(prefix),
                        cols="{}.cols".format(prefix),
                        format="sm")

    matrix, rows, cols = space.cooccurrence_matrix.mat, space.id2row, space.id2column
    exp_matrix, exp_cols, exp_rows = vectors_c.to_sparse_matrix()

    assert exp_cols == cols
    assert exp_rows == rows
    assert_array_equal(exp_matrix.A, matrix.A)
    _assert_matrix_of_thesaurus_c_is_as_expected(matrix.A, rows, cols)
    _assert_matrix_of_thesaurus_c_is_as_expected(exp_matrix.A, exp_rows, exp_cols)
Developer: mbatchkarov, Project: DiscoUtils, Lines: 32, Source file: test_thesaurus.py

Example 11: build_raw_per_space

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
def build_raw_per_space(in_file_prefix, in_format, is_gz):

    if not in_format in ("sm", "dm", "pkl"):
        raise ValueError("Invalid input format:%s" % in_format)

    data_file = "%s.%s" % (in_file_prefix, in_format)

    if in_format == "pkl":
        space = io_utils.load(data_file, Space)

    else:
        if is_gz:
            data_file = "%s.gz" % data_file
        row_file = "%s.rows" % (in_file_prefix)
        column_file = "%s.cols" % (in_file_prefix)
        if not os.path.exists(row_file):
            row_file = None
        if not os.path.exists(column_file):
            if in_format == "sm":
                raise ValueError("Column file: %s needs to be provided!" % column_file)
            column_file = None
        print "Building matrix..."
        space = Space.build(data=data_file, rows=row_file, cols=column_file, format=in_format)

    return space
Developer: Rygbee, Project: dissect, Lines: 27, Source file: build_peripheral_space.py

Example 12: test_build_data_row_col

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_build_data_row_col(self):
        test_cases = [("data1", "row1.row", "col1.col", ["red"], ["man", "car"],
                       np.mat([[5,3]]), np.mat([[3,5]])),
                      ("data1", "row1.row", "col5.col", ["red"], ["man", "car"],
                       np.mat([[5,3]]), np.mat([[3,5]])),
                      ("data3", "row2.row", "col2.col", ["blue", "red"], ["car"],
                       np.mat([[0],[15]]), None),
                      ("data2", "row1.row","col1.col", ["red"], ["man","car"],
                       np.mat([[0,3]]), None),
                      ("data3", "row3.row", "col3.col", ["blue", "red"], ["man", "car"],
                       np.mat([[6,0],[0,15]]), np.mat([[0,6],[5,0]])),
                      ("data7", "row2.row", "col3.col", ["blue", "red"], ["man", "car"],
                       np.mat([[0,0],[0,0]]), None),
                      ("data3", "row2.row", "col4.col", ["blue", "red"], ["airplane"],
                       np.mat([[0],[0]]), None)
                      ]

        for data_file, row_file, col_file, rows, cols, smat, dmat in test_cases:
            row_file = self.dir_ + row_file
            col_file = self.dir_ + col_file

            data_file1 = self.dir_ + data_file + ".sparse"

            if smat is None:
                self.assertRaises(ValueError, Space.build, data=data_file1, rows= row_file, cols=col_file, format="sm")

            else:
                sp = Space.build(data=data_file1, rows= row_file, cols=col_file, format="sm")
                self.assertListEqual(rows, sp.id2row)
                self.assertListEqual(cols, sp.id2column)

                self.assertIsInstance(sp.cooccurrence_matrix, SparseMatrix)
                np.testing.assert_array_equal(smat,
                                              sp.cooccurrence_matrix.mat.todense())

            data_file2 = self.dir_ + data_file + ".dense"

            if dmat is None:
                self.assertRaises(ValueError, Space.build, data=data_file2, rows= row_file, cols=col_file, format="dm")

            else:
                sp = Space.build(data=data_file2, rows= row_file, cols=col_file, format="dm")
                self.assertListEqual(rows, sp.id2row)
                self.assertListEqual(cols, sp.id2column)

                self.assertIsInstance(sp.cooccurrence_matrix, DenseMatrix)
                np.testing.assert_array_equal(dmat, sp.cooccurrence_matrix.mat)
Developer: Aliases, Project: dissect, Lines: 49, Source file: space_test.py

Example 13: test_simple_sparse

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def test_simple_sparse(self):

        bps.main(["build_peripheral_space.py",
                  "-l", self.dir_ + "log1.txt",
                  "-i", self.dir_ + "mat1",
                  "-o", self.dir_,
                  "-c", self.dir_ + "CORE_SS.mat1.pkl",
                  "--input_format", "sm",
                  "--output_format", "sm"
                  ])

        s1 = Space.build(data=self.dir_ + "mat1.sm",
                         cols=self.dir_ + "mat1.cols",
                         format="sm")
        s2 = Space.build(data=self.dir_ + "PER_SS.mat1.CORE_SS.mat1.sm",
                         cols=self.dir_ + "PER_SS.mat1.CORE_SS.mat1.cols",
                         format="sm")

        self._test_equal_spaces_sparse(s1, s2)
Developer: Aliases, Project: dissect, Lines: 21, Source file: bps_pipeline_test.py

Example 14: write_pkl

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
    def write_pkl(self):
        """
        Create spaces from co-occurrence counts in sparse format (.sm)
        """

        # For direction DE-EN
        my_space_1 = Space.build(
            data=OUTPUT_FILE_DE_DE_EN_SM, rows=OUTPUT_FILE_DE_WORDS_ROW, cols=OUTPUT_FILE_DE_EN_WORDS_COL, format="sm"
        )

        # For direction EN-DE
        my_space_2 = Space.build(
            data=OUTPUT_FILE_EN_EN_DE_SM, rows=OUTPUT_FILE_EN_WORDS_ROW, cols=OUTPUT_FILE_DE_EN_WORDS_COL, format="sm"
        )

        # Save the space objects in pickle format
        io_utils.save(my_space_1, OUTPUT_FILE_DE_DE_EN_PKL)
        io_utils.save(my_space_2, OUTPUT_FILE_EN_EN_DE_PKL)

        print >> stderr, "Pickle file 1 written out:", OUTPUT_FILE_DE_DE_EN_PKL
        print >> stderr, "Pickle file 2 written out:", OUTPUT_FILE_EN_EN_DE_PKL
Developer: 2mh, Project: europarl-dissect, Lines: 23, Source file: create_input_data.py

Example 15: build_unigram_space

# Required import: from composes.semantic_space.space import Space [as alias]
# Or: from composes.semantic_space.space.Space import build [as alias]
def build_unigram_space():
    unigram_space = Space.build(data=args.function[3],
                                rows=args.function[2],
                                cols=args.function[1],
                                format="sm")

    ppmi_space = ppmi(unigram_space)
    ppmi_norm_space = norm(ppmi_space)
    ppmi_norm_svd_space = svd(ppmi_norm_space)

    save_space(ppmi_norm_svd_space, "unigrams_space")
    return ppmi_norm_svd_space
Developer: anupama-gupta, Project: AN_Composition, Lines: 14, Source file: semantic_space.py


Note: The composes.semantic_space.space.Space.build examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by their respective authors; copyright remains with the original authors, and distribution and use are governed by each project's license. Do not reproduce without permission.