

Python Dataset.Dataset Method Code Examples

This article collects and summarizes typical usage examples of the Dataset.Dataset method in Python. If you are wondering what the Python Dataset.Dataset method does, how to use it, or what it looks like in practice, the curated code examples below may help. You can also explore further usage examples from the Dataset module in which the method lives.


The following shows 11 code examples of the Dataset.Dataset method, sorted by popularity by default.
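Each example assumes one of the two import styles shown in the comment header of its snippet. A minimal sketch of those styles (the module path is an assumption; in the source projects Dataset.py simply sits on the import path):

# Either import the module and qualify the class:
import Dataset
DatasetClass = Dataset.Dataset

# Or import the class directly (note: this rebinds the name Dataset from the module to the class):
from Dataset import Dataset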

Example 1: map

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def map(self, mapping_f, filename = None):
        if filename is None:
            fname = tempfile.mktemp()
            delete = True
        else:
            fname = filename
            delete = False            
        f = h5py.File(fname, "w")
        #Transform file by file        
        for x in self.file_iterator(path = True):
            route = x[-1]
            x = x[:-1]
            y = mapping_f(*x)
            if np.prod(y[0].shape) > 0:
                for i,v in enumerate(y):
                    f[route+"/"+str(i)] = v
        f.close()
        return Dataset(fname, 
                       self.entries_regexp, 
                       tuple(str(i) for i in xrange(len(y))), 
                       delete_after_use = delete) 
Developer: MarcCote, Project: NADE, Lines of code: 23, Source file: SpeechDataset.py
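For context, a hypothetical usage sketch of the map method above (the dataset instance, the double mapping function and the output filename are illustrative, not taken from the NADE project):

# Illustrative mapping function: scale every array of a file entry by 2.
def double(*arrays):
    return tuple(2 * a for a in arrays)

# dataset is assumed to be an already constructed SpeechDataset-style Dataset.
mapped = dataset.map(double)                                  # results written to a temporary HDF5 file
mapped_named = dataset.map(double, filename="doubled.hdf5")   # keep the output file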

Example 2: _prepare_graph

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _prepare_graph(self, params):
        tf.reset_default_graph()
        self.lr = tf.placeholder(tf.float32, shape=())

        self.dataset = Dataset(params)

        logits = self._prepare_model(self.dataset.img_data)
        float_y = tf.cast(self.dataset.labels, tf.float32)
        cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=float_y)
        self.loss = tf.reduce_sum(cross_entropy)

        sigmoid_logits = tf.nn.sigmoid(logits)
        self.predictions = tf.cast(tf.round(sigmoid_logits), tf.int32, name='predictions')
        self.accuracy = tf.reduce_sum(tf.reduce_min(tf.cast(tf.equal(self.predictions, self.dataset.labels),
                                                                tf.float32), axis=1))

        self._prepare_optimizer_stage(fine_tune_upto=1) 
Developer: Prasad9, Project: TFHubSample, Lines of code: 19, Source file: Model.py

Example 3: _prepare_graph

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _prepare_graph(self, params):
        tf.reset_default_graph()

        self.dataset = Dataset(params)

        self.lr = tf.placeholder(tf.float32, shape=())
        one_hot_y = tf.one_hot(self.dataset.labels, depth=self._n_class)
        logits = self._prepare_model(self.dataset.text_data, self.dataset.text_len)

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_y)
        self.loss = tf.reduce_sum(cross_entropy)

        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)

        self.predictions = tf.argmax(logits, axis=1, output_type=tf.int32, name='predictions')
        self.accuracy = tf.reduce_sum(tf.cast(tf.equal(self.predictions, self.dataset.labels), tf.float32)) 
Developer: Prasad9, Project: TFHubSample, Lines of code: 18, Source file: Model.py
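The snippet above only builds the graph; a minimal, hypothetical TF1-style driver is sketched below (the Model wrapper, params dict, step count and learning rate are assumptions, and the Dataset pipeline is assumed to feed batches internally):

import tensorflow as tf

# Hypothetical training loop for the graph built by _prepare_graph (TF1 style).
model = Model(params)    # assumed wrapper whose constructor calls _prepare_graph(params)
with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for step in range(1000):
        _, loss_val = sess.run([model.optimizer, model.loss],
                               feed_dict={model.lr: 1e-3})
        if step % 100 == 0:
            print('step %d, loss %.4f' % (step, loss_val))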

Example 4: _prepare_graph

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _prepare_graph(self, params):
        tf.reset_default_graph()

        self.dataset = Dataset(params)

        self.lr = tf.placeholder(tf.float32, shape=())
        one_hot_y = tf.one_hot(self.dataset.labels, depth=self._n_class)
        logits = self._prepare_model(self.dataset.text_data)

        cross_entropy = tf.nn.softmax_cross_entropy_with_logits_v2(logits=logits, labels=one_hot_y)
        self.loss = tf.reduce_sum(cross_entropy)

        self.optimizer = tf.train.AdamOptimizer(learning_rate=self.lr).minimize(self.loss)

        self.predictions = tf.argmax(logits, axis=1, output_type=tf.int32, name='predictions')
        self.accuracy = tf.reduce_sum(tf.cast(tf.equal(self.predictions, self.dataset.labels), tf.float32)) 
Developer: Prasad9, Project: TFHubSample, Lines of code: 18, Source file: Model.py

Example 5: _build_architecture

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _build_architecture(self, params):
        tf.reset_default_graph()

        self.dataset = Dataset(params)
        self.lr = tf.placeholder(tf.float32, ())
        one_hot_y = tf.one_hot(self.dataset.data_y, self._n_class, dtype=tf.int32)
        self.logits = self._build_model(self.dataset.data_X)
        self.logits = tf.identity(self.logits, name='logits')
        self.predictions = tf.argmax(self.logits, axis=1, output_type=tf.int32, name='predictions')

        softmax = tf.nn.softmax_cross_entropy_with_logits_v2(labels=one_hot_y, logits=self.logits)
        self.loss = tf.reduce_sum(softmax)

        self.optimizer = tf.train.RMSPropOptimizer(learning_rate=self.lr).minimize(self.loss)
        self.accuracy = tf.reduce_sum(tf.cast(tf.equal(self.predictions, self.dataset.data_y), tf.float32),
                                       name='accuracy') 
Developer: Prasad9, Project: TFHubSample, Lines of code: 18, Source file: Model.py

Example 6: _prepare_graph

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _prepare_graph(self, params):
        tf.reset_default_graph()
        self.lr = tf.placeholder(tf.float32, shape=())
        self.is_training = tf.placeholder(tf.bool, shape=(), name='is_training')

        self.dataset = Dataset(params)

        logits = self._prepare_model(self.dataset.img_data, self.is_training)
        float_y = tf.cast(self.dataset.labels, tf.float32)
        cross_entropy = tf.nn.sigmoid_cross_entropy_with_logits(logits=logits, labels=float_y)
        self.loss = tf.reduce_sum(cross_entropy)

        sigmoid_logits = tf.nn.sigmoid(logits)
        self.predictions = tf.cast(tf.round(sigmoid_logits), tf.int32, name='predictions')
        self.accuracy = tf.reduce_sum(tf.reduce_min(tf.cast(tf.equal(self.predictions, self.dataset.labels),
                                                            tf.float32), axis=1))

        self._prepare_optimizer_stage(fine_tune_upto=1) 
Developer: Prasad9, Project: TFHubSample, Lines of code: 20, Source file: Model.py
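Compared with Example 2, this graph adds an is_training placeholder, which suggests the underlying hub module contains dropout or batch-normalization layers. A hypothetical evaluation step (sess and model assumed to exist as in the training sketch above) would feed it as False:

# Hypothetical evaluation step: compute accuracy with training-mode ops disabled.
acc_val = sess.run(model.accuracy, feed_dict={model.is_training: False})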

Example 7: reduce

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def reduce(self, reduction_f, initial_accum):
        #Reduce file by file 
        accum = initial_accum       
        for x in self.file_iterator():           
            accum = reduction_f(accum, *x)
        return accum
        
#    def pmap(self, mapping_f, filename = None, n_workers = 4):
#        if filename is None:
#            fname = tempfile.mktemp()
#            delete = True
#        else:
#            fname = filename
#            delete = False            
#        f = h5py.File(fname, "w")        
#        queue = Queue(maxsize = n_workers * 2)
#        worker_threads = [MappingThread(queue, mapping_f, f) for i in range(n_workers)]
#        # Start all threads        
#        [t.start() for t in worker_threads]        
#        # Transform file by file. This is done by queuing it and waiting for the worker threads to do it.       
#        for x in self.file_iterator(path = True):
#            queue.put(x)        
#        # Send a signal to all threads so they finish
#        [t.finish() for t in worker_threads]
#        # Wait for all the processing and writing to be finished before closing the file 
#        queue.join()
#        f.close()
#        # Return the new dataset
#        return Dataset(fname, 
#                       self.entries_regexp, 
#                       tuple(str(i) for i in xrange(len(y))), 
#                       delete_after_use = delete)
    #TODO: Move to utils 
Developer: MarcCote, Project: NADE, Lines of code: 35, Source file: SpeechDataset.py
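A hypothetical use of reduce (the row-counting reducer is illustrative and assumes each file entry is a 2-D array with samples along the first axis):

# Illustrative reducer: accumulate the total number of rows across all files.
def count_rows(accum, *arrays):
    return accum + arrays[0].shape[0]

total_rows = dataset.reduce(count_rows, 0)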

Example 8: _prepare_graph

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _prepare_graph(self, params):
        tf.reset_default_graph()
        self.dataset = Dataset(params)

        logits = self._prepare_model(self.dataset.img_data)
        softmax = tf.nn.softmax(logits)
        self.top_prediction = tf.nn.top_k(softmax, self._top_k, name='top_prediction') 
Developer: Prasad9, Project: TFHubSample, Lines of code: 9, Source file: Model.py
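tf.nn.top_k returns a (values, indices) pair, so a hypothetical inference call on this graph could unpack both (the session and model objects are assumed to exist as in the earlier examples):

# Hypothetical inference step: fetch the top-k softmax scores and class indices.
top_values, top_indices = sess.run(model.top_prediction)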

Example 9: _prepare_graph

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _prepare_graph(self, params):
        tf.reset_default_graph()
        self.dataset = Dataset(params)

        self.class_entities, self.boxes = self._prepare_model(self.dataset.img_data) 
Developer: Prasad9, Project: TFHubSample, Lines of code: 7, Source file: Model.py
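Here the graph produces detection outputs instead of a loss; a hypothetical inference call would simply fetch both tensors (sess and model assumed as above):

# Hypothetical detection step: fetch detected class entities and bounding boxes.
entities, boxes = sess.run([model.class_entities, model.boxes])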

Example 10: _build_architecture

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def _build_architecture(self):
        tf.reset_default_graph()

        self.dataset = Dataset()
        self.aug_images = self._build_model() 
Developer: Prasad9, Project: TFHubSample, Lines of code: 7, Source file: Model.py

Example 11: map

# Required import: import Dataset [as alias]
# Or: from Dataset import Dataset [as alias]
def map(self, mapping_f, filename = None):
        if filename is None:
            fname = tempfile.mktemp()
            delete = True
        else:
            fname = filename
            delete = False            
        f = h5py.File(fname, "w")
        #Transform file by file        
        for x in self.file_iterator(path = True):
            route = x[-1]
            x = x[:-1]
            y = mapping_f(*x)
            if np.prod(y[0].shape) > 0:
                for i,v in enumerate(y):
                    f[route+"/"+str(i)] = v
        f.close()
        return Dataset(fname, 
                       ".*/.*/.*/.*", 
                       tuple(str(i) for i in xrange(len(y))), 
                       delete_after_use = delete)
        
#    def get_data(self, n = None, proportion = None, accept_less = True):        
#        if n is None:
#            total = sum([self.get_file_shape(0, i)[0] for i in xrange(len(self.file_paths))])
#            if proportion is not None: 
#                n = total * proportion
#            else:
#                n = total
#        data = tuple(np.empty((n, self.get_dimensionality(i))) for i in xrange(self.get_arity()))
#        row = 0
#        for fs in self.file_iterator():
#            for i,f in enumerate(fs):
#                increment = min(f.shape[0], n-row)
#                data[i][row:row+increment, :] = f[0:increment, :]
#            row += increment
#            if row >= n:
#                break
#        if accept_less and row < n:
#            return tuple(d[0:row,:] for d in data)
#        else:
#            assert(n == row)
#        return data 
Developer: MarcCote, Project: NADE, Lines of code: 45, Source file: HTKSpeechDataset.py


Note: The Dataset.Dataset method examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various programmers; copyright remains with the original authors. For distribution and use, please refer to the license of the corresponding project. Do not reproduce without permission.