

Python datasets.SupervisedDataSet Code Examples

This article collects typical usage examples of the pybrain.datasets.SupervisedDataSet class in Python. If you are wondering what pybrain.datasets.SupervisedDataSet does, how to use it, or are looking for working examples, the curated code samples below should help. You can also explore further usage examples from the pybrain.datasets module.


The following presents 7 code examples of datasets.SupervisedDataSet, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
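
Before the examples, here is a minimal, self-contained sketch of the basic SupervisedDataSet API (the dimensions and the toy XOR data below are purely illustrative): the constructor takes the input and target dimensionality, addSample appends one (input, target) pair, and the stored arrays can be read back through the 'input' and 'target' fields.

from pybrain.datasets import SupervisedDataSet

# Dataset with 2-dimensional inputs and 1-dimensional targets (XOR as a toy task).
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

print(len(ds))          # number of samples: 4
print(ds['input'])      # all inputs as a numpy array
print(ds['target'])     # all targets as a numpy array
for inp, target in ds:  # iterate over (input, target) pairs
    print(inp, target)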

Example 1: get_cat_dog_trainset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def get_cat_dog_trainset():
    count = 0
    images = os.listdir(root.path() + '/res/cats_proc/')
    shape = cv2.imread(root.path() + '/res/cats_proc/'+images[0],0).shape
    ds = SupervisedDataSet(shape[0]*shape[1], 2)
    for image in os.listdir(root.path() + '/res/cats_proc/'):
        img = cv2.imread(root.path() + '/res/cats_proc/'+image,0)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [1,0]
        ds.addSample(inp, target)
        count += 1
    for image in os.listdir(root.path() + '/res/dogs_proc/'):
        img = cv2.imread(root.path() + '/res/dogs_proc/'+image,0)
        img = cv2.resize(img, img.shape, fx=0.5, fy=0.5)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [0,1]
        ds.addSample(inp, target)
        count += 1
    return ds 
Author: research-team, Project: NEUCOGAR, Lines: 21, Source: image_processing.py
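
As a rough sketch of how a dataset like this could be consumed (this is not part of the NEUCOGAR project; the hidden-layer size and epoch count are arbitrary assumptions), pybrain's buildNetwork and BackpropTrainer can be attached directly to the returned dataset:

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

ds = get_cat_dog_trainset()
net = buildNetwork(ds.indim, 64, ds.outdim)  # input -> 64 hidden units -> 2 outputs
trainer = BackpropTrainer(net, dataset=ds)
for epoch in range(10):
    print(trainer.train())                   # per-epoch training error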

Example 2: get_cat_dog_testset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def get_cat_dog_testset():
    count = 0
    images = os.listdir(root.path() + '/res/cats_proc/')
    shape = cv2.imread(root.path() + '/res/cats_proc/'+images[0],0).shape
    ds = SupervisedDataSet(shape[0]*shape[1], 2)
    for image in os.listdir(root.path() + '/res/cats_proc/'):
        img = cv2.imread(root.path() + '/res/cats_proc/'+image,0)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [1,0]
        ds.addSample(inp, target)
        count += 1
    for image in os.listdir(root.path() + '/res/dogs_proc/'):
        img = cv2.imread(root.path() + '/res/dogs_proc/'+image,0)
        img = cv2.resize(img, img.shape, fx=0.5, fy=0.5)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [0,1]
        ds.addSample(inp, target)
        count += 1
    return ds

# img = cv2.resize(img,(280, 280), interpolation = cv2.INTER_CUBIC)
# cv2.imwrite(root.path()+"/images/proc.jpg", img) 
Author: research-team, Project: NEUCOGAR, Lines: 24, Source: image_processing.py
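
One possible way to evaluate a trained network on this test set (again only a sketch, not code from the project; net is assumed to be a network whose input/output dimensions match the dataset) is to activate it on each sample and compare the arg-max of the output with the one-hot target:

import numpy as np

test_ds = get_cat_dog_testset()
correct = 0
for inp, target in test_ds:
    out = net.activate(inp)                  # 2-element output vector
    if np.argmax(out) == np.argmax(target):  # predicted class vs. one-hot target
        correct += 1
print('accuracy:', correct / len(test_ds))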

Example 3: prepareANNDataset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def prepareANNDataset(data, prob=None):
    '''
        Method to prepare the dataset for ANN training
        and testing
    '''
    # we only import this when preparing ANN dataset
    import pybrain.datasets as dt

    # supplementary method to convert list to tuple
    def extract(row):
        return tuple(row)

    # get the number of inputs and outputs
    inputs = len(data[0].columns)
    outputs = len(data[1].axes) + 1 
    if prob == 'regression':
        outputs -= 1

    # create dataset object
    dataset = dt.SupervisedDataSet(inputs, outputs)

    # convert dataframes to lists of tuples
    x = list(data[0].apply(extract, axis=1))
    if prob == 'regression':
        y = [(item) for item in data[1]]
    else:
        y = [(item,abs(item - 1)) for item in data[1]]

    # and add samples to the ANN dataset
    for x_item, y_item in zip(x,y):
        dataset.addSample(x_item, y_item)

    return dataset 
Author: drabastomek, Project: practicalDataAnalysisCookbook, Lines: 35, Source: helper.py
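
For illustration, a hedged usage sketch of prepareANNDataset (the data and column names below are made up; the function expects data to be a (features DataFrame, target Series) pair, with a binary 0/1 target in the classification case):

import pandas as pd

features = pd.DataFrame({'x1': [0.1, 0.4, 0.8], 'x2': [1.2, 0.7, 0.3]})
target = pd.Series([0, 1, 1])

ds_class = prepareANNDataset((features, target))                     # targets become (y, 1 - y)
ds_regr = prepareANNDataset((features, target), prob='regression')   # single regression target
print(ds_class.indim, ds_class.outdim)  # 2 2
print(ds_regr.indim, ds_regr.outdim)    # 2 1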

Example 4: get_new_data_set

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def get_new_data_set(self):
        input_number, output_number = self.meta_data

        return SupervisedDataSet(input_number, output_number) 
Author: tonybeltramelli, Project: Deep-Spying, Lines: 6, Source: FeedForward.py

Example 5: create_DS

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def create_DS(self, data):
        size = self.datasetinputs
        DS = SupervisedDataSet(size, 1)
        try:
            for i, val in enumerate(data):
                sample = create_sample_row(data, i, size)
                target = data[i + size]
                DS.addSample(sample, (target,))
        except Exception as e:
            if "list index out of range" not in str(e):
                print(e)
        return DS 
Author: owocki, Project: pytrader, Lines: 14, Source: models.py
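
The helper create_sample_row is defined elsewhere in the pytrader code base; a plausible (hypothetical, possibly different from the real one) reading that is consistent with how create_DS uses it is a sliding window of `size` consecutive values, so that the target is the value immediately after the window:

def create_sample_row(data, i, size):
    # Hypothetical sketch: return the `size` values starting at index i.
    return tuple(data[i:i + size])

# With size = 3 and data = [1, 2, 3, 4, 5], the first sample would be
# (1, 2, 3) with target data[0 + 3] = 4; the loop in create_DS stops once
# i + size runs past the end and the "list index out of range" error is caught.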

Example 6: _build_dataset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def _build_dataset(self, data):
        """
    Given a input training Dataframe with features and targets it returns the formatted training and validation
    datasets for pybrain usage, and randomly shuffled according to the self.seed given at instantiation.

    ----------

    data: pandas Dataframe
        It must contains both features and target columns

    Returns: (pybrain dataset, pybrain dataset)
        The first is the training dataset and the second is the validation dataset

        """
        np.random.seed(self.seed)
        permutation = np.random.permutation(np.arange(len(data)))
        sep = int(self.train_fraction * len(data))
        x = data[self.features]
        y = data[self.targets]
        ds_train = SupervisedDataSet(self.n_feature, self.n_target)
        ds_valid = SupervisedDataSet(self.n_feature, self.n_target)
        for i in permutation[:sep]:
            ds_train.addSample(x.values[i], y.values[i])
        for i in permutation[sep:]:
            ds_valid.addSample(x.values[i], y.values[i])
        return ds_train, ds_valid 
Author: Ambrosys, Project: climatelearn, Lines: 28, Source: pybrain_MP.py
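
For comparison (not from the climatelearn project): pybrain's SupervisedDataSet also ships a splitWithProportion method that performs a shuffled train/validation split, but it does not take an explicit seed, which is presumably why the method above permutes the indices itself:

ds = SupervisedDataSet(2, 1)
ds.addSample((0.0, 1.0), (1.0,))
ds.addSample((1.0, 0.0), (0.0,))
ds_train, ds_valid = ds.splitWithProportion(0.75)  # roughly 75% train, 25% validation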

Example 7: fit_predict

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def fit_predict(xTrain,yTrain,xTest,epochs,neurons):

  # Check edge cases
  if (not len(xTrain) == len(yTrain) or len(xTrain) == 0 or 
    len(xTest) == 0 or epochs <= 0):
    return

  # Randomize the training data (probably not necessary, but pybrain might
  # not shuffle the data itself, so this is done as a safety check)
  indices = np.arange(len(xTrain))
  np.random.shuffle(indices)

  trainSwapX = [xTrain[x] for x in indices]
  trainSwapY = [yTrain[x] for x in indices]

  supTrain = SupervisedDataSet(len(xTrain[0]),1)
  for x in range(len(trainSwapX)):
    supTrain.addSample(trainSwapX[x],trainSwapY[x])

  # Construct the feed-forward neural network

  n = FeedForwardNetwork()

  inLayer = LinearLayer(len(xTrain[0]))
  hiddenLayer1 = SigmoidLayer(neurons)
  outLayer = LinearLayer(1)

  n.addInputModule(inLayer)
  n.addModule(hiddenLayer1)
  n.addOutputModule(outLayer)

  in_to_hidden = FullConnection(inLayer, hiddenLayer1)
  hidden_to_out = FullConnection(hiddenLayer1, outLayer)
  
  n.addConnection(in_to_hidden)
  n.addConnection(hidden_to_out)

  n.sortModules() 

  # Train the neural network on the training partition, validating
  # the training progress on the validation partition

  trainer = BackpropTrainer(n,dataset=supTrain,momentum=0.1,learningrate=0.01
    ,verbose=False,weightdecay=0.01)
  
  trainer.trainUntilConvergence(dataset=supTrain,
    maxEpochs=epochs,validationProportion=0.30)

  outputs = []
  for x in xTest:
    outputs.append(n.activate(x))

  return outputs 
Author: lbenning, Project: Load-Forecasting, Lines: 55, Source: neural.py
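
Finally, a hedged usage sketch of fit_predict (not part of the Load-Forecasting project; the synthetic sine data, 5 epochs, and 10 hidden neurons are arbitrary choices):

import numpy as np

xTrain = [[i / 100.0] for i in range(100)]
yTrain = [np.sin(x[0]) for x in xTrain]
xTest = [[0.25], [0.75]]

predictions = fit_predict(xTrain, yTrain, xTest, epochs=5, neurons=10)
print(predictions)  # one 1-element output array per test input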


Note: The pybrain.datasets.SupervisedDataSet examples in this article were compiled by 纯净天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets are taken from open-source projects contributed by various developers; the copyright of the source code belongs to its original authors. Please refer to each project's license before distributing or using the code. Do not reproduce this article without permission.