

Python datasets.SupervisedDataSet Code Examples

This article collects typical usage examples of the Python class pybrain.datasets.SupervisedDataSet. If you are unsure what datasets.SupervisedDataSet is for, how to use it, or simply want to see it in action, the curated code examples below may help. You can also explore further usage examples from the pybrain.datasets module.


The following presents 7 code examples of datasets.SupervisedDataSet, sorted by popularity by default. You can upvote the examples you like or find useful; your ratings help the system recommend better Python code examples.
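Before the project examples, here is a minimal, hedged sketch (not taken from any of the projects below) of the basic SupervisedDataSet workflow: declare the input and target dimensions, add samples, then train a small network on the dataset with BackpropTrainer.

from pybrain.datasets import SupervisedDataSet
from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

# XOR truth table: two input values, one target value per sample
ds = SupervisedDataSet(2, 1)
ds.addSample((0, 0), (0,))
ds.addSample((0, 1), (1,))
ds.addSample((1, 0), (1,))
ds.addSample((1, 1), (0,))

net = buildNetwork(2, 3, 1)                       # 2-3-1 feed-forward network
trainer = BackpropTrainer(net, dataset=ds, learningrate=0.1)
for _ in range(1000):
    trainer.train()                               # one epoch per call, returns the epoch error

print(net.activate((1, 0)))                       # approaches 1.0 after training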

Example 1: get_cat_dog_trainset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def get_cat_dog_trainset():
    count = 0
    images = os.listdir(root.path() + '/res/cats_proc/')
    shape = cv2.imread(root.path() + '/res/cats_proc/'+images[0],0).shape
    ds = SupervisedDataSet(shape[0]*shape[1], 2)
    for image in os.listdir(root.path() + '/res/cats_proc/'):
        img = cv2.imread(root.path() + '/res/cats_proc/'+image,0)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [1,0]
        ds.addSample(inp, target)
        count += 1
    for image in os.listdir(root.path() + '/res/dogs_proc/'):
        img = cv2.imread(root.path() + '/res/dogs_proc/'+image,0)
        img = cv2.resize(img, img.shape, fx=0.5, fy=0.5)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [0,1]
        ds.addSample(inp, target)
        count += 1
    return ds 
Developer: research-team, Project: NEUCOGAR, Lines of code: 21, Source file: image_processing.py

Example 2: get_cat_dog_testset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def get_cat_dog_testset():
    count = 0
    images = os.listdir(root.path() + '/res/cats_proc/')
    shape = cv2.imread(root.path() + '/res/cats_proc/'+images[0],0).shape
    ds = SupervisedDataSet(shape[0]*shape[1], 2)
    for image in os.listdir(root.path() + '/res/cats_proc/'):
        img = cv2.imread(root.path() + '/res/cats_proc/'+image,0)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [1,0]
        ds.addSample(inp, target)
        count += 1
    for image in os.listdir(root.path() + '/res/dogs_proc/'):
        img = cv2.imread(root.path() + '/res/dogs_proc/'+image,0)
        img = cv2.resize(img, img.shape, fx=0.5, fy=0.5)
        inp = np.reshape(img, shape[0]*shape[1])
        target = [0,1]
        ds.addSample(inp, target)
        count += 1
    return ds

# img = cv2.resize(img,(280, 280), interpolation = cv2.INTER_CUBIC)
# cv2.imwrite(root.path()+"/images/proc.jpg", img) 
Developer: research-team, Project: NEUCOGAR, Lines of code: 24, Source file: image_processing.py
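A hedged usage sketch (not part of the NEUCOGAR source) of how the two functions above might be combined: build a feed-forward classifier sized from the dataset dimensions and train it with BackpropTrainer. The snippets above assume that os, cv2, numpy (as np) and the project's root helper are imported at module level in image_processing.py; the hidden-layer size below is an arbitrary choice.

from pybrain.tools.shortcuts import buildNetwork
from pybrain.supervised.trainers import BackpropTrainer

train_ds = get_cat_dog_trainset()                     # SupervisedDataSet(pixels, 2)
net = buildNetwork(train_ds.indim, 50, train_ds.outdim)
trainer = BackpropTrainer(net, dataset=train_ds, learningrate=0.01)
trainer.trainEpochs(5)

test_ds = get_cat_dog_testset()
for inp, target in test_ds:                           # iterating yields (input, target) pairs
    print(net.activate(inp), target)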

Example 3: prepareANNDataset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def prepareANNDataset(data, prob=None):
    '''
        Method to prepare the dataset for ANN training
        and testing
    '''
    # we only import this when preparing ANN dataset
    import pybrain.datasets as dt

    # supplementary method to convert list to tuple
    def extract(row):
        return tuple(row)

    # get the number of inputs and outputs
    inputs = len(data[0].columns)
    outputs = len(data[1].axes) + 1 
    if prob == 'regression':
        outputs -= 1

    # create dataset object
    dataset = dt.SupervisedDataSet(inputs, outputs)

    # convert dataframes to lists of tuples
    x = list(data[0].apply(extract, axis=1))
    if prob == 'regression':
        y = [(item) for item in data[1]]
    else:
        y = [(item,abs(item - 1)) for item in data[1]]

    # and add samples to the ANN dataset
    for x_item, y_item in zip(x,y):
        dataset.addSample(x_item, y_item)

    return dataset 
Developer: drabastomek, Project: practicalDataAnalysisCookbook, Lines of code: 35, Source file: helper.py
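A hedged usage sketch: prepareANNDataset indexes data[0].columns and iterates data[1], which suggests a (features DataFrame, target Series) pair; the column names and values below are made up for illustration.

import pandas as pd

features = pd.DataFrame({'x1': [0.1, 0.5, 0.9], 'x2': [1.0, 0.2, 0.7]})
target = pd.Series([0, 1, 1])

# classification: each 0/1 target becomes an (item, 1 - item) pair, so two output units
ds_clf = prepareANNDataset((features, target))
print(ds_clf.indim, ds_clf.outdim)                    # 2 2

# regression keeps a single output unit
ds_reg = prepareANNDataset((features, pd.Series([0.3, 0.7, 0.9])), prob='regression')
print(ds_reg.indim, ds_reg.outdim)                    # 2 1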

Example 4: get_new_data_set

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def get_new_data_set(self):
        input_number, output_number = self.meta_data

        return SupervisedDataSet(input_number, output_number) 
Developer: tonybeltramelli, Project: Deep-Spying, Lines of code: 6, Source file: FeedForward.py

Example 5: create_DS

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def create_DS(self, data):
        size = self.datasetinputs
        DS = SupervisedDataSet(size, 1)
        try:
            for i, val in enumerate(data):
                sample = create_sample_row(data, i, size)
                target = data[i + size]
                DS.addSample(sample, (target,))
        except Exception as e:
            if "list index out of range" not in str(e):
                print(e)
        return DS 
Developer: owocki, Project: pytrader, Lines of code: 14, Source file: models.py
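create_sample_row is a helper defined elsewhere in the pytrader project; the method above walks a series with a sliding window of length self.datasetinputs, uses the next value as the regression target, and stops when the index runs past the end of the list. A hedged, standalone sketch of the same pattern, assuming the window is simply the size consecutive values starting at i:

from pybrain.datasets import SupervisedDataSet

data = [1.0, 2.0, 3.0, 4.0, 5.0, 6.0]
size = 3
DS = SupervisedDataSet(size, 1)
for i in range(len(data) - size):
    DS.addSample(data[i:i + size], (data[i + size],))
print(len(DS))     # 3 samples, e.g. input [1, 2, 3] -> target 4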

Example 6: _build_dataset

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def _build_dataset(self, data):
        """
    Given a input training Dataframe with features and targets it returns the formatted training and validation
    datasets for pybrain usage, and randomly shuffled according to the self.seed given at instantiation.

    ----------

    data: pandas Dataframe
        It must contains both features and target columns

    Returns: (pybrain dataset, pybrain dataset)
        The first is the training dataset and the second is the validation dataset

        """
        np.random.seed(self.seed)
        permutation = np.random.permutation(np.arange(len(data)))
        sep = int(self.train_fraction * len(data))
        x = data[self.features]
        y = data[self.targets]
        ds_train = SupervisedDataSet(self.n_feature, self.n_target)
        ds_valid = SupervisedDataSet(self.n_feature, self.n_target)
        for i in permutation[:sep]:
            ds_train.addSample(x.values[i], y.values[i])
        for i in permutation[sep:]:
            ds_valid.addSample(x.values[i], y.values[i])
        return ds_train, ds_valid 
Developer: Ambrosys, Project: climatelearn, Lines of code: 28, Source file: pybrain_MP.py
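A hedged sketch of the attributes the method reads (self.seed, self.train_fraction, self.features, self.targets, self.n_feature, self.n_target); the real climatelearn class sets these at instantiation, so the stand-in object and column names below are purely illustrative.

import types
import numpy as np
import pandas as pd

df = pd.DataFrame({'f1': np.random.rand(10),
                   'f2': np.random.rand(10),
                   't':  np.random.rand(10)})

# stand-in for the class instance; not the actual climatelearn constructor
owner = types.SimpleNamespace(seed=0, train_fraction=0.8,
                              features=['f1', 'f2'], targets=['t'],
                              n_feature=2, n_target=1)
ds_train, ds_valid = _build_dataset(owner, df)        # called as a plain function here
print(len(ds_train), len(ds_valid))                   # 8 and 2 samples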

Example 7: fit_predict

# Required import: from pybrain import datasets [as alias]
# Or: from pybrain.datasets import SupervisedDataSet [as alias]
def fit_predict(xTrain,yTrain,xTest,epochs,neurons):

  # Check edge cases
  if (not len(xTrain) == len(yTrain) or len(xTrain) == 0 or 
    len(xTest) == 0 or epochs <= 0):
    return

  # Randomize the training data (probably not necessary, but pybrain might
  # not shuffle the data itself, so do it here as a safety check)
  indices = np.arange(len(xTrain))
  np.random.shuffle(indices)

  trainSwapX = [xTrain[x] for x in indices]
  trainSwapY = [yTrain[x] for x in indices]

  supTrain = SupervisedDataSet(len(xTrain[0]),1)
  for x in range(len(trainSwapX)):
    supTrain.addSample(trainSwapX[x],trainSwapY[x])

  # Construct the feed-forward neural network

  n = FeedForwardNetwork()

  inLayer = LinearLayer(len(xTrain[0]))
  hiddenLayer1 = SigmoidLayer(neurons)
  outLayer = LinearLayer(1)

  n.addInputModule(inLayer)
  n.addModule(hiddenLayer1)
  n.addOutputModule(outLayer)

  in_to_hidden = FullConnection(inLayer, hiddenLayer1)
  hidden_to_out = FullConnection(hiddenLayer1, outLayer)
  
  n.addConnection(in_to_hidden)
  n.addConnection(hidden_to_out)

  n.sortModules() 

  # Train the neural network on the training partition, validating
  # the training progress on the validation partition

  trainer = BackpropTrainer(n, dataset=supTrain, momentum=0.1, learningrate=0.01,
                            verbose=False, weightdecay=0.01)
  
  trainer.trainUntilConvergence(dataset=supTrain,
    maxEpochs=epochs,validationProportion=0.30)

  outputs = []
  for x in xTest:
    outputs.append(n.activate(x))

  return outputs 
Developer: lbenning, Project: Load-Forecasting, Lines of code: 55, Source file: neural.py
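A hedged usage sketch with tiny synthetic data. fit_predict relies on module-level imports in neural.py (numpy as np, SupervisedDataSet, FeedForwardNetwork, LinearLayer, SigmoidLayer, FullConnection, BackpropTrainer); the toy values below are made up.

xTrain = [[0.0, 0.0], [0.0, 1.0], [1.0, 0.0], [1.0, 1.0]]
yTrain = [0.0, 1.0, 1.0, 0.0]
xTest = [[0.5, 0.5], [0.0, 0.9]]

preds = fit_predict(xTrain, yTrain, xTest, epochs=20, neurons=4)
print(preds)       # one single-element activation array per test row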


Note: The pybrain.datasets.SupervisedDataSet examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by their respective authors, and copyright of the source code remains with the original authors. Please check the corresponding project's license before distributing or reusing the code, and do not reproduce this article without permission.