

Python DataFrame.columns Method Code Examples

This article collects typical usage examples of the Python method pandas.core.frame.DataFrame.columns, gathered from open-source projects. If you are wondering how exactly DataFrame.columns is used in practice, or are looking for working examples, the curated snippets below should help. You can also explore other usage examples of pandas.core.frame.DataFrame, the class this attribute belongs to.


Fifteen code examples of DataFrame.columns are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code examples.
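
Before the examples, here is a minimal sketch of the pattern they all share (illustrative only, not taken from any of the projects below): reading DataFrame.columns returns an Index of column labels, and assigning a sequence of the same length relabels every column at once.

from pandas import DataFrame

# A frame built without explicit column labels gets integer ones
df = DataFrame([[1, 2], [3, 4]])
print(df.columns.tolist())   # [0, 1]

# Assigning a sequence of matching length renames all columns in place
df.columns = ['left', 'right']
print(df.columns.tolist())   # ['left', 'right']

# Assigning a sequence of the wrong length raises ValueError:
# df.columns = ['only_one']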

Example 1: classifyTestData

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
def classifyTestData(testFilePath,modelRoot):
    """
    This method calls traverseDecisionTreeModel() to classify the test data against the trained model, then builds the confusion matrix and reports the error at the given depth.
    :param testFilePath: Path to the test file
    :param modelRoot: Root node of the decision tree of the trained model

    """
    correctlyClassifiedInstances=0
    incorrectlyClassifiedInstances=0
    testDataList=[]
    input=open(testFilePath,'rU')
    csvObject=csv.reader(input)
    label = featureList[len(featureList) -1]
    classLabels = featureAndValueMapping.get(label)
    classLabelCount = len(classLabels)
    ConfusionMatrix = [[0 for x in range(int(classLabelCount))] for x in range(int(classLabelCount))]
    for row in csvObject:
        predictedLabel=traverseDecisionTreeModel(row,modelRoot)  # classify against the trained model's root
        ConfusionMatrix[int(row[len(row)- 1]) - 1][int(predictedLabel) - 1] += 1

        if predictedLabel==row[len(row)-1]:
            correctlyClassifiedInstances+=1
        else:
            incorrectlyClassifiedInstances+=1
    df = DataFrame(ConfusionMatrix)
    df.columns = classLabels
    df.index = classLabels

    print "Confusion Matrix :: \n"
    print df
    print "Correctly Classified Instance ",correctlyClassifiedInstances
    print "Incorrectly Classified Instance ",incorrectlyClassifiedInstances
Developer ID: biprade, Project: Applied_Machine_Learning, Lines of code: 34, Source file: DecisionTree.py

Example 2: get_result

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
    def get_result(self):
        if self._is_series:
            if self.axis == 0:
                new_data = com._concat_compat([x.get_values() for x in self.objs])
                name = com._consensus_name_attr(self.objs)
                return Series(new_data, index=self.new_axes[0], name=name).__finalize__(self, method='concat')
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                index, columns = self.new_axes
                tmpdf = DataFrame(data, index=index)
                if columns is not None:
                    tmpdf.columns = columns
                return tmpdf.__finalize__(self, method='concat')
        else:
            mgrs_indexers = []
            for obj in self.objs:
                mgr = obj._data
                indexers = {}
                for ax, new_labels in enumerate(self.new_axes):
                    if ax == self.axis:
                        # Suppress reindexing on concat axis
                        continue

                    obj_labels = mgr.axes[ax]
                    if not new_labels.equals(obj_labels):
                        indexers[ax] = obj_labels.reindex(new_labels)[1]

                mgrs_indexers.append((obj._data, indexers))

            new_data = concatenate_block_managers(
                mgrs_indexers, self.new_axes, concat_axis=self.axis, copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()

            return self.objs[0]._from_axes(new_data, self.new_axes).__finalize__(self, method='concat')
Developer ID: Martbov, Project: InformationRetrieval, Lines of code: 37, Source file: merge.py

Example 3: get_result

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
    def get_result(self):

        # series only
        if self._is_series:

            # stack blocks
            if self.axis == 0:
                new_data = com._concat_compat([x._values for x in self.objs])
                name = com._consensus_name_attr(self.objs)
                return (Series(new_data, index=self.new_axes[0],
                               name=name,
                               dtype=new_data.dtype)
                        .__finalize__(self, method='concat'))

            # combine as columns in a frame
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                index, columns = self.new_axes
                tmpdf = DataFrame(data, index=index)
                # Check whether the columns variable already stores valid
                # column names (set via the 'keys' argument of the 'concat'
                # call). If not, fall back to the series names as column
                # names.
                if (columns.equals(Index(np.arange(len(self.objs)))) and
                        not self.ignore_index):
                    columns = np.array([data[i].name
                                        for i in range(len(data))],
                                       dtype='object')
                    indexer = isnull(columns)
                    if indexer.any():
                        columns[indexer] = np.arange(len(indexer[indexer]))
                tmpdf.columns = columns
                return tmpdf.__finalize__(self, method='concat')

        # combine block managers
        else:
            mgrs_indexers = []
            for obj in self.objs:
                mgr = obj._data
                indexers = {}
                for ax, new_labels in enumerate(self.new_axes):
                    if ax == self.axis:
                        # Suppress reindexing on concat axis
                        continue

                    obj_labels = mgr.axes[ax]
                    if not new_labels.equals(obj_labels):
                        indexers[ax] = obj_labels.reindex(new_labels)[1]

                mgrs_indexers.append((obj._data, indexers))

            new_data = concatenate_block_managers(
                mgrs_indexers, self.new_axes,
                concat_axis=self.axis, copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()

            return (self.objs[0]._from_axes(new_data, self.new_axes)
                    .__finalize__(self, method='concat'))
Developer ID: clamus, Project: pandas, Lines of code: 61, Source file: merge.py

Example 4: write_to_csv

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
 def write_to_csv(self):
     nw_df = DataFrame(list(self.lst))
     nw_df.columns = ['Redirect count','ssl_classification','url_length','hostname_length','subdomain_count','at_sign_in_url','exe_extension_in_request_url','exe_extension_in_landing_url',
                         'ip_as_domain_name','no_of_slashes_in_request_url','no_of_slashes_in_landing_url','no_of_dots_in_request_url','no_of_dots_in_landing_url','tld_value','age_of_domain',
                         'age_of_last_modified','content_length','same_landing_and_request_ip','same_landing_and_request_url']
     frames = [self.df['label'],self.df2['label']]
     new_df = pd.concat(frames)
     new_df = new_df.reset_index()
     nw_df['label'] = new_df['label']
     nw_df.to_csv('dataset1.csv',sep=',', encoding='latin-1')
Developer ID: kegbo, Project: Malicious-URL-Detector, Lines of code: 12, Source file: train.py

Example 5: getPercentile

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
 def getPercentile(self, df, trg_percentile):
     
     percentile = df.quantile(trg_percentile, axis=0)
     
     percentile_df = DataFrame(percentile)
     
     column_name = trg_percentile * 100
     percentile_df.columns = [str(column_name)]
     
     return percentile_df
Developer ID: lalitagarwal, Project: CPET, Lines of code: 12, Source file: dfAnalyst1.py

Example 6: str_extract

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
def str_extract(arr, pat, flags=0):
    """
    Find groups in each string using passed regular expression

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    extracted groups : Series (one group) or DataFrame (multiple groups)


    Notes
    -----
    Compare to the string method match, which returns re.match objects.
    """
    regex = re.compile(pat, flags=flags)

    # just to be safe, check this
    if regex.groups == 0:
        raise ValueError("This pattern contains no groups to capture.")
    elif regex.groups == 1:
        def f(x):
            if not isinstance(x, compat.string_types):
                return None
            m = regex.search(x)
            if m:
                return m.groups()[0]  # may be None
            else:
                return None
    else:
        empty_row = Series(regex.groups * [None])

        def f(x):
            if not isinstance(x, compat.string_types):
                return empty_row
            m = regex.search(x)
            if m:
                return Series(list(m.groups()))  # may contain None
            else:
                return empty_row
    result = arr.apply(f)
    result.replace({None: np.nan}, inplace=True)
    if regex.groups > 1:
        result = DataFrame(result)  # Don't rely on the wrapper; name columns.
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        result.columns = [names.get(1 + i, i) for i in range(regex.groups)]
    else:
        result.name = regex.groupindex.get(0)
    return result
Developer ID: Exception4U, Project: pandas, Lines of code: 56, Source file: strings.py
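
This version of the docstring carries no usage examples, so here is a short illustrative sketch (not from the original file) of the one-group/multi-group behavior it describes. In current pandas the public entry point is Series.str.extract, and passing expand=False reproduces the return types documented above (a Series for one capture group, a DataFrame for several):

import pandas as pd

s = pd.Series(['a1', 'b2', 'c3'])

# One capture group -> Series; non-matching rows become NaN
print(s.str.extract(r'[ab](\d)', expand=False))

# Multiple capture groups -> DataFrame with one column per group
print(s.str.extract(r'([ab])(\d)', expand=False))

# Named groups supply the column names
print(s.str.extract(r'(?P<letter>[ab])(?P<digit>\d)', expand=False))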

Example 7: get_result

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
    def get_result(self):

        # series only
        if self._is_series:

            # stack blocks
            if self.axis == 0:
                # concat Series with length to keep dtype as much
                non_empties = [x for x in self.objs if len(x) > 0]
                if len(non_empties) > 0:
                    values = [x._values for x in non_empties]
                else:
                    values = [x._values for x in self.objs]
                new_data = com._concat_compat(values)

                name = com._consensus_name_attr(self.objs)
                return (Series(new_data, index=self.new_axes[0],
                               name=name,
                               dtype=new_data.dtype)
                        .__finalize__(self, method='concat'))

            # combine as columns in a frame
            else:
                data = dict(zip(range(len(self.objs)), self.objs))
                index, columns = self.new_axes
                tmpdf = DataFrame(data, index=index)
                tmpdf.columns = columns
                return tmpdf.__finalize__(self, method='concat')

        # combine block managers
        else:
            mgrs_indexers = []
            for obj in self.objs:
                mgr = obj._data
                indexers = {}
                for ax, new_labels in enumerate(self.new_axes):
                    if ax == self.axis:
                        # Suppress reindexing on concat axis
                        continue

                    obj_labels = mgr.axes[ax]
                    if not new_labels.equals(obj_labels):
                        indexers[ax] = obj_labels.reindex(new_labels)[1]

                mgrs_indexers.append((obj._data, indexers))

            new_data = concatenate_block_managers(
                mgrs_indexers, self.new_axes,
                concat_axis=self.axis, copy=self.copy)
            if not self.copy:
                new_data._consolidate_inplace()

            return (self.objs[0]._from_axes(new_data, self.new_axes)
                    .__finalize__(self, method='concat'))
Developer ID: DangDangSister, Project: pandas, Lines of code: 56, Source file: merge.py

Example 8: get_result

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
 def get_result(self):
     if self._is_series and self.axis == 0:
         new_data = com._concat_compat([x.values for x in self.objs])
         name = com._consensus_name_attr(self.objs)
         return Series(new_data, index=self.new_axes[0], name=name)
     elif self._is_series:
         data = dict(itertools.izip(xrange(len(self.objs)), self.objs))
         tmpdf = DataFrame(data, index=self.new_axes[0])
         tmpdf.columns = self.new_axes[1]
         return tmpdf
     else:
         new_data = self._get_concatenated_data()
         return self.objs[0]._from_axes(new_data, self.new_axes)
Developer ID: da415, Project: pandas, Lines of code: 15, Source file: merge.py

Example 9: runModelOnTest

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
def runModelOnTest(testFilePath):
    classLabels = featureAndValueMapping.get(featureList[len(featureList) -1])
    classLabelCount = len(classLabels)
    ConfusionMatrix = [[0 for x in range(int(classLabelCount))] for x in range(int(classLabelCount))]
    input=open(testFilePath,'rU')
    csvObject=csv.reader(input)
    for row in csvObject:
        predictedLabel=classify(row[:len(row)-1])
        ConfusionMatrix[int(row[len(row)- 1])][int(predictedLabel)] += 1
        # print "Actual label : "+row[len(row)- 1]+"Class label : "+classify(row[:len(row)-1])
    df = DataFrame(ConfusionMatrix)
    df.columns = classLabels
    df.index = classLabels
    print df
Developer ID: biprade, Project: Applied_Machine_Learning, Lines of code: 16, Source file: NaiveBayes.py

Example 10: get_result

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
 def get_result(self):
     if self._is_series and self.axis == 0:
         new_data = com._concat_compat([x.get_values() for x in self.objs])
         name = com._consensus_name_attr(self.objs)
         new_data = self._post_merge(new_data)
         return Series(new_data, index=self.new_axes[0], name=name)
     elif self._is_series:
         data = dict(zip(range(len(self.objs)), self.objs))
         index, columns = self.new_axes
         tmpdf = DataFrame(data, index=index)
         if columns is not None:
             tmpdf.columns = columns
         return tmpdf
     else:
         new_data = self._get_concatenated_data()
         new_data = self._post_merge(new_data)
         return self.objs[0]._from_axes(new_data, self.new_axes)
Developer ID: nitfer, Project: pandas, Lines of code: 19, Source file: merge.py

Example 11: do_load

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
 def do_load(self):
     all_unimported = IncomingSalesforceRecord.get_unimported()
     object_types = all_unimported.select(IncomingSalesforceRecord.object_type).distinct()
     
     for obj in object_types:
         unimported_recs = all_unimported.select().where(IncomingSalesforceRecord.object_type==obj.object_type)
         unimported_dicts = [json.loads(rec.record) for rec in unimported_recs]
         for d in unimported_dicts:
             d['url'] = d['attributes']['url']
             del d['attributes']
             
             for k,v in d.iteritems():
                 if isinstance(v, dict):
                     d[k] = json.dumps(v)
                     
         df = DataFrame(unimported_dicts)
         df.columns = [colname.lower() for colname in df.columns]
         table_name = 'sf_%s' % (obj.object_type.lower())
         logger.info('Writing records for Salesforce object %s to db table %s' % (obj.object_type,table_name))
         df.to_sql(table_name, self.engine, flavor='postgresql', if_exists='replace', index=False, index_label=None)
Developer ID: dimagi, Project: dimagi-data-platform, Lines of code: 22, Source file: loaders.py

Example 12: str_extract

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
def str_extract(arr, pat, flags=0):
    """
    Find groups in each string using passed regular expression

    Parameters
    ----------
    pat : string
        Pattern or regular expression
    flags : int, default 0 (no flags)
        re module flags, e.g. re.IGNORECASE

    Returns
    -------
    extracted groups : Series (one group) or DataFrame (multiple groups)

    Examples
    --------
    A pattern with one group will return a Series. Non-matches will be NaN.

    >>> Series(['a1', 'b2', 'c3']).str.extract('[ab](\d)')
    0      1
    1      2
    2    NaN
    dtype: object

    A pattern with more than one group will return a DataFrame.

    >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])(\d)')
         0    1
    0    a    1
    1    b    2
    2  NaN  NaN

    A pattern may contain optional groups.

    >>> Series(['a1', 'b2', 'c3']).str.extract('([ab])?(\d)')
         0  1
    0    a  1
    1    b  2
    2  NaN  3

    Named groups will become column names in the result.

    >>> Series(['a1', 'b2', 'c3']).str.extract('(?P<letter>[ab])(?P<digit>\d)')
      letter digit
    0      a     1
    1      b     2
    2    NaN   NaN
    """
    regex = re.compile(pat, flags=flags)

    # just to be safe, check this
    if regex.groups == 0:
        raise ValueError("This pattern contains no groups to capture.")
    elif regex.groups == 1:
        def f(x):
            if not isinstance(x, compat.string_types):
                return None
            m = regex.search(x)
            if m:
                return m.groups()[0]  # may be None
            else:
                return None
    else:
        empty_row = Series(regex.groups * [None])

        def f(x):
            if not isinstance(x, compat.string_types):
                return empty_row
            m = regex.search(x)
            if m:
                return Series(list(m.groups()))  # may contain None
            else:
                return empty_row
    result = arr.apply(f)
    result.replace({None: np.nan}, inplace=True)
    if regex.groups > 1:
        result = DataFrame(result)  # Don't rely on the wrapper; name columns.
        names = dict(zip(regex.groupindex.values(), regex.groupindex.keys()))
        result.columns = [names.get(1 + i, i) for i in range(regex.groups)]
    else:
        result.name = regex.groupindex.get(0)
    return result
Developer ID: jzwick, Project: pandas, Lines of code: 73, Source file: strings.py

Example 13: tee

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
stream = (line.decode('cp1251').strip().encode('utf-8')
          for line in stdin)

# tee the stream to get the metadata for title
stream, stream_2 = tee(stream)

title = get_metadata(stream_2)['TITLE']

df = DataFrame()
for cur_data in iter_contextual_atom_data(stream):
    current = DataFrame.from_dict([cur_data])
    df = df.append(current, ignore_index=False)

index_cols = list(df.columns.values)
index_cols.remove('value')
df.set_index(index_cols, inplace=True)
df.columns = [title]

# create removable temp file for use with HDFStore
tmpfile = NamedTemporaryFile().name

store = HDFStore(tmpfile)
store['default'] = df
store.close()

# put h5 file to stdout
with open(tmpfile, 'rb') as f:
    print f.read()

# temp file is automatically removed
Developer ID: petrushev, Project: makstat, Lines of code: 32, Source file: px2h5.py

Example 14: parse

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]

#......... part of the code omitted here .........
        output = OrderedDict()

        for asheetname in sheets:
            if verbose:
                print("Reading sheet {sheet}".format(sheet=asheetname))

            if isinstance(asheetname, compat.string_types):
                sheet = self.get_sheet_by_name(asheetname)
            else:  # assume an integer if not a string
                sheet = self.get_sheet_by_index(asheetname)

            data = self.get_sheet_data(sheet, convert_float)
            usecols = _maybe_convert_usecols(usecols)

            if sheet.nrows == 0:
                output[asheetname] = DataFrame()
                continue

            if is_list_like(header) and len(header) == 1:
                header = header[0]

            # forward fill and pull out names for MultiIndex column
            header_names = None
            if header is not None and is_list_like(header):
                header_names = []
                control_row = [True] * len(data[0])

                for row in header:
                    if is_integer(skiprows):
                        row += skiprows

                    data[row], control_row = _fill_mi_header(data[row],
                                                             control_row)

                    if index_col is not None:
                        header_name, _ = _pop_header_name(data[row], index_col)
                        header_names.append(header_name)

            if is_list_like(index_col):
                # Forward fill values for MultiIndex index.
                if not is_list_like(header):
                    offset = 1 + header
                else:
                    offset = 1 + max(header)

                # Check if we have an empty dataset
                # before trying to collect data.
                if offset < len(data):
                    for col in index_col:
                        last = data[offset][col]

                        for row in range(offset + 1, len(data)):
                            if data[row][col] == '' or data[row][col] is None:
                                data[row][col] = last
                            else:
                                last = data[row][col]

            has_index_names = is_list_like(header) and len(header) > 1

            # GH 12292 : error when read one empty column from excel file
            try:
                parser = TextParser(data,
                                    names=names,
                                    header=header,
                                    index_col=index_col,
                                    has_index_names=has_index_names,
                                    squeeze=squeeze,
                                    dtype=dtype,
                                    true_values=true_values,
                                    false_values=false_values,
                                    skiprows=skiprows,
                                    nrows=nrows,
                                    na_values=na_values,
                                    parse_dates=parse_dates,
                                    date_parser=date_parser,
                                    thousands=thousands,
                                    comment=comment,
                                    skipfooter=skipfooter,
                                    usecols=usecols,
                                    mangle_dupe_cols=mangle_dupe_cols,
                                    **kwds)

                output[asheetname] = parser.read(nrows=nrows)

                if not squeeze or isinstance(output[asheetname], DataFrame):
                    if header_names:
                        output[asheetname].columns = output[
                            asheetname].columns.set_names(header_names)
                    elif compat.PY2:
                        output[asheetname].columns = _maybe_convert_to_string(
                            output[asheetname].columns)

            except EmptyDataError:
                # No Data, return an empty DataFrame
                output[asheetname] = DataFrame()

        if ret_dict:
            return output
        else:
            return output[asheetname]
Developer ID: josham, Project: pandas, Lines of code: 104, Source file: _base.py

Example 15: strat_maLong_maShort

# Required import: from pandas.core.frame import DataFrame [as alias]
# Or: from pandas.core.frame.DataFrame import columns [as alias]
def strat_maLong_maShort(
    df=readYahoo("SPY"),
    maLongDays=10,
    maShortDays=3,
    closeCol="Close",
    highCol="High",
    lowCol="Low",
    openCol="Open",
    signOfTrade=1,
    printit=True,
    block=False,
):
    """ execute strategy which enters and exit based on Moving Average crossovers
        Example:
            from pystrats.state_strats import strat_maLong_maShort as ss
            dfretfinal = ss() #strat_maLong_maShort()
            print dfretfinal
            print dfretfinal['ret'].mean()
        
    """
    close = np.array(df[closeCol])
    high = np.array(df[highCol])
    low = np.array(df[lowCol])
    open = np.array(df[openCol])
    date = np.array(df["Date"])

    ma10 = rolling_mean(close, maLongDays)
    ma9 = rolling_mean(close, maLongDays - 1)
    ma3 = rolling_mean(close, maShortDays)
    ma2 = rolling_mean(close, maShortDays - 1)

    n = len(df)
    nl = n - 1

    #     pMa10 = dsInsert(ma10[0:nl],0,None)
    #     pMa9 = dsInsert(ma9[0:nl],0,None)
    #     pMa3 = dsInsert(ma3[0:nl],0,None)
    #     pMa2 = dsInsert(ma2[0:nl],0,None)

    pMa10 = np.insert(ma10[0:nl], 0, None)
    pMa9 = np.insert(ma9[0:nl], 0, None)
    pMa3 = np.insert(ma3[0:nl], 0, None)
    pMa2 = np.insert(ma2[0:nl], 0, None)

    pClose = np.insert(close[0:nl], 0, None)
    pHigh = np.insert(high[0:nl], 0, None)
    pLow = np.insert(low[0:nl], 0, None)

    # initialize state vector
    state = np.array([1] * n)

    # loop
    start_i = maLongDays + 1
    for i in range(start_i, n):
        if (pClose[i] < pMa10[i]) & (state[i - 1] == 1) & (high[i] > pMa9[i]):
            state[i] = 2
        elif (state[i - 1] == 2) & (low[i] > pMa2[i]):
            state[i] = 2
        elif (state[i - 1] == 2) & (low[i] <= pMa2[i]):
            state[i] = 1

    pState = np.insert(state[0:nl], 0, 1)

    # create entry conditions
    # 1. initial entry (state 1 to state 2)
    e1_2 = np.array((pState == 1) & (state == 2))
    e2_2 = np.array((pState == 2) & (state == 2))
    e2_1 = np.array((pState == 2) & (state == 1))

    dfret = DataFrame([date, pHigh, pLow, pClose, pMa10, pMa9, pMa3, pMa2]).T
    dfret.columns = ["Date", "pHigh", "pLow", "pClose", "pMa10", "pMa9", "pMa3", "pMa2"]

    # create daily entry prices
    dailyEntryPrices = np.array([0] * n)
    # default entry
    dailyEntryPrices = asb(dailyEntryPrices, pMa9, e1_2)
    useCloseOnEntry = e1_2 & (low > pMa9)
    dailyEntryPrices = asb(dailyEntryPrices, close, useCloseOnEntry)
    dailyEntryPrices = asb(dailyEntryPrices, pClose, e2_2)
    dailyEntryPrices = asb(dailyEntryPrices, pClose, e2_1)
    dfret["entry"] = dailyEntryPrices

    # create DAILY settle prices, which are either 0 or the Close
    # dfret$Close <- close
    dailySettlePrices = np.array([0] * n)
    dailySettlePrices = asb(dailySettlePrices, close, e1_2)  # <- close[w1_2]
    dailySettlePrices = asb(dailySettlePrices, close, e2_2)  # dailySettlePrices[w2_2] <- close[w2_2]
    dailySettlePrices = asb(dailySettlePrices, pMa2, e2_1)  # dailySettlePrices[w2_1] <- pMa2[w2_1]

    # adjust for situations where the high is below the pMa2, so you get out at the close
    useCloseOnExit = e2_1 & (high < pMa2)
    dailySettlePrices = asb(
        dailySettlePrices, close, useCloseOnExit
    )  # dailySettlePrices[useCloseOnExit] <- close[useCloseOnExit]
    dfret["exit"] = dailySettlePrices
    dfret["ret"] = dfret["exit"] / dfret["entry"] - 1

    dfret["ret"].fillna(0)
    dfretfinal = dfret.dropna(0)  # dfretfinal <- dfret[-badrows(dfret),]

#......... part of the code omitted here .........
Developer ID: bgithub1, Project: pystrats, Lines of code: 103, Source file: state_strats.py


Note: The pandas.core.frame.DataFrame.columns examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The snippets were selected from open-source projects contributed by various developers; copyright in the source code remains with the original authors. For redistribution and use, refer to each project's license; do not reproduce without permission.