This article collects typical usage examples of DataReader.columns from pandas.io.data in Python (strictly speaking, columns is an attribute of the DataFrame that DataReader returns). If you are wondering what DataReader.columns does or how to use it, the curated code examples below may help. You can also explore further usage examples of the containing class, pandas.io.data.DataReader.
The following shows 5 code examples involving DataReader.columns, ordered by popularity by default.
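To ground the examples, here is a minimal, hedged sketch of the pattern they all share: fetch a DataFrame with DataReader and rename it through the plain columns attribute. The FRED series code and dates are arbitrary placeholders, and note that pandas.io.data was later split out into the separate pandas-datareader package, so the import may need adjusting in newer environments.
from datetime import datetime
from pandas.io.data import DataReader   # newer pandas: from pandas_datareader.data import DataReader

# fetch the 10-year Treasury constant-maturity rate from FRED and give the
# single column a friendlier name via the ordinary DataFrame `columns` attribute
rates = DataReader('GS10', 'fred', datetime(2014, 1, 1), datetime(2015, 1, 1))
rates.columns = ['TenYearRate']
print(rates.head())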
Example 1: get_riskfree_rate
# Required import: from pandas.io.data import DataReader [as alias]
# Or: from pandas.io.data.DataReader import columns [as alias]
def get_riskfree_rate(self, startdate, enddate, freq="M", maturity='1M'):
    """
    Risk-free rates from FRED (4-week Treasury bill secondary market rate, DTB4WK)
    http://research.stlouisfed.org/fred2/categories/116
    """
    rfcache = self.__class__._cacherfrate
    grabdata = False
    if rfcache is None:
        grabdata = True
    elif rfcache[0] > startdate or rfcache[1] < enddate:
        # the cached range does not cover the requested window, so refresh it
        grabdata = True
    if grabdata:
        dt = DataReader('DTB4WK', 'fred', startdate, enddate)
        dt.columns = ['RFRate']
        dt.fillna(method='backfill', inplace=True)
        rfcache = (startdate, enddate, dt)
        self.__class__._cacherfrate = rfcache
    else:
        dt = rfcache[2]
    # slice to the requested window and downsample to month-end frequency
    dsm = dt[startdate:enddate].resample('M')
    return dsm
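For context, a hypothetical usage sketch under the legacy pandas API assumed by these examples: the function above is written as a method and relies on a class-level _cacherfrate cache, so a minimal wrapper class (the class name here is my own, not from the original project) is needed to call it.
import datetime

class RiskFree(object):
    _cacherfrate = None            # (startdate, enddate, DataFrame) cache used by the method

RiskFree.get_riskfree_rate = get_riskfree_rate   # attach the function above as a method

monthly_rf = RiskFree().get_riskfree_rate(datetime.date(2014, 1, 1),
                                          datetime.date(2015, 1, 1))
print(monthly_rf.head())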
Example 2: getHistoricalQuotes
# Required import: from pandas.io.data import DataReader [as alias]
# Or: from pandas.io.data.DataReader import columns [as alias]
def getHistoricalQuotes(self, symbol, index, market=None):
    assert isinstance(index, pd.Index)
    source = 'yahoo'
    try:
        quotes = DataReader(symbol, source, index[0], index[-1])
    except Exception:
        log.error('** Could not get {} quotes'.format(symbol))
        return pd.DataFrame()
    if index.freq != pd.datetools.BDay() and index.freq != pd.datetools.Day():
        # re-index only when the requested frequency is neither daily nor business-daily
        # NOTE reIndexDF has a column arg, but it is not provided here
        quotes = utils.reIndexDF(quotes, delta=index.freq, reset_hour=False)
    if not quotes.index.tzinfo:
        quotes.index = quotes.index.tz_localize(self.tz)
    quotes.columns = utils.Fields.QUOTES
    return quotes
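Again purely for orientation, a hypothetical call; feed stands in for whatever object carries this method, self.tz, and the utils/log helpers, none of which are shown in the original.
index = pd.date_range('2015-01-02', '2015-06-30', freq=pd.datetools.BDay())
quotes = feed.getHistoricalQuotes('AAPL', index)
if not quotes.empty:
    print(quotes.head())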
Example 3: date
# Required import: from pandas.io.data import DataReader [as alias]
# Or: from pandas.io.data.DataReader import columns [as alias]
# pivot so that each airline's daily negative-sentiment share is a column
bda_negative = by_day_airline.pivot('tweet_date', 'airline', 'negative')
# plot negative sentiment over time, one line per airline
airlines = ['Delta', 'Southwest', 'US Airways', 'United', 'Virgin America']
bda_negative[airlines].plot(linestyle='--', figsize=(12, 9))
plt.ylabel('% Negative')
plt.xlabel('Date')
plt.title('% of Tweets with Negative Sentiment, by Airline')
plt.savefig('airline_sentiment.pdf', bbox_inches='tight')
# grab stock data for the list of airlines
start = date(2015, 2, 17)
end = date(2015, 2, 24)
tickers = ['DAL', 'LUV', 'AAL', 'UAL', 'VA']
prices = DataReader(tickers, 'yahoo', start, end)['Adj Close']
# rename columns so that they are airline names, not tickers
# (the labels assume the returned columns are in alphabetical ticker order: AAL, DAL, LUV, UAL, VA)
prices.columns = ['US Airways Price', 'Delta Price', 'Southwest Price', 'United Price', 'Virgin America Price']
# join stock data to the Twitter sentiment data on the date index
joined = pd.merge(bda_negative, prices, left_index=True, right_index=True)
# plot to see co-movements between United's sentiment and its share price
united = ['United', 'United Price']
joined[united].plot(linestyle='--', figsize=(12, 9))
# use a broken axis?
# with more days of data, create normalized variables, calculated as deviation from the all-airline average sentiment level
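The last comment above hints at a normalization step that is not shown. A minimal sketch of one reading of it, computing each airline's deviation from the day's all-airline average, reusing the bda_negative frame and airlines list from the example:
# average negative-sentiment share across all airlines, per day
airline_avg = bda_negative[airlines].mean(axis=1)
# each airline's deviation from that daily average
normalized = bda_negative[airlines].sub(airline_avg, axis=0)
normalized.plot(linestyle='--', figsize=(12, 9))
plt.title('Negative Sentiment: Deviation from All-Airline Daily Average')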
Example 4: _ohlc
# Required import: from pandas.io.data import DataReader [as alias]
# Or: from pandas.io.data.DataReader import columns [as alias]
def _ohlc(code, source='yahoo', start=None, end=None):
    """Fetch OHLC data for *code* and normalize column names to snake_case."""
    df = DataReader(code, source, start=start, end=end)
    # e.g. 'Adj Close' -> 'adj_close'
    df.columns = [c.replace(' ', '_').lower() for c in df.columns]
    return df
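A brief hypothetical usage, with the ticker and dates chosen only for illustration; the renamed frame exposes snake_case columns such as adj_close.
from datetime import datetime

aapl = _ohlc('AAPL', start=datetime(2014, 1, 1), end=datetime(2014, 6, 30))
print(aapl[['open', 'high', 'low', 'close', 'adj_close']].tail())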
Example 5: DataReader
# Required import: from pandas.io.data import DataReader [as alias]
# Or: from pandas.io.data.DataReader import columns [as alias]
# plt.show()
pic.new()
plt.savefig("C:/Users/oskar/Documents/doc_no_backup/python_crap/temp/%s.png" % (str(pic.num)))
rep.addimage("C:/Users/oskar/Documents/doc_no_backup/python_crap/temp/%s.png" % (str(pic.num)), 7, 4, 'LEFT')
plt.close()
## unemployment model ---------------------------------------------------------------------------------------
UNEMP = DataReader("UNRATE", "fred", start, end)      # unemployment rate
N_UNEMP = DataReader("NROU", "fred", start, end)      # natural rate of unemployment
EQ = DataReader("SP500", "fred", start, end)          # S&P 500 index level (SPX)
UNEMP = pd.merge(UNEMP, N_UNEMP, how='outer', left_index=True, right_index=True)
UNEMP = pd.merge(UNEMP, EQ, how='inner', left_index=True, right_index=True)
UNEMP = UNEMP.replace('.', np.nan).fillna(method='ffill')
UNEMP.columns = ['UNEMP', 'N_UNEMP', 'EQ']
# f (defined earlier in the original script) converts the FRED strings to floats;
# do the conversion before computing the moving average
UNEMP[['UNEMP', 'N_UNEMP', 'EQ']] = UNEMP[['UNEMP', 'N_UNEMP', 'EQ']].applymap(f)
UNEMP['MAS'] = pd.rolling_mean(UNEMP['EQ'], 15)       # 15-period moving average of the equity series
UNEMP['Excess'] = UNEMP['UNEMP'] - UNEMP['N_UNEMP']   # unemployment gap
# simple long/flat rule: go long when equities are above their moving average and the gap is positive
trade = list(np.zeros(15))
i = 15
pos3 = 0
while i < len(UNEMP['EQ']):
    if UNEMP['EQ'].ix[i] > UNEMP['MAS'].ix[i] and UNEMP['Excess'].ix[i] > 0.0:
        pos3 = 1
        trade.append(pos3)
    elif UNEMP['EQ'].ix[i] < UNEMP['MAS'].ix[i] and min(UNEMP['Excess'].ix[i-12:i]) < 0.0:
        pos3 = 0
        trade.append(pos3)
    else: