本文整理汇总了Python中quandl.get函数的典型用法代码示例。如果您正苦于以下问题:Python get函数的具体用法?Python get怎么用?Python get使用的例子?那么恭喜您, 这里精选的函数代码示例或许可以为您提供帮助。
在下文中一共展示了get函数的15个代码示例,这些例子默认根据受欢迎程度排序。您可以为喜欢或者感觉有用的代码点赞,您的评价将有助于系统推荐出更棒的Python代码示例。
示例1: load_eur
def load_eur():
    """Return a single monthly cash-rate series for EUR (and its German
    predecessors prior to the introduction of the euro).

    The series named ``cash_rate_eur`` is stitched from four consecutive
    historical segments:
      1. up to 1936-06: German bank rate (Quandl, ``CashFile.GER_BANKRATE``)
      2. 1936-06..1948-06: hard-coded wartime rates (4.0 / 3.5 / 5.0)
      3. up to 1959: discount rate read from a local Bundesbank CSV
      4. 1960..1998: 3M interbank rate from FRED, then 1999+: 3M EURIBOR

    Returns:
        pd.Series: monthly cash rate, forward-filled within each segment.
    """
    bank_rate = quandl.get(CashFile.GER_BANKRATE.value,
                           api_key=AdagioConfig.quandl_token)
    # WW2-era rates are not available from any feed; pin the three known
    # rate changes and forward-fill them across a monthly index below.
    ww2_data = pd.DataFrame([4.0, 3.5, 5.0],
                            index=[datetime(1936, 6, 30),
                                   datetime(1940, 4, 9),
                                   datetime(1948, 6, 28)])
    ww2_month = pd.date_range('1936-06-01', '1948-06-01', freq='M')
    ww2_month = pd.DataFrame(index=ww2_month)
    # Align the three observations onto the monthly grid, padding forward.
    ww2_data = pd.concat((ww2_data, ww2_month), axis=1).fillna(method="pad")
    # CSV dates come as "YYYY-MM"; shift each to its business month-end so
    # the index lines up with the other (month-end) segments.
    parser = lambda d: date_shift(datetime.strptime(d, "%Y-%m"),
                                  "+BMonthEnd")
    filename = join(DATA_DIRECTORY, 'cash_rate', 'eur', 'BBK01.SU0112.csv')
    # skiprows/skipfooter strip the Bundesbank header and footer boilerplate.
    discount_rate = pd.read_csv(filename,
                                skiprows=[1, 2, 3, 4], index_col=0,
                                usecols=[0, 1], engine="python", skipfooter=95,
                                parse_dates=True, date_parser=parser)
    ib_rate = DataReader(CashFile.EUR_3M_IB_RATE.value, "fred", START_DATE)
    libor = quandl.get(CashFile.EUR_3M_EURIBOR.value,
                       api_key=AdagioConfig.quandl_token)
    # Each segment is sliced to a non-overlapping date range, so after the
    # column-wise concat every row has exactly one non-NaN value and
    # sum(axis=1) collapses the frame into one continuous series.
    data = (pd.concat((bank_rate[:"1936-06"].fillna(method="pad"),
                       ww2_data,
                       discount_rate[:"1959"].fillna(method="pad"),
                       to_monthend(ib_rate['1960':"1998"].fillna(method="pad")),
                       libor['1999':].fillna(method="pad")),
                      axis=1)
            .sum(axis=1).rename("cash_rate_eur"))
    return data
示例2: sample
def sample():
    """Fetch the Alabama and Alaska house-price indices, preview each,
    and print the first rows of their date-aligned join."""
    alabama = quandl.get("FMAC/HPI_AL", authtoken=api_key)
    alaska = quandl.get("FMAC/HPI_AK", authtoken=api_key)
    # Rename the single value column of each frame after its state.
    alabama.columns = ['HPI_AL']
    alaska.columns = ['HPI_AK']
    print(alabama.head())
    print(alaska.head())
    combined = alabama.join(alaska)
    print(combined.head())
示例3: get_unemployment
def get_unemployment(api_key):
    """Return US unemployment as percent change from the first observation,
    resampled to month-end means."""
    df = quandl.get("ECPI/JOB_G", trim_start="1975-01-01", authtoken=api_key)
    df.columns = ['unemployment']
    baseline = df['unemployment'][0]
    df['unemployment'] = (df['unemployment'] - baseline) / baseline * 100.0
    # Upsample to daily first, then down to month-end, matching the
    # alignment used by the other series in this collection.
    df = df.resample('D').mean()
    return df.resample('M').mean()
示例4: gdp_data
def gdp_data():
    """Return GDP (Quandl BCB/4385) as a Series of percent change from the
    first observation, at month-end frequency."""
    gdp = quandl.get("BCB/4385", trim_start="1975-01-01")
    baseline = gdp["Value"][0]
    gdp["Value"] = (gdp["Value"] - baseline) / baseline * 100.0
    gdp = gdp.resample('M').mean()
    gdp.rename(columns={'Value': 'GDP'}, inplace=True)
    # Collapse the single-column frame to a Series for easy joining.
    return gdp['GDP']
示例5: mortgage_30yr
def mortgage_30yr():
    """Return the 30-year mortgage rate (Quandl FMAC/MORTG) as a Series of
    percent change from the first observation, at month-end frequency."""
    rates = quandl.get('FMAC/MORTG', trim_start="1975-01-01")
    baseline = rates['Value'][0]
    rates['Value'] = (rates['Value'] - baseline) / baseline * 100
    rates = rates.resample('M').mean()
    rates.rename(columns={'Value': 'M30'}, inplace=True)
    # Collapse the single-column frame to a Series for easy joining.
    return rates['M30']
示例6: HPI_Benchmark
def HPI_Benchmark():
    """Download the US-wide house-price index, convert it to percent change
    from the first observation, and pickle it to 'HPI_bench.pickle'.

    Fix: the pickle file was opened/closed by hand, which leaks the handle
    if pickle.dump raises; a context manager guarantees closure.
    """
    df = quandl.get("FMAC/HPI_USA", authtoken=api_key)
    df.columns = ['United States']
    baseline = df["United States"][0]
    df["United States"] = (df["United States"] - baseline) / baseline * 100.0
    with open('HPI_bench.pickle', 'wb') as pickle_out:
        pickle.dump(df, pickle_out)
示例7: HPI_Benchmark
def HPI_Benchmark():
    """Download the US-wide house-price index, add a 'United States' column
    holding percent change from the first observation, and pickle the frame
    to 'us_pct.pickle'.

    Note: the raw 'Value' column is intentionally left in the frame
    alongside the derived column, matching the original behavior.
    Fix: the pickle file was opened/closed by hand, which leaks the handle
    if pickle.dump raises; a context manager guarantees closure.
    """
    df = quandl.get('FMAC/HPI_USA', authtoken=api_key)
    baseline = df['Value'][0]
    df['United States'] = (df['Value'] - baseline) / baseline * 100.0
    with open('us_pct.pickle', 'wb') as pickle_out:
        pickle.dump(df, pickle_out)
示例8: load_quandl_newsentiment
def load_quandl_newsentiment(dataset, start, end):
    """Load news-sentiment series for every row of *dataset*, caching each
    Quandl download as '<NSCode>-cache.csv'.

    Args:
        dataset: iterable of rows with an 'NSCode' column (Quandl NS1 codes).
        start, end: numeric date bounds compared against time_to_num(rows[0])
            — presumably ordinal/epoch dates; TODO confirm against caller.

    Returns:
        None. Results are accumulated into ``datens`` and ``sentiment``,
        which are not defined in this function — NOTE(review): they look
        like module-level lists mutated as a side effect; confirm they are
        (re)initialized by the caller before each invocation.
    """
    cache_file = 'NS1/ns1-cache.csv'  # NOTE(review): unused — only the per-stock caches below are written
    quandl_auth = 'T2GAyK64nwsePiJWMq8y'
    #ns1 = pd.DataFrame()
    i=1
    for index, row in dataset.iterrows():
        #ns2[i] = ns1
        ns1 = []
        ns1 = pd.DataFrame()
        stock_cache_file = row['NSCode']+'-cache.csv'
        # Download only when no cache exists for this code.
        if not(os.path.exists(stock_cache_file)):
            print(row['NSCode'])
            print ('Downloading news for', row['NSCode'])
            allnews_data = quandl.get(row['NSCode'], authtoken=quandl_auth)
            ns1 = ns1.append(allnews_data)
            ns1.to_csv(stock_cache_file)
        # Always read back from the cache (fresh download included) so the
        # parsing path is identical in both cases.
        if os.path.exists(stock_cache_file):
            with open(stock_cache_file, 'r') as csvfile:
                csvFileReader = csv.reader(csvfile)
                next(csvFileReader)  # skip header row
                print ('Loading news from cache ', row['NSCode'])
                for rows in csvFileReader:
                    date=int(time_to_num(rows[0]))
                    # Keep only observations strictly inside (start, end).
                    if date > start and date < end:
                        # print(start,date,end)
                        datens.append(date)
                        sentiment.append(rows[1])
        #print (datens, ' ',sentiment,"\n")
        #ns1.to_csv(cache_file)
    return
示例9: store_quandl
def store_quandl(code):
    """Fetch a Quandl dataset and persist every row that is not already
    stored for this (date, code) pair, committing per row."""
    data = quandl.get(code)
    new_entry_c = 0
    print('Storing quandl data for: %s...' % (code), " ", end="")
    quandl_cls = db.base.classes.quandl
    # iterrows yields (date_index, row); the index is the observation date.
    for date, row in data.iterrows():
        # Skip rows already present so re-runs are idempotent.
        exists = (db.session.query(quandl_cls)
                  .filter(quandl_cls.date == date)
                  .filter(quandl_cls.code == code)
                  .first())
        if exists:
            continue
        db.session.add(quandl_cls(p_time=datetime.now(),
                                  date=date,
                                  code=code,
                                  value=row['Value']))
        db.session.commit()
        new_entry_c += 1
    print('added: %s/%s entries' % (new_entry_c, len(data)))
示例10: load_from_quandl
def load_from_quandl(self):
    """ Download data from quandl

    Fetches the dataset whose code is stored on this object under
    ``keys.quandl_ticker`` and returns it.  ``check_if_expired`` is run on
    the result first — presumably validating that the downloaded contract
    is still live; TODO confirm its semantics in the class definition.
    """
    logger.debug('Downloading data from Quandl')
    data = quandl.get(self[keys.quandl_ticker],
                      api_key=AdagioConfig.quandl_token)
    self.check_if_expired(data)
    return data
示例11: build_graph
def build_graph(ticker):
    """Build a Bokeh line chart of the last month of closing prices for
    *ticker* (Quandl WIKI dataset) and return its (script, div) embed parts."""
    prices = quandl.get('WIKI/' + ticker)
    # Window: one calendar month back, ending yesterday.
    end = date.today() - timedelta(1)
    start = end - relativedelta(months=1)
    window = prices[start:end]
    plot = figure(title='Data from Quandle WIKI set',
                  x_axis_label='date',
                  x_axis_type='datetime',
                  y_axis_label='price')
    # x: datetime index, y: closing prices.
    plot.line(window.index, window['Close'], color='navy', alpha=0.5)
    return components(plot)
示例12: scrapeDailyNews
def scrapeDailyNews():
    """Fetch the latest AAPL news-sentiment row from Quandl (AOS/AAPL) and
    persist it as a News object stamped with the current time.

    Fix: the bare ``except:`` swallowed every exception, including
    KeyboardInterrupt/SystemExit; it now catches only Exception.
    """
    # NOTE(review): this Yahoo scrape is dead code here — ``tbl`` is never
    # used (unlike scrapeDailyOptions, which reads the date from it).
    # Kept to preserve behavior; confirm before removing.
    url = "http://finance.yahoo.com/q/hp?s=AAPL+Historical+Prices"
    content = urllib2.urlopen(url).read()
    soup = BeautifulSoup(content, "lxml")
    tbl = soup.find("table", {"class": "yfnc_datamodoutline1"}).findNext('table').find_all('tr')[1].find_all('td')
    newsDate = datetime.now()
    mydata = quandl.get("AOS/AAPL")
    row = mydata.iloc[-1:]  # most recent observation only
    avgSent = float(row['Article Sentiment'])
    impactScore = float(row['Impact Score'])
    news = News(
        date = newsDate,
        avgSent = avgSent,
        impactScore = impactScore)
    try:
        news.save()
        print("Saved news object ({})".format(news.objectId))
    except Exception:
        # save() is expected to fail on duplicates; treat that as best-effort.
        print("News data has already been saved.")
示例13: scrapeDailyOptions
def scrapeDailyOptions():
    """Fetch the latest AAPL implied-volatility row from Quandl (VOL/AAPL)
    and persist it as an Option object dated from Yahoo's latest price row.

    Fixes:
      * copy-paste bug: ``ivMean20`` was assigned ``ivmean10`` instead of
        ``ivmean20``, silently storing the wrong 20-day IV.
      * the bare ``except:`` swallowed every exception, including
        KeyboardInterrupt/SystemExit; it now catches only Exception.
    """
    # Scrape the most recent trading date from Yahoo's historical-prices table.
    url = "http://finance.yahoo.com/q/hp?s=AAPL+Historical+Prices"
    content = urllib2.urlopen(url).read()
    soup = BeautifulSoup(content, "lxml")
    tbl = soup.find("table", {"class": "yfnc_datamodoutline1"}).findNext('table').find_all('tr')[1].find_all('td')
    optionDate = datetime.strptime(tbl[0].string, "%b %d, %Y")
    mydata = quandl.get("VOL/AAPL")
    row = mydata.iloc[-1:]  # most recent observation only
    ivmean10 = float(row['IvMean10'])
    ivmean20 = float(row['IvMean20'])
    ivmean30 = float(row['IvMean30'])
    ivmean60 = float(row['IvMean60'])
    option = Option(
        date = optionDate,
        ivMean10 = ivmean10,
        ivMean20 = ivmean20,
        ivMean30 = ivmean30,
        ivMean60 = ivmean60)
    try:
        option.save()
        print("Save option object ({})".format(option.objectId))
    except Exception:
        # save() is expected to fail on duplicates; treat that as best-effort.
        print("Option data has already been saved.")
示例14: second_stock
def second_stock():
    """Render a Bokeh close-price chart for the ticker stored in
    ``app_stock.vars['name']`` and return the rendered HTML page.

    Pulls one month (2016-05-05..2016-06-05) of the ticker's WIKI series
    from Quandl, plots Date vs Close, embeds the plot with inline BokehJS
    resources, and renders it into 'stockgraph.html'.
    """
    n = app_stock.vars['name']
    # ".4" selects column 4 (Close) of the WIKI dataset.
    ss = "WIKI/" + n + ".4"
    mydata = quandl.get(ss, encoding='latin1', parse_dates=['Date'], dayfirst=True, index_col='Date', trim_start="2016-05-05", trim_end="2016-06-05", returns = "numpy", authtoken="ZemsPswo-xM16GFxuKP2")
    # returns="numpy" yields a record array; wrap it back into a DataFrame.
    mydata = pd.DataFrame(mydata)
    #mydata['Date'] = mydata['Date'].astype('datetime64[ns]')
    x = mydata['Date']
    y = mydata['Close']
    p = figure(title="Stock close price", x_axis_label='Date', y_axis_label='close price', plot_height = 300, plot_width = 550)
    p.line(x, y, legend="Price in USD", line_width=3, color = "#2222aa")
    # Configure resources to include BokehJS inline in the document.
    # For more details see:
    # http://bokeh.pydata.org/en/latest/docs/reference/resources_embedding.html#bokeh-embed
    js_resources = INLINE.render_js()
    css_resources = INLINE.render_css()
    # For more details see:
    # http://bokeh.pydata.org/en/latest/docs/user_guide/embedding.html#components
    script, div = components(p, INLINE)
    html = flask.render_template(
        'stockgraph.html',
        ticker = app_stock.vars['name'],
        plot_script=script,
        plot_div=div,
        js_resources=js_resources,
        css_resources=css_resources,
    )
    return encode_utf8(html)
示例15: load_stock_datasets
def load_stock_datasets():
    """Return a combined Apple/Facebook/Google WIKI price DataFrame, tagged
    with a 'Company' column and cached in 'stocks.xlsx'."""
    data_file = "stocks.xlsx"
    if os.path.isfile(data_file):
        # Serve from the on-disk cache when available.
        return pd.read_excel(data_file)
    quandl.ApiConfig.api_key = 'a5JKbmNDb4k98huTPMcY'
    google = quandl.get('WIKI/GOOGL')
    google["Company"] = "Google"
    facebook = quandl.get('WIKI/FB')
    facebook["Company"] = "Facebook"
    apple = quandl.get('WIKI/AAPL')
    apple["Company"] = "Apple"
    # Stack in Apple/Facebook/Google order and flatten the date index.
    stocks = pd.concat([apple, facebook, google]).reset_index()
    stocks.to_excel(data_file, index=False)
    return stocks