This article collects typical usage examples of Python's streamlit.write method, to help answer: what exactly does streamlit.write do, and how is it used? You can also explore further usage examples from the streamlit module that this method belongs to.
Below are 7 code examples of streamlit.write, sorted by popularity by default. You can upvote the examples you like or find useful; ratings help the site recommend better Python code examples.
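Before diving into the examples, here is a minimal, self-contained sketch of what st.write does: it renders whatever you pass it (markdown strings, numbers, dicts, DataFrames, and so on) in the running Streamlit app, and it accepts multiple arguments in one call. The column names and values below are made up purely for illustration; run it with `streamlit run app.py`.

import pandas as pd
import streamlit as st

# st.write picks a sensible renderer for each argument type.
st.write("## Hello, Streamlit")                            # markdown string
st.write(42)                                               # plain value
st.write({"alpha": 1.0, "beta": 2.5})                      # dict, shown as JSON
st.write(pd.DataFrame({"x": [1, 2, 3], "y": [4, 5, 6]}))   # interactive table
st.write("You can also pass", "several", "arguments", 123) # rendered in order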
Example 1: load_data
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
# Also requires: import json
def load_data(sql_path, table_path, use_small=False):
    sql_data = []
    table_data = {}

    st.write("Loading data from %s" % sql_path)
    with open(sql_path) as lines:
        for idx, line in enumerate(lines):
            if use_small and idx >= 1000:
                break
            sql = json.loads(line.strip())
            sql_data.append(sql)

    with open(table_path) as lines:
        for line in lines:
            tab = json.loads(line.strip())
            table_data[tab[u'id']] = tab

    for sql in sql_data:
        assert sql[u'table_id'] in table_data

    return sql_data, table_data
Example 2: get_mov_base
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
# Also requires: import pandas as pd; load_links, get_embeddings, load_omdb_meta,
# DATAPATH and SHOW_TOPN_MOVIES are defined elsewhere in the app.
def get_mov_base():
    links = load_links()
    movies_embeddings_tensor, key_to_id, id_to_key = get_embeddings()
    meta = load_omdb_meta()

    popular = pd.read_csv(DATAPATH + 'movie_counts.csv')[:SHOW_TOPN_MOVIES]
    st.write(popular['id'])
    mov_base = {}
    for i, k in list(meta.items()):
        tmdid = int(meta[i]['tmdbId'])
        if tmdid > 0 and popular['id'].isin([i]).any():
            movieid = pd.to_numeric(links.loc[tmdid]['movieId'])
            if isinstance(movieid, pd.Series):
                continue
            mov_base[int(movieid)] = meta[i]['omdb']['Title']
    return mov_base
Example 3: plot_dist
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
# Also requires: import numpy as np, import matplotlib.pyplot as plt, from scipy.stats import beta
def plot_dist(alpha_value: float, beta_value: float, data: np.ndarray = None):
    beta_dist = beta(alpha_value, beta_value)
    xs = np.linspace(0, 1, 1000)
    ys = beta_dist.pdf(xs)

    fig, ax = plt.subplots(figsize=(7, 3))
    ax.plot(xs, ys)
    ax.set_xlim(0, 1)
    ax.set_xlabel("x")
    ax.set_ylabel("P(x)")

    if data is not None:
        likelihoods = beta_dist.pdf(data)
        sum_log_likelihoods = np.sum(beta_dist.logpdf(data))
        ax.vlines(data, ymin=0, ymax=likelihoods)
        ax.scatter(data, likelihoods, color="black")
        # Use the function's own parameters here (the original referenced
        # out-of-scope slider variables).
        st.write(
            f"""
            _Under your alpha={alpha_value:.2f} and beta={beta_value:.2f},
            the sum of log likelihoods is {sum_log_likelihoods:.2f}_
            """
        )
    st.pyplot(fig)
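A function like this is typically driven by two st.slider controls whose values are passed in as alpha_value and beta_value. The wiring below is an assumed sketch; the slider labels, ranges, and sample observations are illustrative, not taken from the original app.

alpha_slider = st.slider("alpha", min_value=0.01, max_value=10.0, value=2.0)
beta_slider = st.slider("beta", min_value=0.01, max_value=10.0, value=2.0)
observations = np.array([0.2, 0.45, 0.7])  # made-up sample data
plot_dist(alpha_slider, beta_slider, data=observations)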
Example 4: download_file
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
# Also requires: import os, import urllib.request; EXTERNAL_DEPENDENCIES is a module-level dict.
def download_file(file_path):
    # Don't download the file twice. (If possible, verify the download using the file length.)
    if os.path.exists(file_path):
        if "size" not in EXTERNAL_DEPENDENCIES[file_path]:
            return
        elif os.path.getsize(file_path) == EXTERNAL_DEPENDENCIES[file_path]["size"]:
            return

    # These are handles to two visual elements to animate.
    weights_warning, progress_bar = None, None
    try:
        weights_warning = st.warning("Downloading %s..." % file_path)
        progress_bar = st.progress(0)
        with open(file_path, "wb") as output_file:
            with urllib.request.urlopen(EXTERNAL_DEPENDENCIES[file_path]["url"]) as response:
                length = int(response.info()["Content-Length"])
                counter = 0.0
                MEGABYTES = 2.0 ** 20.0
                while True:
                    data = response.read(8192)
                    if not data:
                        break
                    counter += len(data)
                    output_file.write(data)

                    # We perform animation by overwriting the elements.
                    weights_warning.warning("Downloading %s... (%6.2f/%6.2f MB)" %
                                            (file_path, counter / MEGABYTES, length / MEGABYTES))
                    progress_bar.progress(min(counter / length, 1.0))

    # Finally, we remove these visual elements by calling .empty().
    finally:
        if weights_warning is not None:
            weights_warning.empty()
        if progress_bar is not None:
            progress_bar.empty()

# This is the main app itself, which appears when the user selects "Run the app".
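The animation trick in Example 4 (keeping a handle to an st.warning / st.progress element and overwriting it in a loop) also works with st.empty as a generic placeholder. The snippet below is a simplified, stand-alone illustration of that same overwrite-in-place pattern, not code from the original app.

import time
import streamlit as st

status = st.empty()          # placeholder that can be overwritten in place
progress = st.progress(0)
for i in range(10):
    status.warning(f"Working... step {i + 1}/10")
    progress.progress((i + 1) / 10)
    time.sleep(0.1)          # stand-in for real work
status.empty()               # remove both elements when done
progress.empty()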
Example 5: print_sample_data
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
# Also requires: qu (the app's query module providing qu.Query).
def print_sample_data(index, sql_data, table_data):
    query = qu.Query(sql_data[index]['sql']['sel'], sql_data[index]['sql']['agg'],
                     sql_data[index]['sql']['conds'])
    st.write('**Sample data:**')
    st.write('*Question*: %s' % sql_data[index][u'question'])
    st.write('*Query*: %s' % repr(query))
    st.write('*Table columns*: %s' % ', '.join(
        ['{}: {}'.format(i, x) for i, x in
         enumerate(table_data[sql_data[index][u'table_id']][u'header'])]))
Example 6: on_connect
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
def on_connect(client, userdata, flags, rc):
    st.write(
        f"Connected with result code {str(rc)} to MQTT broker on {MQTT_BROKER}"
    )

# The callback for when a PUBLISH message is received from the server.
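The callback signature in Example 6 matches paho-mqtt's (1.x) on_connect hook, although the snippet itself does not confirm which client library is in use. Assuming paho-mqtt, it would be registered roughly as below; the MQTT_BROKER value is a placeholder for illustration.

import paho.mqtt.client as mqtt

MQTT_BROKER = "test.mosquitto.org"   # assumed broker host, for illustration only
client = mqtt.Client()
client.on_connect = on_connect       # Streamlit writes a line once the connection succeeds
client.connect(MQTT_BROKER, 1883)
client.loop_start()                  # run the MQTT network loop in a background thread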
Example 7: run_the_app
# Required module: import streamlit [as alias]
# Or: from streamlit import write [as alias]
# Also requires: import os, import pandas as pd; frame_selector_ui, object_detector_ui,
# load_image, draw_image_with_boxes, yolo_v3 and DATA_URL_ROOT are defined elsewhere in the app.
def run_the_app():
    # To make Streamlit fast, st.cache allows us to reuse computation across runs.
    # In this common pattern, we download data from an endpoint only once.
    @st.cache
    def load_metadata(url):
        return pd.read_csv(url)

    # This function uses some Pandas magic to summarize the metadata DataFrame.
    @st.cache
    def create_summary(metadata):
        one_hot_encoded = pd.get_dummies(metadata[["frame", "label"]], columns=["label"])
        summary = one_hot_encoded.groupby(["frame"]).sum().rename(columns={
            "label_biker": "biker",
            "label_car": "car",
            "label_pedestrian": "pedestrian",
            "label_trafficLight": "traffic light",
            "label_truck": "truck"
        })
        return summary

    # An amazing property of st.cache'd functions is that you can pipe them into
    # one another to form a computation DAG (directed acyclic graph). Streamlit
    # recomputes only whatever subset is required to get the right answer!
    metadata = load_metadata(os.path.join(DATA_URL_ROOT, "labels.csv.gz"))
    summary = create_summary(metadata)

    # Uncomment these lines to peek at these DataFrames.
    # st.write('## Metadata', metadata[:1000], '## Summary', summary[:1000])

    # Draw the UI elements to search for objects (pedestrians, cars, etc.)
    selected_frame_index, selected_frame = frame_selector_ui(summary)
    if selected_frame_index is None:
        st.error("No frames fit the criteria. Please select a different label or number.")
        return

    # Draw the UI element to select parameters for the YOLO object detector.
    confidence_threshold, overlap_threshold = object_detector_ui()

    # Load the image from S3.
    image_url = os.path.join(DATA_URL_ROOT, selected_frame)
    image = load_image(image_url)

    # Add boxes for objects on the image. These are the ground-truth boxes.
    boxes = metadata[metadata.frame == selected_frame].drop(columns=["frame"])
    draw_image_with_boxes(image, boxes, "Ground Truth",
                          "**Human-annotated data** (frame `%i`)" % selected_frame_index)

    # Get the boxes for the objects detected by YOLO by running the YOLO model.
    yolo_boxes = yolo_v3(image, confidence_threshold, overlap_threshold)
    draw_image_with_boxes(image, yolo_boxes, "Real-time Computer Vision",
                          "**YOLO v3 Model** (overlap `%3.1f`) (confidence `%3.1f`)" % (overlap_threshold, confidence_threshold))

# This sidebar UI is a little search engine to find certain object types.
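The comment in Example 7 about chaining st.cache'd functions into a computation DAG deserves a stand-alone sketch. The following is a minimal, made-up illustration of that pattern (the URL and the summarization step are placeholders): each cached function is recomputed only when its own inputs change, so editing the summarization logic does not re-trigger the download.

import pandas as pd
import streamlit as st

@st.cache
def load_data(url):
    # Expensive step: runs only when `url` changes.
    return pd.read_csv(url)

@st.cache
def summarize(df):
    # Depends on load_data's output; recomputed only when that output changes.
    return df.describe()

df = load_data("https://example.com/data.csv")   # placeholder URL
st.write(summarize(df))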