This page collects typical usage examples of the streamlit.error method in Python. If you have been wondering what exactly streamlit.error does, how to call it, or what real-world usages look like, the curated code samples below may help. You can also explore further usage examples from the streamlit module.
Five code examples of streamlit.error are shown below, sorted by popularity by default. You can upvote the examples you like or find useful; your votes help the system recommend better Python code samples.
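Before diving into the examples, here is a minimal sketch of how streamlit.error is typically used (the widget and message text are illustrative):

import streamlit as st

number = st.number_input("Pick a number", value=0)
if number < 0:
    # st.error renders the message in a red alert box in the app
    st.error("The number must be non-negative.")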
Example 1: safe_sample
# Required module: import streamlit [as alias]
# Or: from streamlit import error [as alias]
import random
from typing import List, Optional, Sequence, TypeVar

T = TypeVar("T")

def safe_sample(l: Sequence[T], n: int, seed: Optional[int] = None) -> List[T]:
    if seed is not None:
        random.seed(seed)
    # Prevent an error from trying to sample more than the population
    return list(random.sample(l, min(n, len(l))))
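A quick sanity check of the clamping behavior (the input values are illustrative):

# Asking for more items than exist simply returns the whole population, shuffled
items = safe_sample(["a", "b", "c"], n=5, seed=42)
assert len(items) == 3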
Example 2: get_tokens
# Required module: import streamlit [as alias]
# Or: from streamlit import error [as alias]
import streamlit as st
from typing import List, Optional

# tokenize and TokenizeMethod come from the surrounding project (gobbli)
def get_tokens(
    texts: List[str], tokenize_method: TokenizeMethod, vocab_size: int
) -> Optional[List[List[str]]]:
    try:
        return tokenize(tokenize_method, texts, vocab_size=vocab_size)
    except RuntimeError as e:
        str_e = str(e)
        if "vocab_size()" in str_e and "pieces_size()" in str_e:
            st.error(
                "SentencePiece requires your texts to have at least as many different tokens "
                "as its vocabulary size. Try a smaller vocabulary size."
            )
            return None
        else:
            raise
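The pattern above generalizes beyond SentencePiece: catch the library error, surface a readable message with st.error, and return None so the caller can bail out. A minimal sketch of the same pattern (the function and message are illustrative):

import streamlit as st

def safe_compute(x: float):
    try:
        return 1.0 / x
    except ZeroDivisionError:
        st.error("x must be non-zero.")
        return None

result = safe_compute(0.0)
if result is None:
    st.stop()  # halt the script run after reporting the error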
Example 3: run_the_app
# Required module: import streamlit [as alias]
# Or: from streamlit import error [as alias]
def run_the_app():
    # To make Streamlit fast, st.cache allows us to reuse computation across runs.
    # In this common pattern, we download data from an endpoint only once.
    @st.cache
    def load_metadata(url):
        return pd.read_csv(url)

    # This function uses some Pandas magic to summarize the metadata DataFrame.
    @st.cache
    def create_summary(metadata):
        one_hot_encoded = pd.get_dummies(metadata[["frame", "label"]], columns=["label"])
        summary = one_hot_encoded.groupby(["frame"]).sum().rename(columns={
            "label_biker": "biker",
            "label_car": "car",
            "label_pedestrian": "pedestrian",
            "label_trafficLight": "traffic light",
            "label_truck": "truck",
        })
        return summary

    # An amazing property of st.cached functions is that you can pipe them into
    # one another to form a computation DAG (directed acyclic graph). Streamlit
    # recomputes only whatever subset is required to get the right answer!
    metadata = load_metadata(os.path.join(DATA_URL_ROOT, "labels.csv.gz"))
    summary = create_summary(metadata)

    # Uncomment this line to peek at these DataFrames.
    # st.write('## Metadata', metadata[:1000], '## Summary', summary[:1000])

    # Draw the UI elements to search for objects (pedestrians, cars, etc.)
    selected_frame_index, selected_frame = frame_selector_ui(summary)
    if selected_frame_index is None:
        st.error("No frames fit the criteria. Please select a different label or number.")
        return

    # Draw the UI element to select parameters for the YOLO object detector.
    confidence_threshold, overlap_threshold = object_detector_ui()

    # Load the image from S3.
    image_url = os.path.join(DATA_URL_ROOT, selected_frame)
    image = load_image(image_url)

    # Draw the ground-truth boxes for objects on the image.
    boxes = metadata[metadata.frame == selected_frame].drop(columns=["frame"])
    draw_image_with_boxes(image, boxes, "Ground Truth",
        "**Human-annotated data** (frame `%i`)" % selected_frame_index)

    # Get the boxes for the objects detected by YOLO by running the YOLO model.
    yolo_boxes = yolo_v3(image, confidence_threshold, overlap_threshold)
    draw_image_with_boxes(image, yolo_boxes, "Real-time Computer Vision",
        "**YOLO v3 Model** (overlap `%3.1f`) (confidence `%3.1f`)" % (overlap_threshold, confidence_threshold))
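The comments above describe piping @st.cache functions into one another so that Streamlit recomputes only the stale part of the computation DAG. A standalone sketch of that pattern (the URL and column name are illustrative; newer Streamlit versions spell the decorator st.cache_data):

import pandas as pd
import streamlit as st

@st.cache
def load_data(url):
    # Runs once per unique URL; later reruns reuse the cached DataFrame
    return pd.read_csv(url)

@st.cache
def summarize(df):
    # Recomputed only when load_data's output changes
    return df.groupby("label").size()

df = load_data("https://example.com/labels.csv")
st.write(summarize(df))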
Example 4: file_selector
# Required module: import streamlit [as alias]
# Or: from streamlit import error [as alias]
def file_selector(folder_path='datasets/'):
    '''
    Selects a delimited or Excel file to be used as a dataset for the model.

    Args:
        folder_path (str): path of the directory that contains the datasets

    Returns:
        str: the full path of the selected file
        df (DataFrame): pandas DataFrame with the dataset
    '''
    filenames = os.listdir(folder_path)
    filenames.sort()
    default_file_index = filenames.index('monthly_air_passengers.csv') if 'monthly_air_passengers.csv' in filenames else 0
    selected_filename = st.sidebar.selectbox('Select a file', filenames, default_file_index)
    extension = selected_filename.split('.')[-1].lower()
    # Check whether the file is in a supported delimited format
    if extension in ['csv', 'txt']:
        try:
            df = pd.read_csv(os.path.join(folder_path, selected_filename))
        except pd.errors.ParserError:
            try:
                df = pd.read_csv(os.path.join(folder_path, selected_filename), delimiter=';')
            except UnicodeDecodeError:
                df = pd.read_csv(os.path.join(folder_path, selected_filename), delimiter=';', encoding='latin1')
        except UnicodeDecodeError:
            try:
                df = pd.read_csv(os.path.join(folder_path, selected_filename), encoding='latin1')
            except pd.errors.ParserError:
                df = pd.read_csv(os.path.join(folder_path, selected_filename), encoding='latin1', delimiter=';')
    elif extension in ['xls', 'xlsx']:
        # pd.read_excel does not take delimiter or encoding arguments, so no fallbacks are needed
        df = pd.read_excel(os.path.join(folder_path, selected_filename))
    else:
        st.error('This file format is not supported yet')
        st.stop()  # halt the script run; df would otherwise be undefined below
    if len(df) < 30:
        data_points_warning = '''
            The dataset contains too few data points to make a reliable prediction,
            which may lead to inaccurate results. At least 50 data points are
            recommended, preferably 100 (Box and Tiao, 1975).
        '''
        st.warning(data_points_warning)
    return os.path.join(folder_path, selected_filename), df
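The nested try/except blocks above spell out each delimiter/encoding combination by hand. The same fallback logic can be expressed as a loop over candidate read options; a sketch, not part of the original project:

import pandas as pd

def read_delimited_with_fallbacks(path):
    # Try the default settings first, then progressively looser combinations
    for kwargs in ({},
                   {"delimiter": ";"},
                   {"encoding": "latin1"},
                   {"delimiter": ";", "encoding": "latin1"}):
        try:
            return pd.read_csv(path, **kwargs)
        except (pd.errors.ParserError, UnicodeDecodeError):
            continue
    raise ValueError(f"Could not parse {path} with any known delimiter/encoding")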
Example 5: st_select_untrained_model
# Required module: import streamlit [as alias]
# Or: from streamlit import error [as alias]
def st_select_untrained_model(
    use_gpu: bool,
    nvidia_visible_devices: str,
    predicate: Callable[[Any], bool] = lambda _: True,
) -> Optional[Tuple[Any, Dict[str, Any]]]:
    """
    Generate widgets allowing users to select an untrained model and apply arbitrary
    model parameters.

    Args:
      use_gpu: If True, initialize the model using a GPU.
      nvidia_visible_devices: The list of devices to make available to the model container.
        Should be either "all" or a comma-separated list of device IDs (e.g. "1,2").
      predicate: A predicate used to filter the available model classes.

    Returns:
      A 2-tuple: the class of the model and the kwargs to initialize the model with.
    """
    model_choices = [
        cls.__name__
        for name, cls in inspect.getmembers(gobbli.model)
        if inspect.isclass(cls) and issubclass(cls, BaseModel) and predicate(cls)
    ]

    model_cls_name = st.sidebar.selectbox("Model Class", model_choices)
    model_params_str = st.sidebar.text_area("Model Parameters (JSON)", value="{}")

    # Slight convenience if the user deletes the text area contents
    if model_params_str == "":
        model_params_str = "{}"

    model_cls = getattr(gobbli.model, model_cls_name)

    # Validate the model parameter JSON
    try:
        model_params = json.loads(model_params_str)
    except Exception:
        st.sidebar.error("Model parameters must be valid JSON.")
        return None

    model_kwargs = {
        "use_gpu": use_gpu,
        "nvidia_visible_devices": nvidia_visible_devices,
        **model_params,
    }

    # Validate the parameters using the model initialization function
    try:
        model_cls(**model_kwargs)
    except (TypeError, ValueError) as e:
        st.sidebar.error(f"Error validating model parameters: {e}")
        return None

    return model_cls, model_kwargs
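A possible call site, assuming the surrounding gobbli app (the argument values are illustrative):

result = st_select_untrained_model(use_gpu=False, nvidia_visible_devices="all")
if result is not None:
    model_cls, model_kwargs = result
    model = model_cls(**model_kwargs)  # construct the validated, untrained model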