

Python pyplot.show Method Code Examples

This article collects typical usage examples of the matplotlib.pyplot.show method in Python. If you are struggling with questions such as what exactly pyplot.show does, how to call it, or what real-world uses of it look like, the hand-picked code examples below may help. You can also explore further usage examples from the containing module, matplotlib.pyplot.


Below are 15 code examples of the pyplot.show method, sorted by popularity by default. You can upvote the examples you like or find useful; your feedback helps the system recommend better Python code examples.
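
Before the project-specific examples below, here is a minimal, self-contained sketch of a typical pyplot.show workflow; the sine-wave data is made up purely for illustration:

import numpy as np
import matplotlib.pyplot as plt

# Build a simple figure, then display it.
x = np.linspace(0, 2 * np.pi, 200)
plt.plot(x, np.sin(x), label='sin(x)')
plt.legend()
plt.show()  # with an interactive backend, this blocks until the window is closed

With a non-interactive backend such as Agg there is no window to open, so plt.show() displays nothing; in that case plt.savefig('figure.png') can be used to write the figure to disk instead.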

Example 1: demo_plot

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def demo_plot():
    audio = './data/esc10/audio/Dog/1-30226-A.ogg'
    y, sr = librosa.load(audio, sr=44100)
    y_ps = librosa.effects.pitch_shift(y, sr, n_steps=6)   # n_steps controls the pitch shift (in semitones)
    y_ts = librosa.effects.time_stretch(y, rate=1.2)   # rate controls the time-stretch factor
    plt.subplot(311)
    plt.plot(y)
    plt.title('Original waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    # plt.axis([88000, 94000, -0.4, 0.4])
    plt.subplot(312)
    plt.plot(y_ts)
    plt.title('Time Stretch transformed waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    plt.subplot(313)
    plt.plot(y_ps)
    plt.title('Pitch Shift transformed waveform')
    plt.axis([0, 200000, -0.4, 0.4])
    # plt.axis([88000, 94000, -0.4, 0.4])
    plt.tight_layout()
    plt.show() 
Developer ID: JasonZhang156, Project: Sound-Recognition-Tutorial, Lines of code: 23, Source file: data_augmentation.py

Example 2: show

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def show(mnist, targets, ret):
    target_ids = range(len(set(targets)))
    
    colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k', 'violet', 'orange', 'purple']
    
    plt.figure(figsize=(12, 10))
    
    ax = plt.subplot(aspect='equal')
    for label in set(targets):
        idx = np.where(np.array(targets) == label)[0]
        plt.scatter(ret[idx, 0], ret[idx, 1], c=colors[label], label=label)
    
    for i in range(0, len(targets), 250):
        img = (mnist[i][0] * 0.3081 + 0.1307).numpy()[0]
        img = OffsetImage(img, cmap=plt.cm.gray_r, zoom=0.5) 
        ax.add_artist(AnnotationBbox(img, ret[i]))
    
    plt.legend()
    plt.show() 
Developer ID: peisuke, Project: MomentumContrast.pytorch, Lines of code: 21, Source file: test.py

Example 3: _capture2dImage

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def _capture2dImage(self, cameraId):
        # Capture Image in RGB

        # WARNING: the same name can only be used six times.
        strName = "capture2DImage_{}".format(random.randint(1,10000000000))

        clientRGB = self.video_service.subscribeCamera(strName, cameraId, AL_kVGA, 11, 10)
        imageRGB = self.video_service.getImageRemote(clientRGB)

        imageWidth   = imageRGB[0]
        imageHeight  = imageRGB[1]
        array        = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)

        # Save the image.
        image_name_2d = "images/img2d-" + str(self.imageNo2d) + ".png"
        im.save(image_name_2d, "PNG") # Stored in images folder in the pwd, if not present then create one
        self.imageNo2d += 1
        im.show()

        return 
Developer ID: maverickjoy, Project: pepper-robot-programming, Lines of code: 26, Source file: asthama_search.py

Example 4: _capture3dImage

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def _capture3dImage(self):
        # Depth Image in RGB

        # WARNING: the same name can only be used six times.
        strName = "capture3dImage_{}".format(random.randint(1,10000000000))


        clientRGB = self.video_service.subscribeCamera(strName, AL_kDepthCamera, AL_kQVGA, 11, 10)
        imageRGB = self.video_service.getImageRemote(clientRGB)

        imageWidth  = imageRGB[0]
        imageHeight = imageRGB[1]
        array       = imageRGB[6]
        image_string = str(bytearray(array))

        # Create a PIL Image from our pixel array.
        im = Image.frombytes("RGB", (imageWidth, imageHeight), image_string)
        # Save the image.
        image_name_3d = "images/img3d-" + str(self.imageNo3d) + ".png"
        im.save(image_name_3d, "PNG") # Stored in images folder in the pwd, if not present then create one
        self.imageNo3d += 1
        im.show()

        return 
Developer ID: maverickjoy, Project: pepper-robot-programming, Lines of code: 26, Source file: asthama_search.py

Example 5: plot_mul

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def plot_mul(Y_hat, Y, pred_len):
    """
    Plots the predicted data versus the true data

    Input: predicted data, true data, length of prediction
    Output: displays the plot

    Note: Run from timeSeriesPredict.py
    """
    fig = plt.figure(facecolor='white')
    ax = fig.add_subplot(111)
    ax.plot(Y, label='Y')
    # Print the predictions in its respective series-length
    for i, j in enumerate(Y_hat):
        shift = [None for p in range(i * pred_len)]
        plt.plot(shift + j, label='Y_hat')
        plt.legend()
    plt.show() 
Developer ID: dhingratul, Project: Stock-Price-Prediction, Lines of code: 20, Source file: helper.py

Example 6: atest_plot_samples

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def atest_plot_samples(self):
        dm = np.linspace(4., 19., 1001)
        samples = []

        for dm_k in dm:
            d = 10.**(dm_k/5.-2.)
            samples.append(self._interp_ebv(self._test_data[0], d))

        samples = np.array(samples).T
        # print samples

        import matplotlib.pyplot as plt
        fig = plt.figure()
        ax = fig.add_subplot(1,1,1)
        for s in samples:
            ax.plot(dm, s, lw=2., alpha=0.5)

        plt.show() 
Developer ID: gregreen, Project: dustmaps, Lines of code: 20, Source file: test_bayestar.py

Example 7: show_result_pyplot

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def show_result_pyplot(model, img, result, score_thr=0.3, fig_size=(15, 10)):
    """Visualize the detection results on the image.

    Args:
        model (nn.Module): The loaded detector.
        img (str or np.ndarray): Image filename or loaded image.
        result (tuple[list] or list): The detection result, can be either
            (bbox, segm) or just bbox.
        score_thr (float): The threshold to visualize the bboxes and masks.
        fig_size (tuple): Figure size of the pyplot figure.
    """
    if hasattr(model, 'module'):
        model = model.module
    img = model.show_result(img, result, score_thr=score_thr, show=False)
    plt.figure(figsize=fig_size)
    plt.imshow(mmcv.bgr2rgb(img))
    plt.show() 
Developer ID: open-mmlab, Project: mmdetection, Lines of code: 19, Source file: inference.py

Example 8: compute_roc

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def compute_roc(y_true, y_pred, plot=False):
    """
    Compute the ROC curve and its AUC score.
    :param y_true: ground truth labels
    :param y_pred: predicted scores
    :param plot: whether to plot the ROC curve
    :return: false positive rates, true positive rates, and AUC score
    """
    fpr, tpr, _ = roc_curve(y_true, y_pred)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Developer ID: StephanZheng, Project: neural-fingerprinting, Lines of code: 23, Source file: util.py

Example 9: compute_roc_rfeinman

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def compute_roc_rfeinman(probs_neg, probs_pos, plot=False):
    """
    Compute the ROC curve and its AUC score from negative- and positive-class probabilities.
    :param probs_neg: probabilities assigned to the negative-class samples
    :param probs_pos: probabilities assigned to the positive-class samples
    :param plot: whether to plot the ROC curve
    :return: false positive rates, true positive rates, and AUC score
    """
    probs = np.concatenate((probs_neg, probs_pos))
    labels = np.concatenate((np.zeros_like(probs_neg), np.ones_like(probs_pos)))
    fpr, tpr, _ = roc_curve(labels, probs)
    auc_score = auc(fpr, tpr)
    if plot:
        plt.figure(figsize=(7, 6))
        plt.plot(fpr, tpr, color='blue',
                 label='ROC (AUC = %0.4f)' % auc_score)
        plt.legend(loc='lower right')
        plt.title("ROC Curve")
        plt.xlabel("FPR")
        plt.ylabel("TPR")
        plt.show()

    return fpr, tpr, auc_score 
Developer ID: StephanZheng, Project: neural-fingerprinting, Lines of code: 25, Source file: util.py

Example 10: generate_dataset

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def generate_dataset(true_w, true_b):
    num_examples = 1000

    features = torch.tensor(np.random.normal(0, 1, (num_examples, num_inputs)), dtype=torch.float)
    # ground-truth labels
    labels = true_w[0] * features[:, 0] + true_w[1] * features[:, 1] + true_b
    # add Gaussian noise
    labels += torch.tensor(np.random.normal(0, 0.01, size=labels.size()), dtype=torch.float)
    # visualize the distribution
    plt.scatter(features[:, 1].numpy(), labels.numpy(), 1)
    plt.show()
    
    return features, labels


# read the dataset in batches
Developer ID: wdxtub, Project: deep-learning-note, Lines of code: 18, Source file: 3_linear_regression_raw.py

Example 11: run_eval

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def run_eval(sess, test_X, test_y):
    ds = tf.data.Dataset.from_tensor_slices((test_X, test_y))
    ds = ds.batch(1)
    X, y = ds.make_one_shot_iterator().get_next()

    with tf.variable_scope("model", reuse=True):
        prediction, _, _ = lstm_model(X, [0.0], False)
        predictions = []
        labels = []
        for i in range(TESTING_EXAMPLES):
            p, l = sess.run([prediction, y])
            predictions.append(p)
            labels.append(l)

    predictions = np.array(predictions).squeeze()
    labels = np.array(labels).squeeze()
    rmse = np.sqrt(((predictions-labels) ** 2).mean(axis=0))
    print("Mean Square Error is: %f" % rmse)

    plt.figure()
    plt.plot(predictions, label='predictions')
    plt.plot(labels, label='real_sin')
    plt.legend()
    plt.show() 
Developer ID: wdxtub, Project: deep-learning-note, Lines of code: 26, Source file: simulate_sin.py

Example 12: data_stat

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def data_stat():
    """data statistic"""
    audio_path = './data/esc10/audio/'
    class_list = [os.path.basename(i) for i in glob(audio_path + '*')]
    nums_each_class = [len(glob(audio_path + cl + '/*.ogg')) for cl in class_list]
    rects = plt.bar(range(len(nums_each_class)), nums_each_class)

    index = list(range(len(nums_each_class)))
    plt.title('Numbers of each class for ESC-10 dataset')
    plt.ylim(ymax=60, ymin=0)
    plt.xticks(index, class_list, rotation=45)
    plt.ylabel("numbers")

    for rect in rects:
        height = rect.get_height()
        plt.text(rect.get_x() + rect.get_width() / 2, height, str(height), ha='center', va='bottom')

    plt.tight_layout()
    plt.show() 
Developer ID: JasonZhang156, Project: Sound-Recognition-Tutorial, Lines of code: 21, Source file: data_analysis.py

Example 13: visualize_2D_trip

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def visualize_2D_trip(self,trip,tw_open,tw_close):
        plt.figure(figsize=(30,30))
        rcParams.update({'font.size': 22})
        # Plot cities
        colors = ['red'] # Depot is first city
        for i in range(len(tw_open)-1):
            colors.append('blue')
        plt.scatter(trip[:,0], trip[:,1], color=colors, s=200)
        # Plot tour
        tour=np.array(list(range(len(trip))) + [0])
        X = trip[tour, 0]
        Y = trip[tour, 1]
        plt.plot(X, Y,"--", markersize=100)
        # Annotate cities with TW
        tw_open = np.rint(tw_open)
        tw_close = np.rint(tw_close)
        time_window = np.concatenate((tw_open,tw_close),axis=1)
        for tw, (x, y) in zip(time_window,(zip(X,Y))):
            plt.annotate(tw,xy=(x, y))  
        plt.xlim(0,60)
        plt.ylim(0,60)
        plt.show()


    # Heatmap of permutations (x=cities; y=steps) 
Developer ID: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines of code: 27, Source file: dataset.py

Example 14: visualize_sampling

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def visualize_sampling(self,permutations):
        max_length = len(permutations[0])
        grid = np.zeros([max_length,max_length]) # initialize heatmap grid to 0
        transposed_permutations = np.transpose(permutations)
        for t, cities_t in enumerate(transposed_permutations): # step t, cities chosen at step t
            city_indices, counts = np.unique(cities_t,return_counts=True,axis=0)
            for u,v in zip(city_indices, counts):
                grid[t][u]+=v # update grid with counts from the batch of permutations
        # plot heatmap
        fig = plt.figure()
        rcParams.update({'font.size': 22})
        ax = fig.add_subplot(1,1,1)
        ax.set_aspect('equal')
        plt.imshow(grid, interpolation='nearest', cmap='gray')
        plt.colorbar()
        plt.title('Sampled permutations')
        plt.ylabel('Time t')
        plt.xlabel('City i')
        plt.show()

    # Heatmap of attention (x=cities; y=steps) 
Developer ID: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines of code: 23, Source file: dataset.py

Example 15: visualize_2D_trip

# Required import: from matplotlib import pyplot [as alias]
# Or: from matplotlib.pyplot import show [as alias]
def visualize_2D_trip(self, trip):
        plt.figure(figsize=(30,30))
        rcParams.update({'font.size': 22})

        # Plot cities
        plt.scatter(trip[:,0], trip[:,1], s=200)

        # Plot tour
        tour=np.array(list(range(len(trip))) + [0])
        X = trip[tour, 0]
        Y = trip[tour, 1]
        plt.plot(X, Y,"--", markersize=100)

        # Annotate cities with order
        labels = range(len(trip))
        for i, (x, y) in zip(labels,(zip(X,Y))):
            plt.annotate(i,xy=(x, y))  

        plt.xlim(0,100)
        plt.ylim(0,100)
        plt.show()


    # Heatmap of permutations (x=cities; y=steps) 
Developer ID: MichelDeudon, Project: neural-combinatorial-optimization-rl-tensorflow, Lines of code: 26, Source file: dataset.py


Note: The matplotlib.pyplot.show method examples in this article were compiled by 純淨天空 from open-source code and documentation platforms such as GitHub and MSDocs. The code snippets were selected from open-source projects contributed by various developers; copyright of the source code belongs to the original authors. For distribution and use, please refer to the license of the corresponding project; do not reproduce without permission.