Python skimage.measure module, compare_ssim() example source code

The following 7 code examples, extracted from open-source Python projects, illustrate how to use skimage.measure.compare_ssim().

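Before the project examples, here is a minimal usage sketch (the image file names are placeholders, not taken from any of the projects below). Note that compare_ssim has since been deprecated in newer scikit-image releases in favour of skimage.metrics.structural_similarity; the examples on this page all use the older skimage.measure name.

import cv2
from skimage.measure import compare_ssim

# two same-sized grayscale images ("a.png" and "b.png" are placeholder file names)
img_a = cv2.cvtColor(cv2.imread("a.png"), cv2.COLOR_BGR2GRAY)
img_b = cv2.cvtColor(cv2.imread("b.png"), cv2.COLOR_BGR2GRAY)

# the default call returns a single similarity score (1.0 for identical images)
score = compare_ssim(img_a, img_b)

# full=True additionally returns the per-pixel SSIM map, useful for locating differences
score, diff = compare_ssim(img_a, img_b, full=True)

# for color images, compute SSIM per channel and average with multichannel=True
score_color = compare_ssim(cv2.imread("a.png"), cv2.imread("b.png"), multichannel=True)

print(score, score_color)
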
Project: piwall-cvtools    Author: infinnovation
def subtract_background(self):
        fgbg = cv2.createBackgroundSubtractorMOG2()
        prev = self.frames[0]
        fgmask = fgbg.apply(prev)
        for (i,next) in enumerate(self.frames[1:]):
            prev_gray = cv2.cvtColor(prev, cv2.COLOR_BGR2GRAY)
            next_gray = cv2.cvtColor(next, cv2.COLOR_BGR2GRAY)
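            # SSIM between consecutive grayscale frames; a low score signals a scene transition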
            similarity_metric = compare_ssim(prev_gray, next_gray)
            print('prev/next similarity measure = %f' % similarity_metric)
            if similarity_metric < self.transition_threshold:
                fgmask = fgbg.apply(next)
                fgdn = denoise_foreground(next, fgmask)
                self.transitions.append((1, fgdn))
            else:
                fgmask = fgbg.apply(next)
                self.transitions.append((0, None))
            prev = next.copy()
Project: Yugioh-bot    Author: will7200
def compare_images(image_a, image_b, title):
    # compute the mean squared error and structural similarity
    # index for the images
    m = mse(image_a, image_b)
    s = compare_ssim(image_a, image_b, multichannel=True)

    # setup the figure
    fig = plt.figure(title)
    plt.suptitle("MSE: %.2f, SSIM: %.2f" % (m, s))

    # show first image
    ax = fig.add_subplot(1, 2, 1)
    plt.imshow(image_a, cmap=plt.cm.gray)
    plt.axis("off")

    # show the second image
    ax = fig.add_subplot(1, 2, 2)
    plt.imshow(image_b, cmap=plt.cm.gray)
    plt.axis("off")

    # show the images
    plt.show()
Project: Yugioh-bot    Author: will7200
def test_initial_pass_through_compare(self):
        original = cv2.imread(os.path.join(self.provider.assets, "start_screen.png"))
        against = self.provider.get_img_from_screen_shot()
        wrong = cv2.imread(os.path.join(self.provider.assets, "battle.png"))

        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        wrong = mask_image([127], [255], cv2.cvtColor(wrong, cv2.COLOR_BGR2GRAY), True)
        # compute SSIM; full=True also returns the per-pixel difference image
        (score, diff) = compare_ssim(original, against, full=True)
        diff = (diff * 255).astype("uint8")
        self.assertTrue(score > .90, 'If this is less than .90 the initial compare of the app will fail')
        (score, nothing) = compare_ssim(original, wrong, full=True)
        self.assertTrue(score < .90)
        if self.__debug_pictures__:
            # threshold the difference image, followed by finding contours to
            # obtain the regions of the two input images that differ
            thresh = cv2.threshold(diff, 0, 255,
                                   cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)[1]
            cnts = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL,
                                    cv2.CHAIN_APPROX_SIMPLE)
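            # keep only the contour list (assumes an OpenCV version where findContours returns the contours first, i.e. 2.x/4.x)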
            cnts = cnts[0]
            # loop over the contours
            for c in cnts:
                # compute the bounding box of the contour and then draw the
                # bounding box on both input images to represent where the two
                # images differ
                (x, y, w, h) = cv2.boundingRect(c)
                cv2.rectangle(original, (x, y), (x + w, y + h), (0, 0, 255), 2)
                cv2.rectangle(against, (x, y), (x + w, y + h), (0, 0, 255), 2)
            # show the output images
            diffs = ("Original", original), ("Modified", against), ("Diff", diff), ("Thresh", thresh)
            images = ("Original", original), ("Against", against), ("Wrong", wrong)
            self.setup_compare_images(diffs)
            self.setup_compare_images(images)
Project: pix2pix-pytorch    Author: 1zb
def test(epoch):
    avg_psnr = 0
    avg_ssim = 0
    for left, right in testing_data_loader:

        if args.direction == 'lr':
            input.data.resize_(left.size()).copy_(left)
            target.data.resize_(right.size()).copy_(right)
        else:
            input.data.resize_(right.size()).copy_(right)
            target.data.resize_(left.size()).copy_(left)

        prediction = netG(input)

        im_true = np.transpose(target.data.cpu().numpy(), (0, 2, 3, 1))
        im_test = np.transpose(prediction.data.cpu().numpy(), (0, 2, 3, 1))
        for i in range(input.size(0)):
            avg_psnr += psnr(im_true[i], im_test[i])
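            # per-channel SSIM averaged over the three color channels (ssim is presumably compare_ssim imported under that name)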
            avg_ssim += (ssim(im_true[i,:,:,0], im_test[i,:,:,0]) + ssim(im_true[i,:,:,1], im_test[i,:,:,1]) + ssim(im_true[i,:,:,2], im_test[i,:,:,2])) / 3
    print("[TEST]  PSNR: {:.4f}; SSIM: {:.4f}".format(avg_psnr / len(test_set), avg_ssim / len(test_set)))
Project: Yugioh-bot    Author: will7200
def __is_initial_screen__(self, *args, **kwargs):
        original = cv2.imread(os.path.join(self.assets, "start_screen.png"))
        against = self.get_img_from_screen_shot()
        # convert the images to grayscale
        original = mask_image([127], [255], cv2.cvtColor(original, cv2.COLOR_BGR2GRAY), True)
        against = mask_image([127], [255], cv2.cvtColor(against, cv2.COLOR_BGR2GRAY), True)
        (score, diff) = compare_ssim(original, against, full=True)
        if score > .9:
            return True
        return False
Project: Holden    Author: manugomez95
def classify_ssim(database, names, image):
    # using SSIM
    max=0
    i=0
    for example in database:
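        # SSIM between the query image and each database example; the best-scoring name wins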
        s = ssim(image, example)
        print(names[i] + ' = ' + str(s))
        if s>max:
            max=s
            result = names[i]
        i+=1
    return result

# input:    carta, the image of the complete card
# output:   string value from 2 to 10, A, J, Q or K
Project: tf-exercise-gan    Author: sanghoon
def eval_images_naive(it, gen, data, tag='', sampler=None):
    metrics = OrderedDict()

    if sampler is not None:
        z = sampler(128)
        samples = gen(z)        # Feed z
    else:
        samples = gen(128)      # Generate n images

    true_samples = data.validation.images
    true_labels = data.validation.labels if 'labels' in dir(data.validation) else None


    # Compute dist.
    dist_func = lambda a, b: np.linalg.norm((a - b).reshape((-1)), ord=2)

    # Distance: (generated samples) x (true samples)
    dist = np.array([[dist_func(x, x_true) for x_true in true_samples] for x in samples])

    best_matching_i_true = np.argmin(dist, axis=1)
    metrics['n_modes'] = len(np.unique(best_matching_i_true))
    metrics['ave_dist'] = np.average(np.min(dist, axis=1))


    # Check the labels (if exist)
    if true_labels is not None:
        label_cnts = np.sum(true_labels[best_matching_i_true], axis=0)
        metrics['n_labels'] = np.sum(label_cnts > 0)


    # Compute SSIM among top-k candidates (XXX: No supporting evidence for this approx.)
    k = 10
    top_k_matching_samples = np.argpartition(dist, k, axis=1)[:, :k]

    # Please refer to https://en.wikipedia.org/wiki/Structural_similarity
    # compare_ssim assumes (W, H, C) ordering
    sim_func = lambda a, b: ssim(a, b, multichannel=True, data_range=2.0)

    # Similarity: (generated samples) x (top-k candidates)
    sim = [[sim_func(samples[i], true_samples[i_true]) for i_true in i_topk] \
                                for i, i_topk in enumerate(top_k_matching_samples)]
    sim = np.array(sim)

    metrics['ave_sim'] = np.average(np.max(sim, axis=1))


    # TODO: Impl. IvOM

    # TODO: Impl. better metrics

    print "Eval({}) ".format(it), ', '.join(['{}={:.2f}'.format(k, v) for k, v in metrics.iteritems()])

    return metrics