Python scipy.ndimage.filters module: maximum_filter() code examples

We extracted the following 28 code examples from open-source Python projects to illustrate how to use scipy.ndimage.filters.maximum_filter().
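Before the project examples, here is a minimal orientation sketch (not taken from any of the projects below). The recurring idiom is that a pixel which equals its own neighborhood maximum, as computed by maximum_filter, is a local maximum. Flat background regions satisfy that test trivially and have to be masked out separately, which is why several projects below (the audio-fingerprint and PyEMD examples) erode the background first.

import numpy as np
from scipy.ndimage.filters import maximum_filter

a = np.zeros((5, 5))
a[1, 1] = 9.0
a[3, 3] = 5.0

# each output pixel becomes the maximum of its 3x3 neighborhood
local_max = maximum_filter(a, size=3)

# a pixel equal to its neighborhood maximum is a local maximum;
# the flat zero background passes that test too, so mask it out
peaks = (a == local_max) & (a > 0)
print(np.argwhere(peaks))   # [[1 1] [3 3]] -- the 9 and the 5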

Project: saliency    Author: shuuchen    | Project source | File source
def N(image):
    """
        Normalization parameter as per Itti et al. (1998).
        returns a normalized feature map image.
    """
    M = 8.  # an arbitrary global maximum to which the image is scaled.
            # (When saving saliency maps as images, pixel values may become
            # too large or too small for the chosen image format depending
            # on this constant)
    image = cv2.convertScaleAbs(image, alpha=M/image.max(), beta=0.)
    w,h = image.shape
    maxima = maximum_filter(image, size=(w//10, h//1))
    maxima = (image == maxima)
    mnum = maxima.sum()
    logger.debug("Found %d local maxima.", mnum)
    maxima = numpy.multiply(maxima, image)
    mbar = float(maxima.sum()) / mnum
    logger.debug("Average of local maxima: %f.  Global maximum: %f", mbar, M)
    return image * (M-mbar)**2
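A hypothetical way to exercise N() on a synthetic feature map; cv2, numpy, maximum_filter and a module-level logger are assumed to be imported in the original file:

import logging
import numpy
import cv2
from scipy.ndimage.filters import maximum_filter

logger = logging.getLogger(__name__)

feature_map = numpy.random.rand(120, 160).astype(numpy.float32)
normalized = N(feature_map)   # scaled so its maximum is M, then weighted by (M - mbar)**2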
Project: SegmentationService    Author: jingchaoluan    | Project source | File source
def compute_colseps_conv(binary,scale=1.0):
    """Find column separators by convoluation and
    thresholding."""
    h,w = binary.shape
    # find vertical whitespace by thresholding
    smoothed = gaussian_filter(1.0*binary,(scale,scale*0.5))
    smoothed = uniform_filter(smoothed,(5.0*scale,1))
    thresh = (smoothed<amax(smoothed)*0.1)
    DSAVE("1thresh",thresh)
    # find column edges by filtering
    grad = gaussian_filter(1.0*binary,(scale,scale*0.5),order=(0,1))
    grad = uniform_filter(grad,(10.0*scale,1))
    # grad = abs(grad) # use this for finding both edges
    grad = (grad>0.5*amax(grad))
    DSAVE("2grad",grad)
    # combine edges and whitespace
    seps = minimum(thresh,maximum_filter(grad,(int(scale),int(5*scale))))
    seps = maximum_filter(seps,(int(2*scale),1))
    DSAVE("3seps",seps)
    # select only the biggest column separators
    seps = morph.select_regions(seps,sl.dim0,min=args['csminheight']*scale,nbest=args['maxcolseps'])
    DSAVE("4seps",seps)
    return seps
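The anisotropic window sizes are the key detail here: maximum_filter(x, (a, b)) spreads the True values of a binary map by about a/2 pixels vertically and b/2 pixels horizontally, so grad is smeared into wider vertical bands before being intersected with the whitespace map and then thickened again. A standalone illustration of that directional smearing (not from the project):

import numpy as np
from scipy.ndimage.filters import maximum_filter

stroke = np.zeros((9, 9), dtype=bool)
stroke[2:7, 4] = True                       # a thin vertical stroke
smeared = maximum_filter(stroke, (1, 5))    # widen it horizontally only
print(smeared[4].astype(int))               # [0 0 1 1 1 1 1 0 0]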
Project: Lifting-from-the-Deep-release    Author: DenisTome    | Project source | File source
def detect_objects_heatmap(heatmap):
    data = 256 * heatmap
    data_max = filters.maximum_filter(data, 3)
    maxima = (data == data_max)
    data_min = filters.minimum_filter(data, 3)
    diff = ((data_max - data_min) > 0.3)
    maxima[diff == 0] = 0
    labeled, num_objects = ndimage.label(maxima)
    slices = ndimage.find_objects(labeled)
    objects = np.zeros((num_objects, 2), dtype=np.int32)
    pidx = 0
    for (dy, dx) in slices:
        pos = [(dy.start + dy.stop - 1) // 2, (dx.start + dx.stop - 1) // 2]
        if heatmap[pos[0], pos[1]] > config.CENTER_TR:
            objects[pidx, :] = pos
            pidx += 1
    return objects[:pidx]
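The part worth isolating, independent of the config constant and the ndimage.label bookkeeping, is that a candidate peak must both equal its neighborhood maximum and have enough local contrast (difference between the neighborhood maximum and minimum). A minimal standalone sketch of that test:

import numpy as np
from scipy.ndimage import filters

data = 256 * np.random.rand(64, 64)
data_max = filters.maximum_filter(data, 3)
data_min = filters.minimum_filter(data, 3)
# keep only pixels that are local maxima AND stand out from their neighborhood
candidates = (data == data_max) & ((data_max - data_min) > 0.3)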
Project: wrfplot    Author: liamtill    | Project source | File source
def extrema(mat,mode='wrap',window=10): # function to find the pressure extrema
    """
    Find the indices of local extrema (min and max) in the input array.

    Parameters
    ----------
    mat : 2D input array
    mode : boundary mode passed to the filters
    window : filter size (sensitivity)

    Returns
    -------
    Indices of the minima and the maxima

    """
    mn = minimum_filter(mat, size=window, mode=mode)
    mx = maximum_filter(mat, size=window, mode=mode)
    # (mat == mx) true if pixel is equal to the local max
    # (mat == mn) true if pixel is equal to the local min
    # Return the indices of the maxima, minima
    return np.nonzero(mat == mn), np.nonzero(mat == mx)

#function to interpolate data to given level(s) using np.interp
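A hypothetical usage sketch for extrema(): finding the low and the high of a made-up pressure field (minimum_filter and maximum_filter are assumed imported as in the original module):

import numpy as np
from scipy.ndimage.filters import minimum_filter, maximum_filter

y, x = np.mgrid[0:50, 0:50]
# synthetic sea-level pressure field with one low and one high
prmsl = (1013.0
         - 20.0 * np.exp(-((x - 10)**2 + (y - 10)**2) / 50.0)
         + 15.0 * np.exp(-((x - 35)**2 + (y - 40)**2) / 50.0))

local_min, local_max = extrema(prmsl, mode='wrap', window=10)
# each is a (row_indices, col_indices) tuple as returned by np.nonzero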
Project: SegmentationService    Author: jingchaoluan    | Project source | File source
def compute_line_seeds(binary,bottom,top,colseps,scale):
    """Base on gradient maps, computes candidates for baselines
    and xheights.  Then, it marks the regions between the two
    as a line seed."""
    t = args['threshold']
    vrange = int(args['vscale']*scale)
    bmarked = maximum_filter(bottom==maximum_filter(bottom,(vrange,0)),(2,2))
    bmarked = bmarked*(bottom>t*amax(bottom)*t)*(1-colseps)
    tmarked = maximum_filter(top==maximum_filter(top,(vrange,0)),(2,2))
    tmarked = tmarked*(top>t*amax(top)*t/2)*(1-colseps)
    tmarked = maximum_filter(tmarked,(1,20))
    seeds = zeros(binary.shape,'i')
    delta = max(3,int(scale/2))
    for x in range(bmarked.shape[1]):
        transitions = sorted([(y,1) for y in find(bmarked[:,x])]+[(y,0) for y in find(tmarked[:,x])])[::-1]
        transitions += [(0,0)]
        for l in range(len(transitions)-1):
            y0,s0 = transitions[l]
            if s0==0: continue
            seeds[y0-delta:y0,x] = 1
            y1,s1 = transitions[l+1]
            if s1==0 and (y0-y1)<5*scale: seeds[y1:y0,x] = 1
    seeds = maximum_filter(seeds,(1,int(1+scale)))
    seeds = seeds*(1-colseps)
    DSAVE("lineseeds",[seeds,0.3*tmarked+0.7*bmarked,binary])
    seeds,_ = morph.label(seeds)
    return seeds



################################################################
### The complete line segmentation process.
################################################################
Project: SegmentationService    Author: jingchaoluan    | Project source | File source
def r_dilation(image,size,origin=0):
    """Dilation with rectangular structuring element using maximum_filter"""
    return filters.maximum_filter(image,size,origin=origin)
Project: SegmentationService    Author: jingchaoluan    | Project source | File source
def r_erosion(image,size,origin=0):
    """Erosion with rectangular structuring element using maximum_filter"""
    return filters.minimum_filter(image,size,origin=origin)
Project: SegmentationService    Author: jingchaoluan    | Project source | File source
def rg_dilation(image,size,origin=0):
    """Grayscale dilation with maximum/minimum filters."""
    return filters.maximum_filter(image,size,origin=origin)
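These thin wrappers work because a maximum (minimum) filter with a rectangular window is the same operation as grayscale dilation (erosion) with a flat rectangular structuring element. A quick sketch checking that equivalence against scipy's dedicated morphology functions (not part of the original module; odd window sizes keep the two APIs aligned on the same origin):

import numpy as np
from scipy.ndimage import filters, morphology

img = np.random.rand(32, 32)
size = (3, 5)
assert np.allclose(filters.maximum_filter(img, size),
                   morphology.grey_dilation(img, size=size))
assert np.allclose(filters.minimum_filter(img, size),
                   morphology.grey_erosion(img, size=size))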
Project: saliency    Author: shuuchen    | Project source | File source
def markMaxima(saliency):
    """
        Mark the maxima in a saliency map (a gray-scale image).
    """
    maxima = maximum_filter(saliency, size=(5, 5))
    maxima = numpy.array(saliency == maxima, dtype=numpy.float64) * 255
    g = cv2.max(saliency, maxima)
    r = saliency
    b = saliency
    marked = cv2.merge((b,g,r))
    return marked
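A hypothetical usage sketch (cv2, numpy and maximum_filter are assumed imported as in the original file): markMaxima() returns a 3-channel image in which local maxima stand out in green, because only the green channel is pushed to 255 at those positions.

import numpy
import cv2
from scipy.ndimage.filters import maximum_filter

saliency = (numpy.random.rand(48, 64) * 255).astype(numpy.float64)
marked = markMaxima(saliency)   # shape (48, 64, 3), maxima highlighted in green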
Project: tf-openpose    Author: ildoonet    | Project source | File source
def non_max_suppression(np_input, window_size=3, threshold=NMS_Threshold):
    under_threshold_indices = np_input < threshold
    np_input[under_threshold_indices] = 0
    return np_input*(np_input == maximum_filter(np_input, footprint=np.ones((window_size, window_size))))
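A hypothetical usage sketch (NMS_Threshold is a module constant in the original project; 0.1 is a stand-in here): only values that exceed the threshold and are the maximum of their window survive, everything else becomes zero.

import numpy as np
from scipy.ndimage.filters import maximum_filter

heat = np.array([[0.05, 0.20, 0.10],
                 [0.15, 0.90, 0.30],
                 [0.05, 0.10, 0.60]])
# note: the input is modified in place, hence the copy()
print(non_max_suppression(heat.copy(), window_size=3, threshold=0.1))
# only the 0.90 survives; the rest is below threshold or not a 3x3 maximum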
Project: imgProcessor    Author: radjkarl    | Project source | File source
def _highGrad(arr):
    # mask high gradient areas in given array
    s = min(arr.shape)
    return maximum_filter(np.abs(laplace(arr, mode='reflect')) > 0.01,  # 0.02
                          min(max(s // 5, 3), 15))
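A hypothetical usage sketch for _highGrad() (laplace and maximum_filter are assumed to come from scipy.ndimage.filters, as elsewhere in these examples): the returned boolean mask is True in and around regions of strong curvature, dilated by the size-dependent maximum filter.

import numpy as np
from scipy.ndimage.filters import laplace, maximum_filter

y, x = np.mgrid[0:100, 0:100]
arr = np.tanh((x - 50) / 2.0)   # smooth everywhere except a steep step at x = 50
mask = _highGrad(arr)           # True in a band around the step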
Project: imgProcessor    Author: radjkarl    | Project source | File source
def _highGrad(arr):
    # mask high gradient areas in given array
    s = min(arr.shape)
    return maximum_filter(np.abs(laplace(arr, mode='reflect')) > 0.01,  # 0.02
                          min(max(s // 5, 3), 15))
Project: dataArtist    Author: radjkarl    | Project source | File source
def _import():
    global maximum_filter, PerspectiveGridROI, labelCracks, evalCracks, detectLabelCrackParams
    from scipy.ndimage.filters import maximum_filter
    from dataArtist.items.PerspectiveGridROI import PerspectiveGridROI

    try:
        from PROimgProcessor.features.crackDetection import labelCracks, evalCracks,\
            detectLabelCrackParams
    except ImportError:
        labelCracks = None
Project: qudi    Author: Ulm-IQO    | Project source | File source
def autofind_pois(self, neighborhood_size=1, min_threshold=10000, max_threshold=1e6):
        """Automatically search the xy scan image for POIs.

        @param neighborhood_size: size in microns.  Only the brightest POI per neighborhood will be found.

        @param min_threshold: POIs must have c/s above this threshold.

        @param max_threshold: POIs must have c/s below this threshold.
        """

        # Calculate the neighborhood size in pixels from the image range and resolution
        x_range_microns = np.max(self.roi_map_data[:, :, 0]) - np.min(self.roi_map_data[:, :, 0])
        y_range_microns = np.max(self.roi_map_data[:, :, 1]) - np.min(self.roi_map_data[:, :, 1])
        y_pixels = len(self.roi_map_data)
        x_pixels = len(self.roi_map_data[1, :])

        pixels_per_micron = np.max([x_pixels, y_pixels]) / np.max([x_range_microns, y_range_microns])
        # The neighborhood in pixels is nbhd_size * pixels_per_um, but it must be 1 or greater
        neighborhood_pix = int(np.max([math.ceil(pixels_per_micron * neighborhood_size), 1]))

        data = self.roi_map_data[:, :, 3]

        data_max = filters.maximum_filter(data, neighborhood_pix)
        maxima = (data == data_max)
        data_min = filters.minimum_filter(data, 3 * neighborhood_pix)
        diff = ((data_max - data_min) > min_threshold)
        maxima[diff == False] = 0

        labeled, num_objects = ndimage.label(maxima)
        xy = np.array(ndimage.center_of_mass(data, labeled, range(1, num_objects + 1)))

        for count, pix_pos in enumerate(xy):
            poi_pos = self.roi_map_data[int(pix_pos[0]), int(pix_pos[1]), :][0:3]
            this_poi_key = self.add_poi(position=poi_pos, emit_change=False)
            self.rename_poi(poikey=this_poi_key, name='spot' + str(count), emit_change=False)

        # Now that all the POIs are created, emit the signal for other things (ie gui) to update
        self.signal_poi_updated.emit()
Project: BinarizationService    Author: jingchaoluan    | Project source | File source
def r_dilation(image,size,origin=0):
    """Dilation with rectangular structuring element using maximum_filter"""
    return filters.maximum_filter(image,size,origin=origin)
Project: BinarizationService    Author: jingchaoluan    | Project source | File source
def r_erosion(image,size,origin=0):
    """Erosion with rectangular structuring element using maximum_filter"""
    return filters.minimum_filter(image,size,origin=origin)
Project: BinarizationService    Author: jingchaoluan    | Project source | File source
def rg_dilation(image,size,origin=0):
    """Grayscale dilation with maximum/minimum filters."""
    return filters.maximum_filter(image,size,origin=origin)
Project: BinarizationService    Author: jingchaoluan    | Project source | File source
def extract_masked(image,linedesc,pad=5,expand=0):
    """Extract a subimage from the image using the line descriptor.
    A line descriptor consists of bounds and a mask."""
    y0,x0,y1,x1 = [int(x) for x in [linedesc.bounds[0].start,linedesc.bounds[1].start, \
                  linedesc.bounds[0].stop,linedesc.bounds[1].stop]]
    if pad>0:
        mask = pad_image(linedesc.mask,pad,cval=0)
    else:
        mask = linedesc.mask
    line = extract(image,y0-pad,x0-pad,y1+pad,x1+pad)
    if expand>0:
        mask = filters.maximum_filter(mask,(expand,expand))
    line = where(mask,line,amax(line))
    return line
Project: deep_ocr    Author: JinpengLI    | Project source | File source
def compute_colseps_conv(binary, csminheight, maxcolseps, scale=1.0, debug=False):
    """Find column separators by convoluation and
    thresholding."""
    h,w = binary.shape
    # find vertical whitespace by thresholding
    smoothed = gaussian_filter(1.0 * binary, (scale, scale*0.5))
    smoothed = uniform_filter(smoothed, (5.0*scale,1))
    thresh = (smoothed<np.amax(smoothed)*0.1)
    if debug:
        debug_show(thresh, "compute_colseps_conv thresh")
    # find column edges by filtering
    grad = gaussian_filter(1.0*binary, (scale, scale*0.5), order=(0,1))
    grad = uniform_filter(grad, (10.0*scale,1))
    # grad = abs(grad) # use this for finding both edges
    grad = (grad>0.5*np.amax(grad))
    if debug:
        debug_show(grad, "compute_colseps_conv grad")
    # combine edges and whitespace
    seps = np.minimum(thresh,maximum_filter(grad, (int(scale), int(5*scale))))
    seps = maximum_filter(seps,(int(2*scale),1))
    if debug:
        debug_show(seps, "compute_colseps_conv seps")
    # select only the biggest column separators
    seps = morph.select_regions(seps,sl.dim0,
                                min=csminheight*scale,
                                nbest=maxcolseps)
    if debug:
        debug_show(seps, "compute_colseps_conv 4seps")
    return seps
Project: deep_ocr    Author: JinpengLI    | Project source | File source
def compute_line_seeds(binary, bottom, top, colseps, threshold, vscale, scale, debug=False):
    """Base on gradient maps, computes candidates for baselines
    and xheights.  Then, it marks the regions between the two
    as a line seed."""
    t = threshold
    vrange = int(vscale*scale)
    bmarked = maximum_filter(bottom==maximum_filter(bottom, (vrange, 0)),(2,2))
    bmarked = bmarked*(bottom>t*np.amax(bottom)*t)*(1-colseps)
    tmarked = maximum_filter(top==maximum_filter(top,(vrange,0)),(2,2))
    tmarked = tmarked*(top>t*np.amax(top)*t/2)*(1-colseps)
    tmarked = maximum_filter(tmarked,(1,20))
    seeds = np.zeros(binary.shape, 'i')
    delta = max(3,int(scale/2))
    for x in range(bmarked.shape[1]):
        transitions = sorted([(y, 1) for y in np.where(bmarked[:, x])[0]] +
                             [(y, 0) for y in np.where(tmarked[:, x])[0]])[::-1]
        transitions += [(0,0)]
        for l in range(len(transitions)-1):
            y0,s0 = transitions[l]
            if s0==0: continue
            seeds[y0-delta:y0,x] = 1
            y1,s1 = transitions[l+1]
            if s1==0 and (y0-y1)<5*scale: seeds[y1:y0,x] = 1
    seeds = maximum_filter(seeds,(1,int(1+scale)))
    seeds = seeds*(1-colseps)
    if debug:
        debug_show([seeds,0.3*tmarked+0.7*bmarked,binary], "lineseeds")
    seeds,_ = morph.label(seeds)
    return seeds
Project: deep_ocr    Author: JinpengLI    | Project source | File source
def r_dilation(image,size,origin=0):
    """Dilation with rectangular structuring element using maximum_filter"""
    return filters.maximum_filter(image,size,origin=origin)
Project: deep_ocr    Author: JinpengLI    | Project source | File source
def r_erosion(image,size,origin=0):
    """Erosion with rectangular structuring element using maximum_filter"""
    return filters.minimum_filter(image,size,origin=origin)
Project: deep_ocr    Author: JinpengLI    | Project source | File source
def rg_dilation(image,size,origin=0):
    """Grayscale dilation with maximum/minimum filters."""
    return filters.maximum_filter(image,size,origin=origin)
Project: imgProcessor    Author: radjkarl    | Project source | File source
def addImg(self, img, maxShear=0.015, maxRot=100, minMatches=12,
               borderWidth=3):  # borderWidth=100
        """
        Args:
            img (path or array): image containing the same object as in the reference image
        Kwargs:
            maxShear (float): In order to define a good fit, reject higher shear values between
                              this and the reference image
            maxRot (float): Same for rotation
            minMatches (int): Minimum number of matching points found in both this and the reference image
        """
        try:
            fit, img, H, H_inv, nmatched = self._fitImg(img)
        except Exception as e:
            print(e)
            return

        # CHECK WHETHER FIT IS GOOD ENOUGH:
        (translation, rotation, scale, shear) = decompHomography(H)
        print('Homography ...\n\ttranslation: %s\n\trotation: %s\n\tscale: %s\n\tshear: %s'
              % (translation, rotation, scale, shear))
        if (nmatched > minMatches
                and abs(shear) < maxShear
                and abs(rotation) < maxRot):
            print('==> img added')
            # HOMOGRAPHY:
            self.Hs.append(H)
            # INVERSE HOMOGRAPHY
            self.Hinvs.append(H_inv)
            # IMAGES WARPED TO THE BASE IMAGE
            self.fits.append(fit)
            # ADD IMAGE TO THE INITIAL flatField ARRAY:
            i = img > self.signal_ranges[-1][0]

            # remove borders (that might have erroneous light):
            i = minimum_filter(i, borderWidth)

            self._ff_mma.update(img, i)

            # create fit img mask:
            mask = fit < self.signal_ranges[-1][0]
            mask = maximum_filter(mask, borderWidth)
            # IGNORE BORDER
            r = self.remove_border_size
            if r:
                mask[:r, :] = 1
                mask[-r:, :] = 1
                mask[:, -r:] = 1
                mask[:, :r] = 1
            self._fit_masks.append(mask)

            # image added
            return fit
        return False
Project: dataArtist    Author: radjkarl    | Project source | File source
def _process(self):
        d = self.display
        img = d.widget.image
        out = np.empty(shape=img.shape[:3], dtype=bool)
        width = self.pWidth.value()
        debug = self.pDebug.value()
        laps, linelikellyness, params = [], [], []
        ki = self.pKSizeIntensity.value()
        ki += 1 - ki % 2  # ensure odd number

        t = d.tools['Selection']
        path = t.findPath(PerspectiveGridROI)
        shape = path.nCells if path is not None else (6, 10)
        border = path.vertices() if path is not None else None
        if border is None:
            print('No [Grid] found, assume image is corrected and there is \
no border around the image')

        v = self.pMask.value()
        if v != '-':
            grid = d.widget.cItems[v].image_full
        else:
            grid = None
            print('Please define [Grid Mask] to improve precision')

        for n, im in enumerate(img):

            if self.pDetect.value():
                kwargs = detectLabelCrackParams(img, shape)
                self.pKSizeProp.setValue(kwargs['ksize_length'])
                self.pThreshProp.setValue(kwargs['thresh_length'] * 100)
                self.pKSizeIntensity.setValue(kwargs['ksize_intensity'])
                self.pThreshIntensity.setValue(kwargs['thresh_intensity'])
            else:
                kwargs = dict(ksize_intensity=ki,
                              ksize_length=self.pKSizeProp.value(),
                              thresh_length=self.pThreshProp.value() / 100,
                              norientations=self.pNorient.value(),
                              thresh_intensity=self.pThreshIntensity.value())

            cracks, h, orient, lap,  mx = labelCracks(im, grid,
                                                      cracks_are_bright=not self.pDark.value(),
                                                      **kwargs)
            params.append(evalCracks(cracks, h, orient, shape, border))
            if width > 1:
                cracks = maximum_filter(cracks, width)
            out[n] = cracks

            if debug:
                laps.append(lap)
                linelikellyness.append(mx)

        return out, laps, linelikellyness, params
Project: audio-fingerprint-identifying-python    Author: itspoma    | Project source | File source
def get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN):
    # http://docs.scipy.org/doc/scipy/reference/generated/scipy.ndimage.morphology.iterate_structure.html#scipy.ndimage.morphology.iterate_structure
    struct = generate_binary_structure(2, 1)
    neighborhood = iterate_structure(struct, PEAK_NEIGHBORHOOD_SIZE)

    # find local maxima using our filter shape
    local_max = maximum_filter(arr2D, footprint=neighborhood) == arr2D
    background = (arr2D == 0)
    eroded_background = binary_erosion(background, structure=neighborhood,
                                       border_value=1)

    # Boolean mask of arr2D with True at peaks
    # (boolean subtraction is not supported by newer NumPy; use logical AND NOT)
    detected_peaks = local_max & ~eroded_background

    # extract peaks
    amps = arr2D[detected_peaks]
    j, i = np.where(detected_peaks)

    # filter peaks
    amps = amps.flatten()
    peaks = zip(i, j, amps)
    peaks_filtered = [x for x in peaks if x[2] > amp_min]  # freq, time, amp

    # get indices for frequency and time
    frequency_idx = [x[1] for x in peaks_filtered]
    time_idx = [x[0] for x in peaks_filtered]

    # scatter of the peaks
    if plot:
      fig, ax = plt.subplots()
      ax.imshow(arr2D)
      ax.scatter(time_idx, frequency_idx)
      ax.set_xlabel('Time')
      ax.set_ylabel('Frequency')
      ax.set_title("Spectrogram")
      plt.gca().invert_yaxis()
      plt.show()

    return zip(frequency_idx, time_idx)

# Hash list structure: sha1_hash[0:20] time_offset
# example: [(e05b341a9b77a51fd26, 32), ... ]
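A hypothetical way to drive get_2D_peaks(); DEFAULT_AMP_MIN and PEAK_NEIGHBORHOOD_SIZE are module constants in the original project, so the values below are stand-ins, and the input is a fake spectrogram rather than real audio:

import numpy as np

# stand-ins for the module constants referenced above
DEFAULT_AMP_MIN = 10
PEAK_NEIGHBORHOOD_SIZE = 20

arr2D = np.abs(np.random.randn(512, 256)) * 20.0   # fake spectrogram magnitudes
peaks = list(get_2D_peaks(arr2D, plot=False, amp_min=DEFAULT_AMP_MIN))
# each entry is a (frequency_bin, time_bin) pair for one detected peak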
Project: PyEMD    Author: laszukdawid    | Project source | File source
def find_extrema(cls, image):
        """
        Finds extrema, both minima and maxima, based on local maximum filter.
        Returns extrema in form of two rows, where the first and second are
        positions of x and y, respectively.

        Parameters
        ----------
        image : numpy 2D array
            Monochromatic image or any 2D array.

        Returns
        -------
        min_peaks : numpy array
            Minima positions.
        max_peaks : numpy array
            Maxima positions.
        """

        # define a 3x3 neighborhood
        neighborhood = generate_binary_structure(2,2)

        # apply the local maximum filter; all pixels of maximal value
        # in their neighborhood are set to 1
        local_min = maximum_filter(-image, footprint=neighborhood)==-image
        local_max = maximum_filter(image, footprint=neighborhood)==image

        # can't distinguish between background zero and filter zero
        background = (image==0)

        # erode the background so that spurious peaks do not
        # appear along the bg border (artifact of the local max filter)
        eroded_background = binary_erosion(background,
                                structure=neighborhood, border_value=1)

        # we obtain the final mask, containing only peaks,
        # by removing the background from the local_max mask (xor operation)
        min_peaks = local_min ^ eroded_background
        max_peaks = local_max ^ eroded_background

        min_peaks[[0,-1],:] = False
        min_peaks[:,[0,-1]] = False
        max_peaks[[0,-1],:] = False
        max_peaks[:,[0,-1]] = False

        min_peaks = np.nonzero(min_peaks)
        max_peaks = np.nonzero(max_peaks)

        return min_peaks, max_peaks
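A hypothetical usage sketch: find_extrema() is written as a classmethod of the surrounding 2D-EMD class, so (with EMD2D as a stand-in name for that class and the numpy/scipy imports used above) it can be exercised on a synthetic surface like this:

import numpy as np

y, x = np.mgrid[0:64, 0:64]
surface = np.sin(x / 6.0) * np.cos(y / 6.0)

min_peaks, max_peaks = EMD2D.find_extrema(surface)   # EMD2D is a stand-in class name
# each is a (row_indices, col_indices) tuple; image borders are excluded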
Project: saw_release    Author: kovibalu    | Project source | File source
def eval_baseline_on_photo(pixel_labels_dir, thres_list, photo_id,
                           pred_shading_dir, bl_filter_size):
    """
    This method generates a list of precision-recall pairs and confusion
    matrices for each threshold provided in ``thres_list`` for a specific
    photo.

    :param pixel_labels_dir: Directory which contains the SAW pixel labels for each photo.

    :param thres_list: List of shading gradient magnitude thresholds we use to
    generate points on the precision-recall curve.

    :param photo_id: ID of the photo we want to evaluate on.

    :param pred_shading_dir: Directory which contains the intrinsic image
    decompositions for all photos generated by a decomposition algorithm.

    :param bl_filter_size: The size of the maximum filter used on the shading
    gradient magnitude image. We used 10 in the paper. If 0, we do not filter.
    """
    shading_image_arr = load_shading_image_arr(
        pred_shading_dir=pred_shading_dir, photo_id=photo_id
    )
    shading_image_linear = srgb_to_rgb(shading_image_arr)
    shading_image_linear_grayscale = np.mean(shading_image_linear, axis=2)
    shading_gradmag = compute_gradmag(shading_image_linear_grayscale)

    if bl_filter_size:
        shading_gradmag = maximum_filter(shading_gradmag, size=bl_filter_size)

    # We have the following ground truth labels:
    # (0) normal/depth discontinuity non-smooth shading (NS-ND)
    # (1) shadow boundary non-smooth shading (NS-SB)
    # (2) smooth shading (S)
    # (100) no data, ignored
    y_true = load_pixel_labels(pixel_labels_dir=pixel_labels_dir, photo_id=photo_id)
    y_true = np.ravel(y_true)
    ignored_mask = y_true == 100

    # If we don't have labels for this photo (so everything is ignored), return
    # None
    if np.all(ignored_mask):
        return [None] * len(thres_list)

    ret = []
    for thres in thres_list:
        y_pred = (shading_gradmag < thres).astype(int)
        y_pred = np.ravel(y_pred)
        # Note: y_pred should have the same image resolution as y_true
        assert y_pred.shape == y_true.shape
        ret.append(grouped_confusion_matrix(y_true[~ignored_mask], y_pred[~ignored_mask]))

    return ret