The following are 50 code examples, extracted from open-source Python projects, that illustrate how to use numpy.cumprod().
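As a quick orientation before the project code, here is a minimal sketch of what numpy.cumprod() itself computes (plain NumPy, not taken from any of the projects below): without an axis argument it flattens the input, otherwise it accumulates along the given axis.

import numpy as np

a = np.array([[1, 2, 3],
              [4, 5, 6]])

print(np.cumprod(a))           # flattened: [  1   2   6  24 120 720]
print(np.cumprod(a, axis=0))   # down columns: [[1 2 3] [4 10 18]]
print(np.cumprod(a, axis=1))   # along rows:   [[1 2 6] [4 20 120]]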
def analyse_data(input_dir):
    shapes = []
    relative_volumes = []
    for folder in get_sub_folders(input_dir):
        print(folder)
        for sub_folder in get_sub_folders(os.path.join(input_dir, folder)):
            image_type = get_image_type_from_folder_name(sub_folder)
            # do not save the raw data (too heavy)
            if image_type != '.OT':
                continue
            path = os.path.join(input_dir, folder, sub_folder)
            filename = next(filename for filename in os.listdir(path)
                            if get_extension(filename) == '.nii')
            path = os.path.join(path, filename)
            im = nib.load(path)
            image = im.get_data()
            shape = image.shape
            shapes.append(shape)
            relative_volumes.append(100 * np.sum(image) / np.cumprod(shape)[-1])
    return shapes, relative_volumes

# train
def test_basic(self):
    ba = [1, 2, 10, 11, 6, 5, 4]
    ba2 = [[1, 2, 3, 4], [5, 6, 7, 9], [10, 3, 4, 5]]
    for ctype in [np.int16, np.uint16, np.int32, np.uint32,
                  np.float32, np.float64, np.complex64, np.complex128]:
        a = np.array(ba, ctype)
        a2 = np.array(ba2, ctype)
        if ctype in ['1', 'b']:
            self.assertRaises(ArithmeticError, np.cumprod, a)
            self.assertRaises(ArithmeticError, np.cumprod, a2, 1)
            self.assertRaises(ArithmeticError, np.cumprod, a)
        else:
            assert_array_equal(np.cumprod(a, axis=-1),
                               np.array([1, 2, 20, 220, 1320, 6600, 26400],
                                        ctype))
            assert_array_equal(np.cumprod(a2, axis=0),
                               np.array([[1, 2, 3, 4],
                                         [5, 12, 21, 36],
                                         [50, 36, 84, 180]], ctype))
            assert_array_equal(np.cumprod(a2, axis=-1),
                               np.array([[1, 2, 6, 24],
                                         [5, 30, 210, 1890],
                                         [10, 30, 120, 600]], ctype))
def reshape_workaround(data, shape_out):  # type: (TensorOp, Sequence[int]) -> TensorOp
    """Limited workaround for tensor reshape operation."""
    shape_in = data.shape.lengths

    if np.prod(shape_in) != np.prod(shape_out):
        raise ValueError('Total size of input (%d) and output (%d) dimension mismatch.'
                         % (np.prod(shape_in), np.prod(shape_out)))

    ndims_out = len(shape_out)
    if ndims_out == 1:
        tensor = ng.flatten(data)
    elif ndims_out == 2:
        cumprods = list(np.cumprod(shape_in))
        flatten_at_idx = cumprods.index(shape_out[0]) + 1
        tensor = ng.flatten_at(data, flatten_at_idx)
    else:
        raise NotImplementedError('Reshape can only support flatten to 1d or 2d.')

    return ng.cast_axes(tensor, make_pos_axes(shape_out))
def _decade_mortality_table(year, url_template='https://www.ssa.gov/oact/NOTES/as120/LifeTables_Tbl_7_{}.html'):
    assert int(year) % 10 == 0

    url = url_template.format(year)
    soup = BeautifulSoup(urlopen(url).read(), 'lxml')
    table = soup.find('table', border=1)

    rows = []
    for row in table.find_all('tr'):
        row_datum = [cell.text.strip() for cell in row.find_all('td')]
        if len(row_datum) == 15 and row_datum[0] != '':
            rows.append({
                'year_of_birth': int(year),
                'age': int(row_datum[0]),
                'm_prob_survive_that_year': 1 - float(row_datum[1]),
                'f_prob_survive_that_year': 1 - float(row_datum[9]),
            })

    df = pd.DataFrame(rows).sort_values(by='age')
    for sex in 'mf':
        df[sex + '_prob_alive'] = np.cumprod(
            df[sex + '_prob_survive_that_year']).astype(np.float64)
    df['as_of_year'] = df['year_of_birth'] + df['age']
    return df[['year_of_birth', 'as_of_year', 'm_prob_alive', 'f_prob_alive']]
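Here np.cumprod turns the per-year survival probabilities into a cumulative probability of still being alive at each age. A minimal sketch with made-up survival numbers (not SSA data):

import numpy as np

# hypothetical per-year survival probabilities for ages 0..4
p_survive = np.array([0.993, 0.999, 0.999, 0.998, 0.998])

# prob_alive[k] = p_survive[0] * ... * p_survive[k]
prob_alive = np.cumprod(p_survive)
print(prob_alive)  # non-increasing, starting at 0.993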
def read_feature(filename):
    """Read feature dump by C3D

    Parameters
    ----------
    filename : str
        Fullpath of file to read

    Outputs
    -------
    x : ndarray
        numpy array of features

    Note: It accomplishes the same purpose of this code:
        C3D/examples/c3d_feature_extraction/script/read_binary_blob.m
    """
    s_parr, d_parr = array.array('i'), array.array('f')
    with open(filename, 'rb') as f:
        s_parr.fromfile(f, 5)
        s = np.array(s_parr)
        m = np.cumprod(s)[-1]
        d_parr.fromfile(f, m)
    return s, np.array(d_parr)
def __init__(self, frame_sizes, n_rnn, dim, learn_h0, q_levels, weight_norm):
    super().__init__()

    self.dim = dim
    self.q_levels = q_levels

    ns_frame_samples = map(int, np.cumprod(frame_sizes))
    self.frame_level_rnns = torch.nn.ModuleList([
        FrameLevelRNN(frame_size, n_frame_samples, n_rnn, dim,
                      learn_h0, weight_norm)
        for (frame_size, n_frame_samples) in zip(frame_sizes, ns_frame_samples)
    ])

    self.sample_level_mlp = SampleLevelMLP(frame_sizes[0], dim,
                                           q_levels, weight_norm)
def addVariable(self, var_name, var_dims, kind='Var', **kwargs):
    if var_name in self.var_dict:
        print("error: %s is a duplicated variable" % var_name)
    var_name_parts = LPCompiler.var_name_regex.match(var_name)
    new_var = {
        'start': self.total_var_length,
        'dims': var_dims,
        'cumdims': np.cumprod([1] + var_dims),
        'length': np.prod(var_dims),
        'name': var_name,
        'idx': var_name_parts.group('var').split(','),
        'lbl': var_name_parts.group('lbl').split(','),
        'kind': kind,
        'min_value': 0 if 'min_value' not in kwargs else kwargs['min_value']
    }
    self.var_dict[var_name] = new_var
    if kind == 'Param':
        for i in range(new_var['start'], new_var['start'] + new_var['length']):
            self.int_flag.append([i, 1])
    self.total_var_length += new_var['length']
def parse_struc(img):
    nbs = neighbors(img.shape)
    acc = np.cumprod((1,) + img.shape[::-1][:-1])[::-1]
    img = img.ravel()
    pts = np.array(np.where(img == 2))[0]
    buf = np.zeros(131072, dtype=np.int64)
    num = 10
    nodes = []
    for p in pts:
        if img[p] == 2:
            nds = fill(img, p, num, nbs, acc, buf)
            num += 1
            nodes.append(nds)
    edges = []
    for p in pts:
        for dp in nbs:
            if img[p + dp] == 1:
                edge = trace(img, p + dp, nbs, acc, buf)
                edges.append(edge)
    return nodes, edges

# use nodes and edges to build a networkx graph
def cumprod(x, axis=None):
    """Return the cumulative product of the elements along a given axis.

    Wrapping of numpy.cumprod.

    Parameters
    ----------
    x
        Input tensor variable.
    axis
        The axis along which the cumulative product is computed.
        The default (None) is to compute the cumprod over the flattened array.

    .. versionadded:: 0.7
    """
    return CumprodOp(axis=axis)(x)
def summarizeVdToDocTopicCount(Vd):
    ''' Create DocTopicCount matrix from given stick-breaking parameters Vd

        Returns
        --------
        DocTopicCount : 2D array, size D x K
    '''
    assert not np.any(np.isnan(Vd))
    PRNG = np.random.RandomState(0)
    DocTopicCount = np.zeros(Vd.shape)
    for d in range(Vd.shape[0]):
        N_d = 100 + 50 * PRNG.rand()
        Pi_d = Vd[d, :].copy()
        Pi_d[1:] *= np.cumprod(1.0 - Vd[d, :-1])
        np.maximum(Pi_d, 1e-10, out=Pi_d)
        Pi_d /= np.sum(Pi_d)
        DocTopicCount[d, :] = N_d * Pi_d
    return DocTopicCount
def rho2beta(rho, returnSize='K+1'):
    ''' Calculate probability for all components including remainder.

        Returns
        --------
        beta : 1D array, size equal to 'K' or 'K+1', depending on returnSize
            beta[k] := probability of topic k
    '''
    rho = np.asarray(rho, dtype=np.float64)
    if returnSize == 'K':
        beta = rho.copy()
        beta[1:] *= np.cumprod(1 - rho[:-1])
    else:
        beta = np.append(rho, 1.0)
        beta[1:] *= np.cumprod(1.0 - rho)
    return beta
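This is the standard stick-breaking construction: beta[k] = rho[k] * prod_{j<k}(1 - rho[j]), and in the 'K+1' case the appended 1.0 absorbs the leftover stick so the probabilities sum to one. A quick numeric check with hypothetical rho values:

import numpy as np

rho = np.array([0.5, 0.3, 0.2])      # hypothetical stick-breaking fractions

beta = np.append(rho, 1.0)
beta[1:] *= np.cumprod(1.0 - rho)    # beta[k] = rho[k] * prod_{j<k} (1 - rho[j])

print(beta)        # [0.5  0.15 0.07 0.28]
print(beta.sum())  # 1.0 -- the remainder absorbs the rest of the stick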
def discounts(self, periods_length, periods_num, libors, flat=False):
    """
    Return the calculated discounts for each period; flat discounts
    don't include the spread:

        if flat:
            period_rate = (libors[i - 1]) / periods_num
        else:
            period_rate = (libors[i - 1] + spread) / periods_num
        discounts[i] = discounts[i - 1] / (1 + period_rate)

    :param periods_length:
    :param periods_num:
    :param libors:
    :param flat: whether to calculate flat discounts or not
    """
    discounts = np.zeros(periods_length)
    discounts[0] = 1
    if flat:
        rates = libors[:-1] + self.spread
    else:
        rates = libors[:-1]
    discounts[1:] = np.cumprod(1 / (1 + (rates / periods_num)), dtype=float)
    return discounts
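The cumprod chains per-period discount factors, so discounts[i] is the product of i factors 1/(1 + rate/periods_num). A small standalone check with a hypothetical constant rate (outside the class, so no spread involved):

import numpy as np

rate, periods_num, n = 0.04, 4, 8    # hypothetical 4% rate, quarterly, 8 periods
rates = np.full(n - 1, rate)

discounts = np.ones(n)
discounts[1:] = np.cumprod(1 / (1 + rates / periods_num))

# with a constant rate the chain collapses to a closed form
assert np.allclose(discounts, (1 + rate / periods_num) ** -np.arange(n))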
def get_chunks(x, nbytes_desired):
    nbytes = np.array(x).ravel()[0].nbytes
    size_desired = nbytes_desired / nbytes
    if size_desired >= x.size:
        # desired chunk size is greater than or equal to the array size,
        # thus we can include the whole array in a single chunk
        return x.shape
    s = x.shape[::-1]
    cp = np.cumprod(s)
    dim = np.argmax(cp >= size_desired)
    s_dim_desired = size_desired / np.prod(s[:dim])
    s_dim = np.round(s_dim_desired)
    if s_dim < 1:
        s_dim = 1
    chunks = np.ones_like(s)
    chunks[:dim] = s[:dim]
    chunks[dim] = s_dim
    result = tuple(chunks[::-1])
    return result
def test_allocation_weighting(self):
    b, n = 5, 10
    u = np.random.rand(b, n)
    s = np.argsort(u, axis=1)

    correct_alloc = np.zeros((b, n)).astype(np.float32)
    for i in range(b):
        cp = np.concatenate([[1], np.cumprod(u[i][s[i]])[:-1]])
        correct_alloc[i][s[i]] = (1 - u[i][s[i]]) * cp

    with self.test_session():
        tf.global_variables_initializer().run()
        Memory.memory_size = n
        calculated_alloc = Memory.calculate_allocation_weighting(Memory, u).eval()
        self.assertAllClose(correct_alloc, calculated_alloc)
def cumproduct(a, axis=None, dtype=None, out=None):
    """
    Return the cumulative product over the given axis.

    See Also
    --------
    cumprod : equivalent function; see for details.
    """
    try:
        cumprod = a.cumprod
    except AttributeError:
        return _wrapit(a, 'cumprod', axis, dtype, out)
    return cumprod(axis, dtype, out)
def fill_diagonal(a, val, wrap=False):
    """Fills the main diagonal of the given array of any dimensionality.

    For an array `a` with ``a.ndim > 2``, the diagonal is the list of
    locations with indices ``a[i, i, ..., i]`` all identical. This function
    modifies the input array in-place, it does not return a value.

    Args:
        a (cupy.ndarray): The array, at least 2-D.
        val (scalar): The value to be written on the diagonal.
            Its type must be compatible with that of the array a.
        wrap (bool): If specified, the diagonal is "wrapped" after N columns.
            This affects only tall matrices.

    Examples
    --------
    >>> a = cupy.zeros((3, 3), int)
    >>> cupy.fill_diagonal(a, 5)
    >>> a
    array([[5, 0, 0],
           [0, 5, 0],
           [0, 0, 5]])

    .. seealso:: :func:`numpy.fill_diagonal`
    """
    # The following is adapted from the original numpy
    if a.ndim < 2:
        raise ValueError('array must be at least 2-d')
    end = None
    if a.ndim == 2:
        step = a.shape[1] + 1
        if not wrap:
            end = a.shape[1] * a.shape[1]
    else:
        if not numpy.alltrue(numpy.diff(a.shape) == 0):
            raise ValueError('All dimensions of input must be of equal length')
        step = 1 + numpy.cumprod(a.shape[:-1]).sum()

    # Since the current cupy does not support a.flat,
    # we use a.ravel() instead of a.flat
    a.ravel()[:end:step] = val
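The N-d branch works because, in C order, consecutive diagonal elements (i, ..., i) -> (i+1, ..., i+1) of a cube are exactly 1 + cumprod(shape[:-1]).sum() flat positions apart: one step per dimension, weighted by that dimension's stride in elements. A NumPy sketch of that identity (my own illustration, not cupy code):

import numpy as np

a = np.zeros((3, 3, 3), dtype=int)
step = 1 + np.cumprod(a.shape[:-1]).sum()   # 1 + (3 + 9) = 13

a.ravel()[::step] = 5                       # hits flat indices 0, 13, 26
assert all(a[i, i, i] == 5 for i in range(3))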
def cumprod(a, axis=None, dtype=None, out=None):
    """Returns the cumulative product of an array along a given axis.

    Args:
        a (cupy.ndarray): Input array.
        axis (int): Axis along which the cumulative product is taken. If it is
            not specified, the input is flattened.
        dtype: Data type specifier.
        out (cupy.ndarray): Output array.

    Returns:
        cupy.ndarray: The result array.

    .. seealso:: :func:`numpy.cumprod`
    """
    return _cum_core(a, axis, dtype, out, _cumprod_kern, _cumprod_batch_kern)


# TODO(okuta): Implement diff
# TODO(okuta): Implement ediff1d
# TODO(okuta): Implement gradient
# TODO(okuta): Implement cross
# TODO(okuta): Implement trapz
def __init__(self, shape, dtype, buffer=None, offset=0, strides=None):
    itemsize = dtype().itemsize
    shape = tuple(np.array(shape).ravel().astype(np.uint64))
    if strides is None:
        # This magic came from http://stackoverflow.com/a/32874295
        strides = itemsize * np.r_[1, np.cumprod(shape[::-1][:-1],
                                                 dtype=np.int64)][::-1]
    self.shape = shape
    self.dtype = dtype
    self.buffer = buffer
    self.offset = offset
    self.strides = strides
    self.base = None
    self.flags = {'WRITEABLE': True,
                  'ALIGNED': (buffer % itemsize == 0
                              if buffer is not None else True),
                  'OWNDATA': False,
                  'UPDATEIFCOPY': False,
                  'C_CONTIGUOUS': self.nbytes == strides[0] * shape[0],
                  'F_CONTIGUOUS': False,
                  'SPACE': 'cuda'}

    class CTypes(object):
        def __init__(self, parent):
            self.parent = parent

        @property
        def data(self):
            return self.parent.data

    self.ctypes = CTypes(self)

    if self.buffer is None:
        self.buffer = raw_malloc(self.nbytes, space='cuda')
        self.flags['OWNDATA'] = True
        self.flags['ALIGNED'] = True
        memset(self, 0)
    else:
        self.buffer += offset
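The strides one-liner above is worth unpacking: a C-contiguous array's strides are the itemsize times the cumulative products of the trailing dimensions. A quick check against NumPy itself:

import numpy as np

shape, itemsize = (2, 3, 4), 8   # float64 itemsize
strides = itemsize * np.r_[1, np.cumprod(shape[::-1][:-1])][::-1]

# NumPy computes (96, 32, 8) for this shape and dtype
assert tuple(strides) == np.zeros(shape).strides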
def set_evaluation_feedback(self, feedbacks):
    """Set feedback for the last behavior.

    Parameters
    ----------
    feedbacks : list of float
        feedback for each step or for the episode, depends on the problem
    """
    visited_states = self.policy.visited_states
    actions_taken = self.policy.actions_taken
    n_steps = len(visited_states)
    assert n_steps == len(feedbacks)
    assert n_steps == len(actions_taken)

    gammas = np.hstack(
        ((1,), np.cumprod(np.ones(n_steps - 1) * self.gamma)))
    diff = 0.0
    for t in range(n_steps):
        s = visited_states[t]
        a = actions_taken[t]
        ret = sum(feedbacks[t:] * gammas[:n_steps - t])
        self.returns[s][a].append(ret)
        last_Q = self.Q[s][a]
        self.Q[s][a] = np.mean(self.returns[s][a])
        diff = max(diff, np.abs(last_Q - self.Q[s][a]))

    self.done = any(feedbacks > 0) and diff < 1e-3
def discount(rewards, gamma, timestamps):
    dt = np.diff(timestamps.squeeze())
    x = rewards.squeeze()
    g = np.power(gamma, dt)
    y = np.zeros_like(x)
    for n in range(len(y)):
        y[n] = x[n] + np.sum(x[n + 1:] * np.cumprod(g[n:]))
    return y
def __func__(arr):
    arr = arr.copy()
    arr[np.isnan(arr)] = 1
    return np.cumprod(arr, dtype=float)
def __len__(self):
    """Returns the number of values handled by the :class:`MultiParameter`
    instance. It is required that the `shape` has been set beforehand,
    which specifies the length in each dimension.
    """
    return numpy.cumprod(self.shape)[-1]
def _create_prices(t):
    last_average = 100 if t == 0 else source.data['average'][-1]
    returns = asarray(lognormal(mean.value, stddev.value, 1))
    average = last_average * cumprod(returns)
    high = average * exp(abs(gamma(1, 0.03, size=1)))
    low = average / exp(abs(gamma(1, 0.03, size=1)))
    delta = high - low
    open = low + delta * uniform(0.05, 0.95, size=1)
    close = low + delta * uniform(0.05, 0.95, size=1)
    return open[0], high[0], low[0], close[0], average[0]
def _create_prices(t):
    global last_average
    returns = asarray(lognormal(mean, stddev, 1))
    average = last_average * cumprod(returns)
    last_average = average
    high = average * exp(abs(gamma(1, 0.03, size=1)))
    low = average / exp(abs(gamma(1, 0.03, size=1)))
    delta = high - low
    open = low + delta * uniform(0.05, 0.95, size=1)
    close = low + delta * uniform(0.05, 0.95, size=1)
    return open[0], high[0], low[0], close[0], average[0]
def _ema(prices, days=10):
    if len(prices) < days or days < 2:
        return [prices[-1]]
    a = 2.0 / (days + 1)
    kernel = ones(days, dtype=float)
    kernel[1:] = 1 - a
    kernel = a * cumprod(kernel)
    # The 0.8647 factor normalizes for truncating the EMA after a finite
    # number of terms
    return convolve(prices[-days:], kernel, mode="valid") / (0.8647)
def _grid_distance(self, index):
    """
    Calculate the distance grid for a single index position.

    This is pre-calculated for fast neighborhood calculations
    later on (see _calc_influence).
    """
    # Take every dimension but the first in reverse
    # then reverse that list again.
    dimensions = np.cumprod(self.map_dimensions[1::][::-1])[::-1]

    coord = []
    for idx, dim in enumerate(dimensions):
        if idx != 0:
            value = (index % dimensions[idx - 1]) // dim
        else:
            value = index // dim
        coord.append(value)

    coord.append(index % self.map_dimensions[-1])

    for idx, (width, row) in enumerate(zip(self.map_dimensions, coord)):
        x = np.abs(np.arange(width) - row) ** 2
        dims = self.map_dimensions[::-1]
        if idx:
            dims = dims[:-idx]
        x = np.broadcast_to(x, dims).T
        if idx == 0:
            distance = np.copy(x)
        else:
            distance += x.T

    return distance
def test_cumprod(self):
    self._check_accum_op('cumprod')
def test_cummethods_bool(self):
    # GH 6270
    # looks like a buggy np.maximum.accumulate for numpy 1.6.1, py 3.2

    def cummin(x):
        return np.minimum.accumulate(x)

    def cummax(x):
        return np.maximum.accumulate(x)

    a = pd.Series([False, False, False, True, True, False, False])
    b = ~a
    c = pd.Series([False] * len(b))
    d = ~c
    methods = {'cumsum': np.cumsum, 'cumprod': np.cumprod,
               'cummin': cummin, 'cummax': cummax}
    args = product((a, b, c, d), methods)
    for s, method in args:
        expected = Series(methods[method](s.values))
        result = getattr(s, method)()
        assert_series_equal(result, expected)

    e = pd.Series([False, True, nan, False])
    cse = pd.Series([0, 1, nan, 1], dtype=object)
    cpe = pd.Series([False, 0, nan, 0])
    cmin = pd.Series([False, False, nan, False])
    cmax = pd.Series([False, True, nan, True])
    expecteds = {'cumsum': cse, 'cumprod': cpe,
                 'cummin': cmin, 'cummax': cmax}

    for method in methods:
        res = getattr(e, method)()
        assert_series_equal(res, expecteds[method])
def p2(x, coef):
    X = np.empty(len(coef))
    X[0] = 1
    X[1:] = x
    y = np.cumprod(X)   # y = [1, x, x**2,...]
    return np.dot(coef, y)
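A short usage sketch, assuming p2 from above is in scope: coefficients are ordered low-to-high, the same convention as numpy.polynomial.polynomial.polyval.

import numpy as np

coef = np.array([1.0, 3.0, 5.0])   # 1 + 3x + 5x^2
print(p2(2.0, coef))               # 1 + 3*2 + 5*4 = 27.0

assert np.isclose(p2(2.0, coef),
                  np.polynomial.polynomial.polyval(2.0, coef))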
def get_idx_from_arg(a, arg, axis):
    shp = a.shape
    cp = np.cumprod(shp[::-1])[::-1]
    if axis == len(shp) - 1:
        m = 1
    else:
        m = cp[axis + 1]
    n = cp[0] // cp[axis]
    if m == 1:
        return np.arange(n) * cp[axis] + arg.ravel()
    return (np.repeat(np.arange(n) * cp[axis], m)
            + np.tile(np.arange(m), n) + arg.ravel() * m)
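A usage sketch of my own (assuming get_idx_from_arg from above is in scope): it converts the output of argmax along an axis into indices into the flattened array, so the reduced values can be gathered with one fancy-indexing step:

import numpy as np

a = np.random.rand(3, 4, 5)
for axis in range(a.ndim):
    arg = np.argmax(a, axis=axis)
    idx = get_idx_from_arg(a, arg, axis)
    assert np.array_equal(a.ravel()[idx], np.max(a, axis=axis).ravel())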
def read_feature(filename, keep_shape=False):
    """Read feature (a.k.a blob) dump by C3D.

    Parameters
    ----------
    filename : str
        Fullpath of file to read.
    keep_shape : bool
        Reshape feature to the shape reported.

    Outputs
    -------
    feature : ndarray
        numpy array of features
    s : tuple
        shape of original feature

    Note: It accomplishes the same purpose of this code:
        C3D/examples/c3d_feature_extraction/script/read_binary_blob.m
    """
    s_parr, d_parr = array.array('i'), array.array('f')
    with open(filename, 'rb') as f:
        s_parr.fromfile(f, 5)
        s = np.array(s_parr)
        m = np.cumprod(s)[-1]
        d_parr.fromfile(f, m)
    feature = np.array(d_parr)
    if keep_shape:
        feature = feature.reshape(s)
    return feature, s
def __init__(self, prefix, children, suffix='.dat', root='root',
             createdirs=True):
    '''
    Parameters
    ----------
    prefix : string
        All paths are prefixed with this string.
    children : sequence of ints
        Creates a directory tree rooted at path given by `root` with levels
        specified by the `children` array: level ``i`` has ``children[i]``
        children. ``children[-1]`` specifies the arity of the leaves.
    suffix : string
        The suffix of the leafs (i.e. files) of the tree.
    root : path
        The path to the root of the tree.
    createdirs : bool
        If True, actually create the directories. Note: this is not
        thread-safe.
    '''
    if len(children) == 0:
        raise ValueError("need at least one level")
    self.children = np.asarray(children)
    self.root = root
    self.prefix = prefix
    self.suffix = suffix
    self.depth = len(children)
    self._cap = np.cumprod(self.children[::-1])[::-1]
    self.capacity = self._cap[0]
    self._den = self._cap / self.children
    self.width = int(np.ceil(np.log10(self.capacity)))
    if createdirs:
        self._mktree()
def neighbors(shape, conn=1):
    dim = len(shape)
    block = generate_binary_structure(dim, conn)
    block[tuple([1] * dim)] = 0
    idx = np.where(block > 0)
    idx = np.array(idx, dtype=np.uint8).T
    idx = np.array(idx - [1] * dim)
    acc = np.cumprod((1,) + shape[::-1][:-1])
    return np.dot(idx, acc[::-1])
def neighbors(shape):
    dim = len(shape)
    block = np.ones([3] * dim)
    block[tuple([1] * dim)] = 0
    idx = np.where(block > 0)
    idx = np.array(idx, dtype=np.uint8).T
    idx = np.array(idx - [1] * dim)
    acc = np.cumprod((1,) + shape[::-1][:-1])
    return np.dot(idx, acc[::-1])
def filter(img, msk, idx, bur, tor, mode):
    nbs = neighbors(img.shape)
    acc = np.cumprod((1,) + img.shape[::-1][:-1])[::-1]
    img = img.ravel()
    msk = msk.ravel()

    arg = np.argsort(img[idx])[::-1 if mode else 1]

    for i in arg:
        if msk[idx[i]] != 3:
            idx[i] = 0
            continue
        cur = 0
        s = 1
        bur[0] = idx[i]
        while cur < s:
            p = bur[cur]
            if msk[p] == 2:
                idx[i] = 0
                break
            for dp in nbs:
                cp = p + dp
                if msk[cp] == 0 or cp == idx[i] or msk[cp] == 4:
                    continue
                if mode and img[cp] < img[idx[i]] - tor:
                    continue
                if not mode and img[cp] > img[idx[i]] + tor:
                    continue
                bur[s] = cp
                s += 1
                if s == msk.size // 3:
                    cut = cur // 2
                    msk[bur[:cut]] = 2
                    bur[:s - cut] = bur[cut:s]
                    cur -= cut
                    s -= cut
                if msk[cp] != 2:
                    msk[cp] = 4
            cur += 1
        msk[bur[:s]] = 2
    return idx2rc(idx[idx > 0], acc)
def ridge(img, mark, up=True):
    oimg, omark = img, mark
    ndim = img.ndim
    mark[[0, -1], :] = 4
    mark[:, [0, -1]] = 4

    nb4 = nbs4(*img.shape)
    nb8 = nbs8(*img.shape)
    acc = np.cumprod((1,) + img.shape[::-1][:-1])[::-1]
    img = img.ravel()
    mark = mark.ravel()

    pts = np.zeros(131072, dtype=np.int64)
    s, bins = collect(img, mark, nb4, pts)
    # print(bins)
    aaa = 0
    for level in range(len(bins))[::1 if up else -1]:
        if bins[level] == 0:
            continue
        aaa += 1
        s, c = clear(mark, pts, s, 0)
        s = step(img, mark, pts, s, level, up, nb4, nb8)
        '''
        if level > 250:
            plt.imshow(omark, cmap='gray')
            plt.show()
        '''

    for i in range(len(mark)):
        if mark[i] == 3:
            mark[i] = 255
        else:
            mark[i] = 0
def neighbors(shape):
    dim = len(shape)
    block = generate_binary_structure(dim, 1)
    block[tuple([1] * dim)] = 0
    idx = np.where(block > 0)
    idx = np.array(idx, dtype=np.uint8).T
    idx = np.array(idx - [1] * dim)
    acc = np.cumprod((1,) + shape[::-1][:-1])
    return np.dot(idx, acc[::-1])
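This neighbors helper and its variants feed the skeleton routines in this collection (parse_struc, filter, draw_graph), and they all share one pattern: cumprod over the reversed shape gives each dimension's stride in elements, so a relative move like (dy, dx) collapses into a single flat-index offset. A small check with a hypothetical shape:

import numpy as np

shape = (4, 5)
acc = np.cumprod((1,) + shape[::-1][:-1])[::-1]   # (5, 1) for a 4x5 array

p = np.ravel_multi_index((2, 3), shape)           # flat index of (2, 3)
q = p + np.dot([1, 0], acc)                       # move one row down
assert q == np.ravel_multi_index((3, 3), shape)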
def draw_graph(img, graph, cn=255, ce=128):
    acc = np.cumprod((1,) + img.shape[::-1][:-1])[::-1]
    img = img.ravel()
    for idx in graph.nodes():
        pts = graph.node[idx]['pts']
        img[np.dot(pts, acc)] = cn
    for (s, e) in graph.edges():
        eds = graph[s][e]
        for i in eds:
            pts = eds[i]['pts']
            img[np.dot(pts, acc)] = ce
def run(self, ips, snap, img, para=None):
    ips.lut = self.buflut
    k, unit = ips.unit

    lev, ds, step = para['thr'], para['ds'], para['step']
    scube = np.cumprod(ips.imgs.shape)[-1] * k**3
    sfront = (ips.imgs[::ds, ::ds, ::ds] > lev).sum() * ds**3 * k**3
    sback = scube - sfront
    print(scube, sfront, sback)

    vts, fs, ns, cs = marching_cubes_lewiner(
        ips.imgs[::ds, ::ds, ::ds], lev, step_size=step)
    area = mesh_surface_area(vts, fs) * (ds**2 * k**2)

    rst = [round(i, 3) for i in
           [scube, sfront, sback, sfront / scube, area, area / sfront]]
    titles = ['Cube Volume', 'Volume', 'Blank',
              'Volume/Cube', 'Surface', 'Volume/Surface']
    IPy.table('Volume Measure', [rst], cols=titles)
def _map_global_to_filtered(self, k):
    """
    map global (unfiltered) ND key to local (filtered) 2D key

    Parameters
    ----------
    k: tuple
        Labels associated with the modified element of the non-filtered array.

    Returns
    -------
    tuple
        Positional index (row, column) of the modified data cell.
    """
    assert isinstance(k, tuple) and len(k) == self.la_data.ndim

    dkey = {axis_id: axis_key
            for axis_key, axis_id in zip(k, self.la_data.axes.ids)}

    # transform global dictionary key to "local" (filtered) key by removing
    # the parts of the key which are redundant with the filter
    for axis_id, axis_filter in self.current_filter.items():
        axis_key = dkey[axis_id]
        if np.isscalar(axis_filter) and axis_key == axis_filter:
            del dkey[axis_id]
        elif not np.isscalar(axis_filter) and axis_key in axis_filter:
            pass
        else:
            # that key is invalid for/outside the current filter
            return None

    # transform (axis:label) dict key to positional ND key
    try:
        index_key = self.filtered_data._translated_key(dkey)
    except ValueError:
        return None

    # transform positional ND key to positional 2D key
    strides = np.append(1, np.cumprod(self.filtered_data.shape[1:-1][::-1],
                                      dtype=int))[::-1]
    return (index_key[:-1] * strides).sum(), index_key[-1]