The following code examples were extracted from open-source Python projects to illustrate how to use tensorflow.meshgrid().
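As quick orientation before the project code below, here is a minimal sketch (written for this page, not taken from any of the projects): tf.meshgrid behaves like numpy.meshgrid, expanding 1-D coordinate vectors into coordinate matrices, with indexing='xy' (the default) or indexing='ij' controlling the output layout.

import tensorflow as tf

x = tf.linspace(0.0, 2.0, 3)   # [0., 1., 2.]
y = tf.linspace(0.0, 1.0, 2)   # [0., 1.]

# Default 'xy' indexing: outputs have shape (len(y), len(x)) = (2, 3).
X_xy, Y_xy = tf.meshgrid(x, y)

# 'ij' (matrix) indexing: outputs have shape (len(x), len(y)) = (3, 2).
X_ij, Y_ij = tf.meshgrid(x, y, indexing='ij')

with tf.Session() as sess:
    print(sess.run([X_xy, Y_xy]))
    print(sess.run([X_ij, Y_ij]))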
def _meshgrid(self):
    with tf.variable_scope('_meshgrid'):
        x_t = tf.matmul(tf.ones(shape=tf.stack([self.out_height, 1])),
                        tf.transpose(tf.expand_dims(tf.linspace(-1.0, 1.0, self.out_width), 1), [1, 0]))
        y_t = tf.matmul(tf.expand_dims(tf.linspace(-1.0, 1.0, self.out_height), 1),
                        tf.ones(shape=tf.stack([1, self.out_width])))
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        px,py = tf.stack([x_t_flat],axis=2),tf.stack([y_t_flat],axis=2)
        # source control points
        x,y = tf.linspace(-1.,1.,self.Column_controlP_number),tf.linspace(-1.,1.,self.Row_controlP_number)
        x,y = tf.meshgrid(x,y)
        xs,ys = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1)))
        cpx,cpy = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2])
        px, cpx = tf.meshgrid(px,cpx)
        py, cpy = tf.meshgrid(py,cpy)
        # Compute distance R
        Rx,Ry = tf.square(tf.subtract(px,cpx)),tf.square(tf.subtract(py,cpy))
        R = tf.add(Rx,Ry)
        R = tf.multiply(R,tf.log(tf.clip_by_value(R,1e-10,1e+10)))
        # Source coordinates
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([ones, x_t_flat, y_t_flat, R], 0)
        grid = tf.reshape(grid, [-1])
        grid = tf.reshape(grid, [self.Column_controlP_number*self.Row_controlP_number+3, self.out_height*self.out_width])
        return grid
def fast_rotate(input_image, dx=0, dy=0):
    # Basic rotations (constant disparities) for equirectangular images.
    # For image augmentations (y-axis rotations), this method is preferable
    # compared to the more general rotation function.
    height = tf.shape(input_image)[0]
    width = tf.shape(input_image)[1]

    # Shift coordinate grid for inverse warp.
    ix, iy = tf.meshgrid(tf.range(width), tf.range(height))
    ox = tf.mod(ix - dx, width)
    oy = tf.mod(iy - dy, height)
    indices = tf.stack([oy, ox], 2)

    # Perform exact sampling (as we are using integer coordinates).
    return tf.gather_nd(input_image, indices)

# Project equirectangular image onto a cube face.
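A hypothetical call to fast_rotate (the image size and shift values below are illustrative, not from the source project); shifting by a quarter of the width wraps the equirectangular panorama 90 degrees around the vertical axis:

# Assumed input: an H x W x C equirectangular image tensor.
pano = tf.placeholder(tf.float32, [512, 1024, 3])
rotated = fast_rotate(pano, dx=256, dy=0)  # wrap 1024/4 = 256 columns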
def get_tiled_anchors_for_shape(self, width, height):
    """ creates/tiles anchors for a width x height image/feature map,
        producing coordinates from [0, width) and [0, height) for the
        resulting bounding boxes, according to the feature stride of the
        last conv layer """
    anchors = tf.expand_dims(self.anchors, axis=0)
    feat_height = tf.cast(tf.ceil(height / self.feat_stride), tf.int32)
    feat_width = tf.cast(tf.ceil(width / self.feat_stride), tf.int32)
    anchor_shape = [feat_height * feat_width, 1, 1]
    anchors = tf.tile(anchors, tf.stack(anchor_shape))

    x = tf.range(0.0, feat_width * self.feat_stride, self.feat_stride)
    y = tf.range(0.0, feat_height * self.feat_stride, self.feat_stride)

    X, Y = tf.meshgrid(x, y)
    X = tf.expand_dims(X, 2)
    Y = tf.expand_dims(Y, 2)

    shift = tf.reshape(tf.concat([Y, X, tf.zeros_like(X), tf.zeros_like(X)], 2), [-1, 1, 4])
    shift = tf.tile(shift, [1, self.num_anchors, 1])

    anchors = tf.cast(anchors, tf.float32) + shift
    return tf.reshape(anchors, [-1, 4])
def compute_indexing(source_size, target_size):
    # source_size is the size of reference feature map, where (0,0)
    # corresponds to the top-left corner and (1,1) corresponds to the
    # bottom-right corner of the feature map.
    jj, ii = np.meshgrid(range(source_size[1]), range(source_size[0]), indexing='xy')
    xx, yy = np.meshgrid(range(target_size[1]), range(target_size[0]), indexing='xy')

    X, I = np.meshgrid(xx.flatten(), ii.flatten(), indexing='xy')
    Y, J = np.meshgrid(yy.flatten(), jj.flatten(), indexing='xy')

    # normalize to 0 and 1
    I = I.astype('float32') / (source_size[0] - 1)
    J = J.astype('float32') / (source_size[1] - 1)
    Y = Y.astype('float32') / (target_size[0] - 1)
    X = X.astype('float32') / (target_size[1] - 1)

    indexing = tf.stack([I, J, Y, X], axis=2)
    return tf.expand_dims(indexing, 0)
def _generate_shifts(self, width, height):
    shift_x = tf.range(0, height) * self._feat_stride
    shift_y = tf.range(0, width) * self._feat_stride
    shift_x, shift_y = tf.meshgrid(shift_x, shift_y, indexing='ij')
    # tf.pack is the pre-TF-1.0 name of tf.stack.
    shifts = tf.transpose(tf.pack(
        [tf.reshape(shift_x, (-1,)),
         tf.reshape(shift_y, (-1,)),
         tf.reshape(shift_x, (-1,)),
         tf.reshape(shift_y, (-1,))],
        axis=0
    ))
    return shifts
def _meshgrid(self):
    with tf.variable_scope('_meshgrid'):
        x_use = tf.linspace(-1.0, 1.0, self.out_height)
        y_use = tf.linspace(-1.0, 1.0, self.out_width)
        z_use = tf.linspace(-1.0, 1.0, self.out_depth)
        x_t = tf.tile(x_use, [self.out_width*self.out_depth])
        y_t = tf.tile(self._repeat(y_use, self.out_height, 'float32'), [self.out_depth])
        z_t = self._repeat(z_use, self.out_height*self.out_width, 'float32')
        x_t_flat = tf.reshape(x_t, (1, -1))
        y_t_flat = tf.reshape(y_t, (1, -1))
        z_t_flat = tf.reshape(z_t, (1, -1))
        px,py,pz = tf.stack([x_t_flat],axis=2),tf.stack([y_t_flat],axis=2),tf.stack([z_t_flat],axis=2)
        # source control points
        x,y,z = tf.linspace(-1.,1.,self.X_controlP_number),tf.linspace(-1.,1.,self.Y_controlP_number),tf.linspace(-1.,1.,self.Z_controlP_number)
        x = tf.tile(x, [self.Y_controlP_number*self.Z_controlP_number])
        y = tf.tile(self._repeat(y, self.X_controlP_number, 'float32'), [self.Z_controlP_number])
        z = self._repeat(z, self.X_controlP_number*self.Y_controlP_number, 'float32')
        xs,ys,zs = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1))),tf.transpose(tf.reshape(z,(-1,1)))
        cpx,cpy,cpz = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([zs],axis=2),perm=[1,0,2])
        px, cpx = tf.meshgrid(px, cpx)
        py, cpy = tf.meshgrid(py, cpy)
        pz, cpz = tf.meshgrid(pz, cpz)
        # Compute distance R
        Rx,Ry,Rz = tf.square(tf.subtract(px,cpx)),tf.square(tf.subtract(py,cpy)),tf.square(tf.subtract(pz,cpz))
        R = tf.add(tf.add(Rx, Ry), Rz)
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        # Source coordinates
        ones = tf.ones_like(x_t_flat)
        grid = tf.concat([ones, x_t_flat, y_t_flat, z_t_flat, R], 0)
        return grid
def __init__(self, input_shape, control_points_ratio):
    self.num_batch = input_shape[0]
    self.height = input_shape[1]
    self.width = input_shape[2]
    self.num_channels = input_shape[3]
    self.out_height = self.height
    self.out_width = self.width
    self.Column_controlP_number = int(input_shape[1] / control_points_ratio)
    self.Row_controlP_number = int(input_shape[2] / control_points_ratio)
    init_x = np.linspace(-5, 5, self.Column_controlP_number)
    init_y = np.linspace(-5, 5, self.Row_controlP_number)
    x_s, y_s = np.meshgrid(init_x, init_y)
    self.initial = np.array([x_s, y_s])
def generate_anchors(boxes, height, width, conv_height, conv_width):
    '''Generate anchors for given geometry

    boxes: K x 2 tensor for anchor geometries, K different sizes
    height: source image height
    width: source image width
    conv_height: convolution layer height
    conv_width: convolution layer width

    returns: conv_height x conv_width x K x 4 tensor with boxes for all
        positions. Last dimension 4 numbers are (y, x, h, w)
    '''
    k, _ = boxes.get_shape().as_list()

    height, width = tf.cast(height, tf.float32), tf.cast(width, tf.float32)

    grid = tf.transpose(tf.stack(tf.meshgrid(
        tf.linspace(-0.5, height - 0.5, conv_height),
        tf.linspace(-0.5, width - 0.5, conv_width)), axis=2), [1, 0, 2])

    # convert boxes from K x 2 to 1 x 1 x K x 2
    boxes = tf.expand_dims(tf.expand_dims(boxes, 0), 0)
    # convert grid from H' x W' x 2 to H' x W' x 1 x 2
    grid = tf.expand_dims(grid, 2)

    # combine them into single H' x W' x K x 4 tensor
    # (note: legacy tf.concat(dim, values) argument order, TF < 1.0)
    return tf.concat(
        3, [tf.tile(grid, [1, 1, k, 1]),
            tf.tile(boxes, [conv_height, conv_width, 1, 1])]
    )
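A hedged usage sketch for generate_anchors (all sizes below are made up): with K = 3 anchor geometries and a 30 x 40 convolutional feature map, the result is a 30 x 40 x 3 x 4 tensor whose last dimension is (y, x, h, w).

# Illustrative values only.
boxes = tf.constant([[64., 64.], [128., 128.], [256., 256.]])  # K x 2 anchor sizes (h, w)
anchors = generate_anchors(boxes, height=480, width=640,
                           conv_height=30, conv_width=40)
# anchors: a 30 x 40 x 3 x 4 tensor of (y, x, h, w) boxes.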
def meshgrid(*args, **kwargs):
    return tensorflow.meshgrid(*args, **kwargs)
def _makeT(self, cp):
    with tf.variable_scope('_makeT'):
        cp = tf.reshape(cp, (-1, 3, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number))
        cp = tf.cast(cp, 'float32')
        N_f = tf.shape(cp)[0]
        # c_s
        x,y,z = tf.linspace(-1.,1.,self.X_controlP_number),tf.linspace(-1.,1.,self.Y_controlP_number),tf.linspace(-1.,1.,self.Z_controlP_number)
        x = tf.tile(x, [self.Y_controlP_number*self.Z_controlP_number])
        y = tf.tile(self._repeat(y, self.X_controlP_number, 'float32'), [self.Z_controlP_number])
        z = self._repeat(z, self.X_controlP_number*self.Y_controlP_number, 'float32')
        xs,ys,zs = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1))),tf.transpose(tf.reshape(z,(-1,1)))
        cp_s = tf.concat([xs, ys, zs], 0)
        cp_s_trans = tf.transpose(cp_s)  # (4*4*4)*3 -> 64 * 3
        ## === Compute distance R
        xs_trans,ys_trans,zs_trans = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([zs],axis=2),perm=[1,0,2])
        xs, xs_trans = tf.meshgrid(xs, xs_trans)
        ys, ys_trans = tf.meshgrid(ys, ys_trans)
        zs, zs_trans = tf.meshgrid(zs, zs_trans)
        Rx,Ry,Rz = tf.square(tf.subtract(xs,xs_trans)),tf.square(tf.subtract(ys,ys_trans)),tf.square(tf.subtract(zs,zs_trans))
        R = tf.add_n([Rx, Ry, Rz])
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        ones = tf.ones([self.Y_controlP_number*self.X_controlP_number*self.Z_controlP_number, 1], tf.float32)
        ones_trans = tf.transpose(ones)
        zeros = tf.zeros([4, 4], tf.float32)
        Deltas1 = tf.concat([ones, cp_s_trans, R], 1)
        Deltas2 = tf.concat([ones_trans, cp_s], 0)
        Deltas2 = tf.concat([zeros, Deltas2], 1)
        Deltas = tf.concat([Deltas1, Deltas2], 0)
        ## get deltas_inv
        Deltas_inv = tf.matrix_inverse(Deltas)
        Deltas_inv = tf.expand_dims(Deltas_inv, 0)
        Deltas_inv = tf.reshape(Deltas_inv, [-1])
        Deltas_inv_f = tf.tile(Deltas_inv, tf.stack([N_f]))
        Deltas_inv_f = tf.reshape(Deltas_inv_f, tf.stack([N_f, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number+4, -1]))
        cp_trans = tf.transpose(cp, perm=[0, 2, 1])
        zeros_f_In = tf.zeros([N_f, 4, 3], tf.float32)
        cp = tf.concat([cp_trans, zeros_f_In], 1)
        T = tf.transpose(tf.matmul(Deltas_inv_f, cp), [0, 2, 1])
        return T
def grid_2d(in_size, out_size=None):
    grid_ys, grid_xs = tf.meshgrid(tf.range(0, in_size[0]), tf.range(0, in_size[1]), indexing='ij')
    if out_size is not None:
        grid_yxs = tf.image.resize_images(tf.pack([grid_ys, grid_xs], axis=2), out_size[0], out_size[1])
        grid_ys, grid_xs = grid_yxs[:, :, 0], grid_yxs[:, :, 1]
    grid_ys = grid_ys / tf.to_float(in_size[0])
    grid_xs = grid_xs / tf.to_float(in_size[1])
    return grid_ys, grid_xs
def tf_batch_map_offsets(input, offsets, order=1):
    """Batch map offsets into input

    Parameters
    ---------
    input : tf.Tensor. shape = (b, s, s)
    offsets: tf.Tensor. shape = (b, s, s, 2)

    Returns
    -------
    tf.Tensor. shape = (b, s, s)
    """
    input_shape = tf.shape(input)
    batch_size = input_shape[0]
    input_size = input_shape[1]

    offsets = tf.reshape(offsets, (batch_size, -1, 2))
    grid = tf.meshgrid(
        tf.range(input_size), tf.range(input_size), indexing='ij'
    )
    grid = tf.stack(grid, axis=-1)
    grid = tf.cast(grid, 'float32')
    grid = tf.reshape(grid, (-1, 2))
    grid = tf_repeat_2d(grid, batch_size)
    coords = offsets + grid

    mapped_vals = tf_batch_map_coordinates(input, coords)
    return mapped_vals
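The function above performs the kind of offset-based sampling used in deformable-convolution implementations. A usage sketch with assumed shapes (it also relies on the tf_repeat_2d and tf_batch_map_coordinates helpers from the same project being in scope):

# Shapes are illustrative: batch of 4 feature maps of size 32 x 32.
feat = tf.placeholder(tf.float32, [4, 32, 32])         # (b, s, s)
offsets = tf.placeholder(tf.float32, [4, 32, 32, 2])   # (b, s, s, 2), per-pixel (row, col) offsets
warped = tf_batch_map_offsets(feat, offsets)           # (b, s, s)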
def uv_grid(shape):
    u, v = tf.meshgrid(tf.linspace(0.0, 1.0, shape[1]), tf.linspace(0.0, 1.0, shape[0]))
    return u, v
def lat_long_grid(shape, epsilon=1.0e-12):
    return tf.meshgrid(tf.linspace(-np.pi, np.pi, shape[1]),
                       tf.linspace(-np.pi / 2.0 + epsilon, np.pi / 2.0 - epsilon, shape[0]))
def uv_grid(shape):
    return tf.meshgrid(tf.linspace(-0.5, 0.5, shape[1]),
                       tf.linspace(-0.5, 0.5, shape[0]))

# Restricted rotations of (a, b, c) to (x, y, z), implemented using
# permutations and negations.
def xyz_grid(shape, face="front"):
    a, b = tf.meshgrid(tf.linspace(-1.0, 1.0, shape[1]), tf.linspace(-1.0, 1.0, shape[0]))
    c = tf.constant(1.0, dtype=tf.float32, shape=shape)
    return switch_face(a, b, c, face)

# Convert Cartesian coordinates (x, y, z) to latitude (T) and longitude (S).
def backproject_cubic_depth(depth, shape, face):
    a, b = tf.meshgrid(tf.linspace(-1.0, 1.0, shape[2]), tf.linspace(-1.0, 1.0, shape[1]))
    A = depth * tf.expand_dims(tf.tile(tf.expand_dims(a, 0), [shape[0], 1, 1]), 3)
    B = depth * tf.expand_dims(tf.tile(tf.expand_dims(b, 0), [shape[0], 1, 1]), 3)
    C = depth
    x, y, z = switch_face(A, B, C, face)
    return tf.sqrt(x ** 2.0 + y ** 2.0 + z ** 2.0)
def backproject_rectilinear(depth, K, shape, face):
    u, v = tf.meshgrid(tf.linspace(-1.0, 1.0, shape[2]), tf.linspace(-1.0, 1.0, shape[1]))
    u = tf.expand_dims(tf.tile(tf.expand_dims(u, 0), [shape[0], 1, 1]), 3)
    v = tf.expand_dims(tf.tile(tf.expand_dims(v, 0), [shape[0], 1, 1]), 3)
    A = (u - K[2]) * depth / K[0]
    B = (v - K[3]) * depth / K[1]
    C = depth
    x, y, z = switch_face(A, B, C, face)
    return tf.sqrt(x ** 2.0 + z ** 2.0)
def rectilinear_xyz(K, shape, face="front"):
    u, v = tf.meshgrid(tf.linspace(-1.0, 1.0, shape[1]), tf.linspace(-1.0, 1.0, shape[0]))
    # X = (u - c_x) * z / f_x
    # Y = (v - c_y) * z / f_y
    a = (u - K[2]) / K[0]
    b = (v - K[3]) / K[1]
    c = tf.ones([shape[1], shape[0]], dtype=tf.float32)
    return switch_face(a, b, c, face)
def unlabeled_data(self, x_u):
    # repeat data
    x_u = tf.tile(x_u, [self.num_classes, 1])
    nums = tf.range(0, self.num_classes, 1)
    _, t_u = tf.meshgrid(tf.zeros(self.batch_size, dtype=tf.int32), nums)
    return x_u, t_u
def _meshgrid(height, width):
    x_t_flat, y_t_flat = tf.meshgrid(tf.linspace(-1., 1., width),
                                     tf.linspace(-1., 1., height))
    ones = tf.ones_like(x_t_flat)
    grid = tf.concat(values=[x_t_flat, y_t_flat, ones], axis=0)
    return grid
def warping_meshgrid(height, width):
    x_t_flat, y_t_flat = tf.meshgrid(tf.linspace(-1., 1., width),
                                     tf.linspace(-1., 1., height))
    grid = tf.concat(values=[x_t_flat, y_t_flat], axis=0)
    return grid
def pixelnet_convs(inputs, num_class, is_training=True, reuse=False):
    num_batch = tf.shape(inputs)[0]
    height = tf.shape(inputs)[1]
    width = tf.shape(inputs)[2]

    with tf.variable_scope('vgg_16', reuse=reuse):
        net, hyperfeats = nets.vgg_like(inputs)
        tf.add_to_collection('last_conv', net)

    with tf.name_scope('hyper_columns'):
        if is_training:
            # sample pixels corresponding to the last feature elements
            h, w = net.get_shape().as_list()[1:3]
            trace_locations = ops.trace_locations_backward
        else:
            # sample pixels corresponding to the whole image
            h, w = [height, width]
            trace_locations = ops.trace_locations_forward

        X, Y = tf.meshgrid(tf.range(w), tf.range(h), indexing='xy')
        loc_x = tf.tile(tf.reshape(X, [1, -1]), [num_batch, 1])
        loc_y = tf.tile(tf.reshape(Y, [1, -1]), [num_batch, 1])
        locations = [trace_locations(loc_x, loc_y, [h, w], [tf.shape(feat)[1], tf.shape(feat)[2]])
                     for feat in hyperfeats]

        net = ops.extract_values(hyperfeats, locations)
        hyperchannels = net.get_shape().as_list()[-1]
        net = tf.reshape(net, [num_batch, h, w, hyperchannels])
        tf.add_to_collection('hyper_column', net)
    return net
def _generate_anchors(self, feature_map_shape):
    """Generate anchors for an image.

    Using the feature map (the output of the pretrained network for an
    image) and the anchor_reference generated from the anchor config
    values, we generate a list of anchors.

    Anchors are just fixed bounding boxes of different ratios and sizes
    that are uniformly generated throughout the image.

    Args:
        feature_map_shape: Shape of the convolutional feature map used as
            input for the RPN. Should be (batch, height, width, depth).

    Returns:
        all_anchors: A flattened Tensor with all the anchors of shape
            `(num_anchors_per_points * feature_width * feature_height, 4)`
            using the (x1, y1, x2, y2) convention.
    """
    with tf.variable_scope('generate_anchors'):
        grid_width = feature_map_shape[2]  # width
        grid_height = feature_map_shape[1]  # height
        shift_x = tf.range(grid_width) * self._anchor_stride
        shift_y = tf.range(grid_height) * self._anchor_stride
        shift_x, shift_y = tf.meshgrid(shift_x, shift_y)

        shift_x = tf.reshape(shift_x, [-1])
        shift_y = tf.reshape(shift_y, [-1])

        shifts = tf.stack(
            [shift_x, shift_y, shift_x, shift_y],
            axis=0
        )

        shifts = tf.transpose(shifts)
        # Shifts now is a (H x W, 4) Tensor

        # Expand dims to use broadcasting sum.
        all_anchors = (
            np.expand_dims(self._anchor_reference, axis=0) +
            tf.expand_dims(shifts, axis=1)
        )
        # Flatten
        all_anchors = tf.reshape(
            all_anchors, (-1, 4)
        )
        return all_anchors
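The broadcasting sum in the code above is the core trick: a (1, num_anchors, 4) anchor reference is added to a (H*W, 1, 4) set of shifts to produce every anchor at every grid position. A tiny standalone NumPy sketch of the same pattern, with hypothetical numbers (two reference anchors, three grid positions):

import numpy as np

# Hypothetical values: two reference anchors centred at the origin, (x1, y1, x2, y2).
anchor_reference = np.array([[-8., -8., 8., 8.],
                             [-16., -16., 16., 16.]])        # (num_anchors, 4)
# Three grid positions with stride 16; the shift is applied to both corners.
shifts = np.array([[0., 0., 0., 0.],
                   [16., 0., 16., 0.],
                   [0., 16., 0., 16.]])                      # (H*W, 4)

all_anchors = anchor_reference[np.newaxis, :, :] + shifts[:, np.newaxis, :]
print(all_anchors.reshape(-1, 4).shape)                      # (6, 4)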
def _makeT(self, cp):
    with tf.variable_scope('_makeT'):
        cp = tf.reshape(cp, (-1, 3, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number))
        cp = tf.cast(cp, 'float32')
        N_f = tf.shape(cp)[0]
        # c_s
        x,y,z = tf.linspace(-1.,1.,self.X_controlP_number),tf.linspace(-1.,1.,self.Y_controlP_number),tf.linspace(-1.,1.,self.Z_controlP_number)
        x = tf.tile(x, [self.Y_controlP_number*self.Z_controlP_number])
        y = tf.tile(self._repeat(y, self.X_controlP_number, 'float32'), [self.Z_controlP_number])
        z = self._repeat(z, self.X_controlP_number*self.Y_controlP_number, 'float32')
        xs,ys,zs = tf.transpose(tf.reshape(x,(-1,1))),tf.transpose(tf.reshape(y,(-1,1))),tf.transpose(tf.reshape(z,(-1,1)))
        cp_s = tf.concat([xs, ys, zs], 0)
        cp_s_trans = tf.transpose(cp_s)  # (4*4*4)*3 -> 64 * 3
        ## === Compute distance R
        xs_trans,ys_trans,zs_trans = tf.transpose(tf.stack([xs],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([ys],axis=2),perm=[1,0,2]),tf.transpose(tf.stack([zs],axis=2),perm=[1,0,2])
        xs, xs_trans = tf.meshgrid(xs, xs_trans)
        ys, ys_trans = tf.meshgrid(ys, ys_trans)
        zs, zs_trans = tf.meshgrid(zs, zs_trans)
        Rx,Ry,Rz = tf.square(tf.subtract(xs,xs_trans)),tf.square(tf.subtract(ys,ys_trans)),tf.square(tf.subtract(zs,zs_trans))
        R = tf.add_n([Rx, Ry, Rz])
        #print("R",sess.run(R))
        R = tf.multiply(R, tf.log(tf.clip_by_value(R, 1e-10, 1e+10)))
        #print("R",sess.run(R))
        ones = tf.ones([self.Y_controlP_number*self.X_controlP_number*self.Z_controlP_number, 1], tf.float32)
        ones_trans = tf.transpose(ones)
        zeros = tf.zeros([4, 4], tf.float32)
        Deltas1 = tf.concat([ones, cp_s_trans, R], 1)
        Deltas2 = tf.concat([ones_trans, cp_s], 0)
        Deltas2 = tf.concat([zeros, Deltas2], 1)
        Deltas = tf.concat([Deltas1, Deltas2], 0)
        #print("Deltas",sess.run(Deltas))
        ## get deltas_inv
        Deltas_inv = tf.matrix_inverse(Deltas)
        Deltas_inv = tf.expand_dims(Deltas_inv, 0)
        Deltas_inv = tf.reshape(Deltas_inv, [-1])
        Deltas_inv_f = tf.tile(Deltas_inv, tf.stack([N_f]))
        Deltas_inv_f = tf.reshape(Deltas_inv_f, tf.stack([N_f, self.X_controlP_number*self.Y_controlP_number*self.Z_controlP_number+4, -1]))
        cp_trans = tf.transpose(cp, perm=[0, 2, 1])
        zeros_f_In = tf.zeros([N_f, 4, 3], tf.float32)
        cp = tf.concat([cp_trans, zeros_f_In], 1)
        #print("cp",sess.run(cp))
        #print("Deltas_inv_f",sess.run(Deltas_inv_f))
        T = tf.transpose(tf.matmul(Deltas_inv_f, cp), [0, 2, 1])
        #print("T",sess.run(T))
        return T
def draw_fn(self, shader):
    indices = tf.placeholder(tf.int32, [None, 3], name="ph_indices")

    verts = [None, None, None]
    for i in range(3):
        verts[i] = shader.vertex(indices[:, i], i)
        verts[i] = tf.matmul(verts[i], self.viewport, transpose_b=True)
        verts[i] = utils.affine_to_cartesian(verts[i])

    bbmin, bbmax = bounds(verts, self.width, self.height)

    def _fn(i):
        bbmin_i = tf.gather(bbmin, i)
        bbmax_i = tf.gather(bbmax, i)
        verts_i = [tf.gather(verts[0], i),
                   tf.gather(verts[1], i),
                   tf.gather(verts[2], i)]

        x, y = tf.meshgrid(tf.range(bbmin_i[0], bbmax_i[0]),
                           tf.range(bbmin_i[1], bbmax_i[1]))
        num_frags = tf.reduce_prod(tf.shape(x))
        p = tf.stack([tf.reshape(x, [-1]),
                      tf.reshape(y, [-1]),
                      tf.zeros([num_frags], dtype=tf.float32)], axis=1)

        bc, valid = barycentric(verts_i, p)
        p = tf.boolean_mask(p, valid)
        bc = [tf.boolean_mask(bc[k], valid) for k in range(3)]

        z = utils.tri_dot([verts_i[k][2] for k in range(3)], bc)

        inds = tf.to_int32(tf.stack([p[:, 1], p[:, 0]], axis=1))
        cur_z = tf.gather_nd(self.depth, inds)
        visible = tf.less_equal(cur_z, z)

        inds = tf.boolean_mask(inds, visible)
        bc = [tf.boolean_mask(bc[k], visible) for k in range(3)]
        z = tf.boolean_mask(z, visible)

        c = utils.pack_colors(shader.fragment(bc, i), 1)
        updates = [
            tf.scatter_nd_update(self.color, inds, c, use_locking=False),
            tf.scatter_nd_update(self.depth, inds, z, use_locking=False)]
        return updates

    num_faces = tf.shape(indices)[0]
    updates = utils.sequential_for(_fn, 0, num_faces)
    self.commands.append(updates)

    def _draw(indices_val, **kwargs):
        self.args[indices] = indices_val
        for k, v in kwargs.items():
            self.args[getattr(shader, k)] = v

    return _draw