The following 50 code examples, extracted from open-source Python projects, illustrate how to use numpy.r_().
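Before the project examples, a minimal sketch of how numpy.r_ behaves may help (the variable names below are illustrative, not taken from any of the projects): it concatenates scalars and arrays along the first axis, and it expands slice syntax much like np.arange.

import numpy as np

# Scalars and arrays are concatenated along the first axis.
a = np.r_[1, 2, np.array([3, 4]), 5]      # array([1, 2, 3, 4, 5])

# Slice notation start:stop:step is expanded like np.arange.
b = np.r_[0:5]                            # array([0, 1, 2, 3, 4])

# 2-D inputs are stacked row-wise (like np.vstack).
m = np.r_[np.eye(2), np.zeros((1, 2))]    # shape (3, 2)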
def primes_2_to_n(n):
    """ Efficient algorithm to find and list primes from 2 to `n`.

    Args:
        n (int): highest number from which to search for primes

    Returns:
        np array of all primes from 2 to n

    References:
        Robert William Hanks,
        https://stackoverflow.com/questions/2068372/fastest-way-to-list-all-primes-below-n/
    """
    sieve = np.ones(int(n / 3 + (n % 6 == 2)), dtype=np.bool)
    for i in range(1, int((n ** 0.5) / 3 + 1)):
        if sieve[i]:
            k = 3 * i + 1 | 1
            sieve[int(k * k / 3)::2 * k] = False
            sieve[int(k * (k - 2 * (i & 1) + 4) / 3)::2 * k] = False
    return np.r_[2, 3, ((3 * np.nonzero(sieve)[0][1:] + 1) | 1)]
def test(args):
    corpus = load_corpus(args.input)
    vocab, docs = corpus['vocab'], corpus['docs']
    n_vocab = len(vocab)
    doc_keys = docs.keys()
    X_docs = []
    for k in doc_keys:
        X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0))
        del docs[k]
    X_docs = np.r_[X_docs]

    vae = load_vae_model(args.load_model)

    doc_codes = vae.predict(X_docs)
    dump_json(dict(zip(doc_keys, doc_codes.tolist())), args.output)
    print 'Saved doc codes file to %s' % args.output

    # if args.save_topics:
    #     topics = get_topics(vae, revdict(vocab), topn=10)
    #     write_file(topics, args.save_topics)
    #     print 'Saved topics file to %s' % args.save_topics
def radial_filter_fullspec(max_order, NFFT, fs, array_configuration, amp_maxdB=40):
    """Generate NFFT/2 + 1 modal radial filter of orders 0:max_order for frequencies 0:fs/2, wraps radial_filter()

    Parameters
    ----------
    max_order : int
        Maximum order
    NFFT : int
        Order of FFT (number of bins), should be a power of 2.
    fs : int
        Sampling frequency
    array_configuration : ArrayConfiguration
        List/Tuple/ArrayConfiguration, see io.ArrayConfiguration
    amp_maxdB : int, optional
        Maximum modal amplification limit in dB [Default: 40]

    Returns
    -------
    dn : array_like
        Vector of modal frequency domain filter of shape [max_order + 1 x NFFT / 2 + 1]
    """
    freqs = _np.linspace(0, fs / 2, NFFT / 2 + 1)
    orders = _np.r_[0:max_order + 1]
    return radial_filter(orders, freqs, array_configuration, amp_maxdB=amp_maxdB)
def test_comp_controllable(self, pp_fixture):
    """ """
    pcs, solver = pp_fixture
    I0 = np.r_[0.1, 0.2]
    IN = np.r_[0.1, 0.2]
    solver.set_start_interval(I0)
    solver.set_goal_interval(IN)

    # Basic checks
    res_ctrl = solver.solve_controllable_sets()
    assert res_ctrl is True
    ctrl_sets = solver.K
    for i in range(solver.N + 1):
        assert ctrl_sets[i, 1] >= ctrl_sets[i, 0]
        assert ctrl_sets[i, 0] >= 0

    assert ctrl_sets[solver.N, 0] >= IN[0] - TINY
    assert ctrl_sets[solver.N, 1] <= IN[1] + TINY
def test_comp_topp(self, pp_fixture):
    pcs, solver = pp_fixture
    I0 = np.r_[0.1, 0.2]
    IN = np.r_[0.1, 0.2]
    solver.set_start_interval(I0)
    solver.set_goal_interval(IN)
    us, xs = solver.solve_topp(reg=0)

    # Proper parameterization
    assert xs[0] <= I0[1] + TINY and xs[0] >= I0[0] - TINY
    assert xs[-1] <= IN[1] + TINY and xs[-1] >= IN[0] - TINY
    assert np.all(xs >= 0)
    for i in range(solver.N):
        Delta_i = solver.ss[i + 1] - solver.ss[i]
        assert np.allclose(xs[i + 1], xs[i] + 2 * us[i] * Delta_i)

    # Constraint satisfiability
    for i in range(solver.N):
        for c in pcs:
            if c.nm != 0:
                assert np.all(
                    c.a[i] * us[i] + c.b[i] * xs[i] + c.c[i] <= TINY)
def set_start_interval(self, I0):
    """Set starting *squared* velocities interval.

    Parameters
    ----------
    I0: array, or float
        (2, ) array, the interval of starting squared path velocities.
        Can also be a float.

    Raises
    ------
    AssertionError
        If `I0` is a single, negative float. Or if `I0[0] > I0[1]`.
    """
    I0 = np.r_[I0].astype(float)
    if I0.shape[0] == 1:
        I0 = np.array([I0[0], I0[0]])
    elif I0.shape[0] > 2:
        raise ValueError('Input I0 has wrong dimension: {}'.format(I0.shape))
    assert I0[1] >= I0[0], "Illegal input: non-increase end-points."
    assert I0[0] >= 0, "Illegal input: negative lower end-point."
    self.I0 = I0
def set_goal_interval(self, IN):
    """Set the goal squared velocity interval.

    Parameters
    ----------
    IN: array or float
        A single float, or a (2, ) array setting the goal
        `(x_lower, x_higher)` squared path velocities.

    Raises
    ------
    AssertionError
        If `IN` is a single, negative float. Or if `IN[0] > IN[1]`.
    """
    IN = np.r_[IN].astype(float)
    if IN.shape[0] == 1:
        IN = np.array([IN[0], IN[0]])
    elif IN.shape[0] > 2:
        raise ValueError('Input IN has wrong dimension: {}'.format(IN.shape))
    assert IN[1] >= IN[0], "Illegal input: non-increase end-points."
    assert IN[0] >= 0, "Illegal input: negative lower end-point."
    self.IN = IN
def getSensitivity(survey, A, B, M, N, model):

    if(survey == "Dipole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    elif(survey == "Dipole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])

    survey = DC.Survey([src])
    problem = DC.Problem3D_CC(mesh, sigmaMap=mapping)
    problem.Solver = SolverLU
    problem.pair(survey)
    fieldObj = problem.fields(model)

    J = problem.Jtvec(model, np.array([1.]), f=fieldObj)

    return J
def getSensitivity(survey, A, B, M, N, model):

    if(survey == "Dipole-Dipole"):
        rx = DC.Rx.Dipole_ky(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Dipole"):
        rx = DC.Rx.Dipole_ky(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    elif(survey == "Dipole-Pole"):
        rx = DC.Rx.Pole_ky(np.r_[M, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Pole"):
        rx = DC.Rx.Pole_ky(np.r_[M, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])

    survey = DC.Survey_ky([src])
    problem = DC.Problem2D_CC(mesh, sigmaMap=mapping)
    problem.Solver = SolverLU
    problem.pair(survey)
    fieldObj = problem.fields(model)

    J = problem.Jtvec(model, np.array([1.]), f=fieldObj)

    return J
def computeRotMatrix(self, Phi=False):

    #######################################
    # COMPUTE ROTATION MATRIX SUCH THAT m(t) = A*L(t)*A'*Hp
    # Default set such that phi1,phi2 = 0 is UXO pointed towards North

    if Phi is False:
        phi1 = np.radians(self.phi[0])
        phi2 = np.radians(self.phi[1])
        phi3 = np.radians(self.phi[2])
    else:
        phi1 = np.radians(Phi[0])   # Roll (CCW)
        phi2 = np.radians(Phi[1])   # Inclination (+ve is nose pointing down)
        phi3 = np.radians(Phi[2])   # Declination (degrees CW from North)

    # A1 = np.r_[np.c_[np.cos(phi1),-np.sin(phi1),0.],np.c_[np.sin(phi1),np.cos(phi1),0.],np.c_[0.,0.,1.]]   # CCW Rotation about z-axis
    # A2 = np.r_[np.c_[1.,0.,0.],np.c_[0.,np.cos(phi2),np.sin(phi2)],np.c_[0.,-np.sin(phi2),np.cos(phi2)]]   # CW Rotation about x-axis (rotates towards North)
    # A3 = np.r_[np.c_[np.cos(phi3),-np.sin(phi3),0.],np.c_[np.sin(phi3),np.cos(phi3),0.],np.c_[0.,0.,1.]]   # CCW Rotation about z-axis (direction of head of object)

    A1 = np.r_[np.c_[np.cos(phi1),np.sin(phi1),0.],np.c_[-np.sin(phi1),np.cos(phi1),0.],np.c_[0.,0.,1.]]     # CW Rotation about z-axis
    A2 = np.r_[np.c_[1.,0.,0.],np.c_[0.,np.cos(phi2),np.sin(phi2)],np.c_[0.,-np.sin(phi2),np.cos(phi2)]]     # CW Rotation about x-axis (rotates towards North)
    A3 = np.r_[np.c_[np.cos(phi3),np.sin(phi3),0.],np.c_[-np.sin(phi3),np.cos(phi3),0.],np.c_[0.,0.,1.]]     # CW Rotation about z-axis (direction of head of object)

    return np.dot(A3, np.dot(A2, A1))
def defineSensorLoc(self, XYZ):
    #############################################
    # DEFINE TRANSMITTER AND RECEIVER LOCATIONS
    #
    # XYZ: N X 3 array containing transmitter center locations
    # **NOTE** for this sensor, we know where the receivers are relative to transmitters

    self.TxLoc = XYZ

    dx = np.kron([-0.18, 0., 0., 0., 0.18], np.ones(3))
    dy = np.kron([0., -0.18, 0., 0.18, 0.], np.ones(3))

    N = np.shape(XYZ)[0]

    X = np.kron(XYZ[:, 0], np.ones((15))) + np.kron(np.ones((N)), dx)
    Y = np.kron(XYZ[:, 1], np.ones((15))) + np.kron(np.ones((N)), dy)
    Z = np.kron(XYZ[:, 2], np.ones((15)))

    self.RxLoc = np.c_[X, Y, Z]

    self.TxID = np.kron(np.arange(1, np.shape(XYZ)[0] + 1), np.ones((15)))
    self.RxComp = np.kron(np.kron(np.ones(np.shape(XYZ)[0]), np.ones((5))), np.r_[1, 2, 3])
def getSensitivity(survey, A, B, M, N, model):

    if(survey == "Dipole-Dipole"):
        rx = DC.Rx.Dipole_ky(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Dipole"):
        rx = DC.Rx.Dipole_ky(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    elif(survey == "Dipole-Pole"):
        rx = DC.Rx.Pole_ky(np.r_[M, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Pole"):
        rx = DC.Rx.Pole_ky(np.r_[M, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])

    survey = DC.Survey_ky([src])
    problem = DC.Problem2D_CC(mesh, sigmaMap=mapping)
    problem.Solver = SolverLU
    problem.pair(survey)
    fieldObj = problem.fields(model)

    J = problem.Jtvec(model, np.array([1.]), f=fieldObj)

    return J
def InteractiveDipole(self):
    def foo(orientation, normal, component, view, functype, flog, siglog,
            x1, y1, x2, y2, npts2D, npts, loc):
        f = np.r_[10**flog]
        sig = np.r_[10**siglog]
        return self.Dipole2Dviz(x1, y1, x2, y2, npts2D, npts, sig, f,
                                srcLoc=np.r_[0., 0., 0.], orientation=orientation,
                                component=component, view=view, normal=normal,
                                functype=functype, loc=loc, dx=50.)

    out = widgetify(
        foo,
        orientation=widgets.ToggleButtons(options=['x', 'y', 'z']),
        normal=widgets.ToggleButtons(options=['X', 'Y', 'Z'], value="Z"),
        component=widgets.ToggleButtons(options=['real', 'imag', 'amplitude', 'phase']),
        view=widgets.ToggleButtons(options=['x', 'y', 'z', 'vec']),
        functype=widgets.ToggleButtons(options=["E_from_ED", "H_from_ED", "E_from_ED_galvanic", "E_from_ED_inductive"]),
        flog=widgets.FloatSlider(min=-3, max=6, step=0.5, value=-3, continuous_update=False),
        siglog=widgets.FloatSlider(min=-3, max=3, step=0.5, value=-3, continuous_update=False),
        loc=widgets.FloatText(value=0.01),
        x1=widgets.FloatText(value=-10),
        y1=widgets.FloatText(value=0.01),
        x2=widgets.FloatText(value=10),
        y2=widgets.FloatText(value=0.01),
        npts2D=widgets.IntSlider(min=4, max=200, step=2, value=40),
        npts=widgets.IntSlider(min=4, max=200, step=2, value=40)
    )
    return out
def getSensitivity(survey, A, B, M, N, model):

    if(survey == "Dipole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    elif(survey == "Dipole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])

    survey = DC.Survey([src])
    problem = DC.Problem3D_CC(mesh, sigmaMap=sigmaMap)
    problem.Solver = SolverLU
    problem.pair(survey)
    fieldObj = problem.fields(model)

    J = problem.Jtvec(model, np.array([1.]), f=fieldObj)

    return J
def getSensitivity(survey, A, B, M, N, model):

    if(survey == "Dipole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Dipole"):
        rx = DC.Rx.Dipole(np.r_[M, 0.], np.r_[N, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])
    elif(survey == "Dipole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Dipole([rx], np.r_[A, 0.], np.r_[B, 0.])
    elif(survey == "Pole-Pole"):
        rx = DC.Rx.Pole(np.r_[M, 0.])
        src = DC.Src.Pole([rx], np.r_[A, 0.])

    Srv = DC.Survey([src])
    problem = DC.Problem3D_CC(mesh, sigmaMap=sigmaMap)
    problem.Solver = SolverLU
    problem.pair(Srv)
    fieldObj = problem.fields(model)

    J = problem.Jtvec(model, np.array([1.]), f=fieldObj)

    return J
def run(plotIt=True):
    sz = [16, 16]
    tM = discretize.TensorMesh(sz)
    qM = discretize.TreeMesh(sz)

    def refine(cell):
        if np.sqrt(((np.r_[cell.center]-0.5)**2).sum()) < 0.4:
            return 4
        return 3

    qM.refine(refine)
    rM = discretize.CurvilinearMesh(discretize.utils.exampleLrmGrid(sz, 'rotate'))

    if not plotIt:
        return
    fig, axes = plt.subplots(1, 3, figsize=(14, 5))
    opts = {}
    tM.plotGrid(ax=axes[0], **opts)
    axes[0].set_title('TensorMesh')
    qM.plotGrid(ax=axes[1], **opts)
    axes[1].set_title('TreeMesh')
    rM.plotGrid(ax=axes[2], **opts)
    axes[2].set_title('CurvilinearMesh')
def _getEdgeP(self, xEdge, yEdge, zEdge):
    if self.dim == 2:
        raise Exception('Not implemented')  # this should be a reordering of the face inner product?

    ind1, ind2, ind3 = [], [], []
    for ind in self._sortedCells:
        p = self._pointer(ind)
        w = self._levelWidth(p[-1])

        posX = [0, 0] if xEdge == 'eX0' else [w, 0] if xEdge == 'eX1' else [0, w] if xEdge == 'eX2' else [w, w]
        posY = [0, 0] if yEdge == 'eY0' else [w, 0] if yEdge == 'eY1' else [0, w] if yEdge == 'eY2' else [w, w]
        posZ = [0, 0] if zEdge == 'eZ0' else [w, 0] if zEdge == 'eZ1' else [0, w] if zEdge == 'eZ2' else [w, w]

        ind1.append(self._ex2i[self._index([p[0], p[1] + posX[0], p[2] + posX[1], p[3]])])
        ind2.append(self._ey2i[self._index([p[0] + posY[0], p[1], p[2] + posY[1], p[3]])] + self.ntEx)
        ind3.append(self._ez2i[self._index([p[0] + posZ[0], p[1] + posZ[1], p[2], p[3]])] + self.ntEx + self.ntEy)

    IND = np.r_[ind1, ind2, ind3]

    PXXX = sp.coo_matrix((np.ones(self.dim*self.nC), (range(self.dim*self.nC), IND)),
                         shape=(self.dim*self.nC, self.ntE)).tocsr()

    Re = self._deflationMatrix('E')

    return PXXX * Re
def rotatePointsFromNormals(XYZ, n0, n1, x0=np.r_[0., 0., 0.]):
    """
    rotates a grid so that the vector n0 is aligned with the vector n1

    :param numpy.array n0: vector of length 3, should have norm 1
    :param numpy.array n1: vector of length 3, should have norm 1
    :param numpy.array x0: vector of length 3, point about which we perform the rotation
    :rtype: numpy.array, 3x3
    :return: rotation matrix which rotates the frame so that n0 is aligned with n1
    """

    R = rotationMatrixFromNormals(n0, n1)

    assert XYZ.shape[1] == 3, "Grid XYZ should be 3 wide"
    assert len(x0) == 3, "x0 should have length 3"

    X0 = np.ones([XYZ.shape[0], 1])*mkvc(x0)

    return (XYZ - X0).dot(R.T) + X0  # equivalent to (R*(XYZ - X0)).T + X0
def normals(self):
    """Face Normals

    :rtype: numpy.array
    :return: normals, (sum(nF), dim)
    """
    if self.dim == 2:
        nX = np.c_[np.ones(self.nFx), np.zeros(self.nFx)]
        nY = np.c_[np.zeros(self.nFy), np.ones(self.nFy)]
        return np.r_[nX, nY]
    elif self.dim == 3:
        nX = np.c_[np.ones(self.nFx), np.zeros(self.nFx), np.zeros(self.nFx)]
        nY = np.c_[np.zeros(self.nFy), np.ones(self.nFy), np.zeros(self.nFy)]
        nZ = np.c_[np.zeros(self.nFz), np.zeros(self.nFz), np.ones(self.nFz)]
        return np.r_[nX, nY, nZ]
def tangents(self):
    """Edge Tangents

    :rtype: numpy.array
    :return: tangents, (sum(nE), dim)
    """
    if self.dim == 2:
        tX = np.c_[np.ones(self.nEx), np.zeros(self.nEx)]
        tY = np.c_[np.zeros(self.nEy), np.ones(self.nEy)]
        return np.r_[tX, tY]
    elif self.dim == 3:
        tX = np.c_[np.ones(self.nEx), np.zeros(self.nEx), np.zeros(self.nEx)]
        tY = np.c_[np.zeros(self.nEy), np.ones(self.nEy), np.zeros(self.nEy)]
        tZ = np.c_[np.zeros(self.nEz), np.zeros(self.nEz), np.ones(self.nEz)]
        return np.r_[tX, tY, tZ]
def test_counts(self):
    nc = 8
    h1 = np.random.rand(nc)*nc*0.5 + nc*0.5
    h2 = np.random.rand(nc)*nc*0.5 + nc*0.5
    h = [hi/np.sum(hi) for hi in [h1, h2]]  # normalize
    M = discretize.TreeMesh(h)
    M._refineCell([0, 0, 0])
    M._refineCell([0, 0, 1])
    M.number()
    # M.plotGrid(showIt=True)
    print(M)
    assert M.nhFx == 2
    assert M.nFx == 9

    assert np.allclose(M.vol.sum(), 1.0)

    assert np.allclose(np.r_[M._areaFxFull, M._areaFyFull], M._deflationMatrix('F') * M.area)
def test_faceDiv(self):

    hx, hy = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8]
    T = discretize.TreeMesh([hx, hy], levels=2)
    T.refine(lambda xc: 2)
    # T.plotGrid(showIt=True)
    M = discretize.TensorMesh([hx, hy])
    assert M.nC == T.nC
    assert M.nF == T.nF
    assert M.nFx == T.nFx
    assert M.nFy == T.nFy
    assert M.nE == T.nE
    assert M.nEx == T.nEx
    assert M.nEy == T.nEy

    assert np.allclose(M.area, T.permuteF*T.area)
    assert np.allclose(M.edge, T.permuteE*T.edge)
    assert np.allclose(M.vol, T.permuteCC*T.vol)

    # plt.subplot(211).spy(M.faceDiv)
    # plt.subplot(212).spy(T.permuteCC*T.faceDiv*T.permuteF.T)
    # plt.show()

    assert (M.faceDiv - T.permuteCC*T.faceDiv*T.permuteF.T).nnz == 0
def test_faceDiv(self):

    hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]
    M = discretize.TreeMesh([hx, hy, hz], levels=2)
    M.refine(lambda xc: 2)
    # M.plotGrid(showIt=True)
    Mr = discretize.TensorMesh([hx, hy, hz])
    assert M.nC == Mr.nC
    assert M.nF == Mr.nF
    assert M.nFx == Mr.nFx
    assert M.nFy == Mr.nFy
    assert M.nE == Mr.nE
    assert M.nEx == Mr.nEx
    assert M.nEy == Mr.nEy

    assert np.allclose(Mr.area, M.permuteF*M.area)
    assert np.allclose(Mr.edge, M.permuteE*M.edge)
    assert np.allclose(Mr.vol, M.permuteCC*M.vol)

    # plt.subplot(211).spy(Mr.faceDiv)
    # plt.subplot(212).spy(M.permuteCC*M.faceDiv*M.permuteF.T)
    # plt.show()

    assert (Mr.faceDiv - M.permuteCC*M.faceDiv*M.permuteF.T).nnz == 0
def test_VectorIdenties(self):
    hx, hy, hz = [[(1, 4)], [(1, 4)], [(1, 4)]]

    M = discretize.TreeMesh([hx, hy, hz], levels=2)
    Mr = discretize.TensorMesh([hx, hy, hz])

    assert (M.faceDiv * M.edgeCurl).nnz == 0
    assert (Mr.faceDiv * Mr.edgeCurl).nnz == 0

    hx, hy, hz = np.r_[1., 2, 3, 4], np.r_[5., 6, 7, 8], np.r_[9., 10, 11, 12]

    M = discretize.TreeMesh([hx, hy, hz], levels=2)
    Mr = discretize.TensorMesh([hx, hy, hz])

    assert np.max(np.abs((M.faceDiv * M.edgeCurl).todense().flatten())) < TOL
    assert np.max(np.abs((Mr.faceDiv * Mr.edgeCurl).todense().flatten())) < TOL
def test_orderN2CC(self):
    self.name = "Averaging 2D: N2CC"
    fun = lambda x, y: (np.cos(x)+np.sin(y))
    self.getHere = lambda M: call2(fun, M.gridN)
    self.getThere = lambda M: call2(fun, M.gridCC)
    self.getAve = lambda M: M.aveN2CC
    self.orderTest()

# def test_orderN2F(self):
#     self.name = "Averaging 2D: N2F"
#     fun = lambda x, y: (np.cos(x)+np.sin(y))
#     self.getHere = lambda M: call2(fun, M.gridN)
#     self.getThere = lambda M: np.r_[call2(fun, M.gridFx), call2(fun, M.gridFy)]
#     self.getAve = lambda M: M.aveN2F
#     self.orderTest()

# def test_orderN2E(self):
#     self.name = "Averaging 2D: N2E"
#     fun = lambda x, y: (np.cos(x)+np.sin(y))
#     self.getHere = lambda M: call2(fun, M.gridN)
#     self.getThere = lambda M: np.r_[call2(fun, M.gridEx), call2(fun, M.gridEy)]
#     self.getAve = lambda M: M.aveN2E
#     self.orderTest()
def test_orderF2CCV(self):
    self.name = "Averaging 2D: F2CCV"
    funX = lambda x, y: (np.cos(x)+np.sin(y))
    funY = lambda x, y: (np.cos(y)*np.sin(x))
    self.getHere = lambda M: np.r_[call2(funX, M.gridFx), call2(funY, M.gridFy)]
    self.getThere = lambda M: np.r_[call2(funX, M.gridCC), call2(funY, M.gridCC)]
    self.getAve = lambda M: M.aveF2CCV
    self.orderTest()

# def test_orderCC2F(self):
#     self.name = "Averaging 2D: CC2F"
#     fun = lambda x, y: (np.cos(x)+np.sin(y))
#     self.getHere = lambda M: call2(fun, M.gridCC)
#     self.getThere = lambda M: np.r_[call2(fun, M.gridFx), call2(fun, M.gridFy)]
#     self.getAve = lambda M: M.aveCC2F
#     self.expectedOrders = 1
#     self.orderTest()
#     self.expectedOrders = 2
def test_orderN2CC(self):
    self.name = "Averaging 3D: N2CC"
    fun = lambda x, y, z: (np.cos(x)+np.sin(y)+np.exp(z))
    self.getHere = lambda M: call3(fun, M.gridN)
    self.getThere = lambda M: call3(fun, M.gridCC)
    self.getAve = lambda M: M.aveN2CC
    self.orderTest()

# def test_orderN2F(self):
#     self.name = "Averaging 3D: N2F"
#     fun = lambda x, y, z: (np.cos(x)+np.sin(y)+np.exp(z))
#     self.getHere = lambda M: call3(fun, M.gridN)
#     self.getThere = lambda M: np.r_[call3(fun, M.gridFx), call3(fun, M.gridFy), call3(fun, M.gridFz)]
#     self.getAve = lambda M: M.aveN2F
#     self.orderTest()

# def test_orderN2E(self):
#     self.name = "Averaging 3D: N2E"
#     fun = lambda x, y, z: (np.cos(x)+np.sin(y)+np.exp(z))
#     self.getHere = lambda M: call3(fun, M.gridN)
#     self.getThere = lambda M: np.r_[call3(fun, M.gridEx), call3(fun, M.gridEy), call3(fun, M.gridEz)]
#     self.getAve = lambda M: M.aveN2E
#     self.orderTest()
def test_asArray_N_x_Dim(self):

    true = np.array([[1, 2, 3]])

    listArray = asArray_N_x_Dim([1, 2, 3], 3)
    self.assertTrue(np.all(true == listArray))
    self.assertTrue(true.shape == listArray.shape)

    listArray = asArray_N_x_Dim(np.r_[1, 2, 3], 3)
    self.assertTrue(np.all(true == listArray))
    self.assertTrue(true.shape == listArray.shape)

    listArray = asArray_N_x_Dim(np.array([[1, 2, 3.]]), 3)
    self.assertTrue(np.all(true == listArray))
    self.assertTrue(true.shape == listArray.shape)

    true = np.array([[1, 2], [4, 5]])

    listArray = asArray_N_x_Dim([[1, 2], [4, 5]], 2)
    self.assertTrue(np.all(true == listArray))
    self.assertTrue(true.shape == listArray.shape)
def test_mat_one(self):

    o = Identity()
    S = sdiag(np.r_[2, 3])

    def check(exp, ans):
        assert np.all((exp).todense() == ans)

    check(S * o, [[2, 0], [0, 3]])
    check(o * S, [[2, 0], [0, 3]])
    check(S * -o, [[-2, 0], [0, -3]])
    check(-o * S, [[-2, 0], [0, -3]])
    check(S/o, [[2, 0], [0, 3]])
    check(S/-o, [[-2, 0], [0, -3]])
    self.assertRaises(NotImplementedError, lambda: o/S)

    check(S + o, [[3, 0], [0, 4]])
    check(o + S, [[3, 0], [0, 4]])
    check(S - o, [[1, 0], [0, 2]])

    check(S + - o, [[1, 0], [0, 2]])
    check(- o + S, [[1, 0], [0, 2]])
def test_numpy_one(self):
    o = Identity()
    n = np.r_[2., 3]

    assert np.all(n+1 == n+o)
    assert np.all(1+n == o+n)
    assert np.all(n-1 == n-o)
    assert np.all(1-n == o-n)
    assert np.all(n/1 == n/o)
    assert np.all(n/-1 == n/-o)
    assert np.all(1/n == o/n)
    assert np.all(-1/n == -o/n)
    assert np.all(n*1 == n*o)
    assert np.all(n*-1 == n*-o)
    assert np.all(1*n == o*n)
    assert np.all(-1*n == -o*n)
def test_float64_pass(self):
    # The number of units of least precision
    # In this case, use a few places above the lowest level (ie nulp=1)
    nulp = 5
    x = np.linspace(-20, 20, 50, dtype=np.float64)
    x = 10**x
    x = np.r_[-x, x]

    # Addition
    eps = np.finfo(x.dtype).eps
    y = x + x*eps*nulp/2.
    assert_array_almost_equal_nulp(x, y, nulp)

    # Subtraction
    epsneg = np.finfo(x.dtype).epsneg
    y = x - x*epsneg*nulp/2.
    assert_array_almost_equal_nulp(x, y, nulp)
def test_complex64_pass(self):
    nulp = 5
    x = np.linspace(-20, 20, 50, dtype=np.float32)
    x = 10**x
    x = np.r_[-x, x]
    xi = x + x*1j

    eps = np.finfo(x.dtype).eps
    y = x + x*eps*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    y = x + x*eps*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)

    epsneg = np.finfo(x.dtype).epsneg
    y = x - x*epsneg*nulp/2.
    assert_array_almost_equal_nulp(xi, x + y*1j, nulp)
    assert_array_almost_equal_nulp(xi, y + x*1j, nulp)
    y = x - x*epsneg*nulp/4.
    assert_array_almost_equal_nulp(xi, y + y*1j, nulp)
def test_2d(self):
    # Tests mr_ on 2D arrays.
    a_1 = np.random.rand(5, 5)
    a_2 = np.random.rand(5, 5)
    m_1 = np.round_(np.random.rand(5, 5), 0)
    m_2 = np.round_(np.random.rand(5, 5), 0)
    b_1 = masked_array(a_1, mask=m_1)
    b_2 = masked_array(a_2, mask=m_2)
    # append columns
    d = mr_['1', b_1, b_2]
    self.assertTrue(d.shape == (5, 10))
    assert_array_equal(d[:, :5], b_1)
    assert_array_equal(d[:, 5:], b_2)
    assert_array_equal(d.mask, np.r_['1', m_1, m_2])
    d = mr_[b_1, b_2]
    self.assertTrue(d.shape == (10, 5))
    assert_array_equal(d[:5, :], b_1)
    assert_array_equal(d[5:, :], b_2)
    assert_array_equal(d.mask, np.r_[m_1, m_2])
def from_points(cls, p, q, r):
    abc = (p-r).cross(q-r)
    hp = np.r_[abc, -abc.dot(r)]
    return cls(hp)
def generateAllGaussianCenters(self, referenceSamples=None):
    """ Generates kernels in the region where the P(X_reference) takes large values """
    self.kernelBasis = referenceSamples.shape[1]
    return referenceSamples[:, numpy.r_[0:self.kernelBasis]]
def generateFirstNGaussianCenters(self, referenceSamples=None):
    """ Chooses the first N samples as Gaussian centers as an optimization """
    numcols = referenceSamples.shape[1]
    self.kernelBasis = min(self.kernelBasis, numcols)
    return referenceSamples[:, numpy.r_[0:self.kernelBasis]]
def predict(self, dataframe):
    def is_invalid(row):
        valid_cpf = cpf.validate(str(row['recipient_id']).zfill(11))
        valid_cnpj = cnpj.validate(str(row['recipient_id']).zfill(14))
        good_doctype = row['document_type'] in ('bill_of_sale', 'simple_receipt', 'unknown')
        return good_doctype and (not (valid_cpf or valid_cnpj))

    return np.r_[dataframe.apply(is_invalid, axis=1)]
def predict(self, X=None):
    self._X['is_over_monthly_subquota_limit'] = False
    for metadata in self.limits:
        data, monthly_limit = metadata['data'], metadata['monthly_limit']
        if len(data):
            surplus_reimbursements = self.__find_surplus_reimbursements(data, monthly_limit)
            self._X.loc[surplus_reimbursements.index,
                        'is_over_monthly_subquota_limit'] = True
    results = self._X.loc[self.X.index, 'is_over_monthly_subquota_limit']
    return np.r_[results]
def predict(self, X):
    statuses = ['BAIXADA', 'NULA', 'SUSPENSA', 'INAPTA']
    self._X = X.apply(self.__compare_date, axis=1)
    return np.r_[self._X & X['situation'].isin(statuses)]
def disp(name=None, idx=None):
    """displays selected data from (files written by) the class `CMADataLogger`.

    The call ``cma.disp(name, idx)`` is a shortcut for
    ``cma.CMADataLogger(name).disp(idx)``.

    Arguments
    ---------
    `name`
        name of the logger, filename prefix, `None` evaluates to
        the default ``'outcmaes'``
    `idx`
        indices corresponding to rows in the data file; by default
        the first five, then every 100-th, and the last 10 rows.
        Too large index values are removed.

    The best ever observed iteration is also printed by default.

    Examples
    --------
    ::

        import cma
        from numpy import r_
        # assume some data are available from previous runs
        cma.disp(None, r_[0, -1])  # first and last
        cma.disp(None, r_[0:int(1e9):100, -1])  # every 100-th and last
        cma.disp(idx=r_[0, -10:0])  # first and ten last
        cma.disp(idx=r_[0:int(1e9):1000, -10:0])

    :See also: `CMADataLogger.disp`

    """
    return CMADataLogger(name if name else CMADataLogger.default_prefix
                         ).disp(idx)

# END cmaplt.py
def vec(self):
    return np.r_[self.translation, self.quaternion]
def matrix(self):
    """:obj:`numpy.ndarray` of float: The canonical 4x4 matrix representation of
    this transform.

    The first three columns contain the columns of the rotation matrix followed
    by a zero, and the last column contains the translation vector followed by a one.
    """
    return np.r_[np.c_[self._rotation, self._translation], [[0, 0, 0, 1]]]
def test_inverse(self):
    R_a_b = RigidTransform.random_rotation()
    t_a_b = RigidTransform.random_translation()
    T_a_b = RigidTransform(R_a_b, t_a_b, 'a', 'b')
    T_b_a = T_a_b.inverse()

    # multiply with numpy arrays
    M_a_b = np.r_[np.c_[R_a_b, t_a_b], [[0, 0, 0, 1]]]
    M_b_a = np.linalg.inv(M_a_b)
    self.assertTrue(np.sum(np.abs(T_b_a.matrix - M_b_a)) < 1e-5,
                    msg='Inverse gave incorrect transformation')

    # check frames
    self.assertEqual(T_b_a.from_frame, 'b', msg='Inverse has incorrect input frame')
    self.assertEqual(T_b_a.to_frame, 'a', msg='Inverse has incorrect output frame')
def rank_bank_topic(bank_doc_map, doc_topic_dist):
    """Rank topics for banks
    """
    bank_topic_ranks = {}
    for each_bank in bank_doc_map:
        rank = []
        for each_doc in bank_doc_map[each_bank]:
            rank.append(calc_ranks(doc_topic_dist[each_doc]))
        rank = np.r_[rank]
        # compute ranking score
        bank_topic_ranks[each_bank] = np.mean(1. / rank, axis=0)

    return bank_topic_ranks
def train(args):
    corpus = load_corpus(args.input)
    n_vocab, docs = len(corpus['vocab']), corpus['docs']
    corpus.clear()  # save memory
    X_docs = []
    for k in docs.keys():
        X_docs.append(vecnorm(doc2vec(docs[k], n_vocab), 'logmax1', 0))
        del docs[k]

    np.random.seed(0)
    np.random.shuffle(X_docs)
    # X_docs_noisy = corrupted_matrix(np.r_[X_docs], 0.1)

    n_val = args.n_val
    # X_train = np.r_[X_docs[:-n_val]]
    # X_val = np.r_[X_docs[-n_val:]]
    X_train = np.r_[X_docs[:-n_val]]
    del X_docs[:-n_val]
    X_val = np.r_[X_docs]
    del X_docs

    start = timeit.default_timer()

    vae = VarAutoEncoder(n_vocab, args.n_dim, comp_topk=args.comp_topk,
                         ctype=args.ctype, save_model=args.save_model)
    vae.fit([X_train, X_train], [X_val, X_val],
            nb_epoch=args.n_epoch, batch_size=args.batch_size)

    print 'runtime: %ss' % (timeit.default_timer() - start)
def plot_tsne(doc_codes, doc_labels, classes_to_visual, save_file):
    # markers = ["D", "p", "*", "s", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"]
    plt.rc('legend', **{'fontsize': 30})
    classes_to_visual = list(set(classes_to_visual))
    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    class_ids = dict(zip(classes_to_visual, range(C)))

    if isinstance(doc_codes, dict) and isinstance(doc_labels, dict):
        codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items()
                              if doc_labels[doc] in classes_to_visual])
    else:
        codes, labels = doc_codes, doc_labels

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    plt.figure(figsize=(10, 10), facecolor='white')

    for c in classes_to_visual:
        idx = np.array(labels) == c
        # idx = get_indices(labels, c)
        plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=1,
                 marker=markers[class_ids[c]], markersize=10, label=c)
    legend = plt.legend(loc='upper right', shadow=True)
    # plt.title("tsne")
    # plt.savefig(save_file)
    plt.savefig(save_file, format='eps', dpi=2000)
    plt.show()
def plot_tsne_3d(doc_codes, doc_labels, classes_to_visual, save_file, maker_size=None, opaque=None):
    markers = ["D", "p", "*", "s", "d", "8", "^", "H", "v", ">", "<", "h", "|"]
    plt.rc('legend', **{'fontsize': 20})
    colors = ['r', 'b', 'g', 'c', 'm', 'y', 'k']
    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    while True:
        if C <= len(colors):
            break
        colors += colors
    class_ids = dict(zip(classes_to_visual, range(C)))

    if isinstance(doc_codes, dict) and isinstance(doc_labels, dict):
        codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items()
                              if doc_labels[doc] in classes_to_visual])
    else:
        codes, labels = doc_codes, doc_labels

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=3, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    fig = plt.figure(figsize=(10, 10), facecolor='white')
    ax = fig.add_subplot(111, projection='3d')

    # The problem is that the legend function don't support the type returned by a 3D scatter.
    # So you have to create a "dummy plot" with the same characteristics and put those in the legend.
    scatter_proxy = []
    for i in range(C):
        cls = classes_to_visual[i]
        idx = np.array(labels) == cls
        ax.scatter(X[idx, 0], X[idx, 1], X[idx, 2], c=colors[i],
                   alpha=opaque[i] if opaque else 1,
                   s=maker_size[i] if maker_size else 20,
                   marker=markers[i], label=cls)
        scatter_proxy.append(mpl.lines.Line2D([0], [0], linestyle="none",
                                              c=colors[i], marker=markers[i], label=cls))
    ax.legend(scatter_proxy, classes_to_visual, numpoints=1)
    plt.savefig(save_file)
    plt.show()
def DBN_visualize_pca_2d(doc_codes, doc_labels, classes_to_visual, save_file):
    """
    Visualize the input data on a 2D PCA plot. Depending on the number of components,
    the plot will contain an X amount of subplots.
    @param doc_codes:
    @param number_of_components: The number of principal components for the PCA plot.
    """
    # markers = ["p", "s", "h", "H", "+", "x", "D"]
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"]

    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    class_ids = dict(zip(classes_to_visual.keys(), range(C)))

    codes, labels = doc_codes, doc_labels

    X = np.r_[list(codes)]
    X = PCA(n_components=3).fit_transform(X)
    plt.figure(figsize=(10, 10), facecolor='white')

    x_pc, y_pc = 1, 2

    for c in classes_to_visual.keys():
        idx = np.array(labels) == c
        # idx = get_indices(labels, c)
        plt.plot(X[idx, x_pc], X[idx, y_pc], linestyle='None', alpha=0.6,
                 marker=markers[class_ids[c]], markersize=6, label=classes_to_visual[c])
        # plt.legend(c)
    plt.title('Projected on the first 2 PCs')
    plt.xlabel('PC %s' % x_pc)
    plt.ylabel('PC %s' % y_pc)
    # legend = plt.legend(loc='upper center', shadow=True)
    plt.savefig(save_file)
    plt.show()
def reuters_visualize_tsne(doc_codes, doc_labels, classes_to_visual, save_file):
    """
    Visualize the input data on a 2D PCA plot. Depending on the number of components,
    the plot will contain an X amount of subplots.
    @param doc_codes:
    @param number_of_components: The number of principal components for the PCA plot.
    """
    # markers = ["p", "s", "h", "H", "+", "x", "D"]
    markers = ["o", "v", "8", "s", "p", "*", "h", "H", "+", "x", "D"]

    C = len(classes_to_visual)
    while True:
        if C <= len(markers):
            break
        markers += markers
    class_names = classes_to_visual.keys()
    class_ids = dict(zip(class_names, range(C)))
    class_names = set(class_names)
    codes, labels = zip(*[(code, doc_labels[doc]) for doc, code in doc_codes.items()
                          if class_names.intersection(set(doc_labels[doc]))])

    X = np.r_[list(codes)]
    tsne = TSNE(perplexity=30, n_components=2, init='pca', n_iter=5000)
    np.set_printoptions(suppress=True)
    X = tsne.fit_transform(X)

    plt.figure(figsize=(10, 10), facecolor='white')

    for c in classes_to_visual.keys():
        idx = get_indices(labels, c)
        plt.plot(X[idx, 0], X[idx, 1], linestyle='None', alpha=0.6,
                 marker=markers[class_ids[c]], markersize=6, label=classes_to_visual[c])
    legend = plt.legend(loc='upper center', shadow=True)
    plt.title("tsne")
    plt.savefig(save_file)
    plt.show()