private static ListenableFuture<ResultSet> executeAdaptiveQueryAsync(Session session, Statement statement, int fetchSize, int remainingAdaptations) { statement.setFetchSize(fetchSize); ResultSetFuture rawFuture = session.executeAsync(statement); // Lazily wrap the result set from the async result with an AdaptiveResultSet ListenableFuture<ResultSet> adaptiveFuture = Futures.transform(rawFuture, new Function<ResultSet, ResultSet>() { @Override public ResultSet apply(ResultSet resultSet) { return new AdaptiveResultSet(session, resultSet, remainingAdaptations); } }); return Futures.withFallback(adaptiveFuture, t -> { if (isAdaptiveException(t) && remainingAdaptations > 0 && fetchSize > MIN_FETCH_SIZE) { // Try again with half the fetch size int reducedFetchSize = Math.max(fetchSize / 2, MIN_FETCH_SIZE); _log.debug("Repeating previous query with fetch size {} due to {}", reducedFetchSize, t.getMessage()); return executeAdaptiveQueryAsync(session, statement, reducedFetchSize, remainingAdaptations - 1); } throw Throwables.propagate(t); }); }
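The recursive helper above is private, so callers presumably go through a public overload that seeds the initial fetch size and the adaptation budget. A minimal caller sketch under that assumption (DEFAULT_FETCH_SIZE, MAX_ADAPTATIONS and the callback body are hypothetical, not part of the original class):

    // Hypothetical public entry point; DEFAULT_FETCH_SIZE and MAX_ADAPTATIONS are assumed constants.
    public static ListenableFuture<ResultSet> executeAdaptiveQueryAsync(Session session, Statement statement) {
        return executeAdaptiveQueryAsync(session, statement, DEFAULT_FETCH_SIZE, MAX_ADAPTATIONS);
    }

    // Illustrative consumption of the adaptive future.
    ListenableFuture<ResultSet> future = executeAdaptiveQueryAsync(session, new SimpleStatement("SELECT * FROM ks.wide_table"));
    Futures.addCallback(future, new FutureCallback<ResultSet>() {
        @Override public void onSuccess(ResultSet rows) { /* iterate rows; paging continues with the adapted fetch size */ }
        @Override public void onFailure(Throwable t) { _log.error("Adaptive query failed", t); }
    });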
@Bean public Session createSession(CassandraProperties properties, Cluster cluster) throws Exception { Session session = Retriable.wrap(cluster::connect) .withErrorMessage("Cannot connect to cassandra cluster") .retryOn(NoHostAvailableException.class) .withDelaySec(properties.getConnectDelaySec()) .call(); initDb(properties, session); if (!session.getCluster().getMetadata().checkSchemaAgreement()) { log.warn("SCHEMA IS NOT IN AGREEMENT!!!"); } return session; }
@Test public void testDelayOnPreparedStatementWhenIgnoreOnPrepareIsFalse() throws Exception { Prime prime = PrimeDsl.when("select * from table where c1=?") .then(noRows()) .delay(2, TimeUnit.SECONDS) .applyToPrepare() .build(); HttpTestResponse response = server.prime(prime.getPrimedRequest()); assertNotNull(response); RequestPrime responseQuery = om.readValue(response.body, RequestPrime.class); assertThat(responseQuery).isEqualTo(prime.getPrimedRequest()); String contactPoint = HttpTestUtil.getContactPointString(server.getCluster(), 0); try (com.datastax.driver.core.Cluster cluster = defaultBuilder().addContactPoint(contactPoint).build()) { Session session = cluster.connect(); long start = System.currentTimeMillis(); session.prepare("select * from table where c1=?"); long duration = System.currentTimeMillis() - start; // should have taken longer than 2 seconds. assertThat(duration).isGreaterThan(2000); } }
private void primeAndExecuteQueries(String[] primed, String[] queries) throws Exception { SuccessResult result = getSampleSuccessResult(); for (String primeQuery : primed) { server.prime(when(primeQuery).then(result)); } try (com.datastax.driver.core.Cluster driverCluster = defaultBuilder(server.getCluster()) .withRetryPolicy(FallthroughRetryPolicy.INSTANCE) .build()) { Session session = driverCluster.connect(); server.getCluster().clearLogs(); for (String executeQuery : queries) { SimpleStatement stmt = new SimpleStatement(executeQuery); stmt.setDefaultTimestamp(100); session.execute(stmt); } } }
public static Session getClientSession(String hostAddr) {
    if (REGISTRY.containsKey(hostAddr)) {
        return REGISTRY.get(hostAddr);
    } else {
        Cluster.Builder clientClusterBuilder = new Cluster.Builder()
                .addContactPoint(hostAddr)
                .withQueryOptions(new QueryOptions()
                        .setConsistencyLevel(ConsistencyLevel.ONE)
                        .setSerialConsistencyLevel(ConsistencyLevel.LOCAL_SERIAL))
                .withoutJMXReporting()
                .withoutMetrics()
                .withReconnectionPolicy(new ConstantReconnectionPolicy(RECONNECT_DELAY_IN_MS));
        long startTimeInMillis = System.currentTimeMillis();
        Cluster clientCluster = clientClusterBuilder.build();
        Session clientSession = clientCluster.connect();
        LOG.info("Client session established after {} ms.", System.currentTimeMillis() - startTimeInMillis);
        // If another thread registered a session first, discard the redundant connection
        // and return the one already in the registry.
        Session existing = REGISTRY.putIfAbsent(hostAddr, clientSession);
        if (existing != null) {
            clientCluster.close();
            return existing;
        }
        return clientSession;
    }
}
public static Session createSession(String ip, int port) { Cluster cluster; cluster = Cluster.builder() .addContactPoint(ip) .withPort(port) .build(); Session session = cluster.connect(); session.execute("CREATE KEYSPACE IF NOT EXISTS cassandrait WITH REPLICATION = { 'class' : 'SimpleStrategy', 'replication_factor' : 1 }"); session.execute("DROP TABLE IF EXISTS cassandrait.counter"); session.execute("CREATE TABLE cassandrait.counter (key text, value counter, PRIMARY key(key));"); return session; }
@Override
public Status selectByDevice(TsPoint point, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        // cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        // Session session = cluster.connect(KEY_SPACE_NAME);
        Session session = SessionManager.getSession();
        String selectCql = "SELECT * FROM point WHERE device_code='" + point.getDeviceCode()
                + "' and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        // System.out.println(selectCql);
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}

@Override
public Status selectByDeviceAndSensor(TsPoint point, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        // cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        // Session session = cluster.connect(KEY_SPACE_NAME);
        Session session = SessionManager.getSession();
        String selectCql = "SELECT * FROM point WHERE device_code='" + point.getDeviceCode()
                + "' and sensor_code='" + point.getSensorCode()
                + "' and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        // System.out.println(selectCql);
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}
@Override
public Status selectByDeviceAndSensor(TsPoint point, Double max, Double min, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        Session session = cluster.connect(KEY_SPACE_NAME);
        String createIndexCql = "CREATE INDEX IF NOT EXISTS value_index ON " + TABLE_NAME + "(value)";
        // System.out.println(createIndexCql);
        session.execute(createIndexCql);
        String selectCql = "SELECT * FROM point WHERE device_code='" + point.getDeviceCode()
                + "' and sensor_code='" + point.getSensorCode()
                + "' and value<" + max + " and value>" + min
                + " and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        // System.out.println(selectCql);
        // Time only the SELECT itself, not the index creation above.
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}
@Override
public Status selectMaxByDeviceAndSensor(String deviceCode, String sensorCode, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        // cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        // Session session = cluster.connect(KEY_SPACE_NAME);
        Session session = SessionManager.getSession();
        String selectCql = "SELECT MAX(value) FROM point WHERE device_code='" + deviceCode
                + "' and sensor_code='" + sensorCode
                + "' and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}

@Override
public Status selectMinByDeviceAndSensor(String deviceCode, String sensorCode, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        Session session = cluster.connect(KEY_SPACE_NAME);
        String selectCql = "SELECT MIN(value) FROM point WHERE device_code='" + deviceCode
                + "' and sensor_code='" + sensorCode
                + "' and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        // System.out.println(selectCql);
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}

@Override
public Status selectAvgByDeviceAndSensor(String deviceCode, String sensorCode, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        Session session = cluster.connect(KEY_SPACE_NAME);
        String selectCql = "SELECT AVG(value) FROM point WHERE device_code='" + deviceCode
                + "' and sensor_code='" + sensorCode
                + "' and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        // System.out.println(selectCql);
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}

@Override
public Status selectCountByDeviceAndSensor(String deviceCode, String sensorCode, Date startTime, Date endTime) {
    long costTime = 0L;
    Cluster cluster = null;
    try {
        cluster = Cluster.builder().addContactPoint(CASSADRA_URL).build();
        Session session = cluster.connect(KEY_SPACE_NAME);
        String selectCql = "SELECT COUNT(*) FROM point WHERE device_code='" + deviceCode
                + "' and sensor_code='" + sensorCode
                + "' and timestamp>=" + startTime.getTime()
                + " and timestamp<=" + endTime.getTime() + " ALLOW FILTERING";
        // System.out.println(selectCql);
        long startTime1 = System.nanoTime();
        ResultSet rs = session.execute(selectCql);
        long endTime1 = System.nanoTime();
        costTime = endTime1 - startTime1;
    } finally {
        if (cluster != null)
            cluster.close();
    }
    // System.out.println("Time consumed by this query: [" + costTime / 1000 + "] µs");
    return Status.OK(costTime);
}
public static boolean untilApplied(Session session, BatchStatement.Type type, Consumer<BatchStatement> transaction) {
    for (int i = 1; i <= MAX_RETRY; i++) {
        BatchStatement batchStatement = new BatchStatement(type);
        transaction.accept(batchStatement);
        if (batchStatement.size() == 0)
            return false;
        boolean applied;
        if (batchStatement.size() > 1) {
            applied = session.execute(batchStatement).wasApplied();
        } else {
            // A single-statement batch is executed directly to avoid the batch overhead.
            Statement statement = Iterables.getOnlyElement(batchStatement.getStatements());
            applied = session.execute(statement).wasApplied();
        }
        if (applied)
            return true;
        log.warn("Attempt {}/{} failed executing {}", i, MAX_RETRY, batchStatement);
        try {
            Thread.sleep(100 * i);
        } catch (InterruptedException e) {
            // Restore the interrupt flag before giving up.
            Thread.currentThread().interrupt();
            throw new AttemptsFailedException(e);
        }
    }
    throw new AttemptsFailedException();
}
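As a usage illustration (not taken from the original code), a caller passes a Consumer that fills the batch with conditional statements; preparedInsert, userId and userName below are assumed to exist in the caller's scope.

    // Hypothetical caller: retry a lightweight-transaction insert until it is applied.
    boolean applied = untilApplied(session, BatchStatement.Type.LOGGED, batch ->
            batch.add(preparedInsert.bind(userId, userName)));
    if (!applied) {
        // The consumer added no statements, so nothing was executed.
    }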
public static void executeSchemaCql(Session session, boolean deleteData) { try { URL schema = Resources.getResource("schema.cql"); String schemaCql = Resources.toString(schema, Charset.forName("UTF-8")); schemaCql = schemaCql.replaceAll("(?m)//.*$", ""); String[] statements = schemaCql.split(";"); if (deleteData) { dropSchema(session); } for (String statement : statements) { statement = statement.trim(); if (statement.isEmpty()) continue; executeWithLog(session, statement); } } catch (IOException e) { throw new RuntimeException(e); } }
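The helper above delegates to dropSchema and executeWithLog, which are not shown. A minimal sketch of what executeWithLog might look like (the logger field is an assumption, and the real helper may differ):

    // Plausible sketch only.
    private static void executeWithLog(Session session, String statement) {
        log.info("Executing schema statement: {}", statement);
        session.execute(statement);
    }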
@Override public long uploadPackage(DataPackage dataPack) { long time = System.currentTimeMillis(); try { Session session; Cluster cluster; cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); session = cluster.connect(); ByteBuffer buffer = ByteBuffer.wrap(dataPack.getData()); Statement statement = QueryBuilder.insertInto(DATABASE, MAIN_TABLE) .value(COL_ID, time) .value(COL_DATA, buffer) .value(COL_DESC, dataPack.getDescription()); session.execute(statement); } catch (Exception ex) { System.out.println(ex.getMessage()); } return time; }
@Override
public DataPackage downloadPackage(long packageID) {
    DataPackage dataPack = new DataPackage();
    try {
        Session session;
        Cluster cluster;
        cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
        session = cluster.connect();
        Statement statement = QueryBuilder.select()
                .all()
                .from(DATABASE, MAIN_TABLE)
                .where(eq(COL_ID, packageID));
        ResultSet results = session.execute(statement);
        for (Row row : results) {
            dataPack.setId(row.getLong(COL_ID));
            dataPack.setDescription(row.getString(COL_DESC));
            // Copy the blob contents; ByteBuffer.array() may expose extra bytes from the
            // backing array (or fail for non-array-backed buffers), so use the driver helper.
            dataPack.setData(Bytes.getArray(row.getBytes(COL_DATA)));
        }
    } catch (Exception ex) {
        System.out.println(ex.getMessage());
    }
    return dataPack;
}
@Override public List<DataPackage> listPackages() { List<DataPackage> dataPacks = new ArrayList<>(); try { Session session; Cluster cluster; cluster = Cluster.builder().addContactPoint("127.0.0.1").build(); session = cluster.connect(); Statement statement = QueryBuilder.select() .all() .from(DATABASE, MAIN_TABLE); ResultSet results = session.execute(statement); for(Row row : results) { DataPackage dataPack = new DataPackage(); dataPack.setId(row.getLong(COL_ID)); dataPack.setDescription(row.getString(COL_DESC)); dataPacks.add(dataPack); } } catch (Exception ex) { System.out.println(ex.getMessage()); } return dataPacks; }
/** * Returns Cassandra driver session to sessions pool. * * @param cassandraSes Session wrapper. * @param driverSes Driver session. */ public static void put(CassandraSessionImpl cassandraSes, Session driverSes) { if (cassandraSes == null || driverSes == null) { return; } SessionWrapper old; synchronized (sessions) { old = sessions.put(cassandraSes, new SessionWrapper(driverSes)); if (monitorSingleton == null || State.TERMINATED.equals(monitorSingleton.getState())) { monitorSingleton = new SessionMonitor(); monitorSingleton.setDaemon(true); monitorSingleton.setName("Cassandra-sessions-pool"); monitorSingleton.start(); } } if (old != null) { old.release(); } }
/** * @return Cassandra driver session. */ private synchronized Session session() { if (ses != null) { return ses; } ses = SessionPool.get(this); if (ses != null) { return ses; } synchronized (sesStatements) { sesStatements.clear(); } try { return ses = builder.build().connect(); } catch (Throwable e) { throw new IgniteException("Failed to establish session with Cassandra database", e); } }
/** */ private static synchronized Session adminSession() { if (adminSes != null) { return adminSes; } try { Cluster.Builder builder = Cluster.builder(); builder = builder.withCredentials(getAdminUser(), getAdminPassword()); builder.addContactPoints(getContactPoints()); builder.addContactPointsWithPorts(getContactPointsWithPorts()); adminCluster = builder.build(); return adminSes = adminCluster.connect(); } catch (Throwable e) { throw new RuntimeException("Failed to create admin session to Cassandra database", e); } }
/** */ private static synchronized Session regularSession() { if (regularSes != null) { return regularSes; } try { Cluster.Builder builder = Cluster.builder(); builder = builder.withCredentials(getRegularUser(), getRegularPassword()); builder.addContactPoints(getContactPoints()); builder.addContactPointsWithPorts(getContactPointsWithPorts()); regularCluster = builder.build(); return regularSes = regularCluster.connect(); } catch (Throwable e) { throw new RuntimeException("Failed to create regular session to Cassandra database", e); } }
@Override
public ResultSet<CassandraDBContext> execute(Query<CassandraDBContext> query) throws QueryExecutionException {
    try (Cluster cassandraConnection = buildConnection()) {
        final Metadata metadata = cassandraConnection.getMetadata();
        System.out.printf("Connected to cluster: %s%n", metadata.getClusterName());
        for (final Host host : metadata.getAllHosts()) {
            System.out.printf("Datacenter: %s; Host: %s; Rack: %s%n",
                    host.getDatacenter(), host.getAddress(), host.getRack());
        }
        try (Session session = cassandraConnection.connect()) {
            String queryToExecute = query.getQuery();
            System.out.println(queryToExecute);
            com.datastax.driver.core.ResultSet resultSet = session.execute(queryToExecute);
            printResultSet(resultSet);
            ExecutionInfo executionInfo = resultSet.getExecutionInfo();
            System.out.println(executionInfo);
        }
    }
    // There is no result set to return for this use case.
    return new CassandraResultSet();
}
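buildConnection() is referenced but not shown. A minimal sketch under the assumption that it simply builds a Cluster for a locally configured contact point (the address and port are placeholders):

    // Hypothetical helper; contact point and port are assumptions.
    private Cluster buildConnection() {
        return Cluster.builder()
                .addContactPoint("127.0.0.1")
                .withPort(9042)
                .build();
    }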
TracingSession(CassandraClientTracing cassandraTracing, Session target) { if (cassandraTracing == null) throw new NullPointerException("cassandraTracing == null"); if (target == null) throw new NullPointerException("target == null"); this.delegate = target; tracer = cassandraTracing.tracing().tracer(); sampler = cassandraTracing.sampler(); parser = cassandraTracing.parser(); String remoteServiceName = cassandraTracing.remoteServiceName(); this.remoteServiceName = remoteServiceName != null ? remoteServiceName : target.getCluster().getClusterName(); injector = cassandraTracing.tracing().propagation().injector((carrier, key, v) -> { if (v == null) { // for example, if injecting a null parent id field carrier.remove(key); return; } int length = v.length(); // all values are ascii byte[] buf = new byte[length]; for (int i = 0; i < length; i++) { buf[i] = (byte) v.charAt(i); } carrier.put(key, ByteBuffer.wrap(buf)); }); version = delegate.getCluster().getConfiguration().getProtocolOptions().getProtocolVersion(); }
static Session connect() {
    String contactPoint = "localhost";
    String keySpace = "ks1";
    if (session == null) {
        PoolingOptions poolingOptions = new PoolingOptions().setConnectionsPerHost(HostDistance.REMOTE, 1, 4);
        cluster = Cluster.builder().addContactPoint(contactPoint).withPoolingOptions(poolingOptions)
                .withCompression(Compression.SNAPPY).build();
        cluster.init();
        for (Host host : cluster.getMetadata().getAllHosts()) {
            System.out.printf("Address: %s, Rack: %s, Datacenter: %s, Tokens: %s\n",
                    host.getAddress(), host.getRack(), host.getDatacenter(), host.getTokens());
        }
        session = cluster.connect(keySpace);
    }
    return session;
}

static Session connect() {
    String contactPoint = "localhost";
    String keySpace = "ks1";
    if (session == null) {
        RetryPolicy retryPolicy = new CustomRetryPolicy(3, 3, 2);
        cluster = Cluster.builder().addContactPoint(contactPoint)
                .withRetryPolicy(retryPolicy).build();
        cluster.init();
        for (Host host : cluster.getMetadata().getAllHosts()) {
            System.out.printf("Address: %s, Rack: %s, Datacenter: %s, Tokens: %s\n",
                    host.getAddress(), host.getRack(), host.getDatacenter(), host.getTokens());
        }
        // Open the session; without this the method always returned null.
        session = cluster.connect(keySpace);
    }
    return session;
}

static Session connect() {
    String contactPoint = "localhost";
    String keySpace = "ks1";
    if (session == null) {
        DCAwareRoundRobinPolicy dcAwarePolicy = new DCAwareRoundRobinPolicy.Builder().build();
        LoadBalancingPolicy policy = new TokenAwarePolicy(dcAwarePolicy);
        cluster = Cluster.builder().addContactPoint(contactPoint)
                .withLoadBalancingPolicy(policy).build();
        cluster.init();
        for (Host host : cluster.getMetadata().getAllHosts()) {
            System.out.printf("Address: %s, Rack: %s, Datacenter: %s, Tokens: %s\n",
                    host.getAddress(), host.getRack(), host.getDatacenter(), host.getTokens());
        }
        // Open the session; without this the method always returned null.
        session = cluster.connect(keySpace);
    }
    return session;
}
public static void main(String[] args) { Session session = Connection.connect(); PreparedStatement preparedStatement = session.prepare("insert into user (id, name, age) values (?, ?, ?)"); try { BoundStatement boundStatement = preparedStatement.bind(UUIDs.timeBased(), "Hector", 34); ResultSet rs = session.execute(boundStatement); System.out.println(rs); } catch (Exception ex) { ex.printStackTrace(); } Connection.close(); }
public static void main(String[] args) { Session session = Connection.connect(); BatchStatement batchStatement = new BatchStatement(); PreparedStatement preparedStatement = session.prepare("insert into user (id, name) values (?, ?)"); int i = 0; while(i < 10) { batchStatement.add(preparedStatement.bind(UUIDs.timeBased(), "user-" + i)); ++i; } try { ResultSet rs = session.execute(batchStatement); System.out.println(rs); } catch (Exception ex) { ex.printStackTrace(); } Connection.close(); }
protected Session getSession() { if (session == null) { session = cluster.getSession(); defaultReadLevel = cluster.getDefaultReadConsistencyLevel(); defaultWriteLevel = cluster.getDefaultWriteConsistencyLevel(); CodecRegistry registry = session.getCluster().getConfiguration().getCodecRegistry(); registerCodecIfNotFound(registry, new JsonCodec()); registerCodecIfNotFound(registry, new DeviceCredentialsTypeCodec()); registerCodecIfNotFound(registry, new AuthorityCodec()); registerCodecIfNotFound(registry, new ComponentLifecycleStateCodec()); registerCodecIfNotFound(registry, new ComponentTypeCodec()); registerCodecIfNotFound(registry, new ComponentScopeCodec()); registerCodecIfNotFound(registry, new EntityTypeCodec()); } return session; }
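registerCodecIfNotFound(...) is assumed by the snippet above but not shown. A plausible sketch: probe the registry for an existing codec covering the same CQL/Java type pair and register the candidate only when the lookup fails.

    // Sketch only; the project's actual helper may differ.
    private void registerCodecIfNotFound(CodecRegistry registry, TypeCodec<?> codec) {
        try {
            registry.codecFor(codec.getCqlType(), codec.getJavaType());
        } catch (CodecNotFoundException e) {
            registry.register(codec);
        }
    }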
@AfterClass
public static void oneTimeTearDown() throws UnknownHostException {
    // Give pending writes a moment to land before cleaning up.
    try {
        Thread.sleep(3000);
    } catch (InterruptedException e) {
        Thread.currentThread().interrupt();
    }
    Session session = getSession();
    Set<Class<? extends SensorData>> sensorDataClasses = new Reflections(
            "de.tudarmstadt.informatik.tk.assistanceplatform.data")
            .getSubTypesOf(SensorData.class);
    for (Class<? extends SensorData> c : sensorDataClasses) {
        for (int i = 0; i < 100; i++) {
            session.execute("DELETE FROM " + c.getAnnotation(Table.class).name()
                    + " WHERE user_id = 0 AND device_id = 0");
        }
    }
}
@Override protected boolean matchesSafely(PipelineResult pipelineResult) { pipelineResult.waitUntilFinish(); Session session = cluster.connect(); ResultSet result = session.execute("select id,name from " + CassandraTestDataSet.KEYSPACE + "." + tableName); List<Row> rows = result.all(); if (rows.size() != 1000) { return false; } for (Row row : rows) { if (!row.getString("name").matches("Name.*")) { return false; } } return true; }
/** * Creates the database based off of a passed in CQL file. WARNING: Be * careful, this could erase data if you are not cautious. Ignores comment * lines (lines that start with "//"). * * @param cqlPath path to the CQl file you wish to use to init the database. * @param session Database session * * @throws IOException if it can't read from the CQL file for some reason. */ @Deprecated public static void initDatabase(String cqlPath, Session session) throws IOException { logger.warn("Initing database from CQL file: " + cqlPath); InputStream cqlStream = Utils.class.getResourceAsStream(cqlPath); String cql = IOUtils.toString(cqlStream); String[] statements = cql.split("\\Q;\\E"); for (String statement : statements) { statement = statement.trim(); statement = statement.replaceAll("\\Q\n\\E", " "); if (!statement.equals("") && !statement.startsWith("//"))//don't count comments { logger.info("Executing CQL statement: " + statement); session.execute(statement); } } }
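A short usage sketch for the deprecated helper above; the classpath location of the CQL file and the contact point are assumptions.

    // Hypothetical caller; the enclosing method is assumed to declare throws IOException.
    try (Cluster cluster = Cluster.builder().addContactPoint("127.0.0.1").build();
         Session session = cluster.connect()) {
        Utils.initDatabase("/cql/create-tables.cql", session);
    }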
/** * Test with incoming message containing a header with RegularStatement. */ @Test public void testRequestMessageStatement() throws Exception { Update.Where update = update("camel_user") .with(set("first_name", "Claus 2")) .and(set("last_name", "Ibsen 2")) .where(eq("login", "c_ibsen")); Object response = producerTemplate.requestBodyAndHeader(null, CassandraConstants.CQL_QUERY, update); Cluster cluster = CassandraUnitUtils.cassandraCluster(); Session session = cluster.connect(CassandraUnitUtils.KEYSPACE); ResultSet resultSet = session.execute("select login, first_name, last_name from camel_user where login = ?", "c_ibsen"); Row row = resultSet.one(); assertNotNull(row); assertEquals("Claus 2", row.getString("first_name")); assertEquals("Ibsen 2", row.getString("last_name")); session.close(); cluster.close(); }
/** * Creates a session and ensures schema if configured. Closes the cluster and session if any * exception occurred. */ @Override public Session create(CassandraStorage cassandra) { Closer closer = Closer.create(); try { Cluster cluster = closer.register(buildCluster(cassandra)); cluster.register(new QueryLogger.Builder().build()); if (cassandra.ensureSchema) { Session session = closer.register(cluster.connect()); Schema.ensureExists(cassandra.keyspace, session); session.execute("USE " + cassandra.keyspace); return session; } else { return cluster.connect(cassandra.keyspace); } } catch (RuntimeException e) { try { closer.close(); } catch (IOException ignored) { } throw e; } }
@Override public void doFilter(ServletRequest req, ServletResponse resp, FilterChain chain) throws IOException, ServletException { @SuppressWarnings("unchecked") TRANS trans = (TRANS)req.getAttribute(TransFilter.TRANS_TAG); try { Pooled<Session> psess = pool.get(); try { trans.put(sessionSlot, psess.content); chain.doFilter(req, resp); } finally { psess.done(); } } catch (APIException e) { throw new ServletException(e); } }
/** * Remove the entries from the dirty row (for this replica) that correspond to a set of primary keys * @param tableName the table we are removing dirty entries from * @param keys the primary key values to use in the DELETE. Note: this is *only* the primary keys, not a full table row. */ @Override public void cleanDirtyRow(String tableName, Object[] keys) { TableInfo ti = dbi.getTableInfo(tableName); StringBuilder cols = new StringBuilder("REPLICA__=?"); List<Object> vallist = new ArrayList<Object>(); vallist.add(myId); int n = 0; for (int i = 0; i < ti.columns.size(); i++) { if (ti.iskey.get(i)) { cols.append(" AND ").append(ti.columns.get(i)).append("=?"); vallist.add(keys[n++]); } } String cql = String.format("DELETE FROM %s.DIRTY_%s WHERE %s;", music_ns, tableName, cols.toString()); logger.debug("Executing MUSIC write:"+ cql); Session sess = getMusicSession(); PreparedStatement ps = getPreparedStatementFromCache(cql); BoundStatement bound = ps.bind(vallist.toArray()); bound.setReadTimeoutMillis(60000); synchronized (sess) { sess.execute(bound); } }
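getPreparedStatementFromCache(...) is referenced above but not shown. A plausible sketch, assuming a simple per-CQL-string cache (the field name is hypothetical):

    // Sketch only: prepare each CQL string once and reuse the PreparedStatement afterwards.
    private final ConcurrentMap<String, PreparedStatement> psCache = new ConcurrentHashMap<>();

    private PreparedStatement getPreparedStatementFromCache(String cql) {
        return psCache.computeIfAbsent(cql, q -> getMusicSession().prepare(q));
    }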
@BeforeClass public static void startServer() throws InterruptedException, TTransportException, ConfigurationException, IOException { EmbeddedCassandraServerHelper.startEmbeddedCassandra(); Cluster cluster = new Cluster.Builder().addContactPoints("127.0.0.1").withPort(9142).build(); Session session = cluster.connect(); CQLDataLoader dataLoader = new CQLDataLoader(session); dataLoader.load(new ClassPathCQLDataSet("config/cql/create-tables.cql", true, "cassandra_unit_keyspace")); }
public static List<BoundStatement> generateDocumentDeleteIndexEntriesStatements(Session session, Document entity, BucketLocator bucketLocator) throws IndexParseException {
    // check for any indexes that should exist on this table, according to the index table
    List<Index> indices = getIndexForDocument(session, entity);
    ArrayList<BoundStatement> statementList = new ArrayList<>(indices.size());
    // for each index, generate the corresponding delete statement
    for (Index index : indices) {
        BoundStatement bs = generateDocumentDeleteIndexEntryStatement(session, index, entity.getObject(), bucketLocator);
        if (bs != null) {
            statementList.add(bs);
        }
    }
    return statementList;
}
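A hypothetical follow-up showing how the returned statements might be applied; session, document and bucketLocator are assumed to be in scope, and IndexParseException is assumed to be handled by the caller.

    // Group the per-index deletes into one unlogged batch and execute it.
    BatchStatement batch = new BatchStatement(BatchStatement.Type.UNLOGGED);
    batch.addAll(generateDocumentDeleteIndexEntriesStatements(session, document, bucketLocator));
    session.execute(batch);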
/** * Test with incoming message containing a header with RegularStatement. */ @Test public void testRequestMessageStatement() throws Exception { Update.Where update = update("camel_user") .with(set("first_name", bindMarker())) .and(set("last_name", bindMarker())) .where(eq("login", bindMarker())); Object response = producerTemplate.requestBodyAndHeader(new Object[]{"Claus 2", "Ibsen 2", "c_ibsen"}, CassandraConstants.CQL_QUERY, update); Cluster cluster = CassandraUnitUtils.cassandraCluster(); Session session = cluster.connect(CassandraUnitUtils.KEYSPACE); ResultSet resultSet = session.execute("select login, first_name, last_name from camel_user where login = ?", "c_ibsen"); Row row = resultSet.one(); assertNotNull(row); assertEquals("Claus 2", row.getString("first_name")); assertEquals("Ibsen 2", row.getString("last_name")); session.close(); cluster.close(); }