@Override
public Token nextToken() {
    if (stashedNext != null) {
        previous = stashedNext;
        stashedNext = null;
        return previous;
    }
    Token next = super.nextToken();
    if (insertSemicolon(previous, next)) {
        stashedNext = next;
        previous = _factory.create(new Pair<TokenSource, CharStream>(this, _input),
            PainlessLexer.SEMICOLON, ";", Lexer.DEFAULT_TOKEN_CHANNEL,
            next.getStartIndex(), next.getStopIndex(), next.getLine(),
            next.getCharPositionInLine());
        return previous;
    } else {
        previous = next;
        return next;
    }
}
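// Hedged sketch, not part of the source above: a minimal helper showing the same
// TokenFactory call shape used when synthesizing the semicolon token. The helper name,
// the Lexer parameter, and the zero positions are placeholder assumptions for
// illustration; CommonTokenFactory.DEFAULT, Lexer.DEFAULT_TOKEN_CHANNEL, and
// PainlessLexer.SEMICOLON are the stock identifiers already referenced above.
static Token makeSyntheticSemicolon(Lexer lexer) {
    Pair<TokenSource, CharStream> source =
        new Pair<TokenSource, CharStream>(lexer, lexer.getInputStream());
    return CommonTokenFactory.DEFAULT.create(source, PainlessLexer.SEMICOLON, ";",
        Lexer.DEFAULT_TOKEN_CHANNEL, 0 /* start */, 0 /* stop */, 1 /* line */,
        0 /* charPositionInLine */);
}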
private void validateReturnClause() {
    for (String variable : structuredQuery.getReturnVariables()) {
        checkVariableIsDefined(variable, UNDEFINED_VARIABLE_IN_RETURN_CLAUSE_ERROR_MESSAGE);
    }
    for (Pair<String, String> variablePropertyPair :
            structuredQuery.getReturnVariablePropertyPairs()) {
        checkVariableIsDefinedAndPropertyExists(variablePropertyPair);
    }
    for (QueryAggregation queryAggregation : structuredQuery.getQueryAggregations()) {
        if (null != queryAggregation.getVariable()) {
            checkVariableIsDefined(queryAggregation.getVariable(),
                UNDEFINED_VARIABLE_IN_RETURN_CLAUSE_ERROR_MESSAGE);
        } else if (null != queryAggregation.getVariablePropertyPair()) {
            checkVariableIsDefinedAndPropertyExists(queryAggregation.getVariablePropertyPair());
        }
    }
}
private AbstractStructuredQuery visitVariableVertex(StructuredQuery structuredQuery,
    VariableVertexContext ctx) {
    QueryVariable queryVariable = new QueryVariable(ctx.variable().getText());
    TypeContext typeContext = ctx.type();
    if (null != typeContext) {
        queryVariable.setVariableType(typeContext.variable().getText());
    }
    PropertiesContext propertiesContext = ctx.properties();
    if (null != propertiesContext) {
        Map<String, Pair<String, String>> variablePropertyFilters =
            parseProperties(propertiesContext);
        ComparisonPredicate comparisonPredicate;
        for (String key : variablePropertyFilters.keySet()) {
            comparisonPredicate = new ComparisonPredicate();
            comparisonPredicate.setLeftOperand(new Pair<>(ctx.variable().getText(), key));
            comparisonPredicate.setLiteral(variablePropertyFilters.get(key).b);
            comparisonPredicate.setComparisonOperator(ComparisonOperator.EQUALS);
            comparisonPredicate.setPredicateType(
                PredicateType.COMPARATIVE_CLAUSE_PROPERTY_KEY_AND_LITERAL_OPERANDS);
            structuredQuery.addQueryPredicate(comparisonPredicate);
        }
    }
    return queryVariable;
}
private static Predicate<String[]> getComparativeClausePredicate(
    ComparisonPredicate comparisonPredicate, Map<String, Integer> descriptorIndexMap) {
    DataType dataType = getDataTypeToCastOperandsTo(comparisonPredicate);
    ComparisonOperator operator = comparisonPredicate.getComparisonOperator();
    Pair<String, String> leftOperand = comparisonPredicate.getLeftOperand();
    Pair<String, String> rightOperand = comparisonPredicate.getRightOperand();
    String literal = comparisonPredicate.getLiteral();
    int variable1IndexInPropertyResults = descriptorIndexMap.get(
        leftOperand.a + '.' + leftOperand.b);
    int variable2IndexInPropertyResults = (null != rightOperand)
        ? descriptorIndexMap.get(rightOperand.a + '.' + rightOperand.b) : -1;
    return (String[] predicate) -> {
        String rvalue = (variable2IndexInPropertyResults == -1) ? literal
            : predicate[variable2IndexInPropertyResults];
        return RuntimeComparator.resolveTypesAndCompare(
            DataType.parseDataType(dataType, predicate[variable1IndexInPropertyResults]),
            DataType.parseDataType(dataType, rvalue), operator);
    };
}
@Override
public void notifyDone() {
    List<String> keys = groupByKeys.getSortedKeys();
    for (String groupByKey : keys) {
        String[] variables = groupByKey.isEmpty() ? new String[0]
            : groupByKey.split(GROUP_BY_KEY_DELIMITER);
        Object[] tuple = new Object[variables.length + valueAggregatorPairs.size()];
        System.arraycopy(variables, 0, tuple, 0, variables.length);
        int i = variables.length;
        int index = groupByKeys.mapStringKeyToInt(groupByKey);
        for (Pair<EdgeOrVertexPropertyDescriptor, AbstractAggregator> valueAggregatorPair :
                valueAggregatorPairs) {
            tuple[i++] = valueAggregatorPair.b.getValue(index);
        }
        tuples.addTuple(tuple);
    }
    ((OutputSink) nextOperator).append(tuples);
    this.tuples = new Tuples(columnTypes, columnNames);
    this.vertexIndices = new ArrayList<>();
    this.edgeIndices = new ArrayList<>();
    super.notifyDone();
}
AbstractOperator constructFilter(Map<String, Integer> vertexVariableOrderIndexMapBeforeProjection,
    Map<String, Integer> edgeVariableOrderIndexMap, AbstractOperator nextOperator) {
    List<EdgeOrVertexPropertyDescriptor> edgeOrVertexPropertyDescriptors = new ArrayList<>();
    // The {@code descriptorIndexMap} holds the position of the descriptor for a given
    // variable in the {@code edgeOrVertexPropertyDescriptors} list. A map is used to prevent
    // duplicate descriptors in the list.
    Map<String, Integer> descriptorIndexMap = new HashMap<>();
    for (QueryPredicate queryPredicate :
            structuredQuery.getEdgeLiteralAndNonLiteralPredicates()) {
        for (Pair<String, String> variable : queryPredicate.getAllVariables()) {
            if (null == descriptorIndexMap.get(variable.a + '.' + variable.b)) {
                descriptorIndexMap.put(variable.a + '.' + variable.b,
                    edgeOrVertexPropertyDescriptors.size());
                edgeOrVertexPropertyDescriptors.add(getEdgeOrVertexPropertyDescriptor(
                    vertexVariableOrderIndexMapBeforeProjection, edgeVariableOrderIndexMap,
                    variable.a, typeAndPropertyKeyStore.mapStringPropertyKeyToShort(
                        variable.b)));
            }
        }
    }
    Predicate<String[]> predicate = PredicateFactory.getFilterPredicate(
        structuredQuery.getEdgeLiteralAndNonLiteralPredicates(), descriptorIndexMap);
    return new Filter(nextOperator, predicate, edgeOrVertexPropertyDescriptors,
        structuredQuery.getEdgeLiteralAndNonLiteralPredicates());
}
private List<EdgeOrVertexPropertyDescriptor> constructEdgeOrVertexPropertyDescriptorList(
    Map<String, Integer> vertexVariableOrderIndexMapAfterProjection,
    Map<String, Integer> edgeVariableOrderIndexMap, List<String> columnNames) {
    List<EdgeOrVertexPropertyDescriptor> edgeOrVertexPropertyIndices = new ArrayList<>();
    for (String returnVariable : structuredQuery.getReturnVariables()) {
        edgeOrVertexPropertyIndices.add(getEdgeOrVertexPropertyDescriptor(
            vertexVariableOrderIndexMapAfterProjection, edgeVariableOrderIndexMap,
            returnVariable, (short) -1 /* No property key. Use the vertex or edge ID. */));
        columnNames.add(returnVariable);
    }
    for (Pair<String, String> returnVariablePropertyPair :
            structuredQuery.getReturnVariablePropertyPairs()) {
        edgeOrVertexPropertyIndices.add(getEdgeOrVertexPropertyDescriptor(
            vertexVariableOrderIndexMapAfterProjection, edgeVariableOrderIndexMap,
            returnVariablePropertyPair.a, typeAndPropertyKeyStore.mapStringPropertyKeyToShort(
                returnVariablePropertyPair.b)));
        columnNames.add(returnVariablePropertyPair.a + "." + returnVariablePropertyPair.b);
    }
    return edgeOrVertexPropertyIndices;
}
/**
 * See {@link QueryPredicate#validateTypes()}.
 */
@Override
public void validateTypes() {
    Pair<Short, DataType> leftOperandKeyAndDataType = getKeyAndDataTypePair(leftOperand.b);
    if (PredicateType.COMPARATIVE_CLAUSE_TWO_PROPERTY_KEY_OPERANDS == predicateType) {
        Pair<Short, DataType> rightOperandKeyAndDataType = getKeyAndDataTypePair(
            rightOperand.b);
        if ((!isNumeric(leftOperandKeyAndDataType.b) ||
                !isNumeric(rightOperandKeyAndDataType.b)) &&
                leftOperandKeyAndDataType.b != rightOperandKeyAndDataType.b) {
            throw new IncorrectDataTypeException("DataType Mismatch - The left operand " +
                leftOperand.a + "." + leftOperand.b + " is of data type " +
                leftOperandKeyAndDataType.b + " and the right operand " +
                rightOperand.a + "." + rightOperand.b + " is of data type " +
                rightOperandKeyAndDataType.b + ".");
        }
    } else {
        DataType.assertValueCanBeCastToDataType(leftOperandKeyAndDataType.b, literal);
    }
}
/**
 * See {@link QueryPredicate#validateTypes()}.
 */
@Override
public void validateTypes() {
    Pair<Short, DataType> leftOperandKeyAndDataType = getKeyAndDataTypePair(leftOperand.b);
    for (Pair<String, String> variable : variablesWithProperty) {
        Pair<Short, DataType> variableKeyAndDataType = getKeyAndDataTypePair(variable.b);
        if ((!isNumeric(leftOperandKeyAndDataType.b) ||
                !isNumeric(variableKeyAndDataType.b)) &&
                leftOperandKeyAndDataType.b != variableKeyAndDataType.b) {
            throw new IncorrectDataTypeException("DataType Mismatch - The left operand " +
                leftOperand.b + " is of data type " + leftOperandKeyAndDataType.b +
                " and the value " + variable.a + " is of data type " +
                variableKeyAndDataType.b + ".");
        }
    }
    for (String literal : literals) {
        DataType.assertValueCanBeCastToDataType(leftOperandKeyAndDataType.b, literal);
    }
}
private String createVertices() {
    TypeAndPropertyKeyStore typeAndPropertyKeyStore = TypeAndPropertyKeyStore.getInstance();
    for (QueryVariable queryVariable : structuredQuery.getQueryVariables()) {
        Map<String, Pair<String, String>> stringVertexProperties =
            queryVariable.getVariableProperties();
        typeAndPropertyKeyStore.assertExistingKeyDataTypesMatchPreviousDeclarations(
            stringVertexProperties);
        int vertexId = Integer.parseInt(queryVariable.getVariableName());
        short vertexType = typeAndPropertyKeyStore.mapStringTypeToShortOrInsert(
            queryVariable.getVariableType());
        Map<Short, Pair<DataType, String>> vertexProperties = typeAndPropertyKeyStore.
            mapStringPropertiesToShortAndDataTypeOrInsert(stringVertexProperties);
        Graph.getInstance().addVertex(vertexId, vertexType, vertexProperties);
    }
    // TODO(amine): bug, count the actual number of vertices created to append to sink.
    return structuredQuery.getQueryVariables().size() + " vertices created.";
}
private void assertDataTypesAreConsistent(
    Map<String, Pair<String, String>> thisPropertiesCollection,
    Map<String, Pair<String, String>> thatPropertiesCollection) {
    if (null == thisPropertiesCollection || null == thatPropertiesCollection) {
        return;
    }
    for (String propertyKey : thisPropertiesCollection.keySet()) {
        String thisDataType = thisPropertiesCollection.get(propertyKey).a.toUpperCase();
        String thatDataType = null;
        if (null != thatPropertiesCollection.get(propertyKey)) {
            thatDataType = thatPropertiesCollection.get(propertyKey).a.toUpperCase();
        }
        if (null != thatDataType && !thisDataType.equals(thatDataType)) {
            throw new IncorrectDataTypeException("Inconsistent DataType usage - property key " +
                propertyKey + " is used with two different data types: " + thisDataType +
                " and " + thatDataType + ".");
        }
    }
}
/**
 * Creates an index based on the given {@link StructuredQuery}.
 *
 * @return A string describing what was done.
 */
private String createIndex() {
    Pair<String, String> typePropertyPair = structuredQuery.getTypeAndPropertyToIndex();
    String stringType = typePropertyPair.a;
    String stringProperty = typePropertyPair.b;
    Short type = TypeAndPropertyKeyStore.ANY;
    if (null != stringType) {
        type = TypeAndPropertyKeyStore.getInstance().mapStringTypeToShort(stringType);
    }
    Short property = TypeAndPropertyKeyStore.getInstance().mapStringPropertyKeyToShort(
        stringProperty);
    if (null == property) {
        throw new NoSuchPropertyKeyException(stringProperty);
    }
    IndexStore.getInstance().createIndex(type, property);
    return "Index created for type: " + (null == stringType ? "<ALL_TYPES>" : stringType) +
        " and on property: " + stringProperty;
}
/**
 * Returns the {@code Short} key and {@code Object} value pair properties of the vertex with
 * the given ID.
 * Warning: If a vertex's properties are empty, it can be for one of two reasons: (1) the
 * vertex was never created; or (2) the vertex indeed has no properties.
 *
 * @param vertexId The ID of the vertex.
 *
 * @return The possibly empty properties of the vertex as a Map<Short, Object>.
 *
 * @throws NoSuchElementException if {@code vertexId} is larger than the highest vertex ID
 * previously created.
 */
public Map<Short, Object> getProperties(int vertexId) {
    if (vertexId > Graph.getInstance().getHighestVertexId()) {
        throw new NoSuchElementException("Vertex with ID " + vertexId + " does not exist.");
    }
    Map<Short, Object> properties = new HashMap<>();
    byte[] data = vertexProperties[vertexId];
    if (null == data) {
        return properties;
    }
    propertyIterator.reset(data, 0, data.length);
    Pair<Short, Object> keyValue;
    while (propertyIterator.hasNext()) {
        keyValue = propertyIterator.next();
        properties.put(keyValue.a, keyValue.b);
    }
    return properties;
}
/**
 * Returns the {@code String} key and {@code String} value pair properties of the vertex with
 * the given ID.
 * Warning: If a vertex's properties are empty, it can be for one of two reasons: (1) the
 * vertex was never created; or (2) the vertex indeed has no properties.
 *
 * @param vertexId The ID of the vertex.
 *
 * @return The possibly empty properties of the vertex as a Map<String, String>.
 *
 * @throws NoSuchElementException if {@code vertexId} is larger than the highest vertex ID
 * previously created.
 */
public Map<String, String> getPropertiesAsStrings(int vertexId) {
    if (vertexId > Graph.getInstance().getHighestVertexId()) {
        throw new NoSuchElementException("Vertex with ID " + vertexId + " does not exist.");
    }
    Map<String, String> properties = new HashMap<>();
    byte[] data = vertexProperties[vertexId];
    if (null == data) {
        return properties;
    }
    propertyIterator.reset(data, 0, data.length);
    Pair<Short, Object> keyValue;
    while (propertyIterator.hasNext()) {
        keyValue = propertyIterator.next();
        properties.put(TypeAndPropertyKeyStore.getInstance().mapShortPropertyKeyToString(
            keyValue.a), keyValue.b.toString());
    }
    return properties;
}
/**
 * Add a vertex to a current index if it matches an index that is currently being stored.
 *
 * @param vertexId The ID of the vertex to potentially add to indices.
 * @param vertexType The type of the vertex specified by `vertexId`.
 * @param vertexProperties The properties of the vertex specified by `vertexId`.
 */
private void indexVertex(int vertexId, Short vertexType,
    Map<Short, Pair<DataType, String>> vertexProperties) {
    for (short propertyKey : vertexProperties.keySet()) {
        if (!isPropertyTypeIndexed(vertexType, propertyKey)) {
            continue;
        }
        Integer indexKey = getTypePropertyIndexKey(vertexType, propertyKey);
        indices.putIfAbsent(indexKey, new HashMap<>());
        Pair<DataType, String> property = vertexProperties.get(propertyKey);
        DataType dataType = property.a;
        String propertyValue = property.b;
        validatePropertyType(propertyKey, dataType);
        updatedIndexWithVertex(vertexId, indexKey, propertyValue, true);
    }
}
protected byte[] serializeProperties(Map<Short, Pair<DataType, String>> properties) {
    byte[] propertiesAsBytes = new byte[0];
    if (null != properties && !properties.isEmpty()) {
        int index = 0;
        int propertiesLength = 0;
        byte[][] keyValueByteArrays = new byte[properties.size()][];
        for (Short key : properties.keySet()) {
            keyValueByteArrays[index] = DataType.serialize(properties.get(key).a, key,
                properties.get(key).b);
            propertiesLength += keyValueByteArrays[index].length;
            index++;
        }
        propertiesAsBytes = new byte[propertiesLength];
        propertiesLength = 0;
        for (byte[] keyValueAsBytes : keyValueByteArrays) {
            System.arraycopy(keyValueAsBytes, 0, propertiesAsBytes, propertiesLength,
                keyValueAsBytes.length);
            propertiesLength += keyValueAsBytes.length;
        }
    }
    return propertiesAsBytes;
}
private Map<Short, Pair<DataType, String>> mapStringPropertiesToShortAndDataType(
    Map<String, Pair<String, String>> stringProperties, boolean insertIfKeyDoesntExist,
    boolean assertAllKeysExist) {
    if (null == stringProperties) {
        return null;
    }
    Pair<Short, DataType> keyDataTypePair;
    Pair<String, String> stringDataTypeValuePair;
    Map<Short, Pair<DataType, String>> resultProperties = new HashMap<>();
    for (String stringKey : stringProperties.keySet()) {
        stringDataTypeValuePair = stringProperties.get(stringKey);
        keyDataTypePair = mapStringPropertyKeyValueToShortAndDataType(stringKey,
            stringDataTypeValuePair.a /* DataType as String */, insertIfKeyDoesntExist,
            assertAllKeysExist);
        resultProperties.put(keyDataTypePair.a /* key as short */, new Pair<>(
            keyDataTypePair.b /* DataType */, stringDataTypeValuePair.b /* value as String */));
    }
    return resultProperties;
}
@VisibleForTesting
Pair<Short, DataType> mapStringPropertyKeyValueToShortAndDataType(String stringKey,
    String stringDataType, boolean insertIfKeyDoesntExist, boolean assertKeyExist) {
    if (isNullOrEmpty(stringKey)) {
        throw new IllegalArgumentException("Property keys can't be null or the empty string.");
    }
    Short key = propertyKeyStore.mapStringKeyToShort(stringKey);
    DataType dataType = DataType.mapStringToDataType(stringDataType);
    if (null != key) {
        DataType dataTypeStored = propertyDataTypeStore.get(key);
        if (dataTypeStored != dataType) {
            throw new IncorrectDataTypeException("Incorrect DataType usage - property key " +
                stringKey + " has been declared as " + dataTypeStored + " previously but " +
                "is now used as " + stringDataType.toUpperCase() + ".");
        }
    } else if (insertIfKeyDoesntExist) {
        key = propertyKeyStore.getKeyAsShortOrInsert(stringKey);
        propertyDataTypeStore.put(key, dataType);
    } else if (assertKeyExist) {
        throw new NoSuchPropertyKeyException(stringKey);
    }
    return new Pair<>(key, dataType);
}
@Test
public void testInClauseVariablesOnly() throws Exception {
    String query = "MATCH (a)->(b) WHERE a.name IN [b.age, b.name, b.value];";
    StructuredQuery actualStructuredQuery = new StructuredQueryParser().parse(query);
    StructuredQuery expectedStructuredQuery = new StructuredQuery();
    expectedStructuredQuery.addRelation(new QueryRelation(new QueryVariable("a"),
        new QueryVariable("b")));
    expectedStructuredQuery.setQueryOperation(StructuredQuery.QueryOperation.MATCH);
    InClausePredicate expectedPredicate = new InClausePredicate();
    expectedPredicate.setLeftOperand(new Pair<>("a", "name"));
    expectedPredicate.setPredicateType(PredicateType.IN_CLAUSE_VARIABLES_AND_LITERALS);
    expectedPredicate.addVariableWithProperty(new Pair<>("b", "age"));
    expectedPredicate.addVariableWithProperty(new Pair<>("b", "name"));
    expectedPredicate.addVariableWithProperty(new Pair<>("b", "value"));
    expectedStructuredQuery.addQueryPredicate(expectedPredicate);
    TestUtils.assertEquals(expectedStructuredQuery, actualStructuredQuery);
}
@Test
public void testInClauseVariablesAndLiterals() throws Exception {
    String query = "MATCH (a)->(b) WHERE a.name IN [\"a\", b.name, \"b\"];";
    StructuredQuery actualStructuredQuery = new StructuredQueryParser().parse(query);
    StructuredQuery expectedStructuredQuery = new StructuredQuery();
    expectedStructuredQuery.addRelation(new QueryRelation(new QueryVariable("a"),
        new QueryVariable("b")));
    expectedStructuredQuery.setQueryOperation(StructuredQuery.QueryOperation.MATCH);
    InClausePredicate expectedPredicate = new InClausePredicate();
    expectedPredicate.setLeftOperand(new Pair<>("a", "name"));
    expectedPredicate.setPredicateType(PredicateType.IN_CLAUSE_VARIABLES_AND_LITERALS);
    expectedPredicate.addLiteral("a");
    expectedPredicate.addLiteral("b");
    expectedPredicate.addVariableWithProperty(new Pair<>("b", "name"));
    expectedStructuredQuery.addQueryPredicate(expectedPredicate);
    TestUtils.assertEquals(expectedStructuredQuery, actualStructuredQuery);
}
@Test
public void testTwoVertexPropertyPredicate() {
    String propertyKey = "age";
    ComparisonPredicate comparisonPredicate = TestUtils.createComparisonPredicate(
        new Pair<>("a", propertyKey), new Pair<>("b", propertyKey), null,
        ComparisonOperator.GREATER_THAN);
    List<QueryPredicate> queryPredicates = new ArrayList<>();
    queryPredicates.add(comparisonPredicate);
    Map<String, Integer> descriptorIndexMap = new HashMap<>();
    descriptorIndexMap.put("a." + propertyKey, 0);
    descriptorIndexMap.put("b." + propertyKey, 2);
    Predicate<String[]> predicate = PredicateFactory.getFilterPredicate(queryPredicates,
        descriptorIndexMap);
    String[] resolvedProperties = {"15", "20", "10"};
    Assert.assertTrue(predicate.test(resolvedProperties));
}
@Test
public void testInClausePredicateMatchVariable() {
    String propertyKey = "views";
    List<Pair<String, String>> variables = new ArrayList<>();
    variables.add(new Pair<>("b", propertyKey));
    List<String> literals = new ArrayList<>();
    literals.add("40");
    InClausePredicate inClausePredicate = TestUtils.createInClausePredicate(
        new Pair<>("a", propertyKey), variables, literals);
    List<QueryPredicate> queryPredicates = new ArrayList<>();
    queryPredicates.add(inClausePredicate);
    Map<String, Integer> descriptorIndexMap = new HashMap<>();
    descriptorIndexMap.put("a." + propertyKey, 0);
    descriptorIndexMap.put("b." + propertyKey, 1);
    Predicate<String[]> predicate = PredicateFactory.getFilterPredicate(queryPredicates,
        descriptorIndexMap);
    String[] resolvedProperties = {"20", "20"};
    Assert.assertTrue(predicate.test(resolvedProperties));
}
@Test
public void testInClausePredicateMatchLiteral() {
    String propertyKey = "views";
    List<Pair<String, String>> variables = new ArrayList<>();
    variables.add(new Pair<>("b", propertyKey));
    List<String> literals = new ArrayList<>();
    literals.add("40");
    InClausePredicate inClausePredicate = TestUtils.createInClausePredicate(
        new Pair<>("a", propertyKey), variables, literals);
    List<QueryPredicate> queryPredicates = new ArrayList<>();
    queryPredicates.add(inClausePredicate);
    Map<String, Integer> descriptorIndexMap = new HashMap<>();
    descriptorIndexMap.put("a." + propertyKey, 0);
    descriptorIndexMap.put("b." + propertyKey, 1);
    Predicate<String[]> predicate = PredicateFactory.getFilterPredicate(queryPredicates,
        descriptorIndexMap);
    String[] resolvedProperties = {"40", "20"};
    Assert.assertTrue(predicate.test(resolvedProperties));
}
@Test
public void testInClausePredicateMatchNone() {
    String propertyKey = "age";
    List<Pair<String, String>> variables = new ArrayList<>();
    variables.add(new Pair<>("b", propertyKey));
    List<String> literals = new ArrayList<>();
    literals.add("50");
    InClausePredicate inClausePredicate = TestUtils.createInClausePredicate(
        new Pair<>("a", propertyKey), variables, literals);
    List<QueryPredicate> queryPredicates = new ArrayList<>();
    queryPredicates.add(inClausePredicate);
    Map<String, Integer> descriptorIndexMap = new HashMap<>();
    descriptorIndexMap.put("a." + propertyKey, 0);
    descriptorIndexMap.put("b." + propertyKey, 1);
    Predicate<String[]> predicate = PredicateFactory.getFilterPredicate(queryPredicates,
        descriptorIndexMap);
    String[] resolvedProperties = {"29", "20"};
    Assert.assertFalse(predicate.test(resolvedProperties));
}
/**
 * Creates a {@link ComparisonPredicate} using the given parameters.
 *
 * @param variable1 A {@code Pair<String, String>} which will be the left operand in the
 * {@link ComparisonPredicate} to be created.
 * @param variable2 A {@code Pair<String, String>} which will be the right operand in the
 * {@link ComparisonPredicate} to be created. Mutually exclusive with {@code literal}.
 * @param literal A {@code String} which will be the right operand in the
 * {@link ComparisonPredicate} to be created. Mutually exclusive with {@code variable2}.
 * @param comparisonOperator A {@link ComparisonOperator} specifying the comparison operator
 * of the {@link ComparisonPredicate} to be created.
 *
 * @return A {@link ComparisonPredicate} created using the given parameters.
 */
public static ComparisonPredicate createComparisonPredicate(
    Pair<String, String> variable1, Pair<String, String> variable2, String literal,
    ComparisonOperator comparisonOperator) {
    ComparisonPredicate comparisonPredicate = new ComparisonPredicate();
    comparisonPredicate.setLeftOperand(variable1);
    comparisonPredicate.setRightOperand(variable2);
    comparisonPredicate.setLiteral(literal);
    comparisonPredicate.setComparisonOperator(comparisonOperator);
    if (null == comparisonPredicate.getLiteral()) {
        comparisonPredicate.setPredicateType(
            PredicateType.COMPARATIVE_CLAUSE_TWO_PROPERTY_KEY_OPERANDS);
    } else {
        comparisonPredicate.setPredicateType(
            PredicateType.COMPARATIVE_CLAUSE_PROPERTY_KEY_AND_LITERAL_OPERANDS);
    }
    return comparisonPredicate;
}
public static InClausePredicate createInClausePredicate(
    Pair<String, String> leftOperand, List<Pair<String, String>> variables,
    List<String> literals) {
    InClausePredicate inClausePredicate = new InClausePredicate();
    if (variables.size() > 0) {
        inClausePredicate.setPredicateType(PredicateType.IN_CLAUSE_VARIABLES_AND_LITERALS);
    } else {
        inClausePredicate.setPredicateType(PredicateType.IN_CLAUSE_ONLY_LITERALS);
    }
    inClausePredicate.setLeftOperand(leftOperand);
    for (Pair<String, String> variable : variables) {
        inClausePredicate.addVariableWithProperty(variable);
    }
    for (String literal : literals) {
        inClausePredicate.addLiteral(literal);
    }
    return inClausePredicate;
}
private void populateTypeStoreAndPropertiesMap() {
    for (short i = 0; i < 3; ++i) {
        TypeAndPropertyKeyStore.getInstance().propertyDataTypeStore.put(keys[i],
            DataType.STRING);
    }
    for (short i = 3; i < 5; ++i) {
        TypeAndPropertyKeyStore.getInstance().propertyDataTypeStore.put(keys[i],
            DataType.INT);
    }
    TypeAndPropertyKeyStore.getInstance().propertyDataTypeStore.put(keys[5], DataType.BOOLEAN);
    TypeAndPropertyKeyStore.getInstance().propertyDataTypeStore.put(keys[6], DataType.DOUBLE);
    for (short i = 0; i < 3; ++i) {
        propertiesOfEdgeToAdd.put(keys[i], new Pair<>(DataType.STRING, values[i]));
    }
    propertiesLengthInBytes = 3 * 6 /* 2 bytes for short key + 4 bytes for int length */ +
        values[0].length() + values[1].length() + values[2].length();
}
@Test
public void testAddEdgeWithMultipleDataTypeProperties() {
    populateTypeStoreAndPropertiesMap();
    for (short i = 0; i < 3; ++i) {
        propertiesOfEdgeToAdd.put(keys[i], new Pair<>(DataType.STRING, values[i]));
    }
    for (short i = 3; i < 5; ++i) {
        propertiesOfEdgeToAdd.put(keys[i], new Pair<>(DataType.INT, values[i]));
    }
    propertiesOfEdgeToAdd.put(keys[5], new Pair<>(DataType.BOOLEAN, values[5]));
    propertiesOfEdgeToAdd.put(keys[6], new Pair<>(DataType.DOUBLE, values[6]));
    EdgeStore.getInstance().addEdge(propertiesOfEdgeToAdd);
    Map<Short, Object> propertiesStored = EdgeStore.getInstance().getProperties(
        0 /* edge ID */);
    Assert.assertEquals(values.length, propertiesStored.size());
    for (int i = 0; i < 3; ++i) {
        Assert.assertEquals(values[i], propertiesStored.get(keys[i]));
    }
    for (int i = 3; i < 5; ++i) {
        Assert.assertEquals(Integer.parseInt(values[i]), propertiesStored.get(keys[i]));
    }
    Assert.assertEquals(Boolean.parseBoolean(values[5]), propertiesStored.get(keys[5]));
    Assert.assertEquals(Double.parseDouble(values[6]), propertiesStored.get(keys[6]));
}
@Override
public void exitNonReserved(SqlBaseParser.NonReservedContext context) {
    // we can't modify the tree during rule enter/exit event handling unless we're dealing
    // with a terminal. Otherwise, ANTLR gets confused and fires spurious notifications.
    if (!(context.getChild(0) instanceof TerminalNode)) {
        int rule = ((ParserRuleContext) context.getChild(0)).getRuleIndex();
        throw new AssertionError("nonReserved can only contain tokens. Found nested rule: " +
            ruleNames.get(rule));
    }

    // replace nonReserved words with IDENT tokens
    context.getParent().removeLastChild();

    Token token = (Token) context.getChild(0).getPayload();
    context.getParent().addChild(new CommonToken(
        new Pair<>(token.getTokenSource(), token.getInputStream()),
        SqlBaseLexer.IDENTIFIER,
        token.getChannel(),
        token.getStartIndex(),
        token.getStopIndex()));
}
/** Walk upwards from node until we find a child of p at t's char position.
 *  Don't see alignment with self, t, or element *after* us.
 *  Return null if there is no such ancestor p.
 */
public static Pair<ParserRuleContext, Integer> earliestAncestorWithChildStartingAtCharPos(
    ParserRuleContext node, Token t, int charpos) {
    ParserRuleContext p = node;
    while ( p!=null ) {
        // check all children of p to see if one of them starts at charpos
        for (int i = 0; i < p.getChildCount(); i++) {
            ParseTree child = p.getChild(i);
            Token start;
            if ( child instanceof ParserRuleContext ) {
                start = ((ParserRuleContext) child).getStart();
            }
            else { // must be token
                start = ((TerminalNode) child).getSymbol();
            }
            // check that we don't see alignment with self or element *after* us
            if ( start.getTokenIndex() < t.getTokenIndex() &&
                 start.getCharPositionInLine() == charpos ) {
                return new Pair<>(p, i);
            }
        }
        p = p.getParent();
    }
    return null;
}
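// Hedged usage sketch, not part of the source above: a hypothetical helper showing how
// the (ancestor, childIndex) pair returned by the method above would typically be
// unpacked. The helper name and the assumption that "node" and "tok" are already in
// hand are illustrative only.
static void printAlignedAncestor(ParserRuleContext node, Token tok) {
    Pair<ParserRuleContext, Integer> hit =
        earliestAncestorWithChildStartingAtCharPos(node, tok, tok.getCharPositionInLine());
    if (hit != null) {
        ParserRuleContext ancestor = hit.a; // ancestor whose child starts at tok's column
        int childIndex = hit.b;             // index of that aligned child within the ancestor
        System.out.println(ancestor.getClass().getSimpleName() + " child #" + childIndex);
    }
}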
/** Return a new map from rulename to List of (a,b) pairs stripped of
 *  tuples (a,b) where a or b is in rule repeated token set.
 *  E.g., before removing repeated token ',', we see:
 *
 *  elementValueArrayInitializer: 4:'{',',' 1:'{','}' 4:',','}'
 *
 *  After removing tuples containing repeated tokens, we get:
 *
 *  elementValueArrayInitializer: 1:'{','}'
 */
protected Map<RuleAltKey, List<Pair<Integer, Integer>>> stripPairsWithRepeatedTokens() {
    Map<RuleAltKey, List<Pair<Integer, Integer>>> ruleToPairsWoRepeats = new HashMap<>();
    // For each rule
    for (RuleAltKey ruleAltKey : ruleToPairsBag.keySet()) {
        Set<Integer> ruleRepeatedTokens = ruleToRepeatedTokensSet.get(ruleAltKey);
        Set<Pair<Integer, Integer>> pairsBag = ruleToPairsBag.get(ruleAltKey);
        // If there are repeated tokens for this rule
        if ( ruleRepeatedTokens!=null ) {
            // Remove all (a,b) where a or b is in the repeated token set
            List<Pair<Integer, Integer>> pairsWoRepeats =
                BuffUtils.filter(pairsBag,
                    p -> !ruleRepeatedTokens.contains(p.a) && !ruleRepeatedTokens.contains(p.b));
            ruleToPairsWoRepeats.put(ruleAltKey, pairsWoRepeats);
        }
        else {
            ruleToPairsWoRepeats.put(ruleAltKey, new ArrayList<>(pairsBag));
        }
    }
    return ruleToPairsWoRepeats;
}
public void visitNonSingletonWithSeparator(ParserRuleContext ctx,
                                           List<? extends ParserRuleContext> siblings,
                                           Token separator) {
    boolean oversize = isOversizeList(ctx, siblings, separator);
    Map<Token, Pair<Boolean, Integer>> tokenInfo =
        getInfoAboutListTokens(ctx, tokens, tokenToNodeMap, siblings, oversize);

    // copy sibling list info for associated tokens into overall list
    // but don't overwrite existing so that most general (largest construct)
    // list information is used/retained (i.e., not overwritten).
    for (Token t : tokenInfo.keySet()) {
        if ( !tokenToListInfo.containsKey(t) ) {
            tokenToListInfo.put(t, tokenInfo.get(t));
        }
    }
}
/**
 * Get {@code #} labels. The keys of the map are the labels applied to outer
 * alternatives of a rule, and the values are collections of pairs
 * (alternative number and {@link AltAST}) identifying the alternatives with
 * this label. Unlabeled alternatives are not included in the result.
 */
public Map<String, List<Pair<Integer, AltAST>>> getAltLabels() {
    Map<String, List<Pair<Integer, AltAST>>> labels =
        new LinkedHashMap<String, List<Pair<Integer, AltAST>>>();
    for (int i=1; i<=numberOfAlts; i++) {
        GrammarAST altLabel = alt[i].ast.altLabel;
        if ( altLabel!=null ) {
            List<Pair<Integer, AltAST>> list = labels.get(altLabel.getText());
            if (list == null) {
                list = new ArrayList<Pair<Integer, AltAST>>();
                labels.put(altLabel.getText(), list);
            }
            list.add(new Pair<Integer, AltAST>(i, alt[i].ast));
        }
    }
    if ( labels.isEmpty() ) return null;
    return labels;
}
@Override
public void suffixAlt(AltAST originalAltTree, int alt) {
    AltAST altTree = (AltAST)originalAltTree.dupTree();
    String altLabel = altTree.altLabel!=null ? altTree.altLabel.getText() : null;

    String label = null;
    boolean isListLabel = false;
    GrammarAST lrlabel = stripLeftRecursion(altTree);
    if ( lrlabel!=null ) {
        label = lrlabel.getText();
        isListLabel = lrlabel.getParent().getType() == PLUS_ASSIGN;
        leftRecursiveRuleRefLabels.add(new Pair<GrammarAST,String>(lrlabel,altLabel));
    }
    stripAltLabel(altTree);
    String altText = text(altTree);
    altText = altText.trim();
    LeftRecursiveRuleAltInfo a =
        new LeftRecursiveRuleAltInfo(alt, altText, label, altLabel, isListLabel, originalAltTree);
    suffixAlts.put(alt, a);
//    System.out.println("suffixAlt " + alt + ": " + altText + ", rewrite=" + rewriteText);
}
public ListenerFile(OutputModelFactory factory, String fileName) {
    super(factory, fileName);
    Grammar g = factory.getGrammar();
    parserName = g.getRecognizerName();
    grammarName = g.name;
    for (Rule r : g.rules.values()) {
        Map<String, List<Pair<Integer,AltAST>>> labels = r.getAltLabels();
        if ( labels!=null ) {
            for (Map.Entry<String, List<Pair<Integer, AltAST>>> pair : labels.entrySet()) {
                listenerNames.add(pair.getKey());
                listenerLabelRuleNames.put(pair.getKey(), r.name);
            }
        }
        else {
            // only add rule context if no labels
            listenerNames.add(r.name);
        }
    }
    ActionAST ast = g.namedActions.get("header");
    if ( ast!=null ) header = new Action(factory, ast);
    genPackage = factory.getGrammar().tool.genPackage;
}
public VisitorFile(OutputModelFactory factory, String fileName) {
    super(factory, fileName);
    Grammar g = factory.getGrammar();
    parserName = g.getRecognizerName();
    grammarName = g.name;
    for (Rule r : g.rules.values()) {
        Map<String, List<Pair<Integer, AltAST>>> labels = r.getAltLabels();
        if ( labels!=null ) {
            for (Map.Entry<String, List<Pair<Integer, AltAST>>> pair : labels.entrySet()) {
                visitorNames.add(pair.getKey());
                visitorLabelRuleNames.put(pair.getKey(), r.name);
            }
        }
        else {
            // if labels, must label all. no need for generic rule visitor then
            visitorNames.add(r.name);
        }
    }
    ActionAST ast = g.namedActions.get("header");
    if ( ast!=null ) header = new Action(factory, ast);
    genPackage = factory.getGrammar().tool.genPackage;
}
@Override
public void enterEveryRule(ParserRuleContext ctx) {
    String ruleName = extractRuleName(ctx);
    Pair<Integer, Integer> interval = getLineRange(ctx);
    Element newNode = (Element) domDoc.createElement(ruleName);
    if (interval != null) {
        newNode.setAttribute("start", Integer.toString(interval.a));
        newNode.setAttribute("end", Integer.toString(interval.b));
    }
    if (debugMode) {
        System.out.println("enter->" + ruleName);
    }
    if (ctx.getText() != null && ctx.getChildCount() == 0) {
        //newNode.setTextContent(ctx.getText());
        newNode.appendChild(domDoc.createTextNode(ctx.getText()));
    }
    nodeStack.peek().appendChild(newNode);
    nodeStack.push(newNode);
}
public static Pair<Parser, Lexer> parsePHP(String filePath) {
    AntlrCaseInsensitiveFileStream input;
    try {
        input = new AntlrCaseInsensitiveFileStream(filePath);
    } catch (IOException e) {
        e.printStackTrace();
        return null;
    }
    PHPLexer lexer = new PHPLexer(input);
    CommonTokenStream tokens = new CommonTokenStream(lexer);
    PHPParser parser = new InterruptablePHPParser(tokens, filePath);
    /* turn on prediction mode to speed up parsing */
    parser.getInterpreter().setPredictionMode(PredictionMode.SLL);
    Pair<Parser, Lexer> retval = new Pair<Parser, Lexer>(parser, lexer);
    return retval;
}
public static Document processFile(String filePath) {
    Pair<Parser, Lexer> pl = parsePHP(filePath);
    PHPParser parser = (PHPParser) pl.a;
    parser.setBuildParseTree(true);
    /*
     * htmlDocument is the start rule (the top-level rule)
     * for the PHP grammar
     */
    ParserRuleContext tree = parser.htmlDocument();
    List<String> ruleNames = Arrays.asList(parser.getRuleNames());
    Map<Integer, String> invTokenMap = getInvTokenMap(parser);
    TokenStream tokenStream = parser.getTokenStream();
    ParseTreeDOMSerializer ptSerializer =
        new ParseTreeDOMSerializer(ruleNames, invTokenMap, tokenStream);
    ParseTreeWalker.DEFAULT.walk(ptSerializer, tree);
    Document result = ptSerializer.getDOMDocument();
    return result;
}
/** Create an ANTLR Token from the current token type of the builder
 *  then advance the builder to next token (which ultimately calls an
 *  ANTLR lexer).  The {@link ANTLRLexerAdaptor} creates tokens via
 *  an ANTLR lexer but converts to {@link TokenIElementType} and here
 *  we have to convert back to an ANTLR token using what info we
 *  can get from the builder. We lose info such as the original channel.
 *  So, whitespace and comments (typically hidden channel) will look like
 *  real tokens. Jetbrains uses {@link ParserDefinition#getWhitespaceTokens()}
 *  and {@link ParserDefinition#getCommentTokens()} to strip these before
 *  our ANTLR parser sees them.
 */
@Override
public Token nextToken() {
    ProgressIndicatorProvider.checkCanceled();

    TokenIElementType ideaTType = (TokenIElementType)builder.getTokenType();
    int type = ideaTType!=null ? ideaTType.getANTLRTokenType() : Token.EOF;

    int channel = Token.DEFAULT_CHANNEL;
    Pair<TokenSource, CharStream> source = new Pair<TokenSource, CharStream>(this, null);
    String text = builder.getTokenText();
    int start = builder.getCurrentOffset();
    int length = text != null ? text.length() : 0;
    int stop = start + length - 1;
    // PsiBuilder doesn't provide line, column info
    int line = 0;
    int charPositionInLine = 0;
    Token t = tokenFactory.create(source, type, text, channel, start, stop,
                                  line, charPositionInLine);
    builder.advanceLexer();
//    System.out.println("TOKEN: "+t);
    return t;
}