/** Initialize the syntax so it's ready to scan the given area.
 * @param syntax lexical analyzer to prepare
 * @param startPos starting position of the scanning
 * @param endPos ending position of the scanning
 * @param forceLastBuffer force the syntax to think that the scanned area is the last
 * in the document. This is useful for forcing the syntax to process all the characters
 * in the given area.
 * @param forceNotLastBuffer force the syntax to think that the scanned area is NOT
 * the last buffer in the document. This is useful when the syntax will continue
 * scanning on another buffer.
 * @throws BadLocationException if the given positions are not valid in the document
 */
public void initSyntax(Syntax syntax, int startPos, int endPos, boolean forceLastBuffer, boolean forceNotLastBuffer) throws BadLocationException {
    // Hold a read lock so the document cannot change while we snapshot its chars.
    doc.readLock();
    try {
        Segment text = new Segment();
        int docLen = doc.getLength();
        // Let the document position the syntax; afterwards the syntax knows its
        // pre-scan (context chars before startPos needed to resume lexing correctly).
        doc.prepareSyntax(text, syntax, startPos, 0, forceLastBuffer, forceNotLastBuffer);
        int preScan = syntax.getPreScan();
        // Fetch the scanned area plus the pre-scan context that precedes it.
        char[] buffer = doc.getChars(startPos - preScan, endPos - startPos + preScan);
        // forceNotLastBuffer wins over forceLastBuffer; otherwise the buffer is
        // "last" when forced or when endPos reaches the document end.
        boolean lastBuffer = forceNotLastBuffer ? false : (forceLastBuffer || (endPos == docLen));
        syntax.relocate(buffer, preScan, endPos - startPos, lastBuffer, endPos);
    } finally {
        doc.readUnlock();
    }
}
/**
 * Fetches a span of characters into the given segment, avoiding a copy when
 * possible. If the requested span lies entirely on one side of the gap, the
 * segment is pointed directly at the backing array; only a span that crosses
 * the gap is copied into a fresh array.
 *
 * @param offset starting offset of the span in document coordinates
 * @param length number of characters to fetch
 * @param chars segment that receives the array/offset/count triplet
 * @throws BadLocationException if the span is outside the document bounds
 */
public void getChars(int offset, int length, Segment chars) throws BadLocationException {
    checkBounds(offset, length, length());
    if ((offset + length) <= gapStart) {
        // completely below gap: document offset equals array index
        chars.array = charArray;
        chars.offset = offset;
    } else if (offset >= gapStart) {
        // completely above gap: skip over the gap when mapping to an array index
        chars.array = charArray;
        chars.offset = offset + gapLength;
    } else {
        // spans the gap, must copy
        chars.array = copySpanChars(offset, length);
        chars.offset = 0;
    }
    chars.count = length;
}
/**
 * Returns text from the document, serving repeated requests for the same
 * (offset, length) span from a one-entry cache.
 */
@Override
public void getText(int offset, int length, Segment txt) throws BadLocationException {
    // Fast path: identical request as last time — replay the cached segment.
    if (lastOffset == offset && lastLength == length) {
        txt.array = segArray;
        txt.offset = segOffset;
        txt.count = segCount;
        txt.setPartialReturn(segPartialReturn);
        return;
    }
    super.getText(offset, length, txt);
    // Update the cache unless a small request (<= CACHE_BOUNDARY) would evict
    // a cached span that was larger than the boundary.
    if (length > CACHE_BOUNDARY || lastLength <= CACHE_BOUNDARY) {
        segArray = txt.array;
        segOffset = txt.offset;
        segCount = txt.count;
        segPartialReturn = txt.isPartialReturn();
        lastOffset = offset;
        lastLength = length;
    }
}
/**
 * Creates a new DocumentWordTokenizer to work on a document
 *
 * @param document The document to spell check
 */
public DocumentWordTokenizer(Document document) {
    this.document = document;
    //Create a text segment over the entire document
    text = new Segment();
    sentenceIterator = BreakIterator.getSentenceInstance();
    try {
        document.getText(0, document.getLength(), text);
        sentenceIterator.setText(text);
        // Use text.getBeginIndex(), not 0: the Segment filled by getText() may
        // point into a shared character array at a non-zero offset, so its
        // first valid index is getBeginIndex() rather than 0.
        currentWordPos = getNextWordStart(text, text.getBeginIndex());
        //If the current word pos is -1 then the string was all white space
        if (currentWordPos != -1) {
            currentWordEnd = getNextWordEnd(text, currentWordPos);
            nextWordPos = getNextWordStart(text, currentWordEnd);
        } else {
            moreTokens = false;
        }
    } catch (BadLocationException ex) {
        // Cannot normally happen for (0, length) of the same document; give up
        // gracefully by reporting no tokens.
        moreTokens = false;
    }
}
/**
 * Checks if a subregion of a <code>Segment</code> is equal to a string.
 *
 * @param ignoreCase
 *            True if case should be ignored, false otherwise
 * @param text
 *            The segment
 * @param offset
 *            The offset into the segment
 * @param match
 *            The string to match
 * @return whether the region of the segment equals the string
 */
public static boolean regionMatches(boolean ignoreCase, Segment text, int offset, String match) {
    int end = offset + match.length();
    // The candidate region must fit within the segment's valid range.
    if (end > text.offset + text.count) {
        return false;
    }
    char[] chars = text.array;
    for (int j = 0; j < match.length(); j++) {
        char a = chars[offset + j];
        char b = match.charAt(j);
        if (ignoreCase) {
            a = Character.toUpperCase(a);
            b = Character.toUpperCase(b);
        }
        if (a != b) {
            return false;
        }
    }
    return true;
}
/**
 * Checks if a subregion of a <code>Segment</code> is equal to a character array.
 *
 * @param ignoreCase
 *            True if case should be ignored, false otherwise
 * @param text
 *            The segment
 * @param offset
 *            The offset into the segment
 * @param match
 *            The character array to match
 * @return whether the region of the segment equals the character array
 */
public static boolean regionMatches(boolean ignoreCase, Segment text, int offset, char[] match) {
    int end = offset + match.length;
    // The candidate region must fit within the segment's valid range.
    if (end > text.offset + text.count) {
        return false;
    }
    char[] chars = text.array;
    for (int j = 0; j < match.length; j++) {
        char a = chars[offset + j];
        char b = match[j];
        if (ignoreCase) {
            a = Character.toUpperCase(a);
            b = Character.toUpperCase(b);
        }
        if (a != b) {
            return false;
        }
    }
    return true;
}
/**
 * Marks the whole line with a single token type chosen from its first
 * character (diff-style markup: added, removed, or hunk-header lines).
 */
@Override
public byte markTokensImpl(byte token, Segment line, int lineIndex) {
    // An empty line produces no tokens at all.
    if (line.count == 0) {
        return Token.NULL;
    }
    char first = line.array[line.offset];
    byte id;
    if (first == '+' || first == '>') {
        id = Token.KEYWORD1;
    } else if (first == '-' || first == '<') {
        id = Token.KEYWORD2;
    } else if (first == '@' || first == '*') {
        id = Token.KEYWORD3;
    } else {
        id = Token.NULL;
    }
    addToken(line.count, id);
    return Token.NULL;
}
/**
 * Handles the Enter key in the console: records the current input line in the
 * command history and pipes it (plus a newline) to the interpreter's input.
 */
synchronized void returnPressed() {
    Document doc = getDocument();
    int len = doc.getLength();
    Segment segment = new Segment();
    try {
        // Everything after the last output mark is the user's current input line.
        doc.getText(outputMark, len - outputMark, segment);
    } catch(javax.swing.text.BadLocationException ignored) {
        // Best-effort: log and continue with an empty segment (count == 0).
        ignored.printStackTrace();
    }
    if(segment.count > 0) {
        history.add(segment.toString());
    }
    // Reset history navigation to point past the newest entry.
    historyIndex = history.size();
    inPipe.write(segment.array, segment.offset, segment.count);
    append("\n");
    // New output starts after the echoed newline.
    outputMark = doc.getLength();
    inPipe.write("\n");
    inPipe.flush();
    console1.flush();
}
/**
 * Creates a new DocumentWordTokenizer to work on a document
 * @param document The document to spell check
 */
public DocumentWordTokenizer(Document document) {
    this.document = document;
    //Create a text segment over the entire document
    text = new Segment();
    sentenceIterator = BreakIterator.getSentenceInstance();
    try {
        document.getText(0, document.getLength(), text);
        sentenceIterator.setText(text);
        // robert: use text.getBeginIndex(), not 0, for segment's first offset
        // (the segment may point into a shared array at a non-zero offset)
        currentWordPos = getNextWordStart(text, text.getBeginIndex());
        //If the current word pos is -1 then the string was all white space
        if (currentWordPos != -1) {
            currentWordEnd = getNextWordEnd(text, currentWordPos);
            nextWordPos = getNextWordStart(text, currentWordEnd);
        } else {
            moreTokens = false;
        }
    } catch (BadLocationException ex) {
        // Cannot normally happen for (0, length) of the same document;
        // degrade gracefully by reporting no tokens.
        moreTokens = false;
    }
}
/**
 * Sets the current word position at the start of the word containing
 * the char at position pos. This way a call to nextWord() will return
 * this word.
 *
 * @param pos position in the word we want to set as current.
 */
public void posStartFullWordFrom(int pos){
    currentWordPos=text.getBeginIndex();
    // Clamp pos so setIndex() below cannot run past the end of the segment.
    if(pos>text.getEndIndex())
        pos=text.getEndIndex();
    // Walk backwards from pos until a non-word character is found; the word
    // then starts one position after it.
    for (char ch = text.setIndex(pos); ch != Segment.DONE; ch = text.previous()) {
        if (!Character.isLetterOrDigit(ch)) {
            if (ch == '-' || ch == '\'') { // handle ' and - inside words
                // Peek at the preceding char (previous() then next() restores
                // the iterator): ' and - only count as word-internal when
                // preceded by a letter or digit.
                char ch2 = text.previous();
                text.next();
                if (ch2 != Segment.DONE && Character.isLetterOrDigit(ch2))
                    continue;
            }
            currentWordPos=text.getIndex()+1;
            break;
        }
    }
    //System.out.println("CurPos:"+currentWordPos);
    if(currentWordPos==0)
        first=true;
    moreTokens=true;
    currentWordEnd = getNextWordEnd(text, currentWordPos);
    nextWordPos = getNextWordStart(text, currentWordEnd + 1);
}
/**
 * Regression check: drawing tabbed text with a null TabExpander must not
 * throw a NullPointerException.
 */
private static void testNPE() {
    Graphics graphics = null;
    try {
        final String tabbed = "\ttest\ttest2";
        BufferedImage image = new BufferedImage(100, 100, BufferedImage.TYPE_INT_RGB);
        graphics = image.createGraphics();
        Segment seg = new Segment(tabbed.toCharArray(), 0, tabbed.length());
        Utilities.drawTabbedText(seg, 0, 0, graphics, null, 0);
    } finally {
        // Always release the graphics context, even if drawing threw.
        if (graphics != null) {
            graphics.dispose();
        }
    }
}
/**
 * Returns the first token in the linked list of tokens generated
 * from <code>text</code>. This method must be implemented by
 * subclasses so they can correctly implement syntax highlighting.
 *
 * @param text The text from which to get tokens.
 * @param initialTokenType The token type we should start with.
 * @param startOffset The offset into the document at which
 *        <code>text</code> starts.
 * @return The first <code>Token</code> in a linked list representing
 *         the syntax highlighted text.
 */
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
    resetTokenList();
    // Maps segment-relative offsets produced by the lexer to document offsets.
    this.offsetShift = -text.offset + startOffset;

    // Start off in the proper state.
    // This lexer always starts in YYINITIAL; initialTokenType is unused here.
    int state = YYINITIAL;
    start = text.offset;

    s = text;
    try {
        yyreset(zzReader);
        yybegin(state);
        return yylex();
    } catch (IOException ioe) {
        // Should never happen when lexing an in-memory segment.
        ioe.printStackTrace();
        return new TokenImpl();
    }
}
/**
 * Returns the first token in the linked list of tokens generated
 * from <code>text</code>. This method must be implemented by
 * subclasses so they can correctly implement syntax highlighting.
 *
 * @param text The text from which to get tokens.
 * @param initialTokenType The token type we should start with.
 * @param startOffset The offset into the document at which
 *        <code>text</code> starts.
 * @return The first <code>Token</code> in a linked list representing
 *         the syntax highlighted text.
 */
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
    resetTokenList();
    // Maps segment-relative offsets produced by the lexer to document offsets.
    this.offsetShift = -text.offset + startOffset;

    // Start off in the proper state.
    // NOTE(review): a token type (TokenTypes.NULL) is handed to yybegin() as a
    // lexer state; presumably TokenTypes.NULL equals the generated lexer's
    // initial state constant — confirm against the generated lexer.
    int state = TokenTypes.NULL;
    s = text;
    try {
        yyreset(zzReader);
        yybegin(state);
        return yylex();
    } catch (IOException ioe) {
        // Should never happen when lexing an in-memory segment.
        ioe.printStackTrace();
        return new TokenImpl();
    }
}
/** * Returns the first token in the linked list of tokens generated * from <code>text</code>. This method must be implemented by * subclasses so they can correctly implement syntax highlighting. * * @param text The text from which to get tokens. * @param initialTokenType The token type we should start with. * @param startOffset The offset into the document at which * <code>text</code> starts. * @return The first <code>Token</code> in a linked list representing * the syntax highlighted text. */ @Override public Token getTokenList(Segment text, int initialTokenType, int startOffset) { resetTokenList(); this.offsetShift = -text.offset + startOffset; // Start off in the proper state. int state = Token.NULL; switch (initialTokenType) { default: state = Token.NULL; } s = text; try { yyreset(zzReader); yybegin(state); return yylex(); } catch (IOException ioe) { ioe.printStackTrace(); return new TokenImpl(); } }
/**
 * Deserializes a document.
 *
 * @param in The stream to read from.
 * @throws ClassNotFoundException if a serialized class cannot be resolved
 * @throws IOException if reading from the stream fails
 */
private void readObject(ObjectInputStream in) throws ClassNotFoundException, IOException {

    in.defaultReadObject();

    // Install default TokenMakerFactory. To support custom TokenMakers,
    // both JVM's should install default TokenMakerFactories that support
    // the language they want to use beforehand.
    setTokenMakerFactory(null);

    // Handle other transient stuff: these fields are not serialized and must
    // be rebuilt from the deserialized document structure.
    this.s = new Segment();
    int lineCount = getDefaultRootElement().getElementCount();
    lastTokensOnLines = new DynamicIntArray(lineCount);
    setSyntaxStyle(syntaxStyle); // Actually install (transient) TokenMaker
}
/**
 * Reads and converts the next buffer of characters from the reader.
 *
 * @return a segment holding the converted characters, or null once the
 *         reader is exhausted (the reader is closed at that point).
 * @throws IOException if reading from the underlying reader fails
 */
public Segment nextConverted() throws IOException {
    readWholeBuffer = false;
    if (reader == null) { // no more chars to read
        return null;
    }
    int readOffset = 0;
    int readSize = readBuffer(reader, convertedText.array, readOffset, true);
    if (readSize == 0) { // no more chars in reader
        reader.close();
        reader = null;
        return null;
    }
    readWholeBuffer = (readSize == convertedText.array.length);
    if (lastCharCR && readSize > 0 && convertedText.array[readOffset] == '\n') {
        /* the preceding '\r' was already converted to '\n'
         * in the previous buffer so here just skip initial '\n'
         */
        readOffset++;
        readSize--;
    }
    convertedText.offset = readOffset;
    convertedText.count = readSize;
    // Remember whether this buffer ended with '\r' so that a leading '\n'
    // in the next buffer can be skipped (see check above).
    lastCharCR = convertSegmentToLineFeed(convertedText);
    return convertedText;
}
/**
 * Convert all the '\r\n' or '\r' to '\n' (linefeed).
 * The conversion is done in place, compacting the characters toward the
 * front of the segment's array when '\r\n' pairs shrink the text.
 * @param text the text to be converted. Text is converted
 *  in the original array of the given segment.
 *  The <CODE>count</CODE> field
 *  of the text parameter will possibly be changed by the conversion
 *  if '\r\n' sequences are present.
 * @return whether the last character in the text was the '\r' character.
 *  That character was already converted to '\n' and is present
 *  in the segment. However this notification is important
 *  because if there would be '\n' at the begining
 *  of the next buffer then that character should be skipped.
 */
private static boolean convertSegmentToLineFeed(Segment text) {
    char[] chars = text.array;
    int storeOffset = text.offset; // offset at which chars are stored
    int endOffset = storeOffset + text.count;
    boolean storeChar = false; // to prevent copying same chars to same offsets
    boolean lastCharCR = false; // whether last char was '\r'

    for (int offset = storeOffset; offset < endOffset; offset++) {
        char ch = chars[offset];

        if (lastCharCR && ch == '\n') { // found CRLF sequence
            // The '\r' was already stored as '\n'; drop this '\n' so the
            // store position falls behind the read position from now on.
            lastCharCR = false;
            storeChar = true; // storeOffset now differs from offset

        } else { // not CRLF sequence
            if (ch == '\r') {
                lastCharCR = true;
                chars[storeOffset++] = '\n'; // convert it to '\n'

            } else if (ch == LS || ch == PS) { // Unicode LS, PS
                lastCharCR = false;
                chars[storeOffset++] = '\n';

            } else { // current char not '\r'
                lastCharCR = false;
                if (storeChar) {
                    // Only copy when positions diverged; otherwise the char
                    // is already in place.
                    chars[storeOffset] = ch;
                }
                storeOffset++;
            }
        }
    }

    // Shrink count to reflect any dropped '\n' characters.
    text.count = storeOffset - text.offset;

    return lastCharCR;
}
/** * Makes a <code>Segment</code> point to the text in our * document between the given positions. Note that the positions MUST be * valid positions in the document. * * @param p0 The first position in the document. * @param p1 The second position in the document. * @param document The document from which you want to get the text. * @param seg The segment in which to load the text. */ private void setSegment(int p0, int p1, Document document, Segment seg) { try { //System.err.println("... in setSharedSegment, p0/p1==" + p0 + "/" + p1); document.getText(p0, p1-p0, seg); //System.err.println("... in setSharedSegment: s=='" + s + "'; line/numLines==" + line + "/" + numLines); } catch (BadLocationException ble) { // Never happens ble.printStackTrace(); } }
/** * This helper method will return the end of the next word in the buffer. * * @param text Description of the Parameter * @param startPos Description of the Parameter * @return The nextWordEnd value */ private static int getNextWordEnd(Segment text, int startPos) { for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) { if (!Character.isLetterOrDigit(ch)) { // changed by Saruta if (ch == '-' || ch == '\'' || ch == '~') { // handle ' and - inside words continue; } return text.getIndex(); } } return text.getEndIndex(); }
/** Read from some reader and insert into document */
static void read(BaseDocument doc, Reader reader, int pos) throws BadLocationException, IOException {
    int bufferSize = ((Integer)doc.getProperty(EditorPreferencesKeys.READ_BUFFER_SIZE)).intValue();
    LineSeparatorConversion.ToLineFeed toLF = new LineSeparatorConversion.ToLineFeed(reader, bufferSize);
    // Insert each converted chunk at the running position until the
    // reader is exhausted (nextConverted() returns null).
    for (Segment chunk = toLF.nextConverted(); chunk != null; chunk = toLF.nextConverted()) {
        doc.insertString(pos, new String(chunk.array, chunk.offset, chunk.count), null);
        pos += chunk.count;
    }
}
private int drawText(Graphics g, int x, int y, int startOffset, int endOffset, boolean error, boolean selected, DocElement docElem) throws BadLocationException { Segment s = EventQueue.isDispatchThread() ? SEGMENT : new Segment(); s.array = docElem.getChars(); s.offset = startOffset - docElem.offset; s.count = endOffset - startOffset; g.setColor(getColor(error, selected)); return Utilities.drawTabbedText(s, x, y, g, this, startOffset); }
/**
 * Looks backwards from <code>pos</code> for a keyword ending there and, if
 * one is found, emits the preceding plain text and the keyword token.
 * Optionally pads the rest with a NULL token.
 */
private void searchBack(Segment line, int pos, boolean padNull) {
    int wordLen = pos - lastKeyword;
    byte keywordId = keywords.lookup(line, lastKeyword, wordLen);
    if (keywordId != Token.NULL) {
        // Flush any plain text before the keyword, then the keyword itself.
        int plainLen = lastKeyword - lastOffset;
        if (plainLen != 0) {
            addToken(plainLen, Token.NULL);
        }
        addToken(wordLen, keywordId);
        lastOffset = pos;
    }
    lastKeyword = pos + 1;
    if (padNull && lastOffset < pos) {
        addToken(pos - lastOffset, Token.NULL);
    }
}
/**
 * Paints the specified line onto the graphics context. Note that this method munges the offset
 * and count values of the segment.
 *
 * @param line
 *            The line segment
 * @param tokens
 *            The token list for the line
 * @param styles
 *            The syntax style list
 * @param expander
 *            The tab expander used to determine tab stops. May be null
 * @param gfx
 *            The graphics context
 * @param x
 *            The x co-ordinate
 * @param y
 *            The y co-ordinate
 * @return The x co-ordinate, plus the width of the painted string
 */
public static int paintSyntaxLine(Segment line, Token tokens, SyntaxStyle[] styles, TabExpander expander, Graphics gfx, int x, int y) {
    Font defaultFont = gfx.getFont();
    Color defaultColor = gfx.getColor();
    for (;;) {
        byte id = tokens.id;
        if (id == Token.END) {
            break;
        }
        int length = tokens.length;
        if (id == Token.NULL) {
            // Plain text: restore the default font/color only when they changed.
            if (!defaultColor.equals(gfx.getColor())) {
                gfx.setColor(defaultColor);
            }
            if (!defaultFont.equals(gfx.getFont())) {
                gfx.setFont(defaultFont);
            }
        } else {
            styles[id].setGraphicsFlags(gfx, defaultFont);
        }
        // Paint just this token's characters by munging the segment's
        // count/offset as documented above. (A local running `offset` in the
        // original was never used and has been removed.)
        line.count = length;
        x = Utilities.drawTabbedText(line, x, y, gfx, expander, 0);
        line.offset += length;
        tokens = tokens.next;
    }
    return x;
}
/** * {@inheritDoc} */ @Override public int getLastTokenTypeOnLine(Segment text, int initialTokenType) { // Last parameter doesn't matter if we're not painting. Token t = getTokenList(text, initialTokenType, 0); while (t.getNextToken()!=null) { t = t.getNextToken(); } return t.getType(); }
/**
 * Checks whether the span ending at <code>i</code> is a keyword; if so, emits
 * the preceding text as KEYWORD3 followed by the keyword token.
 *
 * @return always false
 */
private boolean doKeyword(Segment line, int i, char c) {
    int wordLen = i - lastKeyword;
    byte keywordId = keywords.lookup(line, lastKeyword, wordLen);
    if (keywordId != Token.NULL) {
        int plainLen = lastKeyword - lastOffset;
        if (plainLen != 0) {
            addToken(plainLen, Token.KEYWORD3);
        }
        addToken(wordLen, keywordId);
        lastOffset = i;
    }
    lastKeyword = i + 1;
    return false;
}
/**
 * Checks whether the span ending at <code>i</code> is a keyword; if so, emits
 * the preceding text as NULL followed by the keyword token.
 *
 * @return always false
 */
private boolean doKeyword(Segment line, int i, char c) {
    int wordLen = i - lastKeyword;
    byte keywordId = keywords.lookup(line, lastKeyword, wordLen);
    if (keywordId != Token.NULL) {
        int plainLen = lastKeyword - lastOffset;
        if (plainLen != 0) {
            addToken(plainLen, Token.NULL);
        }
        addToken(wordLen, keywordId);
        lastOffset = i;
    }
    lastKeyword = i + 1;
    return false;
}
/**
 * Returns the first token in the linked list of tokens generated
 * from <code>text</code>. This method must be implemented by
 * subclasses so they can correctly implement syntax highlighting.
 *
 * @param text The text from which to get tokens.
 * @param initialTokenType The token type we should start with.
 * @param startOffset The offset into the document at which
 *        <code>text</code> starts.
 * @return The first <code>Token</code> in a linked list representing
 *         the syntax highlighted text.
 */
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
    resetTokenList();
    // Maps segment-relative offsets produced by the lexer to document offsets.
    this.offsetShift = -text.offset + startOffset;

    // Start off in the proper state: resume inside a multi-line comment if
    // that is how the previous line ended.
    int state = Token.NULL;
    switch (initialTokenType) {
        case Token.COMMENT_MULTILINE:
            state = MLC;
            this.start = text.offset;
            break;
        /* No documentation comments */
        default:
            state = Token.NULL;
    }

    this.s = text;
    try {
        yyreset(this.zzReader);
        yybegin(state);
        return yylex();
    } catch (IOException ioe) {
        // ignore — cannot occur when lexing an in-memory segment
        return new DefaultToken();
    }
}
/** * Checks the token to give it the exact ID it deserves before being passed * up to the super method. * * @param segment <code>Segment</code> to get text from. * @param start Start offset in <code>segment</code> of token. * @param end End offset in <code>segment</code> of token. * @param tokenType The token's type. * @param startOffset The offset in the document at which the token occurs. */ @Override public void addToken(Segment segment, int start, int end, int tokenType, int startOffset) { switch (tokenType) { // Since reserved words, functions, and data types are all passed // into here as "identifiers," we have to see what the token // really is... case Token.IDENTIFIER: int value = this.wordsToHighlight.get(segment, start, end); if (value != -1) { tokenType = value; } break; case Token.WHITESPACE: case Token.SEPARATOR: case Token.OPERATOR: case Token.ERROR_IDENTIFIER: case Token.ERROR_NUMBER_FORMAT: case Token.ERROR_STRING_DOUBLE: case Token.ERROR_CHAR: case Token.COMMENT_EOL: case Token.COMMENT_MULTILINE: case Token.LITERAL_BOOLEAN: case Token.LITERAL_NUMBER_DECIMAL_INT: case Token.LITERAL_NUMBER_FLOAT: case Token.LITERAL_NUMBER_HEXADECIMAL: case Token.LITERAL_STRING_DOUBLE_QUOTE: case Token.LITERAL_CHAR: break; default: throw new IllegalArgumentException("Unknown tokenType: '" + tokenType + "'"); } super.addToken(segment, start, end, tokenType, startOffset); }
/** This helper method will return the start character of the next
 * word in the buffer from the start position */
private static int getNextWordStart(Segment text, int startPos) {
    // Guard: setIndex() would throw for an index past the segment's end.
    if (startPos > text.getEndIndex()) {
        return -1;
    }
    char ch = text.setIndex(startPos);
    while (ch != Segment.DONE) {
        if (Character.isLetterOrDigit(ch)) {
            return text.getIndex();
        }
        ch = text.next();
    }
    // Only whitespace/punctuation remained.
    return -1;
}
/** This helper method will return the end of the next word in the buffer.
 *
 * @param text the segment to scan
 * @param startPos index at which scanning starts
 * @return the index just past the last word character, or the segment's end
 *         index if the word runs to the end of the text
 */
private static int getNextWordEnd(Segment text, int startPos) {
    for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) {
        if (!Character.isLetterOrDigit(ch)) {
            if (ch == '-' || ch == '\'') { // handle ' and - inside words
                // Peek at the following char (next() then previous() restores
                // the iterator): ' and - count as word-internal only when
                // followed by a letter or digit.
                char ch2 = text.next();
                text.previous();
                if (ch2 != Segment.DONE && Character.isLetterOrDigit(ch2))
                    continue;
            }
            return text.getIndex();
        }
    }
    return text.getEndIndex();
}
/** * This helper method will return the start character of the next * word in the buffer from the start position * * @param text Description of the Parameter * @param startPos Description of the Parameter * @return The nextWordStart value */ private static int getNextWordStart(Segment text, int startPos) { if (startPos <= text.getEndIndex()) for (char ch = text.setIndex(startPos); ch != Segment.DONE; ch = text.next()) { // changed by Saruta if (Character.isLetterOrDigit(ch) || ch == '-' || ch == '\'' || ch == '~') { return text.getIndex(); } } return -1; }
/**
 * Removes any spaces or tabs from the end of the segment.
 *
 * @param segment The segment from which to remove tailing whitespace.
 * @return <code>segment</code> with trailing whitespace removed.
 */
private static Segment removeEndingWhitespace(Segment segment) {
    // Guard against an empty segment: setIndex(getEndIndex()-1) below would
    // throw IllegalArgumentException for an index before the begin index.
    if (segment.count == 0) {
        return segment;
    }
    int toTrim = 0;
    // Walk backwards from the last character, counting spaces and tabs.
    char currentChar = segment.setIndex(segment.getEndIndex()-1);
    while ((currentChar==' ' || currentChar=='\t') && currentChar!=Segment.DONE) {
        toTrim++;
        currentChar = segment.previous();
    }
    String stringVal = segment.toString();
    String newStringVal = stringVal.substring(0,stringVal.length()-toTrim);
    return new Segment(newStringVal.toCharArray(), 0, newStringVal.length());
}
/**
 * Returns the first token in the linked list of tokens generated
 * from <code>text</code>. This method must be implemented by
 * subclasses so they can correctly implement syntax highlighting.
 *
 * @param text The text from which to get tokens.
 * @param initialTokenType The token type we should start with.
 * @param startOffset The offset into the document at which
 *        <code>text</code> starts.
 * @return The first <code>Token</code> in a linked list representing
 *         the syntax highlighted text.
 */
@Override
public Token getTokenList(Segment text, int initialTokenType, int startOffset) {
    resetTokenList();
    // Maps segment-relative offsets produced by the lexer to document offsets.
    this.offsetShift = -text.offset + startOffset;

    // Start off in the proper state: only unterminated double-quoted strings
    // carry over across lines in this language.
    int state = Token.NULL;
    switch (initialTokenType) {
        /*case Token.COMMENT_MULTILINE: state = MLC; start = text.offset; break;
        case Token.COMMENT_DOCUMENTATION: state = DOCCOMMENT; start = text.offset; break;*/
        case Token.LITERAL_STRING_DOUBLE_QUOTE:
            state = STRING;
            start = text.offset;
            break;
        default:
            state = Token.NULL;
    }

    s = text;
    try {
        yyreset(zzReader);
        yybegin(state);
        return yylex();
    } catch (IOException ioe) {
        // Should never happen when lexing an in-memory segment.
        ioe.printStackTrace();
        return new TokenImpl();
    }
}
/** * Returns the first token in the linked list of tokens generated * from <code>text</code>. This method must be implemented by * subclasses so they can correctly implement syntax highlighting. * * @param text The text from which to get tokens. * @param initialTokenType The token type we should start with. * @param startOffset The offset into the document at which * <code>text</code> starts. * @return The first <code>Token</code> in a linked list representing * the syntax highlighted text. */ @Override public Token getTokenList(Segment text, int initialTokenType, int startOffset) { resetTokenList(); this.offsetShift = -text.offset + startOffset; // Start off in the proper state. int state = Token.NULL; switch (initialTokenType) { case Token.COMMENT_MULTILINE: state = MLC; start = text.offset; break; default: state = Token.NULL; } s = text; try { yyreset(zzReader); yybegin(state); return yylex(); } catch (IOException ioe) { ioe.printStackTrace(); return new TokenImpl(); } }
/** * Returns the first token in the linked list of tokens generated * from <code>text</code>. This method must be implemented by * subclasses so they can correctly implement syntax highlighting. * * @param text The text from which to get tokens. * @param initialTokenType The token type we should start with. * @param startOffset The offset into the document at which * <code>text</code> starts. * @return The first <code>Token</code> in a linked list representing * the syntax highlighted text. */ @Override public Token getTokenList(Segment text, int initialTokenType, int startOffset) { resetTokenList(); this.offsetShift = -text.offset + startOffset; // Start off in the proper state. int state = YYINITIAL; switch (initialTokenType) { case Token.LITERAL_STRING_DOUBLE_QUOTE: state = MULTILINE_STRING_DOUBLE; break; case Token.COMMENT_MULTILINE: state = MLC; break; default: state = YYINITIAL; } s = text; start = text.offset; try { yyreset(zzReader); yybegin(state); return yylex(); } catch (IOException ioe) { ioe.printStackTrace(); return new TokenImpl(); } }
/** * Returns the first token in the linked list of tokens generated * from <code>text</code>. This method must be implemented by * subclasses so they can correctly implement syntax highlighting. * * @param text The text from which to get tokens. * @param initialTokenType The token type we should start with. * @param startOffset The offset into the document at which * <code>text</code> starts. * @return The first <code>Token</code> in a linked list representing * the syntax highlighted text. */ @Override public Token getTokenList(Segment text, int initialTokenType, int startOffset) { resetTokenList(); this.offsetShift = -text.offset + startOffset; // Start off in the proper state. int state = YYINITIAL; switch (initialTokenType) { case Token.LITERAL_STRING_DOUBLE_QUOTE: state = STRING; break; case Token.LITERAL_CHAR: state = CHAR_LITERAL; break; case Token.LITERAL_BACKQUOTE: state = BACKTICKS; break; case Token.COMMENT_MULTILINE: state = MLC; break; } start = text.offset; s = text; try { yyreset(zzReader); yybegin(state); return yylex(); } catch (IOException ioe) { ioe.printStackTrace(); return new TokenImpl(); } }
/** * Checks the token to give it the exact ID it deserves before * being passed up to the super method. * * @param segment <code>Segment</code> to get text from. * @param start Start offset in <code>segment</code> of token. * @param end End offset in <code>segment</code> of token. * @param tokenType The token's type. * @param startOffset The offset in the document at which the token occurs. */ @Override public void addToken(Segment segment, int start, int end, int tokenType, int startOffset) { switch (tokenType) { // Since reserved words, functions, and data types are all passed into here // as "identifiers," we have to see what the token really is... case Token.IDENTIFIER: int value = wordsToHighlight.get(segment, start,end); if (value!=-1) tokenType = value; break; case Token.WHITESPACE: case Token.SEPARATOR: case Token.OPERATOR: case Token.LITERAL_NUMBER_DECIMAL_INT: case Token.LITERAL_STRING_DOUBLE_QUOTE: case Token.LITERAL_CHAR: case Token.LITERAL_BACKQUOTE: case Token.COMMENT_EOL: case Token.PREPROCESSOR: case Token.VARIABLE: break; default: tokenType = Token.IDENTIFIER; break; } super.addToken(segment, start, end, tokenType, startOffset); }