Java 类org.yaml.snakeyaml.tokens.Token 实例源码

项目:AndroidApktool    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:AndroidApktool    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:AndroidApktool    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
public boolean checkToken(Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}
项目:AndroidApktool    文件:ScannerImpl.java   
/**
 * Emit the final STREAM-END token and mark scanning as complete.
 */
private void fetchStreamEnd() {
    // Set the current indentation to -1, unwinding all open block levels.
    unwindIndent(-1);

    // Reset simple keys: none may start once the stream has ended.
    removePossibleSimpleKey();
    this.allowSimpleKey = false;
    this.possibleSimpleKeys.clear();

    // Both marks of the token point at the current (final) position.
    Mark mark = reader.getMark();

    // Add STREAM-END.
    Token token = new StreamEndToken(mark, mark);
    this.tokens.add(token);

    // The stream is finished; no further tokens will be fetched.
    this.done = true;
}
项目:AndroidApktool    文件:ScannerImpl.java   
/**
 * Scan a flow-style scalar, i.e. a single-quoted or a double-quoted string.
 * <p>
 * Indentation rules are relaxed for quoted scalars because the quote
 * characters unambiguously mark where the scalar begins and ends; this is
 * less restrictive than the specification requires. The only remaining
 * check is that document separators do not appear inside the scalar.
 *
 * @see <a href="http://www.yaml.org/spec/1.1/#flow">flow style/syntax</a>
 */
private Token scanFlowScalar(char style) {
    // Only double-quoted scalars support escape sequences.
    boolean doubleQuoted = (style == '"');
    Mark startMark = reader.getMark();
    int quote = reader.peek();
    reader.forward();
    StringBuilder chunks = new StringBuilder();
    chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    // Alternate space and non-space runs until the closing quote appears.
    while (reader.peek() != quote) {
        chunks.append(scanFlowScalarSpaces(startMark));
        chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    }
    reader.forward();
    Mark endMark = reader.getMark();
    return new ScalarToken(chunks.toString(), false, startMark, endMark, style);
}
项目:5zig-TIMV-Plugin    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:5zig-TIMV-Plugin    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:5zig-TIMV-Plugin    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
public boolean checkToken(Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}
项目:5zig-TIMV-Plugin    文件:ScannerImpl.java   
/**
 * Emit the final STREAM-END token and mark scanning as complete.
 */
private void fetchStreamEnd() {
    // Set the current indentation to -1, unwinding all open block levels.
    unwindIndent(-1);

    // Reset simple keys: none may start once the stream has ended.
    removePossibleSimpleKey();
    this.allowSimpleKey = false;
    this.possibleSimpleKeys.clear();

    // Both marks of the token point at the current (final) position.
    Mark mark = reader.getMark();

    // Add STREAM-END.
    Token token = new StreamEndToken(mark, mark);
    this.tokens.add(token);

    // The stream is finished; no further tokens will be fetched.
    this.done = true;
}
项目:5zig-TIMV-Plugin    文件:ScannerImpl.java   
/**
 * Scan a flow-style scalar, i.e. a single-quoted or a double-quoted string.
 * <p>
 * Indentation rules are relaxed for quoted scalars because the quote
 * characters unambiguously mark where the scalar begins and ends; this is
 * less restrictive than the specification requires. The only remaining
 * check is that document separators do not appear inside the scalar.
 *
 * @see <a href="http://www.yaml.org/spec/1.1/#flow">flow style/syntax</a>
 */
private Token scanFlowScalar(char style) {
    // Only double-quoted scalars support escape sequences.
    boolean doubleQuoted = (style == '"');
    Mark startMark = reader.getMark();
    int quote = reader.peek();
    reader.forward();
    StringBuilder chunks = new StringBuilder();
    chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    // Alternate space and non-space runs until the closing quote appears.
    while (reader.peek() != quote) {
        chunks.append(scanFlowScalarSpaces(startMark));
        chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    }
    reader.forward();
    Mark endMark = reader.getMark();
    return new ScalarToken(chunks.toString(), false, startMark, endMark, style);
}
项目:snake-yaml    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:snake-yaml    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:snake-yaml    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
public boolean checkToken(Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}
项目:snake-yaml    文件:ScannerImpl.java   
/**
 * Emit the final STREAM-END token and mark scanning as complete.
 */
private void fetchStreamEnd() {
    // Set the current indentation to -1, unwinding all open block levels.
    unwindIndent(-1);

    // Reset simple keys: none may start once the stream has ended.
    removePossibleSimpleKey();
    this.allowSimpleKey = false;
    this.possibleSimpleKeys.clear();

    // Both marks of the token point at the current (final) position.
    Mark mark = reader.getMark();

    // Add STREAM-END.
    Token token = new StreamEndToken(mark, mark);
    this.tokens.add(token);

    // The stream is finished; no further tokens will be fetched.
    this.done = true;
}
项目:snake-yaml    文件:ScannerImpl.java   
/**
 * Scan a flow-style scalar, i.e. a single-quoted or a double-quoted string.
 * <p>
 * Indentation rules are relaxed for quoted scalars because the quote
 * characters unambiguously mark where the scalar begins and ends; this is
 * less restrictive than the specification requires. The only remaining
 * check is that document separators do not appear inside the scalar.
 *
 * @see <a href="http://www.yaml.org/spec/1.1/#flow">flow style/syntax</a>
 */
private Token scanFlowScalar(char style) {
    // Only double-quoted scalars support escape sequences.
    boolean doubleQuoted = (style == '"');
    Mark startMark = reader.getMark();
    int quote = reader.peek();
    reader.forward();
    StringBuilder chunks = new StringBuilder();
    chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    // Alternate space and non-space runs until the closing quote appears.
    while (reader.peek() != quote) {
        chunks.append(scanFlowScalarSpaces(startMark));
        chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    }
    reader.forward();
    Mark endMark = reader.getMark();
    return new ScalarToken(chunks.toString(), false, startMark, endMark, style);
}
项目:snake-yaml    文件:ScannerImplTest.java   
/**
 * Verify the scanner produces the expected token stream for a one-pair
 * block mapping and reports exhaustion afterwards.
 */
public void testGetToken() {
    String data = "string: abcd";
    Scanner scanner = new ScannerImpl(new StreamReader(data));
    Mark dummy = new Mark("dummy", 0, 0, 0, "", 0);
    // Expected token sequence for "string: abcd".
    LinkedList<Token> expected = new LinkedList<Token>();
    expected.add(new StreamStartToken(dummy, dummy));
    expected.add(new BlockMappingStartToken(dummy, dummy));
    expected.add(new KeyToken(dummy, dummy));
    expected.add(new ScalarToken("string", true, dummy, dummy, (char) 0));
    expected.add(new ValueToken(dummy, dummy));
    expected.add(new ScalarToken("abcd", true, dummy, dummy, (char) 0));
    expected.add(new BlockEndToken(dummy, dummy));
    expected.add(new StreamEndToken(dummy, dummy));
    // Consume tokens one by one, comparing against the expectation list.
    while (!expected.isEmpty() && scanner.checkToken(expected.get(0).getTokenId())) {
        assertEquals(expected.removeFirst(), scanner.getToken());
    }
    assertFalse("Must contain no more tokens: " + scanner.getToken(),
            scanner.checkToken(new Token.ID[0]));
}
项目:snake-yaml    文件:CanonicalScanner.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token remains.
 */
public boolean checkToken(Token.ID... choices) {
    // The canonical scanner tokenizes the whole input on first use.
    if (!scanned) {
        scan();
    }
    if (tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (Token.ID choice : choices) {
        if (firstId == choice) {
            return true;
        }
    }
    return false;
}
项目:snake-yaml    文件:CanonicalScanner.java   
/**
 * Scan a canonical tag token starting at the current position.
 */
private Token scanTag() {
    index++; // step past the leading '!'
    int start = index;
    // The tag body runs until a space, newline or NUL terminator.
    while (" \n\0".indexOf(data.charAt(index)) == -1) {
        index++;
    }
    String value = data.substring(start, index);
    if (value.isEmpty()) {
        value = "!";
    } else if (value.charAt(0) == '!') {
        // "!!suffix" form: expand with the default tag prefix.
        value = Tag.PREFIX + value.substring(1);
    } else if (value.charAt(0) == '<' && value.charAt(value.length() - 1) == '>') {
        // "<uri>" form: strip the angle brackets.
        value = value.substring(1, value.length() - 1);
    } else {
        value = "!" + value;
    }
    return new TagToken(new TagTuple("", value), mark, mark);
}
项目:SubServers-2    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:SubServers-2    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:SubServers-2    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
public boolean checkToken(Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}
项目:SubServers-2    文件:ScannerImpl.java   
/**
 * Emit the final STREAM-END token and mark scanning as complete.
 */
private void fetchStreamEnd() {
    // Set the current indentation to -1, unwinding all open block levels.
    unwindIndent(-1);

    // Reset simple keys: none may start once the stream has ended.
    removePossibleSimpleKey();
    this.allowSimpleKey = false;
    this.possibleSimpleKeys.clear();

    // Both marks of the token point at the current (final) position.
    Mark mark = reader.getMark();

    // Add STREAM-END.
    Token token = new StreamEndToken(mark, mark);
    this.tokens.add(token);

    // The stream is finished; no further tokens will be fetched.
    this.done = true;
}
项目:SubServers-2    文件:ScannerImpl.java   
/**
 * Scan a flow-style scalar, i.e. a single-quoted or a double-quoted string.
 * <p>
 * Indentation rules are relaxed for quoted scalars because the quote
 * characters unambiguously mark where the scalar begins and ends; this is
 * less restrictive than the specification requires. The only remaining
 * check is that document separators do not appear inside the scalar.
 *
 * @see <a href="http://www.yaml.org/spec/1.1/#flow">flow style/syntax</a>
 */
private Token scanFlowScalar(char style) {
    // Only double-quoted scalars support escape sequences.
    boolean doubleQuoted = (style == '"');
    Mark startMark = reader.getMark();
    int quote = reader.peek();
    reader.forward();
    StringBuilder chunks = new StringBuilder();
    chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    // Alternate space and non-space runs until the closing quote appears.
    while (reader.peek() != quote) {
        chunks.append(scanFlowScalarSpaces(startMark));
        chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    }
    reader.forward();
    Mark endMark = reader.getMark();
    return new ScalarToken(chunks.toString(), false, startMark, endMark, style);
}
项目:intellij-snakeyaml    文件:YamlLexer.java   
/**
 * Advance the lexer to the next token, synthesizing virtual whitespace
 * tokens for the gaps between real scanner tokens.
 */
@Override
public void advance() {
    Token token;

    // Forward only on non-whitespace (whitespace is virtual: the scanner
    // never emitted it, so there is nothing to consume for it).
    if (myToken.getTokenId() != Token.ID.Whitespace)
        token = myScanner.getToken();
    else
        token = myScanner.peekToken();


    // NOTE(review): a null token appears to signal scanner exhaustion here —
    // confirm against the scanner implementation in use.
    if (token == null || token.getTokenId().equals(Token.ID.StreamEnd))
        myToken = null;
    else {
        Token nextToken = myScanner.peekToken();
        // A gap between this token's end and the next token's start means
        // untokenized characters lie between them; guess their type.
        if (!nextToken.getTokenId().equals(Token.ID.StreamEnd) && token.getEndMark().getIndex() < nextToken.getStartMark().getIndex())
            myToken = guessTokenType(token, nextToken);
        else
            myToken = nextToken;
    }
}
项目:snakeyaml    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:snakeyaml    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:snakeyaml    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
public boolean checkToken(Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}
项目:snakeyaml    文件:ScannerImpl.java   
/**
 * Emit the final STREAM-END token and mark scanning as complete.
 */
private void fetchStreamEnd() {
    // Set the current indentation to -1, unwinding all open block levels.
    unwindIndent(-1);

    // Reset simple keys: none may start once the stream has ended.
    removePossibleSimpleKey();
    this.allowSimpleKey = false;
    this.possibleSimpleKeys.clear();

    // Both marks of the token point at the current (final) position.
    Mark mark = reader.getMark();

    // Add STREAM-END.
    Token token = new StreamEndToken(mark, mark);
    this.tokens.add(token);

    // The stream is finished; no further tokens will be fetched.
    this.done = true;
}
项目:snakeyaml    文件:ScannerImpl.java   
/**
 * Scan a flow-style scalar, i.e. a single-quoted or a double-quoted string.
 * <p>
 * Indentation rules are relaxed for quoted scalars because the quote
 * characters unambiguously mark where the scalar begins and ends; this is
 * less restrictive than the specification requires. The only remaining
 * check is that document separators do not appear inside the scalar.
 *
 * @see <a href="http://www.yaml.org/spec/1.1/#flow">flow style/syntax</a>
 */
private Token scanFlowScalar(char style) {
    // Only double-quoted scalars support escape sequences.
    boolean doubleQuoted = (style == '"');
    Mark startMark = reader.getMark();
    int quote = reader.peek();
    reader.forward();
    StringBuilder chunks = new StringBuilder();
    chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    // Alternate space and non-space runs until the closing quote appears.
    while (reader.peek() != quote) {
        chunks.append(scanFlowScalarSpaces(startMark));
        chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    }
    reader.forward();
    Mark endMark = reader.getMark();
    return new ScalarToken(chunks.toString(), false, startMark, endMark, style);
}
项目:snakeyaml    文件:ScannerImplTest.java   
/**
 * Verify the scanner produces the expected token stream for a one-pair
 * block mapping and reports exhaustion afterwards.
 */
public void testGetToken() {
    String data = "string: abcd";
    Scanner scanner = new ScannerImpl(new StreamReader(data));
    Mark dummy = new Mark("dummy", 0, 0, 0, "", 0);
    // Expected token sequence for "string: abcd".
    LinkedList<Token> expected = new LinkedList<Token>();
    expected.add(new StreamStartToken(dummy, dummy));
    expected.add(new BlockMappingStartToken(dummy, dummy));
    expected.add(new KeyToken(dummy, dummy));
    expected.add(new ScalarToken("string", true, dummy, dummy, (char) 0));
    expected.add(new ValueToken(dummy, dummy));
    expected.add(new ScalarToken("abcd", true, dummy, dummy, (char) 0));
    expected.add(new BlockEndToken(dummy, dummy));
    expected.add(new StreamEndToken(dummy, dummy));
    // Consume tokens one by one, comparing against the expectation list.
    while (!expected.isEmpty() && scanner.checkToken(expected.get(0).getTokenId())) {
        assertEquals(expected.removeFirst(), scanner.getToken());
    }
    assertFalse("Must contain no more tokens: " + scanner.getToken(),
            scanner.checkToken(new Token.ID[0]));
}
项目:snakeyaml    文件:CanonicalScanner.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token remains.
 */
public boolean checkToken(Token.ID... choices) {
    // The canonical scanner tokenizes the whole input on first use.
    if (!scanned) {
        scan();
    }
    if (tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (Token.ID choice : choices) {
        if (firstId == choice) {
            return true;
        }
    }
    return false;
}
项目:snakeyaml    文件:CanonicalScanner.java   
/**
 * Scan a canonical tag token starting at the current position.
 */
private Token scanTag() {
    index++; // step past the leading '!'
    int start = index;
    // The tag body runs until a space, newline or NUL terminator.
    while (" \n\0".indexOf(data.charAt(index)) == -1) {
        index++;
    }
    String value = data.substring(start, index);
    if (value.isEmpty()) {
        value = "!";
    } else if (value.charAt(0) == '!') {
        // "!!suffix" form: expand with the default tag prefix.
        value = Tag.PREFIX + value.substring(1);
    } else if (value.charAt(0) == '<' && value.charAt(value.length() - 1) == '>') {
        // "<uri>" form: strip the angle brackets.
        value = value.substring(1, value.length() - 1);
    } else {
        value = "!" + value;
    }
    return new TagToken(new TagTuple("", value), mark, mark);
}
项目:TestTheTeacher    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:TestTheTeacher    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:TestTheTeacher    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
public boolean checkToken(Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}
项目:TestTheTeacher    文件:ScannerImpl.java   
/**
 * Emit the final STREAM-END token and mark scanning as complete.
 */
private void fetchStreamEnd() {
    // Set the current indentation to -1, unwinding all open block levels.
    unwindIndent(-1);

    // Reset simple keys: none may start once the stream has ended.
    removePossibleSimpleKey();
    this.allowSimpleKey = false;
    this.possibleSimpleKeys.clear();

    // Both marks of the token point at the current (final) position.
    Mark mark = reader.getMark();

    // Add STREAM-END.
    Token token = new StreamEndToken(mark, mark);
    this.tokens.add(token);

    // The stream is finished; no further tokens will be fetched.
    this.done = true;
}
项目:TestTheTeacher    文件:ScannerImpl.java   
/**
 * Scan a flow-style scalar, i.e. a single-quoted or a double-quoted string.
 * <p>
 * Indentation rules are relaxed for quoted scalars because the quote
 * characters unambiguously mark where the scalar begins and ends; this is
 * less restrictive than the specification requires. The only remaining
 * check is that document separators do not appear inside the scalar.
 *
 * @see <a href="http://www.yaml.org/spec/1.1/#flow">flow style/syntax</a>
 */
private Token scanFlowScalar(char style) {
    // Only double-quoted scalars support escape sequences.
    boolean doubleQuoted = (style == '"');
    Mark startMark = reader.getMark();
    int quote = reader.peek();
    reader.forward();
    StringBuilder chunks = new StringBuilder();
    chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    // Alternate space and non-space runs until the closing quote appears.
    while (reader.peek() != quote) {
        chunks.append(scanFlowScalarSpaces(startMark));
        chunks.append(scanFlowScalarNonSpaces(doubleQuoted, startMark));
    }
    reader.forward();
    Mark endMark = reader.getMark();
    return new ScalarToken(chunks.toString(), false, startMark, endMark, style);
}
项目:org.openntf.domino    文件:ParserImpl.java   
/**
 * Produce the start event of an implicit document, or delegate to the
 * explicit document-start production when a directive or marker follows.
 */
@Override
public Event produce() {
    if (scanner.checkToken(Token.ID.Directive, Token.ID.DocumentStart, Token.ID.StreamEnd)) {
        // An explicit document follows: hand over to the full production.
        return new ParseDocumentStart().produce();
    }
    // Implicit document: no directives, default tag handles only.
    directives = new VersionTagsTuple(null, DEFAULT_TAGS);
    Mark mark = scanner.peekToken().getStartMark();
    // Queue the follow-up parser states before emitting the event.
    states.push(new ParseDocumentEnd());
    state = new ParseBlockNode();
    return new DocumentStartEvent(mark, mark, false, null, null);
}
项目:org.openntf.domino    文件:ParserImpl.java   
/**
 * Produce a DocumentEndEvent, consuming an explicit end marker if present.
 */
@Override
public Event produce() {
    Token next = scanner.peekToken();
    Mark start = next.getStartMark();
    Mark end = start;
    boolean explicit = false;
    if (scanner.checkToken(Token.ID.DocumentEnd)) {
        // Explicit end marker: consume it and widen the event span.
        end = scanner.getToken().getEndMark();
        explicit = true;
    }
    // A following document (if any) restarts the document-start cycle.
    state = new ParseDocumentStart();
    return new DocumentEndEvent(start, end, explicit);
}
项目:org.openntf.domino    文件:ScannerImpl.java   
/**
 * Tell whether the next token is of one of the given types; with no
 * arguments, tell whether any token is available at all.
 */
@Override
public boolean checkToken(final Token.ID... choices) {
    while (needMoreTokens()) {
        fetchMoreTokens();
    }
    if (this.tokens.isEmpty()) {
        return false;
    }
    if (choices.length == 0) {
        // No restriction given: any pending token matches.
        return true;
    }
    // Indexed loop kept on purpose: profiling shows this method is extremely
    // hot, and a for-each would allocate an iterator on every call.
    Token.ID firstId = this.tokens.get(0).getTokenId();
    for (int i = 0; i < choices.length; i++) {
        if (choices[i] == firstId) {
            return true;
        }
    }
    return false;
}