Java class org.yaml.snakeyaml.tokens.StreamStartToken: example source code

Projects: snake-yaml, snakeyaml    File: ScannerImplTest.java
public void testGetToken() {
    String data = "string: abcd";
    StreamReader reader = new StreamReader(data);
    Scanner scanner = new ScannerImpl(reader);
    Mark dummy = new Mark("dummy", 0, 0, 0, "", 0);
    // "etalon" (reference) tokens: the exact sequence the scanner must emit
    LinkedList<Token> etalonTokens = new LinkedList<Token>();
    etalonTokens.add(new StreamStartToken(dummy, dummy));
    etalonTokens.add(new BlockMappingStartToken(dummy, dummy));
    etalonTokens.add(new KeyToken(dummy, dummy));
    etalonTokens.add(new ScalarToken("string", true, dummy, dummy, (char) 0));
    etalonTokens.add(new ValueToken(dummy, dummy));
    etalonTokens.add(new ScalarToken("abcd", true, dummy, dummy, (char) 0));
    etalonTokens.add(new BlockEndToken(dummy, dummy));
    etalonTokens.add(new StreamEndToken(dummy, dummy));
    while (!etalonTokens.isEmpty() && scanner.checkToken(etalonTokens.get(0).getTokenId())) {
        assertEquals(etalonTokens.removeFirst(), scanner.getToken());
    }
    assertFalse("Must contain no more tokens: " + scanner.getToken(),
            scanner.checkToken(new Token.ID[0]));
}
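
The test above pins down the exact token sequence for a tiny document. As a quick sanity check outside a test harness, the same scanner can be driven directly to print every token ID. This is a minimal sketch, assuming the same snakeyaml version as the listings on this page (where ScannerImpl is constructed from a bare StreamReader); the class name TokenDump is illustrative only.

import org.yaml.snakeyaml.reader.StreamReader;
import org.yaml.snakeyaml.scanner.Scanner;
import org.yaml.snakeyaml.scanner.ScannerImpl;
import org.yaml.snakeyaml.tokens.Token;

public class TokenDump {
    public static void main(String[] args) {
        Scanner scanner = new ScannerImpl(new StreamReader("string: abcd"));
        // checkToken with no IDs simply asks whether any token is left,
        // so this loop walks the whole stream: STREAM-START comes first,
        // STREAM-END last.
        while (scanner.checkToken(new Token.ID[0])) {
            System.out.println(scanner.getToken().getTokenId());
        }
    }
}
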
Projects: AndroidApktool, 5zig-TIMV-Plugin, snake-yaml, SubServers-2, snakeyaml, TestTheTeacher, org.openntf.domino    File: ParserImpl.java (the org.openntf.domino copy also marks the method @Override)
public Event produce() {
    // Parse the stream start.
    StreamStartToken token = (StreamStartToken) scanner.getToken();
    Event event = new StreamStartEvent(token.getStartMark(), token.getEndMark());
    // Prepare the next state.
    state = new ParseImplicitDocumentStart();
    return event;
}
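
At the event level, the STREAM-START token surfaces as the StreamStartEvent built in produce() above. A minimal sketch of how to observe it, assuming snakeyaml's public Yaml.parse(Reader) API (the class name EventDump is illustrative only):

import java.io.StringReader;

import org.yaml.snakeyaml.Yaml;
import org.yaml.snakeyaml.events.Event;

public class EventDump {
    public static void main(String[] args) {
        // The first event of every parse is the StreamStartEvent produced
        // from the STREAM-START token, and the last is a stream-end event.
        for (Event event : new Yaml().parse(new StringReader("string: abcd"))) {
            System.out.println(event);
        }
    }
}
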
Projects: AndroidApktool, 5zig-TIMV-Plugin, snake-yaml, SubServers-2, snakeyaml, TestTheTeacher, org.openntf.domino    File: ScannerImpl.java
/**
 * We always add STREAM-START as the first token and STREAM-END as the last
 * token.
 */
private void fetchStreamStart() {
    // Read the token.
    Mark mark = reader.getMark();

    // Add STREAM-START.
    Token token = new StreamStartToken(mark, mark);
    this.tokens.add(token);
}
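
As the identical marks above suggest, STREAM-START is a zero-width token: it records a position rather than a span of input. A small sketch illustrating this, reusing the String-buffer Mark constructor from the test at the top of this page (the class name ZeroWidthToken is illustrative only):

import org.yaml.snakeyaml.error.Mark;
import org.yaml.snakeyaml.tokens.StreamStartToken;
import org.yaml.snakeyaml.tokens.Token;

public class ZeroWidthToken {
    public static void main(String[] args) {
        // fetchStreamStart passes the same Mark as both ends of the token.
        Mark mark = new Mark("example", 0, 0, 0, "", 0);
        StreamStartToken token = new StreamStartToken(mark, mark);
        System.out.println(token.getTokenId() == Token.ID.StreamStart); // true
        System.out.println(token.getStartMark() == token.getEndMark()); // true
    }
}
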
Projects: snake-yaml, snakeyaml    File: CanonicalScanner.java
private void scan() {
    this.tokens.add(new StreamStartToken(mark, mark));
    boolean stop = false;
    while (!stop) {
        findToken();
        char ch = data.charAt(index);
        switch (ch) {
        case '\0':
            tokens.add(new StreamEndToken(mark, mark));
            stop = true;
            break;

        case '%':
            tokens.add(scanDirective());
            break;

        case '-':
            // Canonical output always spells the full "---" document marker.
            if ("---".equals(data.substring(index, index + 3))) {
                index += 3;
                tokens.add(new DocumentStartToken(mark, mark));
            }
            break;

        case '[':
            index++;
            tokens.add(new FlowSequenceStartToken(mark, mark));
            break;

        case '{':
            index++;
            tokens.add(new FlowMappingStartToken(mark, mark));
            break;

        case ']':
            index++;
            tokens.add(new FlowSequenceEndToken(mark, mark));
            break;

        case '}':
            index++;
            tokens.add(new FlowMappingEndToken(mark, mark));
            break;

        case '?':
            index++;
            tokens.add(new KeyToken(mark, mark));
            break;

        case ':':
            index++;
            tokens.add(new ValueToken(mark, mark));
            break;

        case ',':
            index++;
            tokens.add(new FlowEntryToken(mark, mark));
            break;

        case '*':
        case '&':
            // Aliases ('*') and anchors ('&') share one scanning routine.
            tokens.add(scanAlias());
            break;

        case '!':
            tokens.add(scanTag());
            break;

        case '"':
            tokens.add(scanScalar());
            break;

        default:
            throw new CanonicalException("invalid token");
        }
    }
    scanned = true;
}
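
CanonicalScanner lives in snakeyaml's test sources rather than in the published jar. The sketch below assumes its single-argument String constructor, which appends the '\0' terminator that the scan loop above watches for; the input shows the canonical form the switch is built around (a %YAML directive, an explicit ---, flow collections, and ?/: key and value markers). The class name CanonicalDump is illustrative only.

import org.yaml.snakeyaml.tokens.Token;

public class CanonicalDump {
    public static void main(String[] args) {
        // Assumption: CanonicalScanner(String) from snakeyaml's test sources.
        String canonical = "%YAML 1.1\n---\n!!map { ? !!str \"string\" : !!str \"abcd\" }";
        CanonicalScanner scanner = new CanonicalScanner(canonical);
        // Same iteration idiom as with ScannerImpl: STREAM-START arrives
        // first and STREAM-END ends the loop.
        while (scanner.checkToken(new Token.ID[0])) {
            System.out.println(scanner.getToken().getTokenId());
        }
    }
}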