private Writer getWriter(OptionManager options, SchemaConfig.SchemaInfoProvider infoProvider) throws IOException {
  final String storeTablePath = options.getOption(QUERY_RESULTS_STORE_TABLE.getOptionName()).string_val;
  final List<String> storeTable =
      new StrTokenizer(storeTablePath, '.', ParserConfig.QUOTING.string.charAt(0))
          .setIgnoreEmptyTokens(true)
          .getTokenList();

  // store query results as the system user
  final SchemaPlus systemUserSchema = context.getRootSchema(
      SchemaConfig
          .newBuilder(SystemUser.SYSTEM_USERNAME)
          .setProvider(infoProvider)
          .build());
  final AbstractSchema schema = SchemaUtilities.resolveToMutableSchemaInstance(systemUserSchema,
      Util.skipLast(storeTable), true, MutationType.TABLE);

  // Query results are stored in arrow format. If need arises, we can change
  // this to a configuration option.
  final Map<String, Object> storageOptions =
      ImmutableMap.<String, Object>of("type", ArrowFormatPlugin.ARROW_DEFAULT_NAME);

  final CreateTableEntry createTableEntry =
      schema.createNewTable(Util.last(storeTable), WriterOptions.DEFAULT, storageOptions);
  return createTableEntry.getWriter(null);
}
/**
 * Splits string x into tokens. Effectively just a friendly wrapper around StrTokenizer.
 * Use *single* quotes to prevent splitting.
 */
public static ArrayList<String> tokenize(String x, String delimiterString) {
  if (x == null) {
    return null;
  }

  // This is a hack to allow empty tokens to be passed at the command line:
  // an empty token is written as '' and rewritten here as ' ' so the tokenizer
  // keeps it; the trim below turns it back into an empty string.
  x = x.replace("''", "' '");

  // See also http://stackoverflow.com/questions/38161437/inconsistent-behaviour-of-strtokenizer-to-split-string
  StrTokenizer str = new StrTokenizer(x);
  str.setTrimmerMatcher(StrMatcher.spaceMatcher());
  str.setDelimiterString(delimiterString);
  str.setQuoteChar('\'');
  // str.setIgnoreEmptyTokens(false);
  ArrayList<String> tokens = (ArrayList<String>) str.getTokenList();
  for (int i = 0; i < tokens.size(); i++) {
    String tok = tokens.get(i).trim();
    tokens.set(i, tok);
  }
  return tokens;
}
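A minimal usage sketch of the method above (hypothetical inputs, not from the original source): single quotes keep a token together, and the '' hack preserves empty tokens.

List<String> t1 = tokenize("norm 'tumor sample' 10", " ");
// t1 -> [norm, tumor sample, 10]
List<String> t2 = tokenize("a '' b", " ");
// '' is first rewritten to ' ', tokenized as a quoted space, then trimmed back:
// t2 -> [a, , b]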
public static final FieldPath fromString(String string) {
  if (StringUtils.isBlank(string)) {
    return null;
  } else {
    List<FieldPathComponent> components = Lists.newLinkedList();
    StrTokenizer tokenizer = new StrTokenizer(string, DELIMITER_MATCHER);
    for (String token : tokenizer.getTokenList()) {
      if (ITEM_TOKEN.equals(token)) {
        components.add(FieldPathComponent.ITEM);
      } else {
        components.add(new FieldPathPropertyComponent(token));
      }
    }
    return new FieldPath(components);
  }
}
private long test(StrTokenizer tokenizer, File source) throws IOException {
  FileInputStream fis = new FileInputStream(source);
  InputStreamReader reader = new InputStreamReader(fis, "utf8");
  BufferedReader br = new BufferedReader(reader);

  // keep track of time while iterating
  long start = System.currentTimeMillis();
  String row = br.readLine();
  while (row != null) {
    tokenizer.reset(row);
    String[] columns = tokenizer.getTokenArray();
    row = br.readLine();
  }
  long dur = System.currentTimeMillis() - start;
  br.close();
  return dur;
}
@Test
public void testCsvUnquoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString(",");
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121,432423, 9099053,Frieda karla L.,DC.,Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.", columns[3]);
  assertEquals("DC.", columns[4]);
  assertEquals("Ahrens", columns[5]);

  tokenizer.reset(",,,,zzz ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);
}
private Map<String, String> parseEnvironmentVariables(final BuildListener listener) throws AbortException {
  Map<String, String> mapOfEnvironmentVariables = new HashMap<String, String>();
  for (String environmentVariable : new StrTokenizer(environmentVariables, spaceMatcher(), quoteMatcher()).getTokenList()) {
    if (environmentVariable.contains("=")) {
      String[] parts = environmentVariable.split("=", 2);
      mapOfEnvironmentVariables.put(parts[0], parts[1]);
    } else {
      abort(listener, "Invalid environment variable: " + environmentVariable);
    }
  }
  return mapOfEnvironmentVariables;
}
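A hedged sketch of what this parses (hypothetical input; spaceMatcher() and quoteMatcher() are the StrMatcher factories imported statically above): space-separated NAME=value pairs, with quotes protecting embedded spaces.

List<String> vars = new StrTokenizer("FOO=bar \"GREETING=hello world\"",
    StrMatcher.spaceMatcher(), StrMatcher.quoteMatcher()).getTokenList();
// vars -> [FOO=bar, GREETING=hello world]; each entry is then split on the first '='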
@Override
protected int executeCommand(String line) {
  String[] tokens = new StrTokenizer(line).getTokenArray();
  String action = tokens[0];
  String[] actionArgs = Arrays.copyOfRange(tokens, 1, tokens.length);
  if (logger.isDebugEnabled()) {
    logger.debug("Executing command action: {}, Tokens: {}", action, tokens.length);
  }
  Command<StratosCommandContext> command = commands.get(action);
  if (command == null) {
    System.out.println(action + ": command not found.");
    return CliConstants.COMMAND_FAILED;
  }
  try {
    return command.execute(context, actionArgs, new Option[0]);
  } catch (CommandException e) {
    if (logger.isErrorEnabled()) {
      logger.error("Error executing command: " + action, e);
    }
    return CliConstants.ERROR_CODE;
  }
}
public static void apacheCommonsTokenizer(String text) {
  StrTokenizer tokenizer = new StrTokenizer(text, ",");
  while (tokenizer.hasNext()) {
    out.println(tokenizer.next());
  }
}
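Called with a simple comma-separated string (the snippet assumes a static import of System.out), this prints one token per line:

apacheCommonsTokenizer("red,green,blue");
// prints:
// red
// green
// blue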
/**
 * When enabled, adds a writer rel on top of the given rel to catch the output and write it to
 * the configured store table.
 *
 * @param inputRel the rel whose output should be captured
 * @return the original rel, or a WriterRel wrapping it when result storing is enabled
 */
public static Rel storeQueryResultsIfNeeded(final SqlParser.Config config, final QueryContext context,
    final Rel inputRel) {
  final OptionManager options = context.getOptions();
  final boolean storeResults = options.getOption(STORE_QUERY_RESULTS.getOptionName()) != null
      ? options.getOption(STORE_QUERY_RESULTS.getOptionName()).bool_val
      : false;
  if (!storeResults) {
    return inputRel;
  }

  // store query results as the system user
  final SchemaPlus systemUserSchema = context.getRootSchema(
      SchemaConfig.newBuilder(SystemUser.SYSTEM_USERNAME)
          .setProvider(context.getSchemaInfoProvider())
          .build());

  final String storeTablePath = options.getOption(QUERY_RESULTS_STORE_TABLE.getOptionName()).string_val;
  final List<String> storeTable =
      new StrTokenizer(storeTablePath, '.', config.quoting().string.charAt(0))
          .setIgnoreEmptyTokens(true)
          .getTokenList();

  final AbstractSchema schema = SchemaUtilities.resolveToMutableSchemaInstance(systemUserSchema,
      Util.skipLast(storeTable), true, MutationType.TABLE);

  // Query results are stored in arrow format. If need arises, we can change this to a configuration option.
  final Map<String, Object> storageOptions =
      ImmutableMap.<String, Object>of("type", ArrowFormatPlugin.ARROW_DEFAULT_NAME);

  final CreateTableEntry createTableEntry =
      schema.createNewTable(Util.last(storeTable), WriterOptions.DEFAULT, storageOptions);

  final RelTraitSet traits = inputRel.getCluster().traitSet().plus(Rel.LOGICAL);
  return new WriterRel(inputRel.getCluster(), traits, inputRel, createTableEntry, inputRel.getRowType());
}
/**
 * Convert fs path to list of strings: /a/b/c -> [a, b, c].
 *
 * @param fsPath a string
 * @return list of path components
 */
public static List<String> toPathComponents(String fsPath) {
  if (fsPath == null) {
    return EMPTY_SCHEMA_PATHS;
  }
  final StrTokenizer tokenizer =
      new StrTokenizer(fsPath, SLASH_CHAR, SqlUtils.QUOTE).setIgnoreEmptyTokens(true);
  return tokenizer.getTokenList();
}
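A usage sketch (hypothetical inputs; assumes SLASH_CHAR is '/' and SqlUtils.QUOTE is '"'): quoting protects a component containing the separator.

toPathComponents("/a/b/c");         // -> [a, b, c]
toPathComponents("/tmp/\"dir/x\""); // -> [tmp, dir/x]  (quoted component keeps the slash)
toPathComponents(null);             // -> EMPTY_SCHEMA_PATHS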
static String[] _tokenizeString(String string) {
  final StrTokenizer _tokenizer = new StrTokenizer()
      .setDelimiterMatcher(StrMatcher.trimMatcher())
      .setQuoteMatcher(StrMatcher.quoteMatcher())
      .setTrimmerMatcher(StrMatcher.trimMatcher())
      .setIgnoredMatcher(StrMatcher.quoteMatcher());
  _tokenizer.reset(string.toLowerCase());
  return _tokenizer.getTokenArray();
}
@CliCommand(value = "finder list", help = "List all finders for a given target (must be an entity)") public SortedSet<String> listFinders( @CliOption(key = "class", mandatory = false, unspecifiedDefaultValue = "*", optionContext = UPDATE_PROJECT, help = "The controller or entity for which the finders are generated") final JavaType typeName, @CliOption(key = { "", "depth" }, mandatory = false, unspecifiedDefaultValue = "1", specifiedDefaultValue = "1", help = "The depth of attribute combinations to be generated for the finders") final Integer depth, @CliOption(key = "filter", mandatory = false, help = "A comma separated list of strings that must be present in a filter to be included") final String filter) { Validate.isTrue(depth >= 1, "Depth must be at least 1"); Validate.isTrue(depth <= 3, "Depth must not be greater than 3"); final SortedSet<String> finders = finderOperations.listFindersFor( typeName, depth); if (StringUtils.isBlank(filter)) { return finders; } final Set<String> requiredEntries = new HashSet<String>(); final String[] filterTokens = new StrTokenizer(filter, ",") .getTokenArray(); for (final String requiredString : filterTokens) { requiredEntries.add(requiredString.toLowerCase()); } if (requiredEntries.isEmpty()) { return finders; } final SortedSet<String> result = new TreeSet<String>(); for (final String finder : finders) { required: for (final String requiredEntry : requiredEntries) { if (finder.toLowerCase().contains(requiredEntry)) { result.add(finder); break required; } } } return result; }
/**
 * Given a pattern and a full path, determine the pattern-mapped part.
 * <p>
 * For example:
 * <ul>
 * <li>'<code>/docs/cvs/commit.html</code>' and '<code>/docs/cvs/commit.html</code>' -> ''</li>
 * <li>'<code>/docs/*</code>' and '<code>/docs/cvs/commit</code>' -> '<code>cvs/commit</code>'</li>
 * <li>'<code>/docs/cvs/*.html</code>' and '<code>/docs/cvs/commit.html</code>' -> '<code>commit.html</code>'</li>
 * <li>'<code>/docs/**</code>' and '<code>/docs/cvs/commit</code>' -> '<code>cvs/commit</code>'</li>
 * <li>'<code>/docs/**\/*.html</code>' and '<code>/docs/cvs/commit.html</code>' -> '<code>cvs/commit.html</code>'</li>
 * <li>'<code>/*.html</code>' and '<code>/docs/cvs/commit.html</code>' -> '<code>docs/cvs/commit.html</code>'</li>
 * <li>'<code>*.html</code>' and '<code>/docs/cvs/commit.html</code>' -> '<code>/docs/cvs/commit.html</code>'</li>
 * <li>'<code>*</code>' and '<code>/docs/cvs/commit.html</code>' -> '<code>/docs/cvs/commit.html</code>'</li>
 * </ul>
 * <p>
 * Assumes that {@link #match} returns <code>true</code> for '<code>pattern</code>' and
 * '<code>path</code>', but does <strong>not</strong> enforce this.
 */
public String extractPathWithinPattern(final String pattern, final String path) {
  final String[] patternParts = new StrTokenizer(pattern, pathSeparator)
      .setIgnoreEmptyTokens(true).getTokenArray();
  final String[] pathParts = new StrTokenizer(path, pathSeparator)
      .setIgnoreEmptyTokens(true).getTokenArray();

  final StringBuilder builder = new StringBuilder();

  // Add any path parts that have a wildcarded pattern part.
  int puts = 0;
  for (int i = 0; i < patternParts.length; i++) {
    final String patternPart = patternParts[i];
    if ((patternPart.indexOf('*') > -1 || patternPart.indexOf('?') > -1)
        && pathParts.length >= i + 1) {
      if (puts > 0 || i == 0 && !pattern.startsWith(pathSeparator)) {
        builder.append(pathSeparator);
      }
      builder.append(pathParts[i]);
      puts++;
    }
  }

  // Append any trailing path parts.
  for (int i = patternParts.length; i < pathParts.length; i++) {
    if (puts > 0 || i > 0) {
      builder.append(pathSeparator);
    }
    builder.append(pathParts[i]);
  }

  return builder.toString();
}
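A quick sanity sketch against the javadoc examples above (hypothetical `matcher` instance; assumes pathSeparator is "/"):

assertEquals("cvs/commit", matcher.extractPathWithinPattern("/docs/*", "/docs/cvs/commit"));
assertEquals("commit.html", matcher.extractPathWithinPattern("/docs/cvs/*.html", "/docs/cvs/commit.html"));
assertEquals("", matcher.extractPathWithinPattern("/docs/cvs/commit.html", "/docs/cvs/commit.html"));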
private List<String> splitTablePatterns() {
  final String value = rawTables();
  if (value == null) {
    return Collections.emptyList();
  } else {
    return new StrTokenizer(value, ',', '"').getTokenList();
  }
}
public void parseKeyValue(String line) {
  int keySeparatorIndex = line.indexOf(keySeparator);
  String key;
  String valueString;
  if (keySeparatorIndex < 0) {
    if (keySeparatorOptional) {
      key = line.trim();
      valueString = "";
    } else {
      return;
    }
  } else {
    key = line.substring(0, keySeparatorIndex).trim();
    valueString = line.substring(keySeparatorIndex + keySeparator.length()).trim();
  }

  String[] values;
  if (separator == null) {
    values = new String[] { valueString };
  } else {
    StrTokenizer tokenizer = createStrTokenizer(valueString);
    values = tokenizer.getTokenArray();
  }

  String[] result = new String[values.length + 1];
  result[0] = key;
  System.arraycopy(values, 0, result, 1, values.length);
  storeLine(result);
}
@Test
public void testCsvQuoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString(",");
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121,432423, 9099053,\"Frieda karla L.,DC.\",Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.,DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset(" ,4321");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" ,,,,zzz ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);

  tokenizer.reset(",,,,zzz ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);
}
@Test
public void testPipes() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterChar('|');
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121|432423| 9099053|\"Frieda karla L.|DC.\"|Ahrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.|DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset(" |4321");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" ||||zzz ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);

  tokenizer.reset("||||zzz ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);
}
@Test
public void testTabQuoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString("\t");
  tokenizer.setQuoteChar('"');
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("Frieda karla L.,DC.", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset(" \t4321");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" \t\t\t\tzzz ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);

  tokenizer.reset("\t\t\t\tzzz ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);
}
@Test
public void testTabUnquoted() throws IOException {
  StrTokenizer tokenizer = new StrTokenizer();
  tokenizer.setDelimiterString("\t");
  tokenizer.setEmptyTokenAsNull(true);
  tokenizer.setIgnoreEmptyTokens(false);

  tokenizer.reset("121\t432423\t 9099053\t\"Frieda karla L.,DC.\"\tAhrens");
  String[] columns = tokenizer.getTokenArray();
  assertEquals("121", columns[0]);
  assertEquals("432423", columns[1]);
  assertEquals(" 9099053", columns[2]);
  assertEquals("\"Frieda karla L.,DC.\"", columns[3]);
  assertEquals("Ahrens", columns[4]);

  tokenizer.reset(" \t4321");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertEquals("4321", columns[1]);

  tokenizer.reset(" \t\t\t\tzzz ");
  columns = tokenizer.getTokenArray();
  assertEquals(" ", columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);

  tokenizer.reset("\t\t\t\tzzz ");
  columns = tokenizer.getTokenArray();
  assertNull(columns[0]);
  assertNull(columns[1]);
  assertNull(columns[2]);
  assertNull(columns[3]);
  assertEquals("zzz ", columns[4]);
}
private List<String> processCommand(String commandLine) {
  List<String> command = new ArrayList<>(arguments);
  if (!isEmpty(commandLine)) {
    command.addAll(new StrTokenizer(commandLine, ',', '"').getTokenList());
  }
  return command;
}
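A hedged sketch of the tokenization this relies on (hypothetical arguments): commas split, double quotes protect an embedded comma.

List<String> tokens = new StrTokenizer("--verbose,\"--exclude=a,b\",--dry-run", ',', '"').getTokenList();
// tokens -> [--verbose, --exclude=a,b, --dry-run]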
/**
 * Loads the CSV file from the file system.
 */
public void load() throws IOException {
  lines = Lists.newLinkedList();
  headers = null;

  StrTokenizer st = StrTokenizer.getCSVInstance();
  st.setDelimiterChar(';');

  // Default encoding is used (--> UTF-8).
  BufferedReader br = null;
  try {
    br = new BufferedReader(new FileReader(fileName));
    for (String line = null; (line = br.readLine()) != null;) {
      String trimmedLine = StringUtils.trimToNull(line);
      if (trimmedLine == null || trimmedLine.startsWith("#")) {
        continue;
      }
      st.reset(line);
      ArrayList<String> tokens = Lists.newArrayList(st.getTokenArray());
      if (headers == null) {
        headers = tokens;
      } else {
        lines.add(tokens);
      }
    }
  } finally {
    IOUtils.closeQuietly(br);
  }
}
private List<Column> initColumns(final StrTokenizer st, final String headerLine) {
  st.reset(headerLine);
  String[] headers = st.getTokenArray();
  List<Column> columns = newArrayListWithCapacity(headers.length);
  for (String header : headers) {
    columns.add(new Column(header));
  }
  return columns;
}
private List<? extends List<String>> loadData(final File file) throws IOException {
  try (BufferedReader br = newReader(file, Charsets.UTF_8)) {
    List<List<String>> rows = newArrayList();
    StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
    tokenizer.setDelimiterChar(DELIMITER);
    for (String line; (line = br.readLine()) != null;) {
      tokenizer.reset(line);
      List<String> tokenList = tokenizer.getTokenList();
      rows.add(tokenList);
    }
    return rows;
  }
}
private void extractFilesForMarkers() {
  if (!markers.isEmpty()) {
    listPerfAlyzerFiles(normalizedDir).stream().filter(perfAlyzerFile -> {
      // GC logs cannot be split up here; their markers need to be handled explicitly later.
      // Load profiles contain the markers themselves and thus need to be filtered out as well.
      String fileName = perfAlyzerFile.getFile().getName();
      return !fileName.contains("gclog") && !fileName.contains("[loadprofile]");
    }).forEach(perfAlyzerFile -> markers.forEach(marker -> {
      PerfAlyzerFile markerFile = perfAlyzerFile.copy();
      markerFile.setMarker(marker.getName());
      Path destPath = normalizedDir.toPath().resolve(markerFile.getFile().toPath());

      try (WritableByteChannel destChannel = newByteChannel(destPath, CREATE, WRITE)) {
        Path srcPath = normalizedDir.toPath().resolve(perfAlyzerFile.getFile().toPath());

        StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
        tokenizer.setDelimiterChar(';');

        try (Stream<String> lines = Files.lines(srcPath, UTF_8)) {
          lines.filter(line -> {
            try {
              tokenizer.reset(line);
              String timestampString = tokenizer.nextToken();
              long timestamp = Long.parseLong(timestampString);
              return marker.getLeftMillis() <= timestamp && marker.getRightMillis() > timestamp;
            } catch (NumberFormatException ex) {
              LOG.error("Invalid data line: {}", line);
              return false;
            }
          }).forEach(line -> writeLineToChannel(destChannel, line, UTF_8));
        }
      } catch (IOException e) {
        throw new UncheckedIOException(e);
      }
    }));
  }
}
public static Map<String, String> readAggregatedMap(final File executionsFile, final Charset charset)
    throws IOException {
  final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
  tokenizer.setDelimiterChar(';');

  Map<String, String> result = newHashMapWithExpectedSize(11);
  List<String> lines = Files.readLines(executionsFile, charset);

  String[] headers = null;
  for (String line : lines) {
    tokenizer.reset(line);
    String[] tokens = tokenizer.getTokenArray();
    if (headers == null) {
      headers = tokens;
    } else {
      String operation = tokens[0];
      for (int i = 1; i < headers.length; ++i) {
        result.put(operation + "." + headers[i], tokens[i]);
      }
    }
  }
  return result;
}
private ListMultimap<ProcessKey, LoadProfileEvent> readLoadProfileEvents(final Element testplan)
    throws IOException {
  ListMultimap<ProcessKey, LoadProfileEvent> eventsByProcess = ArrayListMultimap.create();
  String loadProfile = testplan.elementTextTrim("loadProfile");

  // relative to testplan
  File loadProfileConfigFile = new File(new File(testplanFile.getParentFile(), "loadprofiles"), loadProfile);

  try (BufferedReader br = new BufferedReader(new InputStreamReader(new FileInputStream(loadProfileConfigFile), "UTF-8"))) {
    StrTokenizer st = StrTokenizer.getCSVInstance();
    st.setDelimiterChar(';');

    for (String line = null; (line = br.readLine()) != null;) {
      // ignore lines that are blank, commented out, or represent markers
      if (isBlank(line) || startsWith(line, "#") || MARKER_PATTERN.matcher(line).matches()) {
        continue;
      }
      st.reset(line);
      String[] tokens = st.getTokenArray();

      long startTime = Long.parseLong(tokens[0]);
      String operation = tokens[1];
      String target = tokens[2];
      int daemonId = Integer.parseInt(tokens[3]);
      int processId = Integer.parseInt(tokens[4]);

      eventsByProcess.put(new ProcessKey(daemonId, processId),
          new LoadProfileEvent(startTime, operation, target, daemonId, processId));
    }
  }
  return eventsByProcess;
}
/**
 * Parses the top level schema name back into a subschema path.
 *
 * @param topLevelSchemaName the dot-separated, optionally quoted schema name
 * @return the list of subschema path components
 */
public static List<String> toSubSchemaPath(String topLevelSchemaName) {
  return new StrTokenizer(topLevelSchemaName, '.', '\'')
      .setIgnoreEmptyTokens(true)
      .getTokenList();
}
/**
 * Parses the schema path into a list of schema entries.
 *
 * @param schemaPath the dot-separated, optionally quoted schema path
 * @return the list of schema path components
 */
public static List<String> parseSchemaPath(String schemaPath) {
  return new StrTokenizer(schemaPath, '.', SqlUtils.QUOTE)
      .setIgnoreEmptyTokens(true)
      .getTokenList();
}
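A usage sketch (hypothetical input; assumes SqlUtils.QUOTE is '"'): quoting protects a dot inside a single path component.

parseSchemaPath("prod.\"my.schema\".sales");
// -> [prod, my.schema, sales]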
@CliCommand(value = "web mvc scaffold", help = "Create a new scaffold Controller (ie where Roo maintains CRUD functionality automatically)") public void webMvcScaffold( @CliOption(key = { "class", "" }, mandatory = true, help = "The path and name of the controller object to be created") final JavaType controller, @CliOption(key = "backingType", mandatory = false, optionContext = PROJECT, unspecifiedDefaultValue = "*", help = "The name of the form backing type which the controller exposes to the web tier") final JavaType backingType, @CliOption(key = "path", mandatory = false, help = "The base path under which the controller listens for RESTful requests (defaults to the simple name of the form backing object)") String path, @CliOption(key = "disallowedOperations", mandatory = false, help = "A comma separated list of operations (only create, update, delete allowed) that should not be generated in the controller") final String disallowedOperations) { final ClassOrInterfaceTypeDetails cid = typeLocationService .getTypeDetails(backingType); if (cid == null) { LOGGER.warning("The specified entity can not be resolved to a type in your project"); return; } if (controller.getSimpleTypeName().equalsIgnoreCase( backingType.getSimpleTypeName())) { LOGGER.warning("Controller class name needs to be different from the class name of the form backing object (suggestion: '" + backingType.getSimpleTypeName() + "Controller')"); return; } final Set<String> disallowedOperationSet = new HashSet<String>(); if (!"".equals(disallowedOperations)) { final String[] disallowedOperationsTokens = new StrTokenizer( disallowedOperations, ",").getTokenArray(); for (final String operation : disallowedOperationsTokens) { if (!("create".equals(operation) || "update".equals(operation) || "delete" .equals(operation))) { LOGGER.warning("-disallowedOperations options can only contain 'create', 'update', 'delete': -disallowedOperations update,delete"); return; } disallowedOperationSet.add(operation.toLowerCase()); } } if (StringUtils.isBlank(path)) { final LogicalPath targetPath = PhysicalTypeIdentifier.getPath(cid .getDeclaredByMetadataId()); final PluralMetadata pluralMetadata = (PluralMetadata) metadataService .get(PluralMetadata.createIdentifier(backingType, targetPath)); Validate.notNull(pluralMetadata, "Could not determine plural for '%s'", backingType.getSimpleTypeName()); path = pluralMetadata.getPlural().toLowerCase(); } else if (path.equals("/") || path.equals("/*")) { LOGGER.warning("Your application already contains a mapping to '/' or '/*' by default. Please provide a different path."); return; } else if (path.startsWith("/")) { path = path.substring(1); } controllerOperations.createAutomaticController(controller, backingType, disallowedOperationSet, path); }
public void parseTokenized(String line) {
  StrTokenizer tokenizer = createStrTokenizer(line);
  String[] tokens = tokenizer.getTokenArray();
  storeLine(tokens);
}
public StrTokenizer createStrTokenizer(String valueString) {
  StrTokenizer tokenizer = new StrTokenizer(valueString, separator);
  tokenizer.setIgnoreEmptyTokens(ignoreEmptyTokens);
  tokenizer.setQuoteChar(quoteChar);
  return tokenizer;
}
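A sketch of how this factory behaves for parseKeyValue/parseTokenized above, under hypothetical field values (separator ",", quoteChar '"', ignoreEmptyTokens false):

StrTokenizer t = createStrTokenizer("a,\"b,c\",d");
// tokens -> [a, b,c, d]  (the quoted token keeps its embedded comma)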
@Override
public int complete(String buffer, int cursor, List<CharSequence> candidates) {
  if (buffer.contains(CliConstants.RESOURCE_PATH_LONG_OPTION)) {
    return fileNameCompleter.complete(buffer, cursor, candidates);
  }
  if (logger.isTraceEnabled()) {
    logger.trace("Buffer: {}, cursor: {}", buffer, cursor);
    logger.trace("Candidates {}", candidates);
  }

  if (StringUtils.isNotBlank(buffer)) {
    // User is typing a command
    StrTokenizer strTokenizer = new StrTokenizer(buffer);
    String action = strTokenizer.next();
    Collection<String> arguments = argumentMap.get(action);
    if (arguments != null) {
      if (logger.isTraceEnabled()) {
        logger.trace("Arguments found for {}, Tokens: {}", action, strTokenizer.getTokenList());
        logger.trace("Arguments for {}: {}", action, arguments);
      }
      List<String> args = new ArrayList<String>(arguments);
      List<Completer> completers = new ArrayList<Completer>();
      for (String token : strTokenizer.getTokenList()) {
        boolean argContains = arguments.contains(token);
        if (token.startsWith("-") && !argContains) {
          continue;
        }
        if (argContains) {
          if (logger.isTraceEnabled()) {
            logger.trace("Removing argument {}", token);
          }
          args.remove(token);
        }
        completers.add(new StringsCompleter(token));
      }
      completers.add(new StringsCompleter(args));
      Completer completer = new ArgumentCompleter(completers);
      return completer.complete(buffer, cursor, candidates);
    } else if (CliConstants.HELP_ACTION.equals(action)) {
      // For help action, we need to display available commands as arguments
      return helpCommandCompleter.complete(buffer, cursor, candidates);
    }
  }

  if (logger.isTraceEnabled()) {
    logger.trace("Using Default Completer...");
  }
  return defaultCommandCompleter.complete(buffer, cursor, candidates);
}
/**
 * Processes the specified CSV file. For every line but the header line (which is required), the
 * specified command is executed.
 *
 * @param reader
 *            the reader for loading the CSV data
 * @param delimiter
 *            the column separator
 * @param quoteChar
 *            the quote character ('\0' for no quoting)
 * @param command
 *            the command (i.e. a Groovy closure if used in a Groovy script) to be executed for
 *            every processed line
 */
public void processFile(final Reader reader, final String delimiter, final char quoteChar,
    final Runnable command) {
  try {
    List<String> inputLines = CharStreams.readLines(reader);

    StrTokenizer st = StrTokenizer.getCSVInstance();
    st.setDelimiterString(delimiter);
    if (quoteChar != '\0') {
      st.setQuoteChar(quoteChar);
    } else {
      st.setQuoteMatcher(StrMatcher.noneMatcher());
    }

    // extract header
    String headerLine = inputLines.remove(0);
    List<Column> columns = initColumns(st, headerLine);

    for (String line : inputLines) {
      st.reset(line);
      String[] colArray = st.getTokenArray();
      int len = colArray.length;
      checkState(len == columns.size(), "Mismatch between number of header columns and number of line columns.");

      DataSource dataSource = dataSourceProvider.get();
      Configuration config = configProvider.get();

      for (int i = 0; i < len; ++i) {
        String value = StringUtils.trimToEmpty(colArray[i]);
        String dataSetKey = columns.get(i).dataSetKey;
        String key = columns.get(i).key;
        if (dataSetKey != null) {
          if ("<auto>".equals(value)) {
            dataSource.resetFixedValue(dataSetKey, key);
          } else {
            log.debug("Setting data set entry for " + this + " to value=" + value);
            dataSource.setFixedValue(dataSetKey, key, value);
          }
        } else {
          log.debug("Setting property for " + this + " to value=" + value);
          config.put(key, value);
        }
      }
      command.run();
    }
  } catch (IOException ex) {
    throw new JFunkException("Error processing CSV data", ex);
  }
}
@Override
protected void appendTrainingData(CrfSuiteTrainer trainer) throws IOException {
  BufferedReader inReader = trainingDataReader instanceof BufferedReader
      ? (BufferedReader) trainingDataReader
      : new BufferedReader(trainingDataReader);

  // parse training data
  log.info("Parsing training data...");
  String line;
  int lineNumber = 0;
  List<List<Attribute>> items = newList();
  List<String> labels = newList();
  int instancesCounter = 0;
  while ((line = inReader.readLine()) != null) {
    lineNumber++;
    if (line.isEmpty()) {
      if (items.size() != labels.size()) {
        throw new IllegalStateException();
      }
      if (items.isEmpty()) {
        log.warn("Empty instance at line {}", lineNumber);
      } else {
        trainer.append(items, labels, 0);
        instancesCounter++;
      }
      items = newList();
      labels = newList();
    } else {
      StrTokenizer fSplitter = getFeatureSplitter(line);
      if (!fSplitter.hasNext()) {
        log.warn("Empty item at line {}", lineNumber);
        continue;
      }
      String label = fSplitter.next();
      List<Attribute> features = toAttributes(fSplitter, lineNumber);
      labels.add(label);
      items.add(features);
    }
  }

  // add last instance if any
  if (items.size() != labels.size()) {
    throw new IllegalStateException();
  }
  if (!items.isEmpty()) {
    trainer.append(items, labels, 0);
    instancesCounter++;
    items = null;
    labels = null;
  }

  // report
  log.info("{} instances have been read", instancesCounter);
}
private static StrTokenizer getFeatureSplitter(String src) {
  StrTokenizer result = new StrTokenizer(src);
  result.setDelimiterChar('\t');
  result.setIgnoreEmptyTokens(true);
  return result;
}
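A sketch of the tab-separated item format this splitter implies, as consumed by appendTrainingData above (hypothetical line; label first, then features):

StrTokenizer st = getFeatureSplitter("B-NP\tword=the\tpos=DT");
String label = st.next(); // "B-NP"
// the remaining tokens ("word=the", "pos=DT") become the item's attributes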
public void mergeFiles() throws IOException {
  if (!inputDir.isDirectory()) {
    throw new IllegalArgumentException("The input File must be a directory");
  }

  StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
  tokenizer.setDelimiterChar(DELIMITER);

  Map<String, FileChannel> destChannels = newHashMap();
  List<OutputStream> outputStreams = newArrayList();

  File[] filesInInputDirectory = inputDir.listFiles();
  try {
    for (File file : filesInInputDirectory) {
      FileInputStream fis = null;
      try {
        fis = new FileInputStream(file);
        for (Scanner scanner = new Scanner(fis, Charsets.UTF_8.name()); scanner.hasNext();) {
          String line = scanner.nextLine();
          tokenizer.reset(line);
          List<String> tokenList = tokenizer.getTokenList();
          String key = tokenList.get(sortCriteriaColumn);

          FileChannel destChannel = destChannels.get(key);
          if (destChannel == null) {
            FileOutputStream fos = new FileOutputStream(new File(outputDir, FILE_TYPE + "_" + key + ".out"));
            outputStreams.add(fos);
            destChannel = fos.getChannel();
            destChannels.put(key, destChannel);
            // Write the header (has to be improved)
            IoUtilities.writeLineToChannel(destChannel, getHeader(), Charsets.UTF_8);
          }

          StrBuilder outputLine = new StrBuilder();
          for (String s : tokenList) {
            StrBuilderUtils.appendEscapedAndQuoted(outputLine, DELIMITER, s);
          }
          IoUtilities.writeLineToChannel(destChannel, outputLine.toString(), Charsets.UTF_8);
        }
      } finally {
        closeQuietly(fis);
      }
    }
  } finally {
    outputStreams.forEach(IOUtils::closeQuietly);
  }
}
/**
 * Reads a semicolon-delimited CSV file into a list. Each line in the result list will be
 * another list of {@link Number} objects. The file is expected to have two numeric columns
 * which are parsed using the specified number format.
 *
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @return the immutable result list
 */
public static List<SeriesPoint> readDataFile(final File file, final Charset charset,
    final NumberFormat numberFormat) throws IOException {
  final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
  tokenizer.setDelimiterChar(';');

  try (BufferedReader br = newReader(file, charset)) {
    boolean headerLine = true;
    List<SeriesPoint> result = newArrayListWithExpectedSize(200);

    for (String line; (line = br.readLine()) != null;) {
      try {
        if (headerLine) {
          headerLine = false;
        } else {
          tokenizer.reset(line);
          String[] tokens = tokenizer.getTokenArray();
          double x = numberFormat.parse(tokens[0]).doubleValue();
          double y = numberFormat.parse(tokens[1]).doubleValue();

          if (!result.isEmpty()) {
            // additional point for histogram
            SeriesPoint previousPoint = getLast(result);
            result.add(new SeriesPoint(x, previousPoint.getY()));
          }

          tokenizer.reset(line);
          result.add(new SeriesPoint(x, y));
        }
      } catch (ParseException ex) {
        throw new IOException("Error parsing number in file: " + file, ex);
      }
    }

    int size = result.size();
    if (size > 2) {
      // additional point at end for histogram
      SeriesPoint nextToLast = result.get(size - 3);
      SeriesPoint last = result.get(size - 1);
      double dX = last.getX().doubleValue() - nextToLast.getX().doubleValue();
      result.add(new SeriesPoint(last.getX().doubleValue() + dX, last.getY()));
    }

    return ImmutableList.copyOf(result);
  }
}
/**
 * Reads a semicolon-delimited CSV file into a map of lists of series values. Values for each
 * column are returned as lists in the map, the key being the column header.
 *
 * @param file
 *            the file
 * @param charset
 *            the character set to read the file
 * @param numberFormat
 *            the number format for parsing the column values
 * @param columnNames
 *            the columns to consider
 *
 * @return an immutable map of lists of series values
 */
public static Map<String, List<SeriesPoint>> readDataFile(final File file, final Charset charset,
    final NumberFormat numberFormat, final Set<String> columnNames) throws IOException {
  final StrTokenizer tokenizer = StrTokenizer.getCSVInstance();
  tokenizer.setDelimiterChar(';');

  return readLines(file, charset, new LineProcessor<Map<String, List<SeriesPoint>>>() {
    private String[] headers;
    private final Map<String, List<SeriesPoint>> result = newHashMapWithExpectedSize(4);
    private int colCount;

    @Override
    public boolean processLine(final String line) throws IOException {
      try {
        tokenizer.reset(line);
        String[] tokens = tokenizer.getTokenArray();
        if (headers == null) {
          headers = tokens;
          colCount = tokens.length;
        } else {
          Integer counter = Integer.valueOf(tokens[0]);
          for (int i = 1; i < colCount; ++i) {
            String header = headers[i];
            if (columnNames.contains(header)) {
              List<SeriesPoint> colValues = result.get(header);
              if (colValues == null) {
                colValues = newArrayListWithExpectedSize(50);
                result.put(header, colValues);
              }
              colValues.add(new SeriesPoint(counter, numberFormat.parse(tokens[i])));
            }
          }
        }
        return true;
      } catch (ParseException ex) {
        throw new IOException("Error parsing number in file: " + file, ex);
      }
    }

    @Override
    public Map<String, List<SeriesPoint>> getResult() {
      return ImmutableMap.copyOf(result);
    }
  });
}