/**
 * Serializes the metadata of the given arguments to a CSV string.
 *
 * @param arguments the arguments whose metadata (plus debate metadata) is exported
 * @return the full CSV content, including the header row
 * @throws IOException if writing a record fails
 */
public static String exportMetaDataToCSV(List<StandaloneArgument> arguments)
        throws IOException {
    StringWriter sw = new StringWriter();
    // try-with-resources flushes and closes the printer even if a record fails
    // (the original never closed it and relied on a manual flush)
    try (CSVPrinter csvPrinter = new CSVPrinter(sw, CSVFormat.DEFAULT.withHeader(
            "id", "author", "annotatedStance", "timestamp",
            "debateMetaData.title", "debateMetaData.description", "debateMetaData.url"))) {
        for (StandaloneArgument argument : arguments) {
            csvPrinter.printRecord(
                    argument.getId(),
                    argument.getAuthor(),
                    argument.getAnnotatedStance(),
                    argument.getTimestamp(),
                    argument.getDebateMetaData().getTitle(),
                    argument.getDebateMetaData().getDescription(),
                    argument.getDebateMetaData().getUrl()
            );
        }
    }
    return sw.toString();
}
/**
 * Parses the exercise's "xy_pista.csv" hint file into a map keyed by the
 * (trimmed) "id" column; each value is the full record as a column-name map.
 * Returns an empty map if the file cannot be read.
 */
private HashMap<String, Map<String, String>> parseHintFile(Integer exercise, Integer exercise_order) {
    final HashMap<String, Map<String, String>> list = new HashMap<>();
    try {
        final URL resource = ResourceHelper.getResource(BuenOjoFileUtils.GAME_RESOURCES_INPUT_DIR,
                this.gamePath, this.setPath, exercise.toString(), exercise_order.toString(), "xy_pista.csv");
        final Charset charset = FileEncodingDetectorHelper.guessEncodingAndGetCharset(resource);
        final CSVFormat csvFormat = CSVFormatHelper.getDefaultCSVFormat();
        // close the parser (and its underlying stream) — it was leaked before
        try (CSVParser parser = CSVParser.parse(resource, charset, csvFormat)) {
            for (CSVRecord record : parser) {
                // record.toMap() already returns Map<String, String>; the old cast was redundant
                list.put(record.get("id").trim(), record.toMap());
            }
        }
    } catch (IOException e) {
        log.error("Fail", e);
    }
    return list;
}
/**
 * Writes the headers (if any) and every record of the wrapped parser to the
 * given file in RFC 4180 format.
 *
 * @param outputFilePath path of the CSV file to create/overwrite
 * @throws Exception if opening, writing, or closing the file fails
 */
@Override
public void write(String outputFilePath) throws Exception {
    // try-with-resources closes printer then writer even on failure; the old
    // catch(Exception e){ throw e; } was a no-op and has been removed
    try (Writer out = new BufferedWriter(new FileWriter(outputFilePath));
         CSVPrinter csvPrinter = new CSVPrinter(out, CSVFormat.RFC4180)) {
        if (this.getHeaders() != null) {
            csvPrinter.printRecord(this.getHeaders());
        }
        // CSVParser is Iterable — no need for an explicit Iterator
        for (CSVRecord record : this.getCSVParser()) {
            csvPrinter.printRecord(record);
        }
        csvPrinter.flush();
    }
}
/**
 * Parses the configured CSV resource into a list of records, each represented
 * as a column-name-to-value map. Returns an empty list if reading fails.
 */
private ArrayList<Map<String, String>> parse() {
    final ArrayList<Map<String, String>> list = new ArrayList<>();
    try {
        final URL resource = ResourceHelper.getResource(isFromGameResourceInput(), fileName);
        final Charset charset = FileEncodingDetectorHelper.guessEncodingAndGetCharset(resource);
        final CSVFormat csvFormat = CSVFormatHelper.getDefaultCSVFormat();
        // close the parser to release the underlying stream (was leaked before)
        try (CSVParser parser = CSVParser.parse(resource, charset, csvFormat)) {
            for (CSVRecord record : parser) {
                // toMap() already returns Map<String, String>; the old cast was redundant
                list.add(record.toMap());
            }
        }
    } catch (IOException e) {
        log.error("Fail", e);
    }
    return list;
}
/**
 * Parses the configured CSV resource into a list of records, each represented
 * as a column-name-to-value map. Returns an empty list if reading fails.
 */
private ArrayList<Map<String,String>> parse() {
    ArrayList<Map<String,String>> list = new ArrayList<>();
    try {
        URL resource = ResourceHelper.getResource(isFromGameResourceInput(), fileName);
        Charset charset = FileEncodingDetectorHelper.guessEncodingAndGetCharset(resource);
        CSVFormat csvFormat = CSVFormatHelper.getDefaultCSVFormat();
        // close the parser to release the underlying stream (was leaked before)
        try (CSVParser parser = CSVParser.parse(resource, charset, csvFormat)) {
            for (CSVRecord record : parser) {
                list.add(record.toMap());
            }
        }
    } catch (IOException e) {
        log.error("Fail", e);
    }
    return list;
}
/**
 * Parses the exercise's "areaDelimitada.csv" file and returns its record as a
 * column-name map. Returns an empty map if reading fails.
 */
private Map<String,String> parseDelimitedAreaFile(Integer exercise, Integer exercise_order) {
    Map<String,String> list = new HashMap<>();
    try {
        URL resource = ResourceHelper.getResource(isFromGameResourceInput(), this.gamePath, this.setPath,
                exercise.toString(), exercise_order.toString(), "areaDelimitada.csv");
        Charset charset = FileEncodingDetectorHelper.guessEncodingAndGetCharset(resource);
        CSVFormat csvFormat = CSVFormatHelper.getDefaultCSVFormat();
        // close the parser to release the underlying stream (was leaked before)
        try (CSVParser parser = CSVParser.parse(resource, charset, csvFormat)) {
            // NOTE(review): each iteration REPLACES the map, so only the last
            // record survives — presumably the file holds a single data record;
            // confirm against the file format.
            for (CSVRecord record : parser) {
                list = record.toMap();
            }
        }
    } catch (IOException e) {
        log.error("Fail", e);
    }
    return list;
}
/**
 * Parses the input stream as RFC 4180 CSV (first record as header) into a
 * list of column-name maps.
 *
 * @return one map per data record
 * @throws BuenOjoCSVParserException if the stream cannot be opened or read
 */
public List<Map<String,String>> parse() throws BuenOjoCSVParserException {
    List<Map<String,String>> list = new ArrayList<>();
    // try-with-resources closes the parser and underlying stream (was leaked before)
    // NOTE(review): charset is unspecified, so the platform default is used —
    // confirm the input encoding; also the cause exception is dropped because
    // only the message is forwarded — confirm BuenOjoCSVParserException has no
    // (String, Throwable) constructor.
    try (CSVParser parser = CSVFormat.RFC4180.withHeader()
            .withDelimiter(',')
            .withAllowMissingColumnNames(true)
            .parse(new InputStreamReader(this.inputStreamSource.getInputStream()))) {
        for (CSVRecord record : parser) {
            list.add(record.toMap());
        }
    } catch (IOException e) {
        throw new BuenOjoCSVParserException(e.getMessage());
    }
    return list;
}
/**
 * Parses the sight-pair CSV into {@link PhotoLocationSightPair} objects.
 *
 * @return one pair per record
 * @throws IOException if the stream cannot be read
 * @throws BuenOjoCSVParserException if the file contains no records
 */
public List<PhotoLocationSightPair> parse() throws IOException, BuenOjoCSVParserException {
    // try-with-resources closes the parser and its stream (was leaked before)
    try (CSVParser parser = CSVFormat.RFC4180.withHeader().withDelimiter(',')
            .withAllowMissingColumnNames(true)
            .parse(new InputStreamReader(this.inputStreamSource.getInputStream()))) {
        List<CSVRecord> records = parser.getRecords();
        if (records.isEmpty()) {
            throw new BuenOjoCSVParserException("El archivos de miras no contiene registros");
        }
        ArrayList<PhotoLocationSightPair> sightPairs = new ArrayList<>(records.size());
        for (CSVRecord record : records) {
            PhotoLocationSightPair sight = new PhotoLocationSightPair();
            // Integer.valueOf replaces the deprecated new Integer(String) constructor
            sight.setNumber(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.id)));
            sight.setSatelliteX(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.satCol)));
            sight.setSatelliteY(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.satRow)));
            sight.setSatelliteTolerance(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.satTolerancia)));
            sight.setTerrainX(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.terCol)));
            sight.setTerrainY(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.terRow)));
            sight.setTerrainTolerance(Integer.valueOf(record.get(PhotoLocationSightPairCSVColumn.terTolerancia)));
            sightPairs.add(sight);
        }
        return sightPairs;
    }
}
/**
 * Parses the tag-slot CSV and pairs each slot with the matching {@link Tag}
 * from {@code tagList}. Records referencing an unknown tag number are logged
 * and skipped.
 *
 * @return the resolved tag pairs
 * @throws IOException if the stream cannot be read
 */
public List<TagPair> parse() throws IOException {
    ArrayList<TagPair> tagPairs = new ArrayList<>(AVG_ITEMS);
    // try-with-resources closes the parser and its stream (was leaked before)
    try (CSVParser parser = CSVFormat.RFC4180.withHeader().withDelimiter(',')
            .withAllowMissingColumnNames(true)
            .parse(new InputStreamReader(this.inputStreamSource.getInputStream()))) {
        for (CSVRecord record : parser) {
            TagPair pair = new TagPair();
            // Integer.valueOf replaces the deprecated new Integer(String) constructor
            Integer tagSlotId = Integer.valueOf(record.get("id"));
            Integer tagNumber = Integer.valueOf(record.get("etiqueta"));
            pair.setTagSlotId(tagSlotId);
            Optional<Tag> optionalTag = tagList.stream().filter(isEqualToTagNumber(tagNumber)).findFirst();
            if (optionalTag.isPresent()) {
                pair.setTag(optionalTag.get());
                tagPairs.add(pair);
            } else {
                log.debug("Attempt to get invalid tag with number: " + tagNumber);
            }
        }
    }
    return tagPairs;
}
/**
 * Parses the beacon CSV, which must contain exactly one data record.
 *
 * @return the parsed beacon
 * @throws IOException if the stream cannot be read
 * @throws BuenOjoCSVParserException if the file has zero or more than one record
 */
public PhotoLocationBeacon parse() throws IOException, BuenOjoCSVParserException {
    // try-with-resources closes the parser and its stream (was leaked before)
    try (CSVParser parser = CSVFormat.RFC4180.withHeader().withDelimiter(',')
            .withAllowMissingColumnNames(true)
            .parse(new InputStreamReader(this.inputStreamSource.getInputStream()))) {
        List<CSVRecord> records = parser.getRecords();
        if (records.size() > 1) {
            throw new BuenOjoCSVParserException("El archivo contiene más de un indicador");
        }
        if (records.isEmpty()) {
            throw new BuenOjoCSVParserException("El archivo de indicador es inválido");
        }
        CSVRecord record = records.get(0);
        PhotoLocationBeacon beacon = new PhotoLocationBeacon();
        // Integer.valueOf replaces the deprecated new Integer(String) constructor
        beacon.setX(Integer.valueOf(record.get(PhotoLocationBeaconCSVColumns.col.ordinal())));
        beacon.setY(Integer.valueOf(record.get(PhotoLocationBeaconCSVColumns.row.ordinal())));
        beacon.setTolerance(Integer.valueOf(record.get(PhotoLocationBeaconCSVColumns.tolerance.ordinal())));
        return beacon;
    }
}
/**
 * Parses the circular-area CSV into {@link TagCircle} objects.
 *
 * @return the parsed circles (at most {@code MAX_CIRCLES})
 * @throws IOException if the stream cannot be read
 * @throws BuenOjoCSVParserException if the file defines more circles than allowed
 */
public List<TagCircle> parse() throws IOException, BuenOjoCSVParserException {
    ArrayList<TagCircle> list = new ArrayList<>(MAX_CIRCLES);
    // try-with-resources closes the parser and its stream (was leaked before)
    try (CSVParser parser = CSVFormat.RFC4180.withHeader().withDelimiter(',')
            .withAllowMissingColumnNames(false)
            .parse(new InputStreamReader(this.inputStream))) {
        for (CSVRecord record : parser) {
            TagCircle circle = new TagCircle();
            // valueOf replaces the deprecated new Integer/new Float constructors
            circle.setNumber(Integer.valueOf(record.get("id")));
            circle.setX(Integer.valueOf(record.get("col")));
            circle.setY(Integer.valueOf(record.get("row")));
            circle.setRadioPx(Float.valueOf(record.get("radioPx")));
            list.add(circle);
        }
    }
    if (list.size() > MAX_CIRCLES) {
        throw new BuenOjoCSVParserException("el archivo contiene mas de " + MAX_CIRCLES + "áreas circulares");
    }
    return list;
}
/**
 * Creates a new dataset with column labels and data read from the given Reader,
 * using a specified input format.
 *
 * @param reader the Reader to read column labels and data from
 * @param input_format the format
 * @throws RuntimeException wrapping any I/O error during parsing
 */
@SuppressWarnings("WeakerAccess")
public DataSet(final Reader reader, final CSVFormat input_format) {
    this();
    try (CSVParser parser = new CSVParser(reader, input_format.withHeader())) {
        labels.addAll(getColumnLabels(parser));
        for (final CSVRecord record : parser) {
            final List<String> items = csvRecordToList(record);
            final int size = items.size();
            // Don't add row if the line was empty.
            if (size > 1 || (size == 1 && items.get(0).length() > 0)) {
                records.add(items);
            }
        }
        // The explicit reader.close() was removed: closing the CSVParser via
        // try-with-resources also closes the underlying reader, and the old
        // call was skipped whenever parsing threw.
    } catch (final IOException e) {
        throw new RuntimeException(e);
    }
}
/** * This method reads the file from @see {@link ReadFolder} and put into an array list the data we need. * We use here the API commons-csv. * Attention : tu run with the API you need to import him into the project @see README. * @param folderName. * @exception IOException | NumberFormatException : print error reading file. */ public void read(String folderName) { try { Reader in = new FileReader(folderName + "/" + file.getName()); BufferedReader br = new BufferedReader(in); String firstLine = br.readLine(); if (checkTheFile(firstLine)) { Iterable<CSVRecord> records = CSVFormat.RFC4180.withFirstRecordAsHeader().parse(br); for (CSVRecord record : records) if (record.get("Type").equals("WIFI") && !record.get("FirstSeen").contains("1970")) inputWifi(record, firstLine); in.close(); br.close(); } } catch(IOException | NumberFormatException ex) { // If there is an error. System.out.println("Error reading file : " + ex); System.exit(0); } }
/**
 * Maps CSV data from the stream onto the wrapper's object, column-by-column
 * per {@code classFields}, collecting each row into the wrapper's list.
 *
 * @param is the CSV input stream
 * @return the wrapper's object (holds the values of the last row processed)
 */
public T csvToObject(InputStream is) throws IOException, IllegalArgumentException, IllegalAccessException, InstantiationException {
    WrapperReturner<T> list = new WrapperReturner();
    try (InputStreamReader br = new InputStreamReader(is);
         CSVParser parser = new CSVParser(br, CSVFormat.DEFAULT)) {
        // Parse all records ONCE. The previous code called parser.getRecords()
        // inside the loop (each call drains the remaining stream) and bounded
        // the loop by getRecordNumber(), which is 0 before any record has been
        // read — so the loop body never executed at all.
        List<CSVRecord> records = parser.getRecords();
        for (CSVRecord record : records) {
            int column = 0;
            for (Field field : classFields) {
                setFieldValue(field, list.t, record.get(column));
                column++;
            }
            // Add the populated object once per ROW (the old code added it once
            // per field).
            // NOTE(review): list.t is a single shared instance, so every row
            // overwrites the same object — confirm whether a fresh instance
            // should be created per record.
            list.tl.add(list.t);
        }
    }
    return list.t;
}
/** Exports all labels, ordered by name, to {@code labels.csv} in the export directory. */
@Override
public void run() {
    log.info("Start exporting labels");
    final List<Label> allLabels = repo.findAllByOrderByName();
    final File target = exportDirPath.resolve("labels.csv").toFile();
    final CSVFormat format = CSVFormat.DEFAULT
            .withHeader("name", "imageUrl")
            .withRecordSeparator('\n');
    try (FileWriter fileWriter = new FileWriter(target);
         CSVPrinter printer = new CSVPrinter(fileWriter, format)) {
        for (Label current : allLabels) {
            printer.printRecord(current.getName(), current.getImageUrl());
        }
        log.info("Finished exporting {} labels to file {}", allLabels.size(), target);
    } catch (IOException e) {
        log.error("Failed to export labels to file {} due to error: {}", target.getAbsolutePath(), e.getMessage());
    }
}
/** Exports all allergens, ordered by number, to {@code allergens.csv} in the export directory. */
@Override
public void run() {
    log.info("Start exporting allergens");
    final List<Allergen> rows = repo.findAllByOrderByNumberAsc();
    final File target = exportDirPath.resolve("allergens.csv").toFile();
    final CSVFormat format = CSVFormat.DEFAULT
            .withHeader("number", "name")
            .withRecordSeparator('\n');
    try (FileWriter fileWriter = new FileWriter(target);
         CSVPrinter printer = new CSVPrinter(fileWriter, format)) {
        for (Allergen row : rows) {
            printer.printRecord(row.getNumber(), row.getName());
        }
        log.info("Finished exporting {} allergens to file {}", rows.size(), target);
    } catch (IOException e) {
        log.error("Failed to export allergens to file {} due to error: {}", target.getAbsolutePath(), e.getMessage());
    }
}
/** Exports all mensas, ordered by name, to {@code mensas.csv} in the export directory. */
@Override
public void run() {
    log.info("Start exporting mensas");
    final List<Mensa> rows = repo.findAllByOrderByName();
    final File target = exportDirPath.resolve("mensas.csv").toFile();
    final CSVFormat format = CSVFormat.DEFAULT
            .withHeader("id", "mainUrl", "name", "nextWeekUrl", "thisWeekUrl", "todayUrl",
                    "tomorrowUrl", "longitude", "latitude", "address", "zipcode", "city")
            .withRecordSeparator('\n');
    try (FileWriter fileWriter = new FileWriter(target);
         CSVPrinter printer = new CSVPrinter(fileWriter, format)) {
        for (Mensa row : rows) {
            // A mensa without a geo point gets empty longitude/latitude columns.
            String longitude;
            String latitude;
            if (row.getPoint() == null) {
                longitude = StringUtils.EMPTY;
                latitude = StringUtils.EMPTY;
            } else {
                longitude = Double.toString(row.getPoint().getX());
                latitude = Double.toString(row.getPoint().getY());
            }
            printer.printRecord(row.getId(), row.getMainUrl(), row.getName(), row.getNextWeekUrl(),
                    row.getThisWeekUrl(), row.getTodayUrl(), row.getTomorrowUrl(), longitude, latitude,
                    row.getAddress(), row.getZipcode(), row.getCity());
        }
        log.info("Finished exporting {} mensas to file {}", rows.size(), target);
    } catch (IOException e) {
        log.error("Failed to export mensas to file {} due to error: {}", target.getAbsolutePath(), e.getMessage());
    }
}
/**
 * Writes all test cases of the given scenarios to the file as Excel-style CSV,
 * with a blank line after each test case and between scenarios.
 * Does nothing for a null or empty scenario list.
 */
public void convertScenarios(File file, List<Scenario> scenarios) {
    if (scenarios == null || scenarios.isEmpty()) {
        return;
    }
    try (FileWriter out = new FileWriter(file);
         CSVPrinter printer = new CSVPrinter(out, CSVFormat.EXCEL.withIgnoreEmptyLines())) {
        printer.printRecord(HEADERS);
        for (Scenario scenario : scenarios) {
            for (TestCase testCase : scenario.getTestCases()) {
                convertTestCase(testCase, printer);
                printer.println();
            }
            printer.println();
        }
    } catch (Exception ex) {
        Logger.getLogger(StepMap.class.getName()).log(Level.SEVERE, "Error while converting", ex);
    }
}
/**
 * Persists the global data model to its CSV file: one header row from the
 * column names, then each non-empty record as a row.
 */
public static void saveChanges(GlobalDataModel globalData) {
    createIfNotExists(globalData.getLocation());
    try (FileWriter out = new FileWriter(new File(globalData.getLocation()));
         CSVPrinter printer = new CSVPrinter(out, CSVFormat.EXCEL.withIgnoreEmptyLines())) {
        // Header row first.
        for (String column : globalData.getColumns()) {
            printer.print(column);
        }
        printer.println();
        // Drop empty records, then write each remaining row cell by cell.
        globalData.removeEmptyRecords();
        for (List<String> row : globalData.getRecords()) {
            for (String cell : row) {
                printer.print(cell);
            }
            printer.println();
        }
    } catch (Exception ex) {
        Logger.getLogger(CSVUtils.class.getName()).log(Level.SEVERE, "Error while saving", ex);
    }
}
/**
 * Persists the test data model to its CSV file: one header row from the
 * column names, then each non-empty record as a row.
 */
public static void saveChanges(TestDataModel testData) {
    createIfNotExists(testData.getLocation());
    try (FileWriter out = new FileWriter(new File(testData.getLocation()));
         CSVPrinter printer = new CSVPrinter(out, CSVFormat.EXCEL.withIgnoreEmptyLines())) {
        // Header row first.
        for (String column : testData.getColumns()) {
            printer.print(column);
        }
        printer.println();
        // Drop empty records, then write each remaining row cell by cell.
        testData.removeEmptyRecords();
        for (Record row : testData.getRecords()) {
            for (String cell : row) {
                printer.print(cell);
            }
            printer.println();
        }
    } catch (Exception ex) {
        Logger.getLogger(CSVUtils.class.getName()).log(Level.SEVERE, "Error while saving", ex);
    }
}
/**
 * Writes the test case to its CSV location if it has unsaved changes:
 * header row, then one row per (renumbered, non-empty) test step.
 */
public void save() {
    if (isSaved()) {
        return;
    }
    createIfNotExists();
    try (FileWriter out = new FileWriter(new File(getLocation()));
         CSVPrinter printer = new CSVPrinter(out, CSVFormat.EXCEL.withIgnoreEmptyLines())) {
        printer.printRecord(HEADERS.getValues());
        // Clean up and renumber before persisting.
        removeEmptySteps();
        autoNumber();
        for (TestStep step : testSteps) {
            printer.printRecord(step.stepDetails);
        }
        setSaved(true);
    } catch (Exception ex) {
        Logger.getLogger(TestCase.class.getName()).log(Level.SEVERE, "Error while saving", ex);
    }
}
/**
 * Writes the test set to its CSV location if it has unsaved changes (header
 * row plus one row per non-empty execution step), then always saves the
 * execution settings.
 */
public void save() {
    if (!isSaved()) {
        createIfNotExists();
        try (FileWriter out = new FileWriter(new File(getLocation()));
             CSVPrinter printer = new CSVPrinter(out, CSVFormat.EXCEL.withIgnoreEmptyLines())) {
            printer.printRecord(HEADERS.getValues());
            // Drop blank steps before persisting.
            removeEmptySteps();
            for (ExecutionStep step : testSteps) {
                printer.printRecord(step.exeStepDetails);
            }
            setSaved(true);
        } catch (Exception ex) {
            Logger.getLogger(TestSet.class.getName()).log(Level.SEVERE, "Error while saving", ex);
        }
    }
    // Settings are persisted regardless of whether the step table changed.
    execSettings.save();
}
/**
 * Putting this init here so that we can discover the file fields before running the actual rec.
 * Idempotent: only the first call does any work (guarded by {@code initialized}).
 */
public void init() {
    if (!this.initialized) {
        try {
            MutableList<String> fields;
            if (csvVersion == CsvStaticDataReader.CSV_V2) {
                // V2 path: commons-csv. The first record is consumed from the
                // iterator here and treated as the header row; the iterator is
                // kept in a field so subsequent reads continue after the header.
                CSVFormat csvFormat = getCsvFormat(delim, nullToken);
                this.csvreaderV2 = new CSVParser(reader, csvFormat);
                this.iteratorV2 = csvreaderV2.iterator();
                fields = ListAdapter.adapt(IteratorUtils.toList(iteratorV2.next().iterator()));
            } else {
                // V1 path: legacy opencsv reader; readNext() yields the header row.
                this.csvreaderV1 = new au.com.bytecode.opencsv.CSVReader(this.reader, this.delim);
                fields = ArrayAdapter.adapt(this.csvreaderV1.readNext());
            }
            // Normalize header names via the configured DB-object-name converter.
            this.fields = fields.collect(this.convertDbObjectName);
        } catch (Exception e) {
            throw new DeployerRuntimeException(e);
        }
        this.initialized = true;
    }
}
private static String[] getUniqueFields(File inFile) throws IOException { CSVParser parser = new CSVParser(new BufferedReader(new FileReader(inFile)), CSVFormat.EXCEL.withNullString(NULL_STRING)); // first record used as header CSVRecord header = parser.iterator().next(); List<String> uniqueFields = new ArrayList<String>(); for(int i = 0; i < header.size(); i++) { String col = header.get(i); if (!uniqueFields.contains(col)) { // we can add it directly uniqueFields.add(col); } else { // disambiguate by appending index uniqueFields.add(col + "_" + i); } } return uniqueFields.toArray(new String[0]); }
/** Verifies the parquet fixture and the CSV fixture decode to the same JSON records. */
@Test
public void testing() throws Exception {
    JSONIterator parquet = ParquetAsJSONIterator.fromResource(parquetFilename);
    // Impala-generated fixtures use the "n_"-prefixed column names.
    boolean impala = parquetFilename.contains("impala");
    String nationKeyColumn = impala ? "n_nationkey" : "nation_key";
    String nameColumn = impala ? "n_name" : "name";
    String regionKeyColumn = impala ? "n_regionkey" : "region_key";
    String commentColumn = impala ? "n_comment" : "comment_col";
    CSVHeaderMap headerMap = CSVHeaderMap.builder()
            .add(nationKeyColumn, CSVColumnWriter.NUMBER)
            .add(nameColumn, CSVColumnWriter.STRING)
            .add(regionKeyColumn, CSVColumnWriter.NUMBER)
            .add(commentColumn, CSVColumnWriter.STRING)
            .build();
    CSVFormat pipeDelimited = CSVFormat.newFormat('|');
    JSONIterator csv = CSVAsJSONIterator.fromResource(
            "test-data/parquet-python/nation.csv", pipeDelimited, headerMap);
    assertThat(parquet, is(sameAs(csv)));
}
/** Checks spreadsheet detection from the CELLS fixture against the expected rectangles. */
@Test
public void testFindSpreadsheetsFromCells() throws IOException {
    List<Cell> cells = new ArrayList<>();
    CSVParser fixture = org.apache.commons.csv.CSVParser.parse(
            new File("src/test/resources/technology/tabula/csv/TestSpreadsheetExtractor-CELLS.csv"),
            Charset.forName("utf-8"), CSVFormat.DEFAULT);
    for (CSVRecord row : fixture) {
        float c0 = Float.parseFloat(row.get(0));
        float c1 = Float.parseFloat(row.get(1));
        float c2 = Float.parseFloat(row.get(2));
        float c3 = Float.parseFloat(row.get(3));
        cells.add(new Cell(c0, c1, c2, c3));
    }
    List<Rectangle> expected = Arrays.asList(EXPECTED_RECTANGLES);
    Collections.sort(expected, Rectangle.ILL_DEFINED_ORDER);
    List<Rectangle> foundRectangles = SpreadsheetExtractionAlgorithm.findSpreadsheetsFromCells(cells);
    Collections.sort(foundRectangles, Rectangle.ILL_DEFINED_ORDER);
    assertTrue(foundRectangles.equals(expected));
}
@Test public void testNaturalOrderOfRectanglesOneMoreTime() throws IOException { CSVParser parse = org.apache.commons.csv.CSVParser.parse(new File("src/test/resources/technology/tabula/csv/TestBasicExtractor-RECTANGLE_TEST_NATURAL_ORDER.csv"), Charset.forName("utf-8"), CSVFormat.DEFAULT); List<Rectangle> rectangles = new ArrayList<>(); for (CSVRecord record : parse) { rectangles.add(new Rectangle(Float.parseFloat(record.get(0)), Float.parseFloat(record.get(1)), Float.parseFloat(record.get(2)), Float.parseFloat(record.get(3)))); } //List<Rectangle> rectangles = Arrays.asList(RECTANGLES_TEST_NATURAL_ORDER); Utils.sort(rectangles, Rectangle.ILL_DEFINED_ORDER); for (int i = 0; i < (rectangles.size() - 1); i++) { Rectangle rectangle = rectangles.get(i); Rectangle nextRectangle = rectangles.get(i + 1); assertTrue(rectangle.compareTo(nextRectangle) < 0); } }
/**
 * Reads an HDR histogram export (RFC 4180 with '#' comment lines, first record
 * as header) into an HdrData: column 0 as the value, column 1 as the
 * percentile scaled to [0, 100].
 *
 * @param filename path of the CSV file
 * @return the populated HdrData
 * @throws IOException if the file cannot be read
 */
public HdrData read(String filename) throws IOException {
    HdrData ret = new HdrData();
    List<Double> value = ret.getValue();
    List<Double> percentile = ret.getPercentile();
    // try-with-resources closes the reader (it was leaked before)
    try (Reader in = new FileReader(filename)) {
        Iterable<CSVRecord> records = CSVFormat.RFC4180
                .withCommentMarker('#')
                .withFirstRecordAsHeader()
                .parse(in);
        for (CSVRecord record : records) {
            String valueStr = record.get(0);
            String percentileStr = record.get(1);
            logger.debug("Value: {}", valueStr);
            logger.debug("Percentile: {}", percentileStr);
            value.add(Double.parseDouble(valueStr));
            // source stores the percentile as a fraction; expose it as 0-100
            percentile.add(Double.parseDouble(percentileStr) * 100);
        }
    }
    return ret;
}
/**
 * Decodes data from the provided stream and invokes the provided {@link Consumer}
 * for each decoded record.
 *
 * @param in the {@link InputStream} for the CSV file
 * @param headers a list of the headers to keep from decoded records
 * @param mapToResult the function to invoke for each decoded record
 * @throws IOException in the event of an I/O error.
 * @throws DecodingDataFromAdapterException if an error occurred while decoding the CSV file.
 */
public void decode(InputStream in, List<String> headers, Consumer<DataSample<T>> mapToResult)
        throws IOException, DecodingDataFromAdapterException {
    try (Profiler ignored = Profiler.start("Building time series from csv data", logger::trace)) {
        try (BufferedReader reader = new BufferedReader(new InputStreamReader(in, encoding))) {
            CSVFormat format = CSVFormat.DEFAULT
                    .withAllowMissingColumnNames(false)
                    .withFirstRecordAsHeader()
                    .withSkipHeaderRecord()
                    .withDelimiter(delimiter);
            for (CSVRecord row : format.parse(reader)) {
                // Column 0 carries the timestamp; the requested header columns
                // are parsed as numbers and copied into the sample's cells.
                DataSample<T> sample = new DataSample<>(dateParser.apply(row.get(0)));
                for (String header : headers) {
                    sample.getCells().put(header, numberParser.apply(row.get(header)));
                }
                mapToResult.accept(sample);
            }
        }
    }
}
/**
 * Reads the smell CSV and returns entries whose "Class" column contains the
 * given filter (an empty filter matches everything). All columns after the
 * first are collected as the entry's values.
 *
 * @param nameFilter substring to match against the class name; "" matches all
 * @return the matching smell entries
 * @throws IOException if the file cannot be read
 */
private List<SmellData> filterData(String nameFilter) throws IOException {
    List<SmellData> smellDataList = new ArrayList<>();
    // try-with-resources closes the reader (it was leaked before)
    try (Reader in = new FileReader(this.csvFile)) {
        Iterable<CSVRecord> records = CSVFormat.RFC4180.withFirstRecordAsHeader().parse(in);
        for (CSVRecord record : records) {
            String className = record.get("Class");
            List<String> values = new ArrayList<>();
            for (int i = 1; i < record.size(); i++) {
                values.add(record.get(i));
            }
            if (className.contains(nameFilter) || nameFilter.equals("")) {
                smellDataList.add(new SmellData(className, values));
            }
        }
    }
    return smellDataList;
}
/** * Imports a Sheet from a CSV file in the specified path. * @param path a CSV File Path. * @return a new Sheet or null if parsing failed */ public Sheet importSheet(String path) { File csvData = new File(path); // Parse the CSV file. CSVParser parser; try { parser = CSVParser.parse(csvData, Charset.defaultCharset(), CSVFormat.RFC4180); } catch (IOException e) { return null; } // Create our new sheet. Sheet sheet = new Sheet("Imported Sheet"); // Populate its cells. for (CSVRecord record : parser) { for (int x = 0; x < record.size(); ++x) { sheet.setCellValue(x, (int) record.getRecordNumber() - 1, record.get(x), true); } } return sheet; }
/**
 * Rewrites the pending-transactions CSV file with the given transactions,
 * one record per transaction (fields taken from its comma-separated toString()).
 *
 * @param trans the pending transactions to persist
 */
public void rewriteCSV(List<UserStockTransaction> trans) {
    try {
        filePath = servletContext.getRealPath("WEB-INF/csv/pending.csv");
        File f = new File(filePath);
        // try-with-resources flushes and closes printer/writer even on failure;
        // the old manual close sequence leaked both when printing threw.
        // (A stray System.out.println of the whole list was also removed.)
        try (FileWriter fw = new FileWriter(f);
             CSVPrinter cp = new CSVPrinter(fw, CSVFormat.DEFAULT)) {
            for (UserStockTransaction tx : trans) {
                // NOTE(review): relies on toString() producing the exact CSV
                // column order — confirm against UserStockTransaction.
                cp.printRecord((Object[]) tx.toString().split(","));
            }
        }
    } catch (Exception e) {
        e.printStackTrace();
    }
}
/** Prints the Name and Class columns of the bundled students.csv as an aligned table. */
public static void main(String[] args) throws IOException {
    try (Reader reader = new InputStreamReader(
                 ExampleStudentCSV.class.getClassLoader().getResourceAsStream("students.csv"));
         CSVParser parser = new CSVParser(reader,
                 CSVFormat.EXCEL.withHeader("Name", "Class", "Dorm", "Room", "GPA").withFirstRecordAsHeader())) {
        String divider = StringUtils.leftPad("", 20, '-');
        System.out.printf("%20s | %20s\n", "Name", "Class");
        System.out.printf("%20s-+-%20s\n", divider, divider);
        for (CSVRecord student : parser) {
            System.out.printf("%20s | %20s\n", student.get("Name"), student.get("Class"));
        }
    }
}
/**
 * Resolves the configured file location (URL or local path), downloads it as
 * CSV, and loads all records into {@code csvRecords}.
 *
 * @throws Exception if resolving, downloading, or parsing fails
 */
protected void setupUtils() throws Exception {
    CSVFormat format = CSVFormat.DEFAULT;
    String fileLocation = config.getFileLocation();
    URL url;
    try {
        url = new URL(fileLocation);
    } catch (MalformedURLException e) {
        // Not a URL — treat it as a local file path.
        File file = new File(fileLocation);
        if (!file.exists()) {
            // Fixed SLF4J call: the message lacked a {} placeholder, so the
            // offending path was never logged.
            log.error("File does not exist: {}", fileLocation);
        }
        url = file.toURI().toURL();
    }
    // try-with-resources closes parser and reader (they were leaked before).
    try (InputStreamReader isr = new InputStreamReader(
                 downloadUtils.fetchInputStream(url, getProvider().getLabel(), ".csv"));
         CSVParser csvFileParser = new CSVParser(isr, format)) {
        csvRecords = csvFileParser.getRecords();
    }
}
@Test public void testExportsCSV() throws Exception { DataExportSpecificationBuilder csvBuilder = DataExportSpecificationBuilder.withCSVExporter(); csvBuilder .addSubjectSpecification( new SubjectSpecificationBuilder(AbstractONSImporter.PROVIDER.getLabel(), "lsoa").setMatcher("label", "E01002766")) .addDatasourceSpecification("uk.org.tombolo.importer.ons.CensusImporter", "qs103ew", "") .addFieldSpecification( FieldBuilder.fractionOfTotal("percentage_under_1_years_old_label") .addDividendAttribute("uk.gov.ons", "Age: Age under 1") // number under one year old .setDivisorAttribute("uk.gov.ons", "Age: All categories: Age") // total population ); engine.execute(csvBuilder.build(), writer); List<CSVRecord> records = CSVParser.parse(writer.toString(), CSVFormat.DEFAULT.withHeader()).getRecords(); assertEquals(1, records.size()); assertEquals("E01002766", records.get(0).get("label")); assertEquals("0.012263099219620958", records.get(0).get("percentage_under_1_years_old_label")); }
/**
 * Each String in the stream is a CSV file
 * @return stream of parsed insert queries
 */
public Stream<Map<String, Object>> convert() {
    try{
        // Build a parser over the instance's reader using the configured
        // separator/escape/quote/null-token; the first record supplies the
        // column names for subsequent records.
        CSVParser csvParser = CSVFormat.newFormat(separator)
                .withIgnoreEmptyLines()
                .withEscape('\\' )
                .withFirstRecordAsHeader()
                .withQuote(quote)
                .withNullString(nullString)
                .parse(reader);
        // NOTE(review): the parser (and underlying reader) is never closed here;
        // it must stay open because the returned Stream consumes records lazily.
        // Confirm the caller is responsible for closing the reader when done.
        return stream(csvParser.iterator()).map(this::parse);
    } catch (IOException e){
        throw new RuntimeException(e);
    }
}
/**
 * Opens the BZip2-compressed geolocation feature file as UTF-8 CSV, prepares
 * the record iterator, and delegates to the downstream processor.
 */
@Override
public void startRevisionProcessing() {
    logger.debug("Starting...");
    try {
        // Stream chain: file -> buffer -> BZip2 decompression -> UTF-8 reader.
        BufferedReader csvReader;
        csvReader = new BufferedReader(
                new InputStreamReader(
                        new BZip2CompressorInputStream(
                                new BufferedInputStream(
                                        new FileInputStream(geolocationFeatureFile))), "UTF-8"));
        // Parser and iterator are stored in fields: they remain open for the
        // whole processing run and are presumably closed by the corresponding
        // finish/teardown method — TODO confirm.
        csvParser = new CSVParser(csvReader, CSVFormat.RFC4180.withHeader());
        iterator = csvParser.iterator();
        processor.startRevisionProcessing();
    } catch (IOException e) {
        logger.error("", e);
    }
}
/**
 * Logs the action frequency distributions and writes the monthly
 * action/count breakdown to a fully-quoted RFC 4180 CSV file at {@code path}.
 */
private void logResults() {
    logger.info("Action frequency distribution:\n"
            + FrequencyUtils.formatFrequency(actionDistribution));
    logger.info("Action frequency distribution of rollback-reverted revisions:\n"
            + FrequencyUtils.formatFrequency(rollbackRevertedActionDistribution));
    logger.info("Action frequency distribution of non-rollback-reverted revisions:\n"
            + FrequencyUtils.formatFrequency(nonRollbackRevertedActionDistribution));
    // try-with-resources closes printer and writer even if printing throws;
    // the old code leaked both on any exception before the final close()
    try (Writer writer = new PrintWriter(path, "UTF-8");
         CSVPrinter csvWriter = CSVFormat.RFC4180.withQuoteMode(QuoteMode.ALL)
                 .withHeader("month", "action", "count").print(writer)) {
        for (Entry<String, HashMap<String, Integer>> entry : getSortedList(monthlyActionDistribution)) {
            String month = entry.getKey();
            for (Entry<String, Integer> entry2 : getSortedList2(entry.getValue())) {
                csvWriter.printRecord(month, entry2.getKey(), entry2.getValue());
            }
        }
    } catch (IOException e) {
        logger.error("", e);
    }
}
/**
 * Initializes the label reader.
 * Opens the labels stream as UTF-8 CSV with the expected header and validates
 * the file's physical header row against FILE_HEADER, failing fast on mismatch.
 */
public void startReading() {
    try {
        BufferedReader csvReader = new BufferedReader(
                new InputStreamReader(labelsStream, "UTF-8"), BUFFER_SIZE);
        // Column names are forced via FILE_HEADER; the file's own header row is
        // therefore returned as a normal record and consumed/validated below.
        csvParser = new CSVParser(csvReader,
                CSVFormat.RFC4180.withHeader(FILE_HEADER));
        iterator = csvParser.iterator();
        CSVRecord headerRecord = iterator.next();
        for (int i = 0; i < FILE_HEADER.length; i++) {
            // Reject files whose header row does not match the expected layout.
            if (!FILE_HEADER[i].equals(headerRecord.get(i))) {
                throw new IOException(
                        "The header of the CSV file is wrong.");
            }
        }
    } catch (IOException e) {
        logger.error("", e);
        // Release the partially-opened resources on failure.
        finishReading();
    }
}
/**
 * Extracts the text (column 2) of every offensive tweet from the processed
 * dataset into a plain-text training file, skipping the header row and rows
 * classified as "The tweet is not offensive".
 */
public static void main(String[] args) throws IOException {
    try (CSVParser parser = CSVParser.parse(new File("dev/twitter-hate-speech-processed.csv"),
                 Charset.forName("Cp1252"), CSVFormat.DEFAULT);
         PrintWriter writer = new PrintWriter("training/bad/model_comments_bad_tweets.txt", "UTF-8")) {
        boolean headerRow = true;
        for (CSVRecord record : parser) {
            if (headerRow) {
                headerRow = false;
                continue;
            }
            // Column 0 holds the classification label.
            if (record.get(0).equalsIgnoreCase("The tweet is not offensive")) {
                continue;
            }
            writer.println(record.get(2));
        }
    }
}