public ChecksumFSOutputSummer(ChecksumFileSystem fs, Path file,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    int bytesPerChecksum, Progressable progress) throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4, null);
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize,
      replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerChecksum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true,
      sumBufferSize, replication, blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerChecksum);
}
public static void createIndexFile(File indexFile, Configuration conf)
    throws IOException {
  if (indexFile.exists()) {
    System.out.println("Deleting existing file");
    indexFile.delete();
  }
  indexFile.createNewFile();
  FSDataOutputStream output = FileSystem.getLocal(conf).getRaw().append(
      new Path(indexFile.getAbsolutePath()));
  Checksum crc = new PureJavaCrc32();
  crc.reset();
  CheckedOutputStream chk = new CheckedOutputStream(output, crc);
  String msg = "Writing new index file. This file will be used only " +
      "for the testing.";
  chk.write(Arrays.copyOf(msg.getBytes(),
      MapTask.MAP_OUTPUT_INDEX_RECORD_LENGTH));
  output.writeLong(chk.getChecksum().getValue());
  output.close();
}
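This works because PureJavaCrc32 implements java.util.zip.Checksum, so it plugs directly into the standard CheckedOutputStream. A minimal standalone sketch of the same write-then-append-CRC pattern; the file name and payload are made up for illustration:

import java.io.FileOutputStream;
import java.io.IOException;
import java.util.zip.CheckedOutputStream;
import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;

public class CrcWriteSketch {
  public static void main(String[] args) throws IOException {
    Checksum crc = new PureJavaCrc32();
    try (CheckedOutputStream out =
        new CheckedOutputStream(new FileOutputStream("data.bin"), crc)) {
      out.write("hello".getBytes());  // CRC accumulates as bytes pass through
      System.out.println("crc32 = " + out.getChecksum().getValue());
    }
  }
}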
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
@SuppressWarnings("deprecation")
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (LayoutVersion.supports(Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }
  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
public ChecksumFSOutputSummer(ChecksumFileSystem fs, Path file,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, overwrite, bufferSize,
      replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file), true,
      sumBufferSize, replication, blockSize);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
public ChecksumFSOutputSummer(final ChecksumFs fs, final Path file,
    final EnumSet<CreateFlag> createFlag,
    final FsPermission absolutePermission, final int bufferSize,
    final short replication, final long blockSize,
    final Progressable progress, final ChecksumOpt checksumOpt,
    final boolean createParent) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);

  // checksumOpt is passed down to the raw fs. Unless the raw fs implements
  // checksumming internally, checksumOpt will be ignored. If the raw fs
  // does checksum internally, we will end up with two layers of
  // checksumming, i.e. checksumming the checksum file.
  this.datas = fs.getRawFs().createInternal(file, createFlag,
      absolutePermission, bufferSize, replication, blockSize, progress,
      checksumOpt, createParent);

  // Now create the checksum file; adjust the buffer size
  int bytesPerSum = fs.getBytesPerSum();
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFs().createInternal(fs.getChecksumFile(file),
      EnumSet.of(CreateFlag.CREATE, CreateFlag.OVERWRITE),
      absolutePermission, sumBufferSize, replication, blockSize, progress,
      checksumOpt, createParent);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
public ChecksumFSOutputSummer(ChecksumFileSystem fs, Path file,
    boolean overwrite, int bufferSize, short replication, long blockSize,
    Progressable progress, FsPermission permission) throws IOException {
  super(new PureJavaCrc32(), fs.getBytesPerSum(), 4);
  int bytesPerSum = fs.getBytesPerSum();
  this.datas = fs.getRawFileSystem().create(file, permission, overwrite,
      bufferSize, replication, blockSize, progress);
  int sumBufferSize = fs.getSumBufferSize(bytesPerSum, bufferSize);
  this.sums = fs.getRawFileSystem().create(fs.getChecksumFile(file),
      permission, true, sumBufferSize, replication, blockSize, null);
  sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
  sums.writeInt(bytesPerSum);
}
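All of the ChecksumFSOutputSummer constructors above begin the checksum file with the same header: the CHECKSUM_VERSION magic bytes followed by bytesPerSum as a big-endian int. A sketch of just that header write using plain java.io; the magic value here is an assumption for illustration, the real constant is defined by ChecksumFileSystem:

import java.io.DataOutputStream;
import java.io.FileOutputStream;
import java.io.IOException;

public class ChecksumHeaderSketch {
  // Illustrative only; ChecksumFileSystem defines the real CHECKSUM_VERSION.
  private static final byte[] CHECKSUM_VERSION = new byte[] {'c', 'r', 'c', 0};

  public static void writeHeader(String crcFile, int bytesPerSum)
      throws IOException {
    try (DataOutputStream sums =
        new DataOutputStream(new FileOutputStream(crcFile))) {
      sums.write(CHECKSUM_VERSION, 0, CHECKSUM_VERSION.length);
      sums.writeInt(bytesPerSum);  // DataOutputStream writes big-endian
    }
  }
}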
/**
 * Construct the reader
 * @param in The stream to read from.
 * @param logVersion The version of the data coming from the stream.
 */
public Reader(DataInputStream in, StreamLimiter limiter, int logVersion) {
  this.logVersion = logVersion;
  if (NameNodeLayoutVersion.supports(
      LayoutVersion.Feature.EDITS_CHESKUM, logVersion)) {
    this.checksum = new PureJavaCrc32();
  } else {
    this.checksum = null;
  }
  // It is possible that the logVersion is actually a future layout version
  // during the rolling upgrade (e.g., the NN gets upgraded first). We
  // assume future layouts will also support length of editlog op.
  this.supportEditLogLength = NameNodeLayoutVersion.supports(
      NameNodeLayoutVersion.Feature.EDITLOG_LENGTH, logVersion)
      || logVersion < NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION;

  if (this.checksum != null) {
    this.in = new DataInputStream(
        new CheckedInputStream(in, this.checksum));
  } else {
    this.in = in;
  }
  this.limiter = limiter;
  this.cache = new OpInstanceCache();
  this.maxOpSize = DFSConfigKeys.DFS_NAMENODE_MAX_OP_SIZE_DEFAULT;
}
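Both Reader constructors wire the checksum in the same way: ops are read through a CheckedInputStream so the CRC accumulates as bytes are consumed, and the value is later compared with the checksum stored in the log. A minimal sketch of that verify-on-read pattern; the length-prefix framing is invented for illustration and is not the edit-log format:

import java.io.DataInputStream;
import java.io.IOException;
import java.util.zip.CheckedInputStream;
import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;

public class VerifyOnReadSketch {
  public static byte[] readRecord(DataInputStream raw) throws IOException {
    Checksum crc = new PureJavaCrc32();
    DataInputStream in = new DataInputStream(new CheckedInputStream(raw, crc));
    int len = in.readInt();          // illustrative framing: length prefix
    byte[] body = new byte[len];
    in.readFully(body);              // CRC accumulates over length + body
    long expected = raw.readLong();  // stored checksum, read outside the CRC
    if (crc.getValue() != expected) {
      throw new IOException("Checksum error on record");
    }
    return body;
  }
}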
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize,
    Progressable progress, int bytesPerChecksum, short replication)
    throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4);
  this.dfsClient = dfsClient;
  this.conf = dfsClient.conf;
  this.src = src;
  // this.blockSize = blockSize;
  this.blockReplication = replication;
  this.progress = progress;
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
        + src);
  }
  if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
    throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum
        + ") and blockSize(" + blockSize + ") do not match. "
        + "blockSize should be a multiple of io.bytes.per.checksum");
  }
  checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      bytesPerChecksum);
}
private DFSOutputStream(DFSClient dfsClient, String src, long blockSize,
    Progressable progress, int bytesPerChecksum) throws IOException {
  super(new PureJavaCrc32(), bytesPerChecksum, 4);
  this.dfsClient = dfsClient;
  this.conf = dfsClient.conf;
  this.src = src;
  // this.blockSize = blockSize;
  this.progress = progress;
  if ((progress != null) && DFSClient.LOG.isDebugEnabled()) {
    DFSClient.LOG.debug("Set non-null progress callback on DFSOutputStream "
        + src);
  }
  if (bytesPerChecksum < 1 || blockSize % bytesPerChecksum != 0) {
    throw new IOException("io.bytes.per.checksum(" + bytesPerChecksum
        + ") and blockSize(" + blockSize + ") do not match. "
        + "blockSize should be a multiple of io.bytes.per.checksum");
  }
  checksum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      bytesPerChecksum);
}
private void checkImages(FSNamesystem fsn) throws Exception {
  Iterator<StorageDirectory> iter = fsn.getFSImage()
      .dirIterator(FSImage.NameNodeDirType.IMAGE);
  List<Long> checksums = new ArrayList<Long>();
  while (iter.hasNext()) {
    StorageDirectory sd = iter.next();
    File fsImage = FSImage.getImageFile(sd, FSImage.NameNodeFile.IMAGE);
    PureJavaCrc32 crc = new PureJavaCrc32();
    FileInputStream in = new FileInputStream(fsImage);
    byte[] buff = new byte[4096];
    int read = 0;
    while ((read = in.read(buff)) != -1) {
      crc.update(buff, 0, read);
    }
    in.close(); // close this image before moving on to the next one
    long val = crc.getValue();
    checksums.add(val);
  }
  assertTrue("Not enough fsimage copies in MiniDFSCluster "
      + "to test parallel write", checksums.size() > 1);
  for (int i = 1; i < checksums.size(); i++) {
    assertEquals(checksums.get(i - 1), checksums.get(i));
  }
}
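The inner read loop of checkImages is a general file-CRC idiom; factored into a standalone helper it looks like this (class and method names are hypothetical):

import java.io.FileInputStream;
import java.io.IOException;
import org.apache.hadoop.util.PureJavaCrc32;

public class FileCrc {
  /** Compute the CRC32 of an entire file, 4 KB at a time. */
  public static long crcOf(String path) throws IOException {
    PureJavaCrc32 crc = new PureJavaCrc32();
    try (FileInputStream in = new FileInputStream(path)) {
      byte[] buff = new byte[4096];
      int read;
      while ((read = in.read(buff)) != -1) {
        crc.update(buff, 0, read);
      }
    }
    return crc.getValue();
  }
}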
public BlockXCodingMerger(Block block, int namespaceId,
    DataInputStream[] childInputStreams, long offsetInBlock, long length,
    String[] childAddrs, String myAddr, DataTransferThrottler throttler,
    int mergerLevel) throws IOException {
  super();
  this.block = block;
  this.namespaceId = namespaceId;
  this.childInputStreams = childInputStreams;
  this.offsetInBlock = offsetInBlock;
  this.length = length;
  this.childAddrs = childAddrs;
  this.myAddr = myAddr;
  this.throttler = throttler;
  this.mergerLevel = mergerLevel;
  Configuration conf = new Configuration();
  this.packetSize = conf.getInt("raid.blockreconstruct.packetsize", 4096);
  this.bytesPerChecksum = conf.getInt("io.bytes.per.checksum", 512);
  this.checksum = DataChecksum.newDataChecksum(
      DataChecksum.CHECKSUM_CRC32, bytesPerChecksum, new PureJavaCrc32());
  this.checksumSize = checksum.getChecksumSize();
}
public static void outputRecords(OutputStream out,
                                 boolean useAscii,
                                 Unsigned16 firstRecordNumber,
                                 Unsigned16 recordsToGenerate,
                                 Unsigned16 checksum) throws IOException {
  byte[] row = new byte[100];
  Unsigned16 recordNumber = new Unsigned16(firstRecordNumber);
  Unsigned16 lastRecordNumber = new Unsigned16(firstRecordNumber);
  Checksum crc = new PureJavaCrc32();
  Unsigned16 tmp = new Unsigned16();
  lastRecordNumber.add(recordsToGenerate);
  Unsigned16 ONE = new Unsigned16(1);
  Unsigned16 rand = Random16.skipAhead(firstRecordNumber);
  while (!recordNumber.equals(lastRecordNumber)) {
    Random16.nextRand(rand);
    if (useAscii) {
      generateAsciiRecord(row, rand, recordNumber);
    } else {
      generateRecord(row, rand, recordNumber);
    }
    if (checksum != null) {
      crc.reset();
      crc.update(row, 0, row.length);
      tmp.set(crc.getValue());
      checksum.add(tmp);
    }
    recordNumber.add(ONE);
    out.write(row);
  }
}
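Here each 100-byte row gets its own CRC, and the per-row CRCs are summed into a 128-bit total via Unsigned16. The accumulation idea, sketched with a plain long accumulator instead of Unsigned16 (so, unlike the original, the sum can wrap):

import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;

public class RowChecksumSketch {
  public static long sumOfRowCrcs(byte[][] rows) {
    Checksum crc = new PureJavaCrc32();
    long sum = 0;
    for (byte[] row : rows) {
      crc.reset();                 // one CRC per row
      crc.update(row, 0, row.length);
      sum += crc.getValue();       // may wrap; teragen uses 128-bit Unsigned16
    }
    return sum;
  }
}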
/**
 * Create a checksum output stream that writes
 * the bytes to the given stream.
 * @param out the stream to write to
 */
public IFileOutputStream(OutputStream out) {
  super(out);
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      Integer.MAX_VALUE, new PureJavaCrc32());
  barray = new byte[sum.getChecksumSize()];
}
/**
 * Create a checksum input stream that reads and verifies data
 * from the given stream.
 * @param in The input stream to be verified for checksum.
 * @param len The length of the input stream including checksum bytes.
 */
public IFileInputStream(InputStream in, long len) {
  this.in = in;
  sum = DataChecksum.newDataChecksum(DataChecksum.CHECKSUM_CRC32,
      Integer.MAX_VALUE, new PureJavaCrc32());
  checksumSize = sum.getChecksumSize();
  length = len;
  dataLength = length - checksumSize;
}
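Together these two classes give an IFile stream a single trailing checksum: the data bytes come first, then the checksum, which is why dataLength above is length minus checksumSize. A sketch of that trailing-CRC layout with plain java.util.zip types (the 4-byte size matches a CRC32 checksum):

import java.io.ByteArrayOutputStream;
import java.io.DataOutputStream;
import java.io.IOException;
import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;

public class TrailingCrcSketch {
  /** Return the payload followed by its 4-byte big-endian CRC32. */
  public static byte[] withTrailingCrc(byte[] payload) throws IOException {
    Checksum crc = new PureJavaCrc32();
    crc.update(payload, 0, payload.length);
    ByteArrayOutputStream bytes = new ByteArrayOutputStream();
    DataOutputStream out = new DataOutputStream(bytes);
    out.write(payload);
    out.writeInt((int) crc.getValue());  // low 32 bits of the CRC
    out.flush();
    return bytes.toByteArray();
  }
}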
/**
 * Object constructor.
 */
public BlockReaderAccelerator(Configuration conf,
    InetSocketAddress targetAddress, DatanodeInfo chosenNode,
    int dataTransferVersion, int namespaceId, String clientName,
    Socket sock, String hdfsfile, LocatedBlock blk, long startOffset,
    long length, boolean verifyChecksum, DFSClientMetrics metrics)
    throws IOException {
  this.conf = conf;
  this.targetAddress = targetAddress;
  this.datanodeInfo = chosenNode;
  this.dataTransferVersion = dataTransferVersion;
  this.namespaceId = namespaceId;
  this.clientName = clientName;
  this.sock = sock;
  this.hdfsfile = hdfsfile;
  this.blk = blk;
  this.startOffset = startOffset;
  this.length = length;
  this.verifyChecksum = verifyChecksum;
  this.metrics = metrics;

  // create a checksum checker
  if (this.verifyChecksum) {
    this.checker = new PureJavaCrc32();
  }
}
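When verifyChecksum is set, a checker like this one is typically driven per received data chunk: reset, update over the chunk bytes, then compare against the CRC that arrived with the chunk. A sketch of that per-chunk verification; the method name and framing are assumptions for illustration, not BlockReaderAccelerator's actual API:

import java.io.IOException;
import java.util.zip.Checksum;
import org.apache.hadoop.util.PureJavaCrc32;

public class ChunkVerifySketch {
  private final Checksum checker = new PureJavaCrc32();

  /** Verify one received chunk against the checksum sent with it. */
  public void verifyChunk(byte[] chunk, int off, int len, long expectedCrc)
      throws IOException {
    checker.reset();
    checker.update(chunk, off, len);
    if (checker.getValue() != expectedCrc) {
      throw new IOException("Checksum mismatch on chunk at offset " + off);
    }
  }
}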