@Override
public Optional<Group> findInCache(String cacheKey) {
  logger.info("{}", kvp("op", "findInCache", "group", cacheKey));

  @SuppressWarnings("unchecked")
  Optional<String> maybe = (Optional<String>) new RedisCacheCommand<Optional<String>>(
      "GroupCacheGet",
      () -> redisCache.get(cacheKey),
      Optional::empty,
      hystrixConfiguration,
      metrics).execute();

  if (maybe.isPresent()) {
    Group cached = GroupSupport.toGroup(maybe.get());
    logger.info("{} /group=[{}]",
        kvp("op", "findInCache", "key", cacheKey, "result", "cache_hit"),
        TextFormat.shortDebugString(cached));
    return Optional.of(cached);
  }

  logger.info("{}", kvp("op", "findInCache", "key", cacheKey, "result", "cache_miss"));
  return Optional.empty();
}
@Override
public Optional<Feature> findInCache(String cacheKey) {
  //noinspection unchecked
  Optional<String> maybe = (Optional<String>) new RedisCacheCommand<Optional<String>>(
      "CacheGet",
      () -> redisCache.get(cacheKey),
      Optional::empty,
      hystrixConfiguration,
      metrics).execute();

  if (maybe.isPresent()) {
    Feature cached = FeatureSupport.toFeature(maybe.get());
    logger.info("{} /feature=[{}]",
        kvp("op", "findInCache", "key", cacheKey, "result", "cache_hit"),
        TextFormat.shortDebugString(cached));
    return Optional.of(cached);
  }

  logger.info("{}", kvp("op", "findInCache", "key", cacheKey, "result", "cache_miss"));
  return Optional.empty();
}
@Override
public Request parseRequest(byte[] bytes) throws IOException {
  ByteString byteString = UnsafeByteOperations.unsafeWrap(bytes);
  CodedInputStream inputStream = byteString.newCodedInput();
  // Enable aliasing to avoid an extra copy to get at the serialized Request inside of the
  // WireMessage.
  inputStream.enableAliasing(true);
  WireMessage wireMsg = WireMessage.parseFrom(inputStream);

  String serializedMessageClassName = wireMsg.getName();
  try {
    RequestTranslator translator = getParserForRequest(serializedMessageClassName);
    // The ByteString should be logical offsets into the original byte array
    return translator.transform(wireMsg.getWrappedMessage());
  } catch (RuntimeException e) {
    if (LOG.isDebugEnabled()) {
      LOG.debug("Failed to parse request message '{}'", TextFormat.shortDebugString(wireMsg));
    }
    throw e;
  }
}
/**
 * @return the current state of the given segment, or null if the
 *         segment does not exist.
 */
@VisibleForTesting
SegmentStateProto getSegmentInfo(long segmentTxId) throws IOException {
  EditLogFile elf = fjm.getLogFile(segmentTxId);
  if (elf == null) {
    return null;
  }
  if (elf.isInProgress()) {
    elf.scanLog();
  }
  if (elf.getLastTxId() == HdfsConstants.INVALID_TXID) {
    LOG.info("Edit log file " + elf + " appears to be empty. "
        + "Moving it aside...");
    elf.moveAsideEmptyFile();
    return null;
  }
  SegmentStateProto ret = SegmentStateProto.newBuilder()
      .setStartTxId(segmentTxId)
      .setEndTxId(elf.getLastTxId())
      .setIsInProgress(elf.isInProgress())
      .build();
  LOG.info("getSegmentInfo(" + segmentTxId + "): " + elf + " -> "
      + TextFormat.shortDebugString(ret));
  return ret;
}
synchronized long get() throws IOException {
  try {
    currentStat = zkc.exists(path, false);
    if (currentStat == null) {
      return 0;
    } else {
      byte[] bytes = zkc.getData(path, false, currentStat);
      MaxTxIdProto.Builder builder = MaxTxIdProto.newBuilder();
      TextFormat.merge(new String(bytes, UTF_8), builder);
      if (!builder.isInitialized()) {
        throw new IOException("Invalid/Incomplete data in znode");
      }
      return builder.build().getTxId();
    }
  } catch (KeeperException e) {
    throw new IOException("Error reading the max tx id from zk", e);
  } catch (InterruptedException ie) {
    throw new IOException("Interrupted while reading the max tx id", ie);
  }
}
private void replayWALFlushCannotFlushMarker(FlushDescriptor flush, long replaySeqId) {
  synchronized (writestate) {
    if (this.lastReplayedOpenRegionSeqId > replaySeqId) {
      LOG.warn(getRegionInfo().getEncodedName() + " : "
          + "Skipping replaying flush event :" + TextFormat.shortDebugString(flush)
          + " because its sequence id " + replaySeqId + " is smaller than this regions "
          + "lastReplayedOpenRegionSeqId of " + lastReplayedOpenRegionSeqId);
      return;
    }

    // If we were waiting for observing a flush or region opening event for not showing
    // partial data after a secondary region crash, we can allow reads now. This event means
    // that the primary was not able to flush because memstore is empty when we requested
    // flush. By the time we observe this, we are guaranteed to have up to date seqId with
    // our previous assignment.
    this.setReadsEnabled(true);
  }
}
public ApplicationLog parseMessage(String messageString)
    throws InvalidProtocolBufferException, TextFormat.ParseException {
  LogMessages.Message.Builder builder = LogMessages.Message.newBuilder();
  TextFormat.merge(messageString, builder);
  LogMessages.Message message = builder.build();
  return createApplicationLog(message);
}
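// Hedged sketch, not part of the original source: a self-contained round trip showing the
// TextFormat.merge contract that parseMessage relies on. It uses the well-known
// com.google.protobuf.Timestamp type purely for illustration; the real code merges into a
// LogMessages.Message builder instead.
import com.google.protobuf.TextFormat;
import com.google.protobuf.Timestamp;

public final class TextFormatMergeExample {
  public static void main(String[] args) throws TextFormat.ParseException {
    // Text-format rendering of a Timestamp; this is the representation merge() consumes.
    String text = "seconds: 1690000000 nanos: 42";

    // Parse the text into a builder, just as parseMessage does for its message string.
    Timestamp.Builder builder = Timestamp.newBuilder();
    TextFormat.merge(text, builder);

    // Prints: seconds: 1690000000 nanos: 42
    System.out.println(TextFormat.shortDebugString(builder.build()));
  }
}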
@Override
public Optional<Group> register(Group group) {
  logger.info("{} /group[{}]", kvp("op", "register"), TextFormat.shortDebugString(group));

  new GroupValidator().validateRegistrationThrowing(group);
  final Optional<Group> registration = processRegistration(group);
  registration.ifPresent(this::addToCache);
  return registration;
}
@Override
public Group add(Group group, MemberAccess member) {
  logger.info("{} /group[{}]/mbr[{}]", kvp("op", "add.member"),
      TextFormat.shortDebugString(group), TextFormat.shortDebugString(member));

  final Group update = processUpdate(group, builder -> {
    AccessCollection.Builder accessBuilder = newGrantCollectionBuilder();
    accessBuilder.addAllMembers(groupUpdateProcessor.mergeMembers(group, member));
    accessBuilder.addAllServices(group.getGranted().getServicesList());
    builder.setGranted(accessBuilder.buildPartial());
  });
  addToCache(update);
  return update;
}
@Override
public Group add(Group group, final Owner incoming) {
  logger.info("{} /group[{}]/own[{}]", kvp("op", "add.owner"),
      TextFormat.shortDebugString(group), TextFormat.shortDebugString(incoming));

  final Group update = processUpdate(group,
      builder -> builder.setOwners(OwnerCollection.newBuilder()
          .addAllItems(groupUpdateProcessor.mergeOwners(group, incoming))));
  addToCache(update);
  return update;
}
/**
 * Write a log marker that a bulk load has succeeded and is about to be committed.
 *
 * @param wal The log to write into.
 * @param htd A description of the table that we are bulk loading into.
 * @param hri A description of the region in the table that we are bulk loading into.
 * @param desc A protocol buffers based description of the client's bulk loading request
 * @return txid of this transaction or if nothing to do, the last txid
 * @throws IOException We will throw an IOException if we cannot append to the HLog.
 */
public static long writeBulkLoadMarkerAndSync(final WAL wal, final HTableDescriptor htd,
    final HRegionInfo hri, final WALProtos.BulkLoadDescriptor desc,
    final MultiVersionConcurrencyControl mvcc) throws IOException {
  long trx = writeMarker(wal, htd, hri, WALEdit.createBulkLoadEvent(hri, desc), mvcc, true);
  if (LOG.isTraceEnabled()) {
    LOG.trace("Appended Bulk Load marker " + TextFormat.shortDebugString(desc));
  }
  return trx;
}
private static BuildFarmServerConfig toBuildFarmServerConfig(InputStream inputStream,
    BuildFarmServerOptions options) throws IOException {
  BuildFarmServerConfig.Builder builder = BuildFarmServerConfig.newBuilder();
  String data = new String(Encoding.convertFromLatin1(ByteStreams.toByteArray(inputStream)));
  TextFormat.merge(data, builder);
  if (options.port > 0) {
    builder.setPort(options.port);
  }
  return builder.build();
}
private static ByteString unescape(String toUnescape) {
  try {
    return TextFormat.unescapeBytes(toUnescape);
  } catch (TextFormat.InvalidEscapeSequenceException e) {
    throw new RuntimeException(e);
  }
}
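// Hedged usage sketch, illustrative only and not part of the snippet above: shows what
// TextFormat.unescapeBytes does with a C-style escaped literal, which is the operation the
// unescape helper wraps. The class name and input value are invented for the example.
import com.google.protobuf.ByteString;
import com.google.protobuf.TextFormat;

public final class UnescapeExample {
  public static void main(String[] args) throws TextFormat.InvalidEscapeSequenceException {
    // "\\000\\001abc" is the text-format escaping of the bytes 0x00 0x01 'a' 'b' 'c'.
    ByteString bytes = TextFormat.unescapeBytes("\\000\\001abc");
    System.out.println(bytes.size()); // 5
  }
}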
public static <K> String mapToString(Map<K, ? extends Message> map) {
  StringBuilder sb = new StringBuilder();
  boolean first = true;
  for (Map.Entry<K, ? extends Message> e : map.entrySet()) {
    if (!first) {
      sb.append("\n");
    }
    first = false;
    sb.append(e.getKey()).append(": ")
        .append(TextFormat.shortDebugString(e.getValue()));
  }
  return sb.toString();
}
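// Hedged usage sketch, not part of the original source: shows the one-entry-per-line output
// shape of the mapToString helper above. "ProtoTextUtils" is a hypothetical class assumed to
// hold that helper; Timestamp and the map keys are chosen purely for illustration.
import com.google.protobuf.Timestamp;
import java.util.LinkedHashMap;
import java.util.Map;

final class MapToStringExample {
  public static void main(String[] args) {
    Map<String, Timestamp> checkpoints = new LinkedHashMap<>();
    checkpoints.put("start", Timestamp.newBuilder().setSeconds(100).build());
    checkpoints.put("end", Timestamp.newBuilder().setSeconds(200).build());

    // Expected output, one entry per line:
    //   start: seconds: 100
    //   end: seconds: 200
    System.out.println(ProtoTextUtils.mapToString(checkpoints));
  }
}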
static EditLogLedgerMetadata read(ZooKeeper zkc, String path)
    throws IOException, KeeperException.NoNodeException {
  try {
    byte[] data = zkc.getData(path, false, null);
    EditLogLedgerProto.Builder builder = EditLogLedgerProto.newBuilder();
    if (LOG.isDebugEnabled()) {
      LOG.debug("Reading " + path + " data: " + new String(data, UTF_8));
    }
    TextFormat.merge(new String(data, UTF_8), builder);
    if (!builder.isInitialized()) {
      throw new IOException("Invalid/Incomplete data in znode");
    }
    EditLogLedgerProto ledger = builder.build();

    int dataLayoutVersion = ledger.getDataLayoutVersion();
    long ledgerId = ledger.getLedgerId();
    long firstTxId = ledger.getFirstTxId();
    if (ledger.hasLastTxId()) {
      long lastTxId = ledger.getLastTxId();
      return new EditLogLedgerMetadata(path, dataLayoutVersion, ledgerId, firstTxId, lastTxId);
    } else {
      return new EditLogLedgerMetadata(path, dataLayoutVersion, ledgerId, firstTxId);
    }
  } catch (KeeperException.NoNodeException nne) {
    throw nne;
  } catch (KeeperException ke) {
    throw new IOException("Error reading from zookeeper", ke);
  } catch (InterruptedException ie) {
    throw new IOException("Interrupted reading from zookeeper", ie);
  }
}
private static WorkerConfig toWorkerConfig(InputStream inputStream, WorkerOptions options)
    throws IOException {
  WorkerConfig.Builder builder = WorkerConfig.newBuilder();
  String data = new String(Encoding.convertFromLatin1(ByteStreams.toByteArray(inputStream)));
  TextFormat.merge(data, builder);
  if (!Strings.isNullOrEmpty(options.root)) {
    builder.setRoot(options.root);
  }
  if (!Strings.isNullOrEmpty(options.casCacheDirectory)) {
    builder.setCasCacheDirectory(options.casCacheDirectory);
  }
  return builder.build();
}
public Service.Response transform(ByteString serializedMessage)
    throws InvalidProtocolBufferException {
  Message msg = parser.parseFrom(serializedMessage);
  if (LOG.isTraceEnabled()) {
    LOG.trace("Deserialized response '{}'", TextFormat.shortDebugString(msg));
  }
  return impl.deserialize(msg);
}
/**
 * Write a region open marker indicating that the region is opened.
 */
public static long writeRegionEventMarker(WAL wal, HTableDescriptor htd, HRegionInfo hri,
    final RegionEventDescriptor r, final MultiVersionConcurrencyControl mvcc)
    throws IOException {
  long trx = writeMarker(wal, htd, hri, WALEdit.createRegionEventWALEdit(hri, r), mvcc, true);
  if (LOG.isTraceEnabled()) {
    LOG.trace("Appended region event marker " + TextFormat.shortDebugString(r));
  }
  return trx;
}
@Override
public byte[] serializeRequest(Request request) throws IOException {
  // Avoid BAOS for its synchronized write methods, we don't need that concurrency control
  UnsynchronizedBuffer out = threadLocalBuffer.get();
  try {
    Message requestMsg = request.serialize();
    // Serialization of the request may be large
    if (LOG.isTraceEnabled()) {
      LOG.trace("Serializing request '{}'", TextFormat.shortDebugString(requestMsg));
    }
    serializeMessage(out, requestMsg);
    return out.toArray();
  } finally {
    out.reset();
  }
}
@Override
public String toString() {
  return TextFormat.shortDebugString(getProto());
}
private String toString(NamespaceFeature namespaceFeature) {
  return TextFormat.shortDebugString(namespaceFeature);
}
@Override public String toString() { return "EndpointAffinity [endpoint=" + TextFormat.shortDebugString(endpoint) + ", affinity=" + affinity + "]"; }
public static String toString(final MajorType type) {
  return type != null ? "MajorType[" + TextFormat.shortDebugString(type) + "]" : "null";
}
@Override public String toString() { return "EndpointAffinity [endpoint=" + TextFormat.shortDebugString(endpoint) + ", affinity=" + affinity + ", mandatory=" + mandatory + ", maxWidth=" + maxWidth + "]"; }
@Override
public String toString() {
  return TextFormat.shortDebugString(this.proto);
}