No. of Instances - 4600
No. of Commits - 623
No. of Projects - {'JGroups', 'phoenix', 'keywhiz', 'jetty.project', 'keycloak', 'Lealone', 'CoreNLP', 'cryptomator', 'hadoop', 'Chronicle-Queue', 'wildfly', 'pulsar', 'speedment', 'optaplanner', 'redisson', 'camel', 'AxonFramework', 'tomcat', 'gwt', 'jersey', 'j2objc', 'geoserver', 'robovm', 'Chronicle-Map', 'helios', 'JCTools', 'BlossomsPokemonGoManager', 'graphhopper', 'che', 'galen', 'dropwizard', 'sonarqube', 'async-http-client', 'nd4j', 'languagetool', 'drill', 'alluxio', 'datumbox-framework', 'jgit', 'OpenTripPlanner', 'hbase', 'bigbluebutton', 'brave', 'assertj-core', 'ignite', 'graylog2-server', 'junit5', 'presto', 'orbit', 'apollo', 'pinpoint', 'uaa', 'kaa', 'atomix', 'james-project', 'infinispan', 'vert.x', 'jbpm', 'orientdb', 'nutz', 'undertow', 'fastjson', 'bootique', 'HikariCP', 'drools', 'neo4j', 'flink', 'guava', 'hibernate-orm', 'rapidoid', 'usergrid', 'immutables', 'killbill', 'giraph', 'mpush'}
Hierarchy/Composition: -
Primitive Info: Widening
NameSpace: Jdk -> Jdk
Mapping:
- iprot.readI32() to iprot.readI64()
- iprot.readI32() to iprot.readI64()
- fileInfo.getId() to fileInfo.getFileId()
- iprot.readI32() to iprot.readI64()
- iprot.readI32() to iprot.readI64()
- iprot.readI32() to iprot.readI64()
- iprot.readI32() to iprot.readI64()
- mMasterClient.getFileStatus(fileId,path) to mFSMasterClient.getFileInfo(fileId)
- info.getId() to mFSMasterClient.getFileId(path)
- iprot.readI32() to iprot.readI64()
- fileInfo.getId() to fileInfo.getFileId()
- iprot.readI32() to iprot.readI64()
- getInt to getLong
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(21,viewIndexId_) to com.google.protobuf.CodedOutputStream.computeInt64Size(21,viewIndexId_)
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(6,viewIndexId_) to com.google.protobuf.CodedOutputStream.computeInt64Size(6,viewIndexId_)
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(12,viewIndexId_) to com.google.protobuf.CodedOutputStream.computeInt64Size(12,viewIndexId_)
- context.getConnection().getQueryServices().getProps().getInt(QueryServices.SPOOL_THRESHOLD_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_SPOOL_THRESHOLD_BYTES) to context.getConnection().getQueryServices().getProps().getLong(QueryServices.CLIENT_SPOOL_THRESHOLD_BYTES_ATTRIB,QueryServicesOptions.DEFAULT_CLIENT_SPOOL_THRESHOLD_BYTES)
- Number.fromInt(minSize) to Number.fromLong(minSize)
- Number.fromInt(maxSize) to Number.fromLong(maxSize)
- Optional.of(Number.fromInt(minSize)) to Optional.of(Number.fromLong(minSize))
- Optional.of(Number.fromInt(maxSize)) to Optional.of(Number.fromLong(maxSize))
- Optional.of(Number.fromInt(minSize)) to Optional.of(Number.fromLong(minSize))
- Optional.of(Number.fromInt(maxSize)) to Optional.of(Number.fromLong(maxSize))
- size to size
- num_responses to Math.min(min,time_ns)
- Integer.parseInt(v) to Long.parseLong(v)
- System.currentTimeMillis() to System.nanoTime()
- tmpRef to Helper.toSignedInt(tmpRef)
- cmdArgs.getInt("maxqueries",1000) to cmdArgs.getLong("maxqueries",1000L)
- myRequestBean.getkSession().hashCode() to myRequestBean.getkSession().getIdentifier()
- myConversationBean.getkSession().hashCode() to myConversationBean.getkSession().getIdentifier()
- myConversationBean.getkSession().hashCode() to myConversationBean.getkSession().getIdentifier()
- myConversationBean.getkSession().hashCode() to myConversationBean.getkSession().getIdentifier()
- myConversationBean.getkSession().hashCode() to myConversationBean.getkSession().getIdentifier()
- myConversationBean.getkSession().hashCode() to myConversationBean.getkSession().getIdentifier()
- mySessionBean.getkSession().hashCode() to mySessionBean.getkSession().getIdentifier()
- mySessionBean.getkSession().hashCode() to mySessionBean.getkSession().getIdentifier()
- in.skip(byteCount - numSkipped) to super.skip(n)
- config.getMaxThreadCount() to config.getTokenClaimInterval()
- out.writeInt(MAGIC_WORD).writeInt(blockLength).writeLong(firstEntryId) to out.writeInt(MAGIC_WORD).writeLong(headerLength).writeLong(blockLength).writeLong(firstEntryId)
- metaFile.readInt() to metaFile.readLong()
- Hash.hash32(bytes,0) to Math.abs(function1.hashBytes(bytes) % bits)
- Hash.hash32(bytes,1) to Math.abs(function2.hashBytes(bytes) % bits)
- bits.get(index) to bits.isSet(index)
- Maths.nextPower2((long)(-expectedSize * Math.log(falsePositiveProbability == 0 ? Double.MIN_VALUE : falsePositiveProbability) / (Math.log(2) * Math.log(2))),128) to roundBits((long)(-expectedSize * Math.log(falsePositiveProbability == 0 ? Double.MIN_VALUE : falsePositiveProbability) / (Math.log(2) * Math.log(2))))
- buffer.readInt() to buffer.readUnsignedInt()
- buffer.writeLong(id).writeLong(version).writeLong(index).writeLong(range).writeUnsignedMedium(maxEntrySize).writeInt(maxSegmentSize).writeLong(updated).writeBoolean(locked).flush() to buffer.writeLong(id).writeLong(version).writeLong(index).writeLong(range).writeUnsignedMedium(maxEntrySize).writeUnsignedInt(maxSegmentSize).writeInt(maxEntries).writeLong(updated).writeBoolean(locked).flush()
- readInt(buffer) to readLong(buffer)
- inMemoryQueue.size(queueName) to queueMessageManager.getQueueDepth(queueName)
- Conf.HTTP.entry("maxPipeline") to net
- latch.await(timeout,timeUnit) to latch.await(executionTimeout,executionTimeUnit)
- timeUnit.toMillis(timeout) to executionTimeUnit.toMillis(executionTimeout)
- toChars(len) to len
- row.getInt(0) to row.getLong(0)
- input.readUInt32() to input.readUInt64()
- com.google.protobuf.CodedOutputStream.computeUInt32Size(2,totalNumberOfRequests_) to com.google.protobuf.CodedOutputStream.computeUInt64Size(2,totalNumberOfRequests_)
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(3,requests_) to com.google.protobuf.CodedOutputStream.computeInt64Size(3,requests_)
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(4,requests_) to com.google.protobuf.CodedOutputStream.computeInt64Size(4,requests_)
- input.readUInt32() to input.readUInt64()
- com.google.protobuf.CodedOutputStream.computeUInt32Size(1,numberOfRequests_) to com.google.protobuf.CodedOutputStream.computeUInt64Size(1,numberOfRequests_)
- buf1.get(o1 + offset) to UnsafeAccess.theUnsafe.getByte(obj1,o1 + offset)
- buf2.get(o2 + offset) to UnsafeAccess.theUnsafe.getByte(obj2,o2 + offset)
- this.conf.getInt("hbase.ipc.server.max.callqueue.size",DEFAULT_MAX_CALLQUEUE_SIZE) to this.conf.getLong("hbase.ipc.server.max.callqueue.size",DEFAULT_MAX_CALLQUEUE_SIZE)
- this.conf.getInt("hbase.ipc.server.max.callqueue.size",DEFAULT_MAX_CALLQUEUE_SIZE) to this.conf.getLong("hbase.ipc.server.max.callqueue.size",DEFAULT_MAX_CALLQUEUE_SIZE)
- calcCacheCapacity() to resultSize2CacheSize(maxScannerResultSize)
- rl.getStorefileIndexSizeMB() to rl.getStorefileIndexSizeKB()
- Strings.appendKeyValue(sb,"storefileIndexSizeMB",Integer.valueOf(this.storefileIndexSizeMB)) to Strings.appendKeyValue(sb,"storefileIndexSizeKB",Long.valueOf(this.storefileIndexSizeKB))
- op.y() to Nd4j
- ArrayUtil.dotProduct(pointOffsets,pointStrides) to ArrayUtil.dotProductLong(pointOffsets,pointStrides)
- ArrayUtil.dotProduct(accumOffsets,accumStrides) to ArrayUtil.dotProductLong(accumOffsets,accumStrides)
- ArrayUtil.dotProduct(accumOffsets,accumStrides) to ArrayUtil.dotProductLong(accumOffsets,accumStrides)
- ArrayUtil.calcOffset(accumShape,accumOffsets,accumStrides) to ArrayUtil.calcOffsetLong(accumShape,accumOffsets,accumStrides)
- ArrayUtil.prod(shape) to ArrayUtil.prodLong(shape)
- ArrayUtil.sum(offsets) to ArrayUtil.sumLong(offsets)
- array.length() to array.lengthLong()
- array.length() to array.lengthLong()
- conn.getContentLength() to conn.getContentLengthLong()
- ingressStartTimeout to TimeUnit.MINUTES.toMillis(ingressStartTimeoutMin)
- ingressStartTimeout to Math.min(ingressStartTimeoutMillis,startSynchronizer.getStartTimeoutMillis())
- hashCode to hash
- hashCode to hash
- position to toIntExact(position)
- uncompressedLength to toIntExact(uncompressedLength)
- dataStream.next(totalLength) to dataStream.next(toIntExact(totalLength))
- conf.getInt(MIN_MULTIPART_THRESHOLD,DEFAULT_MIN_MULTIPART_THRESHOLD) to conf.getLong(MIN_MULTIPART_THRESHOLD,DEFAULT_MIN_MULTIPART_THRESHOLD)
- conf.getInt(MIN_MULTIPART_THRESHOLD,DEFAULT_MIN_MULTIPART_THRESHOLD) to conf.getLong(MIN_MULTIPART_THRESHOLD,DEFAULT_MIN_MULTIPART_THRESHOLD)
- config.getInt(PERIOD_KEY,PERIOD_DEFAULT) to config.getLong(PERIOD_MILLIS_KEY,periodSec * 1000)
- appMetrics.getResourcePreempted().getMemory() to appMetrics.getResourcePreempted().getMemorySize()
- usedResources.getMemory() to usedResources.getMemorySize()
- app.getApplicationResourceUsageReport().getUsedResources().getMemory() to app.getApplicationResourceUsageReport().getUsedResources().getMemorySize()
- r.getResourcesAtTime(i).getMemory() to r.getResourcesAtTime(i).getMemorySize()
- scheduler.getMaximumResourceCapability().getMemory() to scheduler.getMaximumResourceCapability().getMemorySize()
- scheduler.getMaximumResourceCapability().getMemory() to scheduler.getMaximumResourceCapability().getMemorySize()
- scheduler.getMaximumResourceCapability().getMemory() to scheduler.getMaximumResourceCapability().getMemorySize()
- scheduler.getMaximumResourceCapability().getMemory() to scheduler.getMaximumResourceCapability().getMemorySize()
- scheduler.getMaximumResourceCapability().getMemory() to scheduler.getMaximumResourceCapability().getMemorySize()
- fs.getMaximumResourceCapability().getMemory() to fs.getMaximumResourceCapability().getMemorySize()
- fsAppAttempt.getFairShare().getMemory() to fsAppAttempt.getFairShare().getMemorySize()
- container.getAllocatedResource().getMemory() to container.getAllocatedResource().getMemorySize()
- available.getMemory() to available.getMemorySize()
- required.getMemory() to required.getMemorySize()
- fs.getMinimumResourceCapability().getMemory() to fs.getMinimumResourceCapability().getMemorySize()
- newResource.getMemory() to newResource.getMemorySize()
- res.getMemory() to res.getMemorySize()
- available.getMemory() to available.getMemorySize()
- required.getMemory() to required.getMemorySize()
- toPreempt.getMemory() to toPreempt.getMemorySize()
- clusterMax.getMemory() to clusterMax.getMemorySize()
- conf.getInt(MRJobConfig.TASK_TIMEOUT,5 * 60 * 1000) to conf.getLong(MRJobConfig.TASK_TIMEOUT,MRJobConfig.DEFAULT_TASK_TIMEOUT_MILLIS)
- conf.getInt(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT) to conf.getTimeDuration(DFS_NAMENODE_SAFEMODE_EXTENSION_KEY,DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT,MILLISECONDS)
- Integer.parseInt(cliParser.getOptionValue("container_memory","10")) to Integer.parseInt(cliParser.getOptionValue("container_memory","-1"))
- config.getInt(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS,DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_SECONDS_DEFAULT) to config.getTimeDuration(DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL,DFS_CBLOCK_BLOCK_BUFFER_FLUSH_INTERVAL_DEFAULT,TimeUnit.SECONDS)
- conf.getInt(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_SECONDS,OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT) to conf.getTimeDuration(OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL,OZONE_SCM_CONTAINER_REPORT_PROCESSING_INTERVAL_DEFAULT,TimeUnit.SECONDS)
- conf.getInt(OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS,OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_MS_DEFAULT) to conf.getTimeDuration(OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT,OzoneConfigKeys.OZONE_CLIENT_CONNECTION_TIMEOUT_DEFAULT,TimeUnit.MILLISECONDS)
- HttpClients.custom().setDefaultRequestConfig(RequestConfig.custom().setSocketTimeout(socketTimeout).setConnectTimeout(connectionTimeout).build()) to HttpClients.custom().setDefaultRequestConfig(RequestConfig.custom().setSocketTimeout(Math.toIntExact(socketTimeout)).setConnectTimeout(Math.toIntExact(connectionTimeout)).build())
- conf.getInt(OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS,OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_MS_DEFAULT) to conf.getTimeDuration(OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT,OzoneConfigKeys.OZONE_CLIENT_SOCKET_TIMEOUT_DEFAULT,TimeUnit.MILLISECONDS)
- HttpClients.custom().setDefaultRequestConfig(RequestConfig.custom().setSocketTimeout(socketTimeout).setConnectTimeout(connectionTimeout).build()) to HttpClients.custom().setDefaultRequestConfig(RequestConfig.custom().setSocketTimeout(Math.toIntExact(socketTimeout)).setConnectTimeout(Math.toIntExact(connectionTimeout)).build())
- subnetUtils.getInfo().getAddressCount() to subnetUtils.getInfo().getAddressCountLong()
- Longs to scm.getScmMetadataStore().getDeletedBlocksTXTable()
- deletedBlockLog to scm.getScmMetadataStore()
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(8,snapshotId_) to com.google.protobuf.CodedOutputStream.computeInt64Size(8,snapshotId_)
- input.readInt32() to input.readInt64()
- com.google.protobuf.CodedOutputStream.computeInt32Size(4,componentId_) to com.google.protobuf.CodedOutputStream.computeInt64Size(5,componentId_)
- context.prepareSelect("SELECT qgc.id, qgc.metric_id, qgc.operator, qgc.value_error, qgc.value_warning, qgc.period FROM quality_gate_conditions qgc " + "WHERE qgc.qgate_id=? ").setInt(1,qualityGateId) to context.prepareSelect("SELECT qgc.id, qgc.metric_id, qgc.operator, qgc.value_error, qgc.value_warning, qgc.period FROM quality_gate_conditions qgc " + "WHERE qgc.qgate_id=? ").setLong(1,qualityGateId)
- configuration.getInt(SONAR_CE_GRACEFUL_STOP_TIME_OUT_IN_MS).orElse(GRACEFUL_STOP_TIMEOUT) to configuration.getLong(SONAR_CE_GRACEFUL_STOP_TIME_OUT_IN_MS).orElse(DEFAULT_TASK_TIMEOUT_IN_MS)
- state.compareAndSet(s,RENTING,reservations,reservations) to casState(reservations,RENTING)
- state.compareAndSet(MOVING,OWNING,reservations,reservations) to casState(reservations,OWNING)
- ca.primarySize() to ca.primarySizeLong()
- ca.localSize(PEEK_NO_NEAR) to ca.localSizeLong(PEEK_NO_NEAR)
- in.readInt() to in.readLong()
- cfg.getWalFsyncDelay() to cfg.getWalFsyncDelayNanos()
- in.readInt() to in.readLong()
- in.readInt() to in.readLong()
- txId(txId) to (Object)txId
- random.nextInt(diff) to random.nextLong(diff)
- collisionIndex to Utils.safeCastLongToInt(collisionIndex)
- decode(cursor,adapter,headerByte,HAS_PROPERTY_BIT,NULL) to FIRST_IN_FIRST_CHAIN_BIT
- decode(cursor,adapter,headerByte,HAS_END_NEXT_BIT,NULL) to recordId
- decode(cursor,adapter,headerByte,HAS_START_NEXT_BIT,NULL) to recordId
- has(headerByte,FIRST_IN_START_BIT) to recordId
- has(headerByte,FIRST_IN_END_BIT) to recordId
- count(node.getRelationships(MyRelTypes.TEST,Direction.OUTGOING)) to Iterables.count(node.getRelationships(MyRelTypes.TEST,Direction.OUTGOING))
- count(node.getRelationships(type,direction)) to Iterables.count(node.getRelationships(type,direction))
- Predicates.<T>alwaysTrue() to Predicates.alwaysTrue()
- count(graphDb.getAllNodes()) to Iterables.count(graphDb.getAllNodes())
- count(node.getRelationships(MyRelTypes.TEST,Direction.OUTGOING)) to Iterables.count(node.getRelationships(MyRelTypes.TEST,Direction.OUTGOING))
- count(graphDb.getAllNodes()) to Iterables.count(graphDb.getAllNodes())
- equalTo(Integer.toString(i)) to equalTo(i)
- relGroupCache.getAndResetCount(id,direction) to relGroupCache.getAndSetCount(id,direction,0)
- database.getAllLabels().stream().mapToInt((n) -> 1).sum() to database.getAllLabels().stream().count()
- database.getAllRelationships().stream().mapToInt((n) -> 1).sum() to database.getAllRelationships().stream().count()
- database.getAllRelationshipTypes().stream().mapToInt((n) -> 1).sum() to database.getAllRelationshipTypes().stream().count()
- indexDefinitionStream to stream(database.schema().getIndexes())
- database.getAllRelationshipTypes().stream().mapToInt((n) -> 1).sum() to database.getAllRelationshipTypes().stream().count()
- database.getAllNodes().stream().mapToInt((n) -> 1).sum() to database.getAllNodes().stream().count()
- database.getAllLabels().stream().mapToInt((n) -> 1).sum() to database.getAllLabels().stream().count()
- database.getAllPropertyKeys().stream().mapToInt((n) -> 1).sum() to database.getAllPropertyKeys().stream().count()
- database.getAllRelationships().stream().mapToInt((n) -> 1).sum() to database.getAllRelationships().stream().count()
- database.getAllPropertyKeys().stream().mapToInt((n) -> 1).sum() to database.getAllPropertyKeys().stream().count()
- database.getAllNodes().stream().mapToInt((n) -> 1).sum() to database.getAllNodes().stream().count()
- constraintStream to stream(database.schema().getConstraints())
- constraintStream to stream(database.schema().getConstraints())
- indexDefinitionStream to stream(database.schema().getIndexes())
- relGroupCache.getAndResetCount(id,direction) to relGroupCache.getAndSetCount(id,direction,0)
- cacheFactory.newLongArray(pessimisticNumberOfCollisions,ID_NOT_FOUND) to cacheFactory.newByteArray(pessimisticNumberOfCollisions,new byte[COLLISION_ENTRY_SIZE])
- labelTokenHolder.getTokenById(labelId) to labelTokenHolder.getTokenById(toIntExact(labelId))
- roundUp(netBatchSize,rowCount) to safeDivide(netBatchSize,rowCount)
- roundUp(netBatchSize * 100,accountedMemorySize) to safeDivide(netBatchSize * 100L,accountedMemorySize)
- roundUp(accountedMemorySize,rowCount) to safeDivide(accountedMemorySize,rowCount)
- roundUp(accountedMemorySize,rowCount) to safeDivide(accountedMemorySize,rowCount)
- roundUp(netBatchSize * 100,accountedMemorySize) to safeDivide(netBatchSize * 100L,accountedMemorySize)
- BaseAllocator.nextPowerOfTwo(INT_VALUE_WIDTH * (valueCount + 1)) to BaseAllocator.longNextPowerOfTwo(INT_VALUE_WIDTH * (valueCount + 1))
- BaseAllocator.nextPowerOfTwo(valueCount) to BaseAllocator.longNextPowerOfTwo(valueCount)
- BaseAllocator.nextPowerOfTwo(BYTE_VALUE_WIDTH * valueCount) to BaseAllocator.longNextPowerOfTwo(BYTE_VALUE_WIDTH * valueCount)
- [srcPath, dstPath] to pinned
- >= to -
- count to count
- < to <=
- toChars(len) to toChars(len)
- buf.position() + extra to capacityNeeded
- op.getDimension() == null ? BroadcastDimensions.getDimensions(op.y().shape()) : op.getDimension() to op.getDimension()
- Math.min(numAvailableMemorySegment,totalCapacity) to Math.min(numAvailableMemorySegment,totalCapacity)
- - to +
- - to +
- - to +
- getOffset(positionOffset + length) - getOffset(positionOffset) + arraysSizeInBytes to getOffset(positionOffset + length) - getOffset(positionOffset) + arraysSizeInBytes
- (Long.BYTES + Byte.BYTES) * (long)length to (Long.BYTES + Byte.BYTES) * (long)length
- getRawSlice().length() + valueIsNull.length() to getRawSlice().length() + valueIsNull.length()
- size to size
- INSTANCE_SIZE + getRawSlice().getRetainedSize() + valueIsNull.getRetainedSize() to INSTANCE_SIZE + getRawSlice().getRetainedSize() + valueIsNull.getRetainedSize()
- keyBlock.getRegionSizeInBytes(offset / 2,positionCount / 2) + valueBlock.getRegionSizeInBytes(offset / 2,positionCount / 2) + sizeOfIntArray(positionCount / 2 * HASH_MULTIPLIER) to keyBlock.getRegionSizeInBytes(offset / 2,positionCount / 2) + valueBlock.getRegionSizeInBytes(offset / 2,positionCount / 2) + sizeOfIntArray(positionCount / 2 * HASH_MULTIPLIER)
- sizeInBytes to sizeInBytes
- sizeInBytes to sizeInBytes
- sizeInBytes to sizeInBytes
- sliceOutput.size() + valueIsNull.size() to sliceOutput.size() + valueIsNull.size()
- size to size
- (Short.BYTES + Byte.BYTES) * (long)length to (Short.BYTES + Byte.BYTES) * (long)length
- INSTANCE_SIZE + keyBlock.getRetainedSizeInBytes() + valueBlock.getRetainedSizeInBytes()+ sizeOf(hashTable) to INSTANCE_SIZE + keyBlock.getRetainedSizeInBytes() + valueBlock.getRetainedSizeInBytes()+ sizeOf(hashTable)
- (Integer.BYTES + Byte.BYTES) * (long)length to (Integer.BYTES + Byte.BYTES) * (long)length
- size to size
- (Byte.BYTES + Byte.BYTES) * (long)length to (Byte.BYTES + Byte.BYTES) * (long)length
- offsets[arrayOffset + position + length] - offsets[arrayOffset + position] + ((Integer.BYTES + Byte.BYTES) * (long)length) to offsets[arrayOffset + position + length] - offsets[arrayOffset + position] + ((Integer.BYTES + Byte.BYTES) * (long)length)
- sizeInBytes to sizeInBytes
- sizeInBytes to sizeInBytes
- sizeInBytes to sizeInBytes
- sliceOutput.size() + arraysSizeInBytes to sliceOutput.size() + arraysSizeInBytes
- slice.getRetainedSize() + closedSlicesRetainedSize + INSTANCE_SIZE to slice.getRetainedSize() + closedSlicesRetainedSize + INSTANCE_SIZE
- INSTANCE_SIZE + compressedOutputStream.getRetainedSize() + slice.getRetainedSize()+ SizeOf.sizeOf(compressionBuffer) to INSTANCE_SIZE + compressedOutputStream.getRetainedSize() + slice.getRetainedSize()+ SizeOf.sizeOf(compressionBuffer)
- slice.getRetainedSize() + closedSlicesRetainedSize + INSTANCE_SIZE to slice.getRetainedSize() + closedSlicesRetainedSize + INSTANCE_SIZE
- 0 to DFS_NAMENODE_SAFEMODE_EXTENSION_DEFAULT
- == to !=
- == to !=
- == to !=
- decode(cursor,adapter) to has(headerByte,firstInStartBit) ? decode(cursor,adapter) : toAbsolute(decode(cursor,adapter),recordId)
- value to value
- {
@Override public void run(){
try {
InputStream is;
if (logFile.endsWith(".gz")) is=new GZIPInputStream(new FileInputStream(logFile));
else is=new FileInputStream(logFile);
BufferedReader reader=new BufferedReader(new InputStreamReader(is,Helper.UTF_CS));
try {
String logLine;
while ((logLine=reader.readLine()) != null) {
Query q=Query.parse(logLine);
if (q == null) continue;
double dist=distCalc.calcDist(q.start.lat,q.start.lon,q.end.lat,q.end.lon);
if (dist < 100) {
skippedTooShort++;
continue;
}
readQueries++;
if (noDuplicate.size() >= maxQueries) break;
if (noDuplicate.add(q)) queryQueue.put(q);
}
}
finally {
reader.close();
}
logger.info("Reader finished");
workerStartedBarrier.await();
logfileEOFBarrier.countDown();
service.shutdown();
}
catch ( Exception ex) {
logger.error("Stopped reading logs",ex);
if (service != null) service.shutdownNow();
}
}
}
to {
@Override public void run(){
try {
InputStream is;
if (logFile.endsWith(".gz")) is=new GZIPInputStream(new FileInputStream(logFile));
else is=new FileInputStream(logFile);
BufferedReader reader=new BufferedReader(new InputStreamReader(is,Helper.UTF_CS));
try {
String logLine;
while ((logLine=reader.readLine()) != null) {
Query q=Query.parse(logLine);
if (q == null) continue;
double dist=distCalc.calcDist(q.start.lat,q.start.lon,q.end.lat,q.end.lon);
if (dist < tooShortDistance) {
skippedTooShort++;
continue;
}
readQueries++;
q.realCount=readQueries;
if (noDuplicate.size() >= maxQueries) break;
if (noDuplicate.add(q)) queryQueue.put(q);
}
}
finally {
reader.close();
}
logger.info("Reader finished");
workerStartedBarrier.await();
logfileEOFBarrier.countDown();
service.shutdown();
}
catch ( Exception ex) {
logger.error("Stopped reading logs",ex);
if (service != null) service.shutdownNow();
}
}
}
- {
@Override public void run(){
try {
InputStream is;
if (logFile.endsWith(".gz")) is=new GZIPInputStream(new FileInputStream(logFile));
else is=new FileInputStream(logFile);
BufferedReader reader=new BufferedReader(new InputStreamReader(is,Helper.UTF_CS));
try {
String logLine;
while ((logLine=reader.readLine()) != null) {
Query q=Query.parse(logLine);
if (q == null) continue;
double dist=distCalc.calcDist(q.start.lat,q.start.lon,q.end.lat,q.end.lon);
if (dist < 100) {
skippedTooShort++;
continue;
}
readQueries++;
if (noDuplicate.size() >= maxQueries) break;
if (noDuplicate.add(q)) queryQueue.put(q);
}
}
finally {
reader.close();
}
logger.info("Reader finished");
workerStartedBarrier.await();
logfileEOFBarrier.countDown();
service.shutdown();
}
catch ( Exception ex) {
logger.error("Stopped reading logs",ex);
if (service != null) service.shutdownNow();
}
}
}
to {
@Override public void run(){
try {
InputStream is;
if (logFile.endsWith(".gz")) is=new GZIPInputStream(new FileInputStream(logFile));
else is=new FileInputStream(logFile);
BufferedReader reader=new BufferedReader(new InputStreamReader(is,Helper.UTF_CS));
try {
String logLine;
while ((logLine=reader.readLine()) != null) {
Query q=Query.parse(logLine);
if (q == null) continue;
double dist=distCalc.calcDist(q.start.lat,q.start.lon,q.end.lat,q.end.lon);
if (dist < tooShortDistance) {
skippedTooShort++;
continue;
}
readQueries++;
q.realCount=readQueries;
if (noDuplicate.size() >= maxQueries) break;
if (noDuplicate.add(q)) queryQueue.put(q);
}
}
finally {
reader.close();
}
logger.info("Reader finished");
workerStartedBarrier.await();
logfileEOFBarrier.countDown();
service.shutdown();
}
catch ( Exception ex) {
logger.error("Stopped reading logs",ex);
if (service != null) service.shutdownNow();
}
}
}
- {
@Override public void run(){
try {
InputStream is;
if (logFile.endsWith(".gz")) is=new GZIPInputStream(new FileInputStream(logFile));
else is=new FileInputStream(logFile);
BufferedReader reader=new BufferedReader(new InputStreamReader(is,Helper.UTF_CS));
try {
String logLine;
while ((logLine=reader.readLine()) != null) {
Query q=Query.parse(logLine);
if (q == null) continue;
double dist=distCalc.calcDist(q.start.lat,q.start.lon,q.end.lat,q.end.lon);
if (dist < 100) {
skippedTooShort++;
continue;
}
readQueries++;
if (noDuplicate.size() >= maxQueries) break;
if (noDuplicate.add(q)) queryQueue.put(q);
}
}
finally {
reader.close();
}
logger.info("Reader finished");
workerStartedBarrier.await();
logfileEOFBarrier.countDown();
service.shutdown();
}
catch ( Exception ex) {
logger.error("Stopped reading logs",ex);
if (service != null) service.shutdownNow();
}
}
}
to {
@Override public void run(){
try {
InputStream is;
if (logFile.endsWith(".gz")) is=new GZIPInputStream(new FileInputStream(logFile));
else is=new FileInputStream(logFile);
BufferedReader reader=new BufferedReader(new InputStreamReader(is,Helper.UTF_CS));
try {
String logLine;
while ((logLine=reader.readLine()) != null) {
Query q=Query.parse(logLine);
if (q == null) continue;
double dist=distCalc.calcDist(q.start.lat,q.start.lon,q.end.lat,q.end.lon);
if (dist < tooShortDistance) {
skippedTooShort++;
continue;
}
readQueries++;
q.realCount=readQueries;
if (noDuplicate.size() >= maxQueries) break;
if (noDuplicate.add(q)) queryQueue.put(q);
}
}
finally {
reader.close();
}
logger.info("Reader finished");
workerStartedBarrier.await();
logfileEOFBarrier.countDown();
service.shutdown();
}
catch ( Exception ex) {
logger.error("Stopped reading logs",ex);
if (service != null) service.shutdownNow();
}
}
}
- {
@Override public Object invoke( Object proxy, Method method, Object[] args) throws Throwable {
String requestId=generateRequestId();
String requestQueueName="redisson_remote_service:{" + remoteInterface.getName() + "}";
RBlockingQueue<RemoteServiceRequest> requestQueue=redisson.getBlockingQueue(requestQueueName);
RemoteServiceRequest request=new RemoteServiceRequest(requestId,method.getName(),args);
requestQueue.add(request);
String responseName="redisson_remote_service:{" + remoteInterface.getName() + "}:"+ requestId;
final RTopic<RemoteServiceResponse> topic=redisson.getTopic(responseName);
final CountDownLatch latch=new CountDownLatch(1);
final AtomicReference<RemoteServiceResponse> response=new AtomicReference<RemoteServiceResponse>();
int listenerId=topic.addListener(new MessageListener<RemoteServiceResponse>(){
@Override public void onMessage( String channel, RemoteServiceResponse msg){
response.set(msg);
latch.countDown();
}
}
);
if (timeout == -1) {
latch.await();
}
else {
if (!latch.await(timeout,timeUnit)) {
topic.removeListener(listenerId);
throw new RedisTimeoutException("No response after " + timeUnit.toMillis(timeout) + "ms for request: "+ request);
}
}
topic.removeListener(listenerId);
RemoteServiceResponse msg=response.get();
if (msg.getError() != null) {
throw msg.getError();
}
return msg.getResult();
}
}
to {
@Override public Object invoke( Object proxy, Method method, Object[] args) throws Throwable {
String requestId=generateRequestId();
String requestQueueName="redisson_remote_service:{" + remoteInterface.getName() + "}";
RBlockingQueue<RemoteServiceRequest> requestQueue=redisson.getBlockingQueue(requestQueueName);
RemoteServiceRequest request=new RemoteServiceRequest(requestId,method.getName(),args,ackTimeUnit.toMillis(ackTimeout),System.currentTimeMillis());
requestQueue.add(request);
String responseName="redisson_remote_service:{" + remoteInterface.getName() + "}:"+ requestId;
final CountDownLatch ackLatch=new CountDownLatch(1);
final CountDownLatch latch=new CountDownLatch(1);
final AtomicReference<RRemoteServiceResponse> response=new AtomicReference<RRemoteServiceResponse>();
final RTopic<RRemoteServiceResponse> topic=redisson.getTopic(responseName);
int listenerId=topic.addListener(new MessageListener<RRemoteServiceResponse>(){
@Override public void onMessage( String channel, RRemoteServiceResponse msg){
if (msg instanceof RemoteServiceResponse) {
response.set(msg);
latch.countDown();
}
else {
ackLatch.countDown();
}
}
}
);
if (!ackLatch.await(ackTimeout,ackTimeUnit)) {
topic.removeListener(listenerId);
throw new RemoteServiceAckTimeoutException("No ACK response after " + ackTimeUnit.toMillis(ackTimeout) + "ms for request: "+ request);
}
if (executionTimeout == -1) {
latch.await();
}
else {
if (!latch.await(executionTimeout,executionTimeUnit)) {
topic.removeListener(listenerId);
throw new RemoteServiceTimeoutException("No response after " + executionTimeUnit.toMillis(executionTimeout) + "ms for request: "+ request);
}
}
topic.removeListener(listenerId);
RemoteServiceResponse msg=(RemoteServiceResponse)response.get();
if (msg.getError() != null) {
throw msg.getError();
}
return msg.getResult();
}
}
- new RemoteServiceRequest(requestId,method.getName(),args) to new RemoteServiceRequest(requestId,method.getName(),args,ackTimeUnit.toMillis(ackTimeout),System.currentTimeMillis())
- new double[length] to new DoublePointer(length)
- new float[length] to new FloatPointer(length)
- new int[length] to new IntPointer(length)
- new MetricsSourceAdapter(prefix,MS_STATS_NAME,MS_STATS_DESC,MetricsAnnotations.makeSource(this),injectedTags,period * 1000L,sysConf == null ? config.subset(SOURCE_KEY) : sysConf) to new MetricsSourceAdapter(prefix,MS_STATS_NAME,MS_STATS_DESC,MetricsAnnotations.makeSource(this),injectedTags,period,sysConf == null ? config.subset(SOURCE_KEY) : sysConf)