diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index f7932a6..ec3d9cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -22,6 +22,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -153,7 +154,7 @@ public String toString() {
private class Monitor implements Runnable {
@Override
public void run() {
- Stopwatch sw = new Stopwatch();
+ Stopwatch sw = Stopwatch.createUnstarted();
Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
while (shouldRun) {
sw.reset().start();
@@ -162,7 +163,7 @@ public void run() {
} catch (InterruptedException ie) {
return;
}
- long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
+ long extraSleepTime = sw.elapsed(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();

if (extraSleepTime > warnThresholdMs) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 8588de5..cb0dbae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -133,7 +133,7 @@
/**
* Stopwatch which starts counting on each heartbeat that is sent
*/
- private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
+ private final Stopwatch lastHeartbeatStopwatch = Stopwatch.createUnstarted();

private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;

@@ -435,7 +435,7 @@ private void throwIfOutOfSync()
* written.
*/
private void heartbeatIfNecessary() throws IOException {
- if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
+ if (lastHeartbeatStopwatch.elapsed(TimeUnit.MILLISECONDS) > HEARTBEAT_INTERVAL_MILLIS ||
!lastHeartbeatStopwatch.isRunning()) {
try {
getProxy().heartbeat(createReqInfo());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index c117ee8..82f01da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -68,7 +68,6 @@
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
-import com.google.common.collect.Ranges;
import com.google.protobuf.TextFormat;

/**
@@ -374,15 +373,15 @@ synchronized void journal(RequestInfo reqInfo,

curSegment.writeRaw(records, 0, records.length);
curSegment.setReadyToFlush();
- Stopwatch sw = new Stopwatch();
+ Stopwatch sw = Stopwatch.createUnstarted();
sw.start();
curSegment.flush(shouldFsync);
sw.stop();

- metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
- if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
+ metrics.addSync(sw.elapsed(TimeUnit.MICROSECONDS));
+ if (sw.elapsed(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
- " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
+ " took " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
}

if (isLagging) {
@@ -853,7 +852,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
private Range<Long> txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
"invalid segment: %s", seg);
- return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
+ return Range.closed(seg.getStartTxId(), seg.getEndTxId());
}

/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 5075da9..0d868d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -62,7 +62,7 @@

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
import com.google.protobuf.CodedOutputStream;

/**
@@ -215,7 +215,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {

for (FileSummary.Section s : sections) {
channel.position(s.getOffset());
- InputStream in = new BufferedInputStream(new LimitInputStream(fin,
+ InputStream in = new BufferedInputStream(ByteStreams.limit(fin,
s.getLength()));

in = FSImageUtil.wrapInputStreamForCompression(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index c8033dd..b312bfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IOUtils;

import com.google.common.base.Preconditions;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* This is the tool for analyzing file sizes in the namespace image. In order to
@@ -106,7 +106,7 @@ void visit(RandomAccessFile file) throws IOException {

in.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
in, s.getLength())));
run(is);
output();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
index d80fcf1..e025f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
@@ -50,7 +50,7 @@

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* LsrPBImage displays the blocks of the namespace in a format very similar
@@ -110,7 +110,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));

switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 99617b8..c613591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -52,7 +52,7 @@
import org.apache.hadoop.io.IOUtils;

import com.google.common.collect.Lists;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* PBImageXmlWriter walks over an fsimage structure and writes out
@@ -100,7 +100,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));

switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 132218c..09d42e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -47,7 +47,7 @@
import org.junit.Before;
import org.junit.Test;

-import com.google.common.io.NullOutputStream;
+import com.google.common.io.ByteStreams;

public class TestDataTransferKeepalive {
final Configuration conf = new HdfsConfiguration();
@@ -224,7 +224,7 @@ public void testManyClosedSocketsInCache() throws Exception {
stms[i] = fs.open(TEST_FILE);
}
for (InputStream stm : stms) {
- IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+ IOUtils.copyBytes(stm, ByteStreams.nullOutputStream(), 1024);
}
} finally {
IOUtils.cleanup(null, stms);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index 92c7672..aa5c351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -100,10 +100,10 @@ public void run() {
}

private void doAWrite() throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
stm.write(toWrite);
stm.hflush();
- long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
+ long micros = sw.elapsed(TimeUnit.MICROSECONDS);
quantiles.insert(micros);
}
}
@@ -276,12 +276,12 @@ public int run(String args[]) throws Exception {
int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);

- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
replication);
sw.stop();

- System.out.println("Finished in " + sw.elapsedMillis() + "ms");
+ System.out.println("Finished in " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
System.out.println("Latency quantiles (in microseconds):\n" +
test.quantiles);
return 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 10b6b79..9fbcf82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -27,6 +27,7 @@
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
@@ -325,11 +326,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();

- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 1; i < numEdits; i++) {
ch.sendEdits(1L, i, 1, data).get();
}
- long time = sw.elapsedMillis();
+ long time = sw.elapsed(TimeUnit.MILLISECONDS);

System.err.println("Wrote " + numEdits + " batches of " + editsSize +
" bytes in " + time + "ms");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
index a1e49cc..44751b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
@@ -20,6 +20,7 @@
import static org.junit.Assert.*;

import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;

import org.junit.Test;

@@ -69,24 +70,22 @@ public void testPerformance() {
System.gc();
{
ArrayList<String> arrayList = new ArrayList<String>();
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 0; i < numElems; i++) {
arrayList.add(obj);
}
- System.out.println(" ArrayList " + sw.elapsedMillis());
+ System.out.println(" ArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
}

// test ChunkedArrayList
System.gc();
{
ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 0; i < numElems; i++) {
chunkedList.add(obj);
}
- System.out.println("ChunkedArrayList " + sw.elapsedMillis());
+ System.out.println("ChunkedArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
}
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 9863427..07854a1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -28,6 +28,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -223,7 +224,7 @@ protected void addInputPathRecursively(List<FileStatus> result,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);

- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -242,7 +243,7 @@ protected void addInputPathRecursively(List<FileStatus> result,

sw.stop();
if (LOG.isDebugEnabled()) {
- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.length);
return result;
@@ -300,7 +301,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* they're too big.*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
FileStatus[] files = listStatus(job);

// Save the number of input files for metrics/loadgen
@@ -362,7 +363,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
- + ", TimeTaken: " + sw.elapsedMillis());
+ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
return splits.toArray(new FileSplit[splits.size()]);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 5f32f11..a4f293c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -258,7 +259,7 @@ public static PathFilter getInputPathFilter(JobContext context) {

int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
DEFAULT_LIST_STATUS_NUM_THREADS);
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
if (numThreads == 1) {
result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
} else {
@@ -275,7 +276,7 @@ public static PathFilter getInputPathFilter(JobContext context) {

sw.stop();
if (LOG.isDebugEnabled()) {
- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.size());
return result;
@@ -366,7 +367,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* @throws IOException
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);

@@ -414,7 +415,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
- + ", TimeTaken: " + sw.elapsedMillis());
+ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
return splits;
}
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b315e2b..9ad8bcd 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -310,7 +310,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>11.0.2</version>
+ <version>17.0</version>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
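
Note (not part of the patch): every hunk above is the same Guava 11.0.2 -> 17.0 API migration. Below is a minimal, hypothetical sketch, not taken from the Hadoop sources, showing the replacement calls this diff adopts: Stopwatch.createStarted()/createUnstarted() and elapsed(TimeUnit) in place of the removed Stopwatch constructor and elapsedMillis()/elapsedTime(), ByteStreams.limit() in place of LimitInputStream, ByteStreams.nullOutputStream() in place of NullOutputStream, and Range.closed() in place of Ranges.closed(). The class name GuavaMigrationExample and the sample sizes are illustrative only.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;
import com.google.common.collect.Range;
import com.google.common.io.ByteStreams;

// Illustrative sketch of the Guava 17 APIs that this patch migrates to.
public class GuavaMigrationExample {
  public static void main(String[] args) throws IOException {
    // Static factory methods replace the removed public Stopwatch constructor.
    Stopwatch sw = Stopwatch.createStarted();          // was: new Stopwatch().start()

    // elapsed(TimeUnit) replaces elapsedMillis() / elapsedTime(TimeUnit).
    long millis = sw.stop().elapsed(TimeUnit.MILLISECONDS);
    System.out.println("elapsed: " + millis + "ms");

    // ByteStreams.limit() replaces the removed LimitInputStream, and
    // ByteStreams.nullOutputStream() replaces NullOutputStream.
    InputStream in = ByteStreams.limit(
        new ByteArrayInputStream(new byte[128]), 64);
    ByteStreams.copy(in, ByteStreams.nullOutputStream());

    // Range.closed() replaces the removed Ranges.closed().
    Range<Long> txns = Range.closed(1L, 100L);
    System.out.println("range: " + txns);
  }
}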