Build 2.4.0-3
parent 80a4840594
commit 682865dceb

18 hadoop-build.patch Normal file
@ -0,0 +1,18 @@
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index 3482f96..cbc8d39 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -58,13 +58,6 @@
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
- <id>prepare-jar</id>
- <phase>prepare-package</phase>
- <goals>
- <goal>jar</goal>
- </goals>
- </execution>
- <execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals>
@ -1,144 +0,0 @@
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index c117ee8..9434429 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -68,7 +68,6 @@
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
-import com.google.common.collect.Ranges;
import com.google.protobuf.TextFormat;

/**
@@ -853,7 +852,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
private Range<Long> txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
"invalid segment: %s", seg);
- return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
+ return Range.closed(seg.getStartTxId(), seg.getEndTxId());
}

/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 5075da9..0d868d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -62,7 +62,7 @@

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
import com.google.protobuf.CodedOutputStream;

/**
@@ -215,7 +215,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {

for (FileSummary.Section s : sections) {
channel.position(s.getOffset());
- InputStream in = new BufferedInputStream(new LimitInputStream(fin,
+ InputStream in = new BufferedInputStream(ByteStreams.limit(fin,
s.getLength()));

in = FSImageUtil.wrapInputStreamForCompression(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index c8033dd..b312bfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IOUtils;

import com.google.common.base.Preconditions;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* This is the tool for analyzing file sizes in the namespace image. In order to
@@ -106,7 +106,7 @@ void visit(RandomAccessFile file) throws IOException {

in.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
in, s.getLength())));
run(is);
output();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
index d80fcf1..e025f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
@@ -50,7 +50,7 @@

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* LsrPBImage displays the blocks of the namespace in a format very similar
@@ -110,7 +110,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));

switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 99617b8..c613591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -52,7 +52,7 @@
import org.apache.hadoop.io.IOUtils;

import com.google.common.collect.Lists;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* PBImageXmlWriter walks over an fsimage structure and writes out
@@ -100,7 +100,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));

switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 132218c..09d42e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -47,7 +47,7 @@
import org.junit.Before;
import org.junit.Test;

-import com.google.common.io.NullOutputStream;
+import com.google.common.io.ByteStreams;

public class TestDataTransferKeepalive {
final Configuration conf = new HdfsConfiguration();
@@ -224,7 +224,7 @@ public void testManyClosedSocketsInCache() throws Exception {
stms[i] = fs.open(TEST_FILE);
}
for (InputStream stm : stms) {
- IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+ IOUtils.copyBytes(stm, ByteStreams.nullOutputStream(), 1024);
}
} finally {
IOUtils.cleanup(null, stms);
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 272dadc..dc5ae3a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -310,7 +310,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>11.0.2</version>
+ <version>15.0</version>
</dependency>
<dependency>
<groupId>commons-cli</groupId>

411 hadoop-guava.patch Normal file
@ -0,0 +1,411 @@
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index f7932a6..ec3d9cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -22,6 +22,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -153,7 +154,7 @@ public String toString() {
private class Monitor implements Runnable {
@Override
public void run() {
- Stopwatch sw = new Stopwatch();
+ Stopwatch sw = Stopwatch.createUnstarted();
Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
while (shouldRun) {
sw.reset().start();
@@ -162,7 +163,7 @@ public void run() {
} catch (InterruptedException ie) {
return;
}
- long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
+ long extraSleepTime = sw.elapsed(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();

if (extraSleepTime > warnThresholdMs) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 8588de5..cb0dbae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -133,7 +133,7 @@
/**
* Stopwatch which starts counting on each heartbeat that is sent
*/
- private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
+ private final Stopwatch lastHeartbeatStopwatch = Stopwatch.createUnstarted();

private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;

@@ -435,7 +435,7 @@ private void throwIfOutOfSync()
* written.
*/
private void heartbeatIfNecessary() throws IOException {
- if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
+ if (lastHeartbeatStopwatch.elapsed(TimeUnit.MILLISECONDS) > HEARTBEAT_INTERVAL_MILLIS ||
!lastHeartbeatStopwatch.isRunning()) {
try {
getProxy().heartbeat(createReqInfo());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index c117ee8..82f01da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -68,7 +68,6 @@
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
-import com.google.common.collect.Ranges;
import com.google.protobuf.TextFormat;

/**
@@ -374,15 +373,15 @@ synchronized void journal(RequestInfo reqInfo,

curSegment.writeRaw(records, 0, records.length);
curSegment.setReadyToFlush();
- Stopwatch sw = new Stopwatch();
+ Stopwatch sw = Stopwatch.createUnstarted();
sw.start();
curSegment.flush(shouldFsync);
sw.stop();

- metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
- if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
+ metrics.addSync(sw.elapsed(TimeUnit.MICROSECONDS));
+ if (sw.elapsed(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
- " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
+ " took " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
}

if (isLagging) {
@@ -853,7 +852,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
private Range<Long> txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
"invalid segment: %s", seg);
- return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
+ return Range.closed(seg.getStartTxId(), seg.getEndTxId());
}

/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 5075da9..0d868d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -62,7 +62,7 @@

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
import com.google.protobuf.CodedOutputStream;

/**
@@ -215,7 +215,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {

for (FileSummary.Section s : sections) {
channel.position(s.getOffset());
- InputStream in = new BufferedInputStream(new LimitInputStream(fin,
+ InputStream in = new BufferedInputStream(ByteStreams.limit(fin,
s.getLength()));

in = FSImageUtil.wrapInputStreamForCompression(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index c8033dd..b312bfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IOUtils;

import com.google.common.base.Preconditions;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* This is the tool for analyzing file sizes in the namespace image. In order to
@@ -106,7 +106,7 @@ void visit(RandomAccessFile file) throws IOException {

in.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
in, s.getLength())));
run(is);
output();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
index d80fcf1..e025f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
@@ -50,7 +50,7 @@

import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* LsrPBImage displays the blocks of the namespace in a format very similar
@@ -110,7 +110,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));

switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 99617b8..c613591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -52,7 +52,7 @@
import org.apache.hadoop.io.IOUtils;

import com.google.common.collect.Lists;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;

/**
* PBImageXmlWriter walks over an fsimage structure and writes out
@@ -100,7 +100,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));

switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 132218c..09d42e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -47,7 +47,7 @@
import org.junit.Before;
import org.junit.Test;

-import com.google.common.io.NullOutputStream;
+import com.google.common.io.ByteStreams;

public class TestDataTransferKeepalive {
final Configuration conf = new HdfsConfiguration();
@@ -224,7 +224,7 @@ public void testManyClosedSocketsInCache() throws Exception {
stms[i] = fs.open(TEST_FILE);
}
for (InputStream stm : stms) {
- IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+ IOUtils.copyBytes(stm, ByteStreams.nullOutputStream(), 1024);
}
} finally {
IOUtils.cleanup(null, stms);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index 92c7672..aa5c351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -100,10 +100,10 @@ public void run() {
}

private void doAWrite() throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
stm.write(toWrite);
stm.hflush();
- long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
+ long micros = sw.elapsed(TimeUnit.MICROSECONDS);
quantiles.insert(micros);
}
}
@@ -276,12 +276,12 @@ public int run(String args[]) throws Exception {
int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);

- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
replication);
sw.stop();

- System.out.println("Finished in " + sw.elapsedMillis() + "ms");
+ System.out.println("Finished in " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
System.out.println("Latency quantiles (in microseconds):\n" +
test.quantiles);
return 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 10b6b79..9fbcf82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -27,6 +27,7 @@
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;

import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
@@ -325,11 +326,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();

- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 1; i < numEdits; i++) {
ch.sendEdits(1L, i, 1, data).get();
}
- long time = sw.elapsedMillis();
+ long time = sw.elapsed(TimeUnit.MILLISECONDS);

System.err.println("Wrote " + numEdits + " batches of " + editsSize +
" bytes in " + time + "ms");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
index a1e49cc..44751b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
@@ -20,6 +20,7 @@
import static org.junit.Assert.*;

import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;

import org.junit.Test;

@@ -69,24 +70,22 @@ public void testPerformance() {
System.gc();
{
ArrayList<String> arrayList = new ArrayList<String>();
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 0; i < numElems; i++) {
arrayList.add(obj);
}
- System.out.println(" ArrayList " + sw.elapsedMillis());
+ System.out.println(" ArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
}

// test ChunkedArrayList
System.gc();
{
ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 0; i < numElems; i++) {
chunkedList.add(obj);
}
- System.out.println("ChunkedArrayList " + sw.elapsedMillis());
+ System.out.println("ChunkedArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
}
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 9863427..07854a1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -28,6 +28,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -223,7 +224,7 @@ protected void addInputPathRecursively(List<FileStatus> result,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);

- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -242,7 +243,7 @@ protected void addInputPathRecursively(List<FileStatus> result,

sw.stop();
if (LOG.isDebugEnabled()) {
- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.length);
return result;
@@ -300,7 +301,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* they're too big.*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
FileStatus[] files = listStatus(job);

// Save the number of input files for metrics/loadgen
@@ -362,7 +363,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
- + ", TimeTaken: " + sw.elapsedMillis());
+ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
return splits.toArray(new FileSplit[splits.size()]);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 5f32f11..a4f293c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;

import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -258,7 +259,7 @@ public static PathFilter getInputPathFilter(JobContext context) {

int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
DEFAULT_LIST_STATUS_NUM_THREADS);
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
if (numThreads == 1) {
result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
} else {
@@ -275,7 +276,7 @@ public static PathFilter getInputPathFilter(JobContext context) {

sw.stop();
if (LOG.isDebugEnabled()) {
- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.size());
return result;
@@ -366,7 +367,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* @throws IOException
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);

@@ -414,7 +415,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
- + ", TimeTaken: " + sw.elapsedMillis());
+ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
return splits;
}
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index 272dadc..dc5ae3a 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -310,7 +310,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>11.0.2</version>
+ <version>15.0</version>
</dependency>
<dependency>
<groupId>commons-cli</groupId>

63 hadoop.spec
@ -23,7 +23,7 @@

Name: hadoop
Version: 2.4.0
Release: 2%{?dist}
Release: 3%{?dist}
Summary: A software platform for processing vast amounts of data
# The BSD license file is missing
# https://issues.apache.org/jira/browse/HADOOP-9849
@ -57,12 +57,14 @@ Patch3: %{name}-maven.patch
Patch4: %{name}-no-download-tomcat.patch
# Use dlopen to find libjvm.so
Patch5: %{name}-dlopen-libjvm.patch
# Update to Guava 15.0
Patch7: %{name}-guava-15.0.patch
# Update to Guava 17.0
Patch7: %{name}-guava.patch
# Update to Netty 3.6.6-Final
Patch8: %{name}-netty-3.6.6-Final.patch
# Remove problematic issues with tools.jar
Patch9: %{name}-tools.jar.patch
# Workaround for bz1012059
Patch10: %{name}-build.patch
# The native bits don't compile on ARM
ExcludeArch: %{arm}

@ -485,6 +487,7 @@ This package contains files needed to run Apache Hadoop YARN in secure mode.
%patch8 -p1
%endif
%patch9 -p1
%patch10 -p1

%if 0%{?fedora} < 21
# The hadoop test suite needs classes from the zookeeper test suite.
@ -558,8 +561,8 @@ rm -f hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-test
# Fix scope on hadoop-common:test-jar
%pom_xpath_set "pom:project/pom:dependencies/pom:dependency[pom:artifactId='hadoop-common' and pom:type='test-jar']/pom:scope" test hadoop-tools/hadoop-openstack

# Modify asm version to compat library version 3.3.6
#%%pom_xpath_set "pom:project/pom:dependencyManagement/pom:dependencies/pom:dependency[pom:artifactId='asm']/pom:version" 3.3.6 hadoop-project
# Modify asm version to version 5.0.2
%pom_xpath_set "pom:project/pom:dependencyManagement/pom:dependencies/pom:dependency[pom:artifactId='asm']/pom:version" 5.0.2 hadoop-project

# War files we don't want
%mvn_package :%{name}-auth-examples __noinstall
@ -677,12 +680,12 @@ install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/hdfs/webapps
install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps
install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/mapreduce/lib
install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/yarn/lib
install -d -m 0755 %{buildroot}/%{_sharedstatedir}/tomcats/httpfs
install -d -m 0755 %{buildroot}/%{_sysconfdir}/%{name}/tomcat/Catalina/localhost
install -d -m 0755 %{buildroot}/%{_sysconfdir}/logrotate.d
install -d -m 0755 %{buildroot}/%{_sysconfdir}/sysconfig
install -d -m 0755 %{buildroot}/%{_tmpfilesdir}
install -d -m 0755 %{buildroot}/%{_var}/lib/%{name}-hdfs
install -d -m 0755 %{buildroot}/%{_sharedstatedir}/%{name}-hdfs
install -d -m 0755 %{buildroot}/%{_sharedstatedir}/tomcats/httpfs
install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-yarn
install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-httpfs/temp
install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-httpfs/work
@ -740,9 +743,13 @@ sed -i "s|\(HADOOP_OPTS.*=.*\)\$HADOOP_CLIENT_OPTS|\1 -Djavax.xml.parsers.Docume
echo "export YARN_OPTS=\"\$YARN_OPTS -Djavax.xml.parsers.DocumentBuilderFactory=com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl\"" >> %{buildroot}/%{_sysconfdir}/%{name}/yarn-env.sh

# Workaround for bz1012059
install -pm 644 %{name}-project-dist/target/%{name}-project-dist-%{hadoop_version}.jar %{buildroot}/%{_javadir}/%{name}/%{name}-project-dist.jar
install -pm 644 hadoop-project-dist/pom.xml %{buildroot}/%{_mavenpomdir}/JPP.%{name}-%{name}-project-dist.pom
%add_maven_depmap JPP.%{name}-%{name}-project-dist.pom %{name}/%{name}-project-dist.jar
%{__ln_s} %{_jnidir}/%{name}/hadoop-common.jar %{buildroot}/%{_datadir}/%{name}/common
#echo %{_datadir}/%{name}/common/hadoop-common.jar >> .mfiles
%{__ln_s} %{_javadir}/%{name}/hadoop-hdfs.jar %{buildroot}/%{_datadir}/%{name}/hdfs
echo %{_datadir}/%{name}/hdfs/hadoop-hdfs.jar >> .mfiles-%{name}-hdfs
%{__ln_s} %{_javadir}/%{name}/hadoop-client.jar %{buildroot}/%{_datadir}/%{name}/client
echo %{_datadir}/%{name}/client/hadoop-client.jar >> .mfiles-%{name}-client

# client jar depenencies
copy_dep_jars %{name}-client/target/%{name}-client-%{hadoop_version}/share/%{name}/client/lib %{buildroot}/%{_datadir}/%{name}/client/lib
@ -764,7 +771,7 @@ for f in `ls %{buildroot}/%{_datadir}/%{name}/common/*.jar`
do
echo "$f" | sed "s|%{buildroot}||" >> .mfiles
done
pushd $basedir/share/%{name}/common/lib
pushd $basedir/share/%{name}/common/lib
link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/common/lib
popd

@ -923,7 +930,7 @@ getent group hadoop >/dev/null || groupadd -r hadoop

%pre hdfs
getent group hdfs >/dev/null || groupadd -r hdfs
getent passwd hdfs >/dev/null || /usr/sbin/useradd --comment "Apache Hadoop HDFS" --shell /sbin/nologin -M -r -g hdfs -G hadoop --home %{_var}/lib/%{name}-hdfs hdfs
getent passwd hdfs >/dev/null || /usr/sbin/useradd --comment "Apache Hadoop HDFS" --shell /sbin/nologin -M -r -g hdfs -G hadoop --home %{_sharedstatedir}/%{name}-hdfs hdfs

%pre mapreduce
getent group mapred >/dev/null || groupadd -r mapred
@ -945,12 +952,10 @@ getent passwd yarn >/dev/null || /usr/sbin/useradd --comment "Apache Hadoop Yarn
%post common-native -p /sbin/ldconfig

%post hdfs
%systemd_post %{hdfs_services}

# Change the home directory for the hdfs user
if [[ `getent passwd hdfs | cut -d: -f 6` != "%{_var}/lib/%{name}-hdfs" ]]
if [[ `getent passwd hdfs | cut -d: -f 6` != "%{_sharedstatedir}/%{name}-hdfs" ]]
then
/usr/sbin/usermod -d %{_var}/lib/%{name}-hdfs hdfs
/usr/sbin/usermod -d %{_sharedstatedir}/%{name}-hdfs hdfs
fi

if [ $1 -gt 1 ]
@ -958,9 +963,10 @@ then
if [ -d %{_var}/cache/%{name}-hdfs ] && [ ! -L %{_var}/cache/%{name}-hdfs ]
then
# Move the existing hdfs data to the new location
mv -f %{_var}/cache/%{name}-hdfs/* %{_var}/lib/%{name}-hdfs/
mv -f %{_var}/cache/%{name}-hdfs/* %{_sharedstatedir}/%{name}-hdfs/
fi
fi
%systemd_post %{hdfs_services}

%if %{package_libhdfs}
%post -n libhdfs -p /sbin/ldconfig
@ -977,8 +983,11 @@ fi
%postun hdfs
%systemd_postun_with_restart %{hdfs_services}

# Remove the compatibility symlink
rm -f %{_var}/cache/%{name}-hdfs
if [ $1 -lt 1 ]
then
# Remove the compatibility symlink
rm -f %{_var}/cache/%{name}-hdfs
fi

%if %{package_libhdfs}
%postun -n libhdfs -p /sbin/ldconfig
@ -994,9 +1003,9 @@ rm -f %{_var}/cache/%{name}-hdfs
# Create a symlink to the new location for hdfs data in case the user changed
# the configuration file and the new one isn't in place to point to the
# correct location
if [ ! -f %{_var}/cache/%{name}-hdfs ]
if [ ! -e %{_var}/cache/%{name}-hdfs ]
then
%{__ln_s} %{_var}/lib/%{name}-hdfs %{_var}/cache
%{__ln_s} %{_sharedstatedir}/%{name}-hdfs %{_var}/cache
fi

%files -f .mfiles-%{name}-client client
@ -1019,6 +1028,10 @@ fi
%{_datadir}/%{name}/common/lib
%{_libexecdir}/%{name}-config.sh
%{_libexecdir}/%{name}-layout.sh

# Workaround for bz1012059
%{_mavenpomdir}/JPP.%{name}-%{name}-project-dist.pom

%{_bindir}/%{name}
%{_sbindir}/%{name}-daemon.sh
%{_sbindir}/%{name}-daemons.sh
@ -1042,10 +1055,8 @@ fi
%endif

%files -f .mfiles-%{name}-hdfs hdfs
%exclude %{_datadir}/%{name}/client
%config(noreplace) %{_sysconfdir}/%{name}/hdfs-site.xml
%{_datadir}/%{name}/hdfs
%attr(-,hdfs,hadoop) %{_sharedstatedir}/%{name}-hdfs
%{_unitdir}/%{name}-datanode.service
%{_unitdir}/%{name}-namenode.service
%{_unitdir}/%{name}-journalnode.service
@ -1060,7 +1071,7 @@ fi
%config(noreplace) %attr(644, root, root) %{_sysconfdir}/logrotate.d/%{name}-hdfs
%attr(0755,hdfs,hadoop) %dir %{_var}/run/%{name}-hdfs
%attr(0755,hdfs,hadoop) %dir %{_var}/log/%{name}-hdfs
%attr(0755,hdfs,hadoop) %dir %{_var}/lib/%{name}-hdfs
%attr(0755,hdfs,hadoop) %dir %{_sharedstatedir}/%{name}-hdfs

%if %{package_libhdfs}
%files hdfs-fuse
@ -1099,7 +1110,6 @@ fi
%endif

%files -f .mfiles-%{name}-mapreduce mapreduce
%exclude %{_datadir}/%{name}/client
%config(noreplace) %{_sysconfdir}/%{name}/mapred-env.sh
%config(noreplace) %{_sysconfdir}/%{name}/mapred-queues.xml.template
%config(noreplace) %{_sysconfdir}/%{name}/mapred-site.xml
@ -1123,7 +1133,6 @@ fi
%files -f .mfiles-%{name}-tests tests

%files -f .mfiles-%{name}-yarn yarn
%exclude %{_datadir}/%{name}/client
%config(noreplace) %{_sysconfdir}/%{name}/capacity-scheduler.xml
%config(noreplace) %{_sysconfdir}/%{name}/yarn-env.sh
%config(noreplace) %{_sysconfdir}/%{name}/yarn-site.xml
@ -1150,6 +1159,10 @@ fi
%attr(6050,root,yarn) %{_bindir}/container-executor

%changelog
* Thu Jun 26 2014 Robert Rati <rrati@redhat> - 2.4.0-3
- Fixed FTBFS (#1106748)
- Update to build with guava 17.0

* Sat Jun 07 2014 Fedora Release Engineering <rel-eng@lists.fedoraproject.org> - 2.4.0-2
- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
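
For reference, the Guava call-site moves these patches apply (from the Guava 11.0.2-era APIs to forms available in 15.0 and later, per the "guava 17.0" changelog entry) are summarized in the minimal, self-contained Java sketch below. The class and variable names here are illustrative only and are not taken from the Hadoop sources.

import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.util.concurrent.TimeUnit;

import com.google.common.base.Stopwatch;
import com.google.common.collect.Range;
import com.google.common.io.ByteStreams;

// Illustrative only: each statement shows a newer-style call next to the
// removed Guava 11 form it replaces in the patches above.
public class GuavaMigrationSketch {
    public static void main(String[] args) throws IOException {
        // Stopwatch: the public constructor, elapsedMillis() and elapsedTime()
        // are gone; use the static factories and elapsed(TimeUnit).
        Stopwatch sw = Stopwatch.createStarted();        // was: new Stopwatch().start()
        long ms = sw.elapsed(TimeUnit.MILLISECONDS);     // was: sw.elapsedMillis()

        // LimitInputStream was removed; ByteStreams.limit() is the replacement.
        InputStream in = new ByteArrayInputStream(new byte[128]);
        InputStream limited = ByteStreams.limit(in, 64); // was: new LimitInputStream(in, 64)

        // NullOutputStream was removed; ByteStreams.nullOutputStream() replaces it.
        ByteStreams.copy(limited, ByteStreams.nullOutputStream());

        // Ranges.closed() became a static factory on Range itself.
        Range<Long> txns = Range.closed(1L, 100L);       // was: Ranges.closed(1L, 100L)

        System.out.println(ms + " " + txns);
    }
}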