Revert "2016-06-01: Retired because it depends on nc6, which was"

This reverts commit a0309d53d3.
Till Maas 2016-06-13 07:12:36 +02:00
parent a0309d53d3
commit 1ab2c19177
32 changed files with 5780 additions and 3 deletions

.gitignore

@@ -0,0 +1,2 @@
/tarballs/
/clog

@@ -1,3 +0,0 @@
2016-06-01: Retired because it depends on nc6, which was
retired, because it was orphaned for more than six weeks.

@@ -0,0 +1,12 @@
diff -up hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/JNIFlags.cmake.ppc hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/JNIFlags.cmake
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/JNIFlags.cmake.ppc 2014-06-30 09:04:57.000000000 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/JNIFlags.cmake 2014-10-10 10:37:39.000000000 +0200
@@ -78,6 +78,8 @@ IF("${CMAKE_SYSTEM}" MATCHES "Linux")
SET(_java_libarch "amd64")
ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm")
SET(_java_libarch "arm")
+ ELSEIF (CMAKE_SYSTEM_PROCESSOR MATCHES "^(powerpc|ppc)64le")
+ SET(_java_libarch "ppc64")
ELSE()
SET(_java_libarch ${CMAKE_SYSTEM_PROCESSOR})
ENDIF()

@@ -0,0 +1,77 @@
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-auth/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-common-project/hadoop-auth/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-auth/pom.xml 2015-09-10 04:55:53.847449606 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-common-project/hadoop-auth/pom.xml 2015-09-10 04:51:14.215139934 +0200
@@ -153,6 +153,9 @@
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
+ <configuration>
+ <additionalparam>-Xdoclint:none</additionalparam>
+ </configuration>
<executions>
<execution>
<phase>package</phase>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/pom.xml 2015-09-10 04:55:57.175286681 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-hdfs-project/hadoop-hdfs/pom.xml 2015-09-10 04:53:49.675528855 +0200
@@ -519,6 +519,7 @@
<artifactId>maven-javadoc-plugin</artifactId>
<configuration>
<excludePackageNames>org.apache.hadoop.hdfs.protocol.proto</excludePackageNames>
+ <additionalparam>-Xdoclint:none</additionalparam>
</configuration>
</plugin>
<plugin>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 2015-09-10 04:55:57.176286632 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 2015-09-10 04:54:17.100186189 +0200
@@ -329,6 +329,7 @@
<packages>*</packages>
</group>
</groups>
+ <additionalparam>-Xdoclint:none</additionalparam>
</configuration>
</execution>
</executions>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-project/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-project/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-project/pom.xml 2015-09-10 04:55:57.177286583 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-project/pom.xml 2015-09-10 04:52:44.609714367 +0200
@@ -1115,6 +1115,7 @@
</goals>
<configuration>
<destDir>${project.build.directory}</destDir>
+ <additionalparam>-Xdoclint:none</additionalparam>
</configuration>
</execution>
</executions>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-project-dist/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-project-dist/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-project-dist/pom.xml 2015-09-10 04:55:56.732308368 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/hadoop-project-dist/pom.xml 2015-09-10 04:49:12.634092337 +0200
@@ -118,6 +118,7 @@
<charset>${maven.compile.encoding}</charset>
<reportOutputDirectory>${project.build.directory}/site</reportOutputDirectory>
<destDir>api</destDir>
+ <additionalparam>-Xdoclint:none</additionalparam>
<groups>
<group>
<title>${project.name} API</title>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/pom.xml 2015-09-10 04:55:56.705309690 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.doclint/pom.xml 2015-09-10 04:48:22.464552422 +0200
@@ -289,7 +289,7 @@
</docletArtifact>
</docletArtifacts>
<useStandardDocletOptions>true</useStandardDocletOptions>
-
+ <additionalparam>-Xdoclint:none</additionalparam>
<!-- switch on dependency-driven aggregation -->
<includeDependencySources>false</includeDependencySources>
@@ -398,6 +398,7 @@
</goals>
<configuration>
<overview>hadoop-common-project/hadoop-common/src/main/java/overview.html</overview>
+ <additionalparam>-Xdoclint:none</additionalparam>
</configuration>
</execution>
</executions>

hadoop-2.4.1-jersey1.patch

@@ -0,0 +1,284 @@
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-common-project/hadoop-common/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/pom.xml 2015-09-10 04:13:59.016972031 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-common-project/hadoop-common/pom.xml 2015-09-10 03:53:51.902302395 +0200
@@ -112,22 +112,26 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<!-- Used, even though 'mvn dependency:analyze' doesn't find it -->
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-servlet</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-hdfs-project/hadoop-hdfs/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/pom.xml 2015-09-10 04:13:56.945073866 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-hdfs-project/hadoop-hdfs/pom.xml 2015-09-10 03:55:29.757492758 +0200
@@ -83,11 +83,13 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 2015-09-10 04:13:59.019971884 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml 2015-09-10 03:56:00.339989611 +0200
@@ -67,11 +67,13 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml 2015-09-10 04:13:56.945073866 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-hdfs-project/hadoop-hdfs-nfs/pom.xml 2015-09-10 03:56:32.350416281 +0200
@@ -97,11 +97,13 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
<scope>compile</scope>
</dependency>
<dependency>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-mapreduce-project/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-mapreduce-project/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-mapreduce-project/pom.xml 2015-09-10 04:13:56.999071212 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-mapreduce-project/pom.xml 2015-09-10 03:52:35.657049893 +0200
@@ -128,6 +128,7 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
<exclusions>
<exclusion>
<groupId>asm</groupId>
@@ -138,10 +139,12 @@
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.google.inject.extensions</groupId>
<artifactId>guice-servlet</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>junit</groupId>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-project/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-project/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-project/pom.xml 2015-09-10 04:13:59.038970950 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-project/pom.xml 2015-09-10 03:46:03.557321815 +0200
@@ -59,7 +59,7 @@
<avro.version>1.7.4</avro.version>
<!-- jersey version -->
- <jersey.version>1.17.1</jersey.version>
+ <jersey.version>1</jersey.version>
<!-- ProtocolBuffer version, used to verify the protoc version and -->
<!-- define the protobuf JAR version -->
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml 2015-09-10 04:13:57.003071015 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml 2015-09-10 03:47:14.870816716 +0200
@@ -78,6 +78,7 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-client</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 2015-09-10 04:13:57.013070524 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml 2015-09-10 03:46:50.182030184 +0200
@@ -83,6 +83,7 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>org.codehaus.jackson</groupId>
@@ -147,6 +148,7 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-server</artifactId>
+ <version>${jersey.version}</version>
<exclusions>
<exclusion>
<groupId>asm</groupId>
@@ -157,10 +159,12 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>log4j</groupId>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml 2015-09-10 04:13:57.013070524 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml 2015-09-10 03:48:28.283208456 +0200
@@ -99,15 +99,18 @@
<dependency>
<groupId>com.sun.jersey.jersey-test-framework</groupId>
<artifactId>jersey-test-framework-core</artifactId>
+ <version>${jersey.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
@@ -137,10 +140,12 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-client</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.google.guava</groupId>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml 2015-09-10 04:13:57.013070524 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml 2015-09-10 03:49:21.079613483 +0200
@@ -89,10 +89,12 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-client</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
@@ -148,15 +150,18 @@
<dependency>
<groupId>com.sun.jersey.jersey-test-framework</groupId>
<artifactId>jersey-test-framework-grizzly2</artifactId>
+ <version>${jersey.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml 2015-09-10 04:13:57.022070082 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml 2015-09-10 03:50:18.954768886 +0200
@@ -109,15 +109,18 @@
<dependency>
<groupId>com.sun.jersey.jersey-test-framework</groupId>
<artifactId>jersey-test-framework-core</artifactId>
+ <version>${jersey.version}</version>
<scope>test</scope>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-json</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey.contribs</groupId>
<artifactId>jersey-guice</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<!-- 'mvn dependency:analyze' fails to detect use of this dependency -->
<dependency>
@@ -151,10 +154,12 @@
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-core</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>com.sun.jersey</groupId>
<artifactId>jersey-client</artifactId>
+ <version>${jersey.version}</version>
</dependency>
<dependency>
<groupId>org.eclipse.jetty</groupId>
@@ -210,6 +215,7 @@
<dependency>
<groupId>com.sun.jersey.jersey-test-framework</groupId>
<artifactId>jersey-test-framework-grizzly2</artifactId>
+ <version>${jersey.version}</version>
<scope>test</scope>
</dependency>
</dependencies>
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml 2015-09-10 04:13:57.026069885 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jersey/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml 2015-09-10 03:51:11.787172144 +0200
@@ -119,6 +119,7 @@
<dependency>
<groupId>com.sun.jersey.jersey-test-framework</groupId>
<artifactId>jersey-test-framework-grizzly2</artifactId>
+ <version>${jersey.version}</version>
<scope>test</scope>
</dependency>

@@ -0,0 +1,81 @@
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jets3t/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java 2014-06-30 09:04:57.000000000 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jets3t/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/Jets3tFileSystemStore.java 2015-03-14 15:37:19.582587031 +0100
@@ -91,17 +91,17 @@
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
- try {
+ //try {
AWSCredentials awsCredentials =
new AWSCredentials(s3Credentials.getAccessKey(),
s3Credentials.getSecretAccessKey());
this.s3Service = new RestS3Service(awsCredentials);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
+ // } catch (S3ServiceException e) {
+ // if (e.getCause() instanceof IOException) {
+ // throw (IOException) e.getCause();
+ // }
+ // throw new S3Exception(e);
+ // }
bucket = new S3Bucket(uri.getHost());
this.bufferSize = conf.getInt(
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jets3t/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java 2014-06-30 09:04:57.000000000 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jets3t/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3/MigrationTool.java 2015-03-14 15:50:35.036095902 +0100
@@ -117,7 +117,7 @@
- try {
+ //try {
String accessKey = null;
String secretAccessKey = null;
String userInfo = uri.getUserInfo();
@@ -158,12 +158,12 @@
AWSCredentials awsCredentials =
new AWSCredentials(accessKey, secretAccessKey);
this.s3Service = new RestS3Service(awsCredentials);
- } catch (S3ServiceException e) {
- if (e.getCause() instanceof IOException) {
- throw (IOException) e.getCause();
- }
- throw new S3Exception(e);
- }
+ //} catch (S3ServiceException e) {
+ // if (e.getCause() instanceof IOException) {
+ // throw (IOException) e.getCause();
+ // }
+ // throw new S3Exception(e);
+ //}
bucket = new S3Bucket(uri.getHost());
}
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jets3t/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java 2014-06-30 09:04:57.000000000 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.jets3t/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/fs/s3native/Jets3tNativeFileSystemStore.java 2015-03-14 15:24:05.397371065 +0100
@@ -71,14 +71,14 @@
public void initialize(URI uri, Configuration conf) throws IOException {
S3Credentials s3Credentials = new S3Credentials();
s3Credentials.initialize(uri, conf);
- try {
+ //try {
AWSCredentials awsCredentials =
new AWSCredentials(s3Credentials.getAccessKey(),
s3Credentials.getSecretAccessKey());
this.s3Service = new RestS3Service(awsCredentials);
- } catch (S3ServiceException e) {
- handleS3ServiceException(e);
- }
+ //} catch (S3ServiceException e) {
+ // handleS3ServiceException(e);
+ //}
multipartEnabled =
conf.getBoolean("fs.s3n.multipart.uploads.enabled", false);
multipartBlockSize = Math.min(

@@ -0,0 +1,13 @@
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.bookkeeper/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java 2014-06-30 09:04:57.000000000 +0200
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.bookkeeper/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/src/main/java/org/apache/hadoop/contrib/bkjournal/BookKeeperJournalManager.java 2016-01-09 13:43:26.831773352 +0100
@@ -237,7 +237,7 @@
zkPathLatch.countDown();
}
};
- ZkUtils.createFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
+ ZkUtils.asyncCreateFullPathOptimistic(zkc, zkAvailablePath, new byte[0],
Ids.OPEN_ACL_UNSAFE, CreateMode.PERSISTENT, callback, null);
try {

@@ -0,0 +1,18 @@
diff -Nru hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.servlet/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java 2015-03-20 04:45:08.415241957 +0100
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510.servlet/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java 2015-03-14 16:33:12.627551779 +0100
@@ -308,5 +308,14 @@
public void write(int b) throws IOException {
buffer.append((char) b);
}
+
+ public void setWriteListener(javax.servlet.WriteListener listener) {
+ throw new UnsupportedOperationException("Not implemented yet.");
+ }
+
+ public boolean isReady() {
+ return false;
+ }
+
}
}

hadoop-armhfp.patch

@@ -0,0 +1,34 @@
--- hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/JNIFlags.cmake.orig 2014-07-20 15:03:30.473576587 +0100
+++ hadoop-common-9e2ef43a240fb0f603d8c384e501daec11524510/hadoop-common-project/hadoop-common/src/JNIFlags.cmake 2014-07-20 15:06:13.811115845 +0100
@@ -45,22 +45,22 @@
OUTPUT_VARIABLE JVM_ELF_ARCH
ERROR_QUIET)
if (NOT JVM_ELF_ARCH MATCHES "Tag_ABI_VFP_args: VFP registers")
- message("Soft-float JVM detected")
+ message("Hard-float JVM detected")
- # Test compilation with -mfloat-abi=softfp using an arbitrary libc function
+ # Test compilation with -mfloat-abi=hard using an arbitrary libc function
# (typically fails with "fatal error: bits/predefs.h: No such file or directory"
- # if soft-float dev libraries are not installed)
+ # if hard-float dev libraries are not installed)
include(CMakePushCheckState)
cmake_push_check_state()
- set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=softfp")
+ set(CMAKE_REQUIRED_FLAGS "${CMAKE_REQUIRED_FLAGS} -mfloat-abi=hard")
include(CheckSymbolExists)
- check_symbol_exists(exit stdlib.h SOFTFP_AVAILABLE)
- if (NOT SOFTFP_AVAILABLE)
- message(FATAL_ERROR "Soft-float dev libraries required (e.g. 'apt-get install libc6-dev-armel' on Debian/Ubuntu)")
- endif (NOT SOFTFP_AVAILABLE)
+ check_symbol_exists(exit stdlib.h HARDFP_AVAILABLE)
+ if (NOT HARDFP_AVAILABLE)
+ message(FATAL_ERROR "Hard-float dev libraries required (e.g. 'apt-get install libc6-dev-armel' on Debian/Ubuntu)")
+ endif (NOT HARDFP_AVAILABLE)
cmake_pop_check_state()
- set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=softfp")
+ set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} -mfloat-abi=hard")
endif ()
endif (READELF MATCHES "NOTFOUND")
endif (CMAKE_SYSTEM_PROCESSOR MATCHES "^arm" AND CMAKE_SYSTEM_NAME STREQUAL "Linux")

hadoop-build.patch

@@ -0,0 +1,18 @@
diff --git a/hadoop-project-dist/pom.xml b/hadoop-project-dist/pom.xml
index bd3c555..c89a237 100644
--- a/hadoop-project-dist/pom.xml
+++ b/hadoop-project-dist/pom.xml
@@ -58,13 +58,6 @@
<artifactId>maven-jar-plugin</artifactId>
<executions>
<execution>
- <id>prepare-jar</id>
- <phase>prepare-package</phase>
- <goals>
- <goal>jar</goal>
- </goals>
- </execution>
- <execution>
<id>prepare-test-jar</id>
<phase>prepare-package</phase>
<goals>

hadoop-core-site.xml

@@ -0,0 +1,36 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>fs.default.name</name>
<value>hdfs://localhost:8020</value>
</property>
<!-- HTTPFS proxy user setting -->
<property>
<name>hadoop.proxyuser.tomcat.hosts</name>
<value>*</value>
</property>
<property>
<name>hadoop.proxyuser.tomcat.groups</name>
<value>*</value>
</property>
</configuration>

hadoop-dlopen-libjvm.patch

@@ -0,0 +1,127 @@
diff --git a/hadoop-common-project/hadoop-common/src/CMakeLists.txt b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
index dec63c4..de21bab 100644
--- a/hadoop-common-project/hadoop-common/src/CMakeLists.txt
+++ b/hadoop-common-project/hadoop-common/src/CMakeLists.txt
@@ -205,7 +205,6 @@ ENDIF()
target_link_dual_libraries(hadoop
${LIB_DL}
- ${JAVA_JVM_LIBRARY}
)
SET(LIBHADOOP_VERSION "1.0.0")
SET_TARGET_PROPERTIES(hadoop PROPERTIES
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
index 82d1a32..2151bb8 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/CMakeLists.txt
@@ -99,7 +99,6 @@ if (NEED_LINK_DL)
endif(NEED_LINK_DL)
target_link_dual_libraries(hdfs
- ${JAVA_JVM_LIBRARY}
${LIB_DL}
pthread
)
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
index dd3f1e6..68ba422 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/fuse-dfs/CMakeLists.txt
@@ -68,7 +68,6 @@ IF(FUSE_FOUND)
)
target_link_libraries(fuse_dfs
${FUSE_LIBRARIES}
- ${JAVA_JVM_LIBRARY}
hdfs
m
pthread
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
index 878289f..62686b3 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/native/libhdfs/jni_helper.c
@@ -20,6 +20,7 @@
#include "exception.h"
#include "jni_helper.h"
+#include <dlfcn.h>
#include <stdio.h>
#include <string.h>
@@ -442,10 +443,44 @@ static JNIEnv* getGlobalJNIEnv(void)
jint rv = 0;
jint noVMs = 0;
jthrowable jthr;
+ void *jvmHandle = NULL;
+ jint JNICALL (*getCreatedJavaJVMsPtr)(JavaVM **, jsize, jsize *);
+ getCreatedJavaJVMsPtr = NULL;
+ jint JNICALL (*createJavaVMPtr)(JavaVM **, void **, void *);
+ createJavaVMPtr = NULL;
+ char *dlsym_error = NULL;
+
+ //Get JAVA_HOME to use appropriate libjvm
+ char *javaHome = getenv("JAVA_HOME");
+ if (javaHome == NULL) {
+ javaHome = "/usr/lib/jvm";
+ }
+
+ //Load the appropriate libjvm
+ char libjvmPath[strlen(javaHome)+35];
+ snprintf(libjvmPath, sizeof(libjvmPath), "%s/jre/lib/amd64/server/libjvm.so", javaHome);
+ jvmHandle = dlopen(libjvmPath, RTLD_NOW|RTLD_LOCAL);
+ if (jvmHandle == NULL) {
+ snprintf(libjvmPath, sizeof(libjvmPath), "%s/jre/lib/i386/server/libjvm.so", javaHome);
+ jvmHandle = dlopen(libjvmPath, RTLD_NOW|RTLD_LOCAL);
+ if (jvmHandle == NULL) {
+ fprintf(stderr, "Failed to load libjvm.so!\n");
+ return NULL;
+ }
+ }
- rv = JNI_GetCreatedJavaVMs(&(vmBuf[0]), vmBufLength, &noVMs);
+ //Load the JNI_GetCreatedJavaVMs function from the libjvm library
+ *(void **)(&getCreatedJavaJVMsPtr) = dlsym(jvmHandle, "JNI_GetCreatedJavaVMs");
+ dlsym_error = dlerror();
+ if (dlsym_error) {
+ fprintf(stderr, "Can not load symbol JNI_GetCreatedJavaVMs: %s\n", dlsym_error);
+ dlclose(jvmHandle);
+ return NULL;
+ }
+ rv = (*getCreatedJavaJVMsPtr)(&(vmBuf[0]), vmBufLength, &noVMs);
if (rv != 0) {
fprintf(stderr, "JNI_GetCreatedJavaVMs failed with error: %d\n", rv);
+ dlclose(jvmHandle);
return NULL;
}
@@ -454,6 +489,7 @@ static JNIEnv* getGlobalJNIEnv(void)
char *hadoopClassPath = getenv("CLASSPATH");
if (hadoopClassPath == NULL) {
fprintf(stderr, "Environment variable CLASSPATH not set!\n");
+ dlclose(jvmHandle);
return NULL;
}
char *hadoopClassPathVMArg = "-Djava.class.path=";
@@ -502,7 +538,15 @@ static JNIEnv* getGlobalJNIEnv(void)
vm_args.nOptions = noArgs;
vm_args.ignoreUnrecognized = 1;
- rv = JNI_CreateJavaVM(&vm, (void*)&env, &vm_args);
+ //Load the JNI_CreateJavaVM function from the libjvm library
+ *(void **)(&createJavaVMPtr) = dlsym(jvmHandle, "JNI_CreateJavaVM");
+ dlsym_error = dlerror();
+ if (dlsym_error) {
+ fprintf(stderr, "Can not load symbol JNI_CreateJavaVM: %s\n", dlsym_error);
+ dlclose(jvmHandle);
+ return NULL;
+ }
+ rv = (*createJavaVMPtr)(&vm, (void*)&env, &vm_args);
if (hadoopJvmArgs != NULL) {
free(hadoopJvmArgs);
@@ -512,6 +556,7 @@ static JNIEnv* getGlobalJNIEnv(void)
if (rv != 0) {
fprintf(stderr, "Call to JNI_CreateJavaVM failed "
"with error: %d\n", rv);
+ dlclose(jvmHandle);
return NULL;
}
jthr = invokeMethod(env, NULL, STATIC, NULL,

File diff suppressed because it is too large

hadoop-guava.patch

@@ -0,0 +1,411 @@
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
index f7932a6..ec3d9cf 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
@@ -22,6 +22,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -153,7 +154,7 @@ public String toString() {
private class Monitor implements Runnable {
@Override
public void run() {
- Stopwatch sw = new Stopwatch();
+ Stopwatch sw = Stopwatch.createUnstarted();
Map<String, GcTimes> gcTimesBeforeSleep = getGcTimes();
while (shouldRun) {
sw.reset().start();
@@ -162,7 +163,7 @@ public void run() {
} catch (InterruptedException ie) {
return;
}
- long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
+ long extraSleepTime = sw.elapsed(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
Map<String, GcTimes> gcTimesAfterSleep = getGcTimes();
if (extraSleepTime > warnThresholdMs) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
index 8588de5..cb0dbae 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
@@ -133,7 +133,7 @@
/**
* Stopwatch which starts counting on each heartbeat that is sent
*/
- private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
+ private final Stopwatch lastHeartbeatStopwatch = Stopwatch.createUnstarted();
private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
@@ -435,7 +435,7 @@ private void throwIfOutOfSync()
* written.
*/
private void heartbeatIfNecessary() throws IOException {
- if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
+ if (lastHeartbeatStopwatch.elapsed(TimeUnit.MILLISECONDS) > HEARTBEAT_INTERVAL_MILLIS ||
!lastHeartbeatStopwatch.isRunning()) {
try {
getProxy().heartbeat(createReqInfo());
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
index c117ee8..82f01da 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
@@ -68,7 +68,6 @@
import com.google.common.base.Stopwatch;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.Range;
-import com.google.common.collect.Ranges;
import com.google.protobuf.TextFormat;
/**
@@ -374,15 +373,15 @@ synchronized void journal(RequestInfo reqInfo,
curSegment.writeRaw(records, 0, records.length);
curSegment.setReadyToFlush();
- Stopwatch sw = new Stopwatch();
+ Stopwatch sw = Stopwatch.createUnstarted();
sw.start();
curSegment.flush(shouldFsync);
sw.stop();
- metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
- if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
+ metrics.addSync(sw.elapsed(TimeUnit.MICROSECONDS));
+ if (sw.elapsed(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
- " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
+ " took " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
}
if (isLagging) {
@@ -853,7 +852,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
private Range<Long> txnRange(SegmentStateProto seg) {
Preconditions.checkArgument(seg.hasEndTxId(),
"invalid segment: %s", seg);
- return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
+ return Range.closed(seg.getStartTxId(), seg.getEndTxId());
}
/**
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
index 5075da9..0d868d4 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
@@ -62,7 +62,7 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
import com.google.protobuf.CodedOutputStream;
/**
@@ -215,7 +215,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
channel.position(s.getOffset());
- InputStream in = new BufferedInputStream(new LimitInputStream(fin,
+ InputStream in = new BufferedInputStream(ByteStreams.limit(fin,
s.getLength()));
in = FSImageUtil.wrapInputStreamForCompression(conf,
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
index c8033dd..b312bfe 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
@@ -33,7 +33,7 @@
import org.apache.hadoop.io.IOUtils;
import com.google.common.base.Preconditions;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
/**
* This is the tool for analyzing file sizes in the namespace image. In order to
@@ -106,7 +106,7 @@ void visit(RandomAccessFile file) throws IOException {
in.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
in, s.getLength())));
run(is);
output();
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
index d80fcf1..e025f82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
@@ -50,7 +50,7 @@
import com.google.common.collect.Lists;
import com.google.common.collect.Maps;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
/**
* LsrPBImage displays the blocks of the namespace in a format very similar
@@ -110,7 +110,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));
switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
index 99617b8..c613591 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
@@ -52,7 +52,7 @@
import org.apache.hadoop.io.IOUtils;
import com.google.common.collect.Lists;
-import com.google.common.io.LimitInputStream;
+import com.google.common.io.ByteStreams;
/**
* PBImageXmlWriter walks over an fsimage structure and writes out
@@ -100,7 +100,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
for (FileSummary.Section s : sections) {
fin.getChannel().position(s.getOffset());
InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
+ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
fin, s.getLength())));
switch (SectionName.fromString(s.getName())) {
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
index 132218c..09d42e1 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
@@ -47,7 +47,7 @@
import org.junit.Before;
import org.junit.Test;
-import com.google.common.io.NullOutputStream;
+import com.google.common.io.ByteStreams;
public class TestDataTransferKeepalive {
final Configuration conf = new HdfsConfiguration();
@@ -224,7 +224,7 @@ public void testManyClosedSocketsInCache() throws Exception {
stms[i] = fs.open(TEST_FILE);
}
for (InputStream stm : stms) {
- IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
+ IOUtils.copyBytes(stm, ByteStreams.nullOutputStream(), 1024);
}
} finally {
IOUtils.cleanup(null, stms);
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
index 92c7672..aa5c351 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
@@ -100,10 +100,10 @@ public void run() {
}
private void doAWrite() throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
stm.write(toWrite);
stm.hflush();
- long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
+ long micros = sw.elapsed(TimeUnit.MICROSECONDS);
quantiles.insert(micros);
}
}
@@ -276,12 +276,12 @@ public int run(String args[]) throws Exception {
int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
DFSConfigKeys.DFS_REPLICATION_DEFAULT);
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
replication);
sw.stop();
- System.out.println("Finished in " + sw.elapsedMillis() + "ms");
+ System.out.println("Finished in " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
System.out.println("Latency quantiles (in microseconds):\n" +
test.quantiles);
return 0;
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
index 10b6b79..9fbcf82 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
@@ -27,6 +27,7 @@
import java.net.HttpURLConnection;
import java.net.URL;
import java.util.concurrent.ExecutionException;
+import java.util.concurrent.TimeUnit;
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.fs.FileUtil;
@@ -325,11 +326,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
ch.setEpoch(1);
ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 1; i < numEdits; i++) {
ch.sendEdits(1L, i, 1, data).get();
}
- long time = sw.elapsedMillis();
+ long time = sw.elapsed(TimeUnit.MILLISECONDS);
System.err.println("Wrote " + numEdits + " batches of " + editsSize +
" bytes in " + time + "ms");
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
index a1e49cc..44751b0 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
@@ -20,6 +20,7 @@
import static org.junit.Assert.*;
import java.util.ArrayList;
+import java.util.concurrent.TimeUnit;
import org.junit.Test;
@@ -69,24 +70,22 @@ public void testPerformance() {
System.gc();
{
ArrayList<String> arrayList = new ArrayList<String>();
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 0; i < numElems; i++) {
arrayList.add(obj);
}
- System.out.println(" ArrayList " + sw.elapsedMillis());
+ System.out.println(" ArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
}
// test ChunkedArrayList
System.gc();
{
ChunkedArrayList<String> chunkedList = new ChunkedArrayList<String>();
- Stopwatch sw = new Stopwatch();
- sw.start();
+ Stopwatch sw = Stopwatch.createStarted();
for (int i = 0; i < numElems; i++) {
chunkedList.add(obj);
}
- System.out.println("ChunkedArrayList " + sw.elapsedMillis());
+ System.out.println("ChunkedArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
}
}
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
index 9863427..07854a1 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
@@ -28,6 +28,7 @@
import java.util.List;
import java.util.Map;
import java.util.Set;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -223,7 +224,7 @@ protected void addInputPathRecursively(List<FileStatus> result,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
if (numThreads == 1) {
List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
@@ -242,7 +243,7 @@ protected void addInputPathRecursively(List<FileStatus> result,
sw.stop();
if (LOG.isDebugEnabled()) {
- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.length);
return result;
@@ -300,7 +301,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* they're too big.*/
public InputSplit[] getSplits(JobConf job, int numSplits)
throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
FileStatus[] files = listStatus(job);
// Save the number of input files for metrics/loadgen
@@ -362,7 +363,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
- + ", TimeTaken: " + sw.elapsedMillis());
+ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
return splits.toArray(new FileSplit[splits.size()]);
}
diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
index 5f32f11..a4f293c 100644
--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
@@ -21,6 +21,7 @@
import java.io.IOException;
import java.util.ArrayList;
import java.util.List;
+import java.util.concurrent.TimeUnit;
import org.apache.commons.logging.Log;
import org.apache.commons.logging.LogFactory;
@@ -258,7 +259,7 @@ public static PathFilter getInputPathFilter(JobContext context) {
int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
DEFAULT_LIST_STATUS_NUM_THREADS);
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
if (numThreads == 1) {
result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
} else {
@@ -275,7 +276,7 @@ public static PathFilter getInputPathFilter(JobContext context) {
sw.stop();
if (LOG.isDebugEnabled()) {
- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
+ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
LOG.info("Total input paths to process : " + result.size());
return result;
@@ -366,7 +367,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
* @throws IOException
*/
public List<InputSplit> getSplits(JobContext job) throws IOException {
- Stopwatch sw = new Stopwatch().start();
+ Stopwatch sw = Stopwatch.createStarted();
long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
long maxSize = getMaxSplitSize(job);
@@ -414,7 +415,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
sw.stop();
if (LOG.isDebugEnabled()) {
LOG.debug("Total # of splits generated by getSplits: " + splits.size()
- + ", TimeTaken: " + sw.elapsedMillis());
+ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
}
return splits;
}
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b315e2b..9ad8bcd 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -310,7 +310,7 @@
<dependency>
<groupId>com.google.guava</groupId>
<artifactId>guava</artifactId>
- <version>11.0.2</version>
+ <version>17.0</version>
</dependency>
<dependency>
<groupId>commons-cli</groupId>
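
All of the hadoop-guava.patch hunks above follow the same pattern: the pom bump to Guava 17.0 drops the old public Stopwatch constructor and its elapsedMillis()/elapsedTime() methods, so call sites are ported to Stopwatch.createStarted()/createUnstarted() and elapsed(TimeUnit). A minimal sketch of the new call style (illustration only; StopwatchMigrationSketch is a hypothetical class, not part of the patch):

import java.util.concurrent.TimeUnit;
import com.google.common.base.Stopwatch;

class StopwatchMigrationSketch {
    static long timeSomething() throws InterruptedException {
        Stopwatch sw = Stopwatch.createStarted();    // replaces: new Stopwatch().start()
        Thread.sleep(10);                            // the work being timed
        sw.stop();
        return sw.elapsed(TimeUnit.MILLISECONDS);    // replaces: sw.elapsedMillis()
    }
}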

hadoop-hdfs-site.xml

@@ -0,0 +1,67 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
<property>
<name>dfs.replication</name>
<value>1</value>
</property>
<!-- Immediately exit safemode as soon as one DataNode checks in.
On a multi-node cluster, these configurations must be removed. -->
<property>
<name>dfs.safemode.extension</name>
<value>0</value>
</property>
<property>
<name>dfs.safemode.min.datanodes</name>
<value>1</value>
</property>
<property>
<name>hadoop.tmp.dir</name>
<value>/var/lib/hadoop-hdfs/${user.name}</value>
</property>
<property>
<name>dfs.namenode.name.dir</name>
<value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/namenode</value>
</property>
<property>
<name>dfs.namenode.checkpoint.dir</name>
<value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/secondarynamenode</value>
</property>
<property>
<name>dfs.datanode.data.dir</name>
<value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/datanode</value>
</property>
<property>
<name>dfs.http.address</name>
<value>0.0.0.0:50070</value>
</property>
<property>
<name>dfs.datanode.address</name>
<value>0.0.0.0:50010</value>
</property>
<property>
<name>dfs.datanode.http.address</name>
<value>0.0.0.0:50075</value>
</property>
<property>
<name>dfs.datanode.ipc.address</name>
<value>0.0.0.0:50020</value>
</property>
</configuration>

@@ -0,0 +1,37 @@
[Unit]
Description=The Hadoop DAEMON daemon
After=network.target
After=NetworkManager.target
[Service]
Type=forking
EnvironmentFile=-/etc/sysconfig/hadoop-hdfs
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
ExecStart=/usr/sbin/hadoop-daemon.sh start DAEMON
ExecStop=/usr/sbin/hadoop-daemon.sh stop DAEMON
User=hdfs
Group=hadoop
PIDFile=/var/run/hadoop-hdfs/hadoop-hdfs-DAEMON.pid
LimitNOFILE=32768
LimitNPROC=65536
#######################################
# Note: Below are cgroup options
#######################################
#Slice=
#CPUAccounting=true
#CPUShares=1024
#MemoryAccounting=true
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
#BlockIOAccounting=true
#BlockIOWeight=??
#BlockIODeviceWeight=??
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
#DeviceAllow=
#DevicePolicy=auto|closed|strict
[Install]
WantedBy=multi-user.target

hadoop-httpfs.sysconfig

@@ -0,0 +1,5 @@
CATALINA_BASE=/usr/share/hadoop/httpfs/tomcat
CATALINA_HOME=/usr/share/hadoop/httpfs/tomcat
CATALINA_TMPDIR=/var/cache/hadoop-httpfs
CATALINA_OPTS="-Dhttpfs.home.dir=/usr -Dhttpfs.config.dir=/etc/hadoop -Dhttpfs.log.dir=/var/log/hadoop-httpfs -Dhttpfs.temp.dir=/var/cache/hadoop-httpfs -Dhttpfs.admin.port=14001 -Dhttpfs.http.port=14000"

@@ -0,0 +1,32 @@
diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
index 5667d98..c0106ce 100644
--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
+++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
@@ -46,15 +46,20 @@
LOG.debug("Trying to load the custom-built native-hadoop library...");
}
try {
- System.loadLibrary("hadoop");
+ System.load("/usr/lib64/hadoop/libhadoop.so");
LOG.debug("Loaded the native-hadoop library");
nativeCodeLoaded = true;
- } catch (Throwable t) {
- // Ignore failure to load
- if(LOG.isDebugEnabled()) {
- LOG.debug("Failed to load native-hadoop with error: " + t);
- LOG.debug("java.library.path=" +
- System.getProperty("java.library.path"));
+ } catch (Throwable t64) {
+ LOG.debug("Failed to load 64-bit native-hadoop with error: " + t64);
+ try {
+ System.load("/usr/lib/hadoop/libhadoop.so");
+ LOG.debug("Loaded the native-hadoop library");
+ nativeCodeLoaded = true;
+ } catch (Throwable t32) {
+ // Ignore failure to load
+ if(LOG.isDebugEnabled()) {
+ LOG.debug("Failed to load 32-bit native-hadoop with error: " + t32);
+ }
}
}

29
hadoop-layout.sh Normal file
View File

@ -0,0 +1,29 @@
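# Directory layout for the Fedora Hadoop packages, sourced by the Hadoop
# launcher scripts (hadoop-config.sh) to locate jars, configuration files and
# the per-daemon PID and log directories.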
export HADOOP_PREFIX=/usr
export HADOOP_COMMON_HOME=/usr
export HADOOP_COMMON_DIR=share/hadoop/common
export HADOOP_COMMON_LIB_JARS_DIR=share/hadoop/common/lib
export HADOOP_COMMON_LIB_NATIVE_DIR=lib/hadoop
export HADOOP_CONF_DIR=/etc/hadoop
export HADOOP_LIBEXEC_DIR=/usr/libexec

export HADOOP_HDFS_HOME=$HADOOP_PREFIX
export HDFS_DIR=share/hadoop/hdfs
export HDFS_LIB_JARS_DIR=share/hadoop/hdfs/lib
export HADOOP_PID_DIR=/var/run/hadoop-hdfs
export HADOOP_LOG_DIR=/var/log/hadoop-hdfs
export HADOOP_IDENT_STRING=hdfs

export HADOOP_YARN_HOME=$HADOOP_PREFIX
export YARN_DIR=share/hadoop/yarn
export YARN_LIB_JARS_DIR=share/hadoop/yarn/lib
export YARN_PID_DIR=/var/run/hadoop-yarn
export YARN_LOG_DIR=/var/log/hadoop-yarn
export YARN_CONF_DIR=/etc/hadoop
export YARN_IDENT_STRING=yarn

export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
export MAPRED_DIR=share/hadoop/mapreduce
export MAPRED_LIB_JARS_DIR=share/hadoop/mapreduce/lib
export HADOOP_MAPRED_PID_DIR=/var/run/hadoop-mapreduce
export HADOOP_MAPRED_LOG_DIR=/var/log/hadoop-mapreduce
export HADOOP_MAPRED_IDENT_STRING=mapred

37
hadoop-mapred-site.xml Normal file
View File

@ -0,0 +1,37 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>mapred.job.tracker</name>
    <value>localhost:8021</value>
  </property>
  <property>
    <name>mapreduce.framework.name</name>
    <value>yarn</value>
  </property>
  <property>
    <description>To set the value of tmp directory for map and reduce tasks.</description>
    <name>mapreduce.task.tmp.dir</name>
    <value>/var/cache/hadoop-mapreduce/${user.name}/tasks</value>
  </property>
</configuration>

View File

@ -0,0 +1,37 @@
[Unit]
Description=The Hadoop DAEMON daemon
After=network.target
After=NetworkManager.target

[Service]
Type=forking
EnvironmentFile=-/etc/sysconfig/hadoop-mapreduce
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
ExecStart=/usr/sbin/mr-jobhistory-daemon.sh start DAEMON
ExecStop=/usr/sbin/mr-jobhistory-daemon.sh stop DAEMON
User=mapred
Group=hadoop
PIDFile=/var/run/hadoop-mapreduce/mapred-mapred-DAEMON.pid
LimitNOFILE=32768
LimitNPROC=65536
#######################################
# Note: Below are cgroup options
#######################################
#Slice=
#CPUAccounting=true
#CPUShares=1024
#MemoryAccounting=true
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
#BlockIOAccounting=true
#BlockIOWeight=??
#BlockIODeviceWeight=??
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
#DeviceAllow=
#DevicePolicy=auto|closed|strict

[Install]
WantedBy=multi-user.target

44
hadoop-maven.patch Normal file
View File

@ -0,0 +1,44 @@
diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
index 7cf67a3..c090916 100644
--- a/hadoop-common-project/hadoop-common/pom.xml
+++ b/hadoop-common-project/hadoop-common/pom.xml
@@ -364,16 +364,6 @@
</executions>
</plugin>
<plugin>
- <groupId>org.apache.maven.plugins</groupId>
- <artifactId>maven-surefire-plugin</artifactId>
- <configuration>
- <systemPropertyVariables>
- <startKdc>${startKdc}</startKdc>
- <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
- </systemPropertyVariables>
- </configuration>
- </plugin>
- <plugin>
<groupId>org.apache.avro</groupId>
<artifactId>avro-maven-plugin</artifactId>
<executions>
@@ -480,6 +470,10 @@
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-surefire-plugin</artifactId>
<configuration>
+ <systemPropertyVariables>
+ <startKdc>${startKdc}</startKdc>
+ <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
+ </systemPropertyVariables>
<properties>
<property>
<name>listener</name>
diff --git a/pom.xml b/pom.xml
index 13dbf49..ad84034 100644
--- a/pom.xml
+++ b/pom.xml
@@ -387,6 +387,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
<plugin>
<groupId>org.apache.maven.plugins</groupId>
<artifactId>maven-javadoc-plugin</artifactId>
+ <version>2.8.1</version>
<inherited>false</inherited>
<executions>
<execution>

View File

@ -0,0 +1,31 @@
diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
index 9b267fe..0ce916d 100644
--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
@@ -38,12 +38,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
<dependencyManagement>
<dependencies>
- <!-- This is a really old version of netty, that gets privatized
- via shading and hence it is not managed via a parent pom -->
<dependency>
<groupId>org.jboss.netty</groupId>
<artifactId>netty</artifactId>
- <version>3.2.4.Final</version>
+ <version>3.9.3.Final</version>
</dependency>
</dependencies>
</dependencyManagement>
diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
index b315e2b..a9da3aa 100644
--- a/hadoop-project/pom.xml
+++ b/hadoop-project/pom.xml
@@ -462,7 +462,7 @@
<dependency>
<groupId>io.netty</groupId>
<artifactId>netty</artifactId>
- <version>3.6.2.Final</version>
+ <version>3.9.3.Final</version>
</dependency>
<dependency>

View File

@ -0,0 +1,58 @@
diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
index d01a32f..9ebc494 100644
--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
@@ -523,53 +523,6 @@
<artifactId>maven-antrun-plugin</artifactId>
<executions>
<execution>
- <id>dist</id>
- <goals>
- <goal>run</goal>
- </goals>
- <phase>package</phase>
- <configuration>
- <target>
- <mkdir dir="downloads"/>
- <get
- src="${tomcat.download.url}"
- dest="downloads/apache-tomcat-${tomcat.version}.tar.gz" verbose="true" skipexisting="true"/>
- <delete dir="${project.build.directory}/tomcat.exp"/>
- <mkdir dir="${project.build.directory}/tomcat.exp"/>
-
- <!-- Using Unix script to preserve file permissions -->
- <echo file="${project.build.directory}/tomcat-untar.sh">
- cd "${project.build.directory}/tomcat.exp"
- gzip -cd ../../downloads/apache-tomcat-${tomcat.version}.tar.gz | tar xf -
- </echo>
- <exec executable="sh" dir="${project.build.directory}" failonerror="true">
- <arg line="./tomcat-untar.sh"/>
- </exec>
-
- <move file="${project.build.directory}/tomcat.exp/apache-tomcat-${tomcat.version}"
- tofile="${httpfs.tomcat.dist.dir}"/>
- <delete dir="${project.build.directory}/tomcat.exp"/>
- <delete dir="${httpfs.tomcat.dist.dir}/webapps"/>
- <mkdir dir="${httpfs.tomcat.dist.dir}/webapps"/>
- <delete file="${httpfs.tomcat.dist.dir}/conf/server.xml"/>
- <copy file="${basedir}/src/main/tomcat/server.xml"
- toDir="${httpfs.tomcat.dist.dir}/conf"/>
- <delete file="${httpfs.tomcat.dist.dir}/conf/ssl-server.xml"/>
- <copy file="${basedir}/src/main/tomcat/ssl-server.xml"
- toDir="${httpfs.tomcat.dist.dir}/conf"/>
- <delete file="${httpfs.tomcat.dist.dir}/conf/logging.properties"/>
- <copy file="${basedir}/src/main/tomcat/logging.properties"
- toDir="${httpfs.tomcat.dist.dir}/conf"/>
- <copy toDir="${httpfs.tomcat.dist.dir}/webapps/ROOT">
- <fileset dir="${basedir}/src/main/tomcat/ROOT"/>
- </copy>
- <copy toDir="${httpfs.tomcat.dist.dir}/webapps/webhdfs">
- <fileset dir="${project.build.directory}/webhdfs"/>
- </copy>
- </target>
- </configuration>
- </execution>
- <execution>
<id>tar</id>
<phase>package</phase>
<goals>

49
hadoop-tomcat-users.xml Normal file
View File

@ -0,0 +1,49 @@
<?xml version='1.0' encoding='utf-8'?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<tomcat-users xmlns="http://tomcat.apache.org/xml"
              xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
              xsi:schemaLocation="http://tomcat.apache.org/xml tomcat-users.xsd"
              version="1.0">
<!--
NOTE: By default, no user is included in the "manager-gui" role required
to operate the "/manager/html" web application. If you wish to use this app,
you must define such a user - the username and password are arbitrary.
-->
<!--
NOTE: The sample user and role entries below are wrapped in a comment
and thus are ignored when reading this file. Do not forget to remove
<!.. ..> that surrounds them.
-->
<!--
<role rolename="tomcat"/>
<role rolename="role1"/>
<user username="tomcat" password="tomcat" roles="tomcat"/>
<user username="both" password="tomcat" roles="tomcat,role1"/>
<user username="role1" password="tomcat" roles="role1"/>
-->
<!-- <role rolename="admin"/> -->
<!-- <role rolename="admin-gui"/> -->
<!-- <role rolename="admin-script"/> -->
<!-- <role rolename="manager"/> -->
<!-- <role rolename="manager-gui"/> -->
<!-- <role rolename="manager-script"/> -->
<!-- <role rolename="manager-jmx"/> -->
<!-- <role rolename="manager-status"/> -->
<!-- <user name="admin" password="adminadmin" roles="admin,manager,admin-gui,admin-script,manager-gui,manager-script,manager-jmx,manager-status" /> -->
</tomcat-users>

32
hadoop-tools.jar.patch Normal file
View File

@ -0,0 +1,32 @@
diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
index c3e1aa1..9042f73 100644
--- a/hadoop-common-project/hadoop-annotations/pom.xml
+++ b/hadoop-common-project/hadoop-annotations/pom.xml
@@ -48,11 +48,8 @@
</activation>
<dependencies>
<dependency>
- <groupId>jdk.tools</groupId>
- <artifactId>jdk.tools</artifactId>
- <version>1.6</version>
- <scope>system</scope>
- <systemPath>${java.home}/../lib/tools.jar</systemPath>
+ <groupId>com.sun</groupId>
+ <artifactId>tools</artifactId>
</dependency>
</dependencies>
</profile>
@@ -63,11 +60,8 @@
</activation>
<dependencies>
<dependency>
- <groupId>jdk.tools</groupId>
- <artifactId>jdk.tools</artifactId>
- <version>1.7</version>
- <scope>system</scope>
- <systemPath>${java.home}/../lib/tools.jar</systemPath>
+ <groupId>com.sun</groupId>
+ <artifactId>tools</artifactId>
</dependency>
</dependencies>
</profile>

75
hadoop-yarn-site.xml Normal file
View File

@ -0,0 +1,75 @@
<?xml version="1.0"?>
<!--
Licensed to the Apache Software Foundation (ASF) under one or more
contributor license agreements. See the NOTICE file distributed with
this work for additional information regarding copyright ownership.
The ASF licenses this file to You under the Apache License, Version 2.0
(the "License"); you may not use this file except in compliance with
the License. You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
-->
<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
<configuration>
  <property>
    <name>yarn.nodemanager.aux-services</name>
    <value>mapreduce_shuffle</value>
  </property>
  <property>
    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
  </property>
  <!--
  <property>
    <name>yarn.log-aggregation-enable</name>
    <value>true</value>
  </property>
  -->
  <property>
    <name>yarn.dispatcher.exit-on-error</name>
    <value>true</value>
  </property>
  <property>
    <description>List of directories to store localized files in.</description>
    <name>yarn.nodemanager.local-dirs</name>
    <value>/var/cache/hadoop-yarn/${user.name}/nm-local-dir</value>
  </property>
  <property>
    <description>Where to store container logs.</description>
    <name>yarn.nodemanager.log-dirs</name>
    <value>/var/log/hadoop-yarn/containers</value>
  </property>
  <!--
  <property>
    <description>Where to aggregate logs to.</description>
    <name>yarn.nodemanager.remote-app-log-dir</name>
    <value>/var/log/hadoop-yarn/apps</value>
  </property>
  -->
  <property>
    <description>Classpath for typical applications.</description>
    <name>yarn.application.classpath</name>
    <value>
      $HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR/*,
      $HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR/*,
      $HADOOP_HDFS_HOME/$HDFS_DIR/*,$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR/*,
      $HADOOP_MAPRED_HOME/$MAPRED_DIR/*,
      $HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR/*,
      $HADOOP_YARN_HOME/$YARN_DIR/*,$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR/*
    </value>
  </property>
</configuration>

View File

@ -0,0 +1,37 @@
[Unit]
Description=The Hadoop DAEMON daemon
After=network.target
After=NetworkManager.target

[Service]
Type=forking
EnvironmentFile=-/etc/sysconfig/hadoop-yarn
EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
ExecStart=/usr/sbin/yarn-daemon.sh start DAEMON
ExecStop=/usr/sbin/yarn-daemon.sh stop DAEMON
User=yarn
Group=hadoop
PIDFile=/var/run/hadoop-yarn/yarn-yarn-DAEMON.pid
LimitNOFILE=32768
LimitNPROC=65536
#######################################
# Note: Below are cgroup options
#######################################
#Slice=
#CPUAccounting=true
#CPUShares=1024
#MemoryAccounting=true
#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
#BlockIOAccounting=true
#BlockIOWeight=??
#BlockIODeviceWeight=??
#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
#DeviceAllow=
#DevicePolicy=auto|closed|strict

[Install]
WantedBy=multi-user.target

8
hadoop.logrotate Normal file
View File

@ -0,0 +1,8 @@
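# Rotate the Hadoop daemon logs weekly, keeping a year of compressed history.
# NAME is a placeholder for the daemon name.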
/var/log/hadoop-NAME/*.log
{
    missingok
    copytruncate
    compress
    weekly
    rotate 52
}

1305
hadoop.spec Normal file

File diff suppressed because it is too large

66
hdfs-create-dirs Normal file
View File

@ -0,0 +1,66 @@
#!/bin/bash
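# Create the standard HDFS directory tree (user, log and tmp directories plus
# the YARN/MapReduce staging areas), formatting and starting the namenode
# first if it is not already running.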
hdfs_dirs="/user /var/log /tmp"
mapred_dirs="/tmp/hadoop-yarn/staging /tmp/hadoop-yarn/staging/history /tmp/hadoop-yarn/staging/history/done /tmp/hadoop-yarn/staging/history/done_intermediate"
yarn_dirs="/tmp/hadoop-yarn /var/log/hadoop-yarn"

# Must be run as root
if [[ $EUID -ne 0 ]]
then
    echo "This must be run as root" 1>&2
    exit 1
fi

# Start the namenode if it isn't running
started=0
systemctl status hadoop-namenode > /dev/null 2>&1
rc=$?
if [[ $rc -gt 0 ]]
then
    # Format the namenode if it hasn't been formatted
    runuser hdfs -s /bin/bash /bin/bash -c "hdfs namenode -format -nonInteractive" > /dev/null 2>&1
    if [[ $? -eq 0 ]]
    then
        echo "Formatted the Hadoop namenode"
    fi

    echo "Starting the Hadoop namenode"
    systemctl start hadoop-namenode > /dev/null 2>&1
    rc=$?
    started=1
fi

if [[ $rc -ne 0 ]]
then
    echo "The Hadoop namenode failed to start"
    exit 1
fi

for dir in $hdfs_dirs $yarn_dirs $mapred_dirs
do
    echo "Creating directory $dir"
    runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -mkdir -p $dir" > /dev/null 2>&1
done

echo "Setting permissions on /tmp"
runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chmod 1777 /tmp" > /dev/null 2>&1

for dir in $mapred_dirs
do
    echo "Setting permissions and ownership for $dir"
    runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chown mapred:mapred $dir" > /dev/null 2>&1
    runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chmod 1777 $dir" > /dev/null 2>&1
done

for dir in $yarn_dirs
do
    echo "Setting permissions and ownership for $dir"
    runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chown yarn:mapred $dir" > /dev/null 2>&1
done

# Stop the namenode if we started it
if [[ $started -gt 0 ]]
then
    echo "Stopping the Hadoop namenode"
    systemctl stop hadoop-namenode > /dev/null 2>&1
fi

1
sources Normal file
View File

@ -0,0 +1 @@
52fb8f4c28bc35067f54a4f28bb7596c hadoop-2.4.1-9e2ef43.tar.gz