+ children = zkc.getChildren("/ledgers/available",
+ false);
+ mostRecentSize = children.size();
++ // TODO: Bookkeeper 4.2.0 introduced "readonly" bookies
++ // which mess with test bookie counts;
++ // unclear why setReadOnlyModeEnabled(false) doesn't have
++ // the backward-compat effect hoped for
++ if (children.contains("readonly")) {
++ mostRecentSize = children.size()-1;
++ }
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Found " + mostRecentSize + " bookies up, "
+ + "waiting for " + count);
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+index 50b44f8..d5a91d3 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/JournalNode.java
+@@ -46,7 +46,7 @@
+ import org.apache.hadoop.util.StringUtils;
+ import org.apache.hadoop.util.Tool;
+ import org.apache.hadoop.util.ToolRunner;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ import com.google.common.base.Preconditions;
+ import com.google.common.collect.Maps;
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+index fc85a5e..1610c8c 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/DataNode.java
+@@ -88,7 +88,7 @@
+ import org.apache.hadoop.util.*;
+ import org.apache.hadoop.util.DiskChecker.DiskErrorException;
+ import org.apache.hadoop.util.DiskChecker.DiskOutOfSpaceException;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ import javax.management.ObjectName;
+
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+index 477b7f6..8a22654 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/datanode/SecureDataNodeStarter.java
+@@ -30,10 +30,11 @@
+ import org.apache.hadoop.http.HttpConfig;
+ import org.apache.hadoop.http.HttpServer2;
+ import org.apache.hadoop.security.UserGroupInformation;
+-import org.mortbay.jetty.Connector;
++import org.eclipse.jetty.server.Connector;
+
+ import com.google.common.annotations.VisibleForTesting;
+
++
+ /**
+ * Utility class to start a datanode in a secure cluster, first obtaining
+ * privileged resources before main startup and handing them to the datanode.
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+index 4232e00..3386dff 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSNamesystem.java
+@@ -264,7 +264,7 @@
+ import org.apache.log4j.Appender;
+ import org.apache.log4j.AsyncAppender;
+ import org.apache.log4j.Logger;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Charsets;
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
+index aa4ba5d..5b945ba 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/StreamFile.java
+@@ -39,7 +39,7 @@
+ import org.apache.hadoop.io.IOUtils;
+ import org.apache.hadoop.security.UserGroupInformation;
+ import org.apache.hadoop.util.ServletUtil;
+-import org.mortbay.jetty.InclusiveByteRange;
++import org.eclipse.jetty.server.InclusiveByteRange;
+
+ @InterfaceAudience.Private
+ public class StreamFile extends DfsServlet {
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+index 50a7f21..1d96e15 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/JsonUtil.java
+@@ -32,7 +32,7 @@
+ import org.apache.hadoop.security.token.TokenIdentifier;
+ import org.apache.hadoop.util.DataChecksum;
+ import org.apache.hadoop.util.StringUtils;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ import java.io.ByteArrayInputStream;
+ import java.io.DataInputStream;
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+index 6aa935c..dfc1e39 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/web/WebHdfsFileSystem.java
+@@ -98,7 +98,7 @@
+ import org.apache.hadoop.security.token.Token;
+ import org.apache.hadoop.security.token.TokenIdentifier;
+ import org.apache.hadoop.util.Progressable;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ import com.google.common.annotations.VisibleForTesting;
+ import com.google.common.base.Charsets;
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
+index 3471848..b4e0202 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNodeMXBean.java
+@@ -34,7 +34,7 @@
+ import org.junit.After;
+ import org.junit.Before;
+ import org.junit.Test;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ /**
+ * Test {@link JournalNodeMXBean}
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+index db8f92e..79d9003 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestFSNamesystemMBean.java
+@@ -28,7 +28,7 @@
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.hdfs.MiniDFSCluster;
+ import org.junit.Test;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ /**
+ * Class for testing {@link NameNodeMXBean} implementation
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+index d459d30..6327a83 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestNameNodeMXBean.java
+@@ -37,7 +37,7 @@
+ import org.apache.hadoop.io.nativeio.NativeIO.POSIX.NoMlockCacheManipulator;
+ import org.apache.hadoop.util.VersionInfo;
+ import org.junit.Test;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ /**
+ * Class for testing {@link NameNodeMXBean} implementation
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
+index 0f22e9a..bff549a 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStartupProgressServlet.java
+@@ -36,7 +36,7 @@
+ import org.apache.hadoop.hdfs.server.namenode.startupprogress.StartupProgress;
+ import org.junit.Before;
+ import org.junit.Test;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ public class TestStartupProgressServlet {
+
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
+index f24b801..28d05b4 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/server/namenode/TestStreamFile.java
+@@ -46,7 +46,7 @@
+ import org.apache.hadoop.net.NetUtils;
+ import org.junit.Test;
+ import org.mockito.Mockito;
+-import org.mortbay.jetty.InclusiveByteRange;
++import org.eclipse.jetty.server.InclusiveByteRange;
+
+ /*
+ * Mock input stream class that always outputs the current position of the stream.
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+index 2bce30f..eaf836d 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/web/TestJsonUtil.java
+@@ -38,7 +38,7 @@
+ import org.apache.hadoop.util.Time;
+ import org.junit.Assert;
+ import org.junit.Test;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ import com.google.common.collect.Lists;
+
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+index 7029f42..c7023c9 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/test/MiniDFSClusterManager.java
+@@ -38,7 +38,7 @@
+ import org.apache.hadoop.hdfs.HdfsConfiguration;
+ import org.apache.hadoop.hdfs.MiniDFSCluster;
+ import org.apache.hadoop.hdfs.server.common.HdfsServerConstants.StartupOption;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ /**
+ * This class drives the creation of a mini-cluster on the local machine. By
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
+index 981e6ff..7864756 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-app/src/main/java/org/apache/hadoop/mapreduce/v2/app/JobEndNotifier.java
+@@ -30,7 +30,7 @@
+ import org.apache.hadoop.mapred.JobContext;
+ import org.apache.hadoop.mapreduce.MRJobConfig;
+ import org.apache.hadoop.mapreduce.v2.api.records.JobReport;
+-import org.mortbay.log.Log;
++import org.eclipse.jetty.util.log.Log;
+
+ /**
+ * This class handles job end notification. Submitters of jobs can choose to
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java
+index 8891ec7..1dd369a 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-hs/src/test/java/org/apache/hadoop/mapreduce/v2/hs/webapp/TestHsWebServicesJobsQuery.java
+@@ -136,7 +136,7 @@ public void testJobsQueryStateNone() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
++ assertEquals("jobs is not None", 0, json.getJSONObject("jobs").length());
+ }
+
+ @Test
+@@ -202,7 +202,7 @@ public void testJobsQueryUserNone() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
++ assertEquals("jobs is not None", 0, json.getJSONObject("jobs").length());
+ }
+
+ @Test
+@@ -287,7 +287,7 @@ public void testJobsQueryQueueNonExist() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
++ assertEquals("jobs is not None", 0, json.getJSONObject("jobs").length());
+ }
+
+ @Test
+@@ -319,7 +319,7 @@ public void testJobsQueryStartTimeBegin() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
++ assertEquals("jobs is not None", 0, json.getJSONObject("jobs").length());
+ }
+
+ @Test
+@@ -639,7 +639,7 @@ public void testJobsQueryFinishTimeEnd() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("jobs is not null", JSONObject.NULL, json.get("jobs"));
++ assertEquals("jobs is not None", 0, json.getJSONObject("jobs").length());
+ }
+
+ @Test
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
+index d2ea74e..d986fdc 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapred/NotificationTestCase.java
+@@ -18,9 +18,9 @@
+
+ package org.apache.hadoop.mapred;
+
+-import org.mortbay.jetty.Server;
+-import org.mortbay.jetty.servlet.Context;
+-import org.mortbay.jetty.servlet.ServletHolder;
++import org.eclipse.jetty.server.Server;
++import org.eclipse.jetty.servlet.ServletContextHandler;
++import org.eclipse.jetty.servlet.ServletHolder;
+ import org.apache.hadoop.fs.Path;
+ import org.apache.hadoop.fs.FileSystem;
+ import org.apache.hadoop.io.Text;
+@@ -69,7 +69,7 @@ private void startHttpServer() throws Exception {
+ }
+ webServer = new Server(0);
+
+- Context context = new Context(webServer, contextPath);
++ ServletContextHandler context = new ServletContextHandler(webServer, contextPath);
+
+ // create servlet handler
+ context.addServlet(new ServletHolder(new NotificationServlet()),
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+index 2e8ba5e..3cc73b5 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-jobclient/src/test/java/org/apache/hadoop/mapreduce/MiniHadoopClusterManager.java
+@@ -45,7 +45,7 @@
+ import org.apache.hadoop.mapreduce.v2.jobhistory.JHAdminConfig;
+ import org.apache.hadoop.yarn.conf.YarnConfiguration;
+ import org.apache.hadoop.yarn.server.MiniYARNCluster;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ /**
+ * This class drives the creation of a mini-cluster on the local machine. By
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+index c803a7f..393d385 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/main/java/org/apache/hadoop/mapred/ShuffleHandler.java
+@@ -111,7 +111,7 @@
+ import org.jboss.netty.handler.ssl.SslHandler;
+ import org.jboss.netty.handler.stream.ChunkedWriteHandler;
+ import org.jboss.netty.util.CharsetUtil;
+-import org.mortbay.jetty.HttpHeaders;
++import org.eclipse.jetty.http.HttpHeaders;
+
+ import com.google.common.base.Charsets;
+ import com.google.common.util.concurrent.ThreadFactoryBuilder;
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
+index 420c428..3a3257e 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-shuffle/src/test/java/org/apache/hadoop/mapred/TestShuffleHandler.java
+@@ -78,7 +78,7 @@
+ import org.jboss.netty.handler.codec.http.HttpResponseStatus;
+ import org.junit.Assert;
+ import org.junit.Test;
+-import org.mortbay.jetty.HttpHeaders;
++import org.eclipse.jetty.http.HttpHeaders;
+
+ public class TestShuffleHandler {
+ static final long MiB = 1024 * 1024;
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+index 8ae5809..b7da2bc 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/pom.xml
+@@ -43,8 +43,8 @@
+ avro
+
+
+- org.mortbay.jetty
+- jetty
++ org.eclipse.jetty
++ jetty-server
+
+
+ org.apache.ant
+@@ -78,16 +78,8 @@
+ commons-el
+
+
+- tomcat
+- jasper-runtime
+-
+-
+- tomcat
+- jasper-compiler
+-
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
++ org.apache.tomcat
++ tomcat-jasper
+
+
+
+diff --git a/hadoop-mapreduce-project/pom.xml b/hadoop-mapreduce-project/pom.xml
+index 8f1d2b0..465c2df 100644
+--- a/hadoop-mapreduce-project/pom.xml
++++ b/hadoop-mapreduce-project/pom.xml
+@@ -52,8 +52,8 @@
+ avro
+
+
+- org.mortbay.jetty
+- jetty
++ org.eclipse.jetty
++ jetty-server
+
+
+ org.apache.ant
+@@ -87,16 +87,8 @@
+ commons-el
+
+
+- tomcat
+- jasper-runtime
+-
+-
+- tomcat
+- jasper-compiler
+-
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
++ org.apache.tomcat
++ tomcat-jasper
+
+
+
+@@ -136,6 +128,12 @@
+
+ com.sun.jersey
+ jersey-server
++
++
++ asm
++ asm
++
++
+
+
+ com.sun.jersey.contribs
+diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
+index b315e2b..e9b072d 100644
+--- a/hadoop-project/pom.xml
++++ b/hadoop-project/pom.xml
+@@ -59,7 +59,7 @@
+ 1.7.4
+
+
+- 1.9
++ 1.17.1
+
+
+
+@@ -360,29 +360,17 @@
+
+ javax.servlet
+ servlet-api
+- 2.5
++ 3.0-alpha-1
+
+
+- org.mortbay.jetty
+- jetty
+- 6.1.26
+-
+-
+- org.mortbay.jetty
+- servlet-api
+-
+-
++ org.eclipse.jetty
++ jetty-server
++ 8.1.14.v20131031
+
+
+- org.mortbay.jetty
++ org.eclipse.jetty
+ jetty-util
+- 6.1.26
+-
+-
+-
+- org.glassfish
+- javax.servlet
+- 3.1
++ 8.1.14.v20131031
+
+
+
+@@ -421,6 +409,17 @@
+ com.sun.jersey
+ jersey-server
+ ${jersey.version}
++
++
++ asm
++ asm
++
++
++
++
++ com.sun.jersey
++ jersey-servlet
++ ${jersey.version}
+
+
+
+@@ -472,34 +471,22 @@
+
+
+
+- org.mortbay.jetty
+- jetty-servlet-tester
+- 6.1.26
+-
+-
+- tomcat
+- jasper-compiler
+- 5.5.23
+-
+-
+- javax.servlet
+- jsp-api
+-
+-
+- ant
+- ant
+-
+-
++ org.eclipse.jetty
++ test-jetty-servlet
++ 8.1.14.v20131031
+
++
+
+- tomcat
+- jasper-runtime
+- 5.5.23
++ org.apache.tomcat
++ tomcat-servlet-api
++ 7.0.37
+
+
+- javax.servlet.jsp
+- jsp-api
+- 2.1
++ org.glassfish.web
++ javax.servlet.jsp
++ 2.2.5
+
+
+ commons-el
+@@ -728,7 +715,7 @@
+
+ org.apache.bookkeeper
+ bookkeeper-server
+- 4.0.0
++ 4.2.1
+ compile
+
+
+diff --git a/hadoop-tools/hadoop-sls/pom.xml b/hadoop-tools/hadoop-sls/pom.xml
+index 6166725..e0d3ee7 100644
+--- a/hadoop-tools/hadoop-sls/pom.xml
++++ b/hadoop-tools/hadoop-sls/pom.xml
+@@ -55,18 +55,12 @@
+ compile
+
+
+- org.mortbay.jetty
+- jetty
++ org.eclipse.jetty
++ jetty-server
+ provided
+-
+-
+- org.mortbay.jetty
+- servlet-api
+-
+-
+
+
+- org.mortbay.jetty
++ org.eclipse.jetty
+ jetty-util
+ provided
+
+diff --git a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
+index 123ccea..e961e58 100644
+--- a/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
++++ b/hadoop-tools/hadoop-sls/src/main/java/org/apache/hadoop/yarn/sls/web/SLSWebApp.java
+@@ -32,10 +32,11 @@
+ import org.apache.commons.io.FileUtils;
+ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.event
+ .SchedulerEventType;
+-import org.mortbay.jetty.Handler;
+-import org.mortbay.jetty.Server;
+-import org.mortbay.jetty.handler.AbstractHandler;
+-import org.mortbay.jetty.Request;
++import org.eclipse.jetty.server.Handler;
++import org.eclipse.jetty.server.Server;
++import org.eclipse.jetty.server.Request;
++import org.eclipse.jetty.server.handler.AbstractHandler;
++import org.eclipse.jetty.server.handler.ResourceHandler;
+
+ import org.apache.hadoop.yarn.sls.SLSRunner;
+ import org.apache.hadoop.yarn.sls.scheduler.FairSchedulerMetrics;
+@@ -45,7 +46,6 @@
+ import com.codahale.metrics.Gauge;
+ import com.codahale.metrics.Histogram;
+ import com.codahale.metrics.MetricRegistry;
+-import org.mortbay.jetty.handler.ResourceHandler;
+
+ public class SLSWebApp extends HttpServlet {
+ private static final long serialVersionUID = 1905162041950251407L;
+@@ -108,8 +108,9 @@ public void start() throws Exception {
+
+ Handler handler = new AbstractHandler() {
+ @Override
+- public void handle(String target, HttpServletRequest request,
+- HttpServletResponse response, int dispatch) {
++ public void handle(String target, Request baseRequest,
++ HttpServletRequest request,
++ HttpServletResponse response) {
+ try{
+ // timeunit
+ int timeunit = 1000; // second, divide millionsecond / 1000
+@@ -131,7 +132,7 @@ public void handle(String target, HttpServletRequest request,
+ // js/css request
+ if (target.startsWith("/js") || target.startsWith("/css")) {
+ response.setCharacterEncoding("utf-8");
+- staticHandler.handle(target, request, response, dispatch);
++ staticHandler.handle(target, baseRequest, request, response);
+ } else
+ // json request
+ if (target.equals("/simulateMetrics")) {
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+index fe2955a..0179f7b 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-api/pom.xml
+@@ -64,10 +64,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+index c639de8..37c0908 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-distributedshell/pom.xml
+@@ -51,10 +51,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
+index 35d1a42..48c0d50 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-applications/hadoop-yarn-applications-unmanaged-am-launcher/pom.xml
+@@ -63,10 +63,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+index 82d66cb..cc7606f 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+@@ -48,10 +48,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+@@ -76,7 +72,7 @@
+ log4j
+
+
+- org.mortbay.jetty
++ org.eclipse.jetty
+ jetty-util
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+index 08e71c1..461c43c 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/api/impl/TestAMRMClient.java
+@@ -83,7 +83,7 @@
+ import org.junit.Test;
+ import org.mockito.invocation.InvocationOnMock;
+ import org.mockito.stubbing.Answer;
+-import org.mortbay.log.Log;
++import org.eclipse.jetty.util.log.Log;
+
+ public class TestAMRMClient {
+ static Configuration conf = null;
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+index 1efb54c..1b3463b 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/src/test/java/org/apache/hadoop/yarn/client/cli/TestYarnCLI.java
+@@ -62,7 +62,7 @@
+ import org.apache.hadoop.yarn.util.Records;
+ import org.junit.Before;
+ import org.junit.Test;
+-import org.mortbay.log.Log;
++import org.eclipse.jetty.util.log.Log;
+
+ import org.apache.commons.cli.Options;
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+index a19a78c..83aa759 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/pom.xml
+@@ -51,10 +51,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+@@ -151,6 +147,12 @@
+
+ com.sun.jersey
+ jersey-server
++
++
++ asm
++ asm
++
++
+
+
+ com.sun.jersey
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+index f8c6f55..71df06b 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-common/src/main/java/org/apache/hadoop/yarn/webapp/WebApps.java
+@@ -266,7 +266,8 @@ public void setup() {
+ server.setAttribute(entry.getKey(), entry.getValue());
+ }
+ HttpServer2.defineFilter(server.getWebAppContext(), "guice",
+- GuiceFilter.class.getName(), null, new String[] { "/*" });
++ GuiceFilter.class.getName(), new HashMap(0),
++ new String[] { "/*" });
+
+ webapp.setConf(conf);
+ webapp.setHttpServer(server);
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+index 8a4e6f5..c785145 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice/pom.xml
+@@ -58,10 +58,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+index 294f969..24d7706 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-common/pom.xml
+@@ -51,10 +51,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+index 0fbafd2..5fe4206 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/pom.xml
+@@ -53,10 +53,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+@@ -99,7 +95,7 @@
+ jersey-client
+
+
+- org.mortbay.jetty
++ org.eclipse.jetty
+ jetty-util
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+index 7d2948e..81e51c3 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/main/java/org/apache/hadoop/yarn/server/nodemanager/webapp/ContainerLogsPage.java
+@@ -43,6 +43,7 @@
+ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet;
+ import org.apache.hadoop.yarn.webapp.hamlet.Hamlet.PRE;
+ import org.apache.hadoop.yarn.webapp.view.HtmlBlock;
++import org.eclipse.jetty.util.log.Log;
+
+ import com.google.inject.Inject;
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+index bfb0e87..f9fac8e 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/containermanager/logaggregation/TestLogAggregationService.java
+@@ -104,7 +104,7 @@
+ import org.junit.Test;
+ import org.mockito.ArgumentCaptor;
+ import org.mockito.Mockito;
+-import org.mortbay.util.MultiException;
++import org.eclipse.jetty.util.MultiException;
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
+index 72c1f6f..d272614 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesApps.java
+@@ -176,7 +176,7 @@ public void testNodeAppsNone() throws JSONException, Exception {
+ .accept(MediaType.APPLICATION_JSON).get(ClientResponse.class);
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+- assertEquals("apps isn't NULL", JSONObject.NULL, json.get("apps"));
++ assertEquals("apps isn't None",0,json.getJSONObject("apps").length());
+ }
+
+ private HashMap addAppContainers(Application app)
+@@ -286,7 +286,7 @@ public void testNodeAppsUserNone() throws JSONException, Exception {
+ .get(ClientResponse.class);
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+- assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
++ assertEquals("apps is not None", 0, json.getJSONObject("apps").length());
+ }
+
+ @Test
+@@ -368,7 +368,7 @@ public void testNodeAppsStateNone() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+
+- assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
++ assertEquals("apps is not None", 0, json.getJSONObject("apps").length());
+ }
+
+ @Test
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
+index 29c9253..56ca16e 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-nodemanager/src/test/java/org/apache/hadoop/yarn/server/nodemanager/webapp/TestNMWebServicesContainers.java
+@@ -183,7 +183,7 @@ public void testNodeContainersNone() throws JSONException, Exception {
+ .get(ClientResponse.class);
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+- assertEquals("apps isn't NULL", JSONObject.NULL, json.get("containers"));
++ assertEquals("apps isn't None", 0, json.getJSONObject("containers").length());
+ }
+
+ private HashMap addAppContainers(Application app)
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+index 3e78e02..358a534 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/pom.xml
+@@ -55,10 +55,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+@@ -161,7 +157,7 @@
+ jersey-client
+
+
+- org.mortbay.jetty
++ org.eclipse.jetty
+ jetty-util
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
+index ef4a0d4..f96879e 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/main/java/org/apache/hadoop/yarn/server/resourcemanager/RMNMInfo.java
+@@ -33,7 +33,7 @@
+ import org.apache.hadoop.yarn.server.resourcemanager.rmnode.RMNode;
+ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.ResourceScheduler;
+ import org.apache.hadoop.yarn.server.resourcemanager.scheduler.SchedulerNodeReport;
+-import org.mortbay.util.ajax.JSON;
++import org.eclipse.jetty.util.ajax.JSON;
+
+ /**
+ * JMX bean listing statuses of all node managers.
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
+index 1dcac06..6ecc80d 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/MockNM.java
+@@ -43,7 +43,7 @@
+ import org.apache.hadoop.yarn.server.utils.BuilderUtils;
+ import org.apache.hadoop.yarn.util.Records;
+ import org.apache.hadoop.yarn.util.YarnVersionInfo;
+-import org.mortbay.log.Log;
++import org.eclipse.jetty.util.log.Log;
+
+ public class MockNM {
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+index 45b3803..2b79c2c 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesApps.java
+@@ -376,7 +376,7 @@ public void testAppsQueryStateNone() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
++ assertEquals("apps is not None", 0, json.getJSONObject("apps").length());
+ rm.stop();
+ }
+
+@@ -491,7 +491,7 @@ public void testAppsQueryFinalStatusNone() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
++ assertEquals("apps is not None", 0, json.getJSONObject("apps").length());
+ rm.stop();
+ }
+
+@@ -667,7 +667,7 @@ public void testAppsQueryStartEnd() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("apps is not null", JSONObject.NULL, json.get("apps"));
++ assertEquals("apps is not None", 0, json.getJSONObject("apps").length());
+ rm.stop();
+ }
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+index da2e2b1..77cdfa9 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager/src/test/java/org/apache/hadoop/yarn/server/resourcemanager/webapp/TestRMWebServicesNodes.java
+@@ -204,7 +204,7 @@ public void testNodesQueryStateNone() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes"));
++ assertEquals("nodes is not None", 0, json.getJSONObject("nodes").length());
+ }
+
+ @Test
+@@ -343,7 +343,7 @@ public void testNodesQueryHealthyFalse() throws JSONException, Exception {
+ assertEquals(MediaType.APPLICATION_JSON_TYPE, response.getType());
+ JSONObject json = response.getEntity(JSONObject.class);
+ assertEquals("incorrect number of elements", 1, json.length());
+- assertEquals("nodes is not null", JSONObject.NULL, json.get("nodes"));
++ assertEquals("nodes is not None", 0, json.getJSONObject("nodes").length());
+ }
+
+ public void testNodesHelper(String path, String media) throws JSONException,
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+index 44076eb..065bf72 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/pom.xml
+@@ -50,10 +50,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+index 10f243c..af23544 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/pom.xml
+@@ -56,10 +56,6 @@
+ tomcat
+ jasper-compiler
+
+-
+- org.mortbay.jetty
+- jsp-2.1-jetty
+-
+
+
+
+@@ -109,8 +105,8 @@
+ commons-logging
+
+
+- org.mortbay.jetty
+- jetty
++ org.eclipse.jetty
++ jetty-server
+
+
+
+diff --git a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
+index 1be0115..420a41c 100644
+--- a/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
++++ b/hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-web-proxy/src/test/java/org/apache/hadoop/yarn/server/webproxy/TestWebAppProxyServlet.java
+@@ -59,9 +59,9 @@
+ import org.junit.AfterClass;
+ import org.junit.BeforeClass;
+ import org.junit.Test;
+-import org.mortbay.jetty.Server;
+-import org.mortbay.jetty.servlet.Context;
+-import org.mortbay.jetty.servlet.ServletHolder;
++import org.eclipse.jetty.server.Server;
++import org.eclipse.jetty.servlet.ServletContextHandler;
++import org.eclipse.jetty.servlet.ServletHolder;
+
+ /**
+ * Test the WebAppProxyServlet and WebAppProxy. For back end use simple web
+@@ -81,7 +81,7 @@
+ @BeforeClass
+ public static void start() throws Exception {
+ server = new Server(0);
+- Context context = new Context();
++ ServletContextHandler context = new ServletContextHandler();
+ context.setContextPath("/foo");
+ server.setHandler(context);
+ context.addServlet(new ServletHolder(TestServlet.class), "/bar/");
diff --git a/hadoop-guava.patch b/hadoop-guava.patch
new file mode 100644
index 0000000..3e39932
--- /dev/null
+++ b/hadoop-guava.patch
@@ -0,0 +1,411 @@
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+index f7932a6..ec3d9cf 100644
+--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/JvmPauseMonitor.java
+@@ -22,6 +22,7 @@
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
++import java.util.concurrent.TimeUnit;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+@@ -153,7 +154,7 @@ public String toString() {
+ private class Monitor implements Runnable {
+ @Override
+ public void run() {
+- Stopwatch sw = new Stopwatch();
++ Stopwatch sw = Stopwatch.createUnstarted();
+ Map gcTimesBeforeSleep = getGcTimes();
+ while (shouldRun) {
+ sw.reset().start();
+@@ -162,7 +163,7 @@ public void run() {
+ } catch (InterruptedException ie) {
+ return;
+ }
+- long extraSleepTime = sw.elapsedMillis() - SLEEP_INTERVAL_MS;
++ long extraSleepTime = sw.elapsed(TimeUnit.MILLISECONDS) - SLEEP_INTERVAL_MS;
+ Map gcTimesAfterSleep = getGcTimes();
+
+ if (extraSleepTime > warnThresholdMs) {
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+index 8588de5..cb0dbae 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/client/IPCLoggerChannel.java
+@@ -133,7 +133,7 @@
+ /**
+ * Stopwatch which starts counting on each heartbeat that is sent
+ */
+- private final Stopwatch lastHeartbeatStopwatch = new Stopwatch();
++ private final Stopwatch lastHeartbeatStopwatch = Stopwatch.createUnstarted();
+
+ private static final long HEARTBEAT_INTERVAL_MILLIS = 1000;
+
+@@ -435,7 +435,7 @@ private void throwIfOutOfSync()
+ * written.
+ */
+ private void heartbeatIfNecessary() throws IOException {
+- if (lastHeartbeatStopwatch.elapsedMillis() > HEARTBEAT_INTERVAL_MILLIS ||
++ if (lastHeartbeatStopwatch.elapsed(TimeUnit.MILLISECONDS) > HEARTBEAT_INTERVAL_MILLIS ||
+ !lastHeartbeatStopwatch.isRunning()) {
+ try {
+ getProxy().heartbeat(createReqInfo());
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+index c117ee8..82f01da 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/qjournal/server/Journal.java
+@@ -68,7 +68,6 @@
+ import com.google.common.base.Stopwatch;
+ import com.google.common.collect.ImmutableList;
+ import com.google.common.collect.Range;
+-import com.google.common.collect.Ranges;
+ import com.google.protobuf.TextFormat;
+
+ /**
+@@ -374,15 +373,15 @@ synchronized void journal(RequestInfo reqInfo,
+
+ curSegment.writeRaw(records, 0, records.length);
+ curSegment.setReadyToFlush();
+- Stopwatch sw = new Stopwatch();
++ Stopwatch sw = Stopwatch.createUnstarted();
+ sw.start();
+ curSegment.flush(shouldFsync);
+ sw.stop();
+
+- metrics.addSync(sw.elapsedTime(TimeUnit.MICROSECONDS));
+- if (sw.elapsedTime(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
++ metrics.addSync(sw.elapsed(TimeUnit.MICROSECONDS));
++ if (sw.elapsed(TimeUnit.MILLISECONDS) > WARN_SYNC_MILLIS_THRESHOLD) {
+ LOG.warn("Sync of transaction range " + firstTxnId + "-" + lastTxnId +
+- " took " + sw.elapsedTime(TimeUnit.MILLISECONDS) + "ms");
++ " took " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
+ }
+
+ if (isLagging) {
+@@ -853,7 +852,7 @@ public synchronized void acceptRecovery(RequestInfo reqInfo,
+ private Range txnRange(SegmentStateProto seg) {
+ Preconditions.checkArgument(seg.hasEndTxId(),
+ "invalid segment: %s", seg);
+- return Ranges.closed(seg.getStartTxId(), seg.getEndTxId());
++ return Range.closed(seg.getStartTxId(), seg.getEndTxId());
+ }
+
+ /**
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+index 5075da9..0d868d4 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/server/namenode/FSImageFormatProtobuf.java
+@@ -62,7 +62,7 @@
+
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+-import com.google.common.io.LimitInputStream;
++import com.google.common.io.ByteStreams;
+ import com.google.protobuf.CodedOutputStream;
+
+ /**
+@@ -215,7 +215,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
+
+ for (FileSummary.Section s : sections) {
+ channel.position(s.getOffset());
+- InputStream in = new BufferedInputStream(new LimitInputStream(fin,
++ InputStream in = new BufferedInputStream(ByteStreams.limit(fin,
+ s.getLength()));
+
+ in = FSImageUtil.wrapInputStreamForCompression(conf,
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+index c8033dd..b312bfe 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/FileDistributionCalculator.java
+@@ -33,7 +33,7 @@
+ import org.apache.hadoop.io.IOUtils;
+
+ import com.google.common.base.Preconditions;
+-import com.google.common.io.LimitInputStream;
++import com.google.common.io.ByteStreams;
+
+ /**
+ * This is the tool for analyzing file sizes in the namespace image. In order to
+@@ -106,7 +106,7 @@ void visit(RandomAccessFile file) throws IOException {
+
+ in.getChannel().position(s.getOffset());
+ InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
+- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
++ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
+ in, s.getLength())));
+ run(is);
+ output();
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+index d80fcf1..e025f82 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/LsrPBImage.java
+@@ -50,7 +50,7 @@
+
+ import com.google.common.collect.Lists;
+ import com.google.common.collect.Maps;
+-import com.google.common.io.LimitInputStream;
++import com.google.common.io.ByteStreams;
+
+ /**
+ * LsrPBImage displays the blocks of the namespace in a format very similar
+@@ -110,7 +110,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
+ for (FileSummary.Section s : sections) {
+ fin.getChannel().position(s.getOffset());
+ InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
+- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
++ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
+ fin, s.getLength())));
+
+ switch (SectionName.fromString(s.getName())) {
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+index 99617b8..c613591 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/main/java/org/apache/hadoop/hdfs/tools/offlineImageViewer/PBImageXmlWriter.java
+@@ -52,7 +52,7 @@
+ import org.apache.hadoop.io.IOUtils;
+
+ import com.google.common.collect.Lists;
+-import com.google.common.io.LimitInputStream;
++import com.google.common.io.ByteStreams;
+
+ /**
+ * PBImageXmlWriter walks over an fsimage structure and writes out
+@@ -100,7 +100,7 @@ public int compare(FileSummary.Section s1, FileSummary.Section s2) {
+ for (FileSummary.Section s : sections) {
+ fin.getChannel().position(s.getOffset());
+ InputStream is = FSImageUtil.wrapInputStreamForCompression(conf,
+- summary.getCodec(), new BufferedInputStream(new LimitInputStream(
++ summary.getCodec(), new BufferedInputStream(ByteStreams.limit(
+ fin, s.getLength())));
+
+ switch (SectionName.fromString(s.getName())) {
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+index 132218c..09d42e1 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestDataTransferKeepalive.java
+@@ -47,7 +47,7 @@
+ import org.junit.Before;
+ import org.junit.Test;
+
+-import com.google.common.io.NullOutputStream;
++import com.google.common.io.ByteStreams;
+
+ public class TestDataTransferKeepalive {
+ final Configuration conf = new HdfsConfiguration();
+@@ -224,7 +224,7 @@ public void testManyClosedSocketsInCache() throws Exception {
+ stms[i] = fs.open(TEST_FILE);
+ }
+ for (InputStream stm : stms) {
+- IOUtils.copyBytes(stm, new NullOutputStream(), 1024);
++ IOUtils.copyBytes(stm, ByteStreams.nullOutputStream(), 1024);
+ }
+ } finally {
+ IOUtils.cleanup(null, stms);
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+index 92c7672..aa5c351 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/TestMultiThreadedHflush.java
+@@ -100,10 +100,10 @@ public void run() {
+ }
+
+ private void doAWrite() throws IOException {
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ stm.write(toWrite);
+ stm.hflush();
+- long micros = sw.elapsedTime(TimeUnit.MICROSECONDS);
++ long micros = sw.elapsed(TimeUnit.MICROSECONDS);
+ quantiles.insert(micros);
+ }
+ }
+@@ -276,12 +276,12 @@ public int run(String args[]) throws Exception {
+ int replication = conf.getInt(DFSConfigKeys.DFS_REPLICATION_KEY,
+ DFSConfigKeys.DFS_REPLICATION_DEFAULT);
+
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ test.doMultithreadedWrites(conf, p, numThreads, writeSize, numWrites,
+ replication);
+ sw.stop();
+
+- System.out.println("Finished in " + sw.elapsedMillis() + "ms");
++ System.out.println("Finished in " + sw.elapsed(TimeUnit.MILLISECONDS) + "ms");
+ System.out.println("Latency quantiles (in microseconds):\n" +
+ test.quantiles);
+ return 0;
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+index 10b6b79..9fbcf82 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/qjournal/server/TestJournalNode.java
+@@ -27,6 +27,7 @@
+ import java.net.HttpURLConnection;
+ import java.net.URL;
+ import java.util.concurrent.ExecutionException;
++import java.util.concurrent.TimeUnit;
+
+ import org.apache.hadoop.conf.Configuration;
+ import org.apache.hadoop.fs.FileUtil;
+@@ -325,11 +326,11 @@ private void doPerfTest(int editsSize, int numEdits) throws Exception {
+ ch.setEpoch(1);
+ ch.startLogSegment(1, NameNodeLayoutVersion.CURRENT_LAYOUT_VERSION).get();
+
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ for (int i = 1; i < numEdits; i++) {
+ ch.sendEdits(1L, i, 1, data).get();
+ }
+- long time = sw.elapsedMillis();
++ long time = sw.elapsed(TimeUnit.MILLISECONDS);
+
+ System.err.println("Wrote " + numEdits + " batches of " + editsSize +
+ " bytes in " + time + "ms");
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
+index a1e49cc..44751b0 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/test/java/org/apache/hadoop/hdfs/util/TestChunkedArrayList.java
+@@ -20,6 +20,7 @@
+ import static org.junit.Assert.*;
+
+ import java.util.ArrayList;
++import java.util.concurrent.TimeUnit;
+
+ import org.junit.Test;
+
+@@ -69,24 +70,22 @@ public void testPerformance() {
+ System.gc();
+ {
+ ArrayList arrayList = new ArrayList();
+- Stopwatch sw = new Stopwatch();
+- sw.start();
++ Stopwatch sw = Stopwatch.createStarted();
+ for (int i = 0; i < numElems; i++) {
+ arrayList.add(obj);
+ }
+- System.out.println(" ArrayList " + sw.elapsedMillis());
++ System.out.println(" ArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
+ }
+
+ // test ChunkedArrayList
+ System.gc();
+ {
+ ChunkedArrayList chunkedList = new ChunkedArrayList();
+- Stopwatch sw = new Stopwatch();
+- sw.start();
++ Stopwatch sw = Stopwatch.createStarted();
+ for (int i = 0; i < numElems; i++) {
+ chunkedList.add(obj);
+ }
+- System.out.println("ChunkedArrayList " + sw.elapsedMillis());
++ System.out.println("ChunkedArrayList " + sw.elapsed(TimeUnit.MILLISECONDS));
+ }
+ }
+ }
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+index 9863427..07854a1 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapred/FileInputFormat.java
+@@ -28,6 +28,7 @@
+ import java.util.List;
+ import java.util.Map;
+ import java.util.Set;
++import java.util.concurrent.TimeUnit;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+@@ -223,7 +224,7 @@ protected void addInputPathRecursively(List<FileStatus> result,
+ org.apache.hadoop.mapreduce.lib.input.FileInputFormat.LIST_STATUS_NUM_THREADS,
+ org.apache.hadoop.mapreduce.lib.input.FileInputFormat.DEFAULT_LIST_STATUS_NUM_THREADS);
+
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ if (numThreads == 1) {
+      List<FileStatus> locatedFiles = singleThreadedListStatus(job, dirs, inputFilter, recursive);
+      result = locatedFiles.toArray(new FileStatus[locatedFiles.size()]);
+@@ -242,7 +243,7 @@ protected void addInputPathRecursively(List<FileStatus> result,
+
+ sw.stop();
+ if (LOG.isDebugEnabled()) {
+- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
++ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
+ }
+ LOG.info("Total input paths to process : " + result.length);
+ return result;
+@@ -300,7 +301,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
+ * they're too big.*/
+ public InputSplit[] getSplits(JobConf job, int numSplits)
+ throws IOException {
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ FileStatus[] files = listStatus(job);
+
+ // Save the number of input files for metrics/loadgen
+@@ -362,7 +363,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
+ sw.stop();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+- + ", TimeTaken: " + sw.elapsedMillis());
++ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
+ }
+ return splits.toArray(new FileSplit[splits.size()]);
+ }
+diff --git a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+index 5f32f11..a4f293c 100644
+--- a/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
++++ b/hadoop-mapreduce-project/hadoop-mapreduce-client/hadoop-mapreduce-client-core/src/main/java/org/apache/hadoop/mapreduce/lib/input/FileInputFormat.java
+@@ -21,6 +21,7 @@
+ import java.io.IOException;
+ import java.util.ArrayList;
+ import java.util.List;
++import java.util.concurrent.TimeUnit;
+
+ import org.apache.commons.logging.Log;
+ import org.apache.commons.logging.LogFactory;
+@@ -258,7 +259,7 @@ public static PathFilter getInputPathFilter(JobContext context) {
+
+ int numThreads = job.getConfiguration().getInt(LIST_STATUS_NUM_THREADS,
+ DEFAULT_LIST_STATUS_NUM_THREADS);
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ if (numThreads == 1) {
+ result = singleThreadedListStatus(job, dirs, inputFilter, recursive);
+ } else {
+@@ -275,7 +276,7 @@ public static PathFilter getInputPathFilter(JobContext context) {
+
+ sw.stop();
+ if (LOG.isDebugEnabled()) {
+- LOG.debug("Time taken to get FileStatuses: " + sw.elapsedMillis());
++ LOG.debug("Time taken to get FileStatuses: " + sw.elapsed(TimeUnit.MILLISECONDS));
+ }
+ LOG.info("Total input paths to process : " + result.size());
+ return result;
+@@ -366,7 +367,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
+ * @throws IOException
+ */
+   public List<InputSplit> getSplits(JobContext job) throws IOException {
+- Stopwatch sw = new Stopwatch().start();
++ Stopwatch sw = Stopwatch.createStarted();
+ long minSize = Math.max(getFormatMinSplitSize(), getMinSplitSize(job));
+ long maxSize = getMaxSplitSize(job);
+
+@@ -414,7 +415,7 @@ protected FileSplit makeSplit(Path file, long start, long length,
+ sw.stop();
+ if (LOG.isDebugEnabled()) {
+ LOG.debug("Total # of splits generated by getSplits: " + splits.size()
+- + ", TimeTaken: " + sw.elapsedMillis());
++ + ", TimeTaken: " + sw.elapsed(TimeUnit.MILLISECONDS));
+ }
+ return splits;
+ }
+diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
+index b315e2b..9ad8bcd 100644
+--- a/hadoop-project/pom.xml
++++ b/hadoop-project/pom.xml
+@@ -310,7 +310,7 @@
+
+        <groupId>com.google.guava</groupId>
+        <artifactId>guava</artifactId>
+-        <version>11.0.2</version>
++        <version>17.0</version>
+
+
+ commons-cli
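Editor's note: the hunks above all track the same Guava 11 -> 17 API break. The following is a minimal, self-contained sketch of the replacement APIs, not part of the patch; the class name is illustrative only.

    import java.io.IOException;
    import java.io.InputStream;
    import java.io.OutputStream;
    import java.util.concurrent.TimeUnit;

    import com.google.common.base.Stopwatch;
    import com.google.common.io.ByteStreams;

    public class Guava17MigrationSketch {
        public static void main(String[] args) throws IOException {
            // new Stopwatch().start(), elapsedMillis() and elapsedTime() are gone;
            // use the static factory plus elapsed(TimeUnit) instead.
            Stopwatch sw = Stopwatch.createStarted();

            // LimitInputStream was removed; ByteStreams.limit() wraps a stream
            // so that at most N bytes can be read from it.
            InputStream limited = ByteStreams.limit(System.in, 16);

            // NullOutputStream was removed; ByteStreams.nullOutputStream()
            // discards everything written to it.
            OutputStream sink = ByteStreams.nullOutputStream();
            ByteStreams.copy(limited, sink);

            System.out.println("copied in " + sw.elapsed(TimeUnit.MILLISECONDS) + " ms");
        }
    }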
diff --git a/hadoop-hdfs-site.xml b/hadoop-hdfs-site.xml
new file mode 100644
index 0000000..2e543b0
--- /dev/null
+++ b/hadoop-hdfs-site.xml
@@ -0,0 +1,67 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+  <property>
+    <name>dfs.replication</name>
+    <value>1</value>
+  </property>
+
+  <property>
+    <name>dfs.safemode.extension</name>
+    <value>0</value>
+  </property>
+
+  <property>
+    <name>dfs.safemode.min.datanodes</name>
+    <value>1</value>
+  </property>
+
+  <property>
+    <name>hadoop.tmp.dir</name>
+    <value>/var/lib/hadoop-hdfs/${user.name}</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.name.dir</name>
+    <value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/namenode</value>
+  </property>
+
+  <property>
+    <name>dfs.namenode.checkpoint.dir</name>
+    <value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/secondarynamenode</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.data.dir</name>
+    <value>file:///var/lib/hadoop-hdfs/${user.name}/dfs/datanode</value>
+  </property>
+
+  <property>
+    <name>dfs.http.address</name>
+    <value>0.0.0.0:50070</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.address</name>
+    <value>0.0.0.0:50010</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.http.address</name>
+    <value>0.0.0.0:50075</value>
+  </property>
+
+  <property>
+    <name>dfs.datanode.ipc.address</name>
+    <value>0.0.0.0:50020</value>
+  </property>
+
+</configuration>
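A quick way to sanity-check these settings once the file is installed as /etc/hadoop/hdfs-site.xml; this is illustrative only and not part of the packaging:

    # Print the values the daemons will actually resolve from the config
    hdfs getconf -confKey dfs.replication
    hdfs getconf -confKey dfs.namenode.name.dir

    # Format the namenode once before the very first start (run as the hdfs user)
    sudo -u hdfs hdfs namenode -format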
diff --git a/hadoop-hdfs.service.template b/hadoop-hdfs.service.template
new file mode 100644
index 0000000..bca5c5f
--- /dev/null
+++ b/hadoop-hdfs.service.template
@@ -0,0 +1,37 @@
+[Unit]
+Description=The Hadoop DAEMON daemon
+After=network.target
+After=NetworkManager.target
+
+[Service]
+Type=forking
+EnvironmentFile=-/etc/sysconfig/hadoop-hdfs
+EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
+ExecStart=/usr/sbin/hadoop-daemon.sh start DAEMON
+ExecStop=/usr/sbin/hadoop-daemon.sh stop DAEMON
+User=hdfs
+Group=hadoop
+PIDFile=/var/run/hadoop-hdfs/hadoop-hdfs-DAEMON.pid
+LimitNOFILE=32768
+LimitNPROC=65536
+
+#######################################
+# Note: Below are cgroup options
+#######################################
+#Slice=
+#CPUAccounting=true
+#CPUShares=1024
+
+#MemoryAccounting=true
+#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
+
+#BlockIOAccounting=true
+#BlockIOWeight=??
+#BlockIODeviceWeight=??
+#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
+
+#DeviceAllow=
+#DevicePolicy=auto|closed|strict
+
+[Install]
+WantedBy=multi-user.target
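For reference, the spec's %install section later expands this template once per daemon; the effect is roughly the following sketch (the daemon name and target path are illustrative):

    # DAEMON is substituted per service, e.g. namenode, datanode, journalnode
    sed -e 's|DAEMON|namenode|g' hadoop-hdfs.service.template \
        > /usr/lib/systemd/system/hadoop-namenode.service
    systemctl daemon-reload
    systemctl enable --now hadoop-namenode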
diff --git a/hadoop-httpfs.sysconfig b/hadoop-httpfs.sysconfig
new file mode 100644
index 0000000..63c953c
--- /dev/null
+++ b/hadoop-httpfs.sysconfig
@@ -0,0 +1,5 @@
+CATALINA_BASE=/usr/share/hadoop/httpfs/tomcat
+CATALINA_HOME=/usr/share/hadoop/httpfs/tomcat
+CATALINA_TMPDIR=/var/cache/hadoop-httpfs
+
+CATALINA_OPTS="-Dhttpfs.home.dir=/usr -Dhttpfs.config.dir=/etc/hadoop -Dhttpfs.log.dir=/var/log/hadoop-httpfs -Dhttpfs.temp.dir=/var/cache/hadoop-httpfs -Dhttpfs.admin.port=14001 -Dhttpfs.http.port=14000"
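These settings feed the systemd tomcat@ instance the spec wires up below. A hedged smoke test, assuming the hadoop-httpfs package is installed and HDFS is running:

    systemctl start tomcat@httpfs
    # HttpFS exposes the WebHDFS REST API on the port set via -Dhttpfs.http.port
    curl "http://localhost:14000/webhdfs/v1/?op=LISTSTATUS&user.name=hdfs"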
diff --git a/hadoop-jni-library-loading.patch b/hadoop-jni-library-loading.patch
new file mode 100644
index 0000000..bd88dfa
--- /dev/null
+++ b/hadoop-jni-library-loading.patch
@@ -0,0 +1,32 @@
+diff --git a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
+index 5667d98..c0106ce 100644
+--- a/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
++++ b/hadoop-common-project/hadoop-common/src/main/java/org/apache/hadoop/util/NativeCodeLoader.java
+@@ -46,15 +46,20 @@
+ LOG.debug("Trying to load the custom-built native-hadoop library...");
+ }
+ try {
+- System.loadLibrary("hadoop");
++ System.load("/usr/lib64/hadoop/libhadoop.so");
+ LOG.debug("Loaded the native-hadoop library");
+ nativeCodeLoaded = true;
+- } catch (Throwable t) {
+- // Ignore failure to load
+- if(LOG.isDebugEnabled()) {
+- LOG.debug("Failed to load native-hadoop with error: " + t);
+- LOG.debug("java.library.path=" +
+- System.getProperty("java.library.path"));
++ } catch (Throwable t64) {
++ LOG.debug("Failed to load 64-bit native-hadoop with error: " + t64);
++ try {
++ System.load("/usr/lib/hadoop/libhadoop.so");
++ LOG.debug("Loaded the native-hadoop library");
++ nativeCodeLoaded = true;
++ } catch (Throwable t32) {
++ // Ignore failure to load
++ if(LOG.isDebugEnabled()) {
++ LOG.debug("Failed to load 32-bit native-hadoop with error: " + t32);
++ }
+ }
+ }
+
diff --git a/hadoop-layout.sh b/hadoop-layout.sh
new file mode 100644
index 0000000..7801fc8
--- /dev/null
+++ b/hadoop-layout.sh
@@ -0,0 +1,29 @@
+export HADOOP_PREFIX=/usr
+export HADOOP_COMMON_HOME=/usr
+export HADOOP_COMMON_DIR=share/hadoop/common
+export HADOOP_COMMON_LIB_JARS_DIR=share/hadoop/common/lib
+export HADOOP_COMMON_LIB_NATIVE_DIR=lib/hadoop
+export HADOOP_CONF_DIR=/etc/hadoop
+export HADOOP_LIBEXEC_DIR=/usr/libexec
+
+export HADOOP_HDFS_HOME=$HADOOP_PREFIX
+export HDFS_DIR=share/hadoop/hdfs
+export HDFS_LIB_JARS_DIR=share/hadoop/hdfs/lib
+export HADOOP_PID_DIR=/var/run/hadoop-hdfs
+export HADOOP_LOG_DIR=/var/log/hadoop-hdfs
+export HADOOP_IDENT_STRING=hdfs
+
+export HADOOP_YARN_HOME=$HADOOP_PREFIX
+export YARN_DIR=share/hadoop/yarn
+export YARN_LIB_JARS_DIR=share/hadoop/yarn/lib
+export YARN_PID_DIR=/var/run/hadoop-yarn
+export YARN_LOG_DIR=/var/log/hadoop-yarn
+export YARN_CONF_DIR=/etc/hadoop
+export YARN_IDENT_STRING=yarn
+
+export HADOOP_MAPRED_HOME=$HADOOP_PREFIX
+export MAPRED_DIR=share/hadoop/mapreduce
+export MAPRED_LIB_JARS_DIR=share/hadoop/mapreduce/lib
+export HADOOP_MAPRED_PID_DIR=/var/run/hadoop-mapreduce
+export HADOOP_MAPRED_LOG_DIR=/var/log/hadoop-mapreduce
+export HADOOP_MAPRED_IDENT_STRING=mapred
diff --git a/hadoop-mapred-site.xml b/hadoop-mapred-site.xml
new file mode 100644
index 0000000..4be352f
--- /dev/null
+++ b/hadoop-mapred-site.xml
@@ -0,0 +1,37 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+  <property>
+    <name>mapred.job.tracker</name>
+    <value>localhost:8021</value>
+  </property>
+
+  <property>
+    <name>mapreduce.framework.name</name>
+    <value>yarn</value>
+  </property>
+
+  <property>
+    <description>To set the value of tmp directory for map and reduce tasks.</description>
+    <name>mapreduce.task.tmp.dir</name>
+    <value>/var/cache/hadoop-mapreduce/${user.name}/tasks</value>
+  </property>
+
+</configuration>
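With mapreduce.framework.name set to yarn, jobs are submitted to YARN rather than the legacy jobtracker. An illustrative smoke test; the jar path is an assumption about where the hadoop-mapreduce-examples subpackage installs the examples jar:

    hadoop jar /usr/share/java/hadoop/hadoop-mapreduce-examples.jar pi 2 100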
diff --git a/hadoop-mapreduce.service.template b/hadoop-mapreduce.service.template
new file mode 100644
index 0000000..fb90804
--- /dev/null
+++ b/hadoop-mapreduce.service.template
@@ -0,0 +1,37 @@
+[Unit]
+Description=The Hadoop DAEMON daemon
+After=network.target
+After=NetworkManager.target
+
+[Service]
+Type=forking
+EnvironmentFile=-/etc/sysconfig/hadoop-mapreduce
+EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
+ExecStart=/usr/sbin/mr-jobhistory-daemon.sh start DAEMON
+ExecStop=/usr/sbin/mr-jobhistory-daemon.sh stop DAEMON
+User=mapred
+Group=hadoop
+PIDFile=/var/run/hadoop-mapreduce/mapred-mapred-DAEMON.pid
+LimitNOFILE=32768
+LimitNPROC=65536
+
+#######################################
+# Note: Below are cgroup options
+#######################################
+#Slice=
+#CPUAccounting=true
+#CPUShares=1024
+
+#MemoryAccounting=true
+#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
+
+#BlockIOAccounting=true
+#BlockIOWeight=??
+#BlockIODeviceWeight=??
+#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
+
+#DeviceAllow=
+#DevicePolicy=auto|closed|strict
+
+[Install]
+WantedBy=multi-user.target
diff --git a/hadoop-maven.patch b/hadoop-maven.patch
new file mode 100644
index 0000000..0026ae3
--- /dev/null
+++ b/hadoop-maven.patch
@@ -0,0 +1,44 @@
+diff --git a/hadoop-common-project/hadoop-common/pom.xml b/hadoop-common-project/hadoop-common/pom.xml
+index 7cf67a3..c090916 100644
+--- a/hadoop-common-project/hadoop-common/pom.xml
++++ b/hadoop-common-project/hadoop-common/pom.xml
+@@ -364,16 +364,6 @@
+
+
+
+-        <groupId>org.apache.maven.plugins</groupId>
+-        <artifactId>maven-surefire-plugin</artifactId>
+-        <configuration>
+-          <systemPropertyVariables>
+-            <startKdc>${startKdc}</startKdc>
+-            <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
+-          </systemPropertyVariables>
+-        </configuration>
+-      </plugin>
+-      <plugin>
+        <groupId>org.apache.avro</groupId>
+        <artifactId>avro-maven-plugin</artifactId>
+
+@@ -480,6 +470,10 @@
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-surefire-plugin</artifactId>
+        <configuration>
++          <systemPropertyVariables>
++            <startKdc>${startKdc}</startKdc>
++            <kdc.resource.dir>${kdc.resource.dir}</kdc.resource.dir>
++          </systemPropertyVariables>
+
+
+ listener
+diff --git a/pom.xml b/pom.xml
+index 13dbf49..ad84034 100644
+--- a/pom.xml
++++ b/pom.xml
+@@ -387,6 +387,7 @@ xsi:schemaLocation="http://maven.apache.org/POM/4.0.0 http://maven.apache.org/xs
+
+        <groupId>org.apache.maven.plugins</groupId>
+        <artifactId>maven-javadoc-plugin</artifactId>
++        <version>2.8.1</version>
+ false
+
+
diff --git a/hadoop-netty-3-Final.patch b/hadoop-netty-3-Final.patch
new file mode 100644
index 0000000..7980e21
--- /dev/null
+++ b/hadoop-netty-3-Final.patch
@@ -0,0 +1,31 @@
+diff --git a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+index 9b267fe..0ce916d 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
++++ b/hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal/pom.xml
+@@ -38,12 +38,10 @@ http://maven.apache.org/xsd/maven-4.0.0.xsd">
+
+
+
+-
+
+        <groupId>org.jboss.netty</groupId>
+        <artifactId>netty</artifactId>
+-        <version>3.2.4.Final</version>
++        <version>3.9.3.Final</version>
+
+
+
+diff --git a/hadoop-project/pom.xml b/hadoop-project/pom.xml
+index b315e2b..a9da3aa 100644
+--- a/hadoop-project/pom.xml
++++ b/hadoop-project/pom.xml
+@@ -462,7 +462,7 @@
+
+        <groupId>io.netty</groupId>
+        <artifactId>netty</artifactId>
+-        <version>3.6.2.Final</version>
++        <version>3.9.3.Final</version>
+
+
+
diff --git a/hadoop-no-download-tomcat.patch b/hadoop-no-download-tomcat.patch
new file mode 100644
index 0000000..6ea06d7
--- /dev/null
+++ b/hadoop-no-download-tomcat.patch
@@ -0,0 +1,58 @@
+diff --git a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+index d01a32f..9ebc494 100644
+--- a/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
++++ b/hadoop-hdfs-project/hadoop-hdfs-httpfs/pom.xml
+@@ -523,53 +523,6 @@
+ maven-antrun-plugin
+
+
+- dist
+-
+- run
+-
+- package
+-
+-
+-
+-
+-
+-
+-
+-
+-
+- cd "${project.build.directory}/tomcat.exp"
+- gzip -cd ../../downloads/apache-tomcat-${tomcat.version}.tar.gz | tar xf -
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+-
+ tar
+ package
+
diff --git a/hadoop-tomcat-users.xml b/hadoop-tomcat-users.xml
new file mode 100644
index 0000000..daa8e18
--- /dev/null
+++ b/hadoop-tomcat-users.xml
@@ -0,0 +1,49 @@
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
diff --git a/hadoop-tools.jar.patch b/hadoop-tools.jar.patch
new file mode 100644
index 0000000..b160ed7
--- /dev/null
+++ b/hadoop-tools.jar.patch
@@ -0,0 +1,32 @@
+diff --git a/hadoop-common-project/hadoop-annotations/pom.xml b/hadoop-common-project/hadoop-annotations/pom.xml
+index c3e1aa1..9042f73 100644
+--- a/hadoop-common-project/hadoop-annotations/pom.xml
++++ b/hadoop-common-project/hadoop-annotations/pom.xml
+@@ -48,11 +48,8 @@
+
+
+
+-      <groupId>jdk.tools</groupId>
+-      <artifactId>jdk.tools</artifactId>
+-      <version>1.6</version>
+-      <scope>system</scope>
+-      <systemPath>${java.home}/../lib/tools.jar</systemPath>
++      <groupId>com.sun</groupId>
++      <artifactId>tools</artifactId>
+
+
+
+@@ -63,11 +60,8 @@
+
+
+
+-      <groupId>jdk.tools</groupId>
+-      <artifactId>jdk.tools</artifactId>
+-      <version>1.7</version>
+-      <scope>system</scope>
+-      <systemPath>${java.home}/../lib/tools.jar</systemPath>
++      <groupId>com.sun</groupId>
++      <artifactId>tools</artifactId>
+
+
+
diff --git a/hadoop-yarn-site.xml b/hadoop-yarn-site.xml
new file mode 100644
index 0000000..d4d273f
--- /dev/null
+++ b/hadoop-yarn-site.xml
@@ -0,0 +1,75 @@
+<?xml version="1.0"?>
+<?xml-stylesheet type="text/xsl" href="configuration.xsl"?>
+
+<configuration>
+
+  <property>
+    <name>yarn.nodemanager.aux-services</name>
+    <value>mapreduce_shuffle</value>
+  </property>
+
+  <property>
+    <name>yarn.nodemanager.aux-services.mapreduce_shuffle.class</name>
+    <value>org.apache.hadoop.mapred.ShuffleHandler</value>
+  </property>
+
+  <property>
+    <name>yarn.dispatcher.exit-on-error</name>
+    <value>true</value>
+  </property>
+
+  <property>
+    <description>List of directories to store localized files in.</description>
+    <name>yarn.nodemanager.local-dirs</name>
+    <value>/var/cache/hadoop-yarn/${user.name}/nm-local-dir</value>
+  </property>
+
+  <property>
+    <description>Where to store container logs.</description>
+    <name>yarn.nodemanager.log-dirs</name>
+    <value>/var/log/hadoop-yarn/containers</value>
+  </property>
+
+  <property>
+    <description>Classpath for typical applications.</description>
+    <name>yarn.application.classpath</name>
+    <value>
+      $HADOOP_CONF_DIR,$HADOOP_COMMON_HOME/$HADOOP_COMMON_DIR/*,
+      $HADOOP_COMMON_HOME/$HADOOP_COMMON_LIB_JARS_DIR/*,
+      $HADOOP_HDFS_HOME/$HDFS_DIR/*,$HADOOP_HDFS_HOME/$HDFS_LIB_JARS_DIR/*,
+      $HADOOP_MAPRED_HOME/$MAPRED_DIR/*,
+      $HADOOP_MAPRED_HOME/$MAPRED_LIB_JARS_DIR/*,
+      $HADOOP_YARN_HOME/$YARN_DIR/*,$HADOOP_YARN_HOME/$YARN_LIB_JARS_DIR/*
+    </value>
+  </property>
+
+</configuration>
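A couple of read-only YARN CLI calls that are handy for checking that the nodemanager registered the mapreduce_shuffle aux-service and picked up the directories above; illustrative only:

    yarn node -list -all        # nodemanagers known to the resourcemanager
    yarn application -list      # running and accepted applications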
diff --git a/hadoop-yarn.service.template b/hadoop-yarn.service.template
new file mode 100644
index 0000000..00e53f4
--- /dev/null
+++ b/hadoop-yarn.service.template
@@ -0,0 +1,37 @@
+[Unit]
+Description=The Hadoop DAEMON daemon
+After=network.target
+After=NetworkManager.target
+
+[Service]
+Type=forking
+EnvironmentFile=-/etc/sysconfig/hadoop-yarn
+EnvironmentFile=-/etc/sysconfig/hadoop-DAEMON
+ExecStart=/usr/sbin/yarn-daemon.sh start DAEMON
+ExecStop=/usr/sbin/yarn-daemon.sh stop DAEMON
+User=yarn
+Group=hadoop
+PIDFile=/var/run/hadoop-yarn/yarn-yarn-DAEMON.pid
+LimitNOFILE=32768
+LimitNPROC=65536
+
+#######################################
+# Note: Below are cgroup options
+#######################################
+#Slice=
+#CPUAccounting=true
+#CPUShares=1024
+
+#MemoryAccounting=true
+#TBD: MemoryLimit=bytes, MemorySoftLimit=bytes
+
+#BlockIOAccounting=true
+#BlockIOWeight=??
+#BlockIODeviceWeight=??
+#TBD: BlockIOReadBandwidth=bytes, BlockIOWriteBandwidth=bytes
+
+#DeviceAllow=
+#DevicePolicy=auto|closed|strict
+
+[Install]
+WantedBy=multi-user.target
diff --git a/hadoop.logrotate b/hadoop.logrotate
new file mode 100644
index 0000000..e722f00
--- /dev/null
+++ b/hadoop.logrotate
@@ -0,0 +1,8 @@
+/var/log/hadoop-NAME/*.log
+{
+ missingok
+ copytruncate
+ compress
+ weekly
+ rotate 52
+}
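The spec below generates one copy of this file per component, replacing NAME with hdfs, httpfs, yarn or mapreduce; the result can be checked with logrotate's debug (dry-run) mode, shown here for illustration:

    logrotate --debug /etc/logrotate.d/hadoop-hdfs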
diff --git a/hadoop.spec b/hadoop.spec
new file mode 100644
index 0000000..4a197f3
--- /dev/null
+++ b/hadoop.spec
@@ -0,0 +1,1305 @@
+%global _hardened_build 1
+
+# libhdfs is only supported on Intel architectures at the moment.
+%ifarch %ix86 x86_64
+%global package_libhdfs 1
+%else
+%global package_libhdfs 0
+%endif
+
+%global commit 9e2ef43a240fb0f603d8c384e501daec11524510
+%global shortcommit %(c=%{commit}; echo ${c:0:7})
+
+%global hadoop_version %{version}
+%global hdfs_services hadoop-zkfc.service hadoop-datanode.service hadoop-secondarynamenode.service hadoop-namenode.service hadoop-journalnode.service
+%global mapreduce_services hadoop-historyserver.service
+%global yarn_services hadoop-proxyserver.service hadoop-resourcemanager.service hadoop-nodemanager.service hadoop-timelineserver.service
+
+# Filter out undesired provides and requires
+%global __requires_exclude_from ^%{_libdir}/%{name}/libhadoop.so$
+%global __provides_exclude_from ^%{_libdir}/%{name}/.*$
+
+%bcond_with javadoc
+
+Name: hadoop
+Version: 2.4.1
+Release: 16%{?dist}
+Summary: A software platform for processing vast amounts of data
+# The BSD license file is missing
+# https://issues.apache.org/jira/browse/HADOOP-9849
+License: ASL 2.0 and BSD
+URL: http://hadoop.apache.org
+Source0: https://github.com/apache/hadoop-common/archive/%{commit}/%{name}-%{version}-%{shortcommit}.tar.gz
+Source1: %{name}-layout.sh
+Source2: %{name}-hdfs.service.template
+Source3: %{name}-mapreduce.service.template
+Source4: %{name}-yarn.service.template
+Source6: %{name}.logrotate
+Source8: %{name}-core-site.xml
+Source9: %{name}-hdfs-site.xml
+Source10: %{name}-mapred-site.xml
+Source11: %{name}-yarn-site.xml
+Source12: %{name}-httpfs.sysconfig
+Source13: hdfs-create-dirs
+Source14: %{name}-tomcat-users.xml
+# This patch includes the following upstream tickets:
+# https://issues.apache.org/jira/browse/HADOOP-9613
+# https://issues.apache.org/jira/browse/HDFS-5411
+# https://issues.apache.org/jira/browse/HADOOP-10068
+# https://issues.apache.org/jira/browse/HADOOP-10075
+# https://issues.apache.org/jira/browse/HADOOP-10076
+Patch0: %{name}-fedora-integration.patch
+# Fedora packaging guidelines for JNI library loading
+Patch2: %{name}-jni-library-loading.patch
+# Clean up warnings with maven 3.0.5
+Patch3: %{name}-maven.patch
+# Don't download tomcat
+Patch4: %{name}-no-download-tomcat.patch
+# Use dlopen to find libjvm.so
+Patch5: %{name}-dlopen-libjvm.patch
+# Update to Guava 17.0
+Patch7: %{name}-guava.patch
+# Update to Netty 3.9.3.Final
+Patch8: %{name}-netty-3-Final.patch
+# Replace the problematic system-scoped tools.jar dependency
+Patch9: %{name}-tools.jar.patch
+# Workaround for bz1012059
+Patch10: %{name}-build.patch
+# Fix Java detection on ppc64le
+Patch11: %{name}-2.4.1-cmake-java-ppc64le.patch
+# Build with hard-float on ARMv7
+Patch12: %{name}-armhfp.patch
+
+# fix Jersey1 support
+Patch13: hadoop-2.4.1-jersey1.patch
+# fix java8 doclint
+Patch14: hadoop-2.4.1-disable-doclint.patch
+# fix exception org.jets3t.service.S3ServiceException is never thrown in body of corresponding try statement
+Patch15: hadoop-2.4.1-jets3t0.9.3.patch
+# add some servlet3.1 missing methods
+Patch16: hadoop-2.4.1-servlet-3.1-api.patch
+# Adapt to the new BookKeeper ZkUtils API
+Patch17: hadoop-2.4.1-new-bookkeeper.patch
+
+# This is not a real BR; it is here because the rawhide shift to the eclipse
+# aether packages caused a transitive dependency to no longer be pulled in.
+BuildRequires: aether
+
+BuildRequires: ant
+BuildRequires: antlr-tool
+BuildRequires: aopalliance
+BuildRequires: apache-commons-beanutils
+BuildRequires: apache-commons-cli
+BuildRequires: apache-commons-codec
+BuildRequires: apache-commons-collections
+BuildRequires: apache-commons-configuration
+BuildRequires: apache-commons-daemon
+BuildRequires: apache-commons-el
+BuildRequires: apache-commons-io
+BuildRequires: apache-commons-lang
+BuildRequires: apache-commons-logging
+BuildRequires: apache-commons-math
+BuildRequires: apache-commons-net
+BuildRequires: apache-rat-plugin
+BuildRequires: atinject
+BuildRequires: avalon-framework
+BuildRequires: avalon-logkit
+BuildRequires: avro
+BuildRequires: avro-maven-plugin
+BuildRequires: bookkeeper-java
+BuildRequires: cglib
+BuildRequires: checkstyle
+BuildRequires: chrpath
+BuildRequires: cmake
+BuildRequires: ecj >= 1:4.2.1-6
+BuildRequires: fuse-devel
+BuildRequires: fusesource-pom
+BuildRequires: geronimo-jms
+BuildRequires: glassfish-jaxb
+BuildRequires: glassfish-jsp
+BuildRequires: glassfish-jsp-api
+BuildRequires: google-guice
+BuildRequires: grizzly
+BuildRequires: guava
+BuildRequires: guice-servlet
+BuildRequires: hamcrest
+BuildRequires: hawtjni
+BuildRequires: hsqldb
+BuildRequires: httpcomponents-client
+BuildRequires: httpcomponents-core
+BuildRequires: istack-commons
+BuildRequires: jackson
+BuildRequires: jakarta-commons-httpclient
+BuildRequires: java-base64
+BuildRequires: java-devel
+BuildRequires: java-xmlbuilder
+BuildRequires: javamail
+BuildRequires: javapackages-tools
+BuildRequires: jdiff
+BuildRequires: jersey1
+BuildRequires: jersey1-contribs
+BuildRequires: jets3t
+BuildRequires: jettison
+BuildRequires: jetty8
+BuildRequires: jsch
+BuildRequires: json_simple
+BuildRequires: jspc
+BuildRequires: jsr-305
+BuildRequires: jsr-311
+BuildRequires: junit
+BuildRequires: jzlib
+BuildRequires: leveldbjni
+BuildRequires: groovy18
+BuildRequires: log4j12
+BuildRequires: maven-antrun-plugin
+BuildRequires: maven-assembly-plugin
+BuildRequires: maven-clean-plugin
+BuildRequires: maven-dependency-plugin
+BuildRequires: maven-enforcer-plugin
+BuildRequires: maven-invoker-plugin
+BuildRequires: maven-local
+BuildRequires: maven-plugin-build-helper
+BuildRequires: maven-plugin-exec
+BuildRequires: maven-plugin-plugin
+BuildRequires: maven-release-plugin
+BuildRequires: maven-remote-resources-plugin
+BuildRequires: maven-shade-plugin
+BuildRequires: maven-war-plugin
+BuildRequires: metrics
+BuildRequires: mockito
+BuildRequires: native-maven-plugin
+BuildRequires: netty3
+BuildRequires: objectweb-asm
+BuildRequires: objenesis >= 1.2-16
+BuildRequires: openssl-devel
+BuildRequires: paranamer
+BuildRequires: protobuf-compiler
+BuildRequires: protobuf-java
+BuildRequires: relaxngDatatype
+BuildRequires: servlet3
+BuildRequires: slf4j
+BuildRequires: snappy-devel
+BuildRequires: snappy-java
+BuildRequires: systemd
+BuildRequires: tomcat
+BuildRequires: tomcat-el-3.0-api
+BuildRequires: tomcat-log4j
+BuildRequires: tomcat-servlet-3.1-api
+BuildRequires: txw2
+BuildRequires: xmlenc
+BuildRequires: znerd-oss-parent
+BuildRequires: zookeeper-java > 3.4.5-15
+# For tests
+BuildRequires: jersey1-test-framework
+
+%description
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+%package client
+Summary: Libraries for Apache Hadoop clients
+BuildArch: noarch
+Requires: %{name}-common = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
+Requires: %{name}-mapreduce = %{version}-%{release}
+Requires: %{name}-yarn = %{version}-%{release}
+
+%description client
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package provides libraries for Apache Hadoop clients.
+
+%package common
+Summary: Common files needed by Apache Hadoop daemons
+BuildArch: noarch
+Requires: /usr/sbin/useradd
+
+# These are required to satisfy the classpath symlinks
+Requires: antlr-tool
+Requires: apache-commons-beanutils
+Requires: avalon-framework
+Requires: avalon-logkit
+Requires: checkstyle
+Requires: coreutils
+Requires: geronimo-jms
+Requires: glassfish-jaxb
+Requires: glassfish-jsp
+Requires: glassfish-jsp-api
+Requires: istack-commons
+Requires: jakarta-commons-httpclient
+Requires: java-base64
+Requires: java-xmlbuilder
+Requires: javamail
+Requires: jettison
+Requires: jetty8
+Requires: jsr-311
+Requires: mockito
+Requires: nc6
+Requires: objectweb-asm
+Requires: objenesis
+Requires: paranamer
+Requires: relaxngDatatype
+Requires: servlet3
+Requires: snappy-java
+Requires: txw2
+Requires: which
+
+%description common
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package contains common files and utilities needed by other Apache
+Hadoop modules.
+
+%package common-native
+Summary: The native Apache Hadoop library file
+Requires: %{name}-common = %{version}-%{release}
+
+%description common-native
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package contains the native-hadoop library
+
+%if %{package_libhdfs}
+%package devel
+Summary: Headers for Apache Hadoop
+Requires: libhdfs%{?_isa} = %{version}-%{release}
+
+%description devel
+Header files for Apache Hadoop's hdfs library and other utilities
+%endif
+
+%package hdfs
+Summary: The Apache Hadoop Distributed File System
+BuildArch: noarch
+Requires: apache-commons-daemon-jsvc
+Requires: %{name}-common = %{version}-%{release}
+Requires(pre): %{name}-common = %{version}-%{release}
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description hdfs
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+The Hadoop Distributed File System (HDFS) is the primary storage system
+used by Apache Hadoop applications.
+
+%if %{package_libhdfs}
+%package hdfs-fuse
+Summary: Allows mounting of Apache Hadoop HDFS
+Requires: fuse
+Requires: libhdfs%{?_isa} = %{version}-%{release}
+Requires: %{name}-common = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
+Requires: %{name}-mapreduce = %{version}-%{release}
+Requires: %{name}-yarn = %{version}-%{release}
+
+%description hdfs-fuse
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package provides tools that allow HDFS to be mounted as a standard
+file system through fuse.
+%endif
+
+%package httpfs
+Summary: Provides web access to HDFS
+BuildArch: noarch
+Requires: apache-commons-dbcp
+Requires: ecj >= 1:4.2.1-6
+Requires: json_simple
+Requires: tomcat
+Requires: tomcat-lib
+Requires: tomcat-native
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description httpfs
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package provides a server that provides HTTP REST API support for
+the complete FileSystem/FileContext interface in HDFS.
+
+# Javadoc generation takes too many resources and fails on most
+# architectures, so only generate it on Intel 64-bit
+%ifarch x86_64
+%if %{with javadoc}
+%package javadoc
+Summary: Javadoc for Apache Hadoop
+BuildArch: noarch
+
+%description javadoc
+This package contains the API documentation for %{name}.
+%endif
+%endif
+
+%if %{package_libhdfs}
+%package -n libhdfs
+Summary: The Apache Hadoop Filesystem Library
+Requires: %{name}-hdfs = %{version}-%{release}
+Requires: lzo
+
+%description -n libhdfs
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package provides the Apache Hadoop Filesystem Library.
+%endif
+
+%package mapreduce
+Summary: Apache Hadoop MapReduce (MRv2)
+BuildArch: noarch
+Requires(pre): %{name}-common = %{version}-%{release}
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description mapreduce
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package provides Apache Hadoop MapReduce (MRv2).
+
+%package mapreduce-examples
+Summary: Apache Hadoop MapReduce (MRv2) examples
+BuildArch: noarch
+Requires: hsqldb
+
+%description mapreduce-examples
+This package contains mapreduce examples.
+
+%package maven-plugin
+Summary: Apache Hadoop maven plugin
+BuildArch: noarch
+Requires: maven
+
+%description maven-plugin
+The Apache Hadoop maven plugin
+
+%package tests
+Summary: Apache Hadoop test resources
+BuildArch: noarch
+Requires: %{name}-common = %{version}-%{release}
+Requires: %{name}-hdfs = %{version}-%{release}
+Requires: %{name}-mapreduce = %{version}-%{release}
+Requires: %{name}-yarn = %{version}-%{release}
+
+%description tests
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package contains test related resources for Apache Hadoop.
+
+%package yarn
+Summary: Apache Hadoop YARN
+BuildArch: noarch
+Requires(pre): %{name}-common = %{version}-%{release}
+Requires: %{name}-mapreduce = %{version}-%{release}
+Requires: aopalliance
+Requires: atinject
+Requires: hamcrest
+Requires: hawtjni
+Requires: leveldbjni
+Requires(post): systemd
+Requires(preun): systemd
+Requires(postun): systemd
+
+%description yarn
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package contains Apache Hadoop YARN.
+
+%package yarn-security
+Summary: The ability to run Apache Hadoop YARN in secure mode
+Requires: %{name}-yarn = %{version}-%{release}
+
+%description yarn-security
+Apache Hadoop is a framework that allows for the distributed processing of
+large data sets across clusters of computers using simple programming models.
+It is designed to scale up from single servers to thousands of machines, each
+offering local computation and storage.
+
+This package contains files needed to run Apache Hadoop YARN in secure mode.
+
+%prep
+%setup -qn %{name}-common-%{commit}
+%patch0 -p1
+%patch2 -p1
+%patch3 -p1
+%patch4 -p1
+%if %{package_libhdfs}
+%patch5 -p1
+%endif
+%if 0%{?fedora} >= 21
+%patch7 -p1
+%patch8 -p1
+%endif
+%patch9 -p1
+%patch10 -p1
+%patch11 -p1
+%patch12 -p1
+%patch13 -p1
+%patch14 -p1
+%patch15 -p1
+%patch16 -p1
+%patch17 -p1
+
+%pom_xpath_set "pom:properties/pom:protobuf.version" 2.6.1 hadoop-project
+
+%if 0%{?fedora} < 21
+# The hadoop test suite needs classes from the zookeeper test suite.
+# We need to modify the deps to use the pom for the zookeeper-test jar
+fix_zookeeper_test()
+{
+%pom_xpath_remove "pom:project/pom:dependencies/pom:dependency[pom:artifactId='zookeeper' and pom:scope='test']/pom:type" $1
+%pom_xpath_inject "pom:project/pom:dependencies/pom:dependency[pom:artifactId='zookeeper' and pom:scope='test']" "
+  <exclusions>
+    <exclusion>
+      <groupId>org.jboss.netty</groupId>
+      <artifactId>netty</artifactId>
+    </exclusion>
+  </exclusions>
+ " $1
+%pom_xpath_set "pom:project/pom:dependencies/pom:dependency[pom:artifactId='zookeeper' and pom:scope='test']/pom:artifactId" zookeeper-test $1
+}
+
+fix_zookeeper_test hadoop-common-project/hadoop-common
+fix_zookeeper_test hadoop-hdfs-project/hadoop-hdfs
+fix_zookeeper_test hadoop-hdfs-project/hadoop-hdfs-nfs
+fix_zookeeper_test hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-resourcemanager
+
+sed -i "s/:pom//" hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client/pom.xml
+fix_zookeeper_test hadoop-yarn-project/hadoop-yarn/hadoop-yarn-client
+%endif
+
+
+# Remove the maven-site-plugin. It's not needed
+%pom_remove_plugin :maven-site-plugin
+%pom_remove_plugin :maven-site-plugin hadoop-common-project/hadoop-auth
+%pom_remove_plugin :maven-site-plugin hadoop-hdfs-project/hadoop-hdfs-httpfs
+
+# Remove the findbugs-maven-plugin. It's not needed and isn't available
+%pom_remove_plugin :findbugs-maven-plugin hadoop-hdfs-project/hadoop-hdfs-httpfs
+%pom_remove_plugin :findbugs-maven-plugin hadoop-hdfs-project/hadoop-hdfs/src/contrib/bkjournal
+%pom_remove_plugin :findbugs-maven-plugin hadoop-mapreduce-project/hadoop-mapreduce-client
+%pom_remove_plugin :findbugs-maven-plugin hadoop-mapreduce-project/hadoop-mapreduce-examples
+%pom_remove_plugin :findbugs-maven-plugin hadoop-mapreduce-project
+%pom_remove_plugin :findbugs-maven-plugin hadoop-project-dist
+%pom_remove_plugin :findbugs-maven-plugin hadoop-project
+%pom_remove_plugin :findbugs-maven-plugin hadoop-tools/hadoop-rumen
+%pom_remove_plugin :findbugs-maven-plugin hadoop-tools/hadoop-streaming
+%pom_remove_plugin :findbugs-maven-plugin hadoop-yarn-project/hadoop-yarn
+%pom_remove_plugin :findbugs-maven-plugin hadoop-yarn-project
+
+# Remove the maven-project-info-reports plugin. It's not needed and isn't available
+%pom_remove_plugin :maven-project-info-reports-plugin hadoop-common-project/hadoop-auth
+%pom_remove_plugin :maven-project-info-reports-plugin hadoop-hdfs-project/hadoop-hdfs-httpfs
+%pom_remove_plugin :maven-project-info-reports-plugin hadoop-project
+
+# Remove the maven-checkstyle plugin. It's not needed and isn't available
+%pom_remove_plugin :maven-checkstyle-plugin hadoop-project-dist
+%pom_remove_plugin :maven-checkstyle-plugin hadoop-project
+%pom_remove_plugin :maven-checkstyle-plugin hadoop-tools/hadoop-distcp
+
+# Disable the hadoop-minikdc module due to missing deps
+%pom_disable_module hadoop-minikdc hadoop-common-project
+%pom_remove_dep :hadoop-minikdc hadoop-common-project/hadoop-auth
+%pom_remove_dep :hadoop-minikdc hadoop-project
+%pom_remove_dep :hadoop-minikdc hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests
+rm -f hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/client/TestKerberosAuthenticator.java
+rm -f hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
+rm -f hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestAltKerberosAuthenticationHandler.java
+rm -f hadoop-common-project/hadoop-auth/src/test/java/org/apache/hadoop/security/authentication/server/TestKerberosAuthenticationHandler.java
+rm -f hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-tests/src/test/java/org/apache/hadoop/yarn/server/TestContainerManagerSecurity.java
+
+# Add dependencies for timeline service
+%pom_add_dep org.iq80.leveldb:leveldb hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice
+%pom_add_dep org.fusesource.hawtjni:hawtjni-runtime hadoop-yarn-project/hadoop-yarn/hadoop-yarn-server/hadoop-yarn-server-applicationhistoryservice
+
+# Fix scope on hadoop-common:test-jar
+%pom_xpath_set "pom:project/pom:dependencies/pom:dependency[pom:artifactId='hadoop-common' and pom:type='test-jar']/pom:scope" test hadoop-tools/hadoop-openstack
+
+%if 0%{?fedora} > 20
+# Modify asm version to version 5.0.2 and groupId to org.ow2.asm
+%pom_xpath_set "pom:project/pom:dependencyManagement/pom:dependencies/pom:dependency[pom:artifactId='asm']/pom:version" 5.0.2 hadoop-project
+%pom_xpath_set "pom:project/pom:dependencyManagement/pom:dependencies/pom:dependency[pom:artifactId='asm']/pom:groupId" org.ow2.asm hadoop-project
+%endif
+
+
+# War files we don't want
+%mvn_package :%{name}-auth-examples __noinstall
+%mvn_package :%{name}-hdfs-httpfs __noinstall
+
+# Parts we don't want to distribute
+%mvn_package :%{name}-assemblies __noinstall
+
+# Workaround for bz1012059
+%mvn_package :%{name}-project-dist __noinstall
+
+# Create separate file lists for packaging
+%mvn_package :::tests: %{name}-tests
+%mvn_package :%{name}-*-tests::{}: %{name}-tests
+%mvn_package :%{name}-client*::{}: %{name}-client
+%mvn_package :%{name}-hdfs*::{}: %{name}-hdfs
+%mvn_package :%{name}-mapreduce-examples*::{}: %{name}-mapreduce-examples
+%mvn_package :%{name}-mapreduce*::{}: %{name}-mapreduce
+%mvn_package :%{name}-archives::{}: %{name}-mapreduce
+%mvn_package :%{name}-datajoin::{}: %{name}-mapreduce
+%mvn_package :%{name}-distcp::{}: %{name}-mapreduce
+%mvn_package :%{name}-extras::{}: %{name}-mapreduce
+%mvn_package :%{name}-gridmix::{}: %{name}-mapreduce
+%mvn_package :%{name}-openstack::{}: %{name}-mapreduce
+%mvn_package :%{name}-rumen::{}: %{name}-mapreduce
+%mvn_package :%{name}-sls::{}: %{name}-mapreduce
+%mvn_package :%{name}-streaming::{}: %{name}-mapreduce
+%mvn_package :%{name}-pipes::{}: %{name}-mapreduce
+%mvn_package :%{name}-tools*::{}: %{name}-mapreduce
+%mvn_package :%{name}-maven-plugins::{}: %{name}-maven-plugin
+%mvn_package :%{name}-minicluster::{}: %{name}-tests
+%mvn_package :%{name}-yarn*::{}: %{name}-yarn
+
+# Jar files that need to be overridden due to installation location
+%mvn_file :%{name}-common::tests: %{name}/%{name}-common
+
+%build
+%ifnarch x86_64
+opts="-j"
+%else
+%if %{without javadoc}
+opts="-j"
+%endif
+%endif
+# increase JVM memory limits to avoid OOM during build
+%ifarch s390x ppc64le
+export MAVEN_OPTS="-Xms2048M -Xmx4096M"
+%endif
+%mvn_build $opts -- -Drequire.snappy=true -Dcontainer-executor.conf.dir=%{_sysconfdir}/%{name} -Pdist,native -DskipTests -DskipTest -DskipIT
+
+# This takes a long time to run, so comment out for now
+#%%check
+#mvn-rpmbuild -Pdist,native test -Dmaven.test.failure.ignore=true
+
+%install
+# Copy all jar files except those generated by the build
+# $1 the src directory
+# $2 the dest directory
+copy_dep_jars()
+{
+ find $1 ! -name "hadoop-*.jar" -name "*.jar" | xargs install -m 0644 -t $2
+ rm -f $2/tools-*.jar
+}
+
+# Create symlinks for jars from the build
+# $1 the location to create the symlink
+link_hadoop_jars()
+{
+ for f in `ls hadoop-* | grep -v tests | grep -v examples`
+ do
+ n=`echo $f | sed "s/-%{version}//"`
+ if [ -L $1/$n ]
+ then
+ continue
+ elif [ -e $1/$f ]
+ then
+ rm -f $1/$f $1/$n
+ fi
+ p=`find %{buildroot}/%{_jnidir} %{buildroot}/%{_javadir}/%{name} -name $n | sed "s#%{buildroot}##"`
+ %{__ln_s} $p $1/$n
+ done
+}
+
+%mvn_install
+
+install -d -m 0755 %{buildroot}/%{_libdir}/%{name}
+install -d -m 0755 %{buildroot}/%{_includedir}/%{name}
+install -d -m 0755 %{buildroot}/%{_jnidir}/%{name}
+
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/client/lib
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/common/lib
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/hdfs/lib
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/hdfs/webapps
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/mapreduce/lib
+install -d -m 0755 %{buildroot}/%{_datadir}/%{name}/yarn/lib
+install -d -m 0755 %{buildroot}/%{_sysconfdir}/%{name}/tomcat/Catalina/localhost
+install -d -m 0755 %{buildroot}/%{_sysconfdir}/logrotate.d
+install -d -m 0755 %{buildroot}/%{_sysconfdir}/sysconfig
+install -d -m 0755 %{buildroot}/%{_tmpfilesdir}
+install -d -m 0755 %{buildroot}/%{_sharedstatedir}/%{name}-hdfs
+install -d -m 0755 %{buildroot}/%{_sharedstatedir}/tomcats/httpfs
+install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-yarn
+install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-httpfs/temp
+install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-httpfs/work
+install -d -m 0755 %{buildroot}/%{_var}/cache/%{name}-mapreduce
+install -d -m 0755 %{buildroot}/%{_var}/log/%{name}-yarn
+install -d -m 0755 %{buildroot}/%{_var}/log/%{name}-hdfs
+install -d -m 0755 %{buildroot}/%{_var}/log/%{name}-httpfs
+install -d -m 0755 %{buildroot}/%{_var}/log/%{name}-mapreduce
+install -d -m 0755 %{buildroot}/%{_var}/run/%{name}-yarn
+install -d -m 0755 %{buildroot}/%{_var}/run/%{name}-hdfs
+install -d -m 0755 %{buildroot}/%{_var}/run/%{name}-mapreduce
+
+basedir='%{name}-dist/target/%{name}-%{hadoop_version}'
+
+for dir in bin libexec sbin
+do
+ cp -arf $basedir/$dir %{buildroot}/%{_prefix}
+done
+
+# This binary is obsolete and conflicts with qt-devel
+rm -rf %{buildroot}/%{_bindir}/rcc
+
+# We don't care about this
+rm -f %{buildroot}/%{_bindir}/test-container-executor
+
+# Duplicate files
+rm -f %{buildroot}/%{_sbindir}/hdfs-config.sh
+
+cp -arf $basedir/etc/* %{buildroot}/%{_sysconfdir}
+cp -arf $basedir/lib/native/libhadoop.so* %{buildroot}/%{_libdir}/%{name}
+chrpath --delete %{buildroot}/%{_libdir}/%{name}/*
+%if %{package_libhdfs}
+cp -arf $basedir/include/hdfs.h %{buildroot}/%{_includedir}/%{name}
+cp -arf $basedir/lib/native/libhdfs.so* %{buildroot}/%{_libdir}
+chrpath --delete %{buildroot}/%{_libdir}/libhdfs*
+cp -af hadoop-hdfs-project/hadoop-hdfs/target/native/main/native/fuse-dfs/fuse_dfs %{buildroot}/%{_bindir}
+chrpath --delete %{buildroot}/%{_bindir}/fuse_dfs
+%endif
+
+# Not needed since httpfs is deployed with existing systemd setup
+rm -f %{buildroot}/%{_sbindir}/httpfs.sh
+rm -f %{buildroot}/%{_libexecdir}/httpfs-config.sh
+rm -f %{buildroot}/%{_bindir}/httpfs-env.sh
+
+# Remove files with .cmd extension
+find %{buildroot} -name *.cmd | xargs rm -f
+
+# Modify hadoop-env.sh to point to correct locations for JAVA_HOME
+# and JSVC_HOME.
+sed -i "s|\${JAVA_HOME}|/usr/lib/jvm/jre|" %{buildroot}/%{_sysconfdir}/%{name}/%{name}-env.sh
+sed -i "s|\${JSVC_HOME}|/usr/bin|" %{buildroot}/%{_sysconfdir}/%{name}/%{name}-env.sh
+
+# Ensure the java provided DocumentBuilderFactory is used
+sed -i "s|\(HADOOP_OPTS.*=.*\)\$HADOOP_CLIENT_OPTS|\1 -Djavax.xml.parsers.DocumentBuilderFactory=com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl \$HADOOP_CLIENT_OPTS|" %{buildroot}/%{_sysconfdir}/%{name}/%{name}-env.sh
+echo "export YARN_OPTS=\"\$YARN_OPTS -Djavax.xml.parsers.DocumentBuilderFactory=com.sun.org.apache.xerces.internal.jaxp.DocumentBuilderFactoryImpl\"" >> %{buildroot}/%{_sysconfdir}/%{name}/yarn-env.sh
+
+# Workaround for bz1012059
+install -pm 644 hadoop-project-dist/pom.xml %{buildroot}/%{_mavenpomdir}/JPP.%{name}-%{name}-project-dist.pom
+%{__ln_s} %{_jnidir}/%{name}/hadoop-common.jar %{buildroot}/%{_datadir}/%{name}/common
+%{__ln_s} %{_javadir}/%{name}/hadoop-hdfs.jar %{buildroot}/%{_datadir}/%{name}/hdfs
+%{__ln_s} %{_javadir}/%{name}/hadoop-client.jar %{buildroot}/%{_datadir}/%{name}/client
+
+# client jar dependencies
+copy_dep_jars %{name}-client/target/%{name}-client-%{hadoop_version}/share/%{name}/client/lib %{buildroot}/%{_datadir}/%{name}/client/lib
+%{_bindir}/xmvn-subst %{buildroot}/%{_datadir}/%{name}/client/lib
+pushd %{name}-client/target/%{name}-client-%{hadoop_version}/share/%{name}/client/lib
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/client/lib
+popd
+pushd %{name}-client/target/%{name}-client-%{hadoop_version}/share/%{name}/client
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/client
+popd
+
+# common jar dependencies
+copy_dep_jars $basedir/share/%{name}/common/lib %{buildroot}/%{_datadir}/%{name}/common/lib
+%{_bindir}/xmvn-subst %{buildroot}/%{_datadir}/%{name}/common/lib
+pushd $basedir/share/%{name}/common
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/common
+popd
+for f in `ls %{buildroot}/%{_datadir}/%{name}/common/*.jar`
+do
+ echo "$f" | sed "s|%{buildroot}||" >> .mfiles
+done
+pushd $basedir/share/%{name}/common/lib
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/common/lib
+popd
+
+# hdfs jar dependencies
+copy_dep_jars $basedir/share/%{name}/hdfs/lib %{buildroot}/%{_datadir}/%{name}/hdfs/lib
+%{_bindir}/xmvn-subst %{buildroot}/%{_datadir}/%{name}/hdfs/lib
+%{__ln_s} %{_javadir}/%{name}/%{name}-hdfs-bkjournal.jar %{buildroot}/%{_datadir}/%{name}/hdfs/lib
+pushd $basedir/share/%{name}/hdfs
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/hdfs
+popd
+
+# httpfs
+# Create the webapp directory structure
+pushd %{buildroot}/%{_sharedstatedir}/tomcats/httpfs
+ %{__ln_s} %{_datadir}/%{name}/httpfs/tomcat/conf conf
+ %{__ln_s} %{_datadir}/%{name}/httpfs/tomcat/lib lib
+ %{__ln_s} %{_datadir}/%{name}/httpfs/tomcat/logs logs
+ %{__ln_s} %{_datadir}/%{name}/httpfs/tomcat/temp temp
+ %{__ln_s} %{_datadir}/%{name}/httpfs/tomcat/webapps webapps
+ %{__ln_s} %{_datadir}/%{name}/httpfs/tomcat/work work
+popd
+
+# Copy the tomcat configuration and overlay with specific configuration bits.
+# This is needed so the httpfs instance won't collide with a system running
+# tomcat
+for cfgfile in catalina.policy catalina.properties context.xml \
+ tomcat.conf web.xml server.xml logging.properties;
+do
+ cp -a %{_sysconfdir}/tomcat/$cfgfile %{buildroot}/%{_sysconfdir}/%{name}/tomcat
+done
+
+# Adjust, in place, the Tomcat configuration files delivered with the current
+# Fedora release. See BZ#1295968 for the rationale.
+sed -i -e 's/8005/${httpfs.admin.port}/g' -e 's/8080/${httpfs.http.port}/g' %{buildroot}/%{_sysconfdir}/%{name}/tomcat/server.xml
+sed -i -e 's/catalina.base/httpfs.log.dir/g' %{buildroot}/%{_sysconfdir}/%{name}/tomcat/logging.properties
+# Because of its permissions, only the root and tomcat users can read that
+# file, not the build user, so copying it from the system location would fail here.
+install -m 660 %{SOURCE14} %{buildroot}/%{_sysconfdir}/%{name}/tomcat/tomcat-users.xml
+# No longer needed: see above
+#install -m 664 %{name}-hdfs-project/%{name}-hdfs-httpfs/src/main/tomcat/ssl-server.xml %{buildroot}/%{_sysconfdir}/%{name}/tomcat
+
+# Copy the httpfs webapp
+cp -arf %{name}-hdfs-project/%{name}-hdfs-httpfs/target/webhdfs %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps
+
+# Tell tomcat to follow symlinks
+cat > %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps/webhdfs/META-INF/context.xml <<EOF
+<Context allowLinking="true">
+</Context>
+EOF
+
+# Remove the jars included in the webapp and create symlinks
+rm -f %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/tools*.jar
+rm -f %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib/tomcat-*.jar
+%{_bindir}/xmvn-subst %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib
+pushd %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat/webapps/webhdfs/WEB-INF/lib
+ link_hadoop_jars .
+popd
+
+pushd %{buildroot}/%{_datadir}/%{name}/httpfs/tomcat
+ %{__ln_s} %{_datadir}/tomcat/bin bin
+ %{__ln_s} %{_sysconfdir}/%{name}/tomcat conf
+ %{__ln_s} %{_datadir}/tomcat/lib lib
+ %{__ln_s} %{_var}/cache/%{name}-httpfs/temp temp
+ %{__ln_s} %{_var}/cache/%{name}-httpfs/work work
+ %{__ln_s} %{_var}/log/%{name}-httpfs logs
+popd
+
+# mapreduce jar dependencies
+copy_dep_jars $basedir/share/%{name}/mapreduce/lib %{buildroot}/%{_datadir}/%{name}/mapreduce/lib
+%{_bindir}/xmvn-subst %{buildroot}/%{_datadir}/%{name}/mapreduce/lib
+%{__ln_s} %{_javadir}/%{name}/%{name}-annotations.jar %{buildroot}/%{_datadir}/%{name}/mapreduce/lib
+pushd $basedir/share/%{name}/mapreduce
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/mapreduce
+popd
+
+# yarn jar dependencies
+copy_dep_jars $basedir/share/%{name}/yarn/lib %{buildroot}/%{_datadir}/%{name}/yarn/lib
+%{_bindir}/xmvn-subst %{buildroot}/%{_datadir}/%{name}/yarn/lib
+%{__ln_s} %{_javadir}/%{name}/%{name}-annotations.jar %{buildroot}/%{_datadir}/%{name}/yarn/lib
+pushd $basedir/share/%{name}/yarn
+ link_hadoop_jars %{buildroot}/%{_datadir}/%{name}/yarn
+popd
+
+# Install hdfs webapp bits
+cp -arf $basedir/share/hadoop/hdfs/webapps/* %{buildroot}/%{_datadir}/%{name}/hdfs/webapps
+
+# hadoop layout. Convert to appropriate lib location for 32 and 64 bit archs
+lib=$(echo %{?_libdir} | sed -e 's:/usr/\(.*\):\1:')
+if [ "$lib" = "%_libdir" ]; then
+ echo "_libdir is not located in /usr. Lib location is wrong"
+ exit 1
+fi
+sed -e "s|HADOOP_COMMON_LIB_NATIVE_DIR\s*=.*|HADOOP_COMMON_LIB_NATIVE_DIR=$lib/%{name}|" %{SOURCE1} > %{buildroot}/%{_libexecdir}/%{name}-layout.sh
+
+# Default config
+cp -f %{SOURCE8} %{buildroot}/%{_sysconfdir}/%{name}/core-site.xml
+cp -f %{SOURCE9} %{buildroot}/%{_sysconfdir}/%{name}/hdfs-site.xml
+cp -f %{SOURCE10} %{buildroot}/%{_sysconfdir}/%{name}/mapred-site.xml
+cp -f %{SOURCE11} %{buildroot}/%{_sysconfdir}/%{name}/yarn-site.xml
+
+# systemd configuration
+install -d -m 0755 %{buildroot}/%{_unitdir}/
+for service in %{hdfs_services} %{mapreduce_services} %{yarn_services}
+do
+ s=`echo $service | cut -d'-' -f 2 | cut -d'.' -f 1`
+ daemon=$s
+ if [[ "%{hdfs_services}" == *$service* ]]
+ then
+ src=%{SOURCE2}
+ elif [[ "%{mapreduce_services}" == *$service* ]]
+ then
+ src=%{SOURCE3}
+ elif [[ "%{yarn_services}" == *$service* ]]
+ then
+ if [[ "$s" == "timelineserver" ]]
+ then
+ daemon='historyserver'
+ fi
+ src=%{SOURCE4}
+ else
+ echo "Failed to determine type of service for $service"
+ exit 1
+ fi
+ sed -e "s|DAEMON|$daemon|g" $src > %{buildroot}/%{_unitdir}/%{name}-$s.service
+done
+
+cp -f %{SOURCE12} %{buildroot}/%{_sysconfdir}/sysconfig/tomcat@httpfs
+
+# Ensure /var/run directories are recreated on boot
+echo "d %{_var}/run/%{name}-yarn 0775 yarn hadoop -" > %{buildroot}/%{_tmpfilesdir}/%{name}-yarn.conf
+echo "d %{_var}/run/%{name}-hdfs 0775 hdfs hadoop -" > %{buildroot}/%{_tmpfilesdir}/%{name}-hdfs.conf
+echo "d %{_var}/run/%{name}-mapreduce 0775 mapred hadoop -" > %{buildroot}/%{_tmpfilesdir}/%{name}-mapreduce.conf
+
+# logrotate config
+for type in hdfs httpfs yarn mapreduce
+do
+ sed -e "s|NAME|$type|" %{SOURCE6} > %{buildroot}/%{_sysconfdir}/logrotate.d/%{name}-$type
+done
+sed -i "s|{|%{_var}/log/hadoop-hdfs/*.audit\n{|" %{buildroot}/%{_sysconfdir}/logrotate.d/%{name}-hdfs
+
+# hdfs init script
+install -m 755 %{SOURCE13} %{buildroot}/%{_sbindir}
+
+%pretrans -p <lua> hdfs
+path = "%{_datadir}/%{name}/hdfs/webapps"
+st = posix.stat(path)
+if st and st.type == "link" then
+ os.remove(path)
+end
+
+%pre common
+getent group hadoop >/dev/null || groupadd -r hadoop
+
+%pre hdfs
+getent group hdfs >/dev/null || groupadd -r hdfs
+getent passwd hdfs >/dev/null || /usr/sbin/useradd --comment "Apache Hadoop HDFS" --shell /sbin/nologin -M -r -g hdfs -G hadoop --home %{_sharedstatedir}/%{name}-hdfs hdfs
+
+%pre mapreduce
+getent group mapred >/dev/null || groupadd -r mapred
+getent passwd mapred >/dev/null || /usr/sbin/useradd --comment "Apache Hadoop MapReduce" --shell /sbin/nologin -M -r -g mapred -G hadoop --home %{_var}/cache/%{name}-mapreduce mapred
+
+%pre yarn
+getent group yarn >/dev/null || groupadd -r yarn
+getent passwd yarn >/dev/null || /usr/sbin/useradd --comment "Apache Hadoop Yarn" --shell /sbin/nologin -M -r -g yarn -G hadoop --home %{_var}/cache/%{name}-yarn yarn
+
+%preun hdfs
+%systemd_preun %{hdfs_services}
+
+%preun mapreduce
+%systemd_preun %{mapreduce_services}
+
+%preun yarn
+%systemd_preun %{yarn_services}
+
+%post common-native -p /sbin/ldconfig
+
+%post hdfs
+# Change the home directory for the hdfs user
+if [[ `getent passwd hdfs | cut -d: -f 6` != "%{_sharedstatedir}/%{name}-hdfs" ]]
+then
+ /usr/sbin/usermod -d %{_sharedstatedir}/%{name}-hdfs hdfs
+fi
+
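+# $1 is greater than 1 when this scriptlet runs as part of an upgrade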
+if [ $1 -gt 1 ]
+then
+ if [ -d %{_var}/cache/%{name}-hdfs ] && [ ! -L %{_var}/cache/%{name}-hdfs ]
+ then
+ # Move the existing hdfs data to the new location
+ mv -f %{_var}/cache/%{name}-hdfs/* %{_sharedstatedir}/%{name}-hdfs/
+ fi
+fi
+%systemd_post %{hdfs_services}
+
+%if %{package_libhdfs}
+%post -n libhdfs -p /sbin/ldconfig
+%endif
+
+%post mapreduce
+%systemd_post %{mapreduce_services}
+
+%post yarn
+%systemd_post %{yarn_services}
+
+%postun common-native -p /sbin/ldconfig
+
+%postun hdfs
+%systemd_postun_with_restart %{hdfs_services}
+
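+# $1 is 0 only when the package is being removed outright, not upgraded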
+if [ $1 -lt 1 ]
+then
+ # Remove the compatibility symlink
+ rm -f %{_var}/cache/%{name}-hdfs
+fi
+
+%if %{package_libhdfs}
+%postun -n libhdfs -p /sbin/ldconfig
+%endif
+
+%postun mapreduce
+%systemd_postun_with_restart %{mapreduce_services}
+
+%postun yarn
+%systemd_postun_with_restart %{yarn_services}
+
+%posttrans hdfs
+# Create a symlink to the new location for hdfs data in case the user changed
+# the configuration file and the new one isn't in place to point to the
+# correct location
+if [ ! -e %{_var}/cache/%{name}-hdfs ]
+then
+ %{__ln_s} %{_sharedstatedir}/%{name}-hdfs %{_var}/cache
+fi
+
+%files -f .mfiles-%{name}-client client
+%{_datadir}/%{name}/client
+
+%files -f .mfiles common
+%doc hadoop-dist/target/hadoop-%{hadoop_version}/share/doc/hadoop/common/*
+%config(noreplace) %{_sysconfdir}/%{name}/configuration.xsl
+%config(noreplace) %{_sysconfdir}/%{name}/core-site.xml
+%config(noreplace) %{_sysconfdir}/%{name}/%{name}-env.sh
+%config(noreplace) %{_sysconfdir}/%{name}/%{name}-metrics.properties
+%config(noreplace) %{_sysconfdir}/%{name}/%{name}-metrics2.properties
+%config(noreplace) %{_sysconfdir}/%{name}/%{name}-policy.xml
+%config(noreplace) %{_sysconfdir}/%{name}/log4j.properties
+%config(noreplace) %{_sysconfdir}/%{name}/slaves
+%config(noreplace) %{_sysconfdir}/%{name}/ssl-client.xml.example
+%config(noreplace) %{_sysconfdir}/%{name}/ssl-server.xml.example
+%dir %{_datadir}/%{name}
+%dir %{_datadir}/%{name}/common
+%{_datadir}/%{name}/common/lib
+%{_libexecdir}/%{name}-config.sh
+%{_libexecdir}/%{name}-layout.sh
+
+# Workaround for bz1012059
+%{_mavenpomdir}/JPP.%{name}-%{name}-project-dist.pom
+
+%{_bindir}/%{name}
+%{_sbindir}/%{name}-daemon.sh
+%{_sbindir}/%{name}-daemons.sh
+%{_sbindir}/start-all.sh
+%{_sbindir}/start-balancer.sh
+%{_sbindir}/start-dfs.sh
+%{_sbindir}/start-secure-dns.sh
+%{_sbindir}/stop-all.sh
+%{_sbindir}/stop-balancer.sh
+%{_sbindir}/stop-dfs.sh
+%{_sbindir}/stop-secure-dns.sh
+%{_sbindir}/slaves.sh
+
+%files common-native
+%{_libdir}/%{name}/libhadoop.*
+
+%if %{package_libhdfs}
+%files devel
+%{_includedir}/%{name}
+%{_libdir}/libhdfs.so
+%endif
+
+%files -f .mfiles-%{name}-hdfs hdfs
+%config(noreplace) %{_sysconfdir}/%{name}/hdfs-site.xml
+%{_datadir}/%{name}/hdfs
+%{_unitdir}/%{name}-datanode.service
+%{_unitdir}/%{name}-namenode.service
+%{_unitdir}/%{name}-journalnode.service
+%{_unitdir}/%{name}-secondarynamenode.service
+%{_unitdir}/%{name}-zkfc.service
+%{_libexecdir}/hdfs-config.sh
+%{_bindir}/hdfs
+%{_sbindir}/distribute-exclude.sh
+%{_sbindir}/refresh-namenodes.sh
+%{_sbindir}/hdfs-create-dirs
+%{_tmpfilesdir}/%{name}-hdfs.conf
+%config(noreplace) %attr(644, root, root) %{_sysconfdir}/logrotate.d/%{name}-hdfs
+%attr(0755,hdfs,hadoop) %dir %{_var}/run/%{name}-hdfs
+%attr(0755,hdfs,hadoop) %dir %{_var}/log/%{name}-hdfs
+%attr(0755,hdfs,hadoop) %dir %{_sharedstatedir}/%{name}-hdfs
+
+%if %{package_libhdfs}
+%files hdfs-fuse
+%attr(755,hdfs,hadoop) %{_bindir}/fuse_dfs
+%endif
+
+%files httpfs
+%config(noreplace) %{_sysconfdir}/sysconfig/tomcat@httpfs
+%config(noreplace) %{_sysconfdir}/%{name}/httpfs-env.sh
+%config(noreplace) %{_sysconfdir}/%{name}/httpfs-log4j.properties
+%config(noreplace) %{_sysconfdir}/%{name}/httpfs-signature.secret
+%config(noreplace) %{_sysconfdir}/%{name}/httpfs-site.xml
+%attr(-,tomcat,tomcat) %config(noreplace) %{_sysconfdir}/%{name}/tomcat/*.*
+%attr(0775,root,tomcat) %dir %{_sysconfdir}/%{name}/tomcat
+%attr(0775,root,tomcat) %dir %{_sysconfdir}/%{name}/tomcat/Catalina
+%attr(0775,root,tomcat) %dir %{_sysconfdir}/%{name}/tomcat/Catalina/localhost
+%{_datadir}/%{name}/httpfs
+%{_sharedstatedir}/tomcats/httpfs
+%config(noreplace) %attr(644, root, root) %{_sysconfdir}/logrotate.d/%{name}-httpfs
+%attr(0775,root,tomcat) %dir %{_var}/log/%{name}-httpfs
+%attr(0775,root,tomcat) %dir %{_var}/cache/%{name}-httpfs
+%attr(0775,root,tomcat) %dir %{_var}/cache/%{name}-httpfs/temp
+%attr(0775,root,tomcat) %dir %{_var}/cache/%{name}-httpfs/work
+
+%ifarch x86_64
+%if %{with javadoc}
+%files -f .mfiles-javadoc javadoc
+%doc hadoop-dist/target/hadoop-%{hadoop_version}/share/doc/hadoop/common/LICENSE.txt hadoop-dist/target/hadoop-%{hadoop_version}/share/doc/hadoop/common/NOTICE.txt
+%endif
+%endif
+
+%if %{package_libhdfs}
+%files -n libhdfs
+%doc hadoop-dist/target/hadoop-%{hadoop_version}/share/doc/hadoop/hdfs/LICENSE.txt
+%{_libdir}/libhdfs.so.*
+%endif
+
+%files -f .mfiles-%{name}-mapreduce mapreduce
+%config(noreplace) %{_sysconfdir}/%{name}/mapred-env.sh
+%config(noreplace) %{_sysconfdir}/%{name}/mapred-queues.xml.template
+%config(noreplace) %{_sysconfdir}/%{name}/mapred-site.xml
+%config(noreplace) %{_sysconfdir}/%{name}/mapred-site.xml.template
+%{_datadir}/%{name}/mapreduce
+%{_libexecdir}/mapred-config.sh
+%{_unitdir}/%{name}-historyserver.service
+%{_bindir}/mapred
+%{_sbindir}/mr-jobhistory-daemon.sh
+%{_tmpfilesdir}/%{name}-mapreduce.conf
+%config(noreplace) %attr(644, root, root) %{_sysconfdir}/logrotate.d/%{name}-mapreduce
+%attr(0755,mapred,hadoop) %dir %{_var}/run/%{name}-mapreduce
+%attr(0755,mapred,hadoop) %dir %{_var}/log/%{name}-mapreduce
+%attr(0755,mapred,hadoop) %dir %{_var}/cache/%{name}-mapreduce
+
+%files -f .mfiles-%{name}-mapreduce-examples mapreduce-examples
+
+%files -f .mfiles-%{name}-maven-plugin maven-plugin
+%doc hadoop-dist/target/hadoop-%{hadoop_version}/share/doc/hadoop/common/LICENSE.txt
+
+%files -f .mfiles-%{name}-tests tests
+
+%files -f .mfiles-%{name}-yarn yarn
+%config(noreplace) %{_sysconfdir}/%{name}/capacity-scheduler.xml
+%config(noreplace) %{_sysconfdir}/%{name}/yarn-env.sh
+%config(noreplace) %{_sysconfdir}/%{name}/yarn-site.xml
+%{_unitdir}/%{name}-nodemanager.service
+%{_unitdir}/%{name}-proxyserver.service
+%{_unitdir}/%{name}-resourcemanager.service
+%{_unitdir}/%{name}-timelineserver.service
+%{_libexecdir}/yarn-config.sh
+%{_datadir}/%{name}/yarn
+%{_bindir}/yarn
+%{_sbindir}/yarn-daemon.sh
+%{_sbindir}/yarn-daemons.sh
+%{_sbindir}/start-yarn.sh
+%{_sbindir}/stop-yarn.sh
+%{_tmpfilesdir}/%{name}-yarn.conf
+%config(noreplace) %attr(644, root, root) %{_sysconfdir}/logrotate.d/%{name}-yarn
+%attr(0755,yarn,hadoop) %dir %{_var}/run/%{name}-yarn
+%attr(0755,yarn,hadoop) %dir %{_var}/log/%{name}-yarn
+%attr(0755,yarn,hadoop) %dir %{_var}/cache/%{name}-yarn
+
+%files yarn-security
+%config(noreplace) %{_sysconfdir}/%{name}/container-executor.cfg
+# Permissions set per upstream guidelines: http://hadoop.apache.org/docs/current/hadoop-project-dist/hadoop-common/ClusterSetup.html#Configuration_in_Secure_Mode
+%attr(6050,root,yarn) %{_bindir}/container-executor
+
+%changelog
+* Sun May 8 2016 Peter Robinson 2.4.1-16
+- rebuild (aarch64)
+
+* Sat Feb 06 2016 Denis Arnaud 2.4.1-15
+- Rebuilt for new EclipseLink jersey1
+
+* Wed Feb 03 2016 Fedora Release Engineering - 2.4.1-14
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild
+
+* Sun Jan 31 2016 Denis Arnaud 2.4.1-13
+- Fixed the FTBFS on Fedora 24+
+
+* Sat Jan 09 2016 Denis Arnaud 2.4.1-12
+- Fix BZ#1295968: start of tomcat@httpfs
+
+* Wed Sep 09 2015 gil cattaneo 2.4.1-11
+- fix FTBFS RHBZ#1239555
+- remove all BuildRequires which have been installed by default
+
+* Fri Jul 10 2015 Mosaab Alzoubi - 2.4.1-10
+- Fix #1239555
+
+* Wed Jun 17 2015 Fedora Release Engineering - 2.4.1-9
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild
+
+* Tue Apr 21 2015 Peter Robinson 2.4.1-8
+- Fix building on ARMv7
+
+* Wed Mar 11 2015 Swapnil Kulkarni 2.4.1-7
+- Added groovy18 dependency
+
+* Sun Feb 15 2015 Peter Robinson 2.4.1-7
+- Update netty3 patch for 3.9.3
+
+* Mon Oct 27 2014 Robert Rati - 2.4.1-6
+- Changed commons-httpclient BR/R to jakarta-commons-httpclient
+- Changed commons-codec BR to apache-commons-codec
+
+* Fri Oct 10 2014 Dan Horák - 2.4.1-5
+- fix OOM during build on s390x and ppc64le (#1149295)
+- fix Java detection on ppc64le
+
+* Wed Oct 8 2014 Robert Rati - 2.4.1-4
+- Exclude asm3 as a runtime dependency
+- Removed explicit dependency on yarn from the mapreduce package
+- Added mapreduce dependency on yarn package
+
+* Mon Sep 29 2014 Robert Rati - 2.4.1-3
+- Rebuild
+
+* Sat Aug 16 2014 Fedora Release Engineering - 2.4.1-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_22_Mass_Rebuild
+
+* Tue Jul 15 2014 Robert Rati - 2.4.1-1
+- Update to upstream release 2.4.1
+- Fixed resolution of test jars
+
+* Thu Jun 26 2014 Robert Rati - 2.4.0-3
+- Fixed FTBFS (#1106748)
+- Update to build with guava 17.0
+
+* Sat Jun 07 2014 Fedora Release Engineering - 2.4.0-2
+- Rebuilt for https://fedoraproject.org/wiki/Fedora_21_Mass_Rebuild
+
+* Tue May 27 2014 Robert Rati - 2.4.0-1
+- Update to upstream release 2.4.0
+- Fix fedora conditionals for non-fedora systems (BZ1083135)
+- Conditionalize javadoc generation
+- Update BuildRequires
+
+* Fri Mar 28 2014 Michael Simacek - 2.2.0-7
+- Use Requires: java-headless rebuild (#1067528)
+
+* Mon Feb 17 2014 Timothy St. Clair - 2.2.0-6
+- Rebuild with modification to systemd initialization for tachyon support
+
+* Mon Feb 3 2014 Robert Rati - 2.2.0-5
+- Added json_simple dependency to httpfs package
+- Added default tomcat-users file
+- Fixed up file permissions and ownership for tomcat configuration
+- Conditionalize the zookeeper-test modes to < F21
+- Additional fix for netty3 compat package for >F20
+
+* Fri Jan 24 2014 Robert Rati - 2.2.0-4
+- Fixed 2 packages providing hadoop-yarn-server-tests (BZ1056521)
+- Package httpfs bits using tomcat@ service
+- Patches for jetty 9.1.0 and guava 15.0 on >F20
+- Use netty3 compat package for >F20
+- Moved limits configuration to systemd files
+- By default logrotate will keep 1 year of logs
+
+* Tue Dec 3 2013 Robert Rati - 2.2.0-3
+- Removed jline Requires
+
+* Tue Dec 3 2013 Robert Rati - 2.2.0-2
+- Changed provides filter to just filter the .so
+- Corrected naming of hadoop-common test jar
+- Removed jline BuildRequires
+- Moved pre/port install invocation of ldconfig to common-native
+- Added workaround for bz1023116
+
+* Wed Oct 23 2013 Robert Rati - 2.2.0-1
+- Update to upstream 2.2.0
+- New patch to open libjvm with dlopen
+- Conditionally compile libhdfs and deps for x86 only
+- Added BR on objenesis >= 1.2-16
+- Removed rpath from libhdfs
+- Removed unneeded header files from devel
+- Removed kfs removal patch
+
+* Thu Oct 10 2013 Robert Rati - 2.0.5-12
+- Removed workaround for BZ1015612
+- Filtered libhadoop provides/requires (BZ1017596)
+- Fixed symlink for hdfs-bkjournal
+- Moved libhdfs.so to devel package (BZ1017579)
+- Fixed symlink paths for hadoop jars (BZ1017568)
+- Added ownership of %{_datadir}/%{name}/hadoop/common
+
+* Mon Oct 7 2013 Robert Rati - 2.0.5-11
+- Workaround for BZ1015612
+- Added BuildRequires on gcc-g++ and make
+- Removed duplicated deps from common package
+
+* Thu Oct 3 2013 Robert Rati - 2.0.5-10
+- Added dependency on which
+- Added pom files for test jars
+- Removed workaround for BZ986909
+- Packaged additional test jars and pom files
+- Added workaround for bz1012059
+- Updated hdfs-create-dirs to format the namenode if it is not formatted
+- Spec cleanup
+
+* Fri Sep 13 2013 Robert Rati - 2.0.5-9
+- Removed rcc. It was obsolete and conflicted with qt-devel (BZ1003034)
+- Moved to xmvn-subst for jar dependency symlinks
+- Packaged test jars into test subpackage
+- hdfs subpackage contains the bkjournal jar
+- Created client subpackage
+- Moved libhdfs to %{_libdir} (BZ1003036)
+- Added dependency from libhdfs to hdfs (BZ1003039)
+
+* Wed Aug 28 2013 Robert Rati - 2.0.5-8
+- Removed systemPath, version, and scope from tools.jar dependency definition
+
+* Tue Aug 20 2013 Robert Rati - 2.0.5-7
+- Changed hdfs subpackage from hadoop-libhdfs to libhdfs
+- Don't build any packages on arm architectures
+
+* Thu Aug 08 2013 Robert Rati - 2.0.5-6
+- Made libhdfs dependencies arch specific
+- Moved docs into common
+
+* Wed Aug 07 2013 Robert Rati - 2.0.5-5
+- Corrected license info
+- Removed duplicate Requires
+- Removed rpath references
+- Corrected some permissions
+
+* Tue Aug 06 2013 Robert Rati - 2.0.5-4
+- Native bits only built/packaged for intel architectures
+- javadoc only generated on 64-bit intel
+- Updated URL
+
+* Wed Jul 24 2013 Robert Rati - 2.0.5-3
+- Removed gmaven as BR
+
+* Wed Jul 24 2013 Robert Rati - 2.0.5-2
+- Fixed packaging for JNI jar/libraries
+- Made packages noarch that are architecture independent
+- Added cglib as a BuildRequires
+- Removed explicit lib Requires
+- Convert to XMvn macros
+- Packaged the maven plugin
+- Convert to jetty9 jspc compiler
+- Removed xmlenc workaround
+
+* Tue Jul 16 2013 Robert Rati - 2.0.5-1
+- Initial packaging
diff --git a/hdfs-create-dirs b/hdfs-create-dirs
new file mode 100644
index 0000000..0f0d7d8
--- /dev/null
+++ b/hdfs-create-dirs
@@ -0,0 +1,66 @@
+#!/bin/bash
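+# Create the standard HDFS, mapreduce and yarn directories inside HDFS,
+# starting (and formatting, if needed) the namenode first when it is not running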
+
+hdfs_dirs="/user /var/log /tmp"
+mapred_dirs="/tmp/hadoop-yarn/staging /tmp/hadoop-yarn/staging/history /tmp/hadoop-yarn/staging/history/done /tmp/hadoop-yarn/staging/history/done_intermediate"
+yarn_dirs="/tmp/hadoop-yarn /var/log/hadoop-yarn"
+
+# Must be run as root
+if [[ $EUID -ne 0 ]]
+then
+ echo "This must be run as root" 1>&2
+ exit 1
+fi
+
+# Start the namenode if it isn't running
+started=0
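+# 'systemctl status' exits non-zero when the unit is not active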
+systemctl status hadoop-namenode > /dev/null 2>&1
+rc=$?
+if [[ $rc -gt 0 ]]
+then
+ # Format the namenode if it hasn't been formatted
+ runuser hdfs -s /bin/bash /bin/bash -c "hdfs namenode -format -nonInteractive" > /dev/null 2>&1
+ if [[ $? -eq 0 ]]
+ then
+ echo "Formatted the Hadoop namenode"
+ fi
+
+ echo "Starting the Hadoop namenode"
+ systemctl start hadoop-namenode > /dev/null 2>&1
+ rc=$?
+ started=1
+fi
+
+if [[ $rc -ne 0 ]]
+then
+ echo "The Hadoop namenode failed to start"
+ exit 1
+fi
+
+for dir in $hdfs_dirs $yarn_dirs $mapred_dirs
+do
+ echo "Creating directory $dir"
+ runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -mkdir -p $dir" > /dev/null 2>&1
+done
+
+echo "Setting permissions on /tmp"
+runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chmod 1777 /tmp" > /dev/null 2>&1
+
+for dir in $mapred_dirs
+do
+ echo "Setting permissions and ownership for $dir"
+ runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chown mapred:mapred $dir" > /dev/null 2>&1
+ runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chmod 1777 $dir" > /dev/null 2>&1
+done
+
+for dir in $yarn_dirs
+do
+ echo "Setting permissions and ownership for $dir"
+ runuser hdfs -s /bin/bash /bin/bash -c "hadoop fs -chown yarn:mapred $dir" > /dev/null 2>&1
+done
+
+# Stop the namenode if we started it
+if [[ $started -gt 0 ]]
+then
+ echo "Stopping the Hadoop namenode"
+ systemctl stop hadoop-namenode > /dev/null 2>&1
+fi
diff --git a/sources b/sources
new file mode 100644
index 0000000..a7a3c02
--- /dev/null
+++ b/sources
@@ -0,0 +1 @@
+52fb8f4c28bc35067f54a4f28bb7596c hadoop-2.4.1-9e2ef43.tar.gz