Deprecate the package

The client part of the package will be provided in RHEL 7.1.
Keep a virtual ceph package to allow a clean update path for
both 7.0 and 7.1 users.
Boris Ranto 2015-01-16 15:17:32 +01:00
parent 120ad0de01
commit 50eb9a7e7b
7 changed files with 23 additions and 996 deletions

ceph-call-rados-shutdown-explicitly.patch

@@ -1,31 +0,0 @@
From 11995b329045341c17553269267cfd3688a51b0f Mon Sep 17 00:00:00 2001
From: Dan Mick <dan.mick@redhat.com>
Date: Wed, 10 Dec 2014 13:19:53 -0800
Subject: [PATCH 2/2] Call Rados.shutdown() explicitly before exit
This is mostly a demonstration of good behavior, as the resources will
be reclaimed on exit anyway.
Signed-off-by: Dan Mick <dan.mick@redhat.com>
(cherry picked from commit b038e8fbf9103cc42a4cde734b3ee601af6019ea)
---
src/ceph.in | 6 +++++-
1 file changed, 5 insertions(+), 1 deletion(-)
diff --git a/src/ceph.in b/src/ceph.in
index 82c9085..c5b97ef 100755
--- a/src/ceph.in
+++ b/src/ceph.in
@@ -841,4 +841,8 @@ def main():
return 0
if __name__ == '__main__':
- sys.exit(main())
+ retval = main()
+ # shutdown explicitly; Rados() does not
+ if cluster_handle:
+ cluster_handle.shutdown()
+ sys.exit(retval)
--
1.9.3
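
The patch above (now dropped) changed src/ceph.in so that the CLI shuts its cluster handle down explicitly before exiting instead of relying on interpreter teardown. A minimal standalone sketch of the same pattern, assuming the python-rados bindings are available; the configuration path is illustrative:

import sys
import rados

cluster_handle = None

def main():
    global cluster_handle
    # open a handle to the cluster (path is illustrative)
    cluster_handle = rados.Rados(conffile='/etc/ceph/ceph.conf')
    cluster_handle.connect()
    # ... issue commands against the cluster here ...
    return 0

if __name__ == '__main__':
    retval = main()
    # shut down explicitly; Rados() no longer does this on garbage collection
    if cluster_handle:
        cluster_handle.shutdown()
    sys.exit(retval)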

ceph-common-do-not-unlock-rwlock-on-destruction.patch

@@ -1,27 +0,0 @@
From 922247e25bfb64bdbe43dd8133881aaf405b8a0b Mon Sep 17 00:00:00 2001
From: Boris Ranto <branto@redhat.com>
Date: Mon, 8 Dec 2014 08:36:37 +0100
Subject: [PATCH] Backport pull request #2937 to firefly
---
src/common/RWLock.h | 4 +++-
1 file changed, 3 insertions(+), 1 deletion(-)
diff --git a/src/common/RWLock.h b/src/common/RWLock.h
index f901ac0..4699b66 100644
--- a/src/common/RWLock.h
+++ b/src/common/RWLock.h
@@ -36,7 +36,9 @@ public:
}
virtual ~RWLock() {
- pthread_rwlock_unlock(&L);
+ // The following check is racy but we are about to destroy
+ // the object and we assume that there are no other users.
+ //assert(!is_locked()); -- hacky backport, no is_locked in firefly
pthread_rwlock_destroy(&L);
}
--
2.1.3

ceph-google-gperftools.patch

@@ -1,13 +0,0 @@
--- ceph-0.80.5/src/perfglue/heap_profiler.cc.orig 2014-08-15 16:05:00.161794290 +0200
+++ ceph-0.80.5/src/perfglue/heap_profiler.cc 2014-08-15 16:05:04.691794305 +0200
@@ -12,8 +12,8 @@
*
*/
-#include <google/heap-profiler.h>
-#include <google/malloc_extension.h>
+#include <gperftools/heap-profiler.h>
+#include <gperftools/malloc_extension.h>
#include "heap_profiler.h"
#include "common/environment.h"
#include "common/LogClient.h"

ceph-no-format-security.patch

@@ -1,11 +0,0 @@
--- ceph-0.80.5/src/test/Makefile.am.orig 2014-08-15 16:30:18.831799418 +0200
+++ ceph-0.80.5/src/test/Makefile.am 2014-08-15 16:23:17.758464663 +0200
@@ -642,7 +642,7 @@ bin_DEBUGPROGRAMS += ceph_test_librbd
if LINUX
ceph_test_librbd_fsx_SOURCES = test/librbd/fsx.c
ceph_test_librbd_fsx_LDADD = $(LIBRBD) $(LIBRADOS) -lm
-ceph_test_librbd_fsx_CFLAGS = ${AM_CFLAGS} -Wno-format
+ceph_test_librbd_fsx_CFLAGS = ${AM_CFLAGS}
bin_DEBUGPROGRAMS += ceph_test_librbd_fsx
endif

ceph-remove-rados-py-destructor.patch

@@ -1,61 +0,0 @@
From e00270b51896f168d5013b7dc92ec7f8b9e19da3 Mon Sep 17 00:00:00 2001
From: Dan Mick <dan.mick@redhat.com>
Date: Wed, 10 Dec 2014 13:19:16 -0800
Subject: [PATCH 1/2] rados.py: remove Rados.__del__(); it just causes problems
Recent versions of Python contain a change to thread shutdown that
causes ceph to hang on exit; see http://bugs.python.org/issue21963.
As it turns out, this is relatively easy to avoid by not spawning
threads on exit, as Rados.__del__() will certainly do by calling
shutdown(); I suspect, but haven't proven, that the problem is
that shutdown() tries to start() a threading.Thread() that never
makes it all the way back to signal start().
Also add a PendingReleaseNote and extra doc comments to clarify.
Fixes: #8797
Signed-off-by: Dan Mick <dan.mick@redhat.com>
(cherry picked from commit 5ba9b8f21f8010c59dd84a0ef2acfec99e4b048f)
Conflicts:
PendingReleaseNotes
---
src/pybind/rados.py | 8 +++-----
1 file changed, 3 insertions(+), 5 deletions(-)
diff --git a/src/pybind/rados.py b/src/pybind/rados.py
index 0fbd10e..ec68919 100644
--- a/src/pybind/rados.py
+++ b/src/pybind/rados.py
@@ -246,7 +246,8 @@ Rados object in state %s." % (self.state))
def shutdown(self):
"""
- Disconnects from the cluster.
+ Disconnects from the cluster. Call this explicitly when a
+ Rados.connect()ed object is no longer used.
"""
if (self.__dict__.has_key("state") and self.state != "shutdown"):
run_in_thread(self.librados.rados_shutdown, (self.cluster,))
@@ -260,9 +261,6 @@ Rados object in state %s." % (self.state))
self.shutdown()
return False
- def __del__(self):
- self.shutdown()
-
def version(self):
"""
Get the version number of the ``librados`` C library.
@@ -410,7 +408,7 @@ Rados object in state %s." % (self.state))
def connect(self, timeout=0):
"""
- Connect to the cluster.
+ Connect to the cluster. Use shutdown() to release resources.
"""
self.require_state("configuring")
ret = run_in_thread(self.librados.rados_connect, (self.cluster,),
--
1.9.3
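
With Rados.__del__() removed, callers must release the connection themselves, either by calling shutdown() directly or through the context-manager support whose __exit__() (visible in the diff above) calls shutdown(). A hedged sketch of both forms, assuming python-rados is installed and that __enter__() connects, as it does in current python-rados; the configuration path is illustrative:

import rados

# explicit form: connect, use the handle, and always shut down
cluster = rados.Rados(conffile='/etc/ceph/ceph.conf')
cluster.connect()
try:
    print(cluster.version())      # version() is defined in rados.py above
finally:
    cluster.shutdown()            # do not rely on garbage collection

# context-manager form: __exit__() calls shutdown() when the block ends
with rados.Rados(conffile='/etc/ceph/ceph.conf') as cluster:
    print(cluster.version())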

ceph.spec

@@ -1,888 +1,59 @@
%bcond_with ocf
%if ! (0%{?fedora} > 12 || 0%{?rhel} > 5)
%{!?python_sitelib: %global python_sitelib %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")}
%{!?python_sitearch: %global python_sitearch %(%{__python} -c "from distutils.sysconfig import get_python_lib; print(get_python_lib(1))")}
%endif
#################################################################################
# common
#################################################################################
Name: ceph
Version: 0.80.7
Release: 3%{?dist}
Version: 0.80.5
Release: 10%{?dist}
Epoch: 1
Summary: User space components of the Ceph file system
Summary: Virtual package that allows a clean ceph update path for all EPEL users
License: GPLv2
Group: System Environment/Base
URL: http://ceph.com/
Source0: http://ceph.com/download/%{name}-%{version}.tar.bz2
Patch0: ceph-google-gperftools.patch
Patch1: ceph-no-format-security.patch
Patch2: ceph-common-do-not-unlock-rwlock-on-destruction.patch
Patch3: ceph-remove-rados-py-destructor.patch
Patch4: ceph-call-rados-shutdown-explicitly.patch
Requires: librbd1 = %{epoch}:%{version}-%{release}
Requires: librados2 = %{epoch}:%{version}-%{release}
Requires: libcephfs1 = %{epoch}:%{version}-%{release}
Requires: ceph-common = %{epoch}:%{version}-%{release}
Requires: python-rados = %{epoch}:%{version}-%{release}
Requires: python-rbd = %{epoch}:%{version}-%{release}
Requires: python-cephfs = %{epoch}:%{version}-%{release}
Requires: python
Requires: python-argparse
Requires: python-requests
# For ceph-rest-api
Requires: python-flask
%if ! ( 0%{?rhel} && 0%{?rhel} <= 6 )
Requires: xfsprogs
%endif
Requires: cryptsetup
Requires: parted
Requires: util-linux
%ifnarch s390 s390x
Requires: hdparm
%endif
# For initscript
Requires: redhat-lsb-core
Requires(post): binutils
BuildRequires: make
BuildRequires: gcc-c++
BuildRequires: libtool
BuildRequires: boost-devel
BuildRequires: bzip2-devel
BuildRequires: libedit-devel
BuildRequires: perl
BuildRequires: gdbm
BuildRequires: pkgconfig
BuildRequires: python
BuildRequires: python-nose
BuildRequires: python-argparse
BuildRequires: libaio-devel
BuildRequires: libcurl-devel
BuildRequires: libxml2-devel
BuildRequires: libuuid-devel
BuildRequires: libblkid-devel >= 2.17
BuildRequires: libudev-devel
BuildRequires: leveldb-devel > 1.2
%if ! ( 0%{?rhel} && 0%{?rhel} <= 6 )
BuildRequires: xfsprogs-devel
%endif
# No yasm dependency for now, it causes selinux issues
#BuildRequires: yasm
%if 0%{?rhel} || 0%{?centos} || 0%{?fedora}
BuildRequires: snappy-devel
%endif
#################################################################################
# specific
#################################################################################
%if ! 0%{?rhel}
BuildRequires: sharutils
%endif
%if 0%{defined suse_version}
%if 0%{?suse_version} > 1210
Requires: gptfdisk
BuildRequires: gperftools-devel
%else
Requires: scsirastools
BuildRequires: google-perftools-devel
%endif
Recommends: logrotate
BuildRequires: %insserv_prereq
BuildRequires: mozilla-nss-devel
BuildRequires: keyutils-devel
BuildRequires: libatomic-ops-devel
BuildRequires: fdupes
%else
Requires: gdisk
BuildRequires: nss-devel
BuildRequires: keyutils-libs-devel
BuildRequires: libatomic_ops-devel
Requires: gdisk
Requires(post): chkconfig
Requires(preun):chkconfig
Requires(preun):initscripts
%ifnarch ppc ppc64 s390 s390x
BuildRequires: gperftools-devel
%endif
%endif
Obsoletes: ceph-common < 1:0.80.5-10
Obsoletes: ceph-devel < 1:0.80.5-10
Obsoletes: ceph-fuse < 1:0.80.5-10
Obsoletes: ceph-libs-compat < 1:0.80.5-10
Obsoletes: ceph-radosgw < 1:0.80.5-10
Obsoletes: ceph-test < 1:0.80.5-10
Obsoletes: cephfs-java < 1:0.80.5-10
Obsoletes: libcephfs1 < 1:0.80.5-10
Obsoletes: libcephfs1_jni1 < 1:0.80.5-10
Obsoletes: librados2 < 1:0.80.5-10
Obsoletes: librbd1 < 1:0.80.5-10
Obsoletes: python-ceph < 1:0.80.5-10
Obsoletes: rbd-fuse < 1:0.80.5-10
Obsoletes: rest-bench < 1:0.80.5-10
Obsoletes: ceph-debuginfo < 1:0.80.5-10
%description
Ceph is a massively scalable, open-source, distributed
storage system that runs on commodity hardware and delivers object,
block and file system storage.
This is an empty virtual package. The sole purpose of this package
is to allow a clean update of the Ceph distributed object store.
#################################################################################
# packages
#################################################################################
%package -n ceph-common
Summary: Ceph Common
Group: System Environment/Base
Requires: librbd1 = %{epoch}:%{version}-%{release}
Requires: librados2 = %{epoch}:%{version}-%{release}
Requires: python-rados = %{epoch}:%{version}-%{release}
Requires: python-rbd = %{epoch}:%{version}-%{release}
Requires: python-cephfs = %{epoch}:%{version}-%{release}
Requires: python-requests
Requires: redhat-lsb-core
%description -n ceph-common
common utilities to mount and interact with a ceph storage cluster
%package fuse
Summary: Ceph fuse-based client
Group: System Environment/Base
Requires: %{name} = %{epoch}:%{version}-%{release}
BuildRequires: fuse-devel
%description fuse
FUSE based client for Ceph distributed network file system
%package -n rbd-fuse
Summary: Ceph fuse-based client
Group: System Environment/Base
Requires: %{name} = %{epoch}:%{version}-%{release}
Requires: librados2 = %{epoch}:%{version}-%{release}
Requires: librbd1 = %{epoch}:%{version}-%{release}
BuildRequires: fuse-devel
%description -n rbd-fuse
FUSE based client to map Ceph rbd images to files
%package radosgw
Summary: Rados REST gateway
Group: Development/Libraries
Requires: ceph-common = %{epoch}:%{version}-%{release}
Requires: librados2 = %{epoch}:%{version}-%{release}
%if 0%{defined suse_version}
BuildRequires: libexpat-devel
BuildRequires: FastCGI-devel
Requires: apache2-mod_fcgid
%else
BuildRequires: expat-devel
BuildRequires: fcgi-devel
%endif
%description radosgw
radosgw is an S3 HTTP REST gateway for the RADOS object store. It is
implemented as a FastCGI module using libfcgi, and can be used in
conjunction with any FastCGI capable web server.
%if %{with ocf}
%package resource-agents
Summary: OCF-compliant resource agents for Ceph daemons
Group: System Environment/Base
License: LGPL-2.0
Requires: %{name} = %{epoch}:%{version}-%{release}
Requires: resource-agents
%description resource-agents
Resource agents for monitoring and managing Ceph daemons
under Open Cluster Framework (OCF) compliant resource
managers such as Pacemaker.
%endif
%package -n librados2
Summary: RADOS distributed object store client library
Group: System Environment/Libraries
License: LGPL-2.0
%if 0%{?rhel} || 0%{?centos} || 0%{?fedora}
Obsoletes: ceph-libs < 1:0.80.5
%endif
%description -n librados2
RADOS is a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to access the distributed object
store using a simple file-like interface.
%package -n librados2-devel
Summary: RADOS headers
Group: Development/Libraries
License: LGPL-2.0
Requires: librados2 = %{epoch}:%{version}-%{release}
Obsoletes: ceph-devel
%description -n librados2-devel
This package contains libraries and headers needed to develop programs
that use RADOS object store.
%package -n python-rados
Summary: Python libraries for the RADOS object store
Group: System Environment/Libraries
License: LGPL-2.0
Requires: librados2 = %{epoch}:%{version}-%{release}
Obsoletes: python-ceph
%description -n python-rados
This package contains Python libraries for interacting with Ceph's RADOS
object store.
%package -n librbd1
Summary: RADOS block device client library
Group: System Environment/Libraries
License: LGPL-2.0
Requires: librados2 = %{epoch}:%{version}-%{release}
%if 0%{?rhel} || 0%{?centos} || 0%{?fedora}
Obsoletes: ceph-libs < 1:0.80.5
%endif
%description -n librbd1
RBD is a block device striped across multiple distributed objects in
RADOS, a reliable, autonomic distributed object storage cluster
developed as part of the Ceph distributed storage system. This is a
shared library allowing applications to manage these block devices.
%package -n librbd1-devel
Summary: RADOS block device headers
Group: Development/Libraries
License: LGPL-2.0
Requires: librbd1 = %{epoch}:%{version}-%{release}
Requires: librados2-devel = %{epoch}:%{version}-%{release}
Obsoletes: ceph-devel
%description -n librbd1-devel
This package contains libraries and headers needed to develop programs
that use RADOS block device.
%package -n python-rbd
Summary: Python libraries for the RADOS block device
Group: System Environment/Libraries
License: LGPL-2.0
Requires: librbd1 = %{epoch}:%{version}-%{release}
Requires: python-rados = %{epoch}:%{version}-%{release}
Obsoletes: python-ceph
%description -n python-rbd
This package contains Python libraries for interacting with Ceph's RADOS
block device.
%package -n libcephfs1
Summary: Ceph distributed file system client library
Group: System Environment/Libraries
License: LGPL-2.0
%if 0%{?rhel} || 0%{?centos} || 0%{?fedora}
Obsoletes: ceph-libs < 1:0.80.5
Obsoletes: ceph-libcephfs < 1:0.80.5
%endif
%description -n libcephfs1
Ceph is a distributed network file system designed to provide excellent
performance, reliability, and scalability. This is a shared library
allowing applications to access a Ceph distributed file system via a
POSIX-like interface.
%package -n libcephfs1-devel
Summary: Ceph distributed file system headers
Group: Development/Libraries
License: LGPL-2.0
Requires: libcephfs1 = %{epoch}:%{version}-%{release}
Requires: librados2-devel = %{epoch}:%{version}-%{release}
Obsoletes: ceph-devel
%description -n libcephfs1-devel
This package contains libraries and headers needed to develop programs
that use Ceph's distributed file system.
%package -n python-cephfs
Summary: Python libraries for Ceph distributed file system
Group: System Environment/Libraries
License: LGPL-2.0
Requires: libcephfs1 = %{epoch}:%{version}-%{release}
Requires: python-rados = %{epoch}:%{version}-%{release}
Obsoletes: python-ceph
%description -n python-cephfs
This package contains Python libraries for interacting with Ceph's distributed
file system.
%package -n rest-bench
Summary: RESTful benchmark
Group: System Environment/Libraries
License: LGPL-2.0
Requires: ceph-common = %{epoch}:%{version}-%{release}
%description -n rest-bench
RESTful bencher that can be used to benchmark radosgw performance.
%package -n ceph-test
Summary: Ceph benchmarks and test tools
Group: System Environment/Libraries
License: LGPL-2.0
Requires: librados2 = %{epoch}:%{version}-%{release}
Requires: librbd1 = %{epoch}:%{version}-%{release}
Requires: libcephfs1 = %{epoch}:%{version}-%{release}
%description -n ceph-test
This package contains Ceph benchmarks and test tools.
%package -n libcephfs_jni1
Summary: Java Native Interface library for CephFS Java bindings.
Group: System Environment/Libraries
License: LGPL-2.0
Requires: java
Requires: libcephfs1 = %{epoch}:%{version}-%{release}
BuildRequires: java-devel
%description -n libcephfs_jni1
This package contains the Java Native Interface library for CephFS Java
bindings.
%package -n libcephfs_jni1-devel
Summary: Development files for CephFS Java Native Interface library.
Group: System Environment/Libraries
License: LGPL-2.0
Requires: java
Requires: libcephfs_jni1 = %{epoch}:%{version}-%{release}
%description -n libcephfs_jni1-devel
This package contains the development files for CephFS Java Native Interface
library.
%package -n cephfs-java
Summary: Java libraries for the Ceph File System.
Group: System Environment/Libraries
License: LGPL-2.0
Requires: java
Requires: libcephfs_jni1 = %{epoch}:%{version}-%{release}
BuildRequires: java-devel
%description -n cephfs-java
This package contains the Java libraries for the Ceph File System.
%package libs-compat
Summary: Meta package to include ceph libraries.
Group: System Environment/Libraries
License: LGPL-2.0
Obsoletes: ceph-libs
Requires: librados2 = %{epoch}:%{version}-%{release}
Requires: librbd1 = %{epoch}:%{version}-%{release}
Requires: libcephfs1 = %{epoch}:%{version}-%{release}
Provides: ceph-libs
%description libs-compat
This is a meta package that pulls in librados2, librbd1 and libcephfs1. It
is included for backwards compatibility with distributions that depend on the
former ceph-libs package, which is now split up into these three subpackages.
Packages still depending on ceph-libs should be fixed to depend on librados2,
librbd1 or libcephfs1 instead.
%package devel-compat
Summary: Compatibility package for Ceph headers
Group: Development/Libraries
License: LGPL-2.0
Obsoletes: ceph-devel
Requires: %{name} = %{epoch}:%{version}-%{release}
Requires: librados2-devel = %{epoch}:%{version}-%{release}
Requires: librbd1-devel = %{epoch}:%{version}-%{release}
Requires: libcephfs1-devel = %{epoch}:%{version}-%{release}
Requires: libcephfs_jni1-devel = %{epoch}:%{version}-%{release}
Provides: ceph-devel
%description devel-compat
This is a compatibility package to accommodate the ceph-devel split into
librados2-devel, librbd1-devel and libcephfs1-devel. Packages still depending
on ceph-devel should be fixed to depend on librados2-devel, librbd1-devel
or libcephfs1-devel instead.
%package -n python-ceph-compat
Summary: Compatibility package for Ceph's Python libraries
Group: System Environment/Libraries
License: LGPL-2.0
Obsoletes: python-ceph
Requires: python-rados = %{epoch}:%{version}-%{release}
Requires: python-rbd = %{epoch}:%{version}-%{release}
Requires: python-cephfs = %{epoch}:%{version}-%{release}
Provides: python-ceph
%description -n python-ceph-compat
This is a compatibility package to accommodate the python-ceph split into
python-rados, python-rbd and python-cephfs. Packages still depending on
python-ceph should be fixed to depend on python-rados, python-rbd or
python-cephfs instead.
%if 0%{?opensuse} || 0%{?suse_version}
%debug_package
%endif
#################################################################################
# common
#################################################################################
%prep
%setup -q
%patch0 -p1
%patch1 -p1
%patch2 -p1
%patch3 -p1
%patch4 -p1
%build
# Find jni.h
for i in /usr/{lib64,lib}/jvm/java/include{,/linux}; do
[ -d $i ] && java_inc="$java_inc -I$i"
done
./autogen.sh
%if ( 0%{?rhel} && 0%{?rhel} <= 6)
MY_CONF_OPT="--without-libxfs"
%else
MY_CONF_OPT=""
%endif
MY_CONF_OPT="$MY_CONF_OPT --with-radosgw"
# No gperftools on these architectures
%ifarch ppc ppc64 s390 s390x
MY_CONF_OPT="$MY_CONF_OPT --without-tcmalloc"
%endif
export RPM_OPT_FLAGS=`echo $RPM_OPT_FLAGS | sed -e 's/i386/i486/'`
%ifarch armv5tel
# libatomic_ops does not have correct asm for ARMv5tel
EXTRA_CFLAGS="-DAO_USE_PTHREAD_DEFS"
%endif
%ifarch %{arm}
# libatomic_ops seems to fallback on some pthread implementation on ARM
EXTRA_LDFLAGS="-lpthread"
%endif
%{configure} CPPFLAGS="$java_inc" \
--prefix=/usr \
--localstatedir=/var \
--sysconfdir=/etc \
--docdir=%{_docdir}/ceph \
--with-nss \
--without-cryptopp \
--with-rest-bench \
--with-debug \
--enable-cephfs-java \
$MY_CONF_OPT \
%{?_with_ocf} \
CFLAGS="$RPM_OPT_FLAGS $EXTRA_CFLAGS" \
CXXFLAGS="$RPM_OPT_FLAGS $EXTRA_CFLAGS" \
LDFLAGS="$EXTRA_LDFLAGS"
# fix bug in specific version of libedit-devel
%if 0%{defined suse_version}
sed -i -e "s/-lcurses/-lncurses/g" Makefile
sed -i -e "s/-lcurses/-lncurses/g" src/Makefile
sed -i -e "s/-lcurses/-lncurses/g" man/Makefile
sed -i -e "s/-lcurses/-lncurses/g" src/ocf/Makefile
sed -i -e "s/-lcurses/-lncurses/g" src/java/Makefile
%endif
make %{_smp_mflags}
%install
make DESTDIR=$RPM_BUILD_ROOT install
find $RPM_BUILD_ROOT -type f -name "*.la" -exec rm -f {} ';'
find $RPM_BUILD_ROOT -type f -name "*.a" -exec rm -f {} ';'
install -D src/init-ceph $RPM_BUILD_ROOT%{_initrddir}/ceph
install -D src/init-radosgw.sysv $RPM_BUILD_ROOT%{_initrddir}/ceph-radosgw
install -D src/init-rbdmap $RPM_BUILD_ROOT%{_initrddir}/rbdmap
install -D src/rbdmap $RPM_BUILD_ROOT%{_sysconfdir}/ceph/rbdmap
mkdir -p $RPM_BUILD_ROOT%{_sbindir}
ln -sf ../../etc/init.d/ceph %{buildroot}/%{_sbindir}/rcceph
ln -sf ../../etc/init.d/ceph-radosgw %{buildroot}/%{_sbindir}/rcceph-radosgw
install -m 0644 -D src/logrotate.conf $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/ceph
install -m 0644 -D src/rgw/logrotate.conf $RPM_BUILD_ROOT%{_sysconfdir}/logrotate.d/radosgw
chmod 0644 $RPM_BUILD_ROOT%{_docdir}/ceph/sample.ceph.conf
chmod 0644 $RPM_BUILD_ROOT%{_docdir}/ceph/sample.fetch_config
# udev rules
%if 0%{?rhel} >= 7 || 0%{?fedora}
install -m 0644 -D udev/50-rbd.rules $RPM_BUILD_ROOT/usr/lib/udev/rules.d/50-rbd.rules
install -m 0644 -D udev/60-ceph-partuuid-workaround.rules $RPM_BUILD_ROOT/usr/lib/udev/rules.d/60-ceph-partuuid-workaround.rules
%else
install -m 0644 -D udev/50-rbd.rules $RPM_BUILD_ROOT/lib/udev/rules.d/50-rbd.rules
install -m 0644 -D udev/60-ceph-partuuid-workaround.rules $RPM_BUILD_ROOT/lib/udev/rules.d/60-ceph-partuuid-workaround.rules
%endif
%if (0%{?rhel} && 0%{?rhel} < 7)
install -m 0644 -D udev/95-ceph-osd-alt.rules $RPM_BUILD_ROOT/lib/udev/rules.d/95-ceph-osd.rules
%else
install -m 0644 -D udev/95-ceph-osd.rules $RPM_BUILD_ROOT/lib/udev/rules.d/95-ceph-osd.rules
%endif
%if 0%{?rhel} >= 7 || 0%{?fedora}
mv $RPM_BUILD_ROOT/lib/udev/rules.d/95-ceph-osd.rules $RPM_BUILD_ROOT/usr/lib/udev/rules.d/95-ceph-osd.rules
mv $RPM_BUILD_ROOT/sbin/mkcephfs $RPM_BUILD_ROOT/usr/sbin/mkcephfs
mv $RPM_BUILD_ROOT/sbin/mount.ceph $RPM_BUILD_ROOT/usr/sbin/mount.ceph
mv $RPM_BUILD_ROOT/sbin/mount.fuse.ceph $RPM_BUILD_ROOT/usr/sbin/mount.fuse.ceph
%endif
#set up placeholder directories
mkdir -p $RPM_BUILD_ROOT%{_sysconfdir}/ceph
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/run/ceph
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/ceph
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/ceph/tmp
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/ceph/mon
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/ceph/osd
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/ceph/mds
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/ceph/bootstrap-osd
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/lib/ceph/bootstrap-mds
mkdir -p $RPM_BUILD_ROOT%{_localstatedir}/log/radosgw
%if %{defined suse_version}
# Fedora seems to have some problems with this macro, use it only on SUSE
%fdupes -s $RPM_BUILD_ROOT/%{python_sitelib}
%fdupes %buildroot
%endif
%clean
rm -rf $RPM_BUILD_ROOT
%post
/sbin/ldconfig
/sbin/chkconfig --add ceph
mkdir -p %{_localstatedir}/run/ceph/
%preun
%if %{defined suse_version}
%stop_on_removal ceph
%endif
if [ $1 = 0 ] ; then
/sbin/service ceph stop >/dev/null 2>&1
/sbin/chkconfig --del ceph
fi
%postun
/sbin/ldconfig
%if %{defined suse_version}
%insserv_cleanup
%endif
#################################################################################
# files
#################################################################################
%files
%defattr(-,root,root,-)
%docdir %{_docdir}
%dir %{_docdir}/ceph
%{_docdir}/ceph/sample.ceph.conf
%{_docdir}/ceph/sample.fetch_config
%{_bindir}/cephfs
%{_bindir}/ceph-clsinfo
%{_bindir}/ceph-rest-api
%{python_sitelib}/ceph_rest_api.py*
%{_bindir}/crushtool
%{_bindir}/monmaptool
%{_bindir}/osdmaptool
%{_bindir}/ceph-run
%{_bindir}/ceph-mon
%{_bindir}/ceph-mds
%{_bindir}/ceph-osd
%{_bindir}/ceph-rbdnamer
%{_bindir}/librados-config
%{_bindir}/ceph-client-debug
%{_bindir}/ceph-debugpack
%{_bindir}/ceph-coverage
%{_bindir}/ceph_mon_store_converter
%{_initrddir}/ceph
%{_sbindir}/ceph-disk
%{_sbindir}/ceph-disk-activate
%{_sbindir}/ceph-disk-prepare
%{_sbindir}/ceph-disk-udev
%{_sbindir}/ceph-create-keys
%{_sbindir}/rcceph
%if 0%{?rhel} >= 7 || 0%{?fedora}
%{_sbindir}/mkcephfs
%{_sbindir}/mount.ceph
%else
/sbin/mkcephfs
/sbin/mount.ceph
%endif
%dir %{_libdir}/ceph
%{_libdir}/ceph/ceph_common.sh
%dir %{_libdir}/rados-classes
%{_libdir}/rados-classes/libcls_rbd.so*
%{_libdir}/rados-classes/libcls_hello.so*
%{_libdir}/rados-classes/libcls_rgw.so*
%{_libdir}/rados-classes/libcls_lock.so*
%{_libdir}/rados-classes/libcls_kvs.so*
%{_libdir}/rados-classes/libcls_refcount.so*
%{_libdir}/rados-classes/libcls_log.so*
%{_libdir}/rados-classes/libcls_replica_log.so*
%{_libdir}/rados-classes/libcls_statelog.so*
%{_libdir}/rados-classes/libcls_user.so*
%{_libdir}/rados-classes/libcls_version.so*
%dir %{_libdir}/ceph/erasure-code
%{_libdir}/ceph/erasure-code/libec_example.so*
%{_libdir}/ceph/erasure-code/libec_fail_to_initialize.so*
%{_libdir}/ceph/erasure-code/libec_fail_to_register.so*
%{_libdir}/ceph/erasure-code/libec_hangs.so*
%{_libdir}/ceph/erasure-code/libec_jerasure*.so*
%{_libdir}/ceph/erasure-code/libec_test_jerasure*.so*
%{_libdir}/ceph/erasure-code/libec_missing_entry_point.so*
%if 0%{?rhel} >= 7 || 0%{?fedora}
/usr/lib/udev/rules.d/60-ceph-partuuid-workaround.rules
/usr/lib/udev/rules.d/95-ceph-osd.rules
%else
/lib/udev/rules.d/60-ceph-partuuid-workaround.rules
/lib/udev/rules.d/95-ceph-osd.rules
%endif
%config %{_sysconfdir}/bash_completion.d/ceph
%config(noreplace) %{_sysconfdir}/logrotate.d/ceph
%config(noreplace) %{_sysconfdir}/logrotate.d/radosgw
%{_mandir}/man8/ceph-mon.8*
%{_mandir}/man8/ceph-mds.8*
%{_mandir}/man8/ceph-osd.8*
%{_mandir}/man8/mkcephfs.8*
%{_mandir}/man8/ceph-run.8*
%{_mandir}/man8/ceph-rest-api.8*
%{_mandir}/man8/crushtool.8*
%{_mandir}/man8/osdmaptool.8*
%{_mandir}/man8/monmaptool.8*
%{_mandir}/man8/cephfs.8*
%{_mandir}/man8/mount.ceph.8*
%{_mandir}/man8/ceph-rbdnamer.8*
%{_mandir}/man8/ceph-debugpack.8*
%{_mandir}/man8/ceph-clsinfo.8.gz
%{_mandir}/man8/librados-config.8.gz
#set up placeholder directories
%dir %{_localstatedir}/lib/ceph/
%dir %{_localstatedir}/lib/ceph/tmp
%dir %{_localstatedir}/lib/ceph/mon
%dir %{_localstatedir}/lib/ceph/osd
%dir %{_localstatedir}/lib/ceph/mds
%dir %{_localstatedir}/lib/ceph/bootstrap-osd
%dir %{_localstatedir}/lib/ceph/bootstrap-mds
%ghost %dir %{_localstatedir}/run/ceph/
#################################################################################
%files -n ceph-common
%defattr(-,root,root,-)
%{_bindir}/ceph
%{_bindir}/ceph-authtool
%{_bindir}/ceph-conf
%{_bindir}/ceph-dencoder
%{_bindir}/ceph-syn
%{_bindir}/ceph-crush-location
%{_bindir}/rados
%{_bindir}/rbd
%{_bindir}/ceph-post-file
%{_bindir}/ceph-brag
%{_mandir}/man8/ceph-authtool.8*
%{_mandir}/man8/ceph-conf.8*
%{_mandir}/man8/ceph-dencoder.8*
%{_mandir}/man8/ceph-syn.8*
%{_mandir}/man8/ceph-post-file.8*
%{_mandir}/man8/ceph.8*
%{_mandir}/man8/rados.8*
%{_mandir}/man8/rbd.8*
%{_datadir}/ceph/known_hosts_drop.ceph.com
%{_datadir}/ceph/id_dsa_drop.ceph.com
%{_datadir}/ceph/id_dsa_drop.ceph.com.pub
%dir %{_sysconfdir}/ceph/
%dir %{_localstatedir}/log/ceph/
%config %{_sysconfdir}/bash_completion.d/rados
%config %{_sysconfdir}/bash_completion.d/rbd
%config(noreplace) %{_sysconfdir}/ceph/rbdmap
%{_initrddir}/rbdmap
%{python_sitelib}/ceph_argparse.py*
%postun -n ceph-common
# Package removal cleanup
if [ "$1" -eq "0" ] ; then
rm -rf /var/log/ceph
rm -rf /etc/ceph
fi
#################################################################################
%files fuse
%defattr(-,root,root,-)
%{_bindir}/ceph-fuse
%{_mandir}/man8/ceph-fuse.8*
%if 0%{?rhel} >= 7 || 0%{?fedora}
%{_sbindir}/mount.fuse.ceph
%else
/sbin/mount.fuse.ceph
%endif
#################################################################################
%files -n rbd-fuse
%defattr(-,root,root,-)
%{_bindir}/rbd-fuse
%{_mandir}/man8/rbd-fuse.8*
#################################################################################
%files radosgw
%defattr(-,root,root,-)
%{_initrddir}/ceph-radosgw
%{_bindir}/radosgw
%{_bindir}/radosgw-admin
%{_mandir}/man8/radosgw.8*
%{_mandir}/man8/radosgw-admin.8*
%{_sbindir}/rcceph-radosgw
%config %{_sysconfdir}/bash_completion.d/radosgw-admin
%dir %{_localstatedir}/log/radosgw/
%post radosgw
/sbin/ldconfig
%if %{defined suse_version}
%fillup_and_insserv -f -y ceph-radosgw
%endif
%preun radosgw
%if %{defined suse_version}
%stop_on_removal ceph-radosgw
%endif
%postun radosgw
/sbin/ldconfig
%if %{defined suse_version}
%restart_on_update ceph-radosgw
%insserv_cleanup
%endif
# Package removal cleanup
if [ "$1" -eq "0" ] ; then
rm -rf /var/log/radosgw
fi
#################################################################################
%if %{with ocf}
%files resource-agents
%defattr(0755,root,root,-)
%dir /usr/lib/ocf
%dir /usr/lib/ocf/resource.d
%dir /usr/lib/ocf/resource.d/ceph
/usr/lib/ocf/resource.d/%{name}/*
%endif
#################################################################################
%files -n librados2
%defattr(-,root,root,-)
%{_libdir}/librados.so.*
%post -n librados2
/sbin/ldconfig
%postun -n librados2
/sbin/ldconfig
#################################################################################
%files -n librados2-devel
%defattr(-,root,root,-)
%dir %{_includedir}/rados
%{_includedir}/rados/librados.h
%{_includedir}/rados/librados.hpp
%{_includedir}/rados/buffer.h
%{_includedir}/rados/page.h
%{_includedir}/rados/crc32c.h
%{_includedir}/rados/rados_types.h
%{_includedir}/rados/rados_types.hpp
%{_includedir}/rados/memory.h
%{_libdir}/librados.so
#################################################################################
%files -n python-rados
%defattr(-,root,root,-)
%{python_sitelib}/rados.py*
#################################################################################
%files -n librbd1
%defattr(-,root,root,-)
%{_libdir}/librbd.so.*
%if 0%{?rhel} >= 7 || 0%{?fedora}
/usr/lib/udev/rules.d/50-rbd.rules
%else
/lib/udev/rules.d/50-rbd.rules
%endif
%post -n librbd1
/sbin/ldconfig
# First, cleanup
rm -f /usr/lib64/qemu/librbd.so.1
rmdir /usr/lib64/qemu 2>/dev/null || true
rmdir /usr/lib64/ 2>/dev/null || true
# If x86_64 and rhel6+, link the library to /usr/lib64/qemu -- rhel hack
%ifarch x86_64
%if 0%{?rhel} >= 6
mkdir -p /usr/lib64/qemu/
ln -sf %{_libdir}/librbd.so.1 /usr/lib64/qemu/librbd.so.1
%endif
%endif
%postun -n librbd1
/sbin/ldconfig
#################################################################################
%files -n librbd1-devel
%defattr(-,root,root,-)
%dir %{_includedir}/rbd
%{_includedir}/rbd/librbd.h
%{_includedir}/rbd/librbd.hpp
%{_includedir}/rbd/features.h
%{_libdir}/librbd.so
#################################################################################
%files -n python-rbd
%defattr(-,root,root,-)
%{python_sitelib}/rbd.py*
#################################################################################
%files -n libcephfs1
%defattr(-,root,root,-)
%{_libdir}/libcephfs.so.*
%post -n libcephfs1
/sbin/ldconfig
%postun -n libcephfs1
/sbin/ldconfig
#################################################################################
%files -n libcephfs1-devel
%defattr(-,root,root,-)
%dir %{_includedir}/cephfs
%{_includedir}/cephfs/libcephfs.h
%{_libdir}/libcephfs.so
#################################################################################
%files -n python-cephfs
%defattr(-,root,root,-)
%{python_sitelib}/cephfs.py*
#################################################################################
%files -n rest-bench
%defattr(-,root,root,-)
%{_bindir}/rest-bench
#################################################################################
%files -n ceph-test
%defattr(-,root,root,-)
%{_bindir}/ceph_bench_log
%{_bindir}/ceph_dupstore
%{_bindir}/ceph_kvstorebench
%{_bindir}/ceph_multi_stress_watch
%{_bindir}/ceph_erasure_code
%{_bindir}/ceph_erasure_code_benchmark
%{_bindir}/ceph_omapbench
%{_bindir}/ceph_psim
%{_bindir}/ceph_radosacl
%{_bindir}/ceph_rgw_jsonparser
%{_bindir}/ceph_rgw_multiparser
%{_bindir}/ceph_scratchtool
%{_bindir}/ceph_scratchtoolpp
%{_bindir}/ceph_smalliobench
%{_bindir}/ceph_smalliobenchdumb
%{_bindir}/ceph_smalliobenchfs
%{_bindir}/ceph_smalliobenchrbd
%{_bindir}/ceph_filestore_dump
%{_bindir}/ceph_filestore_tool
%{_bindir}/ceph_streamtest
%{_bindir}/ceph_test_*
%{_bindir}/ceph_tpbench
%{_bindir}/ceph_xattr_bench
%{_bindir}/ceph-monstore-tool
%{_bindir}/ceph-osdomap-tool
%{_bindir}/ceph-kvstore-tool
%files -n libcephfs_jni1
%defattr(-,root,root,-)
%{_libdir}/libcephfs_jni.so.*
%files -n libcephfs_jni1-devel
%defattr(-,root,root,-)
%{_libdir}/libcephfs_jni.so
%files -n cephfs-java
%defattr(-,root,root,-)
%{_javadir}/libcephfs.jar
# We need to create these three for compatibility reasons
%files libs-compat
%files devel-compat
%files -n python-ceph-compat
%changelog
* Fri Jan 16 2015 Boris Ranto <branto@redhat.com> - 1:0.80.5-10
- Deprecate the package -- the client part of the package will be provided
* Wed Jan 14 2015 Boris Ranto <branto@redhat.com> - 1:0.80.7-3
- Fix rhbz#1155335 -- /usr/bin/ceph hangs indefinitely

sources

@@ -1 +0,0 @@
dc7e46b5804fd5fdb8796416b914c4be ceph-0.80.7.tar.bz2