diff --git a/.gitignore b/.gitignore index 1844bef..ba50493 100644 --- a/.gitignore +++ b/.gitignore @@ -5,59 +5,4 @@ /thin-provisioning-tools-v0.2.8.tar.bz2 /thin-provisioning-tools-v0.3.0.tar.bz2 /thin-provisioning-tools-v0.3.2.tar.bz2 -/thin-provisioning-tools-0.5.0.tar.gz -/thin-provisioning-tools-0.5.1.tar.gz -/thin-provisioning-tools-0.5.2.tar.gz -/thin-provisioning-tools-0.5.3.tar.gz -/thin-provisioning-tools-0.5.4.tar.gz /thin-provisioning-tools-0.5.5.tar.gz -/thin-provisioning-tools-0.5.6.tar.gz -/thin-provisioning-tools-0.6.0.tar.gz -/thin-provisioning-tools-0.6.2-rc3.tar.gz -/thin-provisioning-tools-0.6.2-rc4.tar.gz -/thin-provisioning-tools-0.6.2-rc5.tar.gz -/thin-provisioning-tools-0.6.2-rc6.tar.gz -/thin-provisioning-tools-0.6.2-rc7.tar.gz -/thin-provisioning-tools-0.6.2-rc8.tar.gz -/thin-provisioning-tools-0.6.2.tar.gz -/thin-provisioning-tools-0.6.3.tar.gz -/thin-provisioning-tools-0.7.0-rc2.tar.gz -/thin-provisioning-tools-0.7.0-rc3.tar.gz -/thin-provisioning-tools-0.7.0-rc4.tar.gz -/thin-provisioning-tools-0.7.0-rc5.tar.gz -/thin-provisioning-tools-0.7.0-rc6.tar.gz -/v0.7.2.tar.gz -/v0.7.3.tar.gz -/v0.7.5.tar.gz -/v0.7.6.tar.gz -/v0.8.0.tar.gz -/v0.8.1.tar.gz -/v0.8.5.tar.gz -/v0.9.0-rc2.tar.gz -/device-mapper-persistent-data-0.9.0-rc2-vendor.tar.gz -/v0.9.0.tar.gz -/dmpd090-vendor.tar.gz -/dmpd090-vendor2.tar.gz -/dmpd090-vendor3.tar.gz -/v1.0.2.tar.gz -/dmpd102-vendor.tar.gz -/v1.0.4.tar.gz -/dmpd104-vendor.tar.gz -/v1.0.5.tar.gz -/dmpd105-vendor.tar.gz -/v1.0.6.tar.gz -/dmpd106-vendor.tar.gz -/v1.0.9.tar.gz -/dmpd109-vendor.tar.gz -/v1.0.11.tar.gz -/dmpd1011-vendor.tar.gz -/v1.0.12.tar.gz -/dmpd1012-vendor.tar.gz -/v1.1.0.tar.gz -/dmpd110-vendor.tar.gz -/v1.2.1.tar.gz -/dmpd121-vendor.tar.gz -/v1.3.0.tar.gz -/dmpd130-vendor.tar.gz -/v1.3.1.tar.gz -/dmpd131-vendor.tar.gz diff --git a/README.md b/README.md deleted file mode 100644 index a81396d..0000000 --- a/README.md +++ /dev/null @@ -1,54 +0,0 @@ -# Packaging device-mapper-persitent-data (AKA 
dmpd) - -This is rust package using *vendor* file for dependencies. (Vendor file is an archive of dependencies' sources.) - -For most of simple fixes, there is nothing special, -to add a patches simply add them to dist-git, -add a corresponding `PatchN: 000N-...` lines, -increase `Release:` or `release_suffix` for Z-stream (or for test build use suffix like `.bzNNNNNN`), -add to `%changelog`, -commit, -push, -and build the package using `fedpkg build`. - -Alternatively before committing anything use `fedpkg srpm` and `fedpkg scratch-build --srpm $SRPM [--arches x86_64]` to create a scratch build. - -However when building a new version of dmpd or when updating a dependency is -needed (e.g. because of CVE in the dependency) vendor file -has to be regenerated. - -## Updating vendor file - -To build a new version of the package following tooling is needed: - -- `rust >= 1.35` -- `cargo` providing vendor subcommand (now upstream, included in latest Fedora and RHEL8+) - -To create the vendor file: - -In the upstream directory: - -1. Run `cargo vendor` in the directory with upstream sources to create *vendor* - directory with sources. - - TODO: There is a *cargo-vendor-filterer* project used by *stratisd* to - filter out unnecessary dependencies for other operating systems. -2. Run `tar czf device-mapper-persistent-data-vendor-$VERSION.tar.gz ./vendor` to create a tarball. -3. Copy the vendor file to dist git directory. - -In the dist-git directory: - -1. Get the upstream tarball `wget https://github.com/jthornber/thin-provisioning-tools/archive/v$VERSION.tar.gz` - - NOTE: Migration to `https://github.com/device-mapper-utils/thin-provisioning-tools` is coming. -2. Update *Source0* and *Source1* to correct values. -3. Add the tarballs to koji/brew lookaside: - - `fedpkg new-sources v$VERSION.tar.gz device-mapper-persistent-data-vendor-$VERSION.tar.gz` - -## TODO/NOTES - -Some of the dependencies are already packaged by Fedora. 
Can we instruct *cargo vendor* to include only those which are not provided by Fedora? -It would be possible to include these as submodules, and the rest could be used from Fedora. -For RHEL and CentOS Stream using vendor file is the recommended way. - -*%cargo_install* installs by default in */usr/bin* but the package expects */usr/sbin*. For now I run *make install-rust-tools*. -Now Fedora unified the */usr/sbin* and */usr/bin* directories, to this can be "fixed" in Fedora and later in CentOS Stream. - diff --git a/device-mapper-persistent-avoid-strip.patch b/device-mapper-persistent-avoid-strip.patch new file mode 100644 index 0000000..f7babda --- /dev/null +++ b/device-mapper-persistent-avoid-strip.patch @@ -0,0 +1,11 @@ +--- a/Makefile.in 2014-11-12 18:46:30.282251378 +0100 ++++ b/Makefile.in 2014-11-12 18:46:56.108608255 +0100 +@@ -108,7 +108,7 @@ + vpath %.cc $(TOP_DIR) + + INSTALL_DIR = $(INSTALL) -m 755 -d +-INSTALL_PROGRAM = $(INSTALL) -m 755 -s ++INSTALL_PROGRAM = $(INSTALL) -m 755 + INSTALL_DATA = $(INSTALL) -p -m 644 + + ifeq ("@TESTING@", "yes") diff --git a/device-mapper-persistent-data-add-era_restore-and-cache_metadata_size-man-pages.patch b/device-mapper-persistent-data-add-era_restore-and-cache_metadata_size-man-pages.patch new file mode 100644 index 0000000..ac2e687 --- /dev/null +++ b/device-mapper-persistent-data-add-era_restore-and-cache_metadata_size-man-pages.patch @@ -0,0 +1,116 @@ +--- thin-provisioning-tools-0.4.1/man8/era_restore.8.orig 2014-10-28 14:24:43.356272471 +0100 ++++ thin-provisioning-tools-0.4.1/man8/era_restore.8 2014-10-28 14:28:56.358133852 +0100 +@@ -0,0 +1,63 @@ ++.TH ERA_RESTORE 8 "Thin Provisioning Tools" "Red Hat, Inc." 
\" -*- nroff -*- ++.SH NAME ++era_restore \- restore era metadata file to device or file ++ ++.SH SYNOPSIS ++.B era_restore ++.RB [ options ] ++.RB -i ++.I {device|file} ++.RB -o ++.I {device|file} ++ ++.SH DESCRIPTION ++.B era_restore ++restores binary era metadata created by the ++respective device-mapper target dumped into an XML formatted (see ++.BR era_dump(8) ) ++.I file ++, which optionally can be preprocessed before the restore to another ++.I device ++or ++.I file. ++If restored to a metadata ++.I device ++, the metadata can be processed by the device-mapper target. ++ ++.IP "\fB\-q, \-\-quiet\fP" ++Suppress output messages, return only exit code. ++ ++.IP "\fB\-i, \-\-input\fP \fI{device|file}\fP" ++Input file or device with metadata. ++ ++.IP "\fB\-o, \-\-output\fP \fI{device|file}\fP" ++Output file or device. ++ ++.IP "\fB\-h, \-\-help\fP" ++Print help and exit. ++ ++.IP "\fB\-V, \-\-version\fP" ++Output version information and exit. ++ ++.SH EXAMPLE ++Restores the XML formatted era metadata on file ++.B metadata ++to logical volume /dev/vg/metadata for further processing by the ++respective device-mapper target: ++.sp ++.B era_restore -i metadata -o /dev/vg/metadata ++ ++.SH DIAGNOSTICS ++.B era_restore ++returns an exit code of 0 for success or 1 for error. ++ ++.SH SEE ALSO ++.B era_check(8) ++.B era_dump(8) ++.B era_invaidate(8) ++.B era_restore(8) ++ ++.SH AUTHOR ++Joe Thornber ++.br ++Heinz Mauelshagen +--- thin-provisioning-tools-0.4.1/man8/cache_metadata_size.8.orig 2014-10-28 14:31:49.116087709 +0100 ++++ thin-provisioning-tools-0.4.1/man8/cache_metadata_size.8 2014-10-28 14:50:29.337238755 +0100 +@@ -0,0 +1,47 @@ ++.TH CACHE_METADATA_SIZE 8 "Thin Provisioning Tools" "Red Hat, Inc." \" -*- nroff -*- ++.SH NAME ++cache_metadata_size \- cache metadata device/file size calculator. 
++ ++.SH SYNOPSIS ++.B cache_metadata_size ++.RB [ options ] ++ ++.SH DESCRIPTION ++.B cache_metadata_size ++calculates the size of the cache metadata based on the block size ++of the cache device and the cache block size. ++All relates to the size of the fast device (eg, SSD), rather ++than the whole cached (i.e. origin) device. ++ ++.IP "\fB\\-\-block\-size \fP ++Block size of cache in units of sectors. ++ ++.IP "\fB\\-\-device\-size \fP ++Device size of the cache device in units of sectors. ++ ++.IP "\fB\\-\-nr\-blocks \fP ++Capacity of the cache in number of blocks. ++ ++.IP "\fB\-h, \-\-help\fP" ++Print help and exit. ++ ++.IP "\fB\-V, \-\-version\fP" ++Output version information and exit. ++ ++.SH EXAMPLES ++Calculates the cache metadata device size for block size 256 sectors.and device size of 2GiB ++.sp ++.B cache_metadata_size --block-size 256 --device-size $((2*1024*1024)) ++ ++.SH DIAGNOSTICS ++.B cache_metadata_size ++returns an exit code of 0 for success or 1 for error. ++ ++.SH SEE ALSO ++.B cache_check(8) ++.B cache_dump(8) ++.B cache_repair(8) ++.B cache_restore(8) ++ ++.SH AUTHOR ++Heinz Mauelshagen diff --git a/device-mapper-persistent-data-avoid-strip.patch b/device-mapper-persistent-data-avoid-strip.patch deleted file mode 100644 index 6b3410e..0000000 --- a/device-mapper-persistent-data-avoid-strip.patch +++ /dev/null @@ -1,15 +0,0 @@ - Makefile.in | 1 - - 1 file changed, 1 deletion(-) - -diff --git a/Makefile.in b/Makefile.in -index 0aa9401..0f4f7cb 100644 ---- a/Makefile.in -+++ b/Makefile.in -@@ -262,7 +262,6 @@ MANPAGES:=$(patsubst %,man8/%.8,$(TOOLS)) - install: bin/pdata_tools $(MANPAGES) - $(INSTALL_DIR) $(BINDIR) - $(INSTALL_PROGRAM) bin/pdata_tools $(BINDIR) -- $(STRIP) $(BINDIR)/pdata_tools - ln -s -f pdata_tools $(BINDIR)/cache_check - ln -s -f pdata_tools $(BINDIR)/cache_dump - ln -s -f pdata_tools $(BINDIR)/cache_metadata_size diff --git a/device-mapper-persistent-data-document-clear-needs-check-flag.patch 
b/device-mapper-persistent-data-document-clear-needs-check-flag.patch new file mode 100644 index 0000000..6fa4163 --- /dev/null +++ b/device-mapper-persistent-data-document-clear-needs-check-flag.patch @@ -0,0 +1,15 @@ +--- thin-provisioning-tools-0.4.1/man8/thin_check.8.orig 2014-10-28 14:19:02.800420873 +0100 ++++ thin-provisioning-tools-0.4.1/man8/thin_check.8 2014-10-28 14:19:47.335924554 +0100 +@@ -25,6 +25,12 @@ + .IP "\fB\-V, \-\-version\fP" + Output version information and exit. + ++.IP "\fB\-\-clear\-needs\-check\-flag\fP" ++Clear the needs-check-flag in case the check of the thin pool metadata succeeded. ++If the metadata check failed, the flag is not cleared and a thin_repair run is ++needed to fix any issues. ++After thin_repair succeeded, you may run thin_check again. ++ + .IP "\fB\-\-super\-block\-only\fP" + Only check the superblock is present. + diff --git a/device-mapper-persistent-data.spec b/device-mapper-persistent-data.spec index 213bfff..1dcad2f 100644 --- a/device-mapper-persistent-data.spec +++ b/device-mapper-persistent-data.spec @@ -1,38 +1,21 @@ # -# Copyright (C) 2011-2017 Red Hat, Inc +# Copyright (C) 2011-2015 Red Hat, Inc # -%bcond_without check -#%%global debug_package %%{nil} - -#%%global version_suffix -rc2 -#%%global release_suffix .test3 - Summary: Device-mapper Persistent Data Tools Name: device-mapper-persistent-data -Version: 1.3.1 -Release: 1%{?dist}%{?release_suffix} -License: GPL-3.0-only AND (0BSD OR MIT OR Apache-2.0) AND Apache-2.0 AND (Apache-2.0 OR MIT) AND (Apache-2.0 WITH LLVM-exception OR Apache-2.0 OR MIT) AND BSD-3-Clause AND MIT AND (MIT OR Apache-2.0) AND (MIT OR Zlib OR Apache-2.0) AND (Unlicense OR MIT) AND (Zlib OR Apache-2.0 OR MIT) - -#ExcludeArch: %%{ix86} +Version: 0.5.5 +Release: 1%{?dist} +License: GPLv3+ +Group: System Environment/Base URL: https://github.com/jthornber/thin-provisioning-tools -#Source0: 
https://github.com/jthornber/thin-provisioning-tools/archive/thin-provisioning-tools-%%{version}.tar.gz -Source0: https://github.com/jthornber/thin-provisioning-tools/archive/v%{version}%{?version_suffix}.tar.gz -Source1: dmpd131-vendor.tar.gz +Source0: https://github.com/jthornber/thin-provisioning-tools/archive/thin-provisioning-tools-%{version}.tar.gz +# Source1: https://github.com/jthornber/thin-provisioning-tools/archive/v%{version}.tar.gz +Patch0: device-mapper-persistent-data-document-clear-needs-check-flag.patch +Patch1: device-mapper-persistent-data-add-era_restore-and-cache_metadata_size-man-pages.patch +Patch2: device-mapper-persistent-avoid-strip.patch -%if %{defined rhel} -BuildRequires: rust-toolset -%else -BuildRequires: rust-packaging -BuildRequires: rust >= 1.35 -BuildRequires: cargo -%endif -BuildRequires: make -BuildRequires: systemd-devel -BuildRequires: clang-libs -BuildRequires: glibc-static -BuildRequires: device-mapper-devel -BuildRequires: clang -#BuildRequires: gcc +BuildRequires: autoconf, expat-devel, libaio-devel, libstdc++-devel, boost-devel +Requires: expat %description thin-provisioning-tools contains check,dump,restore,repair,rmap @@ -43,372 +26,67 @@ are included and era check, dump, restore and invalidate to manage snapshot eras %prep -%autosetup -p1 -n thin-provisioning-tools-%{version}%{?version_suffix} -a1 -%cargo_prep -v vendor - -# NOTE: Following could replace Cargo.toml patching, but some macros are not working well with it -# Notably at least one of cargo_license_summary, cargo_license_summary, or cargo_vendor_manifest -#cat >> .cargo/config.toml << EOF -# -#[source."git+https://github.com/jthornber/rio?branch=master"] -#git = "https://github.com/jthornber/rio" -#branch = "master" -#replace-with = "vendored-sources" -# -#EOF - +%setup -q -n thin-provisioning-tools-%{version} +%patch0 -p1 -b .clear_needs_check_flag +%patch1 -p1 -b .man_pages +%patch2 -p1 -b .avoid_strip echo %{version}-%{release} > VERSION 
-%generate_buildrequires - %build -#make %{?_smp_mflags} V= -%cargo_build -%cargo_license_summary -%{cargo_license} > LICENSE.dependencies -%cargo_vendor_manifest +autoconf +%configure --with-optimisation= +make %{?_smp_mflags} V= %install -# TODO: Check that MANDIR is unused and remove -%make_install STRIP=true MANDIR=%{_mandir} BINDIR=%{buildroot}%{_sbindir} +make DESTDIR=%{buildroot} MANDIR=%{_mandir} install -%if %{with check} -%check -%cargo_test -#cargo test --test thin_shrink -- --nocapture --test-threads=1 -%endif +%clean %files -%doc README.md -%license COPYING -%license LICENSE.dependencies -%license cargo-vendor.txt +%doc COPYING README.md %{_mandir}/man8/cache_check.8.gz %{_mandir}/man8/cache_dump.8.gz -%{_mandir}/man8/cache_metadata_size.8.gz -%{_mandir}/man8/cache_repair.8.gz %{_mandir}/man8/cache_restore.8.gz -%{_mandir}/man8/cache_writeback.8.gz +%{_mandir}/man8/cache_repair.8.gz %{_mandir}/man8/era_check.8.gz %{_mandir}/man8/era_dump.8.gz %{_mandir}/man8/era_invalidate.8.gz -%{_mandir}/man8/era_restore.8.gz %{_mandir}/man8/thin_check.8.gz %{_mandir}/man8/thin_delta.8.gz %{_mandir}/man8/thin_dump.8.gz -%{_mandir}/man8/thin_ls.8.gz %{_mandir}/man8/thin_metadata_size.8.gz -%{_mandir}/man8/thin_migrate.8.gz -%{_mandir}/man8/thin_repair.8.gz %{_mandir}/man8/thin_restore.8.gz +%{_mandir}/man8/thin_repair.8.gz %{_mandir}/man8/thin_rmap.8.gz %{_mandir}/man8/thin_trim.8.gz -%{_mandir}/man8/thin_metadata_pack.8.gz -%{_mandir}/man8/thin_metadata_unpack.8.gz %{_sbindir}/pdata_tools %{_sbindir}/cache_check %{_sbindir}/cache_dump %{_sbindir}/cache_metadata_size -%{_sbindir}/cache_repair %{_sbindir}/cache_restore -%{_sbindir}/cache_writeback +%{_sbindir}/cache_repair %{_sbindir}/era_check %{_sbindir}/era_dump -%{_sbindir}/era_invalidate %{_sbindir}/era_restore +%{_sbindir}/era_invalidate %{_sbindir}/thin_check %{_sbindir}/thin_delta %{_sbindir}/thin_dump -%{_sbindir}/thin_ls %{_sbindir}/thin_metadata_size -%{_sbindir}/thin_migrate -%{_sbindir}/thin_repair 
%{_sbindir}/thin_restore +%{_sbindir}/thin_repair %{_sbindir}/thin_rmap %{_sbindir}/thin_trim -%{_sbindir}/thin_metadata_pack -%{_sbindir}/thin_metadata_unpack -#% {_sbindir}/thin_show_duplicates %changelog -* Tue Dec 02 2025 Marian Csontos - 1.3.1-1 -- Update to latest upstream release 1.3.1. - -* Wed Oct 22 2025 Marian Csontos - 1.3.0-1 -- Update to latest upstream release 1.3.0. - -* Thu Sep 04 2025 Marian Csontos - 1.2.1-1 -- Update to latest upstream release 1.2.1. - -* Wed Jul 23 2025 Fedora Release Engineering - 1.1.0-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_43_Mass_Rebuild - -* Thu Jan 16 2025 Fedora Release Engineering - 1.1.0-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_42_Mass_Rebuild - -* Sun Jan 12 2025 Zbigniew Jędrzejewski-Szmek - 1.1.0-2 -- Rebuilt for the bin-sbin merge (2nd attempt) - -* Mon Sep 02 2024 Marian Csontos - 1.1.0-1 -- Update to latest upstream release 1.1.0. - -* Wed Jul 17 2024 Fedora Release Engineering - 1.0.12-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_41_Mass_Rebuild - -* Tue Jul 09 2024 Zbigniew Jędrzejewski-Szmek - 1.0.12-2 -- Rebuilt for the bin-sbin merge - -* Tue Feb 27 2024 Marian Csontos - 1.0.12-1 -- Update to latest upstream release 1.0.12. - -* Tue Feb 13 2024 Marian Csontos - 1.0.11-4 -- Add licenses for statically linked libraries. - -* Tue Feb 13 2024 Marian Csontos - 1.0.11-3 -- SPDX migration - -* Thu Feb 08 2024 Yaakov Selkowitz - 1.0.11-2 -- Update Rust macro usage - -* Thu Feb 08 2024 Marian Csontos - 1.0.11-1 -- Update to latest upstream release 1.0.11. - -* Wed Jan 24 2024 Fedora Release Engineering - 1.0.9-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild - -* Fri Jan 19 2024 Fedora Release Engineering - 1.0.9-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_40_Mass_Rebuild - -* Mon Dec 11 2023 Marian Csontos - 1.0.9-1 -- Update to latest upstream release 1.0.9. 
- -* Thu Aug 31 2023 Marian Csontos - 1.0.6-2 -- Fix broken installation on ppc64le caused by incorrect ioctl call. - -* Wed Aug 09 2023 Marian Csontos - 1.0.6-1 -- Update to latest upstream release 1.0.6. - -* Thu Jul 27 2023 Marian Csontos - 1.0.5-1 -- Update to latest upstream release 1.0.5. - -* Wed Jul 19 2023 Fedora Release Engineering - 1.0.4-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_39_Mass_Rebuild - -* Tue May 30 2023 Yaakov Selkowitz - 1.0.4-2 -- Use rust-toolset in RHEL builds - -* Fri Apr 28 2023 Marian Csontos - 1.0.4-1 -- Update to latest upstream release 1.0.4. - -* Wed Mar 22 2023 Marian Csontos - 1.0.3-1 -- Update to latest upstream release 1.0.3. - -* Sun Feb 05 2023 Fabio Valentini - 0.9.0-10 -- Rebuild for fixed frame pointer compiler flags in Rust RPM macros. - -* Thu Jan 19 2023 Fedora Release Engineering - 0.9.0-9 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_38_Mass_Rebuild - -* Thu Jul 21 2022 Fedora Release Engineering - 0.9.0-8 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_37_Mass_Rebuild - -* Thu Jan 20 2022 Fedora Release Engineering - 0.9.0-7 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_36_Mass_Rebuild - -* Thu Jul 22 2021 Marian Csontos - 0.9.0-6 -- Fix rust-1.53 compilation issues. - -* Wed Jul 21 2021 Fedora Release Engineering - 0.9.0-5 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_35_Mass_Rebuild - -* Thu Jun 10 2021 Marian Csontos - 0.9.0-4 -- Fix gating test syntax. -- Fix important issues found by static analysis. 
- -* Tue Jan 26 2021 Fedora Release Engineering - 0.9.0-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_34_Mass_Rebuild - -* Mon Sep 21 2020 Marian Csontos - 0.9.0-2 -- Update crc32c to version 0.5 supporting non x86 architectures - -* Thu Sep 17 2020 Marian Csontos - 0.9.0-1 -- Update to latest upstream version -- New tools thin_metadata_pack and thin_metadata_unpack - -* Mon Jul 27 2020 Fedora Release Engineering - 0.8.5-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_33_Mass_Rebuild - -* Tue Jan 28 2020 Fedora Release Engineering - 0.8.5-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_32_Mass_Rebuild - -* Wed Jul 24 2019 Fedora Release Engineering - 0.8.5-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_31_Mass_Rebuild - -* Tue Jun 04 2019 Marian Csontos - 0.8.5-1 -- Update to latest upstream version - -* Sat May 04 2019 Marian Csontos - 0.8.1-1 -- Fix thin_repair should not require --repair option. - -* Mon Apr 29 2019 Marian Csontos - 0.8.0-1 -- Update to latest upstream version - -* Thu Jan 31 2019 Fedora Release Engineering - 0.7.6-4 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_30_Mass_Rebuild - -* Fri Jan 25 2019 Jonathan Wakely - 0.7.6-3 -- Rebuilt for Boost 1.69 - -* Thu Jul 12 2018 Fedora Release Engineering - 0.7.6-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_29_Mass_Rebuild - -* Tue Apr 24 2018 Marian Csontos - 0.7.6-1 -- Update to latest upstream version - -* Wed Feb 07 2018 Fedora Release Engineering - 0.7.5-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_28_Mass_Rebuild - -* Tue Jan 23 2018 Jonathan Wakely - 0.7.5-2 -- Rebuilt for Boost 1.66 - -* Tue Nov 14 2017 Marian Csontos - 0.7.5-1 -- Fix version 2 metadata corruption in cache_restore. - -* Fri Oct 06 2017 Marian Csontos - 0.7.3-1 -- Update to latest bugfix and documentation update release. -- *_restore tools wipe superblock as a last resort. -- Add thin_check --override-mapping-root. 
- -* Fri Sep 22 2017 Marian Csontos - 0.7.2-1 -- Update to latest upstream release including various bug fixes and new features. -- Fix segfault when dump tools are given a tiny metadata file. -- Fix -V exiting with 1. -- Fix thin_check when running on XML dump instead of binary data. -- Speed up free block searching. - -* Wed Aug 02 2017 Fedora Release Engineering - 0.7.0-0.6.rc6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Binutils_Mass_Rebuild - -* Wed Jul 26 2017 Fedora Release Engineering - 0.7.0-0.5.rc6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_27_Mass_Rebuild - -* Mon Jul 03 2017 Jonathan Wakely - 0.7.0-0.4.rc6 -- Rebuilt for Boost 1.64 - -* Tue May 23 2017 Marian Csontos - 0.7.0-0.3.rc6 -- Rebuilt for mass rebuild incorrectly tagging master to .fc26 - -* Mon May 15 2017 Fedora Release Engineering - 0.7.0-0.2.rc6 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_27_Mass_Rebuild - -* Mon Mar 27 2017 Peter Rajnoha - 0.7.0-0.1-rc6 -- Don't open devices as writeable if --clear-needs-check-flag is not set. -- Fix cache metadata format version 2 superblock packing. - -* Wed Mar 22 2017 Peter Rajnoha - 0.7.0-0.1-rc5 -- Switch to a faster implementation of crc32 used for checksums. - -* Tue Mar 21 2017 Peter Rajnoha - 0.7.0-0.1-rc4 -- Add support for cache metadata format version 2 in cache tools. - -* Thu Mar 16 2017 Peter Rajnoha - 0.7.0-0.1-rc3 -- Fix compilation warnings and further code cleanup. - -* Thu Mar 09 2017 Peter Rajnoha - 0.7.0-0.1-rc2 -- Update to latest upstream release including various bug fixes and new features. -- New thin_show_duplicates command. -- Add '--skip-mappings' and '--format custom' options to thin_dump. - -* Fri Feb 10 2017 Fedora Release Engineering - 0.6.3-3 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_26_Mass_Rebuild - -* Fri Jan 27 2017 Jonathan Wakely - 0.6.3-2 -- Rebuilt for Boost 1.63 - -* Thu Sep 22 2016 Peter Rajnoha - 0.6.3-1 -- Preallocate output file for thin_repair and thin_restore. 
- -* Mon Jul 11 2016 Peter Rajnoha - 0.6.2-1 -- Fixes providing proper use of compiler flags. - -* Mon Apr 11 2016 Peter Rajnoha - 0.6.2-0.1.rc8 -- Fixes for thin_trim. - -* Tue Mar 22 2016 Peter Rajnoha - 0.6.2-0.1.rc7 -- Fixes for thin_repair. - -* Wed Mar 09 2016 Peter Rajnoha - 0.6.2-0.1.rc6 -- Add new fields to thin_ls: MAPPED_BYTES, EXCLUSIVE_BYTES and SHARED_BYTES. - -* Thu Feb 18 2016 Peter Rajnoha - 0.6.2-0.1.rc5 -- Fixes for thin_delta. - -* Mon Feb 15 2016 Peter Rajnoha - 0.6.2-0.1.rc4 -- Fix bug in mapping comparison while using thin_delta. - -* Mon Feb 15 2016 Peter Rajnoha - 0.6.2-0.1.rc3 -- Fix recent regression in thin_repair. -- Force g++-98 dialect. - -* Mon Feb 15 2016 Peter Rajnoha - 0.6.2-0.1.rc1 -- Fix bug in thin_dump when using metadata snaps. - -* Wed Feb 03 2016 Fedora Release Engineering - 0.6.0-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_24_Mass_Rebuild - -* Wed Jan 20 2016 Peter Rajnoha - 0.6.0-1 -- New thin_ls command. - -* Wed Jan 20 2016 Peter Rajnoha - 0.5.6-1 -- era_invalidate may be run on live metadata if the --metadata-snap - option is given. - -* Fri Jan 15 2016 Jonathan Wakely - 0.5.5-3 -- Rebuilt for Boost 1.60 - -* Thu Aug 27 2015 Jonathan Wakely - 0.5.5-2 -- Rebuilt for Boost 1.59 - * Thu Aug 13 2015 Peter Rajnoha - 0.5.5-1 -- Support thin_delta's --metadata_snap option without specifying snap location. -- Update man pages to make it clearer that tools shoulnd't be run on live metadata. +- Update man pages to make it clearer that tools shouldn't be run on live metadata. - Fix bugs in the metadata reference counting for thin_check. 
- -* Wed Jul 29 2015 Fedora Release Engineering - 0.5.4-3 -- Rebuilt for https://fedoraproject.org/wiki/Changes/F23Boost159 - -* Wed Jul 22 2015 David Tardon - 0.5.4-2 -- rebuild for Boost 1.58 - -* Fri Jul 17 2015 Peter Rajnoha - 0.5.4-1 -- Fix cache_check with --clear-needs-check-flag option to - make sure metadata device is not open already by the tool - when open with O_EXCL mode is requested. - -* Fri Jul 03 2015 Peter Rajnoha - 0.5.3-1 - Tools now open the metadata device in O_EXCL mode to stop running the tools on active metadata. - -* Fri Jul 03 2015 Peter Rajnoha - 0.5.2-1 -- Fix bug in damage reporting in thin_dump and thin_check. - -* Thu Jun 25 2015 Peter Rajnoha - 0.5.1-1 -- Fix crash if tools are given a very large metadata device to restore to. - -* Mon Jun 22 2015 Peter Rajnoha - 0.5.0-1 - Add space map checking for thin_check. - Add --clear-needs-check option for cache_check. -- Update to latest upstream release. - -* Wed Jun 17 2015 Fedora Release Engineering - 0.4.2-2 -- Rebuilt for https://fedoraproject.org/wiki/Fedora_23_Mass_Rebuild - -* Mon Jun 08 2015 Peter Rajnoha - 0.4.2-1 - New thin_delta and thin_trim commands. -- Update to latest upstream release. 
- -* Sat May 02 2015 Kalev Lember - 0.4.1-4 -- Rebuilt for GCC 5 C++11 ABI change * Mon Jan 26 2015 Petr Machata - 0.4.1-3 - Rebuild for boost 1.57.0 diff --git a/sources b/sources index 4ddfdfb..0890450 100644 --- a/sources +++ b/sources @@ -1,2 +1 @@ -SHA512 (v1.3.1.tar.gz) = ff0758b21b50702568cad88522ee4c2b6b4433cec0a5f5074c9d1791c13e630e5c516601d7a68c51ac34e036091fc82fe831dbe51e6776737571d90ed266878e -SHA512 (dmpd131-vendor.tar.gz) = 0e1b8e501e330b64415c9097c94dfc1f1b43d2900a66258e40b6c8f28c51fd61247d60495f594f14550fb349ed4ad435f8959a8808fea1d363a206c5ead7db1e +6aa3d8294e66d967ac7c3d524e9835d2 thin-provisioning-tools-0.5.5.tar.gz diff --git a/tests/functions_test/Makefile b/tests/functions_test/Makefile deleted file mode 100644 index 6b14a2f..0000000 --- a/tests/functions_test/Makefile +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2006 Red Hat, Inc. All rights reserved. This copyrighted material -# is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. -# -# Author: Jakub Krysl - -# The toplevel namespace within which the test lives. -TOPLEVEL_NAMESPACE=kernel - -# The name of the package under test: -PACKAGE_NAME=storage - -# The path of the test below the package: -RELATIVE_PATH=lvm/device-mapper-persistent-data/thin - -# Version of the Test. Used with make tag. -export TESTVERSION=1.0 - -# The combined namespace of the test. 
-export TEST=/$(TOPLEVEL_NAMESPACE)/$(PACKAGE_NAME)/$(RELATIVE_PATH) - - -# A phony target is one that is not really the name of a file. -# It is just a name for some commands to be executed when you -# make an explicit request. There are two reasons to use a -# phony target: to avoid a conflict with a file of the same -# name, and to improve performance. -.PHONY: all install download clean - -# executables to be built should be added here, they will be generated on the system under test. -BUILT_FILES= - -# data files, .c files, scripts anything needed to either compile the test and/or run it. -FILES=$(METADATA) runtest.sh PURPOSE testinfo.desc dmpd_functions.py dmpd_library.py Makefile - -run: $(FILES) build - ./runtest.sh - -build: $(BUILT_FILES) - chmod a+x ./runtest.sh - -clean: - rm -f *~ *.rpm $(BUILT_FILES) - -# You may need to add other targets e.g. to build executables from source code -# Add them here: - - -# Include Common Makefile -include /usr/share/rhts/lib/rhts-make.include - -# Generate the testinfo.desc here: -$(METADATA): Makefile - @touch $(METADATA) -# Change to the test owner's name - @echo "Owner: Filip Suba " > $(METADATA) - @echo "Name: $(TEST)" >> $(METADATA) - @echo "Path: $(TEST_DIR)" >> $(METADATA) - @echo "License: GPL" >> $(METADATA) - @echo "TestVersion: $(TESTVERSION)" >> $(METADATA) - @echo "Description: Testing thin tools provided by device-mapper-persistent-data">> $(METADATA) - @echo "TestTime: 1h" >> $(METADATA) - @echo "RunFor: $(PACKAGE_NAME)" >> $(METADATA) - @echo "Requires: $(PACKAGE_NAME)" >> $(METADATA) - - rhts-lint $(METADATA) diff --git a/tests/functions_test/PURPOSE b/tests/functions_test/PURPOSE deleted file mode 100644 index 1465ab6..0000000 --- a/tests/functions_test/PURPOSE +++ /dev/null @@ -1,30 +0,0 @@ -#=========================================================================== -# -# PURPOSE file for: -# /kernel/storage/lvm/device-mapper-persistent-data/thin -# -# Description: -# Testing thin tools provided by 
device-mapper-persistent-data -# -# Bugs related: -# -# -# Author(s): -# Jakub Krysl -# -# -#=========================================================================== -# Note: -# As with some of Storage tests, at the end of the test it will check -# for errors on the logs. -# This check if implemented on /kernel/storage/include/python_modules -# LogChecker.py module -# -# To avoid Storage tests reporting error that were caused before the test -# started to run is recommended to run /kernel/storage/misc/log_checker -# just before running this test -#=========================================================================== -# This task takes optional parameters: -# -#=========================================================================== -# EndFile diff --git a/tests/functions_test/dmpd_functions.py b/tests/functions_test/dmpd_functions.py deleted file mode 100755 index 4d23f70..0000000 --- a/tests/functions_test/dmpd_functions.py +++ /dev/null @@ -1,1238 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2017 Red Hat, Inc. All rights reserved. This copyrighted material -# is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; If not, see http://www.gnu.org/licenses/. 
-# -# Author: Jakub Krysl - -import sys, os -sys.path.append(os.path.abspath("dmpd_library.py")) - -from dmpd_library import * - -def thin_init(args): - # Create thin pool with LVs - print("INFO: Initializing test case") - errors = [] - - atomic_run("Creating loopdev", - name=args["loop1"], - size=args["loop1_size"], - command=loopdev.create_loopdev, - errors=errors) - - atomic_run("Creating VG", - vg_name=args["group"], - pv_name="/dev/" + args["loop1"], - command=lvm.vg_create, - errors=errors) - - atomic_run("Creating thin pool", - vg_name=args["group"], - lv_name=args["pool"], - options=["-T", "-L 1500"], - command=lvm.lv_create, - errors=errors) - - # create few LVs to increase transaction ID and be able to do thin_delta - for i in range(args["number of vols"]): - atomic_run("Creating thin LV No. %s" % i, - vg_name=args["group"] + "/" + args["pool"], - lv_name=args["vol"] + str(i), - options=["-T", "-V 300"], - command=lvm.lv_create, - errors=errors) - - atomic_run("Creating filesystem on LV No. %s" % i, - vg_name=args["group"], - lv_name=args["vol"] + str(i), - command=create_filesystem, - errors=errors) - - atomic_run("Creating metadata snapshot", - lv_name=args["pool"], - vg_name=args["group"], - command=metadata_snapshot, - errors=errors) - - atomic_run("Creating swap LV", - vg_name=args["group"], - lv_name=args["swap"], - options=["-L 300"], - command=lvm.lv_create, - errors=errors) - - atomic_run("Deactivating swap", - lv_name=args["swap"], - vg_name=args["group"], - command=lvm.lv_deactivate, - errors=errors) - - atomic_run("Deactivating pool", - lv_name=args["pool"], - vg_name=args["group"], - command=lvm.lv_deactivate, - errors=errors) - - for i in range(args["number of vols"]): - atomic_run("Deactivating thin LV No. 
%s" % i, - lv_name=args["vol"] + str(i), - vg_name=args["group"], - command=lvm.lv_deactivate, - errors=errors) - - atomic_run("Swapping metadata", - vg_name=args["group"], - lv_name=args["swap"], - options=["-y", "--thinpool " + args["group"] + "/" + args["pool"], - "--poolmetadata "], - command=lvm.lv_convert, - errors=errors) - - atomic_run("Activating swap", - lv_name=args["swap"], - vg_name=args["group"], - command=lvm.lv_activate, - errors=errors) - - if len(errors) == 0: - TC.tpass("Initialization passed") - else: - TC.tfail("Initialization failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - return 1 - return 0 - - -def thin_clean(args): - print("INFO: Cleaning up") - errors = [] - - # restoring metadata device in case it is corrupted - atomic_run("Repairing metadata device", - source_file="/tmp/metadata", - target_vg=args["group"], - target_lv=args["swap"], - quiet=True, - command=dmpd.thin_restore, - errors=errors) - - # thinpool got activated after checking its metadata to get bad checksum - atomic_run("Deactivating pool", - lv_name=args["pool"], - vg_name=args["group"], - command=lvm.lv_deactivate, - errors=errors) - - atomic_run("Deactivating swap", - lv_name=args["swap"], - vg_name=args["group"], - command=lvm.lv_deactivate, - errors=errors) - - atomic_run("Swapping back metadata", - vg_name=args["group"], - lv_name=args["swap"], - options=["-y", "--thinpool " + args["group"] + "/" + args["pool"], - "--poolmetadata "], - command=lvm.lv_convert, - errors=errors) - - atomic_run("Removing swap", - lv_name=args["swap"], - vg_name=args["group"], - command=lvm.lv_remove, - errors=errors) - - atomic_run("Removing thinpool", - lv_name=args["pool"], - vg_name=args["group"], - command=lvm.lv_remove, - errors=errors) - - atomic_run("Removing VG", - vg_name=args["group"], - force=True, - command=lvm.vg_remove, - errors=errors) - - atomic_run("Deleting loopdev", - name=args["loop1"], - command=loopdev.delete_loopdev, - errors=errors) 
- - atomic_run("Deleting metadata file", - cmd="rm -f /tmp/metadata", - command=run, - errors=errors) - - atomic_run("Deleting repair metadata file", - cmd="rm -f /tmp/metadata_repair", - command=run, - errors=errors) - - atomic_run("Deleting snapshot metadata file", - cmd="rm -f /tmp/metadata_snap", - command=run, - errors=errors) - - if len(errors) == 0: - TC.tpass("Cleanup passed") - else: - TC.tfail("Cleanup failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - print(errors) - return 1 - return 0 - -def thin_test(args): - print("\n#######################################\n") - print( - "INFO: Testing thin tools runtime provided by device_mapper_persistent_data") - - errors = [] - - atomic_run("Checking metadata", - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.thin_check, - errors=errors) - - atomic_run("Checking metadata with few paramethers", - source_vg=args["group"], - source_lv=args["swap"], - super_block_only=True, - skip_mappings=True, - ignore_non_fatal_errors=True, - command=dmpd.thin_check, - errors=errors) - - atomic_run("Listing information about thin LVs", - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.thin_ls, - errors=errors) - - atomic_run("Listing information about thin LVs without headers", - source_vg=args["group"], - source_lv=args["swap"], - no_headers=True, - command=dmpd.thin_ls, - errors=errors) - - # Not yet in Fedora 26, shoud be in F27 - #atomic_run("Dumping metadata to standard output without mappings", - # formatting="human_readable", - # source_vg=args["group"], - # source_lv=args["swap"], - # skip_mappings=True, - # command=dmpd.thin_dump, - # errors=errors) - - atomic_run("Dumping metadata to standard output", - formatting="human_readable", - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.thin_dump, - errors=errors) - - atomic_run("Dumping metadata to standard output from snapshot", - formatting="human_readable", - source_vg=args["group"], - 
source_lv=args["swap"], - snapshot=True, - command=dmpd.thin_dump, - errors=errors) - - # Not yet in Fedora 26, shoud be in F27 - #atomic_run("Dumping metadata with dev-id", - # formatting="human_readable", - # source_vg=args["group"], - # source_lv=args["swap"], - # dev_id=args["number of vols"] - 1, - # command=dmpd.thin_dump, - # errors=errors) - - atomic_run("Calculating metadata size for pool of 64k blocks and 100M size", - cmd="thin_metadata_size -b64k -s100m -m1 -um", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for pool of 64k blocks and 100M size", - cmd="thin_metadata_size -b64k -s100m -m1 -um -n", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for pool of 64k blocks and 100M size", - cmd="thin_metadata_size -b64k -s100m -m1 -um -nlong", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for pool of 64k blocks and 100M size", - cmd="thin_metadata_size -b64k -s100m -m1 -um -nshort", - command=run, - errors=errors) - - atomic_run("Outputting reverse map of metadata device with negative number in region", - False, - source_vg=args["group"], - source_lv=args["swap"], - region="0..-1", - command=dmpd.thin_rmap, - errors=errors) - - # this fails now and it should not - # atomic_run("Discarding free space of pool", - # target_vg=args["group"], - # target_lv=args["swap"], - # command=dmpd.thin_trim, - # errors=errors) - - atomic_run("Dumping metadata to file", - formatting="xml", - source_vg=args["group"], - source_lv=args["swap"], - repair=True, - output="/tmp/metadata", - command=dmpd.thin_dump, - errors=errors) - - atomic_run("Dumping metadata to file from snapshot", - formatting="xml", - source_vg=args["group"], - source_lv=args["swap"], - snapshot=True, - output="/tmp/metadata_snap", - command=dmpd.thin_dump, - errors=errors) - - atomic_run("Getting differences between thin LVs", - source_vg=args["group"], - source_lv=args["swap"], - thin1=1, - thin2=args["number of vols"] - 1, - 
snapshot=True, - command=dmpd.thin_delta, - errors=errors) - - atomic_run("Getting differences between thin LVs with --verbose", - source_vg=args["group"], - source_lv=args["swap"], - thin1=1, - thin2=args["number of vols"] - 1, - verbosity=True, - snapshot=True, - command=dmpd.thin_delta, - errors=errors) - - atomic_run("Getting differences between the same LV", - source_vg=args["group"], - source_lv=args["swap"], - thin1=1, - thin2=1, - snapshot=True, - command=dmpd.thin_delta, - errors=errors) - - atomic_run("Getting differences between the same LV with --verbose", - source_vg=args["group"], - source_lv=args["swap"], - thin1=1, - thin2=1, - verbosity=True, - snapshot=True, - command=dmpd.thin_delta, - errors=errors) - - atomic_run("Listing metadata output from snapshot", - source_vg=args["group"], - source_lv=args["swap"], - snapshot=True, - command=dmpd.thin_ls, - errors=errors) - - # Need to run everything on snapshot before this as thin_restore removes the metadata snapshot - # This should work but is not working due to a bug - #atomic_run("Restoring metadata", - # source_file="/tmp/metadata_snap", - # target_vg=args["group"], - # target_lv=args["swap"], - # command=dmpd.thin_restore, - # errors=errors) - - atomic_run("Restoring metadata", - source_file="/tmp/metadata", - target_vg=args["group"], - target_lv=args["swap"], - command=dmpd.thin_restore, - errors=errors) - - # Repairing from non-binary file leads to segmentation fault - #atomic_run("Repairing metadata from file", - # source_file="/tmp/metadata", - # target_vg=args["group"], - # target_lv=args["swap"], - # command=dmpd.thin_repair, - # errors=errors) - - atomic_run("Repairing metadata to file", - target_file="/tmp/metadata_repair", - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.thin_repair, - errors=errors) - - atomic_run("Repairing metadata from file", - source_file="/tmp/metadata_repair", - target_vg=args["group"], - target_lv=args["swap"], - command=dmpd.thin_repair, - 
errors=errors) - - atomic_run("Checking metadata", - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.thin_check, - errors=errors) - - print("\n#######################################\n") - - if len(errors) == 0: - TC.tpass("Testing thin tools of device_mapper_persistent_data passed") - else: - TC.tfail("Testing thin tools of device_mapper_persistent_data failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - return 1 - return 0 - - -def thin_errors_test(args): - print("\n#######################################\n") - print( - "INFO: Testing thin tools errors provided by device_mapper_persistent_data") - - errors = [] - - # "thin_show_duplicates" does not work yet - functions = ["thin_check", "thin_delta", "thin_dump", "thin_ls", "thin_metadata_size", "thin_repair", - "thin_restore", "thin_rmap", "thin_trim"] - - # Sanity to check for missing input - for func in functions: - atomic_run("Validating missing input", - False, - cmd=func, - command=run, - errors=errors) - - # Sanity to check with wrong input - for func in functions: - atomic_run("Validating wrong input", - False, - cmd=func + " wrong", - command=run, - errors=errors) - - # Sanity to check with wrong option - for func in functions: - atomic_run("Validating wrong option", - False, - cmd=func + " -wrong", - command=run, - errors=errors) - - # Sanity to check present functions with -h - for func in functions: - atomic_run("Checking help of command", - cmd=func, - command=dmpd.get_help, - errors=errors) - - # Sanity to check present functions with -V - for func in functions: - # thin_metadata_size has wrong return value - if func == "thin_metadata_size": - continue - atomic_run("Checking version of command", - cmd=func, - command=dmpd.get_version, - errors=errors) - - atomic_run("Checking original pool metadata, should fail", - False, - source_vg=args["group"], - source_lv=args["pool"], - command=dmpd.thin_check, - errors=errors) - - atomic_run("Listing information 
about thin LVs", - False, - cmd="thin_ls /dev/mapper/%s-%s --format \"WRONG\"" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -b 64", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -b 64 -s 128", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -b 25 -s 128 -m 10", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -b 128 -s 64 -m 10", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -u h", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -n -n", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -nlongshort", - command=run, - errors=errors) - - atomic_run("Checking thin_metadata_size inputs", - False, - cmd="thin_metadata_size -b 128 -b 64", - command=run, - errors=errors) - - atomic_run("Repairing metadata without output", - False, - cmd="thin_repair -i /tmp/metadata_repair", - command=run, - errors=errors) - - atomic_run("Dumping metadata with wrong custom format", - False, - cmd="thin_dump /dev/mapper/%s-%s --format custom=wrong" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Dumping metadata with unknown format", - False, - cmd="thin_dump /dev/mapper/%s-%s --format wrong" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Dumping metadata with wrong dev-id", - False, - cmd="thin_dump /dev/mapper/%s-%s --dev-id wrong" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Repairing metadata to produce 'output file does not exist' error", - False, - 
cmd="thin_repair -i /dev/mapper/%s-%s -o /tmp/wrong.wrong" % - (args['group'], args['swap']), - command=run, - errors=errors) - - atomic_run("Repairing metadata to produce 'output file too small' error", - False, - cmd="thin_repair -i /tmp/metadata -o /tmp/metadata", - command=run, - errors=errors) - - # This does not fail now due to a bug - #atomic_run("Outputting reverse map of metadata device, should fail without region", - # cmd="thin_rmap /dev/mapper/%s-%s" % (args["group"], args["swap"]), - # command=run, - # errors=errors) - - atomic_run("Outputting reverse map of metadata device with wrong region", - False, - cmd="thin_rmap /dev/mapper/%s-%s --region 0..0" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Outputting reverse map of metadata device with wrong region", - False, - cmd="thin_rmap /dev/mapper/%s-%s --region 0...1" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Outputting reverse map of metadata device with wrong region", - False, - cmd="thin_rmap /dev/mapper/%s-%s --region 00" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Outputting reverse map of metadata device with wrong device", - False, - cmd="thin_rmap --region 0..-1 /tmp/wrong.wrong", - command=run, - errors=errors) - - # Reverse mapping from bad file leads to segmentation fault - #atomic_run("Outputting reverse map of metadata device with wrong device", - # False, - # cmd="thin_rmap --region 0..-1 /tmp/metadata", - # command=run, - # errors=errors) - - atomic_run("Getting differences with thin1 ID out of range", - False, - source_vg=args["group"], - source_lv=args["swap"], - thin1=-1, - thin2=args["number of vols"] - 1, - command=dmpd.thin_delta, - errors=errors) - - atomic_run("Getting differences with thin2 ID out of range", - False, - source_vg=args["group"], - source_lv=args["swap"], - thin1=1, - thin2=args["number of vols"] + 1, - command=dmpd.thin_delta, - errors=errors) - - 
atomic_run("Restoring metadata without output", - False, - cmd="thin_restore -i /tmp/metadata", - command=run, - errors=errors) - - atomic_run("Restoring metadata with wrong options", - False, - cmd="thin_restore -i /tmp/metadata -o /dev/mapper/%s-%s --wrong test" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Restoring metadata with wrong source", - False, - cmd="thin_restore -i /tmp/wrong.wrong -o /dev/mapper/%s-%s" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Getting differences without thin2", - False, - cmd="thin_delta --thin1 1 /dev/mapper/%s-%s" % - (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Corrupting metadata on device", - cmd="echo 'nothing' >> /dev/mapper/%s-%s" % - (args['group'], args['swap']), - command=run, - errors=errors) - - atomic_run("Trying to fail while repairing metadata", - False, - source_vg=args['group'], - source_lv=args['swap'], - target_file="/tmp/metadata_repair", - command=dmpd.thin_repair, - errors=errors) - - atomic_run("Trying to fail listing volumes", - False, - source_vg=args['group'], - source_lv=args['swap'], - command=dmpd.thin_ls, - errors=errors) - - atomic_run("Trying to fail while checking metadata", - False, - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.thin_check, - errors=errors) - - atomic_run("Trying to fail while dumping metadata from snapshot", - False, - formatting="human_readable", - source_vg=args["group"], - source_lv=args["swap"], - snapshot=True, - command=dmpd.thin_dump, - errors=errors) - - # restoring metadata device after corrupting it - atomic_run("Repairing metadata device", - source_file="/tmp/metadata", - target_vg=args["group"], - target_lv=args["swap"], - quiet=True, - command=dmpd.thin_restore, - errors=errors) - - print("\n#######################################\n") - - if len(errors) == 0: - TC.tpass("Testing thin tools errors of device_mapper_persistent_data passed") - 
else: - TC.tfail("Testing thin tools errors of device_mapper_persistent_data failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - return 1 - return 0 - - -def cache_init(args): - print("INFO: Initializing test case") - errors = [] - - atomic_run("Creating loopdev 1 - 'fast' device", - name=args["loop1"], - size=args["loop1_size"], - command=loopdev.create_loopdev, - errors=errors) - - atomic_run("Creating loopdev 2 - 'slow' device", - name=args["loop2"], - size=args["loop2_size"], - command=loopdev.create_loopdev, - errors=errors) - - atomic_run("Creating VG", - vg_name=args["group"], - pv_name="/dev/" + args["loop1"] + - " /dev/" + args["loop2"], - command=lvm.vg_create, - errors=errors) - - atomic_run("Creating cache metadata volume", - vg_name=args["group"] + " /dev/" + args["loop1"], - lv_name=args["meta"], - options=["-L 12"], - command=lvm.lv_create, - errors=errors) - - atomic_run("Creating origin volume", - vg_name=args["group"] + " /dev/" + args["loop2"], - lv_name=args["origin"], - options=["-L 2G"], - command=lvm.lv_create, - errors=errors) - - atomic_run("Creating cache data volume", - vg_name=args["group"] + " /dev/" + args["loop1"], - lv_name=args["data"], - options=["-L 1G"], - command=lvm.lv_create, - errors=errors) - - atomic_run("Creating cache pool", - vg_name=args["group"], - lv_name=args["data"], - options=["-y --type cache-pool", "--cachemode writeback", "--poolmetadata %s/%s" % - (args["group"], args["meta"])], - command=lvm.lv_convert, - errors=errors) - - atomic_run("Creating cache logical volume", - vg_name=args["group"], - lv_name=args["origin"], - options=["-y", "--type cache", "--cachepool %s/%s" % - (args["group"], args["data"])], - command=lvm.lv_convert, - errors=errors) - - atomic_run("Creating filesystem on cache logical volume", - vg_name=args["group"], - lv_name=args["origin"], - command=create_filesystem, - errors=errors) - - atomic_run("Splitting cache logical volume", - vg_name=args["group"], - 
lv_name=args["origin"], - options=["-y", "--splitcache"], - command=lvm.lv_convert, - errors=errors) - - atomic_run("Creating swap LV", - vg_name=args["group"], - lv_name=args["swap"], - options=["-L 100"], - command=lvm.lv_create, - errors=errors) - - atomic_run("Swapping metadata", - vg_name=args["group"], - lv_name=args["swap"], - options=["-y", "--cachepool " + args["group"] + "/" + args["data"], - "--poolmetadata "], - command=lvm.lv_convert, - errors=errors) - - atomic_run("Activating swap", - lv_name=args["swap"], - vg_name=args["group"], - command=lvm.lv_activate, - errors=errors) - - if len(errors) == 0: - TC.tpass("Initialization passed") - else: - TC.tfail("Initialization failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - return 1 - return 0 - - -def cache_clean(args): - print("INFO: Cleaning up") - errors = [] - - atomic_run("Removing VG", - vg_name=args["group"], - force=True, - command=lvm.vg_remove, - errors=errors) - - atomic_run("Deleting loopdev loop1", - name=args["loop1"], - command=loopdev.delete_loopdev, - errors=errors) - - atomic_run("Deleting loopdev loop2", - name=args["loop2"], - command=loopdev.delete_loopdev, - errors=errors) - - atomic_run("Deleting metadata file", - cmd="rm -f /tmp/metadata", - command=run, - errors=errors) - - atomic_run("Deleting repair metadata file", - cmd="rm -f /tmp/metadata_repair", - command=run, - errors=errors) - - if len(errors) == 0: - TC.tpass("Cleanup passed") - else: - TC.tfail("Cleanup failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - print(errors) - return 1 - return 0 - - -def cache_test(args): - print("\n#######################################\n") - print("INFO: Testing cache tools runtime provided by device_mapper_persistent_data") - - errors = [] - - atomic_run("Checking metadata", - source_lv=args["swap"], - source_vg=args["group"], - command=dmpd.cache_check, - errors=errors) - - atomic_run("Checking metadata with 
clear-need-check-flag", - source_lv=args["swap"], - source_vg=args["group"], - clear_needs_check_flag=True, - command=dmpd.cache_check, - errors=errors) - - atomic_run("Checking metadata with super-block-only", - source_lv=args["swap"], - source_vg=args["group"], - super_block_only=True, - command=dmpd.cache_check, - errors=errors) - - atomic_run("Checking metadata with few paramethers", - source_vg=args["group"], - source_lv=args["swap"], - skip_discards=True, - skip_mappings=True, - skip_hints=True, - command=dmpd.cache_check, - errors=errors) - - atomic_run("Dumping metadata to standard output", - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.cache_dump, - errors=errors) - - atomic_run("Calculating metadata size for cache of 64 blocks and 128 size", - cmd="cache_metadata_size --block-size 64 --device-size 128", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for cache of 128 nr blocks", - cmd="cache_metadata_size --nr-blocks 128 --max-hint-width 4", - command=run, - errors=errors) - - atomic_run("Dumping metadata to file", - source_vg=args["group"], - source_lv=args["swap"], - repair=True, - output="/tmp/metadata", - command=dmpd.cache_dump, - errors=errors) - - # Not yet in Fedora 26, shoud be in F27 - #atomic_run("Checking metadata file", - # source_file="/tmp/metadata", - # command=dmpd.cache_check, - # errors=errors) - # - #atomic_run("Restoring metadata with options", - # source_file="/tmp/metadata", - # target_vg=args["group"], - # target_lv=args["swap"], - # quiet=True, - # override_metadata_version=1, - # metadata_version=1, - # command=dmpd.cache_restore, - # errors=errors) - - atomic_run("Restoring metadata from file", - source_file="/tmp/metadata", - target_vg=args["group"], - target_lv=args["swap"], - command=dmpd.cache_restore, - errors=errors) - - atomic_run("Repairing metadata to file", - target_file="/tmp/metadata_repair", - source_vg=args["group"], - source_lv=args["swap"], - 
command=dmpd.cache_repair, - errors=errors) - - atomic_run("Repairing metadata from file", - source_file="/tmp/metadata_repair", - target_vg=args["group"], - target_lv=args["swap"], - command=dmpd.cache_repair, - errors=errors) - - atomic_run("Simulating TTY for cache_restore", - cmd="script --return -c 'cache_restore -i /tmp/metadata -o /dev/mapper/%s-%s' /dev/null" % - (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Checking metadata", - source_vg=args["group"], - source_lv=args["swap"], - quiet=True, - command=dmpd.cache_check, - errors=errors) - - print("\n#######################################\n") - - if len(errors) == 0: - TC.tpass("Testing cache tools of device_mapper_persistent_data passed") - else: - TC.tfail("Testing cache tools of device_mapper_persistent_data failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - return 1 - return 0 - - -def cache_errors_test(args): - print("\n#######################################\n") - print("INFO: Testing cache tools errors provided by device_mapper_persistent_data") - - errors = [] - - functions = ["cache_check", "cache_dump", "cache_metadata_size", "cache_repair", "cache_restore"] - - # Sanity to check for missing input - for func in functions: - atomic_run("Validating missing input", - False, - cmd=func, - command=run, - errors=errors) - - # Sanity to check with wrong input - for func in functions: - atomic_run("Validating wrong input", - False, - cmd=func + " wrong", - command=run, - errors=errors) - - # Sanity to check with wrong option - for func in functions: - atomic_run("Validating wrong option", - False, - cmd=func + " -wrong", - command=run, - errors=errors) - - # Sanity to check with wrong -- option - for func in functions: - atomic_run("Validating wrong -- option", - False, - cmd=func + " --wrong", - command=run, - errors=errors) - - # Sanity to check present functions with -h - for func in functions: - atomic_run("Checking help of command", - 
cmd="%s" % func, - command=dmpd.get_help, - errors=errors) - - # Sanity to check present functions with -V - for func in functions: - atomic_run("Checking version of command", - cmd="%s" % func, - command=dmpd.get_version, - errors=errors) - - atomic_run("Checking metadata of non-metadata file", - False, - cmd="cache_check README", - command=run, - errors=errors) - - atomic_run("Checking metadata of non-existent file", - False, - cmd="cache_check WRONG", - command=run, - errors=errors) - - atomic_run("Checking metadata of non-regular file", - False, - cmd="cache_check /dev/mapper/control", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for cache of 64 blocks", - False, - cmd="cache_metadata_size --block-size 64", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for cache of 128 size", - False, - cmd="cache_metadata_size --device-size 128", - command=run, - errors=errors) - - atomic_run("Calculating metadata size for cache of 64 blocks and 128 size and 128 nr blocks", - False, - cmd="cache_metadata_size --block-size 64 --device-size 128 --nr-blocks 128", - command=run, - errors=errors) - - atomic_run("Repairing metadata without output", - False, - cmd="cache_repair -i /tmp/metadata_repair", - command=run, - errors=errors) - - atomic_run("Restoring metadata with wrong options", - False, - cmd="cache_restore -i /tmp/metadata -o /dev/mapper/%s-%s --wrong test" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Restoring metadata with wrong metadata version", - False, - source_file="/tmp/metadata", - target_vg=args["group"], - target_lv=args["swap"], - metadata_version=12445, - command=dmpd.cache_restore, - errors=errors) - - atomic_run("Restoring metadata with wrong source", - False, - cmd="cache_restore -i /tmp/wrong.wrong -o /dev/mapper/%s-%s" % (args["group"], args["swap"]), - command=run, - errors=errors) - - atomic_run("Restoring metadata with bit source", - False, - 
source_file="/tmp/metadata_repair", - target_vg=args["group"], - target_lv=args["swap"], - command=dmpd.cache_restore, - errors=errors) - - atomic_run("Restoring metadata without output", - False, - cmd="cache_restore -i /tmp/metadata", - command=run, - errors=errors) - - # I am not able to run cache_restore with --omit-clean-shutdown successfully - #atomic_run("Restoring metadata with options", - # source_file="/tmp/metadata", - # target_vg=args["group"], - # target_lv=args["swap"], - # omit_clean_shutdown=True, - # command=dmpd.cache_restore, - # errors=errors) - - # This fails in Fedora 26, should work in F27 - #atomic_run("Checking metadata", - # source_vg=args["group"], - # source_lv=args["swap"], - # command=dmpd.cache_check, - # errors=errors) - - #FIXME: Find other way to corrupt metadata, this exploits a bug - """ - atomic_run("Corrupting mappings on metadata device", - False, - source_file="Makefile", - target_vg=args["group"], - target_lv=args["swap"], - command=dmpd.cache_restore, - errors=errors) - - atomic_run("Checking corrupted mappings", - False, - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.cache_check, - errors=errors) - - atomic_run("Trying to fail while dumping metadata", - False, - source_vg=args['group'], - source_lv=args['swap'], - output="/tmp/metadata", - command=dmpd.cache_dump, - errors=errors) - - atomic_run("Repairing metadata", - source_vg=args['group'], - source_lv=args['swap'], - target_file="/tmp/metadata_repair", - command=dmpd.cache_repair, - errors=errors)""" - - atomic_run("Corrupting metadata on device", - cmd="echo 'nothing' >> /dev/mapper/%s-%s" % (args['group'], args['swap']), - command=run, - errors=errors) - - atomic_run("Trying to fail while repairing metadata", - False, - source_vg=args['group'], - source_lv=args['swap'], - target_file="/tmp/metadata_repair", - command=dmpd.cache_repair, - errors=errors) - - atomic_run("Trying to fail while dumping metadata", - False, - source_vg=args['group'], - 
source_lv=args['swap'], - output="/tmp/metadata", - command=dmpd.cache_dump, - errors=errors) - - atomic_run("Checking corrupted metadata", - False, - source_vg=args["group"], - source_lv=args["swap"], - command=dmpd.cache_check, - errors=errors) - - - print("\n#######################################\n") - - if len(errors) == 0: - TC.tpass("Testing cache tools errors of device_mapper_persistent_data passed") - else: - TC.tfail("Testing cache tools errors of device_mapper_persistent_data failed with following errors: \n\t'" + - "\n\t ".join([str(i) for i in errors])) - return 1 - return 0 - - -def main(): - # Initialize Test Case - global TC - TC = TestClass() - - # Initialize library classes - global loopdev - global lvm - global dmpd - loopdev = LoopDev() - lvm = LVM() - dmpd = DMPD() - - args = {"loop1": "loop1", - "loop1_size": 2048, - "loop2": "loop2", - "loop2_size": 4128, - "group": "vgtest", - "origin": "origin", - "data": "cache_data", - "meta": "cache_meta", - "pool": "pool", - "vol": "thinvol", - "number of vols": 10, - "swap": "swapvol"} - - # Initialization - install_package("lvm2") - install_package("device-mapper-persistent-data") - - # Tests for thin tools provided by device-mapper-persistent-data - thin_init(args) - thin_test(args) - thin_errors_test(args) - thin_clean(args) - - # Tests for cache tools provided by device-mapper-persistent-data - cache_init(args) - cache_test(args) - cache_errors_test(args) - cache_clean(args) - - if not TC.tend(): - print("FAIL: test failed") - sys.exit(1) - - print("PASS: Test pass") - sys.exit(0) - - -if __name__ == "__main__": - main() diff --git a/tests/functions_test/dmpd_library.py b/tests/functions_test/dmpd_library.py deleted file mode 100755 index 1cf44df..0000000 --- a/tests/functions_test/dmpd_library.py +++ /dev/null @@ -1,2373 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2017 Red Hat, Inc. All rights reserved. 
This copyrighted material -# is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; If not, see http://www.gnu.org/licenses/. -# -# Author: Jakub Krysl - -"""dmpd_library.py: Complete library providing functionality for device-mapper-persistent-data upstream test.""" - -from __future__ import print_function - -import platform -from os.path import expanduser -import re #regex -import sys, os -import subprocess -import time -import fileinput -# TODO: Is this really necessary? Unlikely we will run into python2 in rawhide -# again... - - -def _print(string): - module_name = __name__ - string = re.sub("DEBUG:", "DEBUG:("+ module_name + ") ", string) - string = re.sub("FAIL:", "FAIL:("+ module_name + ") ", string) - string = re.sub("FATAL:", "FATAL:("+ module_name + ") ", string) - string = re.sub("WARN:", "WARN:("+ module_name + ") ", string) - print(string) - return - - -def run(cmd, return_output=False, verbose=True, force_flush=False): - """Run a command line specified as cmd. - The arguments are: - \tcmd (str): Command to be executed - \tverbose: if we should show command output or not - \tforce_flush: if we want to show command output while command is being executed. eg. hba_test run - \treturn_output (Boolean): Set to True if want output result to be returned as tuple. 
Default is False - Returns: - \tint: Return code of the command executed - \tstr: As tuple of return code if return_output is set to True - """ - #by default print command output - if (verbose == True): - #Append time information to command - date = "date \"+%Y-%m-%d %H:%M:%S\"" - p = subprocess.Popen(date, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - stdout, stderr = p.communicate() - stdout = stdout.decode('ascii', 'ignore').rstrip("\n") - _print("INFO: [%s] Running: '%s'..." % (stdout, cmd)) - - #enabling shell=True, because was the only way I found to run command with '|' - if not force_flush: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - stdout, stderr = p.communicate() - sys.stdout.flush() - sys.stderr.flush() - else: - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, shell=True) - stdout = "" - stderr = "" - while p.poll() is None: - new_data = p.stdout.readline() - stdout += new_data - if verbose: - sys.stdout.write(new_data) - sys.stdout.flush() - - retcode = p.returncode - - output = stdout.decode('ascii', 'ignore') + stderr.decode('ascii', 'ignore') - - #remove new line from last line - output = output.rstrip() - - #by default print command output - #if force_flush we already printed it - if verbose == True and not force_flush: - print(output) - - if return_output == False: - return retcode - else: - return retcode, output - - -def atomic_run(message, success=True, return_output=False, **kwargs): - errors = kwargs.pop("errors") - command = kwargs.pop("command") - params = [] - for a in kwargs: - params.append(str(a) + " = " + str(kwargs[a])) - params = ", ".join([str(i) for i in params]) - _print("\nINFO: " + message + " with params %s" % params) - if return_output: - kwargs["return_output"] = True - ret, output = command(**kwargs) - else: - ret = command(**kwargs) - expected_break = {True: False, False: True} - print("(Returned, Expected)") - if command == run: - 
def sleep(duration):
    """
    It basically call sys.sleep, but as stdout and stderr can be buffered
    We flush them before sleep
    """
    sys.stdout.flush()
    sys.stderr.flush()
    time.sleep(duration)
    return


def mkdir(new_dir):
    """Create *new_dir* (mkdir -p).  Returns True on success or if it already exists."""
    if os.path.isdir(new_dir):
        _print("INFO: %s already exist" % new_dir)
        return True
    cmd = "mkdir -p %s" % new_dir
    retcode, output = run(cmd, return_output=True, verbose=False)
    if retcode != 0:
        # BUG FIX: message said "could create directory" (missing "not")
        _print("WARN: could not create directory %s" % new_dir)
        print(output)
        return False
    return True


def dist_release():
    """
    Find out the release number of Linux distribution.
    NOTE(review): platform.release() actually returns the *kernel* release
    string (e.g. "5.14.0-...") — confirm this is the intended value.
    """
    dist = platform.release()
    if not dist:
        _print("WARN: dist_release() - Could not determine dist release")
        return None
    return dist


def dist_ver():
    """
    Check the Linux distribution version.
    Returns the leading integer of dist_release(), or None.
    """
    release = dist_release()
    if not release:
        return None
    # BUG FIX: the dot was unescaped ("(\d+).\d+"), so it matched any character
    m = re.match(r"(\d+)\.\d+", release)
    if m:
        return int(m.group(1))

    # See if it is only digits, in that case return it
    m = re.match(r"(\d+)", release)
    if m:
        return int(m.group(1))

    _print("WARN: dist_ver() - Invalid release output %s" % release)
    return None
- """ - release = dist_release() - if not release: - return None - m = re.match("(\d+).\d+", release) - if m: - return int(m.group(1)) - - # See if it is only digits, in that case return it - m = re.match("(\d+)", release) - if m: - return int(m.group(1)) - - _print("WARN: dist_ver() - Invalid release output %s" % release) - return None - - -def show_sys_info(): - print("### Kernel Info: ###") - ret, kernel = run ("uname -a", return_output=True, verbose=False) - ret, taint_val = run("cat /proc/sys/kernel/tainted", return_output=True, verbose=False) - print("Kernel version: %s" % kernel) - print("Kernel tainted: %s" % taint_val) - print("### IP settings: ###") - run("ip a") - - if run("rpm -q device-mapper-multipath") == 0: - #Abort test execution if multipath is not working well - if run("multipath -l 2>/dev/null") != 0: - print("WARN: Multipath is not configured correctly") - return - #Flush all unused multipath devices before starting the test - run("multipath -F") - run("multipath -r") - - -def get_free_space(path): - """ - Get free space of a path. 
def size_human_2_size_bytes(size_human):
    """
    Usage
        size_human_2_size_bytes(size_human)
    Purpose
        Convert human readable stander size to B
    Parameter
        size_human # like '1KiB'
    Returns
        size_bytes # like 1024
    Notes
        Plain numbers (no unit) and the plain 'B' unit are returned as *str*
        for backward compatibility; binary units return an int.
    """
    if not size_human:
        return None

    # make sure size_human is a string, could be only numbers, for example
    size_human = str(size_human)
    if not re.search(r"\d", size_human):
        # Need at least 1 digit
        return None

    # BUG FIX: 'Pi' was missing from the alternation even though the code
    # below accepted it as a valid unit, so "1PiB" was rejected.
    size_human_regex = re.compile(r"([\-0-9\.]+)(Ki|Mi|Gi|Ti|Pi|Ei|Zi){0,1}B$")
    m = size_human_regex.match(size_human)
    if not m:
        if re.match(r"^\d+$", size_human):
            # Assume size is already in bytes
            return size_human
        _print("WARN: '%s' is an invalid human size format" % size_human)
        return None

    unit = m.group(2)
    if not unit:
        # plain bytes: cut any fraction if was given, as it is not valid
        # (BUG FIX: a negative fractional value used to crash int())
        whole = re.match(r"(-?\d+)", m.group(1))
        return str(int(whole.group(1)))

    # BUG FIX: the old digit-shuffling of the fraction was only correct for
    # single-digit fractions ("1.25KiB" returned 3584 instead of 1280).
    # Scale the full decimal value by the unit's power of 1024 instead.
    exponents = {'Ki': 1, 'Mi': 2, 'Gi': 3, 'Ti': 4, 'Pi': 5, 'Ei': 6, 'Zi': 7}
    return int(float(m.group(1)) * 1024 ** exponents[unit])
def size_bytes_2_size_human(num):
    """Convert a byte count to a human readable string like '1KiB'.
    Decimals are cut, not rounded.  Returns None for None/empty input."""
    # BUG FIX: 'if not num' mapped 0 bytes to None; only None/"" should bail out
    if not num and num != 0:
        return None

    # Even if we receive string we convert so we can process it
    num = int(num)
    for unit in ['B', 'KiB', 'MiB', 'GiB', 'TiB', 'PiB', 'EiB', 'ZiB']:
        if abs(num) < 1024.0:
            size_human = "%3.1f%s" % (num, unit)
            # round it down removing decimal numbers
            size_human = re.sub(r"\.\d+", "", size_human)
            return size_human
        num /= 1024.0
    # Very big number!!
    size_human = "%.1f%s" % (num, 'Yi')
    # round it down removing decimal numbers
    size_human = re.sub(r"\.\d+", "", size_human)
    return size_human


def install_package(pack):
    """
    Install a package "pack" via `yum install -y `
    Returns True if the package is installed afterwards.
    """
    # Check if package is already installed
    ret, ver = run("rpm -q %s" % pack, verbose=False, return_output=True)
    if ret == 0:
        _print("INFO: %s is already installed (%s)" % (pack, ver))
        return True

    if run("yum install -y %s" % pack) != 0:
        msg = "FAIL: Could not install %s" % pack
        _print(msg)
        return False

    _print("INFO: %s was successfully installed" % pack)
    return True


def create_filesystem(vg_name, lv_name, filesystem="xfs"):
    """Create *filesystem* (xfs/ext4/btrfs) on /dev/<vg_name>/<lv_name>."""
    if filesystem not in ["xfs", "ext4", "btrfs"]:
        _print("WARN: Unknown filesystem.")
        return False
    if run("mkfs.%s /dev/%s/%s" % (filesystem, vg_name, lv_name), verbose=True) != 0:
        _print("WARN: Could not create filesystem %s on %s/%s" % (filesystem, vg_name, lv_name))
        return False
    return True


def metadata_snapshot(vg_name, lv_name):
    """Reserve a thin-pool metadata snapshot on vg-lv-tpool via dmsetup
    (suspend -> reserve_metadata_snap -> resume)."""
    if run("dmsetup suspend /dev/mapper/%s-%s-tpool" % (vg_name, lv_name), verbose=True) != 0:
        _print("WARN: Device mapper could not suspend /dev/mapper/%s-%s-tpool" % (vg_name, lv_name))
        return False
    if run("dmsetup message /dev/mapper/%s-%s-tpool 0 reserve_metadata_snap" % (vg_name, lv_name), verbose=True) != 0:
        _print("WARN: Device mapper could not create metadata snaphot on /dev/mapper/%s-%s-tpool" % (vg_name, lv_name))
        return False
    if run("dmsetup resume /dev/mapper/%s-%s-tpool" % (vg_name, lv_name), verbose=True) != 0:
        _print("WARN: Device mapper could not resume /dev/mapper/%s-%s-tpool" % (vg_name, lv_name))
        return False
    return True
class LogChecker:
    """Collection of system-health checks (taint, abrt, dmesg, console log,
    kdump, /var/log/messages) used to decide whether a test run left errors
    behind.  Each check returns True when clean."""

    def __init__(self):
        segfault_msg = " segfault "
        calltrace_msg = "Call Trace:"
        # messages that flag an error when found in dmesg / console log
        self.error_mgs = [segfault_msg, calltrace_msg]

    def check_all(self):
        """Check for error on the system
        Returns:
        \tBoolean:
        \t\tTrue is no error was found
        \t\tFalse if some error was found
        """
        _print("INFO: Checking for error on the system")
        error = 0

        if not self.kernel_check():
            error += 1
        if not self.abrt_check():
            error += 1
        if not self.messages_dump_check():
            error += 1
        if not self.dmesg_check():
            error += 1
        if not self.console_log_check():
            error += 1
        if not self.kdump_check():
            error += 1

        if error:
            log_messages = "/var/log/messages"
            if os.path.isfile(log_messages):
                print("submit %s, named messages.log" % log_messages)
                run("cp %s messages.log" % log_messages)
                run("rhts-submit-log -l messages.log")

            _print("INFO: Umounting NFS to avoid sosreport being hang there")
            run("umount /var/crash")

            ret_code = run("which sosreport", verbose=False)
            if ret_code != 0:
                _print("WARN: sosreport is not installed")
                _print("INFO: Mounting NFS again")
                run("mount /var/crash")
                return False

            print("Generating sosreport log")
            disable_plugin = ""
            if run("sosreport --list-plugins | grep emc") == 0:
                disable_plugin = "-n emc"
            ret_code, sosreport_log = run("sosreport --batch %s" % disable_plugin, return_output=True)
            if ret_code != 0:
                _print("WARN: sosreport command failed")
                _print("INFO: Mounting NFS again")
                run("mount /var/crash")
                return False

            sos_lines = sosreport_log.split("\n")
            sos_file = None
            for line in sos_lines:
                # In RHEL7 sosreport is saving under /var/tmp while RHEL6 uses /tmp...
                m = re.match(r"\s+((\/var)?\/tmp\/sosreport\S+)", line)
                if m:
                    sos_file = m.group(1)
            if not sos_file:
                _print("WARN: could not save sosreport log")
                _print("INFO: Mounting NFS again")
                run("mount /var/crash")
                return False

            run("rhts-submit-log -l %s" % sos_file)
            _print("INFO: Mounting NFS again")
            run("mount /var/crash")

            return False
        return True

    @staticmethod
    def abrt_check():
        """Check if abrtd found any issue
        Returns:
        \tBoolean:
        \t\tTrue no error was found
        \t\tFalse some error was found
        """
        _print("INFO: Checking abrt for error")

        if run("rpm -q abrt", verbose=False) != 0:
            _print("WARN: abrt tool does not seem to be installed")
            _print("WARN: skipping abrt check")
            return True

        if run("pidof abrtd", verbose=False) != 0:
            _print("WARN: abrtd is not running")
            return False

        ret, log = run("abrt-cli list", return_output=True)
        if ret != 0:
            _print("WARN: abrt-cli command failed")
            return False

        # We try to match for "Directory" to check if
        # abrt-cli list is actually listing any issue
        error = False
        if log:
            lines = log.split("\n")
            for line in lines:
                m = re.match(r"Directory:\s+(\S+)", line)
                if m:
                    directory = m.group(1)
                    filename = directory
                    filename = filename.replace(":", "-")
                    filename += ".tar.gz"
                    run("tar cfzP %s %s" % (filename, directory))
                    run("rhts-submit-log -l %s" % filename)
                    # if log is saved on beaker, it can be deleted from server
                    # it avoids next test from detecting this failure
                    run("abrt-cli rm %s" % directory)
                    error = True

        if error:
            _print("WARN: Found abrt error")
            return False

        _print("PASS: no Abrt entry has been found.")
        return True

    @staticmethod
    def kernel_check():
        """
        Check if kernel got tainted.
        It checks /proc/sys/kernel/tainted which returns a bitmask.
        The values are defined in the kernel source file include/linux/kernel.h,
        and explained in kernel/panic.c (sources provided by kernel-devel).
        Returns:
        \tBoolean:
        \t\tTrue if did not find any issue
        \t\tFalse if found some issue
        """
        _print("INFO: Checking for tainted kernel")

        previous_tainted_file = "/tmp/previous-tainted"

        ret, tainted = run("cat /proc/sys/kernel/tainted", return_output=True)

        tainted_val = int(tainted)
        if tainted_val == 0:
            run("echo %d > %s" % (tainted_val, previous_tainted_file), verbose=False)
            _print("PASS: Kernel is not tainted.")
            return True

        _print("WARN: Kernel is tainted!")

        if not os.path.isfile(previous_tainted_file):
            run("echo 0 > %s" % previous_tainted_file, verbose=False)
        ret, prev_taint = run("cat %s" % previous_tainted_file, return_output=True)
        prev_taint_val = int(prev_taint)
        if prev_taint_val == tainted_val:
            _print("INFO: Kernel tainted has already been handled")
            return True

        run("echo %d > %s" % (tainted_val, previous_tainted_file), verbose=False)

        # check all bits that are set
        # BUG FIX: the original shifted with float division ('tainted_val /= 2'),
        # which makes 'tainted_val & 1' raise TypeError on py3 AND destroyed the
        # value that is reused below; shift a copy with integer ops instead.
        bits = tainted_val
        bit = 0
        while bits != 0:
            if bits & 1:
                _print("\tTAINT bit %d is set\n" % bit)
            bit += 1
            # shift tainted value
            bits >>= 1

        # List all tainted bits that are defined
        print("List bit definition for tainted kernel")
        run("cat /usr/src/kernels/`uname -r`/include/linux/kernel.h | grep TAINT_")

        found_issue = False
        # try to find the module which tainted the kernel, tainted module have a mark between '('')'
        ret, output = run("cat /proc/modules | grep -e '(.*)' | cut -d' ' -f1", return_output=True)
        tainted_mods = output.split("\n")
        # For example during iscsi async_events scst tool loads an unsigned module
        # just ignores it, so we will ignore this tainted if there is no tainted
        # modules loaded
        if not tainted_mods:
            _print("INFO: ignoring tainted as the module is not loaded anymore")
        else:
            ignore_modules = ["ocrdma", "nvme_fc", "nvmet_fc"]
            for tainted_mod in tainted_mods:
                if tainted_mod:
                    _print("INFO: The following module got tainted: %s" % tainted_mod)
                    run("modinfo %s" % tainted_mod)
                    # we are ignoring these modules
                    if tainted_mod in ignore_modules:
                        _print("INFO: ignoring tainted on %s" % tainted_mod)
                        run("echo %d > %s" % (tainted_val, previous_tainted_file), verbose=False)
                        continue
                    found_issue = True

        run("echo %s > %s" % (tainted, previous_tainted_file), verbose=False)
        if found_issue:
            return False

        return True

    @staticmethod
    def _date2num(date):
        """Convert 'Mon DD hh:mm:ss' to a sortable numeric string MDDhhmmss."""
        date_map = {"Jan": "1",
                    "Feb": "2",
                    "Mar": "3",
                    "Apr": "4",
                    "May": "5",
                    "Jun": "6",
                    "Jul": "7",
                    "Aug": "8",
                    "Sep": "9",
                    "Oct": "10",
                    "Nov": "11",
                    "Dec": "12"}

        date_regex = r"(\S\S\S)\s(\d+)\s(\d\d:\d\d:\d\d)"
        m = re.match(date_regex, date)
        month = date_map[m.group(1)]
        day = str(m.group(2))
        # if day is a single digit, add '0' to begin
        if len(day) == 1:
            day = "0" + day

        hour = m.group(3)
        hour = hour.replace(":", "")

        value = month + day + hour

        return value

    @staticmethod
    def clear_dmesg():
        """Clear the kernel ring buffer (flag differs before RHEL7)."""
        cmd = "dmesg --clear"
        if dist_ver() < 7:
            cmd = "dmesg -c"
        run(cmd, verbose=False)
        return True

    def messages_dump_check(self):
        """Look for kernel stack dumps ('cut here' ... 'end trace') in
        /var/log/messages.  Currently informational only; always returns True
        unless the log file is missing (also True)."""
        previous_time_file = "/tmp/previous-dump-check"

        log_msg_file = "/var/log/messages"
        if not os.path.isfile(log_msg_file):
            _print("WARN: Could not open %s" % log_msg_file)
            return True

        # BUG FIX: the file handle was never closed; use a context manager
        with open(log_msg_file, encoding="utf-8", errors="ignore") as log_file:
            log = log_file.read()

        begin_tag = "\\[ cut here \\]"
        end_tag = "\\[ end trace "

        if not os.path.isfile(previous_time_file):
            first_time = "Jan 01 00:00:00"
            stamp = self._date2num(first_time)
            run("echo %s > %s" % (stamp, previous_time_file))

        # Read the last time test ran
        ret, last_run = run("cat %s" % previous_time_file, return_output=True)
        _print("INFO: Checking for stack dump messages after: %s" % last_run)

        # Going to search the file for text that matches begin_tag until end_tag
        dump_regex = begin_tag + "(.*?)" + end_tag
        # BUG FIX: re.findall returns a list, the original called .group(1) on it
        # (AttributeError); DOTALL is needed so the dump can span multiple lines.
        dumps = re.findall(dump_regex, log, re.DOTALL)
        if dumps:
            _print("INFO: Checking if it is newer than: %s" % last_run)
            for dump in dumps:
                print(dump)
            # TODO

        _print("PASS: No recent dump messages has been found.")
        return True

    def dmesg_check(self):
        """
        Check for error messages on dmesg ("Call Trace and segfault")
        """
        _print("INFO: Checking for errors on dmesg.")
        error = 0
        for msg in self.error_mgs:
            ret, output = run("dmesg | grep -i '%s'" % msg, return_output=True)
            if output:
                _print("WARN: found %s on dmesg" % msg)
                run("echo '\nINFO found %s Saving it\n'>> dmesg.log" % msg)
                run("dmesg >> dmesg.log")
                run("rhts-submit-log -l dmesg.log")
                error += 1  # BUG FIX: was 'error = + 1'
        self.clear_dmesg()
        if error:
            return False

        _print("PASS: No errors on dmesg have been found.")
        return True

    def console_log_check(self):
        """
        Checks for error messages on console log ("Call Trace and segfault")
        """
        error = 0
        console_log_file = "/root/console.log"
        prev_console_log_file = "/root/console.log.prev"
        new_console_log_file = "/root/console.log.new"

        if not os.environ.get('LAB_CONTROLLER'):
            _print("WARN: Could not find lab controller")
            return True

        if not os.environ.get('RECIPEID'):
            _print("WARN: Could not find recipe ID")
            return True

        lab_controller = os.environ['LAB_CONTROLLER']
        recipe_id = os.environ['RECIPEID']

        # get current console log
        url = "http://%s:8000/recipes/%s/logs/console.log" % (lab_controller, recipe_id)

        if run("wget -q %s -O %s" % (url, new_console_log_file)) != 0:
            _print("INFO: Could not get console log")
            # return sucess if could not get console.log
            return True

        # if there was previous console log, we just check the new part
        run("diff -N -n --unidirectional-new-file %s %s > %s" % (
            prev_console_log_file, new_console_log_file, console_log_file))

        # backup the current full console.log
        # next time we run the test we will compare just
        # what has been appended to console.log
        run("mv -f %s %s" % (new_console_log_file, prev_console_log_file))

        _print("INFO: Checking for errors on %s" % console_log_file)
        for msg in self.error_mgs:
            ret, output = run("cat %s | grep -i '%s'" % (console_log_file, msg), return_output=True)
            if output:
                _print("INFO found %s on %s" % (msg, console_log_file))
                run("rhts-submit-log -l %s" % console_log_file)
                error += 1  # BUG FIX: was 'error = + 1'

        if error:
            return False

        _print("PASS: No errors on %s have been found." % console_log_file)
        return True

    @staticmethod
    def kdump_check():
        """
        Check for kdump error messages.
        It assumes kdump is configured on /var/crash
        """
        error = 0

        previous_kdump_check_file = "/tmp/previous-kdump-check"
        kdump_dir = "/var/crash"
        ret, hostname = run("hostname", verbose=False, return_output=True)

        if not os.path.exists("%s/%s" % (kdump_dir, hostname)):
            _print("INFO: No kdump log found for this server")
            return True

        ret, output = run("ls -l %s/%s | awk '{print$9}'" % (kdump_dir, hostname), return_output=True)
        kdumps = output.split("\n")
        kdump_dates = []
        for kdump in kdumps:
            if kdump == "":
                continue
            # parse on the date, remove the ip of the uploader
            m = re.match(".*?-(.*)", kdump)
            if not m:
                _print("WARN: unexpected format for kdump (%s)" % kdump)
                continue
            date = m.group(1)
            # Old dump were using "."
            # BUG FIX: str.replace is literal, the original searched for the
            # two characters '\.' and therefore never replaced anything
            date = date.replace(".", "-")
            # replace last "-" with space to format date properly
            index = date.rfind("-")
            date = date[:index] + " " + date[index + 1:]
            _print("INFO: Found kdump from %s" % date)
            kdump_dates.append(date)

        # checks if a file to store last run exists, if not create it
        if not os.path.isfile("%s" % previous_kdump_check_file):
            # time in seconds
            ret, now = run(r"date +\"\%s\"", verbose=False, return_output=True)
            run("echo -n %s > %s" % (now, previous_kdump_check_file), verbose=False)
            _print("INFO: kdump check is executing for the first time.")
            _print("INFO: doesn't know from which date should check files.")
            _print("PASS: Returning success.")
            return True

        # Read the last time test ran
        ret, previous_check_time = run("cat %s" % previous_kdump_check_file, return_output=True)
        # just add new line to terminal because the file should not have already new line character
        print("")

        for date in kdump_dates:
            # Note %% is escape form to use % in a string
            ret, kdump_time = run("date --date=\"%s\" +%%s" % date, return_output=True)
            if ret != 0:
                _print("WARN: Could not convert date %s" % date)
                continue

            if not kdump_time:
                continue
            if int(kdump_time) > int(previous_check_time):
                _print("WARN: Found a kdump log from %s (more recent than %s)" % (date, previous_check_time))
                _print("WARN: Check %s/%s" % (kdump_dir, hostname))
                error += 1

        ret, now = run(r"date +\"\%s\"", verbose=False, return_output=True)
        run("echo -n %s > %s" % (now, previous_kdump_check_file), verbose=False)

        if error:
            return False

        _print("PASS: No errors on kdump have been found.")
        return True
class TestClass:
    """Tracks per-test-case results (pass/fail/skip), persists them to a log
    so they survive a mid-test reboot, and prints a summary in tend()."""

    # we currently support these exit code for a test case
    tc_sup_status = {"pass": "PASS: ",
                     "fail": "ERROR: ",
                     "skip": "SKIP: "}
    # NOTE(review): these result lists are class attributes, so they are shared
    # between instances — presumably only one TestClass exists per run; confirm.
    tc_pass = []
    tc_fail = []
    tc_skip = []  # For some reason it did not execute
    tc_results = []  # Test results stored in a list

    test_dir = "%s/.stqe-test" % expanduser("~")
    test_log = "%s/test.log" % test_dir

    def __init__(self):
        print("################################## Test Init ###################################")
        self.log_checker = LogChecker()
        if not os.path.isdir(self.test_dir):
            mkdir(self.test_dir)
        # read entries on test.log, there will be entries if tend was not called
        # before starting a TC class again, usually if the test case reboots the server
        if not os.path.isfile(self.test_log):
            # running the test for the first time
            show_sys_info()
            # Track memory usage during test
            run("free -b > init_mem.txt", verbose=False)
            run("top -b -n 1 > init_top.txt", verbose=False)
        else:
            try:
                # BUG FIX: the original used bare 'except:' plus a
                # 'finally: f.close()' that closed the file twice and raised
                # NameError when open() itself failed
                with open(self.test_log) as f:
                    file_data = f.read()
            except (IOError, OSError):
                _print("WARN: TestClass() could not read %s" % self.test_log)
                return
            log_entries = file_data.split("\n")
            # remove the file, once tlog is ran it will add the entries again...
            run("rm -f %s" % (self.test_log), verbose=False)
            if log_entries:
                _print("INFO: Loading test result from previous test run...")
                for entry in log_entries:
                    self.tlog(entry)
        print("################################################################################")
        return

    def tlog(self, string):
        """print message, if message begins with supported message status
        the test message will be added to specific test result array
        """
        print(string)
        if re.match(self.tc_sup_status["pass"], string):
            self.tc_pass.append(string)
            self.tc_results.append(string)
            run("echo '%s' >> %s" % (string, self.test_log), verbose=False)
        if re.match(self.tc_sup_status["fail"], string):
            self.tc_fail.append(string)
            self.tc_results.append(string)
            run("echo '%s' >> %s" % (string, self.test_log), verbose=False)
        if re.match(self.tc_sup_status["skip"], string):
            self.tc_skip.append(string)
            self.tc_results.append(string)
            run("echo '%s' >> %s" % (string, self.test_log), verbose=False)
        return None

    @staticmethod
    def trun(cmd, return_output=False):
        """Run the cmd and format the log. return the exitint status of cmd
        The arguments are:
        \tCommand to run
        \treturn_output: if should return command output as well (Boolean)
        Returns:
        \tint: Command exit code
        \tstr: command output (optional)
        """
        return run(cmd, return_output)

    def tok(self, cmd, return_output=False):
        """Run the cmd and expect it to pass.
        The arguments are:
        \tCommand to run
        \treturn_output: if should return command output as well (Boolean)
        Returns:
        \tBoolean: return_code
        \t\tTrue: If command excuted successfully
        \t\tFalse: Something went wrong
        \tstr: command output (optional)
        """
        output = None
        if not return_output:
            cmd_code = run(cmd)
        else:
            cmd_code, output = run(cmd, return_output)

        if cmd_code == 0:
            self.tpass(cmd)
            ret_code = True
        else:
            self.tfail(cmd)
            ret_code = False

        if return_output:
            return ret_code, output
        return ret_code

    def tnok(self, cmd, return_output=False):
        """Run the cmd and expect it to fail.
        The arguments are:
        \tCommand to run
        \treturn_output: if should return command output as well (Boolean)
        Returns:
        \tBoolean: return_code
        \t\tFalse: If command excuted successfully
        \t\tTrue: Something went wrong
        \tstr: command output (optional)
        """
        output = None
        if not return_output:
            cmd_code = run(cmd)
        else:
            cmd_code, output = run(cmd, return_output)

        if cmd_code != 0:
            self.tpass(cmd + " [exited with error, as expected]")
            ret_code = True
        else:
            self.tfail(cmd + " [expected to fail, but it did not]")
            ret_code = False

        if return_output:
            return ret_code, output
        return ret_code

    def tpass(self, string):
        """Will add PASS + string to test log summary
        """
        self.tlog(self.tc_sup_status["pass"] + string)
        return None

    def tfail(self, string):
        """Will add ERROR + string to test log summary
        """
        self.tlog(self.tc_sup_status["fail"] + string)
        return None

    def tskip(self, string):
        """Will add SKIP + string to test log summary
        """
        self.tlog(self.tc_sup_status["skip"] + string)
        return None

    def tend(self):
        """It checks for error in the system and print test summary
        Returns:
        \tBoolean
        \t\tTrue if all test passed and no error was found on server
        \t\tFalse if some test failed or found error on server
        """
        if self.log_checker.check_all():
            self.tpass("Search for error on the server")
        else:
            self.tfail("Search for error on the server")

        print("################################ Test Summary ##################################")
        # Will print test results in order and not by test result order
        for tc in self.tc_results:
            print(tc)

        n_tc_pass = len(self.tc_pass)
        n_tc_fail = len(self.tc_fail)
        n_tc_skip = len(self.tc_skip)
        print("#############################")
        print("Total tests that passed: " + str(n_tc_pass))
        print("Total tests that failed: " + str(n_tc_fail))
        print("Total tests that skipped: " + str(n_tc_skip))
        print("################################################################################")
        sys.stdout.flush()
        # Added this sleep otherwise some of the prints were not being shown....
        sleep(1)
        run("rm -f %s" % (self.test_log), verbose=False)
        run("rmdir %s" % (self.test_dir), verbose=False)

        # If at least one test failed, return error
        if n_tc_fail > 0:
            return False

        return True
print("################################################################################") - sys.stdout.flush() - #Added this sleep otherwise some of the prints were not being shown.... - sleep(1) - run("rm -f %s" % (self.test_log), verbose=False) - run("rmdir %s" % (self.test_dir), verbose=False) - - #If at least one test failed, return error - if n_tc_fail > 0: - return False - - return True - - -class LoopDev: - def __init__(self): - self.image_path = "/tmp" - - @staticmethod - def _get_loop_path(name): - loop_path = name - if "/dev/" not in name: - loop_path = "/dev/" + name - - return loop_path - - @staticmethod - def _get_image_file(name, image_path): - image_file = "%s/%s.img" % (image_path, name) - return image_file - - @staticmethod - def _standardize_name(name): - """ - Make sure use same standard for name, for example remove /dev/ from it if exists - """ - if not name: - _print("WARN: _standardize_name() - requires name as parameter") - return None - return name.replace("/dev/", "") - - def create_loopdev(self, name=None, size=1024): - """ - Create a loop device - Parameters: - \tname: eg. 
loop0 (optional) - \tsize: Size in MB (default: 1024MB) - """ - - ret_fail = False - if not name: - cmd = "losetup -f" - retcode, output = run(cmd, return_output=True, verbose=False) - if retcode != 0: - _print("WARN: Could not find free loop device") - print(output) - return None - name = output - ret_fail = None - name = self._standardize_name(name) - - fname = self._get_image_file(name, self.image_path) - _print("INFO: Creating loop device %s with size %d" % (fname, size)) - - _print("INFO: Checking if %s exists" % fname) - if not os.path.isfile(fname): - # make sure we have enough space to create the file - free_space_bytes = get_free_space(self.image_path) - # Convert the size given in megabytes to bytes - size_bytes = int(size_human_2_size_bytes("%sMiB" % size)) - if free_space_bytes <= size_bytes: - _print("WARN: Not enough space to create loop device with size %s" - % size_bytes_2_size_human(size_bytes)) - _print("available space: %s" % size_bytes_2_size_human(free_space_bytes)) - return ret_fail - _print("INFO: Creating file %s" % fname) - # cmd = "dd if=/dev/zero of=%s seek=%d bs=1M count=0" % (fname, size) - cmd = "fallocate -l %sM %s" % (size, fname) - try: - # We are just creating the file, not writting zeros to it - retcode = run(cmd) - if retcode != 0: - _print("command failed with code %s" % retcode) - _print("WARN: Could not create loop device image file") - return ret_fail - except OSError as e: - print("command failed: ", e, file=sys.err) - return ret_fail - - loop_path = self._get_loop_path(name) - # detach loop device if it exists - self.detach_loopdev(loop_path) - - # Going to associate the file to the loopdevice - cmd = "losetup %s %s" % (loop_path, fname) - retcode = run(cmd) - if retcode != 0: - _print("WARN: Could not create loop device") - return ret_fail - - if ret_fail is None: - return loop_path - return True - - def delete_loopdev(self, name): - """ - Delete a loop device - Parameters: - \tname: eg. 
loop0 or /dev/loop0 - """ - if not name: - _print("WARN: delete_loopdev() - requires name parameter") - return False - - _print("INFO: Deleting loop device %s" % name) - name = self._standardize_name(name) - - loop_path = self._get_loop_path(name) - - # detach loop device if it exists - if not self.detach_loopdev(name): - _print("WARN: could not detach %s" % loop_path) - return False - - fname = self._get_image_file(name, self.image_path) - if os.path.isfile(fname): - cmd = "rm -f %s" % fname - retcode = run(cmd) - if retcode != 0: - _print("WARN: Could not delete loop device %s" % name) - return False - - # check if loopdev file is deleted as it sometimes remains - if os.path.isfile(fname): - _print("WARN: Deleted loop device file %s but it is still there" % fname) - return False - - return True - - @staticmethod - def get_loopdev(): - # example of output on rhel-6.7 - # /dev/loop0: [fd00]:396428 (/tmp/loop0.img) - retcode, output = run("losetup -a | awk '{print$1}'", return_output=True, verbose=False) - # retcode, output = run("losetup -l | tail -n +2", return_output=True, verbose=False) - if (retcode != 0): - _print("WARN: get_loopdev failed to execute") - print(output) - return None - - devs = None - if output: - devs = output.split("\n") - # remove the ":" character from all devices - devs = [d.replace(':', "") for d in devs] - - return devs - - def detach_loopdev(self, name=None): - cmd = "losetup -D" - if name: - devs = self.get_loopdev() - if not devs: - # No device was found - return False - - name = self._standardize_name(name) - - # Just try to detach if device is connected, otherwise ignore - # print("INFO: Checking if ", loop_path, " exists, to be detached") - dev_path = self._get_loop_path(name) - if dev_path in devs: - cmd = "losetup -d %s" % dev_path - else: - # if loop device does not exist just ignore it - return True - - # run losetup -D or -d - retcode = run(cmd) - if retcode != 0: - _print("WARN: Could not detach loop device") - return False - 
class LVM:
    """Thin wrappers around the LVM command line tools (vgs/lvs and friends)."""

    ###########################################
    # VG section
    ###########################################
    @staticmethod
    def vg_query(verbose=False):
        """Query Volume Groups and return a dictonary with VG information for each VG.
        The arguments are:
        \tNone
        Returns:
        \tdict: Return a dictionary with VG info for each VG
        """
        cmd = 'vgs --noheadings --separator ","'
        rc, out = run(cmd, return_output=True, verbose=verbose)
        if rc != 0:
            _print("INFO: there is no VGs")
            return None

        # format of VG info: name #PV #LV #SN Attr VSize VFree
        vg_info_regex = r"\s+(\S+),(\S+),(\S+),(.*),(.*),(.*),(.*)$"
        field_names = ("num_pvs", "num_lvs", "num_sn", "attr", "vsize", "vfree")

        vg_dict = {}
        for line in out.split("\n"):
            m = re.match(vg_info_regex, line)
            if not m:
                continue
            # group(1) is the VG name, the rest map onto field_names in order
            vg_dict[m.group(1)] = dict(zip(field_names, m.groups()[1:]))

        return vg_dict

    @staticmethod
    def vg_create(vg_name, pv_name, force=False, verbose=True):
        """Create a Volume Group.
        The arguments are:
        \tVG name
        \tPV name
        \tforce (boolean)
        Returns:
        \tBoolean: True if success, False in case of failure
        """
        if not vg_name or not pv_name:
            _print("WARN: vg_create requires vg_name and pv_name")
            return False

        options = "--force" if force else ""
        cmd = "vgcreate %s %s %s" % (options, vg_name, pv_name)
        # _print("WARN: Could not create %s" % vg_name)
        return run(cmd, verbose=verbose) == 0

    def vg_remove(self, vg_name, force=False, verbose=True):
        """Delete a Volume Group.
        The arguments are:
        \tVG name
        \tforce (boolean)
        Returns:
        \tBoolean: True if success, False in case of failure
        """
        if not vg_name:
            _print("WARN: vg_remove requires vg_name")
            return False

        vg_dict = self.vg_query()
        if vg_name not in vg_dict.keys():
            _print("INFO: vg_remove - %s does not exist. Skipping..." % vg_name)
            return True

        options = "--force" if force else ""
        cmd = "vgremove %s %s" % (options, vg_name)
        return run(cmd, verbose=verbose) == 0

    ###########################################
    # LV section
    ###########################################
    @staticmethod
    def lv_query(options=None, verbose=False):
        """Query Logical Volumes and return a dictonary with LV information for each LV.
        The arguments are:
        \toptions: If not want to use default lvs output. Use -o for no default fields
        Returns:
        \tdict: Return a list with LV info for each LV
        """
        # Use \",\" as separator, as some output might contain ','
        # For example, lvs -o modules on thin device returns "thin,thin-pool"
        cmd = 'lvs -a --noheadings --separator \\",\\"'

        if options:
            extra_fields = options.split(",")
            param_names = ["name", "vg_name"] + extra_fields
            # base regex for name/vg_name plus one capture group per extra field
            lv_info_regex = r'\s+(\S+)","(\S+)' + r'","(.*)' * len(extra_fields) + "$"
            cmd += " -o lv_name,vg_name,%s" % options
        else:
            # format of LV info: Name VG Attr LSize Pool Origin Data% Meta% Move Log Cpy%Sync Convert
            lv_info_regex = (r'\s+(\S+)","(\S+)","(\S+)","(\S+)","(.*)","(.*)","(.*)",'
                             r'"(.*)","(.*)","(.*)","(.*)","(.*)$')
            # default parameters returned by lvs -a
            param_names = ["name", "vg_name", "attr", "size", "pool", "origin",
                           "data_per", "meta_per", "move", "log", "copy_per", "convert"]

        rc, out = run(cmd, return_output=True, verbose=verbose)
        if rc != 0:
            _print("INFO: there is no LVs")
            return None

        lv_list = []
        for line in out.split("\n"):
            m = re.match(lv_info_regex, line)
            if not m:
                _print("WARN: (%s) does not match lvs output format" % line)
                continue
            lv_list.append(dict(zip(param_names, m.groups())))

        return lv_list

    def lv_info(self, lv_name, vg_name, options=None, verbose=False):
        """
        Show information of specific LV
        """
        if not lv_name or not vg_name:
            _print("WARN: lv_info() - requires lv_name and vg_name as parameters")
            return None

        lvs = self.lv_query(options=options, verbose=verbose)
        if not lvs:
            return None

        for lv in lvs:
            if lv["name"] == lv_name and lv["vg_name"] == vg_name:
                return lv
        return None

    @staticmethod
    def lv_create(vg_name, lv_name, options=(""), verbose=True):
        """Create a Logical Volume.
        The arguments are:
        \tVG name
        \tLV name
        \toptions
        Returns:
        \tBoolean: True if success, False in case of failure
        """
        if not vg_name or not lv_name:
            _print("WARN: lv_create requires vg_name and lv_name")
            return False

        cmd = "lvcreate %s %s -n %s" % (" ".join(str(i) for i in options), vg_name, lv_name)
        # _print("WARN: Could not create %s" % lv_name)
        return run(cmd, verbose=verbose) == 0

    @staticmethod
    def lv_activate(lv_name, vg_name, verbose=True):
        """Activate a Logical Volume
        The arguments are:
        \tLV name
        \tVG name
        Returns:
        \tBoolean: True in case of success, False if something went wrong
        """
        if not lv_name or not vg_name:
            _print("WARN: lv_activate requires lv_name and vg_name")
            return False

        cmd = "lvchange -ay %s/%s" % (vg_name, lv_name)
        if run(cmd, verbose=verbose) != 0:
            _print("WARN: Could not activate LV %s" % lv_name)
            return False
        # Maybe we should query the LVs and make sure it is really activated
        return True

    @staticmethod
    def lv_deactivate(lv_name, vg_name, verbose=True):
        """Deactivate a Logical Volume
        The arguments are:
        \tLV name
        \tVG name
        Returns:
        \tBoolean: True in case of success, False if something went wrong
        """
        if not lv_name or not vg_name:
            _print("WARN: lv_deactivate requires lv_name and vg_name")
            return False

        cmd = "lvchange -an %s/%s" % (vg_name, lv_name)
        if run(cmd, verbose=verbose) != 0:
            _print("WARN: Could not deactivate LV %s" % lv_name)
            return False
        # Maybe we should query the LVs and make sure it is really deactivated
        return True

    def lv_remove(self, lv_name, vg_name, verbose=True):
        """Remove an LV from a VG
        The arguments are:
        \tLV name (may be a whitespace separated list of LVs)
        \tVG name
        Returns:
        \tBoolean: True in case of success, False if something went wrong
        """
        if not lv_name or not vg_name:
            _print("WARN: lv_remove requires lv_name and vg_name")
            return False

        lvs = self.lv_query()

        for single_lv in lv_name.split():
            if not self.lv_info(single_lv, vg_name):
                _print("INFO: lv_remove - LV %s does not exist. Skipping" % single_lv)
                continue

            cmd = "lvremove --force %s/%s" % (vg_name, single_lv)
            if run(cmd, verbose=verbose) != 0:
                _print("WARN: Could not remove LV %s" % single_lv)
                return False

            if self.lv_info(single_lv, vg_name):
                _print("INFO: lv_remove - LV %s still exists." % single_lv)
                return False

        return True

    @staticmethod
    def lv_convert(vg_name, lv_name, options, verbose=True):
        """Change Logical Volume layout.
        The arguments are:
        \tVG name
        \tLV name
        \toptions
        Returns:
        \tBoolean: True if success, False in case of failure
        """
        if not options:
            _print("WARN: lv_convert requires at least some options specified.")
            return False

        if not lv_name or not vg_name:
            _print("WARN: lv_convert requires vg_name and lv_name")
            return False

        cmd = "lvconvert %s %s/%s" % (" ".join(options), vg_name, lv_name)
        if run(cmd, verbose=verbose) != 0:
            _print("WARN: Could not convert %s" % lv_name)
            return False

        return True

    ###########################################
    # Config file
    ###########################################
    @staticmethod
    def get_config_file_path():
        """Path of the LVM configuration file."""
        return "/etc/lvm/lvm.conf"

    def update_config(self, key, value):
        """Rewrite 'key = value' entries in lvm.conf in place."""
        config_file = self.get_config_file_path()
        search_regex = re.compile(r"(\s*)%s(\s*)=(\s*)\S*" % key)
        for line in fileinput.input(config_file, inplace=1):
            m = search_regex.match(line)
            if m:
                line = "%s%s = %s" % (m.group(1), key, value)
            # print saves the line to the file
            # need to remove new line character as print will add it
            line = line.rstrip('\n')
            print(line)
- The arguments are: - \tVG name - \tLV name - \toptions - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - if not options: - _print("WARN: lv_convert requires at least some options specified.") - return False - - if not lv_name or not vg_name: - _print("WARN: lv_convert requires vg_name and lv_name") - return False - - cmd = "lvconvert %s %s/%s" % (" ".join(options), vg_name, lv_name) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not convert %s" % lv_name) - return False - - return True - - ########################################### - # Config file - ########################################### - @staticmethod - def get_config_file_path(): - return "/etc/lvm/lvm.conf" - - def update_config(self, key, value): - config_file = self.get_config_file_path() - search_regex = re.compile("(\s*)%s(\s*)=(\s*)\S*" % key) - for line in fileinput.input(config_file, inplace=1): - m = search_regex.match(line) - if m: - line = "%s%s = %s" % (m.group(1), key, value) - # print saves the line to the file - # need to remove new line character as print will add it - line = line.rstrip('\n') - print(line) - - -class DMPD: - def __init__(self): - self.lvm = LVM() - - def _get_devices(self): - lv_list = self.lvm.lv_query() - return lv_list - - @staticmethod - def _get_active_devices(): - cmd = "ls /dev/mapper/" - retcode, output = run(cmd, return_output=True, verbose=False) - if retcode != 0: - _print("WARN: Could not find active dm devices") - return False - devices = output.split() - return devices - - @staticmethod - def _get_device_path(vg_name, lv_name): - device_path = vg_name + "-" + lv_name - if "/dev/mapper/" not in device_path: - device_path = "/dev/mapper/" + device_path - return device_path - - def _check_device(self, vg_name, lv_name): - devices = self._get_devices() - device_list = [x["name"] for x in devices] - if lv_name not in device_list: - _print("WARN: %s is not a device" % lv_name) - return False - for 
x in devices: - if x["name"] == lv_name and x["vg_name"] == vg_name: - _print("INFO: Found device %s in group %s" % (lv_name, vg_name)) - return True - return False - - def _activate_device(self, vg_name, lv_name): - devices_active = self._get_active_devices() - if vg_name + "-" + lv_name not in devices_active: - ret = self.lvm.lv_activate(lv_name, vg_name) - if not ret: - _print("WARN: Could not activate device %s" % lv_name) - return False - _print("INFO: device %s was activated" % lv_name) - _print("INFO: device %s is active" % lv_name) - return True - - @staticmethod - def _fallocate(_file, size, command_message): - cmd = "fallocate -l %sM %s" % (size, _file) - try: - retcode = run(cmd) - if retcode != 0: - _print("WARN: Command failed with code %s." % retcode) - _print("WARN: Could not create file to %s metadata to." % command_message) - return False - except OSError as e: - print("command failed: ", e, file=sys.err) - return False - return True - - @staticmethod - def get_help(cmd): - commands = ["cache_check", "cache_dump", "cache_metadata_size", "cache_repair", "cache_restore", "era_check", - "era_dump", "era_invalidate", "era_restore", "thin_check", "thin_delta", "thin_dump", "thin_ls", - "thin_metadata_size", "thin_repair", "thin_restore", "thin_rmap", "thin_show_duplicates", - "thin_trim"] - if cmd not in commands: - _print("WARN: Unknown command %s" % cmd) - return False - - command = "%s -h" % cmd - retcode = run(command, verbose=True) - if retcode != 0: - _print("WARN: Could not get help for %s." 
% cmd) - return False - - return True - - @staticmethod - def get_version(cmd): - commands = ["cache_check", "cache_dump", "cache_metadata_size", "cache_repair", "cache_restore", "era_check", - "era_dump", "era_invalidate", "era_restore", "thin_check", "thin_delta", "thin_dump", "thin_ls", - "thin_metadata_size", "thin_repair", "thin_restore", "thin_rmap", "thin_show_duplicates", - "thin_trim"] - if cmd not in commands: - _print("WARN: Unknown command %s" % cmd) - return False - - command = "%s -V" % cmd - retcode = run(command, verbose=True) - if retcode != 0: - _print("WARN: Could not get version of %s." % cmd) - return False - - return True - - def _get_dev_id(self, dev_id, path=None, lv_name=None, vg_name=None): - dev_ids = [] - - if path is None: - retcode, data = self.thin_dump(source_vg=vg_name, source_lv=lv_name, formatting="xml", return_output=True) - if not retcode: - _print("WARN: Could not dump metadata from %s/%s" % (vg_name, lv_name)) - return False - data_lines = data.splitlines() - for line in data_lines: - blocks = line.split() - for block in blocks: - if not block.startswith("dev_"): - continue - else: - dev_ids.append(int(block[8:-1])) - - else: - with open(path, "r") as meta: - for line in meta: - blocks = line.split() - for block in blocks: - if not block.startswith("dev_"): - continue - else: - dev_ids.append(int(block[8:-1])) - - if dev_id in dev_ids: - return True - - return False - - @staticmethod - def _metadata_size(source=None, lv_name=None, vg_name=None): - if source is None: - cmd = "lvs -a --units m" - ret, data = run(cmd, return_output=True) - if ret != 0: - _print("WARN: Could not list LVs") - data_line = data.splitlines() - for line in data_line: - cut = line.split() - if not cut or lv_name != cut[0] and vg_name != cut[1]: - continue - cut = cut[3] - cut = cut.split("m") - size = float(cut[0]) - cmd = "rm -f /tmp/meta_size" - run(cmd) - return int(size) - _print("WARN: Could not find %s %s in lvs, setting size to 100m" % (lv_name, 
vg_name)) - return 100 - else: - return int(os.stat(source).st_size) / 1000000 - - ########################################### - # cache section - ########################################### - - def cache_check(self, source_file=None, source_vg=None, source_lv=None, quiet=False, super_block_only=False, - clear_needs_check_flag=False, skip_mappings=False, skip_hints=False, skip_discards=False, - verbose=True): - """Check cache pool metadata from either file or device. - The arguments are: - \tsource_file - \tsource_vg VG name - \tsource_lv LV name - \tquiet Mute STDOUT - \tsuper_block_only - \tclear_needs_check_flag - \tskip_mappings - \tskip_hints - \tskip_discards - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - options = "" - - if not source_file and (not source_vg or not source_lv): - _print("WARN: cache_check requires either source_file OR source_vg and source_lv.") - return False - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - device = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - device = source_file - - if quiet: - options += "--quiet " - - if super_block_only: - options += "--super-block-only " - - if clear_needs_check_flag: - options += "--clear-needs-check-flag " - - if skip_mappings: - options += "--skip-mappings " - - if skip_hints: - options += "--skip-hints " - - if skip_discards: - options += "--skip-discards " - - cmd = "cache_check %s %s" % (device, options) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not check %s metadata" % device) - return False - - return True - - def cache_dump(self, source_file=None, source_vg=None, source_lv=None, output=None, repair=False, verbose=True, - return_output=False): - """Dumps cache metadata from device 
of source file to standard output or file. - The arguments are: - \tsource_file - \tsource_vg VG name - \tsource_lv LV name - \toutput specify output xml file - \treturn_output see 'Returns', not usable with output=True - \trepair Repair the metadata while dumping it - Returns: - \tOnly Boolean if return_output False: - \t\tTrue if success - \t'tFalse in case of failure - \tBoolean and data if return_output True - """ - options = "" - - if return_output and output: - _print("INFO: Cannot return to both STDOUT and file, returning only to file.") - return_output = False - - if return_output: - ret_fail = (False, None) - else: - ret_fail = False - - if not source_file and (not source_vg or not source_lv): - _print("WARN: cache_dump requires either source_file OR source_vg and source_lv.") - return ret_fail - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return ret_fail - ret = self._activate_device(source_vg, source_lv) - if not ret: - return ret_fail - device = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return ret_fail - device = source_file - - if output: - if not os.path.isfile(output): - size = self._metadata_size(source_file, source_lv, source_vg) - ret = self._fallocate(output, size + 1, "dump") - if not ret: - return ret_fail - options += "-o %s " % output - - if repair: - options += "--repair" - - cmd = "cache_dump %s %s" % (device, options) - if return_output: - retcode, data = run(cmd, return_output=True, verbose=verbose) - else: - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not dump %s metadata." 
% device) - return ret_fail - - if return_output: - return True, data - return True - - def cache_repair(self, source_file=None, source_vg=None, source_lv=None, target_file=None, target_vg=None, - target_lv=None, verbose=True): - """Repairs cache metadata from source file/device to target file/device - The arguments are: - \tsource as either source_file OR source_vg and source_lv - \ttarget as either target_file OR target_vg and target_lv - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - if not source_file and (not source_vg or not source_lv): - _print("WARN: cache_repair requires either source_file OR source_vg and source_lv as source.") - return False - - if not target_file and (not target_vg or not target_lv): - _print("WARN: cache_repair requires either target_file OR target_vg and target_lv as target.") - return False - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - source = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - source = source_file - - if not target_file: - ret = self._check_device(target_vg, target_lv) - if not ret: - return False - ret = self._activate_device(target_vg, target_lv) - if not ret: - return False - target = self._get_device_path(target_vg, target_lv) - else: - if not os.path.isfile(target_file): - size = self._metadata_size(source_file, source_lv, source_vg) - ret = self._fallocate(target_file, size + 1, "repair") - if not ret: - return False - target = target_file - - cmd = "cache_repair -i %s -o %s" % (source, target) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not repair metadata from %s to %s" % (source, target)) - return False - - return True - - def cache_restore(self, source_file, target_vg=None, target_lv=None, 
target_file=None, quiet=False, - metadata_version=None, omit_clean_shutdown=False, override_metadata_version=None, verbose=True): - """Restores cache metadata from source xml file to target device/file - The arguments are: - \tsource_file Source xml file - \ttarget as either target_file OR target_vg and target_lv - \tquiet Mute STDOUT - \tmetadata_version Specify metadata version to restore - \tomit_clean_shutdown Disable clean shutdown - \toverride_metadata_version DEBUG option to override metadata version without checking - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - options = "" - - if source_file is None: - _print("WARN: cache_restore requires source file.") - return False - - if not target_file and (not target_vg or not target_lv): - _print("WARN: cache_restore requires either target_file OR target_vg and target_lv as target.") - return False - - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - - if not target_file: - ret = self._check_device(target_vg, target_lv) - if not ret: - return False - ret = self._activate_device(target_vg, target_lv) - if not ret: - return False - target = self._get_device_path(target_vg, target_lv) - else: - if not os.path.isfile(target_file): - size = self._metadata_size(source_file) - ret = self._fallocate(target_file, size + 1, "restore") - if not ret: - return False - target = target_file - - if quiet: - options += "--quiet " - - if metadata_version: - options += "--metadata-version %s " % metadata_version - - if omit_clean_shutdown: - options += "--omit-clean-shutdown " - - if override_metadata_version: - options += "--debug-override-metadata-version %s" % override_metadata_version - - cmd = "cache_restore -i %s -o %s %s" % (source_file, target, options) - - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not restore metadata from %s to %s" % (source_file, target)) - return False - - return True - - 
########################################### - # thinp section - ########################################### - - def thin_check(self, source_file=None, source_vg=None, source_lv=None, quiet=False, super_block_only=False, - clear_needs_check_flag=False, skip_mappings=False, ignore_non_fatal_errors=False, verbose=True): - """Check thin pool metadata from either file or device. - The arguments are: - \tsource_file - \tsource_vg VG name - \tsource_lv LV name - \tquiet Mute STDOUT - \tsuper_block_only - \tclear_needs_check_flag - \tskip_mappings - \tignore_non_fatal_errors - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - options = "" - - if not source_file and (not source_vg or not source_lv): - _print("WARN: thin_check requires either source_file OR source_vg and source_lv.") - return False - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - device = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - device = source_file - - if quiet: - options += "--quiet " - - if super_block_only: - options += "--super-block-only " - - if clear_needs_check_flag: - options += "--clear-needs-check-flag " - - if skip_mappings: - options += "--skip-mappings " - - if ignore_non_fatal_errors: - options += "--ignore-non-fatal-errors " - - cmd = "thin_check %s %s" % (device, options) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not check %s metadata" % device) - return False - - return True - - def thin_ls(self, source_vg, source_lv, no_headers=False, fields=None, snapshot=False, verbose=True): - """List information about thin LVs on thin pool. 
- The arguments are: - \tsource_vg VG name - \tsource_lv LV name - \tfields list of fields to output, default is all - \tsnapshot If use metadata snapshot, able to run on live snapshotted pool - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - options = "" - - if not source_vg or not source_lv: - _print("WARN: thin_ls requires source_vg and source_lv.") - return False - - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - device = self._get_device_path(source_vg, source_lv) - - if no_headers: - options += "--no-headers " - - fields_possible = ["DEV", "MAPPED_BLOCKS", "EXCLUSIVE_BLOCKS", "SHARED_BLOCKS", "MAPPED_SECTORS", - "EXCLUSIVE_SECTORS", "SHARED_SECTORS", "MAPPED_BYTES", "EXCLUSIVE_BYTES", "SHARED_BYTES", - "MAPPED", "EXCLUSIVE", "TRANSACTION", "CREATE_TIME", "SHARED", "SNAP_TIME"] - if fields is None: - options += " --format \"%s\" " % ",".join([str(i) for i in fields_possible]) - else: - for field in fields: - if field not in fields_possible: - _print("WARN: Unknown field %s specified." % field) - _print("INFO: Possible fields are: %s" % ", ".join([str(i) for i in fields_possible])) - return False - options += " --format \"%s\" " % ",".join([str(i) for i in fields]) - - if snapshot: - options += "--metadata-snap" - - cmd = "thin_ls %s %s" % (device, options) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not list %s metadata" % device) - return False - - return True - - def thin_dump(self, source_file=None, source_vg=None, source_lv=None, output=None, repair=False, formatting=None, - snapshot=None, dev_id=None, skip_mappings=False, verbose=True, return_output=False): - """Dumps thin metadata from device of source file to standard output or file. 
- The arguments are: - \tsource_file - \tsource_vg VG name - \tsource_lv LV name - \toutput specify output xml file - \treturn_output see 'Returns', not usable with output=True - \trepair Repair the metadata while dumping it - \tformatting Specify output format [xml, human_readable, custom='file'] - \tsnapshot (Boolean/Int) Use metadata snapshot. If Int provided, specifies block number - \tdev_id ID of the device - Returns: - \tOnly Boolean if return_output False: - \t\tTrue if success - \t'tFalse in case of failure - \tBoolean and data if return_output True - """ - options = "" - - if return_output and output: - _print("INFO: Cannot return to both STDOUT and file, returning only to file.") - return_output = False - - if return_output: - ret_fail = (False, None) - else: - ret_fail = False - - if not source_file and (not source_vg or not source_lv): - _print("WARN: thin_dump requires either source_file OR source_vg and source_lv.") - return ret_fail - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return ret_fail - ret = self._activate_device(source_vg, source_lv) - if not ret: - return ret_fail - device = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return ret_fail - device = source_file - - if output: - if not os.path.isfile(output): - size = self._metadata_size(source_file, source_lv, source_vg) - ret = self._fallocate(output, size + 1, "dump") - if not ret: - return ret_fail - options += "-o %s " % output - - if repair: - options += "--repair " - - if snapshot: - if isinstance(snapshot, bool): - options += "--metadata-snap " - elif isinstance(snapshot, int): - options += "--metadata-snap %s " % snapshot - else: - _print("WARN: Unknown snapshot value, use either Boolean or Int.") - return ret_fail - - if formatting: - if formatting in ["xml", "human_readable"]: - options += "--format %s " % formatting - elif 
formatting.startswith("custom="): - if not os.path.isfile(formatting[8:-1]): - _print("WARN: Specified custom formatting file is not a file.") - return ret_fail - options += "--format %s " % formatting - else: - _print("WARN: Unknown formatting specified, please use one of [xml, human_readable, custom='file'].") - return ret_fail - - if dev_id: - if isinstance(dev_id, int): - if self._get_dev_id(dev_id, source_file, source_lv, source_vg): - options += "--dev-id %s " % dev_id - else: - _print("WARN: Unknown dev_id value, device with ID %s does not exist." % dev_id) - return ret_fail - else: - _print("WARN: Unknown dev_id value, must be Int.") - return ret_fail - - if skip_mappings: - options += "--skip-mappings " - - cmd = "thin_dump %s %s" % (device, options) - if return_output: - retcode, data = run(cmd, return_output=True, verbose=verbose) - else: - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not dump %s metadata." % device) - return ret_fail - - if return_output: - return True, data - return True - - def thin_restore(self, source_file, target_vg=None, target_lv=None, target_file=None, quiet=False, verbose=True): - """Restores thin metadata from source xml file to target device/file - The arguments are: - \tsource_file Source xml file - \ttarget as either target_file OR target_vg and target_lv - \tquiet Mute STDOUT - \tmetadata_version Specify metadata version to restore - \tomit_clean_shutdown Disable clean shutdown - \toverride_metadata_version DEBUG option to override metadata version without checking - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - options = "" - - if source_file is None: - _print("WARN: thin_restore requires source file.") - return False - - if not target_file and (not target_vg or not target_lv): - _print("WARN: thin_restore requires either target_file OR target_vg and target_lv as target.") - return False - - if not os.path.isfile(source_file): - _print("WARN: Source 
file is not a file.") - return False - - if not target_file: - ret = self._check_device(target_vg, target_lv) - if not ret: - return False - ret = self._activate_device(target_vg, target_lv) - if not ret: - return False - target = self._get_device_path(target_vg, target_lv) - else: - if not os.path.isfile(target_file): - size = self._metadata_size(source_file) - ret = self._fallocate(target_file, size + 1, "restore") - if not ret: - return False - target = target_file - - if quiet: - options += "--quiet" - - cmd = "thin_restore -i %s -o %s %s" % (source_file, target, options) - - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not restore metadata from %s to %s" % (source_file, target)) - return False - - return True - - def thin_repair(self, source_file=None, source_vg=None, source_lv=None, target_file=None, target_vg=None, - target_lv=None, verbose=True): - """Repairs thin metadata from source file/device to target file/device - The arguments are: - \tsource as either source_file OR source_vg and source_lv - \ttarget as either target_file OR target_vg and target_lv - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - if not source_file and (not source_vg or not source_lv): - _print("WARN: thin_repair requires either source_file OR source_vg and source_lv as source.") - return False - - if not target_file and (not target_vg or not target_lv): - _print("WARN: thin_repair requires either target_file OR target_vg and target_lv as target.") - return False - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - source = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - source = source_file - - if not target_file: - ret = self._check_device(target_vg, target_lv) - if not ret: - 
return False - ret = self._activate_device(target_vg, target_lv) - if not ret: - return False - target = self._get_device_path(target_vg, target_lv) - else: - if not os.path.isfile(target_file): - size = self._metadata_size(source_file, source_lv, source_vg) - ret = self._fallocate(target_file, size + 1, "repair") - if not ret: - return False - target = target_file - - cmd = "thin_repair -i %s -o %s" % (source, target) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not repair metadata from %s to %s" % (source, target)) - return False - - return True - - def thin_rmap(self, region, source_file=None, source_vg=None, source_lv=None, verbose=True): - """Output reverse map of a thin provisioned region of blocks from metadata device. - The arguments are: - \tsource_vg VG name - \tsource_lv LV name - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - if not source_file and (not source_vg or not source_lv): - _print("WARN: thin_rmap requires either source_file OR source_vg and source_lv as source.") - return False - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - device = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - device = source_file - - regions = region.split(".") - try: - int(regions[0]) - if regions[1] != '': - raise ValueError - int(regions[2]) - if regions[3] is not None: - raise ValueError - except ValueError: - _print("WARN: Region must be in format 'INT..INT'") - return False - except IndexError: - pass - # region 1..-1 must be valid, using usigned 32bit ints - if int(regions[0]) & 0xffffffff >= int(regions[2]) & 0xffffffff: - _print("WARN: Beginning of the region must be before its end.") - return False - options = "--region %s" % region - - cmd = "thin_rmap 
%s %s" % (device, options) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not output reverse map from %s metadata device" % device) - return False - - return True - - def thin_trim(self, target_vg, target_lv, force=True, verbose=True): - """Issue discard requests for free pool space. - The arguments are: - \ttarget_vg VG name - \ttarget_lv LV name - \tforce suppress warning message and disable prompt, default True - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - options = "" - - if force: - options += " --pool-inactive" - - if not target_vg or not target_lv: - _print("WARN: thin_trim requires target_vg and target_lv.") - return False - - ret = self._check_device(target_vg, target_lv) - if not ret: - return False - - ret = self._activate_device(target_vg, target_lv) - if not ret: - return False - - device = self._get_device_path(target_vg, target_lv) - cmd = "thin_trim %s %s" % (device, options) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not discard free pool space on device %s." % device) - return False - - return True - - def thin_delta(self, thin1, thin2, source_file=None, source_vg=None, source_lv=None, snapshot=False, - verbosity=False, verbose=True): - """Print the differences in the mappings between two thin devices.. - The arguments are: - \tsource_vg VG name - \tsource_lv LV name - \tthin1 numeric identificator of first thin volume - \tthin2 numeric identificator of second thin volume - \tsnapshot (Boolean/Int) Use metadata snapshot. 
If Int provided, specifies block number - \tverbosity Provide extra information on the mappings - Returns: - \tBoolean: - \t\tTrue if success - \t'tFalse in case of failure - """ - - options = "" - - if not source_file and (not source_vg or not source_lv): - _print("WARN: thin_delta requires either source_file OR source_vg and source_lv.") - return False - - if not source_file: - ret = self._check_device(source_vg, source_lv) - if not ret: - return False - ret = self._activate_device(source_vg, source_lv) - if not ret: - return False - device = self._get_device_path(source_vg, source_lv) - else: - if not os.path.isfile(source_file): - _print("WARN: Source file is not a file.") - return False - device = source_file - - if snapshot: - if isinstance(snapshot, bool): - options += "--metadata-snap " - elif isinstance(snapshot, int): - options += "--metadata-snap %s " % snapshot - else: - _print("WARN: Unknown snapshot value, use either Boolean or Int.") - return False - - if verbosity: - options += "--verbose" - - if self._get_dev_id(thin1, source_file, source_lv, source_vg) and \ - self._get_dev_id(thin2, source_file, source_lv, source_vg): - cmd = "thin_delta %s --thin1 %s --thin2 %s %s" % (options, thin1, thin2, device) - retcode = run(cmd, verbose=verbose) - if retcode != 0: - _print("WARN: Could not get differences in mappings between two thin LVs.") - return False - else: - _print("WARN: Specified ID does not exist.") - return False - return True diff --git a/tests/functions_test/runtest.sh b/tests/functions_test/runtest.sh deleted file mode 100755 index 6387254..0000000 --- a/tests/functions_test/runtest.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -# Copyright (c) 2017 Red Hat, Inc. All rights reserved. This copyrighted -# material is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. 
-# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -# USA. -# -# Author: Jakub Krysl -rhts-run-simple-test functions_test ./dmpd_functions.py diff --git a/tests/tests.yml b/tests/tests.yml deleted file mode 100644 index 67072a2..0000000 --- a/tests/tests.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -# Tests suitable to run in classic environment -- hosts: localhost - roles: - - role: standard-test-beakerlib - tags: - - classic - tests: - - tools_not_linked_usr - - functions_test - required_packages: - - findutils # beakerlib needs find command - - which # tools_not_linked_usr needs which command - -# Tests suitable to run in container and atomic environments -- hosts: localhost - roles: - - role: standard-test-beakerlib - tags: - - container - - atomic - tests: - - tools_not_linked_usr - required_packages: - - findutils # beakerlib needs find command - - which # tools_not_linked_usr needs which command diff --git a/tests/tools_not_linked_usr/Makefile b/tests/tools_not_linked_usr/Makefile deleted file mode 100644 index 892073f..0000000 --- a/tests/tools_not_linked_usr/Makefile +++ /dev/null @@ -1,75 +0,0 @@ -# Copyright (c) 2006 Red Hat, Inc. All rights reserved. This copyrighted material -# is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. 
See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; If not, see http://www.gnu.org/licenses/. -# -# Author: Bruno Goncalves - -# The toplevel namespace within which the test lives. -TOPLEVEL_NAMESPACE=kernel - -# The name of the package under test: -PACKAGE_NAME=storage - -# The path of the test below the package: -RELATIVE_PATH=lvm/device-mapper-persistent-data/tools_not_linked_usr - -# Version of the Test. Used with make tag. -export TESTVERSION=1.0 - -# The combined namespace of the test. -export TEST=/$(TOPLEVEL_NAMESPACE)/$(PACKAGE_NAME)/$(RELATIVE_PATH) - - -# A phony target is one that is not really the name of a file. -# It is just a name for some commands to be executed when you -# make an explicit request. There are two reasons to use a -# phony target: to avoid a conflict with a file of the same -# name, and to improve performance. -.PHONY: all install download clean - -# executables to be built should be added here, they will be generated on the system under test. -BUILT_FILES= - -# data files, .c files, scripts anything needed to either compile the test and/or run it. -FILES=$(METADATA) runtest.sh PURPOSE tools_not_linked_usr.py - -run: $(FILES) build - ./runtest.sh - -build: $(BUILT_FILES) - chmod a+x ./runtest.sh - chmod a+x ./tools_not_linked_usr.py - -clean: - rm -f *~ *.rpm $(BUILT_FILES) - -# You may need to add other targets e.g. 
to build executables from source code -# Add them here: - - -# Include Common Makefile -include /usr/share/rhts/lib/rhts-make.include - -# Generate the testinfo.desc here: -$(METADATA): Makefile - @touch $(METADATA) -# Change to the test owner's name - @echo "Owner: Bruno Goncalves " > $(METADATA) - @echo "Name: $(TEST)" >> $(METADATA) - @echo "Path: $(TEST_DIR)" >> $(METADATA) - @echo "License: GPL" >> $(METADATA) - @echo "TestVersion: $(TESTVERSION)" >> $(METADATA) - @echo "Description: Make sure tools are not linked to lib under /usr.">> $(METADATA) - @echo "TestTime: 1h" >> $(METADATA) - @echo "RunFor: $(PACKAGE_NAME)" >> $(METADATA) - @echo "Requires: $(PACKAGE_NAME)" >> $(METADATA) - - rhts-lint $(METADATA) diff --git a/tests/tools_not_linked_usr/PURPOSE b/tests/tools_not_linked_usr/PURPOSE deleted file mode 100644 index 3408f47..0000000 --- a/tests/tools_not_linked_usr/PURPOSE +++ /dev/null @@ -1,13 +0,0 @@ -#=========================================================================== -# -# Description: -# Make sure tools are not using any library that is linked to /usr -# -# Bugs related: -# -# Author(s): -# Bruno Goncalves -# -# -#=========================================================================== -# EndFile diff --git a/tests/tools_not_linked_usr/runtest.sh b/tests/tools_not_linked_usr/runtest.sh deleted file mode 100755 index 06c675c..0000000 --- a/tests/tools_not_linked_usr/runtest.sh +++ /dev/null @@ -1,18 +0,0 @@ -#!/bin/sh -# Copyright (c) 2006 Red Hat, Inc. All rights reserved. This copyrighted -# material is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. -# -# This program is distributed in the hope that it will be useful, but -# WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY -# or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License -# for more details. 
-# -# You should have received a copy of the GNU General Public License -# along with this program; if not, write to the Free Software -# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, -# USA. -# -# Author: Bruno Goncalves -rhts-run-simple-test tools_not_linked_usr ./tools_not_linked_usr.py diff --git a/tests/tools_not_linked_usr/tools_not_linked_usr.py b/tests/tools_not_linked_usr/tools_not_linked_usr.py deleted file mode 100755 index 7be6393..0000000 --- a/tests/tools_not_linked_usr/tools_not_linked_usr.py +++ /dev/null @@ -1,95 +0,0 @@ -#!/usr/bin/python - -# Copyright (c) 2006 Red Hat, Inc. All rights reserved. This copyrighted material -# is made available to anyone wishing to use, modify, copy, or -# redistribute it subject to the terms and conditions of the GNU General -# Public License v.2. -# -# This program is distributed in the hope that it will be useful, but WITHOUT ANY -# WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A -# PARTICULAR PURPOSE. See the GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program; If not, see http://www.gnu.org/licenses/. -# -# Author: Bruno Goncalves - -import subprocess -import sys -import re - -def run(cmd): - print("INFO: Running '%s'..." 
% cmd) - p = subprocess.Popen(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE, shell=True) - stdout, stderr = p.communicate() - - retcode = p.returncode - output = stdout.decode('ascii', 'ignore') + stderr.decode('ascii', 'ignore') - - # remove new line from last line - output = output.rstrip() - print(output) - return retcode, output - -def start_test(): - - #if uses any library linked to /usr this my affect the tools during boot - print("INFO: Making sure tools provided by device-mapper-persistent-data " - "are not linked to /usr") - - #Paths where we should have no libraries linked from - lib_paths = ["/usr/"] - - package = "device-mapper-persistent-data" - run("yum install -y %s" % package) - #Get all tools that we need to check - ret, output = run("rpm -ql %s | grep \"sbin/\"" % package) - if ret != 0: - print("FAIL: Could not get the tools shipped from %s" % package) - return False - tools = output.split("\n") - - error = False - for tool in tools: - if not tool: - #skip any blank line - continue - tool_error = 0 - for lib_path in lib_paths: - print("INFO: Checking if %s is not linked to libraries at %s" % (tool, lib_path)) - ret, linked_lib = run("ldd %s" % tool) - if ret != 0: - print("FAIL: Could not list dynamically libraries for %s" % (tool)) - tool_error += 1 - else: - #The command executed sucessfuly - #check if any library linked is from lib_path - links = linked_lib.split("\n") - for link in links: - if re.match(".*%s.*" % lib_path, link): - print("FAIL: %s is linked to %s" % (tool, link)) - tool_error += 1 - - if tool_error == 0: - print("%s is not linked to %s" % (tool, lib_path)) - else: - #found some error in at least 1 tool - error = True - - if error: - return False - - return True - - -def main(): - - if not start_test(): - print("FAIL: test failed") - sys.exit(1) - - print("PASS: Test pass") - sys.exit(0) - -main() -