From: "Sam James" <sam@gentoo.org>
To: gentoo-commits@lists.gentoo.org
Subject: [gentoo-commits] proj/portage:master commit in: bin/
Date: Sat, 31 Dec 2022 13:33:08 +0000 (UTC)
Message-ID: <1672493364.7141d7f0033bc7bf5bdf825271a0002657d4fb83.sam@gentoo>
commit: 7141d7f0033bc7bf5bdf825271a0002657d4fb83
Author: Mike Gilbert <floppym <AT> gentoo <DOT> org>
AuthorDate: Tue Dec 27 03:57:56 2022 +0000
Commit: Sam James <sam <AT> gentoo <DOT> org>
CommitDate: Sat Dec 31 13:29:24 2022 +0000
URL: https://gitweb.gentoo.org/proj/portage.git/commit/?id=7141d7f0
Rework signal handling in entry scripts
Introduce a new exception, SignalInterrupt, which inherits from
KeyboardInterrupt and adds a 'signum' member. When a signal is received,
raise SignalInterrupt.

At the end of the script, catch KeyboardInterrupt and look for the
signum member, falling back to SIGINT for a plain ^C. Reset the signal
handler to SIG_DFL and re-raise the signal to kill the process.

This ensures that the invoking shell sees that the process was killed
by a signal instead of exiting normally.
Bug: https://bugs.gentoo.org/887817
Signed-off-by: Mike Gilbert <floppym <AT> gentoo.org>
Closes: https://github.com/gentoo/portage/pull/965
Signed-off-by: Sam James <sam <AT> gentoo.org>
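(Editorial note: the pattern, condensed into a minimal standalone
sketch. This is distilled from the diff below for illustration, not the
complete entry-script logic.)

    import os
    import signal

    # signal.raise_signal() was added in Python 3.8; older interpreters
    # fall back to delivering the signal to ourselves with os.kill().
    raise_signal = getattr(
        signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
    )

    # Inheriting from KeyboardInterrupt keeps asyncio from printing a
    # traceback when the exception propagates out of the event loop.
    class SignalInterrupt(KeyboardInterrupt):
        def __init__(self, signum):
            self.signum = signum

    def signal_interrupt(signum, _frame):
        raise SignalInterrupt(signum)

    try:
        signal.signal(signal.SIGTERM, signal_interrupt)
        ...  # normal script body runs here
    except KeyboardInterrupt as e:
        # A plain ^C carries no signum attribute; assume SIGINT.
        signum = getattr(e, "signum", signal.SIGINT)
        # Restore the default disposition and re-raise so the parent
        # observes a genuine signal death instead of a normal exit.
        signal.signal(signum, signal.SIG_DFL)
        raise_signal(signum)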
bin/ebuild | 781 +++++++--------
bin/ebuild-ipc.py | 505 +++++-----
bin/egencache | 2473 +++++++++++++++++++++++-----------------------
bin/emaint | 86 +-
bin/emerge | 43 +-
bin/portageq | 2854 ++++++++++++++++++++++++++---------------------------
6 files changed, 3381 insertions(+), 3361 deletions(-)
diff --git a/bin/ebuild b/bin/ebuild
index 112e14e3d..8f73b8684 100755
--- a/bin/ebuild
+++ b/bin/ebuild
@@ -2,434 +2,441 @@
# Copyright 1999-2022 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-import argparse
+import os
import signal
-import sys
-import textwrap
-# This block ensures that ^C interrupts are handled quietly.
-try:
+# For compatibility with Python < 3.8
+raise_signal = getattr(
+ signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
+)
- def exithandler(signum, _frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- sys.exit(128 + signum)
+# Inherit from KeyboardInterrupt to avoid a traceback from asyncio.
+class SignalInterrupt(KeyboardInterrupt):
+ def __init__(self, signum):
+ self.signum = signum
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
- # Prevent "[Errno 32] Broken pipe" exceptions when
- # writing to a pipe.
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
-except KeyboardInterrupt:
- sys.exit(128 + signal.SIGINT)
+try:
+ def signal_interrupt(signum, _frame):
+ raise SignalInterrupt(signum)
-def debug_signal(_signum, _frame):
- import pdb
+ def debug_signal(_signum, _frame):
+ import pdb
- pdb.set_trace()
+ pdb.set_trace()
+ # Prevent "[Errno 32] Broken pipe" exceptions when writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal_interrupt)
+ signal.signal(signal.SIGUSR1, debug_signal)
-signal.signal(signal.SIGUSR1, debug_signal)
+ import argparse
+ from os import path as osp
+ import sys
+ import textwrap
-import os
-from os import path as osp
+ if osp.isfile(
+ osp.join(
+ osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed"
+ )
+ ):
+ sys.path.insert(
+ 0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib")
+ )
+ import portage
+
+ portage._internal_caller = True
+ from portage import os
+ from portage import _encodings
+ from portage import _shell_quote
+ from portage import _unicode_encode
+ from portage.const import VDB_PATH
+ from portage.exception import (
+ PermissionDenied,
+ PortageKeyError,
+ PortagePackageException,
+ UnsupportedAPIException,
+ )
+ from portage.localization import _
+ import portage.util
+ from portage.util._eventloop.global_event_loop import global_event_loop
+ from _emerge.actions import apply_priorities
+ from _emerge.Package import Package
+ from _emerge.RootConfig import RootConfig
+
+ portage.process.sanitize_fds()
+
+ description = "See the ebuild(1) man page for more info"
+ usage = "Usage: ebuild <ebuild file> <command> [command] ..."
+ parser = argparse.ArgumentParser(description=description, usage=usage)
+
+ force_help = (
+ "When used together with the digest or manifest "
+ + "command, this option forces regeneration of digests for all "
+ + "distfiles associated with the current ebuild. Any distfiles "
+ + "that do not already exist in ${DISTDIR} will be automatically fetched."
+ )
-if osp.isfile(
- osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")
-):
- sys.path.insert(
- 0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib")
+ parser.add_argument("--force", help=force_help, action="store_true")
+ parser.add_argument(
+ "--color", help="enable or disable color output", choices=("y", "n")
+ )
+ parser.add_argument("--debug", help="show debug output", action="store_true")
+ parser.add_argument("--version", help="show version and exit", action="store_true")
+ parser.add_argument(
+ "--ignore-default-opts",
+ action="store_true",
+ help="do not use the EBUILD_DEFAULT_OPTS environment variable",
+ )
+ parser.add_argument(
+ "--skip-manifest", help="skip all manifest checks", action="store_true"
)
-import portage
-
-portage._internal_caller = True
-from portage import os
-from portage import _encodings
-from portage import _shell_quote
-from portage import _unicode_encode
-from portage.const import VDB_PATH
-from portage.exception import (
- PermissionDenied,
- PortageKeyError,
- PortagePackageException,
- UnsupportedAPIException,
-)
-from portage.localization import _
-import portage.util
-from portage.util._eventloop.global_event_loop import global_event_loop
-from _emerge.actions import apply_priorities
-from _emerge.Package import Package
-from _emerge.RootConfig import RootConfig
-
-portage.process.sanitize_fds()
-
-description = "See the ebuild(1) man page for more info"
-usage = "Usage: ebuild <ebuild file> <command> [command] ..."
-parser = argparse.ArgumentParser(description=description, usage=usage)
-
-force_help = (
- "When used together with the digest or manifest "
- + "command, this option forces regeneration of digests for all "
- + "distfiles associated with the current ebuild. Any distfiles "
- + "that do not already exist in ${DISTDIR} will be automatically fetched."
-)
-parser.add_argument("--force", help=force_help, action="store_true")
-parser.add_argument(
- "--color", help="enable or disable color output", choices=("y", "n")
-)
-parser.add_argument("--debug", help="show debug output", action="store_true")
-parser.add_argument("--version", help="show version and exit", action="store_true")
-parser.add_argument(
- "--ignore-default-opts",
- action="store_true",
- help="do not use the EBUILD_DEFAULT_OPTS environment variable",
-)
-parser.add_argument(
- "--skip-manifest", help="skip all manifest checks", action="store_true"
-)
+ opts, pargs = parser.parse_known_args(args=sys.argv[1:])
-opts, pargs = parser.parse_known_args(args=sys.argv[1:])
+ def err(txt):
+ portage.writemsg("ebuild: {}\n".format(txt), noiselevel=-1)
+ sys.exit(1)
+ if opts.version:
+ print("Portage", portage.VERSION)
+ sys.exit(os.EX_OK)
-def err(txt):
- portage.writemsg("ebuild: {}\n".format(txt), noiselevel=-1)
- sys.exit(1)
+ if len(pargs) < 2:
+ parser.error("missing required args")
+ if not opts.ignore_default_opts:
+ default_opts = portage.util.shlex_split(
+ portage.settings.get("EBUILD_DEFAULT_OPTS", "")
+ )
+ opts, pargs = parser.parse_known_args(default_opts + sys.argv[1:])
+
+ debug = opts.debug
+ force = opts.force
+
+ if debug:
+ # Ensure that all config instances have this setting,
+ # including the one that's used by portdbapi for aux_get.
+ os.environ["PORTAGE_DEBUG"] = "1"
+ portage._reset_legacy_globals()
+
+ # do this _after_ 'import portage' to prevent unnecessary tracing
+ if debug and "python-trace" in portage.features:
+ portage.debug.set_trace(True)
+
+ if not opts.color == "y" and (
+ opts.color == "n"
+ or portage.settings.get("NOCOLOR") in ("yes", "true")
+ or portage.settings.get("TERM") == "dumb"
+ or not sys.stdout.isatty()
+ ):
+ portage.output.nocolor()
+ portage.settings.unlock()
+ portage.settings["NOCOLOR"] = "true"
+ portage.settings.backup_changes("NOCOLOR")
+ portage.settings.lock()
+
+ apply_priorities(portage.settings)
+
+ ebuild = pargs.pop(0)
+
+ pf = None
+ if ebuild.endswith(".ebuild"):
+ pf = os.path.basename(ebuild)[:-7]
+
+ if pf is None:
+ err("{}: does not end with '.ebuild'".format(ebuild))
+
+ if not os.path.isabs(ebuild):
+ mycwd = os.getcwd()
+ # Try to get the non-canonical path from the PWD environment variable, since
+ # the canonical path returned from os.getcwd() may be unusable in
+ # cases where the directory structure is built from symlinks.
+ pwd = os.environ.get("PWD", "")
+ if pwd and pwd != mycwd and os.path.realpath(pwd) == mycwd:
+ mycwd = portage.normalize_path(pwd)
+ ebuild = os.path.join(mycwd, ebuild)
+ ebuild = portage.normalize_path(ebuild)
+ # portdbapi uses the canonical path for the base of the ebuild repository, but
+ # subdirectories of the base can be built from symlinks (like crossdev does).
+ ebuild_portdir = os.path.realpath(
+ os.path.dirname(os.path.dirname(os.path.dirname(ebuild)))
+ )
+ ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
+ vdb_path = os.path.realpath(os.path.join(portage.settings["EROOT"], VDB_PATH))
+
+ # Make sure that portdb.findname() returns the correct ebuild.
+ if ebuild_portdir != vdb_path and ebuild_portdir not in portage.portdb.porttrees:
+ portdir_overlay = portage.settings.get("PORTDIR_OVERLAY", "")
+ os.environ["PORTDIR_OVERLAY"] = (
+ portdir_overlay + " " + _shell_quote(ebuild_portdir)
+ )
-if opts.version:
- print("Portage", portage.VERSION)
- sys.exit(os.EX_OK)
+ print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
+ portage._reset_legacy_globals()
-if len(pargs) < 2:
- parser.error("missing required args")
+ myrepo = None
+ if ebuild_portdir != vdb_path:
+ myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
-if not opts.ignore_default_opts:
- default_opts = portage.util.shlex_split(
- portage.settings.get("EBUILD_DEFAULT_OPTS", "")
- )
- opts, pargs = parser.parse_known_args(default_opts + sys.argv[1:])
-
-debug = opts.debug
-force = opts.force
-
-if debug:
- # Ensure that all config instances have this setting,
- # including the one that's used by portdbapi for aux_get.
- os.environ["PORTAGE_DEBUG"] = "1"
- portage._reset_legacy_globals()
-
-# do this _after_ 'import portage' to prevent unnecessary tracing
-if debug and "python-trace" in portage.features:
- portage.debug.set_trace(True)
-
-if not opts.color == "y" and (
- opts.color == "n"
- or portage.settings.get("NOCOLOR") in ("yes", "true")
- or portage.settings.get("TERM") == "dumb"
- or not sys.stdout.isatty()
-):
- portage.output.nocolor()
- portage.settings.unlock()
- portage.settings["NOCOLOR"] = "true"
- portage.settings.backup_changes("NOCOLOR")
- portage.settings.lock()
-
-apply_priorities(portage.settings)
-
-ebuild = pargs.pop(0)
-
-pf = None
-if ebuild.endswith(".ebuild"):
- pf = os.path.basename(ebuild)[:-7]
-
-if pf is None:
- err("{}: does not end with '.ebuild'".format(ebuild))
-
-if not os.path.isabs(ebuild):
- mycwd = os.getcwd()
- # Try to get the non-canonical path from the PWD environment variable, since
- # the canonical path returned from os.getcwd() may be unusable in
- # cases where the directory structure is built from symlinks.
- pwd = os.environ.get("PWD", "")
- if pwd and pwd != mycwd and os.path.realpath(pwd) == mycwd:
- mycwd = portage.normalize_path(pwd)
- ebuild = os.path.join(mycwd, ebuild)
-ebuild = portage.normalize_path(ebuild)
-# portdbapi uses the canonical path for the base of the ebuild repository, but
-# subdirectories of the base can be built from symlinks (like crossdev does).
-ebuild_portdir = os.path.realpath(
- os.path.dirname(os.path.dirname(os.path.dirname(ebuild)))
-)
-ebuild = os.path.join(ebuild_portdir, *ebuild.split(os.path.sep)[-3:])
-vdb_path = os.path.realpath(os.path.join(portage.settings["EROOT"], VDB_PATH))
+ if not os.path.exists(ebuild):
+ err("{}: does not exist".format(ebuild))
-# Make sure that portdb.findname() returns the correct ebuild.
-if ebuild_portdir != vdb_path and ebuild_portdir not in portage.portdb.porttrees:
- portdir_overlay = portage.settings.get("PORTDIR_OVERLAY", "")
- os.environ["PORTDIR_OVERLAY"] = portdir_overlay + " " + _shell_quote(ebuild_portdir)
+ ebuild_split = ebuild.split("/")
+ cpv = "{}/{}".format(ebuild_split[-3], pf)
- print("Appending %s to PORTDIR_OVERLAY..." % ebuild_portdir)
- portage._reset_legacy_globals()
+ with open(
+ _unicode_encode(ebuild, encoding=_encodings["fs"], errors="strict"),
+ encoding=_encodings["repo.content"],
+ errors="replace",
+ ) as f:
+ eapi = portage._parse_eapi_ebuild_head(f)[0]
+ if eapi is None:
+ eapi = "0"
+ if not portage.catpkgsplit(cpv, eapi=eapi):
+ err("{}: {}: does not follow correct package syntax".format(ebuild, cpv))
-myrepo = None
-if ebuild_portdir != vdb_path:
- myrepo = portage.portdb.getRepositoryName(ebuild_portdir)
+ if ebuild.startswith(vdb_path):
+ mytree = "vartree"
+ pkg_type = "installed"
-if not os.path.exists(ebuild):
- err("{}: does not exist".format(ebuild))
+ portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(
+ cpv, myrepo=myrepo
+ )
-ebuild_split = ebuild.split("/")
-cpv = "{}/{}".format(ebuild_split[-3], pf)
+ if os.path.realpath(portage_ebuild) != ebuild:
+ err("Portage seems to think that {} is at {}".format(cpv, portage_ebuild))
+
+ else:
+ mytree = "porttree"
+ pkg_type = "ebuild"
+
+ portage_ebuild = portage.portdb.findname(cpv, myrepo=myrepo)
+
+ if not portage_ebuild or portage_ebuild != ebuild:
+ err("{}: does not seem to have a valid PORTDIR structure".format(ebuild))
+
+ if len(pargs) > 1 and "config" in pargs:
+ other_phases = set(pargs)
+ other_phases.difference_update(("clean", "config", "digest", "manifest"))
+ if other_phases:
+ err('"config" must not be called with any other phase')
+
+ def discard_digests(myebuild, mysettings, mydbapi):
+ """Discard all distfiles digests for the given ebuild. This is useful when
+ upstream has changed the identity of the distfiles and the user would
+ otherwise have to manually remove the Manifest and files/digest-* files in
+ order to ensure correct results."""
+ try:
+ portage._doebuild_manifest_exempt_depend += 1
+ pkgdir = os.path.dirname(myebuild)
+ fetchlist_dict = portage.FetchlistDict(pkgdir, mysettings, mydbapi)
+ mf = mysettings.repositories.get_repo_for_location(
+ os.path.dirname(os.path.dirname(pkgdir))
+ )
+ mf = mf.load_manifest(
+ pkgdir, mysettings["DISTDIR"], fetchlist_dict=fetchlist_dict
+ )
+ mf.create(
+ requiredDistfiles=None,
+ assumeDistHashesSometimes=True,
+ assumeDistHashesAlways=True,
+ )
+ distfiles = fetchlist_dict[cpv]
+ for myfile in distfiles:
+ try:
+ del mf.fhashdict["DIST"][myfile]
+ except KeyError:
+ pass
+ mf.write()
+ finally:
+ portage._doebuild_manifest_exempt_depend -= 1
+
+ portage.settings.validate() # generate warning messages if necessary
+
+ build_dir_phases = {
+ "setup",
+ "unpack",
+ "prepare",
+ "configure",
+ "compile",
+ "test",
+ "install",
+ "package",
+ "rpm",
+ "merge",
+ "qmerge",
+ }
+
+ # If the current metadata is invalid then force the ebuild to be
+ # sourced again even if ${T}/environment already exists.
+ ebuild_changed = False
+ if mytree == "porttree" and build_dir_phases.intersection(pargs):
+ ebuild_changed = (
+ portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)[0] is None
+ )
-with open(
- _unicode_encode(ebuild, encoding=_encodings["fs"], errors="strict"),
- encoding=_encodings["repo.content"],
- errors="replace",
-) as f:
- eapi = portage._parse_eapi_ebuild_head(f)[0]
-if eapi is None:
- eapi = "0"
-if not portage.catpkgsplit(cpv, eapi=eapi):
- err("{}: {}: does not follow correct package syntax".format(ebuild, cpv))
+ # Make configuration adjustments to portage.portdb.doebuild_settings,
+ # in order to enforce consistency for EBUILD_FORCE_TEST support
+ # (see bug 601466).
+ tmpsettings = portage.portdb.doebuild_settings
-if ebuild.startswith(vdb_path):
- mytree = "vartree"
- pkg_type = "installed"
+ tmpsettings["PORTAGE_VERBOSE"] = "1"
+ tmpsettings.backup_changes("PORTAGE_VERBOSE")
- portage_ebuild = portage.db[portage.root][mytree].dbapi.findname(cpv, myrepo=myrepo)
+ if opts.skip_manifest:
+ tmpsettings["EBUILD_SKIP_MANIFEST"] = "1"
+ tmpsettings.backup_changes("EBUILD_SKIP_MANIFEST")
- if os.path.realpath(portage_ebuild) != ebuild:
- err("Portage seems to think that {} is at {}".format(cpv, portage_ebuild))
+ if (
+ opts.skip_manifest
+ or "digest" in tmpsettings.features
+ or "digest" in pargs
+ or "manifest" in pargs
+ ):
+ portage._doebuild_manifest_exempt_depend += 1
-else:
- mytree = "porttree"
- pkg_type = "ebuild"
+ if "test" in pargs:
+ # This variable is a signal to config.regenerate() to
+ # indicate that the test phase should be enabled regardless
+ # of problems such as masked "test" USE flag.
+ tmpsettings["EBUILD_FORCE_TEST"] = "1"
+ tmpsettings.backup_changes("EBUILD_FORCE_TEST")
+ tmpsettings.features.add("test")
+ portage.writemsg(_("Forcing test.\n"), noiselevel=-1)
- portage_ebuild = portage.portdb.findname(cpv, myrepo=myrepo)
+ tmpsettings.features.discard("fail-clean")
- if not portage_ebuild or portage_ebuild != ebuild:
- err("{}: does not seem to have a valid PORTDIR structure".format(ebuild))
+ if "merge" in pargs and "noauto" in tmpsettings.features:
+ print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
+ tmpsettings.features.discard("noauto")
-if len(pargs) > 1 and "config" in pargs:
- other_phases = set(pargs)
- other_phases.difference_update(("clean", "config", "digest", "manifest"))
- if other_phases:
- err('"config" must not be called with any other phase')
+ if "digest" in tmpsettings.features:
+ if pargs and pargs[0] not in ("digest", "manifest"):
+ pargs = ["digest"] + pargs
+ # We only need to build digests on the first pass.
+ tmpsettings.features.discard("digest")
+ # Now that configuration adjustments are complete, create a clone of
+ # tmpsettings. The current instance refers to portdb.doebuild_settings,
+ # and we want to avoid the possibility of unintended side-effects.
+ tmpsettings = portage.config(clone=tmpsettings)
-def discard_digests(myebuild, mysettings, mydbapi):
- """Discard all distfiles digests for the given ebuild. This is useful when
- upstream has changed the identity of the distfiles and the user would
- otherwise have to manually remove the Manifest and files/digest-* files in
- order to ensure correct results."""
try:
- portage._doebuild_manifest_exempt_depend += 1
- pkgdir = os.path.dirname(myebuild)
- fetchlist_dict = portage.FetchlistDict(pkgdir, mysettings, mydbapi)
- mf = mysettings.repositories.get_repo_for_location(
- os.path.dirname(os.path.dirname(pkgdir))
- )
- mf = mf.load_manifest(
- pkgdir, mysettings["DISTDIR"], fetchlist_dict=fetchlist_dict
+ metadata = dict(
+ zip(
+ Package.metadata_keys,
+ portage.db[portage.settings["EROOT"]][mytree].dbapi.aux_get(
+ cpv, Package.metadata_keys, myrepo=myrepo
+ ),
+ )
)
- mf.create(
- requiredDistfiles=None,
- assumeDistHashesSometimes=True,
- assumeDistHashesAlways=True,
- )
- distfiles = fetchlist_dict[cpv]
- for myfile in distfiles:
- try:
- del mf.fhashdict["DIST"][myfile]
- except KeyError:
- pass
- mf.write()
- finally:
- portage._doebuild_manifest_exempt_depend -= 1
-
-
-portage.settings.validate() # generate warning messages if necessary
-
-build_dir_phases = {
- "setup",
- "unpack",
- "prepare",
- "configure",
- "compile",
- "test",
- "install",
- "package",
- "rpm",
- "merge",
- "qmerge",
-}
-
-# If the current metadata is invalid then force the ebuild to be
-# sourced again even if ${T}/environment already exists.
-ebuild_changed = False
-if mytree == "porttree" and build_dir_phases.intersection(pargs):
- ebuild_changed = (
- portage.portdb._pull_valid_cache(cpv, ebuild, ebuild_portdir)[0] is None
- )
-
-# Make configuration adjustments to portage.portdb.doebuild_settings,
-# in order to enforce consistency for EBUILD_FORCE_TEST support
-# (see bug 601466).
-tmpsettings = portage.portdb.doebuild_settings
-
-tmpsettings["PORTAGE_VERBOSE"] = "1"
-tmpsettings.backup_changes("PORTAGE_VERBOSE")
-
-if opts.skip_manifest:
- tmpsettings["EBUILD_SKIP_MANIFEST"] = "1"
- tmpsettings.backup_changes("EBUILD_SKIP_MANIFEST")
-
-if (
- opts.skip_manifest
- or "digest" in tmpsettings.features
- or "digest" in pargs
- or "manifest" in pargs
-):
- portage._doebuild_manifest_exempt_depend += 1
-
-if "test" in pargs:
- # This variable is a signal to config.regenerate() to
- # indicate that the test phase should be enabled regardless
- # of problems such as masked "test" USE flag.
- tmpsettings["EBUILD_FORCE_TEST"] = "1"
- tmpsettings.backup_changes("EBUILD_FORCE_TEST")
- tmpsettings.features.add("test")
- portage.writemsg(_("Forcing test.\n"), noiselevel=-1)
-
-tmpsettings.features.discard("fail-clean")
-
-if "merge" in pargs and "noauto" in tmpsettings.features:
- print("Disabling noauto in features... merge disables it. (qmerge doesn't)")
- tmpsettings.features.discard("noauto")
-
-if "digest" in tmpsettings.features:
- if pargs and pargs[0] not in ("digest", "manifest"):
- pargs = ["digest"] + pargs
- # We only need to build digests on the first pass.
- tmpsettings.features.discard("digest")
-
-# Now that configuration adjustments are complete, create a clone of
-# tmpsettings. The current instance refers to portdb.doebuild_settings,
-# and we want to avoid the possibility of unintended side-effects.
-tmpsettings = portage.config(clone=tmpsettings)
+ except PortageKeyError:
+ # aux_get failure, message should have been shown on stderr.
+ sys.exit(1)
-try:
- metadata = dict(
- zip(
- Package.metadata_keys,
- portage.db[portage.settings["EROOT"]][mytree].dbapi.aux_get(
- cpv, Package.metadata_keys, myrepo=myrepo
- ),
- )
+ root_config = RootConfig(
+ portage.settings, portage.db[portage.settings["EROOT"]], None
)
-except PortageKeyError:
- # aux_get failure, message should have been shown on stderr.
- sys.exit(1)
-
-root_config = RootConfig(portage.settings, portage.db[portage.settings["EROOT"]], None)
-cpv = portage.versions._pkg_str(
- cpv,
- metadata=metadata,
- settings=portage.settings,
- db=portage.db[portage.settings["EROOT"]][mytree].dbapi,
-)
-
-pkg = Package(
- built=(pkg_type != "ebuild"),
- cpv=cpv,
- installed=(pkg_type == "installed"),
- metadata=metadata,
- root_config=root_config,
- type_name=pkg_type,
-)
-
-# Apply package.env and repo-level settings. This allows per-package
-# FEATURES and other variables (possibly PORTAGE_TMPDIR) to be
-# available as soon as possible. Also, note that the only way to ensure
-# that setcpv gets metadata from the correct repository is to pass in
-# a Package instance, as we do here (previously we had to modify
-# portdb.porttrees in order to accomplish this).
-tmpsettings.setcpv(pkg)
+ cpv = portage.versions._pkg_str(
+ cpv,
+ metadata=metadata,
+ settings=portage.settings,
+ db=portage.db[portage.settings["EROOT"]][mytree].dbapi,
+ )
+ pkg = Package(
+ built=(pkg_type != "ebuild"),
+ cpv=cpv,
+ installed=(pkg_type == "installed"),
+ metadata=metadata,
+ root_config=root_config,
+ type_name=pkg_type,
+ )
-def stale_env_warning():
- if (
- "clean" not in pargs
- and "noauto" not in tmpsettings.features
- and build_dir_phases.intersection(pargs)
- ):
- portage.doebuild_environment(
- ebuild, "setup", portage.root, tmpsettings, debug, 1, portage.portdb
- )
- env_filename = os.path.join(tmpsettings["T"], "environment")
- if os.path.exists(env_filename):
- msg = (
- "Existing ${T}/environment for '%s' will be sourced. "
- + "Run 'clean' to start with a fresh environment."
- ) % (tmpsettings["PF"],)
- msg = textwrap.wrap(msg, 70)
+ # Apply package.env and repo-level settings. This allows per-package
+ # FEATURES and other variables (possibly PORTAGE_TMPDIR) to be
+ # available as soon as possible. Also, note that the only way to ensure
+ # that setcpv gets metadata from the correct repository is to pass in
+ # a Package instance, as we do here (previously we had to modify
+ # portdb.porttrees in order to accomplish this).
+ tmpsettings.setcpv(pkg)
+
+ def stale_env_warning():
+ if (
+ "clean" not in pargs
+ and "noauto" not in tmpsettings.features
+ and build_dir_phases.intersection(pargs)
+ ):
+ portage.doebuild_environment(
+ ebuild, "setup", portage.root, tmpsettings, debug, 1, portage.portdb
+ )
+ env_filename = os.path.join(tmpsettings["T"], "environment")
+ if os.path.exists(env_filename):
+ msg = (
+ "Existing ${T}/environment for '%s' will be sourced. "
+ + "Run 'clean' to start with a fresh environment."
+ ) % (tmpsettings["PF"],)
+ msg = textwrap.wrap(msg, 70)
+ for x in msg:
+ portage.writemsg(">>> %s\n" % x)
+
+ if ebuild_changed:
+ open(
+ os.path.join(
+ tmpsettings["PORTAGE_BUILDDIR"], ".ebuild_changed"
+ ),
+ "w",
+ ).close()
+
+ checked_for_stale_env = False
+
+ for arg in pargs:
+ try:
+ if not checked_for_stale_env and arg not in ("digest", "manifest"):
+ # This has to go after manifest generation since otherwise
+ # aux_get() might fail due to invalid ebuild digests.
+ stale_env_warning()
+ checked_for_stale_env = True
+
+ if arg in ("digest", "manifest") and force:
+ discard_digests(ebuild, tmpsettings, portage.portdb)
+ a = portage.doebuild(
+ ebuild,
+ arg,
+ settings=tmpsettings,
+ debug=debug,
+ tree=mytree,
+ vartree=portage.db[portage.root]["vartree"],
+ )
+ except PortageKeyError:
+ # aux_get error
+ a = 1
+ except UnsupportedAPIException as e:
+ msg = textwrap.wrap(str(e), 70)
+ del e
for x in msg:
- portage.writemsg(">>> %s\n" % x)
-
- if ebuild_changed:
- open(
- os.path.join(tmpsettings["PORTAGE_BUILDDIR"], ".ebuild_changed"),
- "w",
- ).close()
-
-
-checked_for_stale_env = False
-
-for arg in pargs:
- try:
- if not checked_for_stale_env and arg not in ("digest", "manifest"):
- # This has to go after manifest generation since otherwise
- # aux_get() might fail due to invalid ebuild digests.
- stale_env_warning()
- checked_for_stale_env = True
-
- if arg in ("digest", "manifest") and force:
- discard_digests(ebuild, tmpsettings, portage.portdb)
- a = portage.doebuild(
- ebuild,
- arg,
- settings=tmpsettings,
- debug=debug,
- tree=mytree,
- vartree=portage.db[portage.root]["vartree"],
- )
- except KeyboardInterrupt:
- print("Interrupted.")
- a = 1
- except PortageKeyError:
- # aux_get error
- a = 1
- except UnsupportedAPIException as e:
- msg = textwrap.wrap(str(e), 70)
- del e
- for x in msg:
- portage.writemsg("!!! %s\n" % x, noiselevel=-1)
- a = 1
- except PortagePackageException as e:
- portage.writemsg("!!! {}\n".format(e), noiselevel=-1)
- a = 1
- except PermissionDenied as e:
- portage.writemsg("!!! Permission Denied: {}\n".format(e), noiselevel=-1)
- a = 1
- if a is None:
- print("Could not run the required binary?")
- a = 127
- if a:
- global_event_loop().close()
- sys.exit(a)
-
-global_event_loop().close()
+ portage.writemsg("!!! %s\n" % x, noiselevel=-1)
+ a = 1
+ except PortagePackageException as e:
+ portage.writemsg("!!! {}\n".format(e), noiselevel=-1)
+ a = 1
+ except PermissionDenied as e:
+ portage.writemsg("!!! Permission Denied: {}\n".format(e), noiselevel=-1)
+ a = 1
+ if a is None:
+ print("Could not run the required binary?")
+ a = 127
+ if a:
+ global_event_loop().close()
+ sys.exit(a)
+
+ global_event_loop().close()
+
+except KeyboardInterrupt as e:
+ # Prevent traceback on ^C
+ signum = getattr(e, "signum", signal.SIGINT)
+ signal.signal(signum, signal.SIG_DFL)
+ raise_signal(signum)
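(Editorial note: a quick way to see the behavior this commit is after,
using sleep(1) as a hypothetical stand-in for one of the reworked
scripts. With the old exithandler's sys.exit(128 + signum), the child
would report a normal exit with status 143; with the re-raise, the
parent observes a genuine signal death.)

    import signal
    import subprocess

    proc = subprocess.Popen(["sleep", "60"])
    proc.send_signal(signal.SIGTERM)
    returncode = proc.wait()
    # subprocess reports death-by-signal as a negative return code,
    # i.e. -15 here; os.WIFSIGNALED() would be true for os.wait().
    assert returncode == -signal.SIGTERM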
diff --git a/bin/ebuild-ipc.py b/bin/ebuild-ipc.py
index d0d902aff..fc632e015 100755
--- a/bin/ebuild-ipc.py
+++ b/bin/ebuild-ipc.py
@@ -5,316 +5,323 @@
# This is a helper which ebuild processes can use
# to communicate with portage's main python process.
-# This block ensures that ^C interrupts are handled quietly.
-try:
- import os
- import signal
+import os
+import signal
- def exithandler(signum, _frame):
- signal.signal(signum, signal.SIG_DFL)
- os.kill(os.getpid(), signum)
+# For compatibility with Python < 3.8
+raise_signal = getattr(
+ signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
+)
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+# Inherit from KeyboardInterrupt to avoid a traceback from asyncio.
+class SignalInterrupt(KeyboardInterrupt):
+ def __init__(self, signum):
+ self.signum = signum
-except KeyboardInterrupt:
- raise SystemExit(130)
-import errno
-import logging
-import pickle
-import sys
-import time
+try:
+ def signal_interrupt(signum, _frame):
+ raise SignalInterrupt(signum)
-def debug_signal(signum, frame):
- import pdb
+ def debug_signal(_signum, _frame):
+ import pdb
- pdb.set_trace()
+ pdb.set_trace()
+ # Prevent "[Errno 32] Broken pipe" exceptions when writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal_interrupt)
+ signal.signal(signal.SIGUSR1, debug_signal)
-signal.signal(signal.SIGUSR1, debug_signal)
+ import errno
+ import logging
+ import pickle
+ import sys
+ import time
-if os.path.isfile(
- os.path.join(
- os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
- ".portage_not_installed",
- )
-):
- pym_paths = [
+ if os.path.isfile(
os.path.join(
- os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "lib"
+ os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
+ ".portage_not_installed",
)
- ]
- sys.path.insert(0, pym_paths[0])
-else:
- import sysconfig
-
- pym_paths = [
- os.path.join(sysconfig.get_path("purelib"), x) for x in ("_emerge", "portage")
- ]
-# Avoid sandbox violations after Python upgrade.
-if os.environ.get("SANDBOX_ON") == "1":
- sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
- for pym_path in pym_paths:
- if pym_path not in sandbox_write:
- sandbox_write.append(pym_path)
- os.environ["SANDBOX_WRITE"] = ":".join(filter(None, sandbox_write))
- del pym_path, sandbox_write
-del pym_paths
+ ):
+ pym_paths = [
+ os.path.join(
+ os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "lib"
+ )
+ ]
+ sys.path.insert(0, pym_paths[0])
+ else:
+ import sysconfig
-import portage
+ pym_paths = [
+ os.path.join(sysconfig.get_path("purelib"), x)
+ for x in ("_emerge", "portage")
+ ]
+ # Avoid sandbox violations after Python upgrade.
+ if os.environ.get("SANDBOX_ON") == "1":
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ for pym_path in pym_paths:
+ if pym_path not in sandbox_write:
+ sandbox_write.append(pym_path)
+ os.environ["SANDBOX_WRITE"] = ":".join(filter(None, sandbox_write))
+ del pym_path, sandbox_write
+ del pym_paths
-portage._internal_caller = True
-portage._disable_legacy_globals()
+ import portage
-from portage.util._eventloop.global_event_loop import global_event_loop
-from _emerge.AbstractPollTask import AbstractPollTask
-from _emerge.PipeReader import PipeReader
+ portage._internal_caller = True
+ portage._disable_legacy_globals()
-RETURNCODE_WRITE_FAILED = 2
+ from portage.util._eventloop.global_event_loop import global_event_loop
+ from _emerge.AbstractPollTask import AbstractPollTask
+ from _emerge.PipeReader import PipeReader
+ RETURNCODE_WRITE_FAILED = 2
-class FifoWriter(AbstractPollTask):
+ class FifoWriter(AbstractPollTask):
- __slots__ = ("buf", "fifo", "_fd")
+ __slots__ = ("buf", "fifo", "_fd")
- def _start(self):
- try:
- self._fd = os.open(self.fifo, os.O_WRONLY | os.O_NONBLOCK)
- except OSError as e:
- if e.errno == errno.ENXIO:
- # This happens if the daemon has been killed.
- self.returncode = RETURNCODE_WRITE_FAILED
- self._unregister()
- self._async_wait()
- return
- else:
- raise
- self.scheduler.add_writer(self._fd, self._output_handler)
- self._registered = True
-
- def _output_handler(self):
- # The whole buf should be able to fit in the fifo with
- # a single write call, so there's no valid reason for
- # os.write to raise EAGAIN here.
- fd = self._fd
- buf = self.buf
- while buf:
+ def _start(self):
try:
- buf = buf[os.write(fd, buf) :]
- except OSError:
- self.returncode = RETURNCODE_WRITE_FAILED
- self._async_wait()
- return
-
- self.returncode = os.EX_OK
- self._async_wait()
-
- def _cancel(self):
- self.returncode = self._cancelled_returncode
- self._unregister()
-
- def _unregister(self):
- self._registered = False
- if self._fd is not None:
- self.scheduler.remove_writer(self._fd)
- os.close(self._fd)
- self._fd = None
-
+ self._fd = os.open(self.fifo, os.O_WRONLY | os.O_NONBLOCK)
+ except OSError as e:
+ if e.errno == errno.ENXIO:
+ # This happens if the daemon has been killed.
+ self.returncode = RETURNCODE_WRITE_FAILED
+ self._unregister()
+ self._async_wait()
+ return
+ else:
+ raise
+ self.scheduler.add_writer(self._fd, self._output_handler)
+ self._registered = True
+
+ def _output_handler(self):
+ # The whole buf should be able to fit in the fifo with
+ # a single write call, so there's no valid reason for
+ # os.write to raise EAGAIN here.
+ fd = self._fd
+ buf = self.buf
+ while buf:
+ try:
+ buf = buf[os.write(fd, buf) :]
+ except OSError:
+ self.returncode = RETURNCODE_WRITE_FAILED
+ self._async_wait()
+ return
+
+ self.returncode = os.EX_OK
+ self._async_wait()
+
+ def _cancel(self):
+ self.returncode = self._cancelled_returncode
+ self._unregister()
+
+ def _unregister(self):
+ self._registered = False
+ if self._fd is not None:
+ self.scheduler.remove_writer(self._fd)
+ os.close(self._fd)
+ self._fd = None
+
+ class EbuildIpc:
+
+ # Timeout for each individual communication attempt (we retry
+ # as long as the daemon process appears to be alive).
+ _COMMUNICATE_RETRY_TIMEOUT = 15 # seconds
+
+ def __init__(self):
+ self.fifo_dir = os.environ["PORTAGE_BUILDDIR"]
+ self.ipc_in_fifo = os.path.join(self.fifo_dir, ".ipc", "in")
+ self.ipc_out_fifo = os.path.join(self.fifo_dir, ".ipc", "out")
+ self.ipc_lock_file = os.path.join(self.fifo_dir, ".ipc", "lock")
+
+ def _daemon_is_alive(self):
+ try:
+ builddir_lock = portage.locks.lockfile(
+ self.fifo_dir, wantnewlockfile=True, flags=os.O_NONBLOCK
+ )
+ except portage.exception.TryAgain:
+ return True
+ else:
+ portage.locks.unlockfile(builddir_lock)
+ return False
-class EbuildIpc:
+ def communicate(self, args):
- # Timeout for each individual communication attempt (we retry
- # as long as the daemon process appears to be alive).
- _COMMUNICATE_RETRY_TIMEOUT = 15 # seconds
+ # Make locks quiet since unintended locking messages displayed on
+ # stdout could corrupt the intended output of this program.
+ portage.locks._quiet = True
+ lock_obj = portage.locks.lockfile(self.ipc_lock_file, unlinkfile=True)
- def __init__(self):
- self.fifo_dir = os.environ["PORTAGE_BUILDDIR"]
- self.ipc_in_fifo = os.path.join(self.fifo_dir, ".ipc", "in")
- self.ipc_out_fifo = os.path.join(self.fifo_dir, ".ipc", "out")
- self.ipc_lock_file = os.path.join(self.fifo_dir, ".ipc", "lock")
+ try:
+ return self._communicate(args)
+ finally:
+ portage.locks.unlockfile(lock_obj)
- def _daemon_is_alive(self):
- try:
- builddir_lock = portage.locks.lockfile(
- self.fifo_dir, wantnewlockfile=True, flags=os.O_NONBLOCK
+ def _timeout_retry_msg(self, start_time, when):
+ time_elapsed = time.time() - start_time
+ portage.util.writemsg_level(
+ portage.localization._(
+ "ebuild-ipc timed out %s after %d seconds," + " retrying...\n"
+ )
+ % (when, time_elapsed),
+ level=logging.ERROR,
+ noiselevel=-1,
)
- except portage.exception.TryAgain:
- return True
- else:
- portage.locks.unlockfile(builddir_lock)
- return False
-
- def communicate(self, args):
-
- # Make locks quiet since unintended locking messages displayed on
- # stdout could corrupt the intended output of this program.
- portage.locks._quiet = True
- lock_obj = portage.locks.lockfile(self.ipc_lock_file, unlinkfile=True)
-
- try:
- return self._communicate(args)
- finally:
- portage.locks.unlockfile(lock_obj)
- def _timeout_retry_msg(self, start_time, when):
- time_elapsed = time.time() - start_time
- portage.util.writemsg_level(
- portage.localization._(
- "ebuild-ipc timed out %s after %d seconds," + " retrying...\n"
+ def _no_daemon_msg(self):
+ portage.util.writemsg_level(
+ portage.localization._("ebuild-ipc: daemon process not detected\n"),
+ level=logging.ERROR,
+ noiselevel=-1,
)
- % (when, time_elapsed),
- level=logging.ERROR,
- noiselevel=-1,
- )
-
- def _no_daemon_msg(self):
- portage.util.writemsg_level(
- portage.localization._("ebuild-ipc: daemon process not detected\n"),
- level=logging.ERROR,
- noiselevel=-1,
- )
-
- def _run_writer(self, fifo_writer, msg):
- """
- Wait on pid and return an appropriate exit code. This
- may return unsuccessfully due to timeout if the daemon
- process does not appear to be alive.
- """
-
- start_time = time.time()
- fifo_writer.start()
- eof = fifo_writer.poll() is not None
+ def _run_writer(self, fifo_writer, msg):
+ """
+ Wait on pid and return an appropriate exit code. This
+ may return unsuccessfully due to timeout if the daemon
+ process does not appear to be alive.
+ """
- while not eof:
- fifo_writer._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT)
+ start_time = time.time()
+ fifo_writer.start()
eof = fifo_writer.poll() is not None
- if eof:
- break
- elif self._daemon_is_alive():
- self._timeout_retry_msg(start_time, msg)
- else:
- fifo_writer.cancel()
- self._no_daemon_msg()
- fifo_writer.wait()
- return 2
-
- return fifo_writer.wait()
-
- def _receive_reply(self, input_fd):
- start_time = time.time()
+ while not eof:
+ fifo_writer._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT)
- pipe_reader = PipeReader(
- input_files={"input_fd": input_fd}, scheduler=global_event_loop()
- )
- pipe_reader.start()
-
- eof = pipe_reader.poll() is not None
-
- while not eof:
- pipe_reader._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT)
- eof = pipe_reader.poll() is not None
- if not eof:
- if self._daemon_is_alive():
- self._timeout_retry_msg(
- start_time, portage.localization._("during read")
- )
+ eof = fifo_writer.poll() is not None
+ if eof:
+ break
+ elif self._daemon_is_alive():
+ self._timeout_retry_msg(start_time, msg)
else:
- pipe_reader.cancel()
+ fifo_writer.cancel()
self._no_daemon_msg()
+ fifo_writer.wait()
return 2
- buf = pipe_reader.getvalue()
+ return fifo_writer.wait()
- retval = 2
+ def _receive_reply(self, input_fd):
- if not buf:
+ start_time = time.time()
- portage.util.writemsg_level(
- "ebuild-ipc: {}\n".format(portage.localization._("read failed")),
- level=logging.ERROR,
- noiselevel=-1,
+ pipe_reader = PipeReader(
+ input_files={"input_fd": input_fd}, scheduler=global_event_loop()
)
+ pipe_reader.start()
- else:
+ eof = pipe_reader.poll() is not None
+
+ while not eof:
+ pipe_reader._wait_loop(timeout=self._COMMUNICATE_RETRY_TIMEOUT)
+ eof = pipe_reader.poll() is not None
+ if not eof:
+ if self._daemon_is_alive():
+ self._timeout_retry_msg(
+ start_time, portage.localization._("during read")
+ )
+ else:
+ pipe_reader.cancel()
+ self._no_daemon_msg()
+ return 2
+
+ buf = pipe_reader.getvalue()
+
+ retval = 2
+
+ if not buf:
- try:
- reply = pickle.loads(buf)
- except SystemExit:
- raise
- except Exception as e:
- # The pickle module can raise practically
- # any exception when given corrupt data.
portage.util.writemsg_level(
- "ebuild-ipc: {}\n".format(e), level=logging.ERROR, noiselevel=-1
+ "ebuild-ipc: {}\n".format(portage.localization._("read failed")),
+ level=logging.ERROR,
+ noiselevel=-1,
)
else:
- (out, err, retval) = reply
+ try:
+ reply = pickle.loads(buf)
+ except SystemExit:
+ raise
+ except Exception as e:
+ # The pickle module can raise practically
+ # any exception when given corrupt data.
+ portage.util.writemsg_level(
+ "ebuild-ipc: {}\n".format(e), level=logging.ERROR, noiselevel=-1
+ )
- if out:
- portage.util.writemsg_stdout(out, noiselevel=-1)
+ else:
- if err:
- portage.util.writemsg(err, noiselevel=-1)
+ (out, err, retval) = reply
- return retval
+ if out:
+ portage.util.writemsg_stdout(out, noiselevel=-1)
- def _communicate(self, args):
+ if err:
+ portage.util.writemsg(err, noiselevel=-1)
- if not self._daemon_is_alive():
- self._no_daemon_msg()
- return 2
+ return retval
- # Open the input fifo before the output fifo, in order to make it
- # possible for the daemon to send a reply without blocking. This
- # improves performance, and also makes it possible for the daemon
- # to do a non-blocking write without a race condition.
- input_fd = os.open(self.ipc_out_fifo, os.O_RDONLY | os.O_NONBLOCK)
+ def _communicate(self, args):
- # Use forks so that the child process can handle blocking IO
- # un-interrupted, while the parent handles all timeout
- # considerations. This helps to avoid possible race conditions
- # from interference between timeouts and blocking IO operations.
- msg = portage.localization._("during write")
- retval = self._run_writer(
- FifoWriter(
- buf=pickle.dumps(args),
- fifo=self.ipc_in_fifo,
- scheduler=global_event_loop(),
- ),
- msg,
- )
+ if not self._daemon_is_alive():
+ self._no_daemon_msg()
+ return 2
- if retval != os.EX_OK:
- portage.util.writemsg_level(
- "ebuild-ipc: %s: %s\n"
- % (msg, portage.localization._("subprocess failure: %s") % retval),
- level=logging.ERROR,
- noiselevel=-1,
+ # Open the input fifo before the output fifo, in order to make it
+ # possible for the daemon to send a reply without blocking. This
+ # improves performance, and also makes it possible for the daemon
+ # to do a non-blocking write without a race condition.
+ input_fd = os.open(self.ipc_out_fifo, os.O_RDONLY | os.O_NONBLOCK)
+
+ # Use forks so that the child process can handle blocking IO
+ # un-interrupted, while the parent handles all timeout
+ # considerations. This helps to avoid possible race conditions
+ # from interference between timeouts and blocking IO operations.
+ msg = portage.localization._("during write")
+ retval = self._run_writer(
+ FifoWriter(
+ buf=pickle.dumps(args),
+ fifo=self.ipc_in_fifo,
+ scheduler=global_event_loop(),
+ ),
+ msg,
)
- return retval
- if not self._daemon_is_alive():
- self._no_daemon_msg()
- return 2
+ if retval != os.EX_OK:
+ portage.util.writemsg_level(
+ "ebuild-ipc: %s: %s\n"
+ % (msg, portage.localization._("subprocess failure: %s") % retval),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ return retval
- return self._receive_reply(input_fd)
+ if not self._daemon_is_alive():
+ self._no_daemon_msg()
+ return 2
+ return self._receive_reply(input_fd)
-def ebuild_ipc_main(args):
- ebuild_ipc = EbuildIpc()
- return ebuild_ipc.communicate(args)
+ def ebuild_ipc_main(args):
+ ebuild_ipc = EbuildIpc()
+ return ebuild_ipc.communicate(args)
+ if __name__ == "__main__":
+ try:
+ sys.exit(ebuild_ipc_main(sys.argv[1:]))
+ finally:
+ global_event_loop().close()
-if __name__ == "__main__":
- try:
- sys.exit(ebuild_ipc_main(sys.argv[1:]))
- finally:
- global_event_loop().close()
+except KeyboardInterrupt as e:
+ # Prevent traceback on ^C
+ signum = getattr(e, "signum", signal.SIGINT)
+ signal.signal(signum, signal.SIG_DFL)
+ raise_signal(signum)
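(Editorial note: each script also resets SIGPIPE to SIG_DFL. Python
ignores SIGPIPE by default, so writing to a closed pipe raises
BrokenPipeError ("[Errno 32] Broken pipe") and prints a traceback; with
SIG_DFL the process simply dies when the reader goes away. A minimal
hypothetical demo.py, run as `python demo.py | head -n 1`:)

    import signal

    # Restore the default SIGPIPE disposition, as the entry scripts do.
    signal.signal(signal.SIGPIPE, signal.SIG_DFL)

    for i in range(1_000_000):
        print(i)  # dies quietly via SIGPIPE once head closes the pipe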
diff --git a/bin/egencache b/bin/egencache
index 47c9dd340..5f5664131 100755
--- a/bin/egencache
+++ b/bin/egencache
@@ -2,797 +2,807 @@
# Copyright 2009-2022 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-import argparse
+import os
import signal
-import stat
-import sys
-# This block ensures that ^C interrupts are handled quietly.
-try:
-
- def exithandler(signum, _frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- sys.exit(128 + signum)
+# For compatibility with Python < 3.8
+raise_signal = getattr(
+ signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
+)
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
+# Inherit from KeyboardInterrupt to avoid a traceback from asyncio.
+class SignalInterrupt(KeyboardInterrupt):
+ def __init__(self, signum):
+ self.signum = signum
-except KeyboardInterrupt:
- sys.exit(128 + signal.SIGINT)
+try:
-def debug_signal(_signum, _frame):
- import pdb
+ def signal_interrupt(signum, _frame):
+ raise SignalInterrupt(signum)
- pdb.set_trace()
+ def debug_signal(_signum, _frame):
+ import pdb
+ pdb.set_trace()
-signal.signal(signal.SIGUSR1, debug_signal)
+ # Prevent "[Errno 32] Broken pipe" exceptions when writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal_interrupt)
+ signal.signal(signal.SIGUSR1, debug_signal)
-import functools
-import logging
-import subprocess
-import time
-import textwrap
-import re
+ import argparse
+ import stat
+ import sys
+ import functools
+ import logging
+ import subprocess
+ import time
+ import textwrap
+ import re
-from os import path as osp
+ from os import path as osp
-if osp.isfile(
- osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")
-):
- sys.path.insert(
- 0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib")
+ if osp.isfile(
+ osp.join(
+ osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed"
+ )
+ ):
+ sys.path.insert(
+ 0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib")
+ )
+ import portage
+
+ portage._internal_caller = True
+ from portage import os, _encodings, _unicode_encode, _unicode_decode
+ from portage.cache.cache_errors import CacheError, StatCollision
+ from portage.cache.index.pkg_desc_index import (
+ pkg_desc_index_line_format,
+ pkg_desc_index_line_read,
)
-import portage
-
-portage._internal_caller = True
-from portage import os, _encodings, _unicode_encode, _unicode_decode
-from portage.cache.cache_errors import CacheError, StatCollision
-from portage.cache.index.pkg_desc_index import (
- pkg_desc_index_line_format,
- pkg_desc_index_line_read,
-)
-from portage.const import TIMESTAMP_FORMAT
-from portage.dep import _repo_separator
-from portage.output import colorize, EOutput
-from portage.package.ebuild._parallel_manifest.ManifestScheduler import (
- ManifestScheduler,
-)
-from portage.util import cmp_sort_key, writemsg_level
-from portage.util._async.AsyncFunction import AsyncFunction
-from portage.util._async.run_main_scheduler import run_main_scheduler
-from portage.util._async.TaskScheduler import TaskScheduler
-from portage.util._eventloop.global_event_loop import global_event_loop
-from portage.util.changelog import ChangeLogTypeSort
-from portage import cpv_getkey
-from portage.dep import Atom, isjustname
-from portage.versions import vercmp
-from _emerge.MetadataRegen import MetadataRegen
+ from portage.const import TIMESTAMP_FORMAT
+ from portage.dep import _repo_separator
+ from portage.output import colorize, EOutput
+ from portage.package.ebuild._parallel_manifest.ManifestScheduler import (
+ ManifestScheduler,
+ )
+ from portage.util import cmp_sort_key, writemsg_level
+ from portage.util._async.AsyncFunction import AsyncFunction
+ from portage.util._async.run_main_scheduler import run_main_scheduler
+ from portage.util._async.TaskScheduler import TaskScheduler
+ from portage.util._eventloop.global_event_loop import global_event_loop
+ from portage.util.changelog import ChangeLogTypeSort
+ from portage import cpv_getkey
+ from portage.dep import Atom, isjustname
+ from portage.versions import vercmp
+ from _emerge.MetadataRegen import MetadataRegen
-try:
- from xml.etree import ElementTree
-except ImportError:
- pass
-else:
try:
- from xml.parsers.expat import ExpatError
+ from xml.etree import ElementTree
except ImportError:
pass
else:
- from portage.xml.metadata import ( # pylint: disable=ungrouped-imports
- parse_metadata_use,
- )
-
-
-def parse_args(args):
- usage = "egencache [options] <action> ... [atom] ..."
- parser = argparse.ArgumentParser(usage=usage)
+ try:
+ from xml.parsers.expat import ExpatError
+ except ImportError:
+ pass
+ else:
+ from portage.xml.metadata import ( # pylint: disable=ungrouped-imports
+ parse_metadata_use,
+ )
- actions = parser.add_argument_group("Actions")
- actions.add_argument(
- "--update",
- action="store_true",
- help="update metadata/md5-cache/ (generate as necessary)",
- )
- actions.add_argument(
- "--update-use-local-desc",
- action="store_true",
- help="update the use.local.desc file from metadata.xml",
- )
- actions.add_argument(
- "--update-changelogs",
- action="store_true",
- help="update the ChangeLog files from SCM logs",
- )
- actions.add_argument(
- "--update-pkg-desc-index",
- action="store_true",
- help="update package description index",
- )
- actions.add_argument(
- "--update-manifests", action="store_true", help="update manifests"
- )
+ def parse_args(args):
+ usage = "egencache [options] <action> ... [atom] ..."
+ parser = argparse.ArgumentParser(usage=usage)
- common = parser.add_argument_group("Common options")
- common.add_argument("--repo", action="store", help="name of repo to operate on")
- common.add_argument(
- "--config-root",
- help="location of portage config files",
- dest="portage_configroot",
- )
- common.add_argument(
- "--external-cache-only",
- action="store_true",
- help="Output only to the external cache (not the repository itself)",
- )
- common.add_argument(
- "--gpg-dir", help="override the PORTAGE_GPG_DIR variable", dest="gpg_dir"
- )
- common.add_argument(
- "--gpg-key", help="override the PORTAGE_GPG_KEY variable", dest="gpg_key"
- )
- common.add_argument(
- "--repositories-configuration",
- help="override configuration of repositories (in format of repos.conf)",
- dest="repositories_configuration",
- )
- common.add_argument(
- "--sign-manifests",
- choices=("y", "n"),
- metavar="<y|n>",
- help="manually override layout.conf sign-manifests setting",
- )
- common.add_argument(
- "--strict-manifests",
- choices=("y", "n"),
- metavar="<y|n>",
- help='manually override "strict" FEATURES setting',
- )
- common.add_argument(
- "--thin-manifests",
- choices=("y", "n"),
- metavar="<y|n>",
- help="manually override layout.conf thin-manifests setting",
- )
- common.add_argument(
- "--tolerant",
- action="store_true",
- help="exit successfully if only minor errors occurred",
- )
- common.add_argument(
- "--ignore-default-opts",
- action="store_true",
- help="do not use the EGENCACHE_DEFAULT_OPTS environment variable",
- )
- common.add_argument(
- "-v", "--verbose", action="count", default=0, help="increase verbosity"
- )
- common.add_argument(
- "--write-timestamp",
- action="store_true",
- help="write metadata/timestamp.chk as required for rsync repositories",
- )
+ actions = parser.add_argument_group("Actions")
+ actions.add_argument(
+ "--update",
+ action="store_true",
+ help="update metadata/md5-cache/ (generate as necessary)",
+ )
+ actions.add_argument(
+ "--update-use-local-desc",
+ action="store_true",
+ help="update the use.local.desc file from metadata.xml",
+ )
+ actions.add_argument(
+ "--update-changelogs",
+ action="store_true",
+ help="update the ChangeLog files from SCM logs",
+ )
+ actions.add_argument(
+ "--update-pkg-desc-index",
+ action="store_true",
+ help="update package description index",
+ )
+ actions.add_argument(
+ "--update-manifests", action="store_true", help="update manifests"
+ )
- update = parser.add_argument_group("--update options")
- update.add_argument(
- "--cache-dir", help="location of the metadata cache", dest="cache_dir"
- )
- update.add_argument(
- "-j", "--jobs", type=int, action="store", help="max ebuild processes to spawn"
- )
- update.add_argument(
- "--load-average",
- type=float,
- action="store",
- help="max load allowed when spawning multiple jobs",
- dest="load_average",
- )
- update.add_argument(
- "--rsync",
- action="store_true",
- help="enable rsync stat collision workaround "
- + "for bug 139134 (use with --update)",
- )
+ common = parser.add_argument_group("Common options")
+ common.add_argument("--repo", action="store", help="name of repo to operate on")
+ common.add_argument(
+ "--config-root",
+ help="location of portage config files",
+ dest="portage_configroot",
+ )
+ common.add_argument(
+ "--external-cache-only",
+ action="store_true",
+ help="Output only to the external cache (not the repository itself)",
+ )
+ common.add_argument(
+ "--gpg-dir", help="override the PORTAGE_GPG_DIR variable", dest="gpg_dir"
+ )
+ common.add_argument(
+ "--gpg-key", help="override the PORTAGE_GPG_KEY variable", dest="gpg_key"
+ )
+ common.add_argument(
+ "--repositories-configuration",
+ help="override configuration of repositories (in format of repos.conf)",
+ dest="repositories_configuration",
+ )
+ common.add_argument(
+ "--sign-manifests",
+ choices=("y", "n"),
+ metavar="<y|n>",
+ help="manually override layout.conf sign-manifests setting",
+ )
+ common.add_argument(
+ "--strict-manifests",
+ choices=("y", "n"),
+ metavar="<y|n>",
+ help='manually override "strict" FEATURES setting',
+ )
+ common.add_argument(
+ "--thin-manifests",
+ choices=("y", "n"),
+ metavar="<y|n>",
+ help="manually override layout.conf thin-manifests setting",
+ )
+ common.add_argument(
+ "--tolerant",
+ action="store_true",
+ help="exit successfully if only minor errors occurred",
+ )
+ common.add_argument(
+ "--ignore-default-opts",
+ action="store_true",
+ help="do not use the EGENCACHE_DEFAULT_OPTS environment variable",
+ )
+ common.add_argument(
+ "-v", "--verbose", action="count", default=0, help="increase verbosity"
+ )
+ common.add_argument(
+ "--write-timestamp",
+ action="store_true",
+ help="write metadata/timestamp.chk as required for rsync repositories",
+ )
- uld = parser.add_argument_group("--update-use-local-desc options")
- uld.add_argument(
- "--preserve-comments",
- action="store_true",
- help="preserve the comments from the existing use.local.desc file",
- )
- uld.add_argument(
- "--use-local-desc-output",
- help="output file for use.local.desc data (or '-' for stdout)",
- dest="uld_output",
- )
+ update = parser.add_argument_group("--update options")
+ update.add_argument(
+ "--cache-dir", help="location of the metadata cache", dest="cache_dir"
+ )
+ update.add_argument(
+ "-j",
+ "--jobs",
+ type=int,
+ action="store",
+ help="max ebuild processes to spawn",
+ )
+ update.add_argument(
+ "--load-average",
+ type=float,
+ action="store",
+ help="max load allowed when spawning multiple jobs",
+ dest="load_average",
+ )
+ update.add_argument(
+ "--rsync",
+ action="store_true",
+ help="enable rsync stat collision workaround "
+ + "for bug 139134 (use with --update)",
+ )
- uc = parser.add_argument_group("--update-changelogs options")
- uc.add_argument(
- "--changelog-reversed",
- action="store_true",
- help="log commits in reverse order (oldest first)",
- )
- uc.add_argument(
- "--changelog-output",
- help="output filename for change logs",
- dest="changelog_output",
- default="ChangeLog",
- )
+ uld = parser.add_argument_group("--update-use-local-desc options")
+ uld.add_argument(
+ "--preserve-comments",
+ action="store_true",
+ help="preserve the comments from the existing use.local.desc file",
+ )
+ uld.add_argument(
+ "--use-local-desc-output",
+ help="output file for use.local.desc data (or '-' for stdout)",
+ dest="uld_output",
+ )
- options, args = parser.parse_known_args(args)
+ uc = parser.add_argument_group("--update-changelogs options")
+ uc.add_argument(
+ "--changelog-reversed",
+ action="store_true",
+ help="log commits in reverse order (oldest first)",
+ )
+ uc.add_argument(
+ "--changelog-output",
+ help="output filename for change logs",
+ dest="changelog_output",
+ default="ChangeLog",
+ )
- if options.jobs:
- jobs = None
- try:
- jobs = int(options.jobs)
- except ValueError:
- jobs = -1
+ options, args = parser.parse_known_args(args)
- if jobs < 1:
- parser.error("Invalid: --jobs='{}'".format(options.jobs))
+ if options.jobs:
+ jobs = None
+ try:
+ jobs = int(options.jobs)
+ except ValueError:
+ jobs = -1
- options.jobs = jobs
+ if jobs < 1:
+ parser.error("Invalid: --jobs='{}'".format(options.jobs))
- else:
- options.jobs = None
+ options.jobs = jobs
- if options.load_average:
- try:
- load_average = float(options.load_average)
- except ValueError:
- load_average = 0.0
+ else:
+ options.jobs = None
- if load_average <= 0.0:
- parser.error("Invalid: --load-average='{}'".format(options.load_average))
+ if options.load_average:
+ try:
+ load_average = float(options.load_average)
+ except ValueError:
+ load_average = 0.0
- options.load_average = load_average
+ if load_average <= 0.0:
+ parser.error(
+ "Invalid: --load-average='{}'".format(options.load_average)
+ )
- else:
- options.load_average = None
+ options.load_average = load_average
- options.config_root = options.portage_configroot
- if options.config_root is not None and not os.path.isdir(options.config_root):
- parser.error("Not a directory: --config-root='{}'".format(options.config_root))
+ else:
+ options.load_average = None
- if options.cache_dir is not None:
- if not os.path.isdir(options.cache_dir):
- parser.error("Not a directory: --cache-dir='{}'".format(options.cache_dir))
- if not os.access(options.cache_dir, os.W_OK):
+ options.config_root = options.portage_configroot
+ if options.config_root is not None and not os.path.isdir(options.config_root):
parser.error(
- "Write access denied: --cache-dir='{}'".format(options.cache_dir)
+ "Not a directory: --config-root='{}'".format(options.config_root)
)
- for atom in args:
- try:
- atom = portage.dep.Atom(atom)
- except portage.exception.InvalidAtom:
- parser.error("Invalid atom: {}".format(atom))
-
- if not isjustname(atom):
- parser.error("Atom is too specific: {}".format(atom))
-
- if options.update_use_local_desc:
- try:
- ElementTree
- ExpatError
- except NameError:
- parser.error("--update-use-local-desc requires python with USE=xml!")
-
- if options.uld_output == "-" and options.preserve_comments:
- parser.error("--preserve-comments cannot be used when outputting to stdout")
-
- return parser, options, args
-
-
-class GenCache:
- def __init__(
- self,
- portdb,
- cp_iter=None,
- max_jobs=None,
- max_load=None,
- rsync=False,
- external_cache_only=False,
- ):
- # The caller must set portdb.porttrees in order to constrain
- # findname, cp_list, and cpv_list to the desired tree.
- tree = portdb.porttrees[0]
- self._portdb = portdb
- self._eclass_db = portdb.repositories.get_repo_for_location(tree).eclass_db
- self._auxdbkeys = portdb._known_keys
- # We can globally cleanse stale cache only if we
- # iterate over every single cp.
- self._global_cleanse = cp_iter is None
- if cp_iter is not None:
- self._cp_set = set(cp_iter)
- cp_iter = iter(self._cp_set)
- self._cp_missing = self._cp_set.copy()
- else:
- self._cp_set = None
- self._cp_missing = set()
- write_auxdb = (
- external_cache_only or "metadata-transfer" in portdb.settings.features
- )
- self._regen = MetadataRegen(
- portdb,
- cp_iter=cp_iter,
- consumer=self._metadata_callback,
- max_jobs=max_jobs,
- max_load=max_load,
- write_auxdb=write_auxdb,
- main=True,
- )
- self.returncode = os.EX_OK
- conf = portdb.repositories.get_repo_for_location(tree)
- if external_cache_only:
- self._trg_caches = ()
- else:
- self._trg_caches = tuple(
- conf.iter_pregenerated_caches(
- self._auxdbkeys, force=True, readonly=False
+ if options.cache_dir is not None:
+ if not os.path.isdir(options.cache_dir):
+ parser.error(
+ "Not a directory: --cache-dir='{}'".format(options.cache_dir)
)
- )
- if not self._trg_caches:
- raise Exception(
- "cache formats '%s' aren't supported"
- % (" ".join(conf.cache_formats),)
+ if not os.access(options.cache_dir, os.W_OK):
+ parser.error(
+ "Write access denied: --cache-dir='{}'".format(options.cache_dir)
)
- if rsync:
- for trg_cache in self._trg_caches:
- if hasattr(trg_cache, "raise_stat_collision"):
- trg_cache.raise_stat_collision = True
- # Make _metadata_callback write this cache first, in case
- # it raises a StatCollision and triggers mtime
- # modification.
- self._trg_caches = tuple(
- [trg_cache]
- + [x for x in self._trg_caches if x is not trg_cache]
- )
+ for atom in args:
+ try:
+ atom = portage.dep.Atom(atom)
+ except portage.exception.InvalidAtom:
+ parser.error("Invalid atom: {}".format(atom))
- self._existing_nodes = set()
+ if not isjustname(atom):
+ parser.error("Atom is too specific: {}".format(atom))
- def _metadata_callback(self, cpv, repo_path, metadata, ebuild_hash, eapi_supported):
- self._existing_nodes.add(cpv)
- self._cp_missing.discard(cpv_getkey(cpv))
+ if options.update_use_local_desc:
+ try:
+ ElementTree
+ ExpatError
+ except NameError:
+ parser.error("--update-use-local-desc requires python with USE=xml!")
- # Since we're supposed to be able to efficiently obtain the
- # EAPI from _parse_eapi_ebuild_head, we don't write cache
- # entries for unsupported EAPIs.
- if metadata is not None and eapi_supported:
- for trg_cache in self._trg_caches:
- self._write_cache(trg_cache, cpv, repo_path, metadata, ebuild_hash)
+ if options.uld_output == "-" and options.preserve_comments:
+ parser.error(
+ "--preserve-comments cannot be used when outputting to stdout"
+ )
- def _write_cache(self, trg_cache, cpv, repo_path, metadata, ebuild_hash):
+ return parser, options, args
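
The option validation above funnels every bad value through parser.error(), which prints the usage text and exits with status 2 instead of leaving a traceback. A minimal standalone sketch of the same idiom for --jobs (the option name and message mirror the code above; nothing here is portage API):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("-j", "--jobs")

    options = parser.parse_args(["--jobs", "4"])
    if options.jobs:
        try:
            jobs = int(options.jobs)
        except ValueError:
            jobs = -1  # force the range check below to fail
        if jobs < 1:
            parser.error("Invalid: --jobs='{}'".format(options.jobs))
        options.jobs = jobs
    else:
        options.jobs = None
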
- if not hasattr(trg_cache, "raise_stat_collision"):
- # This cache does not avoid redundant writes automatically,
- # so check for an identical existing entry before writing.
- # This prevents unnecessary disk writes and can also prevent
- # unnecessary rsync transfers.
- try:
- dest = trg_cache[cpv]
- except (KeyError, CacheError):
- pass
+ class GenCache:
+ def __init__(
+ self,
+ portdb,
+ cp_iter=None,
+ max_jobs=None,
+ max_load=None,
+ rsync=False,
+ external_cache_only=False,
+ ):
+ # The caller must set portdb.porttrees in order to constrain
+ # findname, cp_list, and cpv_list to the desired tree.
+ tree = portdb.porttrees[0]
+ self._portdb = portdb
+ self._eclass_db = portdb.repositories.get_repo_for_location(tree).eclass_db
+ self._auxdbkeys = portdb._known_keys
+ # We can globally cleanse stale cache only if we
+ # iterate over every single cp.
+ self._global_cleanse = cp_iter is None
+ if cp_iter is not None:
+ self._cp_set = set(cp_iter)
+ cp_iter = iter(self._cp_set)
+ self._cp_missing = self._cp_set.copy()
else:
- if trg_cache.validate_entry(dest, ebuild_hash, self._eclass_db):
- identical = True
- for k in self._auxdbkeys:
- if dest.get(k, "") != metadata.get(k, ""):
- identical = False
- break
- if identical:
- return
-
- try:
- chf = trg_cache.validation_chf
- metadata["_%s_" % chf] = getattr(ebuild_hash, chf)
- try:
- trg_cache[cpv] = metadata
- except StatCollision as sc:
- # If the content of a cache entry changes and neither the
- # file mtime nor size changes, it will prevent rsync from
- # detecting changes. Cache backends may raise this
- # exception from _setitem() if they detect this type of stat
- # collision. These exceptions are handled by bumping the
- # mtime on the ebuild (and the corresponding cache entry).
- # See bug #139134. It is convenient to include checks for
- # redundant writes along with the internal StatCollision
- # detection code, so for caches with the
- # raise_stat_collision attribute, we do not need to
- # explicitly check for redundant writes like we do for the
- # other cache types above.
- max_mtime = sc.mtime
- for _ec, ec_hash in metadata["_eclasses_"].items():
- if max_mtime < ec_hash.mtime:
- max_mtime = ec_hash.mtime
- if max_mtime == sc.mtime:
- max_mtime += 1
- max_mtime = int(max_mtime)
- try:
- os.utime(ebuild_hash.location, (max_mtime, max_mtime))
- except OSError as e:
- self.returncode |= 1
- writemsg_level(
- "{} writing target: {}\n".format(cpv, e),
- level=logging.ERROR,
- noiselevel=-1,
- )
- else:
- ebuild_hash.mtime = max_mtime
- metadata["_mtime_"] = max_mtime
- trg_cache[cpv] = metadata
- self._portdb.auxdb[repo_path][cpv] = metadata
-
- except CacheError as ce:
- self.returncode |= 1
- writemsg_level(
- "{} writing target: {}\n".format(cpv, ce),
- level=logging.ERROR,
- noiselevel=-1,
+ self._cp_set = None
+ self._cp_missing = set()
+ write_auxdb = (
+ external_cache_only or "metadata-transfer" in portdb.settings.features
+ )
+ self._regen = MetadataRegen(
+ portdb,
+ cp_iter=cp_iter,
+ consumer=self._metadata_callback,
+ max_jobs=max_jobs,
+ max_load=max_load,
+ write_auxdb=write_auxdb,
+ main=True,
)
+ self.returncode = os.EX_OK
+ conf = portdb.repositories.get_repo_for_location(tree)
+ if external_cache_only:
+ self._trg_caches = ()
+ else:
+ self._trg_caches = tuple(
+ conf.iter_pregenerated_caches(
+ self._auxdbkeys, force=True, readonly=False
+ )
+ )
+ if not self._trg_caches:
+ raise Exception(
+ "cache formats '%s' aren't supported"
+ % (" ".join(conf.cache_formats),)
+ )
- def run(self):
- signum = run_main_scheduler(self._regen)
- if signum is not None:
- sys.exit(128 + signum)
+ if rsync:
+ for trg_cache in self._trg_caches:
+ if hasattr(trg_cache, "raise_stat_collision"):
+ trg_cache.raise_stat_collision = True
+ # Make _metadata_callback write this cache first, in case
+ # it raises a StatCollision and triggers mtime
+ # modification.
+ self._trg_caches = tuple(
+ [trg_cache]
+ + [x for x in self._trg_caches if x is not trg_cache]
+ )
- self.returncode |= self._regen.returncode
+ self._existing_nodes = set()
- for trg_cache in self._trg_caches:
- self._cleanse_cache(trg_cache)
+ def _metadata_callback(
+ self, cpv, repo_path, metadata, ebuild_hash, eapi_supported
+ ):
+ self._existing_nodes.add(cpv)
+ self._cp_missing.discard(cpv_getkey(cpv))
+
+ # Since we're supposed to be able to efficiently obtain the
+ # EAPI from _parse_eapi_ebuild_head, we don't write cache
+ # entries for unsupported EAPIs.
+ if metadata is not None and eapi_supported:
+ for trg_cache in self._trg_caches:
+ self._write_cache(trg_cache, cpv, repo_path, metadata, ebuild_hash)
+
+ def _write_cache(self, trg_cache, cpv, repo_path, metadata, ebuild_hash):
+
+ if not hasattr(trg_cache, "raise_stat_collision"):
+ # This cache does not avoid redundant writes automatically,
+ # so check for an identical existing entry before writing.
+ # This prevents unnecessary disk writes and can also prevent
+ # unnecessary rsync transfers.
+ try:
+ dest = trg_cache[cpv]
+ except (KeyError, CacheError):
+ pass
+ else:
+ if trg_cache.validate_entry(dest, ebuild_hash, self._eclass_db):
+ identical = True
+ for k in self._auxdbkeys:
+ if dest.get(k, "") != metadata.get(k, ""):
+ identical = False
+ break
+ if identical:
+ return
- def _cleanse_cache(self, trg_cache):
- cp_missing = self._cp_missing
- dead_nodes = set()
- if self._global_cleanse:
try:
- for cpv in trg_cache:
- cp = cpv_getkey(cpv)
- if cp is None:
+ chf = trg_cache.validation_chf
+ metadata["_%s_" % chf] = getattr(ebuild_hash, chf)
+ try:
+ trg_cache[cpv] = metadata
+ except StatCollision as sc:
+ # If the content of a cache entry changes and neither the
+ # file mtime nor size changes, it will prevent rsync from
+ # detecting changes. Cache backends may raise this
+ # exception from _setitem() if they detect this type of stat
+ # collision. These exceptions are handled by bumping the
+ # mtime on the ebuild (and the corresponding cache entry).
+ # See bug #139134. It is convenient to include checks for
+ # redundant writes along with the internal StatCollision
+ # detection code, so for caches with the
+ # raise_stat_collision attribute, we do not need to
+ # explicitly check for redundant writes like we do for the
+ # other cache types above.
+ max_mtime = sc.mtime
+ for _ec, ec_hash in metadata["_eclasses_"].items():
+ if max_mtime < ec_hash.mtime:
+ max_mtime = ec_hash.mtime
+ if max_mtime == sc.mtime:
+ max_mtime += 1
+ max_mtime = int(max_mtime)
+ try:
+ os.utime(ebuild_hash.location, (max_mtime, max_mtime))
+ except OSError as e:
self.returncode |= 1
writemsg_level(
- "Unable to parse cp for '{}'\n".format(cpv),
+ "{} writing target: {}\n".format(cpv, e),
level=logging.ERROR,
noiselevel=-1,
)
else:
- dead_nodes.add(cpv)
+ ebuild_hash.mtime = max_mtime
+ metadata["_mtime_"] = max_mtime
+ trg_cache[cpv] = metadata
+ self._portdb.auxdb[repo_path][cpv] = metadata
+
except CacheError as ce:
self.returncode |= 1
writemsg_level(
- "Error listing cache entries for "
- + "'{}': {}, continuing...\n".format(trg_cache.location, ce),
+ "{} writing target: {}\n".format(cpv, ce),
level=logging.ERROR,
noiselevel=-1,
)
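
The StatCollision handling above exists because rsync's quick check compares only file size and mtime: if a cache entry's content changes while both stay equal, rsync never transfers it. The fix bumps the ebuild mtime past every contributing eclass mtime so the next stat() is guaranteed to differ. A condensed sketch of the bump, using a hypothetical bump_mtime helper (not a portage function):

    import os

    def bump_mtime(path, colliding_mtime, eclass_mtimes):
        # Pick an mtime strictly newer than the colliding one and at
        # least as new as every eclass that fed into the metadata.
        max_mtime = max([colliding_mtime, *eclass_mtimes])
        if max_mtime == colliding_mtime:
            max_mtime += 1
        max_mtime = int(max_mtime)
        os.utime(path, (max_mtime, max_mtime))
        return max_mtime
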
- else:
- cp_set = self._cp_set
- try:
- for cpv in trg_cache:
- cp = cpv_getkey(cpv)
- if cp is None:
+ def run(self):
+ signum = run_main_scheduler(self._regen)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ self.returncode |= self._regen.returncode
+
+ for trg_cache in self._trg_caches:
+ self._cleanse_cache(trg_cache)
+
+ def _cleanse_cache(self, trg_cache):
+ cp_missing = self._cp_missing
+ dead_nodes = set()
+ if self._global_cleanse:
+ try:
+ for cpv in trg_cache:
+ cp = cpv_getkey(cpv)
+ if cp is None:
+ self.returncode |= 1
+ writemsg_level(
+ "Unable to parse cp for '{}'\n".format(cpv),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ else:
+ dead_nodes.add(cpv)
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "Error listing cache entries for "
+ + "'{}': {}, continuing...\n".format(trg_cache.location, ce),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+
+ else:
+ cp_set = self._cp_set
+ try:
+ for cpv in trg_cache:
+ cp = cpv_getkey(cpv)
+ if cp is None:
+ self.returncode |= 1
+ writemsg_level(
+ "Unable to parse cp for '{}'\n".format(cpv),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ else:
+ cp_missing.discard(cp)
+ if cp in cp_set:
+ dead_nodes.add(cpv)
+ except CacheError as ce:
+ self.returncode |= 1
+ writemsg_level(
+ "Error listing cache entries for "
+ + "'{}': {}, continuing...\n".format(trg_cache.location, ce),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+
+ if cp_missing:
+ self.returncode |= 1
+ for cp in sorted(cp_missing):
+ writemsg_level(
+ "No ebuilds or cache entries found for '{}'\n".format(cp),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+
+ if dead_nodes:
+ dead_nodes.difference_update(self._existing_nodes)
+ for k in dead_nodes:
+ try:
+ del trg_cache[k]
+ except KeyError:
+ pass
+ except CacheError as ce:
self.returncode |= 1
writemsg_level(
- "Unable to parse cp for '{}'\n".format(cpv),
+ "{} deleting stale cache: {}\n".format(k, ce),
level=logging.ERROR,
noiselevel=-1,
)
- else:
- cp_missing.discard(cp)
- if cp in cp_set:
- dead_nodes.add(cpv)
- except CacheError as ce:
- self.returncode |= 1
- writemsg_level(
- "Error listing cache entries for "
- + "'{}': {}, continuing...\n".format(trg_cache.location, ce),
- level=logging.ERROR,
- noiselevel=-1,
- )
-
- if cp_missing:
- self.returncode |= 1
- for cp in sorted(cp_missing):
- writemsg_level(
- "No ebuilds or cache entries found for '{}'\n".format(cp),
- level=logging.ERROR,
- noiselevel=-1,
- )
- if dead_nodes:
- dead_nodes.difference_update(self._existing_nodes)
- for k in dead_nodes:
+ if not trg_cache.autocommits:
try:
- del trg_cache[k]
- except KeyError:
- pass
+ trg_cache.commit()
except CacheError as ce:
self.returncode |= 1
writemsg_level(
- "{} deleting stale cache: {}\n".format(k, ce),
+ "committing target: {}\n".format(ce),
level=logging.ERROR,
noiselevel=-1,
)
- if not trg_cache.autocommits:
- try:
- trg_cache.commit()
- except CacheError as ce:
- self.returncode |= 1
- writemsg_level(
- "committing target: {}\n".format(ce),
- level=logging.ERROR,
- noiselevel=-1,
- )
+ if hasattr(trg_cache, "_prune_empty_dirs"):
+ trg_cache._prune_empty_dirs()
- if hasattr(trg_cache, "_prune_empty_dirs"):
- trg_cache._prune_empty_dirs()
+ class GenPkgDescIndex:
+ def __init__(self, repo_config, portdb, output_file, verbose=False):
+ self.returncode = os.EX_OK
+ self._repo_config = repo_config
+ self._portdb = portdb
+ self._output_file = output_file
+ self._verbose = verbose
+ def run(self):
-class GenPkgDescIndex:
- def __init__(self, repo_config, portdb, output_file, verbose=False):
- self.returncode = os.EX_OK
- self._repo_config = repo_config
- self._portdb = portdb
- self._output_file = output_file
- self._verbose = verbose
+ display_updates = self._verbose > 0
+ old = {}
+ new = {}
+ if display_updates:
+ try:
+ with open(
+ self._output_file, encoding=_encodings["repo.content"]
+ ) as f:
+ for line in f:
+ pkg_desc = pkg_desc_index_line_read(line)
+ old[pkg_desc.cp] = pkg_desc
+ except FileNotFoundError:
+ pass
- def run(self):
+ portage.util.ensure_dirs(os.path.dirname(self._output_file))
+ f = portage.util.atomic_ofstream(
+ self._output_file, encoding=_encodings["repo.content"]
+ )
- display_updates = self._verbose > 0
- old = {}
- new = {}
- if display_updates:
- try:
- with open(self._output_file, encoding=_encodings["repo.content"]) as f:
- for line in f:
- pkg_desc = pkg_desc_index_line_read(line)
- old[pkg_desc.cp] = pkg_desc
- except FileNotFoundError:
- pass
+ portdb = self._portdb
+ for cp in portdb.cp_all():
+ pkgs = portdb.cp_list(cp)
+ if not pkgs:
+ continue
+ (desc,) = portdb.aux_get(pkgs[-1], ["DESCRIPTION"])
- portage.util.ensure_dirs(os.path.dirname(self._output_file))
- f = portage.util.atomic_ofstream(
- self._output_file, encoding=_encodings["repo.content"]
- )
+ line = pkg_desc_index_line_format(cp, pkgs, desc)
+ f.write(line)
+ if display_updates:
+ new[cp] = pkg_desc_index_line_read(line)
- portdb = self._portdb
- for cp in portdb.cp_all():
- pkgs = portdb.cp_list(cp)
- if not pkgs:
- continue
- (desc,) = portdb.aux_get(pkgs[-1], ["DESCRIPTION"])
+ f.close()
- line = pkg_desc_index_line_format(cp, pkgs, desc)
- f.write(line)
if display_updates:
- new[cp] = pkg_desc_index_line_read(line)
-
- f.close()
-
- if display_updates:
- out = EOutput()
- out.einfo("Searching for changes")
- print("")
- items = sorted(new.values(), key=lambda pkg_desc: pkg_desc.cp)
- haspkgs = False
- for pkg_desc in items:
- masked = False
- version = self._portdb.xmatch(
- "bestmatch-visible",
- Atom(
- "{}{}{}".format(
- pkg_desc.cp, _repo_separator, self._repo_config.name
- )
- ),
- )
- if not version:
- version = pkg_desc.cpv_list[-1]
- masked = True
- old_versions = old.get(pkg_desc.cp)
- if old_versions is None or version not in old_versions.cpv_list:
- prefix0 = " "
- prefix1 = " "
-
- if old_versions is None:
- color = functools.partial(colorize, "darkgreen")
- prefix1 = "N"
- else:
- color = functools.partial(colorize, "turquoise")
- prefix1 = "U"
-
- if masked:
- prefix0 = "M"
-
- print(
- " [%s%s] %s (%s): %s"
- % (
- colorize("red", prefix0),
- color(prefix1),
- colorize("bold", pkg_desc.cp),
- color(version[len(pkg_desc.cp) + 1 :]),
- pkg_desc.desc,
- )
+ out = EOutput()
+ out.einfo("Searching for changes")
+ print("")
+ items = sorted(new.values(), key=lambda pkg_desc: pkg_desc.cp)
+ haspkgs = False
+ for pkg_desc in items:
+ masked = False
+ version = self._portdb.xmatch(
+ "bestmatch-visible",
+ Atom(
+ "{}{}{}".format(
+ pkg_desc.cp, _repo_separator, self._repo_config.name
+ )
+ ),
)
- haspkgs = True
-
- if not haspkgs:
- out.einfo("No updates found")
-
+ if not version:
+ version = pkg_desc.cpv_list[-1]
+ masked = True
+ old_versions = old.get(pkg_desc.cp)
+ if old_versions is None or version not in old_versions.cpv_list:
+ prefix0 = " "
+ prefix1 = " "
+
+ if old_versions is None:
+ color = functools.partial(colorize, "darkgreen")
+ prefix1 = "N"
+ else:
+ color = functools.partial(colorize, "turquoise")
+ prefix1 = "U"
-class GenUseLocalDesc:
- def __init__(self, portdb, output=None, preserve_comments=False):
- self.returncode = os.EX_OK
- self._portdb = portdb
- self._output = output
- self._preserve_comments = preserve_comments
+ if masked:
+ prefix0 = "M"
- def run(self):
- repo_path = self._portdb.porttrees[0]
- ops = {"<": 0, "<=": 1, "=": 2, ">=": 3, ">": 4}
- prev_mtime = None
- prev_md5 = None
+ print(
+ " [%s%s] %s (%s): %s"
+ % (
+ colorize("red", prefix0),
+ color(prefix1),
+ colorize("bold", pkg_desc.cp),
+ color(version[len(pkg_desc.cp) + 1 :]),
+ pkg_desc.desc,
+ )
+ )
+ haspkgs = True
+
+ if not haspkgs:
+ out.einfo("No updates found")
+
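
For orientation, each line the run() method writes to metadata/pkg_desc_index pairs a category/package with its versions and description; the authoritative layout lives in portage's pkg_desc_index_line_format and pkg_desc_index_line_read. The helper below is only an illustrative stand-in for the general shape:

    def pkg_desc_index_line(cp, versions, desc):
        # Roughly: "app-arch/zstd 1.5.2 1.5.4: Zstandard compression library"
        return "{} {}: {}\n".format(cp, " ".join(versions), desc)
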
+ class GenUseLocalDesc:
+ def __init__(self, portdb, output=None, preserve_comments=False):
+ self.returncode = os.EX_OK
+ self._portdb = portdb
+ self._output = output
+ self._preserve_comments = preserve_comments
+
+ def run(self):
+ repo_path = self._portdb.porttrees[0]
+ ops = {"<": 0, "<=": 1, "=": 2, ">=": 3, ">": 4}
+ prev_mtime = None
+ prev_md5 = None
+
+ if self._output is None or self._output != "-":
+ if self._output is None:
+ prof_path = os.path.join(repo_path, "profiles")
+ desc_path = os.path.join(prof_path, "use.local.desc")
+ try:
+ os.mkdir(prof_path)
+ except OSError:
+ pass
+ else:
+ desc_path = self._output
- if self._output is None or self._output != "-":
- if self._output is None:
- prof_path = os.path.join(repo_path, "profiles")
- desc_path = os.path.join(prof_path, "use.local.desc")
try:
- os.mkdir(prof_path)
- except OSError:
+ prev_md5 = portage.checksum.perform_md5(desc_path)
+ prev_mtime = os.stat(desc_path)[stat.ST_MTIME]
+ except (portage.exception.FileNotFound, OSError):
pass
- else:
- desc_path = self._output
- try:
- prev_md5 = portage.checksum.perform_md5(desc_path)
- prev_mtime = os.stat(desc_path)[stat.ST_MTIME]
- except (portage.exception.FileNotFound, OSError):
- pass
+ try:
+ if self._preserve_comments:
+ # Probe in binary mode, in order to avoid
+ # potential character encoding issues.
+ output = open(
+ _unicode_encode(
+ desc_path, encoding=_encodings["fs"], errors="strict"
+ ),
+ "r+b",
+ )
+ else:
+ output = open(
+ _unicode_encode(
+ desc_path, encoding=_encodings["fs"], errors="strict"
+ ),
+ mode="w",
+ encoding=_encodings["repo.content"],
+ errors="backslashreplace",
+ )
+ except OSError as e:
+ if not self._preserve_comments or os.path.isfile(desc_path):
+ writemsg_level(
+ "ERROR: failed to open output file {}: {}\n".format(
+ desc_path, e
+ ),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode |= 2
+ return
- try:
- if self._preserve_comments:
- # Probe in binary mode, in order to avoid
- # potential character encoding issues.
- output = open(
- _unicode_encode(
- desc_path, encoding=_encodings["fs"], errors="strict"
- ),
- "r+b",
- )
- else:
- output = open(
- _unicode_encode(
- desc_path, encoding=_encodings["fs"], errors="strict"
- ),
- mode="w",
- encoding=_encodings["repo.content"],
- errors="backslashreplace",
- )
- except OSError as e:
- if not self._preserve_comments or os.path.isfile(desc_path):
+ # Open in r+b mode failed because the file doesn't
+ # exist yet. We can probably recover if we disable
+ # preserve_comments mode now.
writemsg_level(
- "ERROR: failed to open output file {}: {}\n".format(
- desc_path, e
- ),
- level=logging.ERROR,
+ "WARNING: --preserve-comments enabled, but "
+ + "output file not found: {}\n".format(desc_path),
+ level=logging.WARNING,
noiselevel=-1,
)
- self.returncode |= 2
- return
-
- # Open in r+b mode failed because the file doesn't
- # exist yet. We can probably recover if we disable
- # preserve_comments mode now.
- writemsg_level(
- "WARNING: --preserve-comments enabled, but "
- + "output file not found: {}\n".format(desc_path),
- level=logging.WARNING,
- noiselevel=-1,
+ self._preserve_comments = False
+ try:
+ output = open(
+ _unicode_encode(
+ desc_path, encoding=_encodings["fs"], errors="strict"
+ ),
+ mode="w",
+ encoding=_encodings["repo.content"],
+ errors="backslashreplace",
+ )
+ except OSError as e:
+ writemsg_level(
+ "ERROR: failed to open output file {}: {}\n".format(
+ desc_path, e
+ ),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode |= 2
+ return
+ else:
+ output = sys.stdout
+
+ if self._preserve_comments:
+ while True:
+ pos = output.tell()
+ if not output.readline().startswith(b"#"):
+ break
+ output.seek(pos)
+ output.truncate()
+ output.close()
+
+ # Finished probing comments in binary mode, now append
+ # in text mode.
+ output = open(
+ _unicode_encode(
+ desc_path, encoding=_encodings["fs"], errors="strict"
+ ),
+ mode="a",
+ encoding=_encodings["repo.content"],
+ errors="backslashreplace",
)
- self._preserve_comments = False
- try:
- output = open(
- _unicode_encode(
- desc_path, encoding=_encodings["fs"], errors="strict"
- ),
- mode="w",
- encoding=_encodings["repo.content"],
- errors="backslashreplace",
- )
- except OSError as e:
- writemsg_level(
- "ERROR: failed to open output file {}: {}\n".format(
- desc_path, e
- ),
- level=logging.ERROR,
- noiselevel=-1,
+ output.write("\n")
+ else:
+ output.write(
+ textwrap.dedent(
+ """\
+ # This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
+ # your descriptions to your package's metadata.xml ONLY.
+ # * generated automatically using egencache *
+
+ """
)
- self.returncode |= 2
- return
- else:
- output = sys.stdout
-
- if self._preserve_comments:
- while True:
- pos = output.tell()
- if not output.readline().startswith(b"#"):
- break
- output.seek(pos)
- output.truncate()
- output.close()
-
- # Finished probing comments in binary mode, now append
- # in text mode.
- output = open(
- _unicode_encode(desc_path, encoding=_encodings["fs"], errors="strict"),
- mode="a",
- encoding=_encodings["repo.content"],
- errors="backslashreplace",
- )
- output.write("\n")
- else:
- output.write(
- textwrap.dedent(
- """\
- # This file is deprecated as per GLEP 56 in favor of metadata.xml. Please add
- # your descriptions to your package's metadata.xml ONLY.
- # * generated automatically using egencache *
-
- """
)
- )
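
The --preserve-comments path above probes the existing use.local.desc in binary mode, keeps only the leading '#' header block, truncates there, and reopens the file in append mode for the regenerated body. A self-contained sketch of the probe-and-truncate step (truncate_after_header is illustrative, and utf-8 stands in for the repo.content encoding):

    def truncate_after_header(path):
        # Scan leading "#" comment lines in binary mode, cut the file
        # right after the last one, then hand back an append handle.
        with open(path, "r+b") as f:
            pos = f.tell()
            while f.readline().startswith(b"#"):
                pos = f.tell()
            f.seek(pos)
            f.truncate()
        return open(path, mode="a", encoding="utf-8")
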
- # The cmp function no longer exists in python3, so we'll
- # implement our own here under a slightly different name
- # since we don't want any confusion given that we never
- # want to rely on the builtin cmp function.
- def cmp_func(a, b):
- if a is None or b is None:
- # None can't be compared with other types in python3.
- if a is None and b is None:
- return 0
- elif a is None:
- return -1
- else:
- return 1
- return (a > b) - (a < b)
+ # The cmp function no longer exists in python3, so we'll
+ # implement our own here under a slightly different name
+ # since we don't want any confusion given that we never
+ # want to rely on the builtin cmp function.
+ def cmp_func(a, b):
+ if a is None or b is None:
+ # None can't be compared with other types in python3.
+ if a is None and b is None:
+ return 0
+ elif a is None:
+ return -1
+ else:
+ return 1
+ return (a > b) - (a < b)
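
Comparators like cmp_func return -1/0/1 and cannot be handed to sorted() directly in Python 3; they need an adapter such as portage's cmp_sort_key (used with atomcmp below) or the stdlib functools.cmp_to_key. A quick demonstration with the stdlib adapter:

    import functools

    def cmp_func(a, b):
        if a is None or b is None:
            if a is None and b is None:
                return 0
            return -1 if a is None else 1
        return (a > b) - (a < b)

    # None sorts first; everything else keeps its natural order.
    print(sorted([3, None, 1], key=functools.cmp_to_key(cmp_func)))
    # -> [None, 1, 3]
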
- class _MetadataTreeBuilder(ElementTree.TreeBuilder):
- """
- Implements doctype() as required to avoid deprecation warnings
- since Python >=2.7
- """
+ class _MetadataTreeBuilder(ElementTree.TreeBuilder):
+ """
+ Implements doctype() as required to avoid deprecation warnings
+ since Python >=2.7
+ """
- def doctype(self, name, pubid, system):
- pass
+ def doctype(self, name, pubid, system):
+ pass
- for cp in self._portdb.cp_all():
- metadata_path = os.path.join(repo_path, cp, "metadata.xml")
- try:
- metadata = ElementTree.parse(
- _unicode_encode(
- metadata_path, encoding=_encodings["fs"], errors="strict"
- ),
- parser=ElementTree.XMLParser(target=_MetadataTreeBuilder()),
- )
- except OSError:
- pass
- except (ExpatError, OSError) as e:
- writemsg_level(
- "ERROR: failed parsing {}/metadata.xml: {}\n".format(cp, e),
- level=logging.ERROR,
- noiselevel=-1,
- )
- self.returncode |= 1
- else:
+ for cp in self._portdb.cp_all():
+ metadata_path = os.path.join(repo_path, cp, "metadata.xml")
try:
- usedict = parse_metadata_use(metadata)
- except portage.exception.ParseError as e:
+ metadata = ElementTree.parse(
+ _unicode_encode(
+ metadata_path, encoding=_encodings["fs"], errors="strict"
+ ),
+ parser=ElementTree.XMLParser(target=_MetadataTreeBuilder()),
+ )
+ except OSError:
+ pass
+ except (ExpatError, OSError) as e:
writemsg_level(
"ERROR: failed parsing {}/metadata.xml: {}\n".format(cp, e),
level=logging.ERROR,
@@ -800,610 +810,635 @@ class GenUseLocalDesc:
)
self.returncode |= 1
else:
- for flag in sorted(usedict):
-
- def atomcmp(atoma, atomb):
- # None is better than an atom, that's why we reverse the args
- if atoma is None or atomb is None:
- return cmp_func(atomb, atoma)
- # Same for plain PNs (.operator is None then)
- elif atoma.operator is None or atomb.operator is None:
- return cmp_func(atomb.operator, atoma.operator)
- # Version matching
- elif atoma.cpv != atomb.cpv:
- return vercmp(atoma.version, atomb.version)
- # Versions match, let's fall back to operator matching
- else:
- return cmp_func(
- ops.get(atoma.operator, -1),
- ops.get(atomb.operator, -1),
- )
-
- def _Atom(key):
- if key is not None:
- return Atom(key)
- return None
-
- resdict = usedict[flag]
- if len(resdict) == 1:
- resdesc = next(iter(resdict.items()))[1]
- else:
- try:
- reskeys = {_Atom(k): k for k in resdict}
- except portage.exception.InvalidAtom as e:
- writemsg_level(
- "ERROR: failed parsing %s/metadata.xml: %s\n"
- % (cp, e),
- level=logging.ERROR,
- noiselevel=-1,
- )
- self.returncode |= 1
+ try:
+ usedict = parse_metadata_use(metadata)
+ except portage.exception.ParseError as e:
+ writemsg_level(
+ "ERROR: failed parsing {}/metadata.xml: {}\n".format(cp, e),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode |= 1
+ else:
+ for flag in sorted(usedict):
+
+ def atomcmp(atoma, atomb):
+ # None is better than an atom, that's why we reverse the args
+ if atoma is None or atomb is None:
+ return cmp_func(atomb, atoma)
+ # Same for plain PNs (.operator is None then)
+ elif atoma.operator is None or atomb.operator is None:
+ return cmp_func(atomb.operator, atoma.operator)
+ # Version matching
+ elif atoma.cpv != atomb.cpv:
+ return vercmp(atoma.version, atomb.version)
+ # Versions match, let's fall back to operator matching
+ else:
+ return cmp_func(
+ ops.get(atoma.operator, -1),
+ ops.get(atomb.operator, -1),
+ )
+
+ def _Atom(key):
+ if key is not None:
+ return Atom(key)
+ return None
+
+ resdict = usedict[flag]
+ if len(resdict) == 1:
resdesc = next(iter(resdict.items()))[1]
else:
- resatoms = sorted(reskeys, key=cmp_sort_key(atomcmp))
- resdesc = resdict[reskeys[resatoms[-1]]]
+ try:
+ reskeys = {_Atom(k): k for k in resdict}
+ except portage.exception.InvalidAtom as e:
+ writemsg_level(
+ "ERROR: failed parsing %s/metadata.xml: %s\n"
+ % (cp, e),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode |= 1
+ resdesc = next(iter(resdict.items()))[1]
+ else:
+ resatoms = sorted(
+ reskeys, key=cmp_sort_key(atomcmp)
+ )
+ resdesc = resdict[reskeys[resatoms[-1]]]
+
+ output.write("{}:{} - {}\n".format(cp, flag, resdesc))
- output.write("{}:{} - {}\n".format(cp, flag, resdesc))
+ output.close()
+ if prev_mtime is not None and prev_md5 == portage.checksum.perform_md5(
+ desc_path
+ ):
+ # Preserve mtime for rsync.
+ mtime = prev_mtime
+ else:
+ # For portability, and consistency with the mtime preservation
+ # code, set mtime to an exact integer value.
+ mtime = int(time.time())
- output.close()
- if prev_mtime is not None and prev_md5 == portage.checksum.perform_md5(
- desc_path
- ):
- # Preserve mtime for rsync.
- mtime = prev_mtime
- else:
- # For portability, and consistency with the mtime preservation
- # code, set mtime to an exact integer value.
- mtime = int(time.time())
+ os.utime(desc_path, (mtime, mtime))
- os.utime(desc_path, (mtime, mtime))
+ class GenChangeLogs:
+ def __init__(
+ self,
+ portdb,
+ changelog_output,
+ changelog_reversed,
+ max_jobs=None,
+ max_load=None,
+ ):
+ self.returncode = os.EX_OK
+ self._portdb = portdb
+ self._wrapper = textwrap.TextWrapper(
+ width=78, initial_indent=" ", subsequent_indent=" "
+ )
+ self._changelog_output = changelog_output
+ self._changelog_reversed = changelog_reversed
+ self._max_jobs = max_jobs
+ self._max_load = max_load
+ self._repo_path = self._portdb.porttrees[0]
+ # --work-tree=... must be passed to Git if GIT_DIR is used
+ # and GIT_DIR is not a child of the root of the checkout
+ # eg:
+ # GIT_DIR=${parent}/work/.git/
+ # work-tree=${parent}/staging/
+ # If work-tree is not passed, Git tries to use the shared
+ # parent of the current directory and the ${GIT_DIR}, which can
+ # be outside the root of the checkout.
+ self._work_tree = "--work-tree=%s" % self._repo_path
+
+ @staticmethod
+ def grab(cmd):
+ p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
+ return _unicode_decode(
+ p.communicate()[0], encoding=_encodings["stdio"], errors="strict"
+ )
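
As the --work-tree comment in __init__ notes, passing --work-tree pins path resolution inside the checkout even when GIT_DIR points elsewhere. A minimal sketch of the kind of invocation grab() wraps (the repository path is illustrative):

    import subprocess

    repo = "/var/db/repos/gentoo"  # illustrative path
    cmd = ["git", "--work-tree=" + repo, "log", "--format=%ct", "-1", "."]
    newest_commit_ts = subprocess.run(
        cmd, cwd=repo, capture_output=True, text=True
    ).stdout.strip()
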
+ def generate_changelog(self, cp):
-class GenChangeLogs:
- def __init__(
- self, portdb, changelog_output, changelog_reversed, max_jobs=None, max_load=None
- ):
- self.returncode = os.EX_OK
- self._portdb = portdb
- self._wrapper = textwrap.TextWrapper(
- width=78, initial_indent=" ", subsequent_indent=" "
- )
- self._changelog_output = changelog_output
- self._changelog_reversed = changelog_reversed
- self._max_jobs = max_jobs
- self._max_load = max_load
- self._repo_path = self._portdb.porttrees[0]
- # --work-tree=... must be passed to Git if GIT_DIR is used
- # and GIT_DIR is not a child of the root of the checkout
- # eg:
- # GIT_DIR=${parent}/work/.git/
- # work-tree=${parent}/staging/
- # If work-tree is not passed, Git tries to use the shared
- # parent of the current directory and the ${GIT_DIR}, which can
- # be outside the root of the checkout.
- self._work_tree = "--work-tree=%s" % self._repo_path
-
- @staticmethod
- def grab(cmd):
- p = subprocess.Popen(cmd, stdout=subprocess.PIPE)
- return _unicode_decode(
- p.communicate()[0], encoding=_encodings["stdio"], errors="strict"
- )
+ os.chdir(os.path.join(self._repo_path, cp))
+ # Determine whether ChangeLog is up-to-date by comparing
+ # the newest commit timestamp with the ChangeLog timestamp.
+ lmod = self.grab(["git", self._work_tree, "log", "--format=%ct", "-1", "."])
+ if not lmod:
+ # This cp has not been added to the repo.
+ return
- def generate_changelog(self, cp):
+ lmod = int(lmod)
- os.chdir(os.path.join(self._repo_path, cp))
- # Determine whether ChangeLog is up-to-date by comparing
- # the newest commit timestamp with the ChangeLog timestamp.
- lmod = self.grab(["git", self._work_tree, "log", "--format=%ct", "-1", "."])
- if not lmod:
- # This cp has not been added to the repo.
- return
+ try:
+ cmod = os.stat("ChangeLog")[stat.ST_MTIME]
+ except OSError:
+ cmod = 0
- lmod = int(lmod)
+ # Use exact comparison, since commit times are
+ # not necessarily ordered.
+ if cmod == lmod:
+ return
- try:
- cmod = os.stat("ChangeLog")[stat.ST_MTIME]
- except OSError:
- cmod = 0
+ try:
+ output = open(
+ self._changelog_output,
+ mode="w",
+ encoding=_encodings["repo.content"],
+ errors="backslashreplace",
+ )
+ except OSError as e:
+ writemsg_level(
+ "ERROR: failed to open ChangeLog for %s: %s\n"
+ % (
+ cp,
+ e,
+ ),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode |= 2
+ return
- # Use exact comparison, since commit times are
- # not necessarily ordered.
- if cmod == lmod:
- return
+ output.write(
+ textwrap.dedent(
+ """\
+ # ChangeLog for %s
+ # Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
+ # (auto-generated from git log)
- try:
- output = open(
- self._changelog_output,
- mode="w",
- encoding=_encodings["repo.content"],
- errors="backslashreplace",
- )
- except OSError as e:
- writemsg_level(
- "ERROR: failed to open ChangeLog for %s: %s\n"
- % (
- cp,
- e,
- ),
- level=logging.ERROR,
- noiselevel=-1,
- )
- self.returncode |= 2
- return
-
- output.write(
- textwrap.dedent(
- """\
- # ChangeLog for %s
- # Copyright 1999-%s Gentoo Foundation; Distributed under the GPL v2
- # (auto-generated from git log)
-
- """
- % (cp, time.strftime("%Y"))
+ """
+ % (cp, time.strftime("%Y"))
+ )
)
- )
- # now grab all the commits
- revlist_cmd = ["git", self._work_tree, "rev-list"]
- if self._changelog_reversed:
- revlist_cmd.append("--reverse")
- revlist_cmd.extend(["HEAD", "--", "."])
- commits = self.grab(revlist_cmd).split()
-
- for c in commits:
- # Explaining the arguments:
- # --name-status to get a list of added/removed files
- # --no-renames to avoid getting more complex records on the list
- # --format to get the timestamp, author and commit description
- # --root to make it work fine even with the initial commit
- # --relative=${cp} to get paths relative to ebuilddir
- # -r (recursive) to get per-file changes
- # then the commit-id and path.
-
- cinfo = (
- self.grab(
- [
- "git",
- self._work_tree,
- "diff-tree",
- "--name-status",
- "--no-renames",
- "--format=%ct %cN <%cE>%n%B",
- "--root",
- "--relative={}".format(cp),
- "-r",
- c,
- "--",
- ".",
- ]
+ # now grab all the commits
+ revlist_cmd = ["git", self._work_tree, "rev-list"]
+ if self._changelog_reversed:
+ revlist_cmd.append("--reverse")
+ revlist_cmd.extend(["HEAD", "--", "."])
+ commits = self.grab(revlist_cmd).split()
+
+ for c in commits:
+ # Explaining the arguments:
+ # --name-status to get a list of added/removed files
+ # --no-renames to avoid getting more complex records on the list
+ # --format to get the timestamp, author and commit description
+ # --root to make it work fine even with the initial commit
+ # --relative=${cp} to get paths relative to ebuilddir
+ # -r (recursive) to get per-file changes
+ # then the commit-id and path.
+
+ cinfo = (
+ self.grab(
+ [
+ "git",
+ self._work_tree,
+ "diff-tree",
+ "--name-status",
+ "--no-renames",
+ "--format=%ct %cN <%cE>%n%B",
+ "--root",
+ "--relative={}".format(cp),
+ "-r",
+ c,
+ "--",
+ ".",
+ ]
+ )
+ .rstrip("\n")
+ .split("\n")
)
- .rstrip("\n")
- .split("\n")
- )
- # Expected output:
- # timestamp Author Name <author@email>
- # commit message l1
- # ...
- # commit message ln
- #
- # status1 filename1
- # ...
- # statusn filenamen
-
- changed = []
- for n, l in enumerate(reversed(cinfo)):
- if not l:
- body = cinfo[1 : -n - 1]
- break
- else:
- f = l.split()
- if f[1] == "Manifest":
- pass # XXX: remanifest commits?
- elif f[1].startswith("ChangeLog"):
- pass
- elif f[0].startswith("A"):
- changed.append(ChangeLogTypeSort("+", f[1]))
- elif f[0].startswith("D"):
- changed.append(ChangeLogTypeSort("-", f[1]))
- elif f[0].startswith("M"):
- changed.append(ChangeLogTypeSort("", f[1]))
+ # Expected output:
+ # timestamp Author Name <author@email>
+ # commit message l1
+ # ...
+ # commit message ln
+ #
+ # status1 filename1
+ # ...
+ # statusn filenamen
+
+ changed = []
+ for n, l in enumerate(reversed(cinfo)):
+ if not l:
+ body = cinfo[1 : -n - 1]
+ break
else:
- writemsg_level(
- "ERROR: unexpected git file status for %s: %s\n"
- % (
- cp,
- f,
- ),
- level=logging.ERROR,
- noiselevel=-1,
- )
- self.returncode |= 1
-
- if not changed:
- continue
-
- (ts, author) = cinfo[0].split(" ", 1)
- date = time.strftime("%d %b %Y", time.gmtime(float(ts)))
-
- changed = [str(x) for x in sorted(changed)]
-
- wroteheader = False
- # Reverse the sort order for headers.
- for c in reversed(changed):
- if c.startswith("+") and c.endswith(".ebuild"):
- output.write("*{} ({})\n".format(c[1:-7], date))
- wroteheader = True
- if wroteheader:
- output.write("\n")
-
- # strip '<cp>: ', '[<cp>] ', and similar
- body[0] = re.sub(r"^\W*" + re.escape(cp) + r"\W+", "", body[0])
- # strip trailing newline
- if not body[-1]:
- body = body[:-1]
- # strip git-svn id
- if body[-1].startswith("git-svn-id:") and not body[-2]:
- body = body[:-2]
- # strip the repoman version/manifest note
- if (
- body[-1] == " (Signed Manifest commit)"
- or body[-1] == " (Unsigned Manifest commit)"
- ):
- body = body[:-1]
- if body[-1].startswith("(Portage version:") and body[-1].endswith(")"):
- body = body[:-1]
+ f = l.split()
+ if f[1] == "Manifest":
+ pass # XXX: remanifest commits?
+ elif f[1].startswith("ChangeLog"):
+ pass
+ elif f[0].startswith("A"):
+ changed.append(ChangeLogTypeSort("+", f[1]))
+ elif f[0].startswith("D"):
+ changed.append(ChangeLogTypeSort("-", f[1]))
+ elif f[0].startswith("M"):
+ changed.append(ChangeLogTypeSort("", f[1]))
+ else:
+ writemsg_level(
+ "ERROR: unexpected git file status for %s: %s\n"
+ % (
+ cp,
+ f,
+ ),
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode |= 1
+
+ if not changed:
+ continue
+
+ (ts, author) = cinfo[0].split(" ", 1)
+ date = time.strftime("%d %b %Y", time.gmtime(float(ts)))
+
+ changed = [str(x) for x in sorted(changed)]
+
+ wroteheader = False
+ # Reverse the sort order for headers.
+ for c in reversed(changed):
+ if c.startswith("+") and c.endswith(".ebuild"):
+ output.write("*{} ({})\n".format(c[1:-7], date))
+ wroteheader = True
+ if wroteheader:
+ output.write("\n")
+
+ # strip '<cp>: ', '[<cp>] ', and similar
+ body[0] = re.sub(r"^\W*" + re.escape(cp) + r"\W+", "", body[0])
+ # strip trailing newline
if not body[-1]:
body = body[:-1]
-
- # don't break filenames on hyphens
- self._wrapper.break_on_hyphens = False
- output.write(
- self._wrapper.fill(
- "{}; {} {}:".format(date, author, ", ".join(changed))
+ # strip git-svn id
+ if body[-1].startswith("git-svn-id:") and not body[-2]:
+ body = body[:-2]
+ # strip the repoman version/manifest note
+ if (
+ body[-1] == " (Signed Manifest commit)"
+ or body[-1] == " (Unsigned Manifest commit)"
+ ):
+ body = body[:-1]
+ if body[-1].startswith("(Portage version:") and body[-1].endswith(")"):
+ body = body[:-1]
+ if not body[-1]:
+ body = body[:-1]
+
+ # don't break filenames on hyphens
+ self._wrapper.break_on_hyphens = False
+ output.write(
+ self._wrapper.fill(
+ "{}; {} {}:".format(date, author, ", ".join(changed))
+ )
+ )
+ # but feel free to break commit messages there
+ self._wrapper.break_on_hyphens = True
+ output.write(
+ "\n%s\n\n" % "\n".join(self._wrapper.fill(x) for x in body)
)
- )
- # but feel free to break commit messages there
- self._wrapper.break_on_hyphens = True
- output.write("\n%s\n\n" % "\n".join(self._wrapper.fill(x) for x in body))
- output.close()
- os.utime(self._changelog_output, (lmod, lmod))
+ output.close()
+ os.utime(self._changelog_output, (lmod, lmod))
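
generate_changelog() above splits each diff-tree record at the blank line between the commit message and the --name-status entries, scanning from the end so blank lines earlier in the message body are skipped. A standalone sketch of that split (split_commit_record is illustrative, not a portage helper):

    def split_commit_record(lines):
        # lines follow "--format=%ct %cN <%cE>%n%B" plus status lines:
        # header, message body, one blank line, then "STATUS<TAB>file".
        for n, line in enumerate(reversed(lines)):
            if not line:
                return lines[0], lines[1 : -n - 1], lines[len(lines) - n :]
        return lines[0], lines[1:], []

    header, body, files = split_commit_record(
        [
            "1672444676 Larry <larry@gentoo.org>",
            "cat/pkg: version bump",
            "",
            "A\tpkg-1.0.ebuild",
        ]
    )
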
- def _task_iter(self):
- if not os.path.isdir(
- os.environ.get("GIT_DIR", os.path.join(self._repo_path, ".git"))
- ):
- writemsg_level(
- "ERROR: --update-changelogs supported only in git repos\n",
- level=logging.ERROR,
- noiselevel=-1,
- )
- self.returncode = 127
- return
-
- for cp in self._portdb.cp_all():
- yield AsyncFunction(target=self.generate_changelog, args=[cp])
-
- def run(self):
- return run_main_scheduler(
- TaskScheduler(
- self._task_iter(),
- event_loop=global_event_loop(),
- max_jobs=self._max_jobs,
- max_load=self._max_load,
+ def _task_iter(self):
+ if not os.path.isdir(
+ os.environ.get("GIT_DIR", os.path.join(self._repo_path, ".git"))
+ ):
+ writemsg_level(
+ "ERROR: --update-changelogs supported only in git repos\n",
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ self.returncode = 127
+ return
+
+ for cp in self._portdb.cp_all():
+ yield AsyncFunction(target=self.generate_changelog, args=[cp])
+
+ def run(self):
+ return run_main_scheduler(
+ TaskScheduler(
+ self._task_iter(),
+ event_loop=global_event_loop(),
+ max_jobs=self._max_jobs,
+ max_load=self._max_load,
+ )
)
- )
-
-
-def egencache_main(args):
-
- # The calling environment is ignored, so the program is
- # completely controlled by commandline arguments.
- env = {}
-
- if not sys.stdout.isatty() or os.environ.get("NOCOLOR", "").lower() in (
- "yes",
- "true",
- ):
- portage.output.nocolor()
- env["NOCOLOR"] = "true"
- parser, options, atoms = parse_args(args)
+ def egencache_main(args):
- config_root = options.config_root
+ # The calling environment is ignored, so the program is
+ # completely controlled by commandline arguments.
+ env = {}
- if options.repositories_configuration is not None:
- env["PORTAGE_REPOSITORIES"] = options.repositories_configuration
-
- if options.cache_dir is not None:
- env["PORTAGE_DEPCACHEDIR"] = options.cache_dir
+ if not sys.stdout.isatty() or os.environ.get("NOCOLOR", "").lower() in (
+ "yes",
+ "true",
+ ):
+ portage.output.nocolor()
+ env["NOCOLOR"] = "true"
- settings = portage.config(config_root=config_root, local_config=False, env=env)
+ parser, options, atoms = parse_args(args)
- default_opts = None
- if not options.ignore_default_opts:
- default_opts = portage.util.shlex_split(
- settings.get("EGENCACHE_DEFAULT_OPTS", "")
- )
+ config_root = options.config_root
- if default_opts:
- parser, options, args = parse_args(default_opts + args)
+ if options.repositories_configuration is not None:
+ env["PORTAGE_REPOSITORIES"] = options.repositories_configuration
if options.cache_dir is not None:
env["PORTAGE_DEPCACHEDIR"] = options.cache_dir
settings = portage.config(config_root=config_root, local_config=False, env=env)
- if not (
- options.update
- or options.update_use_local_desc
- or options.update_changelogs
- or options.update_manifests
- or options.update_pkg_desc_index
- ):
- parser.error("No action specified")
- return 1
-
- if options.repo is None:
- if len(settings.repositories.prepos) == 2:
- for repo in settings.repositories:
- if repo.name != "DEFAULT":
- options.repo = repo.name
- break
+ default_opts = None
+ if not options.ignore_default_opts:
+ default_opts = portage.util.shlex_split(
+ settings.get("EGENCACHE_DEFAULT_OPTS", "")
+ )
- if options.repo is None:
- parser.error("--repo option is required")
+ if default_opts:
+ parser, options, args = parse_args(default_opts + args)
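
The reparse above is what makes EGENCACHE_DEFAULT_OPTS behave as defaults rather than overrides: the variable's options are prepended, and for ordinary argparse store actions the last occurrence wins, so flags given on the command line beat the environment. A small sketch of the merge (merged_args is illustrative):

    import shlex

    def merged_args(argv, default_opts, ignore_defaults=False):
        # Prepend defaults; later (explicit) occurrences win in argparse.
        if ignore_defaults or not default_opts:
            return list(argv)
        return shlex.split(default_opts) + list(argv)

    print(merged_args(["--update", "--jobs", "8"], "--jobs 4 --tolerant"))
    # -> ['--jobs', '4', '--tolerant', '--update', '--jobs', '8'], so jobs=8 wins
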
- repo_path = settings.repositories.treemap.get(options.repo)
- if repo_path is None:
- parser.error("Unable to locate repository named '{}'".format(options.repo))
- return 1
+ if options.cache_dir is not None:
+ env["PORTAGE_DEPCACHEDIR"] = options.cache_dir
- repo_config = settings.repositories.get_repo_for_location(repo_path)
+ settings = portage.config(
+ config_root=config_root, local_config=False, env=env
+ )
- if options.strict_manifests is not None:
- if options.strict_manifests == "y":
- settings.features.add("strict")
- else:
- settings.features.discard("strict")
+ if not (
+ options.update
+ or options.update_use_local_desc
+ or options.update_changelogs
+ or options.update_manifests
+ or options.update_pkg_desc_index
+ ):
+ parser.error("No action specified")
+ return 1
- if options.update and "metadata-transfer" not in settings.features:
- # Forcibly enable metadata-transfer if portdbapi has a pregenerated
- # cache that does not support eclass validation.
- cache = repo_config.get_pregenerated_cache(
- portage.dbapi.dbapi._known_keys, readonly=True
- )
- if cache is not None and not cache.complete_eclass_entries:
- settings.features.add("metadata-transfer")
- cache = None
+ if options.repo is None:
+ if len(settings.repositories.prepos) == 2:
+ for repo in settings.repositories:
+ if repo.name != "DEFAULT":
+ options.repo = repo.name
+ break
- settings.lock()
+ if options.repo is None:
+ parser.error("--repo option is required")
- portdb = portage.portdbapi(mysettings=settings)
+ repo_path = settings.repositories.treemap.get(options.repo)
+ if repo_path is None:
+ parser.error("Unable to locate repository named '{}'".format(options.repo))
+ return 1
- # Limit ebuilds to the specified repo.
- portdb.porttrees = [repo_path]
+ repo_config = settings.repositories.get_repo_for_location(repo_path)
- if options.update:
- if options.cache_dir is not None:
- # already validated earlier
- pass
- else:
- # We check write access after the portdbapi constructor
- # has had an opportunity to create it. This ensures that
- # we don't use the cache in the "volatile" mode which is
- # undesirable for egencache.
- if not os.access(settings["PORTAGE_DEPCACHEDIR"], os.W_OK):
- writemsg_level(
- "egencache: error: "
- + "write access denied: {}\n".format(
- settings["PORTAGE_DEPCACHEDIR"]
- ),
- level=logging.ERROR,
- noiselevel=-1,
- )
- return 1
+ if options.strict_manifests is not None:
+ if options.strict_manifests == "y":
+ settings.features.add("strict")
+ else:
+ settings.features.discard("strict")
- if options.sign_manifests is not None:
- repo_config.sign_manifest = options.sign_manifests == "y"
+ if options.update and "metadata-transfer" not in settings.features:
+ # Forcibly enable metadata-transfer if portdbapi has a pregenerated
+ # cache that does not support eclass validation.
+ cache = repo_config.get_pregenerated_cache(
+ portage.dbapi.dbapi._known_keys, readonly=True
+ )
+ if cache is not None and not cache.complete_eclass_entries:
+ settings.features.add("metadata-transfer")
+ cache = None
- if options.thin_manifests is not None:
- repo_config.thin_manifest = options.thin_manifests == "y"
+ settings.lock()
- gpg_cmd = None
- gpg_vars = None
- force_sign_key = None
+ portdb = portage.portdbapi(mysettings=settings)
- if options.update_manifests:
- if repo_config.sign_manifest:
+ # Limit ebuilds to the specified repo.
+ portdb.porttrees = [repo_path]
- sign_problem = False
- gpg_dir = None
- gpg_cmd = settings.get("PORTAGE_GPG_SIGNING_COMMAND")
- if gpg_cmd is None:
- writemsg_level(
- "egencache: error: "
- "PORTAGE_GPG_SIGNING_COMMAND is unset! "
- "Is make.globals missing?\n",
- level=logging.ERROR,
- noiselevel=-1,
- )
- sign_problem = True
- elif (
- "${PORTAGE_GPG_KEY}" in gpg_cmd
- and options.gpg_key is None
- and "PORTAGE_GPG_KEY" not in settings
- ):
- writemsg_level(
- "egencache: error: " "PORTAGE_GPG_KEY is unset!\n",
- level=logging.ERROR,
- noiselevel=-1,
- )
- sign_problem = True
- elif "${PORTAGE_GPG_DIR}" in gpg_cmd:
- if options.gpg_dir is not None:
- gpg_dir = options.gpg_dir
- elif "PORTAGE_GPG_DIR" not in settings:
- gpg_dir = os.path.expanduser("~/.gnupg")
- else:
- gpg_dir = os.path.expanduser(settings["PORTAGE_GPG_DIR"])
- if not os.access(gpg_dir, os.X_OK):
+ if options.update:
+ if options.cache_dir is not None:
+ # already validated earlier
+ pass
+ else:
+ # We check write access after the portdbapi constructor
+ # has had an opportunity to create it. This ensures that
+ # we don't use the cache in the "volatile" mode which is
+ # undesirable for egencache.
+ if not os.access(settings["PORTAGE_DEPCACHEDIR"], os.W_OK):
writemsg_level(
- (
- "egencache: error: "
- "Unable to access directory: "
- "PORTAGE_GPG_DIR='%s'\n"
- )
- % gpg_dir,
+ "egencache: error: "
+ + "write access denied: {}\n".format(
+ settings["PORTAGE_DEPCACHEDIR"]
+ ),
level=logging.ERROR,
noiselevel=-1,
)
- sign_problem = True
-
- if sign_problem:
- writemsg_level(
- "egencache: You may disable manifest "
- "signatures with --sign-manifests=n or by setting "
- '"sign-manifests = false" in metadata/layout.conf\n',
- level=logging.ERROR,
- noiselevel=-1,
- )
- return 1
-
- gpg_vars = {}
- if gpg_dir is not None:
- gpg_vars["PORTAGE_GPG_DIR"] = gpg_dir
- gpg_var_names = []
- if options.gpg_key is None:
- gpg_var_names.append("PORTAGE_GPG_KEY")
- else:
- gpg_vars["PORTAGE_GPG_KEY"] = options.gpg_key
+ return 1
- for k in gpg_var_names:
- v = settings.get(k)
- if v is not None:
- gpg_vars[k] = v
+ if options.sign_manifests is not None:
+ repo_config.sign_manifest = options.sign_manifests == "y"
- force_sign_key = gpg_vars.get("PORTAGE_GPG_KEY")
+ if options.thin_manifests is not None:
+ repo_config.thin_manifest = options.thin_manifests == "y"
- ret = [os.EX_OK]
+ gpg_cmd = None
+ gpg_vars = None
+ force_sign_key = None
- if options.update:
- cp_iter = None
- if atoms:
- cp_iter = iter(atoms)
+ if options.update_manifests:
+ if repo_config.sign_manifest:
- gen_cache = GenCache(
- portdb,
- cp_iter=cp_iter,
- max_jobs=options.jobs,
- max_load=options.load_average,
- rsync=options.rsync,
- external_cache_only=options.external_cache_only,
- )
- gen_cache.run()
- if options.tolerant:
- ret.append(os.EX_OK)
- else:
- ret.append(gen_cache.returncode)
+ sign_problem = False
+ gpg_dir = None
+ gpg_cmd = settings.get("PORTAGE_GPG_SIGNING_COMMAND")
+ if gpg_cmd is None:
+ writemsg_level(
+ "egencache: error: "
+ "PORTAGE_GPG_SIGNING_COMMAND is unset! "
+ "Is make.globals missing?\n",
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ sign_problem = True
+ elif (
+ "${PORTAGE_GPG_KEY}" in gpg_cmd
+ and options.gpg_key is None
+ and "PORTAGE_GPG_KEY" not in settings
+ ):
+ writemsg_level(
+ "egencache: error: " "PORTAGE_GPG_KEY is unset!\n",
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ sign_problem = True
+ elif "${PORTAGE_GPG_DIR}" in gpg_cmd:
+ if options.gpg_dir is not None:
+ gpg_dir = options.gpg_dir
+ elif "PORTAGE_GPG_DIR" not in settings:
+ gpg_dir = os.path.expanduser("~/.gnupg")
+ else:
+ gpg_dir = os.path.expanduser(settings["PORTAGE_GPG_DIR"])
+ if not os.access(gpg_dir, os.X_OK):
+ writemsg_level(
+ (
+ "egencache: error: "
+ "Unable to access directory: "
+ "PORTAGE_GPG_DIR='%s'\n"
+ )
+ % gpg_dir,
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ sign_problem = True
- if options.update_pkg_desc_index:
- if not options.external_cache_only and repo_config.writable:
- writable_location = repo_config.location
- else:
- writable_location = os.path.join(
- portdb.depcachedir, repo_config.location.lstrip(os.sep)
- )
- if not options.external_cache_only:
- msg = [
- "WARNING: Repository is not writable: {}".format(
- repo_config.location
- ),
- " Using cache directory instead: {}".format(
- writable_location
- ),
- ]
- msg = "".join(line + "\n" for line in msg)
- writemsg_level(msg, level=logging.WARNING, noiselevel=-1)
+ if sign_problem:
+ writemsg_level(
+ "egencache: You may disable manifest "
+ "signatures with --sign-manifests=n or by setting "
+ '"sign-manifests = false" in metadata/layout.conf\n',
+ level=logging.ERROR,
+ noiselevel=-1,
+ )
+ return 1
- gen_index = GenPkgDescIndex(
- repo_config,
- portdb,
- os.path.join(writable_location, "metadata", "pkg_desc_index"),
- verbose=options.verbose,
- )
- gen_index.run()
- ret.append(gen_index.returncode)
+ gpg_vars = {}
+ if gpg_dir is not None:
+ gpg_vars["PORTAGE_GPG_DIR"] = gpg_dir
+ gpg_var_names = []
+ if options.gpg_key is None:
+ gpg_var_names.append("PORTAGE_GPG_KEY")
+ else:
+ gpg_vars["PORTAGE_GPG_KEY"] = options.gpg_key
- if options.update_use_local_desc:
- gen_desc = GenUseLocalDesc(
- portdb,
- output=options.uld_output,
- preserve_comments=options.preserve_comments,
- )
- gen_desc.run()
- ret.append(gen_desc.returncode)
+ for k in gpg_var_names:
+ v = settings.get(k)
+ if v is not None:
+ gpg_vars[k] = v
- if options.update_changelogs:
- gen_clogs = GenChangeLogs(
- portdb,
- changelog_output=options.changelog_output,
- changelog_reversed=options.changelog_reversed,
- max_jobs=options.jobs,
- max_load=options.load_average,
- )
- signum = gen_clogs.run()
- if signum is not None:
- sys.exit(128 + signum)
- ret.append(gen_clogs.returncode)
+ force_sign_key = gpg_vars.get("PORTAGE_GPG_KEY")
- if options.update_manifests:
+ ret = [os.EX_OK]
- cp_iter = None
- if atoms:
- cp_iter = iter(atoms)
+ if options.update:
+ cp_iter = None
+ if atoms:
+ cp_iter = iter(atoms)
- event_loop = global_event_loop()
- scheduler = ManifestScheduler(
- portdb,
- cp_iter=cp_iter,
- gpg_cmd=gpg_cmd,
- gpg_vars=gpg_vars,
- force_sign_key=force_sign_key,
- max_jobs=options.jobs,
- max_load=options.load_average,
- event_loop=event_loop,
- )
+ gen_cache = GenCache(
+ portdb,
+ cp_iter=cp_iter,
+ max_jobs=options.jobs,
+ max_load=options.load_average,
+ rsync=options.rsync,
+ external_cache_only=options.external_cache_only,
+ )
+ gen_cache.run()
+ if options.tolerant:
+ ret.append(os.EX_OK)
+ else:
+ ret.append(gen_cache.returncode)
- signum = run_main_scheduler(scheduler)
- if signum is not None:
- sys.exit(128 + signum)
+ if options.update_pkg_desc_index:
+ if not options.external_cache_only and repo_config.writable:
+ writable_location = repo_config.location
+ else:
+ writable_location = os.path.join(
+ portdb.depcachedir, repo_config.location.lstrip(os.sep)
+ )
+ if not options.external_cache_only:
+ msg = [
+ "WARNING: Repository is not writable: {}".format(
+ repo_config.location
+ ),
+ " Using cache directory instead: {}".format(
+ writable_location
+ ),
+ ]
+ msg = "".join(line + "\n" for line in msg)
+ writemsg_level(msg, level=logging.WARNING, noiselevel=-1)
+
+ gen_index = GenPkgDescIndex(
+ repo_config,
+ portdb,
+ os.path.join(writable_location, "metadata", "pkg_desc_index"),
+ verbose=options.verbose,
+ )
+ gen_index.run()
+ ret.append(gen_index.returncode)
+
+ if options.update_use_local_desc:
+ gen_desc = GenUseLocalDesc(
+ portdb,
+ output=options.uld_output,
+ preserve_comments=options.preserve_comments,
+ )
+ gen_desc.run()
+ ret.append(gen_desc.returncode)
+
+ if options.update_changelogs:
+ gen_clogs = GenChangeLogs(
+ portdb,
+ changelog_output=options.changelog_output,
+ changelog_reversed=options.changelog_reversed,
+ max_jobs=options.jobs,
+ max_load=options.load_average,
+ )
+ signum = gen_clogs.run()
+ if signum is not None:
+ sys.exit(128 + signum)
+ ret.append(gen_clogs.returncode)
+
+ if options.update_manifests:
+
+ cp_iter = None
+ if atoms:
+ cp_iter = iter(atoms)
+
+ event_loop = global_event_loop()
+ scheduler = ManifestScheduler(
+ portdb,
+ cp_iter=cp_iter,
+ gpg_cmd=gpg_cmd,
+ gpg_vars=gpg_vars,
+ force_sign_key=force_sign_key,
+ max_jobs=options.jobs,
+ max_load=options.load_average,
+ event_loop=event_loop,
+ )
- if options.tolerant:
- ret.append(os.EX_OK)
- else:
- ret.append(scheduler.returncode)
+ signum = run_main_scheduler(scheduler)
+ if signum is not None:
+ sys.exit(128 + signum)
- if options.write_timestamp:
- timestamp_path = os.path.join(repo_path, "metadata", "timestamp.chk")
- try:
- portage.util.write_atomic(
- timestamp_path, time.strftime("%s\n" % TIMESTAMP_FORMAT, time.gmtime())
- )
- except (OSError, portage.exception.PortageException):
- ret.append(os.EX_IOERR)
- else:
- ret.append(os.EX_OK)
+ if options.tolerant:
+ ret.append(os.EX_OK)
+ else:
+ ret.append(scheduler.returncode)
- return max(ret)
+ if options.write_timestamp:
+ timestamp_path = os.path.join(repo_path, "metadata", "timestamp.chk")
+ try:
+ portage.util.write_atomic(
+ timestamp_path,
+ time.strftime("%s\n" % TIMESTAMP_FORMAT, time.gmtime()),
+ )
+ except (OSError, portage.exception.PortageException):
+ ret.append(os.EX_IOERR)
+ else:
+ ret.append(os.EX_OK)
+ return max(ret)
-if __name__ == "__main__":
- portage._disable_legacy_globals()
- portage.util.noiselimit = -1
- try:
- sys.exit(egencache_main(sys.argv[1:]))
- finally:
- global_event_loop().close()
+ if __name__ == "__main__":
+ portage._disable_legacy_globals()
+ portage.util.noiselimit = -1
+ try:
+ sys.exit(egencache_main(sys.argv[1:]))
+ finally:
+ global_event_loop().close()
+
+except KeyboardInterrupt as e:
+ # Prevent traceback on ^C
+ signum = getattr(e, "signum", signal.SIGINT)
+ signal.signal(signum, signal.SIG_DFL)
+ raise_signal(signum)
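
The pattern shared by all of the entry scripts in this commit, condensed
into a standalone sketch (the work body is a placeholder, not taken from
the diff):

    import os
    import signal
    import sys

    # signal.raise_signal() only exists on Python >= 3.8; older
    # interpreters fall back to delivering the signal via os.kill().
    raise_signal = getattr(
        signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
    )

    # Inheriting from KeyboardInterrupt keeps asyncio from printing a
    # traceback when the exception unwinds through the event loop.
    class SignalInterrupt(KeyboardInterrupt):
        def __init__(self, signum):
            self.signum = signum

    try:
        def signal_interrupt(signum, _frame):
            raise SignalInterrupt(signum)

        signal.signal(signal.SIGTERM, signal_interrupt)
        # ... the script's real work goes here (placeholder) ...
        sys.exit(0)
    except KeyboardInterrupt as e:
        # A plain ^C raises KeyboardInterrupt with no signum attribute, so
        # SIGINT is the default; SIGTERM arrives wrapped in SignalInterrupt.
        signum = getattr(e, "signum", signal.SIGINT)
        # Reset to the default disposition and re-raise, so the invoking
        # shell sees death-by-signal instead of a plain exit status.
        signal.signal(signum, signal.SIG_DFL)
        raise_signal(signum)
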
diff --git a/bin/emaint b/bin/emaint
index 103dc2571..7fb38f5e3 100755
--- a/bin/emaint
+++ b/bin/emaint
@@ -5,46 +5,66 @@
"""System health checks and maintenance utilities.
"""
-import sys
-import errno
+import os
+import signal
+
+# For compatibility with Python < 3.8
+raise_signal = getattr(
+ signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
+)
+
+# Inherit from KeyboardInterrupt to avoid a traceback from asyncio.
+class SignalInterrupt(KeyboardInterrupt):
+ def __init__(self, signum):
+ self.signum = signum
+
-# This block ensures that ^C interrupts are handled quietly.
try:
- import signal
- def exithandler(signum, _frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- sys.exit(128 + signum)
+ def signal_interrupt(signum, _frame):
+ raise SignalInterrupt(signum)
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
+ def debug_signal(_signum, _frame):
+ import pdb
+
+ pdb.set_trace()
+
+ # Prevent "[Errno 32] Broken pipe" exceptions when writing to a pipe.
signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal_interrupt)
+ signal.signal(signal.SIGUSR1, debug_signal)
-except KeyboardInterrupt:
- sys.exit(1)
+ import sys
+ import errno
+ from os import path as osp
-from os import path as osp
+ if osp.isfile(
+ osp.join(
+ osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed"
+ )
+ ):
+ sys.path.insert(
+ 0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib")
+ )
+ import portage
-if osp.isfile(
- osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), ".portage_not_installed")
-):
- sys.path.insert(
- 0, osp.join(osp.dirname(osp.dirname(osp.realpath(__file__))), "lib")
- )
-import portage
+ portage._internal_caller = True
+ from portage.emaint.main import emaint_main
+ from portage.util._eventloop.global_event_loop import global_event_loop
-portage._internal_caller = True
-from portage.emaint.main import emaint_main
-from portage.util._eventloop.global_event_loop import global_event_loop
+ try:
+ emaint_main(sys.argv[1:])
+ except OSError as e:
+ if e.errno == errno.EACCES:
+ print("\nemaint: Need superuser access")
+ sys.exit(1)
+ else:
+ raise
+ finally:
+ global_event_loop().close()
-try:
- emaint_main(sys.argv[1:])
-except OSError as e:
- if e.errno == errno.EACCES:
- print("\nemaint: Need superuser access")
- sys.exit(1)
- else:
- raise
-finally:
- global_event_loop().close()
+except KeyboardInterrupt as e:
+ # Prevent traceback on ^C
+ signum = getattr(e, "signum", signal.SIGINT)
+ signal.signal(signum, signal.SIG_DFL)
+ raise_signal(signum)
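
The compatibility shim repeated at the top of each script can be exercised
on its own; a quick POSIX-only check (illustrative, not part of the commit):

    import os
    import signal

    # Python >= 3.8 resolves this to signal.raise_signal; older versions
    # fall back to os.kill() on the current pid.
    raise_signal = getattr(
        signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
    )

    signal.signal(signal.SIGUSR1, lambda _signum, _frame: print("got SIGUSR1"))
    raise_signal(signal.SIGUSR1)  # prints "got SIGUSR1" on either code path
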
diff --git a/bin/emerge b/bin/emerge
index d90a73c34..a16c5039a 100755
--- a/bin/emerge
+++ b/bin/emerge
@@ -2,31 +2,35 @@
# Copyright 2006-2022 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
+import os
import signal
import sys
-# This block ensures that ^C interrupts are handled quietly. We handle
-# KeyboardInterrupt instead of installing a SIGINT handler, since
-# exiting from signal handlers intermittently causes python to ignore
-# the SystemExit exception with a message like this:
-# Exception SystemExit: 130 in <function remove at 0x7fd2146c1320> ignored
+# For compatibility with Python < 3.8
+raise_signal = getattr(
+ signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
+)
+
+# Inherit from KeyboardInterrupt to avoid a traceback from asyncio.
+class SignalInterrupt(KeyboardInterrupt):
+ def __init__(self, signum):
+ self.signum = signum
+
+
global_event_loop = None
try:
- def exithandler(signum, _frame):
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- sys.exit(128 + signum)
-
- signal.signal(signal.SIGTERM, exithandler)
- # Prevent "[Errno 32] Broken pipe" exceptions when
- # writing to a pipe.
- signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ def signal_interrupt(signum, _frame):
+ raise SignalInterrupt(signum)
def debug_signal(_signum, _frame):
import pdb
pdb.set_trace()
+ # Prevent "[Errno 32] Broken pipe" exceptions when writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal_interrupt)
signal.signal(signal.SIGUSR1, debug_signal)
from os import path as osp
@@ -79,10 +83,17 @@ try:
sys.exit(1)
sys.exit(retval)
-except KeyboardInterrupt:
- sys.stderr.write("\n\nExiting on signal {signal}\n".format(signal=signal.SIGINT))
+except KeyboardInterrupt as e:
+ # This block ensures that ^C interrupts are handled quietly. We handle
+ # KeyboardInterrupt instead of installing a SIGINT handler, since
+ # exiting from signal handlers intermittently causes python to ignore
+ # the SystemExit exception with a message like this:
+ # Exception SystemExit: 130 in <function remove at 0x7fd2146c1320> ignored
+ signum = getattr(e, "signum", signal.SIGINT)
+ signal.signal(signum, signal.SIG_DFL)
+ sys.stderr.write(f"\n\nExiting on signal {signum}\n")
sys.stderr.flush()
- sys.exit(128 + signal.SIGINT)
+ raise_signal(signum)
finally:
if global_event_loop is not None:
global_event_loop().close()
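
The shell-visible effect described in the commit message can be checked
from Python; a POSIX-only illustration (not part of the commit):

    import signal
    import subprocess
    import sys

    # A child that resets the handler and re-raises the signal dies *by*
    # the signal, so its returncode is -signum; a child that calls
    # sys.exit(128 + signum) would merely exit with status 130.
    child = subprocess.run(
        [
            sys.executable,
            "-c",
            "import os, signal;"
            "signal.signal(signal.SIGINT, signal.SIG_DFL);"
            "os.kill(os.getpid(), signal.SIGINT)",
        ]
    )
    assert child.returncode == -signal.SIGINT  # killed by SIGINT, not exit(130)
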
diff --git a/bin/portageq b/bin/portageq
index dca249a7b..20a2f6646 100755
--- a/bin/portageq
+++ b/bin/portageq
@@ -2,1644 +2,1584 @@
# Copyright 1999-2021 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-import argparse
+import os
import signal
-import sys
-# This block ensures that ^C interrupts are handled quietly.
+# For compatibility with Python < 3.8
+raise_signal = getattr(
+ signal, "raise_signal", lambda signum: os.kill(os.getpid(), signum)
+)
+
+# Inherit from KeyboardInterrupt to avoid a traceback from asyncio.
+class SignalInterrupt(KeyboardInterrupt):
+ def __init__(self, signum):
+ self.signum = signum
+
+
try:
- def exithandler(signum, _frame):
- signal.signal(signal.SIGINT, signal.SIG_IGN)
- signal.signal(signal.SIGTERM, signal.SIG_IGN)
- sys.exit(128 + signum)
+ def signal_interrupt(signum, _frame):
+ raise SignalInterrupt(signum)
- signal.signal(signal.SIGINT, exithandler)
- signal.signal(signal.SIGTERM, exithandler)
+ def debug_signal(_signum, _frame):
+ import pdb
-except KeyboardInterrupt:
- sys.exit(128 + signal.SIGINT)
+ pdb.set_trace()
-import os
-import types
+ # Prevent "[Errno 32] Broken pipe" exceptions when writing to a pipe.
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+ signal.signal(signal.SIGTERM, signal_interrupt)
+ signal.signal(signal.SIGUSR1, debug_signal)
-if os.path.isfile(
- os.path.join(
- os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
- ".portage_not_installed",
- )
-):
- pym_paths = [
+ import argparse
+ import sys
+ import types
+
+ if os.path.isfile(
os.path.join(
- os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "lib"
+ os.path.dirname(os.path.dirname(os.path.realpath(__file__))),
+ ".portage_not_installed",
)
- ]
- sys.path.insert(0, pym_paths[0])
-else:
- import sysconfig
-
- pym_paths = [
- os.path.join(sysconfig.get_path("purelib"), x) for x in ("_emerge", "portage")
- ]
-# Avoid sandbox violations after Python upgrade.
-if os.environ.get("SANDBOX_ON") == "1":
- sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
- for pym_path in pym_paths:
- if pym_path not in sandbox_write:
- sandbox_write.append(pym_path)
- os.environ["SANDBOX_WRITE"] = ":".join(filter(None, sandbox_write))
- del pym_path, sandbox_write
-del pym_paths
-
-import portage
-
-portage._internal_caller = True
-from portage import os
-from portage.eapi import eapi_has_repo_deps
-from portage.util import writemsg, writemsg_stdout
-
-portage.proxy.lazyimport.lazyimport(
- globals(),
- "re",
- "subprocess",
- "_emerge.Package:Package",
- "_emerge.RootConfig:RootConfig",
- "_emerge.is_valid_package_atom:insert_category_into_atom",
- "portage.dbapi._expand_new_virt:expand_new_virt",
- "portage._sets.base:InternalPackageSet",
- "portage.util._eventloop.global_event_loop:global_event_loop",
- "portage.xml.metadata:MetaDataXML",
-)
+ ):
+ pym_paths = [
+ os.path.join(
+ os.path.dirname(os.path.dirname(os.path.realpath(__file__))), "lib"
+ )
+ ]
+ sys.path.insert(0, pym_paths[0])
+ else:
+ import sysconfig
+
+ pym_paths = [
+ os.path.join(sysconfig.get_path("purelib"), x)
+ for x in ("_emerge", "portage")
+ ]
+ # Avoid sandbox violations after Python upgrade.
+ if os.environ.get("SANDBOX_ON") == "1":
+ sandbox_write = os.environ.get("SANDBOX_WRITE", "").split(":")
+ for pym_path in pym_paths:
+ if pym_path not in sandbox_write:
+ sandbox_write.append(pym_path)
+ os.environ["SANDBOX_WRITE"] = ":".join(filter(None, sandbox_write))
+ del pym_path, sandbox_write
+ del pym_paths
+
+ import portage
+
+ portage._internal_caller = True
+ from portage import os
+ from portage.eapi import eapi_has_repo_deps
+ from portage.util import writemsg, writemsg_stdout
+
+ portage.proxy.lazyimport.lazyimport(
+ globals(),
+ "re",
+ "subprocess",
+ "_emerge.Package:Package",
+ "_emerge.RootConfig:RootConfig",
+ "_emerge.is_valid_package_atom:insert_category_into_atom",
+ "portage.dbapi._expand_new_virt:expand_new_virt",
+ "portage._sets.base:InternalPackageSet",
+ "portage.util._eventloop.global_event_loop:global_event_loop",
+ "portage.xml.metadata:MetaDataXML",
+ )
+ def eval_atom_use(atom):
+ if "USE" in os.environ:
+ use = frozenset(os.environ["USE"].split())
+ atom = atom.evaluate_conditionals(use)
+ return atom
-def eval_atom_use(atom):
- if "USE" in os.environ:
- use = frozenset(os.environ["USE"].split())
- atom = atom.evaluate_conditionals(use)
- return atom
-
-
-def uses_configroot(function):
- function.uses_configroot = True
- return function
-
-
-def uses_eroot(function):
- function.uses_eroot = True
- return function
-
-
-# global to hold all function docstrings to be used for argparse help.
-# Avoids python compilation level 2 optimization troubles.
-docstrings = {}
-
-# -----------------------------------------------------------------------------
-#
-# To add functionality to this tool, add a function below.
-#
-# The format for functions is:
-#
-# def function(argv):
-# <code>
-#
-# docstrings['function'] = """<list of options for this function>
-# <description of the function>
-# """
-# function.__doc__ = docstrings['function']
-#
-# "argv" is an array of the command line parameters provided after the command.
-#
-# Make sure you document the function in the right format. The documentation
-# is used to display help on the function.
-#
-# You do not need to add the function to any lists, this tool is introspective,
-# and will automaticly add a command by the same name as the function!
-#
-
-
-@uses_eroot
-def has_version(argv):
- if len(argv) < 2:
- print("ERROR: insufficient parameters!")
- return 3
-
- warnings = []
-
- allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
- try:
- atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
- except portage.exception.InvalidAtom:
- if atom_validate_strict:
- portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1], noiselevel=-1)
- return 2
- else:
- atom = argv[1]
- else:
- if atom_validate_strict:
- try:
- atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
- except portage.exception.InvalidAtom as e:
- warnings.append("QA Notice: {}: {}".format("has_version", e))
- atom = eval_atom_use(atom)
+ def uses_configroot(function):
+ function.uses_configroot = True
+ return function
- if warnings:
- elog("eqawarn", warnings)
+ def uses_eroot(function):
+ function.uses_eroot = True
+ return function
- try:
- mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
- if mylist:
- return 0
- else:
- return 1
- except KeyError:
- return 1
- except portage.exception.InvalidAtom:
- portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1], noiselevel=-1)
- return 2
+ # global to hold all function docstrings to be used for argparse help.
+ # Avoids trouble with python -OO (optimization level 2), which strips docstrings.
+ docstrings = {}
+ # -----------------------------------------------------------------------------
+ #
+ # To add functionality to this tool, add a function below.
+ #
+ # The format for functions is:
+ #
+ # def function(argv):
+ # <code>
+ #
+ # docstrings['function'] = """<list of options for this function>
+ # <description of the function>
+ # """
+ # function.__doc__ = docstrings['function']
+ #
+ # "argv" is an array of the command line parameters provided after the command.
+ #
+ # Make sure you document the function in the right format. The documentation
+ # is used to display help on the function.
+ #
+ # You do not need to add the function to any lists; this tool is introspective,
+ # and will automatically add a command by the same name as the function!
+ #
-docstrings[
- "has_version"
-] = """<eroot> <category/package>
- Return code 0 if it's available, 1 otherwise.
- """
-has_version.__doc__ = docstrings["has_version"]
+ @uses_eroot
+ def has_version(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!")
+ return 3
+ warnings = []
-@uses_eroot
-def best_version(argv):
- if len(argv) < 2:
- print("ERROR: insufficient parameters!")
- return 3
+ allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
+ except portage.exception.InvalidAtom:
+ if atom_validate_strict:
+ portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1], noiselevel=-1)
+ return 2
+ else:
+ atom = argv[1]
+ else:
+ if atom_validate_strict:
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
+ except portage.exception.InvalidAtom as e:
+ warnings.append("QA Notice: {}: {}".format("has_version", e))
+ atom = eval_atom_use(atom)
- warnings = []
+ if warnings:
+ elog("eqawarn", warnings)
- allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
- try:
- atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
- except portage.exception.InvalidAtom:
- if atom_validate_strict:
+ try:
+ mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+ if mylist:
+ return 0
+ else:
+ return 1
+ except KeyError:
+ return 1
+ except portage.exception.InvalidAtom:
portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1], noiselevel=-1)
return 2
- else:
- atom = argv[1]
- else:
- if atom_validate_strict:
- try:
- atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
- except portage.exception.InvalidAtom as e:
- warnings.append("QA Notice: {}: {}".format("best_version", e))
- atom = eval_atom_use(atom)
-
- if warnings:
- elog("eqawarn", warnings)
-
- try:
- mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
- print(portage.best(mylist))
- except KeyError:
- return 1
+ docstrings[
+ "has_version"
+ ] = """<eroot> <category/package>
+ Return code 0 if it's available, 1 otherwise.
+ """
+ has_version.__doc__ = docstrings["has_version"]
-docstrings[
- "best_version"
-] = """<eroot> <category/package>
- Returns highest installed matching category/package-version (without .ebuild).
- """
-best_version.__doc__ = docstrings["best_version"]
-
-
-@uses_eroot
-def mass_best_version(argv):
- if len(argv) < 2:
- print("ERROR: insufficient parameters!")
- return 2
- try:
- for pack in argv[1:]:
- mylist = portage.db[argv[0]]["vartree"].dbapi.match(pack)
- print("{}:{}".format(pack, portage.best(mylist)))
- except KeyError:
- return 1
+ @uses_eroot
+ def best_version(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!")
+ return 3
+
+ warnings = []
+ allow_repo = atom_validate_strict is False or eapi_has_repo_deps(eapi)
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo)
+ except portage.exception.InvalidAtom:
+ if atom_validate_strict:
+ portage.writemsg("ERROR: Invalid atom: '%s'\n" % argv[1], noiselevel=-1)
+ return 2
+ else:
+ atom = argv[1]
+ else:
+ if atom_validate_strict:
+ try:
+ atom = portage.dep.Atom(argv[1], allow_repo=allow_repo, eapi=eapi)
+ except portage.exception.InvalidAtom as e:
+ warnings.append("QA Notice: {}: {}".format("best_version", e))
+ atom = eval_atom_use(atom)
-docstrings[
- "mass_best_version"
-] = """<eroot> [<category/package>]+
- Returns category/package-version (without .ebuild).
- """
-mass_best_version.__doc__ = docstrings["mass_best_version"]
+ if warnings:
+ elog("eqawarn", warnings)
+ try:
+ mylist = portage.db[argv[0]]["vartree"].dbapi.match(atom)
+ print(portage.best(mylist))
+ except KeyError:
+ return 1
-@uses_eroot
-def metadata(argv):
- if len(argv) < 4:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 2
+ docstrings[
+ "best_version"
+ ] = """<eroot> <category/package>
+ Returns highest installed matching category/package-version (without .ebuild).
+ """
+ best_version.__doc__ = docstrings["best_version"]
+
+ @uses_eroot
+ def mass_best_version(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!")
+ return 2
+ try:
+ for pack in argv[1:]:
+ mylist = portage.db[argv[0]]["vartree"].dbapi.match(pack)
+ print("{}:{}".format(pack, portage.best(mylist)))
+ except KeyError:
+ return 1
- eroot, pkgtype, pkgspec = argv[0:3]
- metakeys = argv[3:]
- type_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
- if pkgtype not in type_map:
- print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
- return 1
- trees = portage.db
- repo = portage.dep.dep_getrepo(pkgspec)
- pkgspec = portage.dep.remove_slot(pkgspec)
- try:
- values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
- pkgspec, metakeys, myrepo=repo
- )
- writemsg_stdout("".join("%s\n" % x for x in values), noiselevel=-1)
- except KeyError:
- print("Package not found: '%s'" % pkgspec, file=sys.stderr)
- return 1
+ docstrings[
+ "mass_best_version"
+ ] = """<eroot> [<category/package>]+
+ Returns category/package-version (without .ebuild).
+ """
+ mass_best_version.__doc__ = docstrings["mass_best_version"]
+
+ @uses_eroot
+ def metadata(argv):
+ if len(argv) < 4:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 2
+ eroot, pkgtype, pkgspec = argv[0:3]
+ metakeys = argv[3:]
+ type_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
+ if pkgtype not in type_map:
+ print("Unrecognized package type: '%s'" % pkgtype, file=sys.stderr)
+ return 1
+ trees = portage.db
+ repo = portage.dep.dep_getrepo(pkgspec)
+ pkgspec = portage.dep.remove_slot(pkgspec)
+ try:
+ values = trees[eroot][type_map[pkgtype]].dbapi.aux_get(
+ pkgspec, metakeys, myrepo=repo
+ )
+ writemsg_stdout("".join("%s\n" % x for x in values), noiselevel=-1)
+ except KeyError:
+ print("Package not found: '%s'" % pkgspec, file=sys.stderr)
+ return 1
-docstrings[
- "metadata"
-] = """
-<eroot> <pkgtype> <category/package> [<key>]+
-Returns metadata values for the specified package.
-Available keys: %s
-""" % ",".join(
- sorted(x for x in portage.auxdbkeys)
-)
-metadata.__doc__ = docstrings["metadata"]
+ docstrings[
+ "metadata"
+ ] = """
+ <eroot> <pkgtype> <category/package> [<key>]+
+ Returns metadata values for the specified package.
+ Available keys: %s
+ """ % ",".join(
+ sorted(x for x in portage.auxdbkeys)
+ )
+ metadata.__doc__ = docstrings["metadata"]
+ @uses_eroot
+ def contents(argv):
+ if len(argv) != 2:
+ print("ERROR: expected 2 parameters, got %d!" % len(argv))
+ return 2
-@uses_eroot
-def contents(argv):
- if len(argv) != 2:
- print("ERROR: expected 2 parameters, got %d!" % len(argv))
- return 2
+ root, cpv = argv
+ vartree = portage.db[root]["vartree"]
+ if not vartree.dbapi.cpv_exists(cpv):
+ sys.stderr.write("Package not found: '%s'\n" % cpv)
+ return 1
+ cat, pkg = portage.catsplit(cpv)
+ db = portage.dblink(
+ cat, pkg, root, vartree.settings, treetype="vartree", vartree=vartree
+ )
+ writemsg_stdout(
+ "".join("%s\n" % x for x in sorted(db.getcontents())), noiselevel=-1
+ )
- root, cpv = argv
- vartree = portage.db[root]["vartree"]
- if not vartree.dbapi.cpv_exists(cpv):
- sys.stderr.write("Package not found: '%s'\n" % cpv)
- return 1
- cat, pkg = portage.catsplit(cpv)
- db = portage.dblink(
- cat, pkg, root, vartree.settings, treetype="vartree", vartree=vartree
- )
- writemsg_stdout(
- "".join("%s\n" % x for x in sorted(db.getcontents())), noiselevel=-1
- )
+ docstrings[
+ "contents"
+ ] = """<eroot> <category/package>
+ List the files that are installed for a given package, with
+ one file listed on each line. All file names will begin with
+ <eroot>.
+ """
+ contents.__doc__ = docstrings["contents"]
+
+ @uses_eroot
+ def owners(argv):
+ if len(argv) < 2:
+ sys.stderr.write("ERROR: insufficient parameters!\n")
+ sys.stderr.flush()
+ return 2
+ eroot = argv[0]
+ vardb = portage.db[eroot]["vartree"].dbapi
+ root = portage.settings["ROOT"]
-docstrings[
- "contents"
-] = """<eroot> <category/package>
- List the files that are installed for a given package, with
- one file listed on each line. All file names will begin with
- <eroot>.
- """
-contents.__doc__ = docstrings["contents"]
-
-
-@uses_eroot
-def owners(argv):
- if len(argv) < 2:
- sys.stderr.write("ERROR: insufficient parameters!\n")
- sys.stderr.flush()
- return 2
-
- eroot = argv[0]
- vardb = portage.db[eroot]["vartree"].dbapi
- root = portage.settings["ROOT"]
-
- cwd = None
- try:
- cwd = os.getcwd()
- except OSError:
- pass
-
- files = []
- orphan_abs_paths = set()
- orphan_basenames = set()
- for f in argv[1:]:
- f = portage.normalize_path(f)
- is_basename = os.sep not in f
- if not is_basename and f[:1] != os.sep:
- if cwd is None:
- sys.stderr.write("ERROR: cwd does not exist!\n")
+ cwd = None
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ pass
+
+ files = []
+ orphan_abs_paths = set()
+ orphan_basenames = set()
+ for f in argv[1:]:
+ f = portage.normalize_path(f)
+ is_basename = os.sep not in f
+ if not is_basename and f[:1] != os.sep:
+ if cwd is None:
+ sys.stderr.write("ERROR: cwd does not exist!\n")
+ sys.stderr.flush()
+ return 2
+ f = os.path.join(cwd, f)
+ f = portage.normalize_path(f)
+ if not is_basename and not f.startswith(eroot):
+ sys.stderr.write("ERROR: file paths must begin with <eroot>!\n")
sys.stderr.flush()
return 2
- f = os.path.join(cwd, f)
- f = portage.normalize_path(f)
- if not is_basename and not f.startswith(eroot):
- sys.stderr.write("ERROR: file paths must begin with <eroot>!\n")
- sys.stderr.flush()
- return 2
- if is_basename:
- files.append(f)
- orphan_basenames.add(f)
- else:
- files.append(f[len(root) - 1 :])
- orphan_abs_paths.add(f)
-
- owners = vardb._owners.get_owners(files)
-
- msg = []
- for pkg, owned_files in owners.items():
- cpv = pkg.mycpv
- msg.append("%s\n" % cpv)
- for f in sorted(owned_files):
- f_abs = os.path.join(root, f.lstrip(os.path.sep))
- msg.append("\t{}\n".format(f_abs))
- orphan_abs_paths.discard(f_abs)
- if orphan_basenames:
- orphan_basenames.discard(os.path.basename(f_abs))
-
- writemsg_stdout("".join(msg), noiselevel=-1)
-
- if orphan_abs_paths or orphan_basenames:
- orphans = []
- orphans.extend(orphan_abs_paths)
- orphans.extend(orphan_basenames)
- orphans.sort()
+ if is_basename:
+ files.append(f)
+ orphan_basenames.add(f)
+ else:
+ files.append(f[len(root) - 1 :])
+ orphan_abs_paths.add(f)
+
+ owners = vardb._owners.get_owners(files)
+
msg = []
- msg.append("None of the installed packages claim these files:\n")
- for f in orphans:
- msg.append("\t{}\n".format(f))
- sys.stderr.write("".join(msg))
- sys.stderr.flush()
+ for pkg, owned_files in owners.items():
+ cpv = pkg.mycpv
+ msg.append("%s\n" % cpv)
+ for f in sorted(owned_files):
+ f_abs = os.path.join(root, f.lstrip(os.path.sep))
+ msg.append("\t{}\n".format(f_abs))
+ orphan_abs_paths.discard(f_abs)
+ if orphan_basenames:
+ orphan_basenames.discard(os.path.basename(f_abs))
+
+ writemsg_stdout("".join(msg), noiselevel=-1)
+
+ if orphan_abs_paths or orphan_basenames:
+ orphans = []
+ orphans.extend(orphan_abs_paths)
+ orphans.extend(orphan_basenames)
+ orphans.sort()
+ msg = []
+ msg.append("None of the installed packages claim these files:\n")
+ for f in orphans:
+ msg.append("\t{}\n".format(f))
+ sys.stderr.write("".join(msg))
+ sys.stderr.flush()
- if owners:
- return 0
- return 1
-
-
-docstrings[
- "owners"
-] = """<eroot> [<filename>]+
- Given a list of files, print the packages that own the files and which
- files belong to each package. Files owned by a package are listed on
- the lines below it, indented by a single tab character (\\t). All file
- paths must either start with <eroot> or be a basename alone.
- Returns 1 if no owners could be found, and 0 otherwise.
- """
-owners.__doc__ = docstrings["owners"]
-
-
-@uses_eroot
-def is_protected(argv):
- if len(argv) != 2:
- sys.stderr.write("ERROR: expected 2 parameters, got %d!\n" % len(argv))
- sys.stderr.flush()
- return 2
-
- root, filename = argv
-
- err = sys.stderr
- cwd = None
- try:
- cwd = os.getcwd()
- except OSError:
- pass
-
- f = portage.normalize_path(filename)
- if not f.startswith(os.path.sep):
- if cwd is None:
- err.write("ERROR: cwd does not exist!\n")
- err.flush()
+ if owners:
+ return 0
+ return 1
+
+ docstrings[
+ "owners"
+ ] = """<eroot> [<filename>]+
+ Given a list of files, print the packages that own the files and which
+ files belong to each package. Files owned by a package are listed on
+ the lines below it, indented by a single tab character (\\t). All file
+ paths must either start with <eroot> or be a basename alone.
+ Returns 1 if no owners could be found, and 0 otherwise.
+ """
+ owners.__doc__ = docstrings["owners"]
+
+ @uses_eroot
+ def is_protected(argv):
+ if len(argv) != 2:
+ sys.stderr.write("ERROR: expected 2 parameters, got %d!\n" % len(argv))
+ sys.stderr.flush()
return 2
- f = os.path.join(cwd, f)
- f = portage.normalize_path(f)
-
- if not f.startswith(root):
- err.write("ERROR: file paths must begin with <eroot>!\n")
- err.flush()
- return 2
-
- from portage.util import ConfigProtect
-
- settings = portage.settings
- protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
- protect_mask = portage.util.shlex_split(settings.get("CONFIG_PROTECT_MASK", ""))
- protect_obj = ConfigProtect(
- root,
- protect,
- protect_mask,
- case_insensitive=("case-insensitive-fs" in settings.features),
- )
- if protect_obj.isprotected(f):
- return 0
- return 1
-
-
-docstrings[
- "is_protected"
-] = """<eroot> <filename>
- Given a single filename, return code 0 if it's protected, 1 otherwise.
- The filename must begin with <eroot>.
- """
-is_protected.__doc__ = docstrings["is_protected"]
-
-
-@uses_eroot
-def filter_protected(argv):
- if len(argv) != 1:
- sys.stderr.write("ERROR: expected 1 parameter, got %d!\n" % len(argv))
- sys.stderr.flush()
- return 2
-
- (root,) = argv
- out = sys.stdout
- err = sys.stderr
- cwd = None
- try:
- cwd = os.getcwd()
- except OSError:
- pass
-
- from portage.util import ConfigProtect
-
- settings = portage.settings
- protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
- protect_mask = portage.util.shlex_split(settings.get("CONFIG_PROTECT_MASK", ""))
- protect_obj = ConfigProtect(
- root,
- protect,
- protect_mask,
- case_insensitive=("case-insensitive-fs" in settings.features),
- )
- errors = 0
+ root, filename = argv
+
+ err = sys.stderr
+ cwd = None
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ pass
- for line in sys.stdin:
- filename = line.rstrip("\n")
f = portage.normalize_path(filename)
if not f.startswith(os.path.sep):
if cwd is None:
err.write("ERROR: cwd does not exist!\n")
err.flush()
- errors += 1
- continue
+ return 2
f = os.path.join(cwd, f)
f = portage.normalize_path(f)
if not f.startswith(root):
err.write("ERROR: file paths must begin with <eroot>!\n")
err.flush()
- errors += 1
- continue
-
- if protect_obj.isprotected(f):
- out.write("%s\n" % filename)
- out.flush()
-
- if errors:
- return 2
+ return 2
- return 0
+ from portage.util import ConfigProtect
+ settings = portage.settings
+ protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+ protect_mask = portage.util.shlex_split(settings.get("CONFIG_PROTECT_MASK", ""))
+ protect_obj = ConfigProtect(
+ root,
+ protect,
+ protect_mask,
+ case_insensitive=("case-insensitive-fs" in settings.features),
+ )
+ if protect_obj.isprotected(f):
+ return 0
+ return 1
-docstrings[
- "filter_protected"
-] = """<eroot>
- Read filenames from stdin and write them to stdout if they are protected.
- All filenames are delimited by \\n and must begin with <eroot>.
- """
-filter_protected.__doc__ = docstrings["filter_protected"]
+ docstrings[
+ "is_protected"
+ ] = """<eroot> <filename>
+ Given a single filename, return code 0 if it's protected, 1 otherwise.
+ The filename must begin with <eroot>.
+ """
+ is_protected.__doc__ = docstrings["is_protected"]
+
+ @uses_eroot
+ def filter_protected(argv):
+ if len(argv) != 1:
+ sys.stderr.write("ERROR: expected 1 parameter, got %d!\n" % len(argv))
+ sys.stderr.flush()
+ return 2
+ (root,) = argv
+ out = sys.stdout
+ err = sys.stderr
+ cwd = None
+ try:
+ cwd = os.getcwd()
+ except OSError:
+ pass
+
+ from portage.util import ConfigProtect
+
+ settings = portage.settings
+ protect = portage.util.shlex_split(settings.get("CONFIG_PROTECT", ""))
+ protect_mask = portage.util.shlex_split(settings.get("CONFIG_PROTECT_MASK", ""))
+ protect_obj = ConfigProtect(
+ root,
+ protect,
+ protect_mask,
+ case_insensitive=("case-insensitive-fs" in settings.features),
+ )
-@uses_eroot
-def best_visible(argv):
- if len(argv) < 2:
- writemsg("ERROR: insufficient parameters!\n", noiselevel=-1)
- return 2
+ errors = 0
- pkgtype = "ebuild"
- if len(argv) > 2:
- pkgtype = argv[1]
- atom = argv[2]
- else:
- atom = argv[1]
+ for line in sys.stdin:
+ filename = line.rstrip("\n")
+ f = portage.normalize_path(filename)
+ if not f.startswith(os.path.sep):
+ if cwd is None:
+ err.write("ERROR: cwd does not exist!\n")
+ err.flush()
+ errors += 1
+ continue
+ f = os.path.join(cwd, f)
+ f = portage.normalize_path(f)
- type_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
+ if not f.startswith(root):
+ err.write("ERROR: file paths must begin with <eroot>!\n")
+ err.flush()
+ errors += 1
+ continue
- if pkgtype not in type_map:
- writemsg("Unrecognized package type: '%s'\n" % pkgtype, noiselevel=-1)
- return 2
+ if protect_obj.isprotected(f):
+ out.write("%s\n" % filename)
+ out.flush()
- eroot = argv[0]
- db = portage.db[eroot][type_map[pkgtype]].dbapi
+ if errors:
+ return 2
- try:
- atom = portage.dep_expand(atom, mydb=db, settings=portage.settings)
- except portage.exception.InvalidAtom:
- writemsg("ERROR: Invalid atom: '%s'\n" % atom, noiselevel=-1)
- return 2
+ return 0
- root_config = RootConfig(portage.settings, portage.db[eroot], None)
+ docstrings[
+ "filter_protected"
+ ] = """<eroot>
+ Read filenames from stdin and write them to stdout if they are protected.
+ All filenames are delimited by \\n and must begin with <eroot>.
+ """
+ filter_protected.__doc__ = docstrings["filter_protected"]
+
+ @uses_eroot
+ def best_visible(argv):
+ if len(argv) < 2:
+ writemsg("ERROR: insufficient parameters!\n", noiselevel=-1)
+ return 2
- if hasattr(db, "xmatch"):
- cpv_list = db.xmatch("match-all-cpv-only", atom)
- else:
- cpv_list = db.match(atom)
-
- if cpv_list:
- # reversed, for descending order
- cpv_list.reverse()
- # verify match, since the atom may match the package
- # for a given cpv from one repo but not another, and
- # we can use match-all-cpv-only to avoid redundant
- # metadata access.
- atom_set = InternalPackageSet(initial_atoms=(atom,))
-
- if atom.repo is None and hasattr(db, "getRepositories"):
- repo_list = db.getRepositories()
+ pkgtype = "ebuild"
+ if len(argv) > 2:
+ pkgtype = argv[1]
+ atom = argv[2]
else:
- repo_list = [atom.repo]
-
- for cpv in cpv_list:
- for repo in repo_list:
- try:
- metadata = dict(
- zip(
- Package.metadata_keys,
- db.aux_get(cpv, Package.metadata_keys, myrepo=repo),
- )
- )
- except KeyError:
- continue
- pkg = Package(
- built=(pkgtype != "ebuild"),
- cpv=cpv,
- installed=(pkgtype == "installed"),
- metadata=metadata,
- root_config=root_config,
- type_name=pkgtype,
- )
- if not atom_set.findAtomForPackage(pkg):
- continue
-
- if pkg.visible:
- writemsg_stdout("{}\n".format(pkg.cpv), noiselevel=-1)
- return os.EX_OK
-
- # No package found, write out an empty line.
- writemsg_stdout("\n", noiselevel=-1)
-
- return 1
-
+ atom = argv[1]
-docstrings[
- "best_visible"
-] = """<eroot> [pkgtype] <atom>
- Returns category/package-version (without .ebuild).
- The pkgtype argument defaults to "ebuild" if unspecified,
- otherwise it must be one of ebuild, binary, or installed.
- """
-best_visible.__doc__ = docstrings["best_visible"]
+ type_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
+ if pkgtype not in type_map:
+ writemsg("Unrecognized package type: '%s'\n" % pkgtype, noiselevel=-1)
+ return 2
-@uses_eroot
-def mass_best_visible(argv):
- type_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
+ eroot = argv[0]
+ db = portage.db[eroot][type_map[pkgtype]].dbapi
- if len(argv) < 2:
- print("ERROR: insufficient parameters!")
- return 2
- try:
- root = argv.pop(0)
- pkgtype = "ebuild"
- if argv[0] in type_map:
- pkgtype = argv.pop(0)
- for pack in argv:
- writemsg_stdout("%s:" % pack, noiselevel=-1)
- best_visible([root, pkgtype, pack])
- except KeyError:
- return 1
+ try:
+ atom = portage.dep_expand(atom, mydb=db, settings=portage.settings)
+ except portage.exception.InvalidAtom:
+ writemsg("ERROR: Invalid atom: '%s'\n" % atom, noiselevel=-1)
+ return 2
+ root_config = RootConfig(portage.settings, portage.db[eroot], None)
-docstrings[
- "mass_best_visible"
-] = """<eroot> [<type>] [<category/package>]+
- Returns category/package-version (without .ebuild).
- The pkgtype argument defaults to "ebuild" if unspecified,
- otherwise it must be one of ebuild, binary, or installed.
- """
-mass_best_visible.__doc__ = docstrings["mass_best_visible"]
-
-
-@uses_eroot
-def all_best_visible(argv):
- if len(argv) < 1:
- sys.stderr.write("ERROR: insufficient parameters!\n")
- sys.stderr.flush()
- return 2
-
- # print portage.db[argv[0]]["porttree"].dbapi.cp_all()
- for pkg in portage.db[argv[0]]["porttree"].dbapi.cp_all():
- mybest = portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
- if mybest:
- print(mybest)
-
-
-docstrings[
- "all_best_visible"
-] = """<eroot>
- Returns all best_visible packages (without .ebuild).
- """
-all_best_visible.__doc__ = docstrings["all_best_visible"]
-
-
-@uses_eroot
-def match(argv):
- if len(argv) != 2:
- print("ERROR: expected 2 parameters, got %d!" % len(argv))
- return 2
- root, atom = argv
- if not atom:
- atom = "*/*"
-
- vardb = portage.db[root]["vartree"].dbapi
- try:
- atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
- except portage.exception.InvalidAtom:
- # maybe it's valid but missing category
- atom = portage.dep_expand(atom, mydb=vardb, settings=vardb.settings)
-
- if atom.extended_syntax:
- if atom == "*/*":
- results = vardb.cpv_all()
+ if hasattr(db, "xmatch"):
+ cpv_list = db.xmatch("match-all-cpv-only", atom)
else:
- results = []
- require_metadata = atom.slot or atom.repo
- for cpv in vardb.cpv_all():
-
- if not portage.match_from_list(atom, [cpv]):
- continue
+ cpv_list = db.match(atom)
+
+ if cpv_list:
+ # reversed, for descending order
+ cpv_list.reverse()
+ # verify match, since the atom may match the package
+ # for a given cpv from one repo but not another, and
+ # we can use match-all-cpv-only to avoid redundant
+ # metadata access.
+ atom_set = InternalPackageSet(initial_atoms=(atom,))
+
+ if atom.repo is None and hasattr(db, "getRepositories"):
+ repo_list = db.getRepositories()
+ else:
+ repo_list = [atom.repo]
- if require_metadata:
+ for cpv in cpv_list:
+ for repo in repo_list:
try:
- cpv = vardb._pkg_str(cpv, atom.repo)
- except (KeyError, portage.exception.InvalidData):
+ metadata = dict(
+ zip(
+ Package.metadata_keys,
+ db.aux_get(cpv, Package.metadata_keys, myrepo=repo),
+ )
+ )
+ except KeyError:
continue
- if not portage.match_from_list(atom, [cpv]):
+ pkg = Package(
+ built=(pkgtype != "ebuild"),
+ cpv=cpv,
+ installed=(pkgtype == "installed"),
+ metadata=metadata,
+ root_config=root_config,
+ type_name=pkgtype,
+ )
+ if not atom_set.findAtomForPackage(pkg):
continue
- results.append(cpv)
-
- results.sort()
- else:
- results = vardb.match(atom)
- for cpv in results:
- print(cpv)
-
-
-docstrings[
- "match"
-] = """<eroot> <atom>
- Returns a \\n separated list of category/package-version.
- When given an empty string, all installed packages will
- be listed.
- """
-match.__doc__ = docstrings["match"]
-
-
-@uses_eroot
-def expand_virtual(argv):
- if len(argv) != 2:
- writemsg("ERROR: expected 2 parameters, got %d!\n" % len(argv), noiselevel=-1)
- return 2
-
- root, atom = argv
-
- try:
- results = list(expand_new_virt(portage.db[root]["vartree"].dbapi, atom))
- except portage.exception.InvalidAtom:
- writemsg("ERROR: Invalid atom: '%s'\n" % atom, noiselevel=-1)
- return 2
-
- results.sort()
- for x in results:
- if not x.blocker:
- writemsg_stdout("{}\n".format(x))
-
- return os.EX_OK
-
-
-docstrings[
- "expand_virtual"
-] = """<eroot> <atom>
- Returns a \\n separated list of atoms expanded from a
- given virtual atom (GLEP 37 virtuals only),
- excluding blocker atoms. Satisfied
- virtual atoms are not included in the output, since
- they are expanded to real atoms which are displayed.
- Unsatisfied virtual atoms are displayed without
- any expansion. The "match" command can be used to
- resolve the returned atoms to specific installed
- packages.
- """
-expand_virtual.__doc__ = docstrings["expand_virtual"]
-
-
-def vdb_path(_argv):
- out = sys.stdout
- out.write(os.path.join(portage.settings["EROOT"], portage.VDB_PATH) + "\n")
- out.flush()
- return os.EX_OK
-
-
-docstrings[
- "vdb_path"
-] = """
- Returns the path used for the var(installed) package database for the
- set environment/configuration options.
- """
-vdb_path.__doc__ = docstrings["vdb_path"]
-
-
-def gentoo_mirrors(_argv):
- print(portage.settings["GENTOO_MIRRORS"])
-
-
-docstrings[
- "gentoo_mirrors"
-] = """
- Returns the mirrors set to use in the portage configuration.
- """
-gentoo_mirrors.__doc__ = docstrings["gentoo_mirrors"]
-
-
-@uses_configroot
-@uses_eroot
-def repositories_configuration(argv):
- if len(argv) < 1:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 3
- sys.stdout.write(
- portage.db[argv[0]]["vartree"].settings.repositories.config_string()
- )
- sys.stdout.flush()
-
-
-docstrings[
- "repositories_configuration"
-] = """<eroot>
- Returns the configuration of repositories.
- """
-repositories_configuration.__doc__ = docstrings["repositories_configuration"]
-
-
-@uses_configroot
-@uses_eroot
-def repos_config(argv):
- return repositories_configuration(argv)
-
-
-docstrings[
- "repos_config"
-] = """
- <eroot>
- This is an alias for the repositories_configuration command.
- """
-repos_config.__doc__ = docstrings["repos_config"]
-
-
-def portdir(_argv):
- print(
- "WARNING: 'portageq portdir' is deprecated. Use the get_repo_path "
- "command instead. eg: "
- "'portageq get_repo_path / gentoo' instead.",
- file=sys.stderr,
- )
- print(portage.settings["PORTDIR"])
-
+ if pkg.visible:
+ writemsg_stdout("{}\n".format(pkg.cpv), noiselevel=-1)
+ return os.EX_OK
-docstrings[
- "portdir"
-] = """
- Returns the PORTDIR path.
- Deprecated in favor of get_repo_path command.
- """
-portdir.__doc__ = docstrings["portdir"]
+ # No package found, write out an empty line.
+ writemsg_stdout("\n", noiselevel=-1)
+ return 1
-def config_protect(_argv):
- print(portage.settings["CONFIG_PROTECT"])
-
-
-docstrings[
- "config_protect"
-] = """
- Returns the CONFIG_PROTECT paths.
- """
-config_protect.__doc__ = docstrings["config_protect"]
-
-
-def config_protect_mask(_argv):
- print(portage.settings["CONFIG_PROTECT_MASK"])
-
-
-docstrings[
- "config_protect_mask"
-] = """
- Returns the CONFIG_PROTECT_MASK paths.
- """
-config_protect_mask.__doc__ = docstrings["config_protect_mask"]
-
-
-def portdir_overlay(_argv):
- print(
- "WARNING: 'portageq portdir_overlay' is deprecated. Use the get_repos"
- " and get_repo_path commands or the repos_config command instead. eg: "
- "'portageq repos_config /'",
- file=sys.stderr,
- )
- print(portage.settings["PORTDIR_OVERLAY"])
-
-
-docstrings[
- "portdir_overlay"
-] = """
- Returns the PORTDIR_OVERLAY path.
- Deprecated in favor of get_repos & get_repo_path or repos_config commands.
- """
-portdir_overlay.__doc__ = docstrings["portdir_overlay"]
-
-
-def pkgdir(_argv):
- print(portage.settings["PKGDIR"])
-
-
-docstrings[
- "pkgdir"
-] = """
- Returns the PKGDIR path.
- """
-pkgdir.__doc__ = docstrings["pkgdir"]
-
+ docstrings[
+ "best_visible"
+ ] = """<eroot> [pkgtype] <atom>
+ Returns category/package-version (without .ebuild).
+ The pkgtype argument defaults to "ebuild" if unspecified,
+ otherwise it must be one of ebuild, binary, or installed.
+ """
+ best_visible.__doc__ = docstrings["best_visible"]
+
+ @uses_eroot
+ def mass_best_visible(argv):
+ type_map = {"ebuild": "porttree", "binary": "bintree", "installed": "vartree"}
+
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!")
+ return 2
+ try:
+ root = argv.pop(0)
+ pkgtype = "ebuild"
+ if argv[0] in type_map:
+ pkgtype = argv.pop(0)
+ for pack in argv:
+ writemsg_stdout("%s:" % pack, noiselevel=-1)
+ best_visible([root, pkgtype, pack])
+ except KeyError:
+ return 1
-def distdir(_argv):
- print(portage.settings["DISTDIR"])
+ docstrings[
+ "mass_best_visible"
+ ] = """<eroot> [<type>] [<category/package>]+
+ Returns category/package-version (without .ebuild).
+ The pkgtype argument defaults to "ebuild" if unspecified,
+ otherwise it must be one of ebuild, binary, or installed.
+ """
+ mass_best_visible.__doc__ = docstrings["mass_best_visible"]
+
+ @uses_eroot
+ def all_best_visible(argv):
+ if len(argv) < 1:
+ sys.stderr.write("ERROR: insufficient parameters!\n")
+ sys.stderr.flush()
+ return 2
+ # print portage.db[argv[0]]["porttree"].dbapi.cp_all()
+ for pkg in portage.db[argv[0]]["porttree"].dbapi.cp_all():
+ mybest = portage.best(portage.db[argv[0]]["porttree"].dbapi.match(pkg))
+ if mybest:
+ print(mybest)
+
+ docstrings[
+ "all_best_visible"
+ ] = """<eroot>
+ Returns all best_visible packages (without .ebuild).
+ """
+ all_best_visible.__doc__ = docstrings["all_best_visible"]
+
+ @uses_eroot
+ def match(argv):
+ if len(argv) != 2:
+ print("ERROR: expected 2 parameters, got %d!" % len(argv))
+ return 2
+ root, atom = argv
+ if not atom:
+ atom = "*/*"
-docstrings[
- "distdir"
-] = """
- Returns the DISTDIR path.
- """
-distdir.__doc__ = docstrings["distdir"]
+ vardb = portage.db[root]["vartree"].dbapi
+ try:
+ atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
+ except portage.exception.InvalidAtom:
+ # maybe it's valid but missing category
+ atom = portage.dep_expand(atom, mydb=vardb, settings=vardb.settings)
+ if atom.extended_syntax:
+ if atom == "*/*":
+ results = vardb.cpv_all()
+ else:
+ results = []
+ require_metadata = atom.slot or atom.repo
+ for cpv in vardb.cpv_all():
-def colormap(_argv):
- print(portage.output.colormap())
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+ if require_metadata:
+ try:
+ cpv = vardb._pkg_str(cpv, atom.repo)
+ except (KeyError, portage.exception.InvalidData):
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
-docstrings[
- "colormap"
-] = """
- Display the color.map as environment variables.
- """
-colormap.__doc__ = docstrings["colormap"]
+ results.append(cpv)
+ results.sort()
+ else:
+ results = vardb.match(atom)
+ for cpv in results:
+ print(cpv)
+
+ docstrings[
+ "match"
+ ] = """<eroot> <atom>
+ Returns a \\n separated list of category/package-version.
+ When given an empty string, all installed packages will
+ be listed.
+ """
+ match.__doc__ = docstrings["match"]
+
+ @uses_eroot
+ def expand_virtual(argv):
+ if len(argv) != 2:
+ writemsg(
+ "ERROR: expected 2 parameters, got %d!\n" % len(argv), noiselevel=-1
+ )
+ return 2
-def envvar(argv):
- verbose = "-v" in argv
- if verbose:
- argv.pop(argv.index("-v"))
+ root, atom = argv
- if len(argv) == 0:
- print("ERROR: insufficient parameters!")
- return 2
+ try:
+ results = list(expand_new_virt(portage.db[root]["vartree"].dbapi, atom))
+ except portage.exception.InvalidAtom:
+ writemsg("ERROR: Invalid atom: '%s'\n" % atom, noiselevel=-1)
+ return 2
- exit_status = 0
+ results.sort()
+ for x in results:
+ if not x.blocker:
+ writemsg_stdout("{}\n".format(x))
- for arg in argv:
- if arg in ("PORTDIR", "PORTDIR_OVERLAY", "SYNC"):
- print(
- "WARNING: 'portageq envvar %s' is deprecated. Use any of "
- "'get_repos, get_repo_path, repos_config' instead." % arg,
- file=sys.stderr,
- )
+ return os.EX_OK
- value = portage.settings.get(arg)
- if value is None:
- value = ""
- exit_status = 1
+ docstrings[
+ "expand_virtual"
+ ] = """<eroot> <atom>
+ Returns a \\n separated list of atoms expanded from a
+ given virtual atom (GLEP 37 virtuals only),
+ excluding blocker atoms. Satisfied
+ virtual atoms are not included in the output, since
+ they are expanded to real atoms which are displayed.
+ Unsatisfied virtual atoms are displayed without
+ any expansion. The "match" command can be used to
+ resolve the returned atoms to specific installed
+ packages.
+ """
+ expand_virtual.__doc__ = docstrings["expand_virtual"]
+
+ def vdb_path(_argv):
+ out = sys.stdout
+ out.write(os.path.join(portage.settings["EROOT"], portage.VDB_PATH) + "\n")
+ out.flush()
+ return os.EX_OK
+ docstrings[
+ "vdb_path"
+ ] = """
+ Returns the path used for the var(installed) package database for the
+ set environment/configuration options.
+ """
+ vdb_path.__doc__ = docstrings["vdb_path"]
+
+ def gentoo_mirrors(_argv):
+ print(portage.settings["GENTOO_MIRRORS"])
+
+ docstrings[
+ "gentoo_mirrors"
+ ] = """
+ Returns the mirrors set to use in the portage configuration.
+ """
+ gentoo_mirrors.__doc__ = docstrings["gentoo_mirrors"]
+
+ @uses_configroot
+ @uses_eroot
+ def repositories_configuration(argv):
+ if len(argv) < 1:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ sys.stdout.write(
+ portage.db[argv[0]]["vartree"].settings.repositories.config_string()
+ )
+ sys.stdout.flush()
+
+ docstrings[
+ "repositories_configuration"
+ ] = """<eroot>
+ Returns the configuration of repositories.
+ """
+ repositories_configuration.__doc__ = docstrings["repositories_configuration"]
+
+ @uses_configroot
+ @uses_eroot
+ def repos_config(argv):
+ return repositories_configuration(argv)
+
+ docstrings[
+ "repos_config"
+ ] = """
+ <eroot>
+ This is an alias for the repositories_configuration command.
+ """
+ repos_config.__doc__ = docstrings["repos_config"]
+
+ def portdir(_argv):
+ print(
+ "WARNING: 'portageq portdir' is deprecated. Use the get_repo_path "
+ "command instead. eg: "
+ "'portageq get_repo_path / gentoo' instead.",
+ file=sys.stderr,
+ )
+ print(portage.settings["PORTDIR"])
+
+ docstrings[
+ "portdir"
+ ] = """
+ Returns the PORTDIR path.
+ Deprecated in favor of get_repo_path command.
+ """
+ portdir.__doc__ = docstrings["portdir"]
+
+ def config_protect(_argv):
+ print(portage.settings["CONFIG_PROTECT"])
+
+ docstrings[
+ "config_protect"
+ ] = """
+ Returns the CONFIG_PROTECT paths.
+ """
+ config_protect.__doc__ = docstrings["config_protect"]
+
+ def config_protect_mask(_argv):
+ print(portage.settings["CONFIG_PROTECT_MASK"])
+
+ docstrings[
+ "config_protect_mask"
+ ] = """
+ Returns the CONFIG_PROTECT_MASK paths.
+ """
+ config_protect_mask.__doc__ = docstrings["config_protect_mask"]
+
+ def portdir_overlay(_argv):
+ print(
+ "WARNING: 'portageq portdir_overlay' is deprecated. Use the get_repos"
+ " and get_repo_path commands or the repos_config command instead. eg: "
+ "'portageq repos_config /'",
+ file=sys.stderr,
+ )
+ print(portage.settings["PORTDIR_OVERLAY"])
+
+ docstrings[
+ "portdir_overlay"
+ ] = """
+ Returns the PORTDIR_OVERLAY path.
+ Deprecated in favor of get_repos & get_repo_path or repos_config commands.
+ """
+ portdir_overlay.__doc__ = docstrings["portdir_overlay"]
+
+ def pkgdir(_argv):
+ print(portage.settings["PKGDIR"])
+
+ docstrings[
+ "pkgdir"
+ ] = """
+ Returns the PKGDIR path.
+ """
+ pkgdir.__doc__ = docstrings["pkgdir"]
+
+ def distdir(_argv):
+ print(portage.settings["DISTDIR"])
+
+ docstrings[
+ "distdir"
+ ] = """
+ Returns the DISTDIR path.
+ """
+ distdir.__doc__ = docstrings["distdir"]
+
+ def colormap(_argv):
+ print(portage.output.colormap())
+
+ docstrings[
+ "colormap"
+ ] = """
+ Display the color.map as environment variables.
+ """
+ colormap.__doc__ = docstrings["colormap"]
+
+ def envvar(argv):
+ verbose = "-v" in argv
if verbose:
- print(arg + "=" + portage._shell_quote(value))
- else:
- print(value)
+ argv.pop(argv.index("-v"))
- return exit_status
+ if len(argv) == 0:
+ print("ERROR: insufficient parameters!")
+ return 2
+ exit_status = 0
-docstrings[
- "envvar"
-] = """<variable>+
- Returns a specific environment variable as exists prior to ebuild.sh.
- Similar to: emerge --verbose --info | grep -E '^<variable>='
- """
-envvar.__doc__ = docstrings["envvar"]
+ for arg in argv:
+ if arg in ("PORTDIR", "PORTDIR_OVERLAY", "SYNC"):
+ print(
+ "WARNING: 'portageq envvar %s' is deprecated. Use any of "
+ "'get_repos, get_repo_path, repos_config' instead." % arg,
+ file=sys.stderr,
+ )
+ value = portage.settings.get(arg)
+ if value is None:
+ value = ""
+ exit_status = 1
-@uses_configroot
-@uses_eroot
-def get_repos(argv):
- if len(argv) < 1:
- print("ERROR: insufficient parameters!")
- return 2
- print(
- " ".join(
- reversed(portage.db[argv[0]]["vartree"].settings.repositories.prepos_order)
+ if verbose:
+ print(arg + "=" + portage._shell_quote(value))
+ else:
+ print(value)
+
+ return exit_status
+
+ docstrings[
+ "envvar"
+ ] = """<variable>+
+ Returns a specific environment variable as it exists prior to ebuild.sh.
+ Similar to: emerge --verbose --info | grep -E '^<variable>='
+ """
+ envvar.__doc__ = docstrings["envvar"]
+
+ @uses_configroot
+ @uses_eroot
+ def get_repos(argv):
+ if len(argv) < 1:
+ print("ERROR: insufficient parameters!")
+ return 2
+ print(
+ " ".join(
+ reversed(
+ portage.db[argv[0]]["vartree"].settings.repositories.prepos_order
+ )
+ )
)
- )
-
-docstrings[
- "get_repos"
-] = """<eroot>
- Returns all repos with names (repo_name file) argv[0] = ${EROOT}
- """
-get_repos.__doc__ = docstrings["get_repos"]
-
-
-@uses_configroot
-@uses_eroot
-def master_repositories(argv):
- if len(argv) < 2:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 3
- for arg in argv[1:]:
- if portage.dep._repo_name_re.match(arg) is None:
- print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ docstrings[
+ "get_repos"
+ ] = """<eroot>
+ Returns all repos with names (repo_name file); argv[0] = ${EROOT}
+ """
+ get_repos.__doc__ = docstrings["get_repos"]
+
+ @uses_configroot
+ @uses_eroot
+ def master_repositories(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ print(" ".join(x.name for x in repo.masters))
+
+ docstrings[
+ "master_repositories"
+ ] = """<eroot> <repo_id>+
+ Returns space-separated list of master repositories for specified repository.
+ """
+ master_repositories.__doc__ = docstrings["master_repositories"]
+
+ @uses_configroot
+ @uses_eroot
+ def master_repos(argv):
+ return master_repositories(argv)
+
+ docstrings[
+ "master_repos"
+ ] = """<eroot> <repo_id>+
+ This is an alias for the master_repositories command.
+ """
+ master_repos.__doc__ = docstrings["master_repos"]
+
+ @uses_configroot
+ @uses_eroot
+ def get_repo_path(argv):
+
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ path = portage.db[argv[0]]["vartree"].settings.repositories.treemap.get(arg)
+ if path is None:
+ print("")
+ return 1
+ print(path)
+
+ docstrings[
+ "get_repo_path"
+ ] = """<eroot> <repo_id>+
+ Returns the path to the repo named argv[1]; argv[0] = ${EROOT}
+ """
+ get_repo_path.__doc__ = docstrings["get_repo_path"]
+
+ @uses_eroot
+ def available_eclasses(argv):
+ if len(argv) < 2:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ for arg in argv[1:]:
+ if portage.dep._repo_name_re.match(arg) is None:
+ print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ return 2
+ try:
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ except KeyError:
+ print("")
+ return 1
+ else:
+ print(" ".join(sorted(repo.eclass_db.eclasses)))
+
+ docstrings[
+ "available_eclasses"
+ ] = """<eroot> <repo_id>+
+ Returns space-separated list of available eclasses for specified repository.
+ """
+ available_eclasses.__doc__ = docstrings["available_eclasses"]
+
+ @uses_eroot
+ def eclass_path(argv):
+ if len(argv) < 3:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ if portage.dep._repo_name_re.match(argv[1]) is None:
+ print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
return 2
try:
- repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
except KeyError:
print("")
return 1
else:
- print(" ".join(x.name for x in repo.masters))
-
-
-docstrings[
- "master_repositories"
-] = """<eroot> <repo_id>+
- Returns space-separated list of master repositories for specified repository.
- """
-master_repositories.__doc__ = docstrings["master_repositories"]
-
-
-@uses_configroot
-@uses_eroot
-def master_repos(argv):
- return master_repositories(argv)
-
-
-docstrings[
- "master_repos"
-] = """<eroot> <repo_id>+
- This is an alias for the master_repositories command.
- """
-master_repos.__doc__ = docstrings["master_repos"]
-
-
-@uses_configroot
-@uses_eroot
-def get_repo_path(argv):
-
- if len(argv) < 2:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 3
- for arg in argv[1:]:
- if portage.dep._repo_name_re.match(arg) is None:
- print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
- return 2
- path = portage.db[argv[0]]["vartree"].settings.repositories.treemap.get(arg)
- if path is None:
- print("")
- return 1
- print(path)
-
-
-docstrings[
- "get_repo_path"
-] = """<eroot> <repo_id>+
- Returns the path to the repo named argv[1], argv[0] = ${EROOT}
- """
-get_repo_path.__doc__ = docstrings["get_repo_path"]
-
-
-@uses_eroot
-def available_eclasses(argv):
- if len(argv) < 2:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 3
- for arg in argv[1:]:
- if portage.dep._repo_name_re.match(arg) is None:
- print("ERROR: invalid repository: %s" % arg, file=sys.stderr)
+ retval = 0
+ for arg in argv[2:]:
+ try:
+ eclass = repo.eclass_db.eclasses[arg]
+ except KeyError:
+ print("")
+ retval = 1
+ else:
+ print(eclass.location)
+ return retval
+
+ docstrings[
+ "eclass_path"
+ ] = """<eroot> <repo_id> <eclass>+
+ Returns the path to specified eclass for specified repository.
+ """
+ eclass_path.__doc__ = docstrings["eclass_path"]
+
+ @uses_eroot
+ def license_path(argv):
+ if len(argv) < 3:
+ print("ERROR: insufficient parameters!", file=sys.stderr)
+ return 3
+ if portage.dep._repo_name_re.match(argv[1]) is None:
+ print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
return 2
try:
- repo = portage.db[argv[0]]["vartree"].settings.repositories[arg]
+ repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
except KeyError:
print("")
return 1
else:
- print(" ".join(sorted(repo.eclass_db.eclasses)))
-
-
-docstrings[
- "available_eclasses"
-] = """<eroot> <repo_id>+
- Returns space-separated list of available eclasses for specified repository.
- """
-available_eclasses.__doc__ = docstrings["available_eclasses"]
-
-
-@uses_eroot
-def eclass_path(argv):
- if len(argv) < 3:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 3
- if portage.dep._repo_name_re.match(argv[1]) is None:
- print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
- return 2
- try:
- repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
- except KeyError:
- print("")
- return 1
- else:
- retval = 0
- for arg in argv[2:]:
- try:
- eclass = repo.eclass_db.eclasses[arg]
- except KeyError:
- print("")
- retval = 1
- else:
- print(eclass.location)
- return retval
-
-
-docstrings[
- "eclass_path"
-] = """<eroot> <repo_id> <eclass>+
- Returns the path to specified eclass for specified repository.
- """
-eclass_path.__doc__ = docstrings["eclass_path"]
-
-
-@uses_eroot
-def license_path(argv):
- if len(argv) < 3:
- print("ERROR: insufficient parameters!", file=sys.stderr)
- return 3
- if portage.dep._repo_name_re.match(argv[1]) is None:
- print("ERROR: invalid repository: %s" % argv[1], file=sys.stderr)
- return 2
- try:
- repo = portage.db[argv[0]]["vartree"].settings.repositories[argv[1]]
- except KeyError:
- print("")
- return 1
- else:
- retval = 0
- for arg in argv[2:]:
- eclass_path = ""
- paths = reversed(
- [
- os.path.join(x.location, "licenses", arg)
- for x in list(repo.masters) + [repo]
- ]
- )
- for path in paths:
- if os.path.exists(path):
- eclass_path = path
+ retval = 0
+ for arg in argv[2:]:
+ eclass_path = ""
+ paths = reversed(
+ [
+ os.path.join(x.location, "licenses", arg)
+ for x in list(repo.masters) + [repo]
+ ]
+ )
+ for path in paths:
+ if os.path.exists(path):
+ eclass_path = path
+ break
+ if eclass_path == "":
+ retval = 1
+ print(eclass_path)
+ return retval
+
+ docstrings[
+ "license_path"
+ ] = """<eroot> <repo_id> <license>+
+ Returns the path to specified license for specified repository.
+ """
+ license_path.__doc__ = docstrings["license_path"]
+
+ @uses_eroot
+ def list_preserved_libs(argv):
+ if len(argv) != 1:
+ print("ERROR: wrong number of arguments")
+ return 2
+ mylibs = portage.db[argv[0]]["vartree"].dbapi._plib_registry.getPreservedLibs()
+ rValue = 1
+ msg = []
+ for cpv in sorted(mylibs):
+ msg.append(cpv)
+ for path in mylibs[cpv]:
+ msg.append(" " + path)
+ rValue = 0
+ msg.append("\n")
+ writemsg_stdout("".join(msg), noiselevel=-1)
+ return rValue
+
+ docstrings[
+ "list_preserved_libs"
+ ] = """<eroot>
+ Print a list of libraries preserved during a package update in the form
+ package: path. Returns 1 if no preserved libraries could be found,
+ 0 otherwise.
+ """
+ list_preserved_libs.__doc__ = docstrings["list_preserved_libs"]
+
+ class MaintainerEmailMatcher:
+ def __init__(self, maintainer_emails):
+ self._re = re.compile("^(%s)$" % "|".join(maintainer_emails), re.I)
+
+ def __call__(self, metadata_xml):
+ match = False
+ matcher = self._re.match
+ for x in metadata_xml.maintainers():
+ if x.email is not None and matcher(x.email) is not None:
+ match = True
break
- if eclass_path == "":
- retval = 1
- print(eclass_path)
- return retval
-
-
-docstrings[
- "license_path"
-] = """<eroot> <repo_id> <license>+
- Returns the path to specified license for specified repository.
- """
-license_path.__doc__ = docstrings["license_path"]
-
-
-@uses_eroot
-def list_preserved_libs(argv):
- if len(argv) != 1:
- print("ERROR: wrong number of arguments")
- return 2
- mylibs = portage.db[argv[0]]["vartree"].dbapi._plib_registry.getPreservedLibs()
- rValue = 1
- msg = []
- for cpv in sorted(mylibs):
- msg.append(cpv)
- for path in mylibs[cpv]:
- msg.append(" " + path)
- rValue = 0
- msg.append("\n")
- writemsg_stdout("".join(msg), noiselevel=-1)
- return rValue
-
-
-docstrings[
- "list_preserved_libs"
-] = """<eroot>
- Print a list of libraries preserved during a package update in the form
- package: path. Returns 1 if no preserved libraries could be found,
- 0 otherwise.
- """
-list_preserved_libs.__doc__ = docstrings["list_preserved_libs"]
-
-
-class MaintainerEmailMatcher:
- def __init__(self, maintainer_emails):
- self._re = re.compile("^(%s)$" % "|".join(maintainer_emails), re.I)
-
- def __call__(self, metadata_xml):
- match = False
- matcher = self._re.match
- for x in metadata_xml.maintainers():
- if x.email is not None and matcher(x.email) is not None:
- match = True
- break
- return match
-
-
-# Match if metadata.xml contains no maintainer (orphaned package)
-def match_orphaned(metadata_xml):
- if not metadata_xml.maintainers():
- return True
- else:
- return False
+ return match
+ # Match if metadata.xml contains no maintainer (orphaned package)
+ def match_orphaned(metadata_xml):
+ if not metadata_xml.maintainers():
+ return True
+ else:
+ return False
-def pquery(parser, opts, args):
- portdb = portage.db[portage.root]["porttree"].dbapi
- root_config = RootConfig(portdb.settings, portage.db[portage.root], None)
+ def pquery(parser, opts, args):
+ portdb = portage.db[portage.root]["porttree"].dbapi
+ root_config = RootConfig(portdb.settings, portage.db[portage.root], None)
- def _pkg(cpv, repo_name):
- try:
- metadata = dict(
- zip(
- Package.metadata_keys,
- portdb.aux_get(cpv, Package.metadata_keys, myrepo=repo_name),
+ def _pkg(cpv, repo_name):
+ try:
+ metadata = dict(
+ zip(
+ Package.metadata_keys,
+ portdb.aux_get(cpv, Package.metadata_keys, myrepo=repo_name),
+ )
)
+ except KeyError:
+ raise portage.exception.PackageNotFound(cpv)
+ return Package(
+ built=False,
+ cpv=cpv,
+ installed=False,
+ metadata=metadata,
+ root_config=root_config,
+ type_name="ebuild",
)
- except KeyError:
- raise portage.exception.PackageNotFound(cpv)
- return Package(
- built=False,
- cpv=cpv,
- installed=False,
- metadata=metadata,
- root_config=root_config,
- type_name="ebuild",
- )
- need_metadata = False
- atoms = []
- for arg in args:
- if "/" not in arg.split(":")[0]:
- atom = insert_category_into_atom(arg, "*")
- if atom is None:
+ need_metadata = False
+ atoms = []
+ for arg in args:
+ if "/" not in arg.split(":")[0]:
+ atom = insert_category_into_atom(arg, "*")
+ if atom is None:
+ writemsg("ERROR: Invalid atom: '%s'\n" % arg, noiselevel=-1)
+ return 2
+ else:
+ atom = arg
+
+ try:
+ atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
+ except portage.exception.InvalidAtom:
writemsg("ERROR: Invalid atom: '%s'\n" % arg, noiselevel=-1)
return 2
- else:
- atom = arg
- try:
- atom = portage.dep.Atom(atom, allow_wildcard=True, allow_repo=True)
- except portage.exception.InvalidAtom:
- writemsg("ERROR: Invalid atom: '%s'\n" % arg, noiselevel=-1)
- return 2
+ if atom.slot is not None:
+ need_metadata = True
- if atom.slot is not None:
- need_metadata = True
+ atoms.append(atom)
- atoms.append(atom)
+ if "*/*" in atoms:
+ del atoms[:]
+ need_metadata = False
- if "*/*" in atoms:
- del atoms[:]
- need_metadata = False
-
- if not opts.no_filters:
- need_metadata = True
-
- xml_matchers = []
- if opts.maintainer_email:
- maintainer_emails = []
- for x in opts.maintainer_email:
- maintainer_emails.extend(x.split(","))
- if opts.no_regex: # Escape regex-special characters for an exact match
- maintainer_emails = [re.escape(x) for x in maintainer_emails]
- xml_matchers.append(MaintainerEmailMatcher(maintainer_emails))
- if opts.orphaned:
- xml_matchers.append(match_orphaned)
-
- if opts.repo is not None:
- repos = [portdb.repositories[opts.repo]]
- else:
- repos = list(portdb.repositories)
+ if not opts.no_filters:
+ need_metadata = True
- if not atoms:
- names = None
- categories = list(portdb.categories)
- else:
- category_wildcard = False
- name_wildcard = False
- categories = []
- names = []
- for atom in atoms:
- category, name = portage.catsplit(atom.cp)
- categories.append(category)
- names.append(name)
- if "*" in category:
- category_wildcard = True
- if "*" in name:
- name_wildcard = True
-
- if category_wildcard:
- categories = list(portdb.categories)
+ xml_matchers = []
+ if opts.maintainer_email:
+ maintainer_emails = []
+ for x in opts.maintainer_email:
+ maintainer_emails.extend(x.split(","))
+ if opts.no_regex: # Escape regex-special characters for an exact match
+ maintainer_emails = [re.escape(x) for x in maintainer_emails]
+ xml_matchers.append(MaintainerEmailMatcher(maintainer_emails))
+ if opts.orphaned:
+ xml_matchers.append(match_orphaned)
+
+ if opts.repo is not None:
+ repos = [portdb.repositories[opts.repo]]
else:
- categories = list(set(categories))
+ repos = list(portdb.repositories)
- if name_wildcard:
+ if not atoms:
names = None
+ categories = list(portdb.categories)
else:
- names = sorted(set(names))
+ category_wildcard = False
+ name_wildcard = False
+ categories = []
+ names = []
+ for atom in atoms:
+ category, name = portage.catsplit(atom.cp)
+ categories.append(category)
+ names.append(name)
+ if "*" in category:
+ category_wildcard = True
+ if "*" in name:
+ name_wildcard = True
+
+ if category_wildcard:
+ categories = list(portdb.categories)
+ else:
+ categories = list(set(categories))
- no_version = opts.no_version
- categories.sort()
+ if name_wildcard:
+ names = None
+ else:
+ names = sorted(set(names))
- for category in categories:
- if names is None:
- cp_list = portdb.cp_all(categories=(category,))
- else:
- cp_list = [category + "/" + name for name in names]
- for cp in cp_list:
- matches = []
- for repo in repos:
- match = True
- if xml_matchers:
- metadata_xml_path = os.path.join(repo.location, cp, "metadata.xml")
- try:
- metadata_xml = MetaDataXML(metadata_xml_path, None)
- except (OSError, SyntaxError):
- match = False
- else:
- for matcher in xml_matchers:
- if not matcher(metadata_xml):
- match = False
- break
- if not match:
- continue
- cpv_list = portdb.cp_list(cp, mytree=[repo.location])
- if atoms:
- for cpv in cpv_list:
- pkg = None
- for atom in atoms:
- if atom.repo is not None and atom.repo != repo.name:
- continue
- if not portage.match_from_list(atom, [cpv]):
- continue
- if need_metadata:
- if pkg is None:
- try:
- pkg = _pkg(cpv, repo.name)
- except portage.exception.PackageNotFound:
- continue
+ no_version = opts.no_version
+ categories.sort()
- if not (opts.no_filters or pkg.visible):
- continue
- if not portage.match_from_list(atom, [pkg]):
- continue
- matches.append(cpv)
- break
- if no_version and matches:
- break
- elif opts.no_filters:
- matches.extend(cpv_list)
- else:
- for cpv in cpv_list:
+ for category in categories:
+ if names is None:
+ cp_list = portdb.cp_all(categories=(category,))
+ else:
+ cp_list = [category + "/" + name for name in names]
+ for cp in cp_list:
+ matches = []
+ for repo in repos:
+ match = True
+ if xml_matchers:
+ metadata_xml_path = os.path.join(
+ repo.location, cp, "metadata.xml"
+ )
try:
- pkg = _pkg(cpv, repo.name)
- except portage.exception.PackageNotFound:
- continue
+ metadata_xml = MetaDataXML(metadata_xml_path, None)
+ except (OSError, SyntaxError):
+ match = False
else:
- if pkg.visible:
- matches.append(cpv)
- if no_version:
+ for matcher in xml_matchers:
+ if not matcher(metadata_xml):
+ match = False
break
+ if not match:
+ continue
+ cpv_list = portdb.cp_list(cp, mytree=[repo.location])
+ if atoms:
+ for cpv in cpv_list:
+ pkg = None
+ for atom in atoms:
+ if atom.repo is not None and atom.repo != repo.name:
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+ if need_metadata:
+ if pkg is None:
+ try:
+ pkg = _pkg(cpv, repo.name)
+ except portage.exception.PackageNotFound:
+ continue
+
+ if not (opts.no_filters or pkg.visible):
+ continue
+ if not portage.match_from_list(atom, [pkg]):
+ continue
+ matches.append(cpv)
+ break
+ if no_version and matches:
+ break
+ elif opts.no_filters:
+ matches.extend(cpv_list)
+ else:
+ for cpv in cpv_list:
+ try:
+ pkg = _pkg(cpv, repo.name)
+ except portage.exception.PackageNotFound:
+ continue
+ else:
+ if pkg.visible:
+ matches.append(cpv)
+ if no_version:
+ break
- if no_version and matches:
- break
+ if no_version and matches:
+ break
- if not matches:
- continue
+ if not matches:
+ continue
- if no_version:
- writemsg_stdout("{}\n".format(cp), noiselevel=-1)
- else:
- matches = list(set(matches))
- portdb._cpv_sort_ascending(matches)
- for cpv in matches:
- writemsg_stdout("{}\n".format(cpv), noiselevel=-1)
-
- return os.EX_OK
-
-
-docstrings[
- "pquery"
-] = """[options] [atom]+
- Emulates a subset of Pkgcore's pquery tool.
- """
-pquery.__doc__ = docstrings["pquery"]
-
-
-# -----------------------------------------------------------------------------
-#
-# DO NOT CHANGE CODE BEYOND THIS POINT - IT'S NOT NEEDED!
-#
-
-non_commands = frozenset(
- [
- "elog",
- "eval_atom_use",
- "exithandler",
- "match_orphaned",
- "main",
- "usage",
- "uses_eroot",
- ]
-)
-commands = sorted(
- k
- for k, v in globals().items()
- if k not in non_commands
- and isinstance(v, types.FunctionType)
- and v.__module__ == "__main__"
-)
+ if no_version:
+ writemsg_stdout("{}\n".format(cp), noiselevel=-1)
+ else:
+ matches = list(set(matches))
+ portdb._cpv_sort_ascending(matches)
+ for cpv in matches:
+ writemsg_stdout("{}\n".format(cpv), noiselevel=-1)
+
+ return os.EX_OK
+ docstrings[
+ "pquery"
+ ] = """[options] [atom]+
+ Emulates a subset of Pkgcore's pquery tool.
+ """
+ pquery.__doc__ = docstrings["pquery"]
+
+ non_commands = frozenset(
+ [
+ "elog",
+ "eval_atom_use",
+ "exithandler",
+ "match_orphaned",
+ "main",
+ "usage",
+ "uses_eroot",
+ ]
+ )
+ commands = sorted(
+ k
+ for k, v in globals().items()
+ if k not in non_commands
+ and isinstance(v, types.FunctionType)
+ and v.__module__ == "__main__"
+ )
-def add_pquery_arguments(parser):
- pquery_option_groups = (
- (
- "Repository matching options",
+ def add_pquery_arguments(parser):
+ pquery_option_groups = (
(
- {
- "longopt": "--no-filters",
- "action": "store_true",
- "help": "no visibility filters (ACCEPT_KEYWORDS, package masking, etc)",
- },
- {
- "longopt": "--repo",
- "help": "repository to use (all repositories are used by default)",
- },
+ "Repository matching options",
+ (
+ {
+ "longopt": "--no-filters",
+ "action": "store_true",
+ "help": "no visibility filters (ACCEPT_KEYWORDS, package masking, etc)",
+ },
+ {
+ "longopt": "--repo",
+ "help": "repository to use (all repositories are used by default)",
+ },
+ ),
),
- ),
- (
- "Package matching options",
(
- {
- "longopt": "--maintainer-email",
- "action": "append",
- "help": "comma-separated list of maintainer email regexes to search for",
- },
- {
- "longopt": "--no-regex",
- "action": "store_true",
- "help": "Use exact matching instead of regex matching for --maintainer-email",
- },
- {
- "longopt": "--orphaned",
- "action": "store_true",
- "help": "match only orphaned (maintainer-needed) packages",
- },
+ "Package matching options",
+ (
+ {
+ "longopt": "--maintainer-email",
+ "action": "append",
+ "help": "comma-separated list of maintainer email regexes to search for",
+ },
+ {
+ "longopt": "--no-regex",
+ "action": "store_true",
+ "help": "Use exact matching instead of regex matching for --maintainer-email",
+ },
+ {
+ "longopt": "--orphaned",
+ "action": "store_true",
+ "help": "match only orphaned (maintainer-needed) packages",
+ },
+ ),
),
- ),
- (
- "Output formatting",
(
- {
- "shortopt": "-n",
- "longopt": "--no-version",
- "action": "store_true",
- "help": "collapse multiple matching versions together",
- },
+ "Output formatting",
+ (
+ {
+ "shortopt": "-n",
+ "longopt": "--no-version",
+ "action": "store_true",
+ "help": "collapse multiple matching versions together",
+ },
+ ),
),
- ),
- )
-
- for group_title, opt_data in pquery_option_groups:
- arg_group = parser.add_argument_group(group_title)
- for opt_info in opt_data:
- pargs = []
- try:
- pargs.append(opt_info["shortopt"])
- except KeyError:
- pass
- try:
- pargs.append(opt_info["longopt"])
- except KeyError:
- pass
-
- kwargs = {}
- try:
- kwargs["action"] = opt_info["action"]
- except KeyError:
- pass
- try:
- kwargs["help"] = opt_info["help"]
- except KeyError:
- pass
- arg_group.add_argument(*pargs, **kwargs)
-
-
-def usage(argv):
- print(">>> Portage information query tool")
- print(">>> %s" % portage.VERSION)
- print(">>> Usage: portageq <command> [<option> ...]")
- print("")
- print("Available commands:")
+ )
- #
- # Show our commands -- we do this by scanning the functions in this
- # file, and formatting each functions documentation.
- #
- help_mode = "--help" in argv
- for name in commands:
- doc = docstrings.get(name)
- if doc is None:
- print(" " + name)
- print(" MISSING DOCUMENTATION!")
- print("")
- continue
-
- lines = doc.lstrip("\n").split("\n")
- print(" " + name + " " + lines[0].strip())
- if len(argv) > 1:
- if not help_mode:
- lines = lines[:-1]
- for line in lines[1:]:
- print(" " + line.strip())
-
- print()
- print("Pkgcore pquery compatible options:")
- print()
- parser = argparse.ArgumentParser(
- add_help=False, usage="portageq pquery [options] [atom ...]"
- )
- add_pquery_arguments(parser)
- parser.print_help()
+ for group_title, opt_data in pquery_option_groups:
+ arg_group = parser.add_argument_group(group_title)
+ for opt_info in opt_data:
+ pargs = []
+ try:
+ pargs.append(opt_info["shortopt"])
+ except KeyError:
+ pass
+ try:
+ pargs.append(opt_info["longopt"])
+ except KeyError:
+ pass
- if len(argv) == 1:
- print("\nRun portageq with --help for info")
+ kwargs = {}
+ try:
+ kwargs["action"] = opt_info["action"]
+ except KeyError:
+ pass
+ try:
+ kwargs["help"] = opt_info["help"]
+ except KeyError:
+ pass
+ arg_group.add_argument(*pargs, **kwargs)
+ def usage(argv):
+ print(">>> Portage information query tool")
+ print(">>> %s" % portage.VERSION)
+ print(">>> Usage: portageq <command> [<option> ...]")
+ print("")
+ print("Available commands:")
+
+ #
+ # Show our commands -- we do this by scanning the functions in this
+ # file, and formatting each functions documentation.
+ #
+ help_mode = "--help" in argv
+ for name in commands:
+ doc = docstrings.get(name)
+ if doc is None:
+ print(" " + name)
+ print(" MISSING DOCUMENTATION!")
+ print("")
+ continue
-atom_validate_strict = "EBUILD_PHASE" in os.environ
-eapi = None
-if atom_validate_strict:
- eapi = os.environ.get("EAPI")
+ lines = doc.lstrip("\n").split("\n")
+ print(" " + name + " " + lines[0].strip())
+ if len(argv) > 1:
+ if not help_mode:
+ lines = lines[:-1]
+ for line in lines[1:]:
+ print(" " + line.strip())
+
+ print()
+ print("Pkgcore pquery compatible options:")
+ print()
+ parser = argparse.ArgumentParser(
+ add_help=False, usage="portageq pquery [options] [atom ...]"
+ )
+ add_pquery_arguments(parser)
+ parser.print_help()
- def elog(elog_funcname, lines):
- cmd = "source '%s/isolated-functions.sh' ; " % os.environ["PORTAGE_BIN_PATH"]
- for line in lines:
- cmd += "{} {} ; ".format(elog_funcname, portage._shell_quote(line))
- subprocess.call([portage.const.BASH_BINARY, "-c", cmd])
+ if len(argv) == 1:
+ print("\nRun portageq with --help for info")
-else:
+ atom_validate_strict = "EBUILD_PHASE" in os.environ
+ eapi = None
+ if atom_validate_strict:
+ eapi = os.environ.get("EAPI")
- def elog(elog_funcname, lines):
- pass
+ def elog(elog_funcname, lines):
+ cmd = (
+ "source '%s/isolated-functions.sh' ; " % os.environ["PORTAGE_BIN_PATH"]
+ )
+ for line in lines:
+ cmd += "{} {} ; ".format(elog_funcname, portage._shell_quote(line))
+ subprocess.call([portage.const.BASH_BINARY, "-c", cmd])
+ else:
-def main(argv):
+ def elog(elog_funcname, lines):
+ pass
- argv = portage._decode_argv(argv)
+ def main(argv):
- nocolor = os.environ.get("NOCOLOR")
- if nocolor in ("yes", "true"):
- portage.output.nocolor()
+ argv = portage._decode_argv(argv)
- parser = argparse.ArgumentParser(add_help=False)
+ nocolor = os.environ.get("NOCOLOR")
+ if nocolor in ("yes", "true"):
+ portage.output.nocolor()
- # used by envvar
- parser.add_argument("-v", dest="verbose", action="store_true")
+ parser = argparse.ArgumentParser(add_help=False)
- actions = parser.add_argument_group("Actions")
- actions.add_argument("-h", "--help", action="store_true")
- actions.add_argument("--version", action="store_true")
+ # used by envvar
+ parser.add_argument("-v", dest="verbose", action="store_true")
- add_pquery_arguments(parser)
+ actions = parser.add_argument_group("Actions")
+ actions.add_argument("-h", "--help", action="store_true")
+ actions.add_argument("--version", action="store_true")
- opts, args = parser.parse_known_args(argv[1:])
+ add_pquery_arguments(parser)
- if opts.help:
- usage(argv)
- return os.EX_OK
- elif opts.version:
- print("Portage", portage.VERSION)
- return os.EX_OK
+ opts, args = parser.parse_known_args(argv[1:])
- cmd = None
- if args and args[0] in commands:
- cmd = args[0]
+ if opts.help:
+ usage(argv)
+ return os.EX_OK
+ elif opts.version:
+ print("Portage", portage.VERSION)
+ return os.EX_OK
- if cmd == "pquery":
cmd = None
- args = args[1:]
+ if args and args[0] in commands:
+ cmd = args[0]
- if cmd is None:
- return pquery(parser, opts, args)
+ if cmd == "pquery":
+ cmd = None
+ args = args[1:]
- if opts.verbose:
- # used by envvar
- args.append("-v")
+ if cmd is None:
+ return pquery(parser, opts, args)
- argv = argv[:1] + args
+ if opts.verbose:
+ # used by envvar
+ args.append("-v")
- if len(argv) < 2:
- usage(argv)
- sys.exit(os.EX_USAGE)
+ argv = argv[:1] + args
- function = globals()[cmd]
- uses_eroot = getattr(function, "uses_eroot", False) and len(argv) > 2
- if uses_eroot:
- if not os.path.isdir(argv[2]):
- sys.stderr.write("Not a directory: '%s'\n" % argv[2])
- sys.stderr.write("Run portageq with --help for info\n")
- sys.stderr.flush()
+ if len(argv) < 2:
+ usage(argv)
sys.exit(os.EX_USAGE)
- # Calculate EPREFIX and ROOT that will be used to construct
- # portage.settings later. It's tempting to use
- # portage.settings["EPREFIX"] here, but that would force
- # instantiation of portage.settings, which we don't want to do
- # until after we've calculated ROOT (see bug #529200).
- eprefix = portage.data._target_eprefix()
- eroot = portage.util.normalize_path(argv[2])
-
- if eprefix:
- if not eroot.endswith(eprefix):
- sys.stderr.write(
- "ERROR: This version of portageq"
- " only supports <eroot>s ending in"
- " '%s'. The provided <eroot>, '%s',"
- " doesn't.\n" % (eprefix, eroot)
- )
+
+ function = globals()[cmd]
+ uses_eroot = getattr(function, "uses_eroot", False) and len(argv) > 2
+ if uses_eroot:
+ if not os.path.isdir(argv[2]):
+ sys.stderr.write("Not a directory: '%s'\n" % argv[2])
+ sys.stderr.write("Run portageq with --help for info\n")
sys.stderr.flush()
sys.exit(os.EX_USAGE)
- root = eroot[: 1 - len(eprefix)]
- else:
- root = eroot
+ # Calculate EPREFIX and ROOT that will be used to construct
+ # portage.settings later. It's tempting to use
+ # portage.settings["EPREFIX"] here, but that would force
+ # instantiation of portage.settings, which we don't want to do
+ # until after we've calculated ROOT (see bug #529200).
+ eprefix = portage.data._target_eprefix()
+ eroot = portage.util.normalize_path(argv[2])
+
+ if eprefix:
+ if not eroot.endswith(eprefix):
+ sys.stderr.write(
+ "ERROR: This version of portageq"
+ " only supports <eroot>s ending in"
+ " '%s'. The provided <eroot>, '%s',"
+ " doesn't.\n" % (eprefix, eroot)
+ )
+ sys.stderr.flush()
+ sys.exit(os.EX_USAGE)
+ root = eroot[: 1 - len(eprefix)]
+ else:
+ root = eroot
- os.environ["ROOT"] = root
+ os.environ["ROOT"] = root
- if getattr(function, "uses_configroot", False):
- os.environ["PORTAGE_CONFIGROOT"] = eroot
- # Disable RepoConfigLoader location validation, allowing raw
- # configuration to pass through, since repo locations are not
- # necessarily expected to exist if the configuration comes
- # from a chroot.
- portage._sync_mode = True
+ if getattr(function, "uses_configroot", False):
+ os.environ["PORTAGE_CONFIGROOT"] = eroot
+ # Disable RepoConfigLoader location validation, allowing raw
+ # configuration to pass through, since repo locations are not
+ # necessarily expected to exist if the configuration comes
+ # from a chroot.
+ portage._sync_mode = True
- args = argv[2:]
+ args = argv[2:]
- try:
- if uses_eroot:
- args[0] = portage.settings["EROOT"]
- retval = function(args)
- if retval:
- sys.exit(retval)
- except portage.exception.PermissionDenied as e:
- sys.stderr.write("Permission denied: '%s'\n" % str(e))
- sys.exit(e.errno)
- except portage.exception.ParseError as e:
- sys.stderr.write("%s\n" % str(e))
- sys.exit(1)
- except portage.exception.AmbiguousPackageName as e:
- # Multiple matches thrown from cpv_expand
- pkgs = e.args[0]
- # An error has occurred so we writemsg to stderr and exit nonzero.
- portage.writemsg(
- "You specified an unqualified atom that matched multiple packages:\n",
- noiselevel=-1,
- )
- for pkg in pkgs:
- portage.writemsg("* %s\n" % pkg, noiselevel=-1)
- portage.writemsg("\nPlease use a more specific atom.\n", noiselevel=-1)
- sys.exit(1)
-
-
-if __name__ == "__main__":
- try:
- sys.exit(main(sys.argv))
- finally:
- global_event_loop().close()
+ try:
+ if uses_eroot:
+ args[0] = portage.settings["EROOT"]
+ retval = function(args)
+ if retval:
+ sys.exit(retval)
+ except portage.exception.PermissionDenied as e:
+ sys.stderr.write("Permission denied: '%s'\n" % str(e))
+ sys.exit(e.errno)
+ except portage.exception.ParseError as e:
+ sys.stderr.write("%s\n" % str(e))
+ sys.exit(1)
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ # An error has occurred so we writemsg to stderr and exit nonzero.
+ portage.writemsg(
+ "You specified an unqualified atom that matched multiple packages:\n",
+ noiselevel=-1,
+ )
+ for pkg in pkgs:
+ portage.writemsg("* %s\n" % pkg, noiselevel=-1)
+ portage.writemsg("\nPlease use a more specific atom.\n", noiselevel=-1)
+ sys.exit(1)
-# -----------------------------------------------------------------------------
+ if __name__ == "__main__":
+ try:
+ sys.exit(main(sys.argv))
+ finally:
+ global_event_loop().close()
+
+except KeyboardInterrupt as e:
+ # Prevent traceback on ^C
+ signum = getattr(e, "signum", signal.SIGINT)
+ signal.signal(signum, signal.SIG_DFL)
+ raise_signal(signum)
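
For readers skimming the rewrapped diff, the final except block above is the
heart of the change. A minimal standalone sketch of the idiom, assuming
Python >= 3.8 for signal.raise_signal (names are illustrative, not part of
the patch):

    import signal
    import time

    # Illustrative sketch of the idiom; not part of the patch above.
    class SignalInterrupt(KeyboardInterrupt):
        """KeyboardInterrupt subclass that records which signal fired."""

        def __init__(self, signum):
            self.signum = signum

    def handler(signum, _frame):
        raise SignalInterrupt(signum)

    signal.signal(signal.SIGTERM, handler)

    try:
        time.sleep(60)  # stand-in for the entry script's real work
    except KeyboardInterrupt as e:
        # Plain ^C raises KeyboardInterrupt without a signum attribute,
        # so fall back to SIGINT.
        signum = getattr(e, "signum", signal.SIGINT)
        # Restore the default disposition and re-raise so the process
        # dies by the signal; the invoking shell then sees exit status
        # 128+signum instead of an ordinary sys.exit().
        signal.signal(signum, signal.SIG_DFL)
        signal.raise_signal(signum)  # Python >= 3.8

Sending SIGTERM to that process leaves the parent shell with $? == 143
(128+15), i.e. the shell observes death-by-signal rather than a normal exit.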
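
The pquery subcommand rewrapped above keeps its "[options] [atom]+" shape; a
couple of illustrative invocations (hypothetical atoms and addresses, not
taken from the patch), built only from the options defined in
add_pquery_arguments():

    portageq pquery --orphaned --no-version 'dev-python/*'
    portageq pquery --no-regex --maintainer-email 'larry@gentoo.org' -n

As the option parsing above shows, --maintainer-email accepts a
comma-separated list and is matched as a set of regexes unless --no-regex is
also given.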