* [gentoo-commits] proj/tinderbox-cluster:master commit in: gosbs/scheduler/, gosbs/cmd/, gosbs/db/sqlalchemy/, gosbs/builder/, ...
@ 2020-04-25 21:20 Magnus Granberg
0 siblings, 0 replies; only message in thread
From: Magnus Granberg @ 2020-04-25 21:20 UTC (permalink / raw)
To: gentoo-commits
commit: 9b15e4b8753999f6b227e5e71f2b7fdc614da30e
Author: Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sat Apr 25 21:10:22 2020 +0000
Commit: Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sat Apr 25 21:16:35 2020 +0000
URL: https://gitweb.gentoo.org/proj/tinderbox-cluster.git/commit/?id=9b15e4b8
Add task pkg_build to Service Builder and remove Origin notes
Signed-off-by: Magnus Granberg <zorry <AT> gentoo.org>
gosbs/_emerge/Scheduler.py | 2042 ++++++++++++
gosbs/_emerge/__init__.py | 0
gosbs/_emerge/actions.py | 3378 ++++++++++++++++++++
gosbs/_emerge/main.py | 1317 ++++++++
gosbs/baserpc.py | 2 -
gosbs/builder/depclean.py | 82 +
gosbs/builder/manager.py | 10 +
gosbs/builder/wrapper_depgraph.py | 61 +
gosbs/cmd/__init__.py | 20 -
gosbs/common/binary.py | 32 +
gosbs/common/flags.py | 17 +-
gosbs/common/git.py | 22 +-
gosbs/common/task.py | 19 +-
gosbs/config.py | 2 -
gosbs/context.py | 2 -
gosbs/db/sqlalchemy/models.py | 45 +
gosbs/debugger.py | 2 -
gosbs/exception.py | 2 -
gosbs/i18n.py | 2 -
gosbs/manager.py | 4 +-
gosbs/middleware.py | 2 -
gosbs/objects/__init__.py | 4 +-
gosbs/objects/build_iuse.py | 7 +-
gosbs/objects/category.py | 4 -
gosbs/objects/category_metadata.py | 4 -
gosbs/objects/ebuild.py | 4 -
gosbs/objects/ebuild_iuse.py | 4 -
gosbs/objects/ebuild_keyword.py | 4 -
gosbs/objects/ebuild_metadata.py | 4 -
gosbs/objects/ebuild_restriction.py | 8 +-
gosbs/objects/email.py | 6 +-
gosbs/objects/fields.py | 6 +-
gosbs/objects/flavor.py | 4 -
gosbs/objects/image.py | 4 -
gosbs/objects/keyword.py | 6 +-
.../{project_metadata.py => local_binary.py} | 198 +-
gosbs/objects/objectstor_binary.py | 306 ++
gosbs/objects/package.py | 4 -
gosbs/objects/package_email.py | 4 -
gosbs/objects/package_metadata.py | 4 -
gosbs/objects/project.py | 4 -
gosbs/objects/project_build.py | 30 +-
gosbs/objects/project_metadata.py | 4 -
.../{project_metadata.py => project_option.py} | 184 +-
gosbs/objects/project_repo.py | 5 -
gosbs/objects/repo.py | 4 -
gosbs/objects/restriction.py | 6 +-
gosbs/objects/service.py | 4 -
gosbs/objects/service_repo.py | 4 -
gosbs/objects/task.py | 4 +-
gosbs/objects/use.py | 6 +-
gosbs/objects/user.py | 4 -
gosbs/policy.py | 2 -
gosbs/profiler.py | 2 -
gosbs/rpc.py | 2 -
gosbs/scheduler/category.py | 14 +-
gosbs/scheduler/ebuild.py | 22 +-
gosbs/scheduler/email.py | 16 +-
gosbs/scheduler/manager.py | 4 -
gosbs/scheduler/package.py | 15 +-
gosbs/scheduler/rpcapi.py | 4 -
gosbs/service.py | 5 +-
gosbs/tasks/builder/__init__.py | 3 +-
gosbs/tasks/builder/build_pkg.py | 164 +
gosbs/tasks/scheduler/rebuild_db.py | 12 +-
gosbs/tasks/scheduler/update_db.py | 13 +-
gosbs/tasks/scheduler/update_git.py | 19 +-
gosbs/tasks/scheduler/update_repos.py | 12 +-
gosbs/utils.py | 2 -
gosbs/version.py | 2 -
70 files changed, 7712 insertions(+), 508 deletions(-)
diff --git a/gosbs/_emerge/Scheduler.py b/gosbs/_emerge/Scheduler.py
new file mode 100644
index 0000000..6f45640
--- /dev/null
+++ b/gosbs/_emerge/Scheduler.py
@@ -0,0 +1,2042 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+from collections import deque
+import gc
+import gzip
+import logging
+import signal
+import sys
+import textwrap
+import time
+import warnings
+import weakref
+import zlib
+
+import portage
+from portage import os
+from portage import _encodings
+from portage import _unicode_encode
+from portage.cache.mappings import slot_dict_class
+from portage.elog.messages import eerror
+from portage.localization import _
+from portage.output import colorize, create_color_func, red
+bad = create_color_func("BAD")
+from portage._sets import SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import ensure_dirs, writemsg, writemsg_level
+from portage.util.futures import asyncio
+from portage.util.SlotObject import SlotObject
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.EventLoop import EventLoop
+from portage.package.ebuild.digestcheck import digestcheck
+from portage.package.ebuild.digestgen import digestgen
+from portage.package.ebuild.doebuild import (_check_temp_dir,
+ _prepare_self_update)
+from portage.package.ebuild.prepare_build_dirs import prepare_build_dirs
+
+import _emerge
+from _emerge.BinpkgFetcher import BinpkgFetcher
+from _emerge.BinpkgPrefetcher import BinpkgPrefetcher
+from _emerge.BinpkgVerifier import BinpkgVerifier
+from _emerge.Blocker import Blocker
+from _emerge.BlockerDB import BlockerDB
+from _emerge.clear_caches import clear_caches
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.create_world_atom import create_world_atom
+from _emerge.DepPriority import DepPriority
+from _emerge.depgraph import depgraph, resume_depgraph
+from _emerge.EbuildBuildDir import EbuildBuildDir
+from _emerge.EbuildFetcher import EbuildFetcher
+from _emerge.EbuildPhase import EbuildPhase
+from _emerge.emergelog import emergelog
+from _emerge.FakeVartree import FakeVartree
+from _emerge.getloadavg import getloadavg
+from _emerge._find_deep_system_runtime_deps import _find_deep_system_runtime_deps
+from _emerge._flush_elog_mod_echo import _flush_elog_mod_echo
+from _emerge.JobStatusDisplay import JobStatusDisplay
+from _emerge.MergeListItem import MergeListItem
+from _emerge.Package import Package
+from _emerge.PackageMerge import PackageMerge
+from _emerge.PollScheduler import PollScheduler
+from _emerge.SequentialTaskQueue import SequentialTaskQueue
+
+if sys.hexversion >= 0x3000000:
+ basestring = str
+
+# enums
+FAILURE = 1
+
+
+class Scheduler(PollScheduler):
+
+ # max time between loadavg checks (seconds)
+ _loadavg_latency = 30
+
+ # max time between display status updates (seconds)
+ _max_display_latency = 3
+
+ _opts_ignore_blockers = \
+ frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri",
+ "--nodeps", "--pretend"])
+
+ _opts_no_background = \
+ frozenset(["--pretend",
+ "--fetchonly", "--fetch-all-uri"])
+
+ _opts_no_self_update = frozenset(["--buildpkgonly",
+ "--fetchonly", "--fetch-all-uri", "--pretend"])
+
+ class _iface_class(SchedulerInterface):
+ __slots__ = ("fetch",
+ "scheduleSetup", "scheduleUnpack")
+
+ class _fetch_iface_class(SlotObject):
+ __slots__ = ("log_file", "schedule")
+
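+ # Task queues, one SequentialTaskQueue per stage: "jobs" runs builds,
+ # "merge" installs to the live filesystem, "ebuild_locks" serializes
+ # setup phases when parallel-install is enabled, and "fetch"/"unpack"
+ # serialize distfile downloads and $DISTDIR access.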
+ _task_queues_class = slot_dict_class(
+ ("merge", "jobs", "ebuild_locks", "fetch", "unpack"), prefix="")
+
+ class _build_opts_class(SlotObject):
+ __slots__ = ("buildpkg", "buildpkg_exclude", "buildpkgonly",
+ "fetch_all_uri", "fetchonly", "pretend")
+
+ class _binpkg_opts_class(SlotObject):
+ __slots__ = ("fetchonly", "getbinpkg", "pretend")
+
+ class _pkg_count_class(SlotObject):
+ __slots__ = ("curval", "maxval")
+
+ class _emerge_log_class(SlotObject):
+ __slots__ = ("xterm_titles",)
+
+ def log(self, *pargs, **kwargs):
+ if not self.xterm_titles:
+ # Avoid interference with the scheduler's status display.
+ kwargs.pop("short_msg", None)
+ emergelog(self.xterm_titles, *pargs, **kwargs)
+
+ class _failed_pkg(SlotObject):
+ __slots__ = ("build_dir", "build_log", "pkg",
+ "postinst_failure", "returncode")
+
+ class _ConfigPool(object):
+ """Interface for a task to temporarily allocate a config
+ instance from a pool. This allows a task to be constructed
+ long before the config instance actually becomes needed, like
+ when prefetchers are constructed for the whole merge list."""
+ __slots__ = ("_root", "_allocate", "_deallocate")
+ def __init__(self, root, allocate, deallocate):
+ self._root = root
+ self._allocate = allocate
+ self._deallocate = deallocate
+ def allocate(self):
+ return self._allocate(self._root)
+ def deallocate(self, settings):
+ self._deallocate(settings)
+
+ class _unknown_internal_error(portage.exception.PortageException):
+ """
+ Used internally to terminate scheduling. The specific reason for
+ the failure should have been dumped to stderr.
+ """
+ def __init__(self, value=""):
+ portage.exception.PortageException.__init__(self, value)
+
+ def __init__(self, settings, trees, mtimedb, myopts,
+ spinner, mergelist=None, favorites=None, graph_config=None):
+ PollScheduler.__init__(self, main=True)
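+ # main=True requests the process-global event loop (see PollScheduler).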
+
+ if mergelist is not None:
+ warnings.warn("The mergelist parameter of the " + \
+ "_emerge.Scheduler constructor is now unused. Use " + \
+ "the graph_config parameter instead.",
+ DeprecationWarning, stacklevel=2)
+
+ self.settings = settings
+ self.target_root = settings["EROOT"]
+ self.trees = trees
+ self.myopts = myopts
+ self._spinner = spinner
+ self._mtimedb = mtimedb
+ self._favorites = favorites
+ self._args_set = InternalPackageSet(favorites, allow_repo=True)
+ self._build_opts = self._build_opts_class()
+
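+ # Populate each build option slot from the corresponding command line
+ # flag, e.g. "fetch_all_uri" from myopts.get("--fetch-all-uri").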
+ for k in self._build_opts.__slots__:
+ setattr(self._build_opts, k, myopts.get("--" + k.replace("_", "-")))
+ self._build_opts.buildpkg_exclude = InternalPackageSet( \
+ initial_atoms=" ".join(myopts.get("--buildpkg-exclude", [])).split(), \
+ allow_wildcard=True, allow_repo=True)
+ if "mirror" in self.settings.features:
+ self._build_opts.fetch_all_uri = True
+
+ self._binpkg_opts = self._binpkg_opts_class()
+ for k in self._binpkg_opts.__slots__:
+ setattr(self._binpkg_opts, k, "--" + k.replace("_", "-") in myopts)
+
+ self.curval = 0
+ self._logger = self._emerge_log_class()
+ self._task_queues = self._task_queues_class()
+ for k in self._task_queues.allowed_keys:
+ setattr(self._task_queues, k,
+ SequentialTaskQueue())
+
+ # Holds merges that will wait to be executed when no builds are
+ # executing. This is useful for system packages since dependencies
+ # on system packages are frequently unspecified. For example, see
+ # bug #256616.
+ self._merge_wait_queue = deque()
+ # Holds merges that have been transferred from the merge_wait_queue to
+ # the actual merge queue. They are removed from this list upon
+ # completion. Other packages can start building only when this list is
+ # empty.
+ self._merge_wait_scheduled = []
+
+ # Holds system packages and their deep runtime dependencies. Before
+ # being merged, these packages go to merge_wait_queue, to be merged
+ # when no other packages are building.
+ self._deep_system_deps = set()
+
+ # Holds packages to merge which will satisfy currently unsatisfied
+ # deep runtime dependencies of system packages. If this is not empty
+ # then no parallel builds will be spawned until it is empty. This
+ # minimizes the possibility that a build will fail due to the system
+ # being in a fragile state. For example, see bug #259954.
+ self._unsatisfied_system_deps = set()
+
+ self._status_display = JobStatusDisplay(
+ xterm_titles=('notitles' not in settings.features))
+ self._max_load = myopts.get("--load-average")
+ max_jobs = myopts.get("--jobs")
+ if max_jobs is None:
+ max_jobs = 1
+ self._set_max_jobs(max_jobs)
+ self._running_root = trees[trees._running_eroot]["root_config"]
+ self.edebug = 0
+ if settings.get("PORTAGE_DEBUG", "") == "1":
+ self.edebug = 1
+ self.pkgsettings = {}
+ self._config_pool = {}
+ for root in self.trees:
+ self._config_pool[root] = []
+
+ self._fetch_log = os.path.join(_emerge.emergelog._emerge_log_dir,
+ 'emerge-fetch.log')
+ fetch_iface = self._fetch_iface_class(log_file=self._fetch_log,
+ schedule=self._schedule_fetch)
+ self._sched_iface = self._iface_class(
+ self._event_loop,
+ is_background=self._is_background,
+ fetch=fetch_iface,
+ scheduleSetup=self._schedule_setup,
+ scheduleUnpack=self._schedule_unpack)
+
+ self._prefetchers = weakref.WeakValueDictionary()
+ self._pkg_queue = []
+ self._jobs = 0
+ self._running_tasks = {}
+ self._completed_tasks = set()
+ self._main_exit = None
+ self._main_loadavg_handle = None
+ self._schedule_merge_wakeup_task = None
+
+ self._failed_pkgs = []
+ self._failed_pkgs_all = []
+ self._failed_pkgs_die_msgs = []
+ self._post_mod_echo_msgs = []
+ self._parallel_fetch = False
+ self._init_graph(graph_config)
+ merge_count = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._pkg_count = self._pkg_count_class(
+ curval=0, maxval=merge_count)
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # The load average takes some time to respond when new
+ # jobs are added, so we need to limit the rate of adding
+ # new jobs.
+ self._job_delay_max = 5
+ self._previous_job_start_time = None
+ self._job_delay_timeout_id = None
+
+ # The load average takes some time to respond after
+ # a SIGSTOP/SIGCONT cycle, so delay scheduling for some
+ # time after SIGCONT is received.
+ self._sigcont_delay = 5
+ self._sigcont_time = None
+
+ # This is used to memoize the _choose_pkg() result when
+ # no packages can be chosen until one of the existing
+ # jobs completes.
+ self._choose_pkg_return_early = False
+
+ features = self.settings.features
+ if "parallel-fetch" in features and \
+ not ("--pretend" in self.myopts or \
+ "--fetch-all-uri" in self.myopts or \
+ "--fetchonly" in self.myopts):
+ if "distlocks" not in features:
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ portage.writemsg(red("!!!")+" parallel-fetching " + \
+ "requires the distlocks feature enabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+" you have it disabled, " + \
+ "thus parallel-fetching is being disabled"+"\n",
+ noiselevel=-1)
+ portage.writemsg(red("!!!")+"\n", noiselevel=-1)
+ elif merge_count > 1:
+ self._parallel_fetch = True
+
+ if self._parallel_fetch:
+ # clear out existing fetch log if it exists
+ try:
+ open(self._fetch_log, 'w').close()
+ except EnvironmentError:
+ pass
+
+ self._running_portage = None
+ portage_match = self._running_root.trees["vartree"].dbapi.match(
+ portage.const.PORTAGE_PACKAGE_ATOM)
+ if portage_match:
+ cpv = portage_match.pop()
+ self._running_portage = self._pkg(cpv, "installed",
+ self._running_root, installed=True)
+
+ def _handle_self_update(self):
+
+ if self._opts_no_self_update.intersection(self.myopts):
+ return os.EX_OK
+
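+ # If the merge list updates portage itself for the running root,
+ # prepare the self-update before any other work starts.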
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+ if x.operation != "merge":
+ continue
+ if x.root != self._running_root.root:
+ continue
+ if not portage.dep.match_from_list(
+ portage.const.PORTAGE_PACKAGE_ATOM, [x]):
+ continue
+ rval = _check_temp_dir(self.settings)
+ if rval != os.EX_OK:
+ return rval
+ _prepare_self_update(self.settings)
+ break
+
+ return os.EX_OK
+
+ def _terminate_tasks(self):
+ self._status_display.quiet = True
+ for task in list(self._running_tasks.values()):
+ if task.isAlive():
+ # This task should keep the main loop running until
+ # it has had an opportunity to clean up after itself.
+ # Rely on its exit hook to remove it from
+ # self._running_tasks when it has finished cleaning up.
+ task.cancel()
+ else:
+ # This task has been waiting to be started in one of
+ # self._task_queues which are all cleared below. It
+ # will never be started, so purge it from
+ # self._running_tasks so that it won't keep the main
+ # loop running.
+ del self._running_tasks[id(task)]
+
+ for q in self._task_queues.values():
+ q.clear()
+
+ def _init_graph(self, graph_config):
+ """
+ Initialize the structures used for dependency calculations
+ involving currently installed packages.
+ """
+ self._set_graph_config(graph_config)
+ self._blocker_db = {}
+ depgraph_params = create_depgraph_params(self.myopts, None)
+ dynamic_deps = "dynamic_deps" in depgraph_params
+ ignore_built_slot_operator_deps = self.myopts.get(
+ "--ignore-built-slot-operator-deps", "n") == "y"
+ for root in self.trees:
+ if graph_config is None:
+ fake_vartree = FakeVartree(self.trees[root]["root_config"],
+ pkg_cache=self._pkg_cache, dynamic_deps=dynamic_deps,
+ ignore_built_slot_operator_deps=ignore_built_slot_operator_deps)
+ fake_vartree.sync()
+ else:
+ fake_vartree = graph_config.trees[root]['vartree']
+ self._blocker_db[root] = BlockerDB(fake_vartree)
+
+ def _destroy_graph(self):
+ """
+ Use this to free memory at the beginning of _calc_resume_list().
+ After _calc_resume_list(), the _init_graph() method
+ must be called in order to re-generate the structures that
+ this method destroys.
+ """
+ self._blocker_db = None
+ self._set_graph_config(None)
+ gc.collect()
+
+ def _set_max_jobs(self, max_jobs):
+ self._max_jobs = max_jobs
+ self._task_queues.jobs.max_jobs = max_jobs
+ if "parallel-install" in self.settings.features:
+ self._task_queues.merge.max_jobs = max_jobs
+
+ def _background_mode(self):
+ """
+ Check if background mode is enabled and adjust states as necessary.
+
+ @rtype: bool
+ @return: True if background mode is enabled, False otherwise.
+ """
+ background = (self._max_jobs is True or \
+ self._max_jobs > 1 or "--quiet" in self.myopts \
+ or self.myopts.get("--quiet-build") == "y") and \
+ not bool(self._opts_no_background.intersection(self.myopts))
+
+ if background:
+ interactive_tasks = self._get_interactive_tasks()
+ if interactive_tasks:
+ background = False
+ writemsg_level(">>> Sending package output to stdio due " + \
+ "to interactive package(s):\n",
+ level=logging.INFO, noiselevel=-1)
+ msg = [""]
+ for pkg in interactive_tasks:
+ pkg_str = " " + colorize("INFORM", str(pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ pkg_str += " for " + pkg.root
+ msg.append(pkg_str)
+ msg.append("")
+ writemsg_level("".join("%s\n" % (l,) for l in msg),
+ level=logging.INFO, noiselevel=-1)
+ if self._max_jobs is True or self._max_jobs > 1:
+ self._set_max_jobs(1)
+ writemsg_level(">>> Setting --jobs=1 due " + \
+ "to the above interactive package(s)\n",
+ level=logging.INFO, noiselevel=-1)
+ writemsg_level(">>> In order to temporarily mask " + \
+ "interactive updates, you may\n" + \
+ ">>> specify --accept-properties=-interactive\n",
+ level=logging.INFO, noiselevel=-1)
+ self._status_display.quiet = \
+ not background or \
+ ("--quiet" in self.myopts and \
+ "--verbose" not in self.myopts)
+
+ self._logger.xterm_titles = \
+ "notitles" not in self.settings.features and \
+ self._status_display.quiet
+
+ return background
+
+ def _get_interactive_tasks(self):
+ interactive_tasks = []
+ for task in self._mergelist:
+ if not (isinstance(task, Package) and \
+ task.operation == "merge"):
+ continue
+ if 'interactive' in task.properties:
+ interactive_tasks.append(task)
+ return interactive_tasks
+
+ def _set_graph_config(self, graph_config):
+
+ if graph_config is None:
+ self._graph_config = None
+ self._pkg_cache = {}
+ self._digraph = None
+ self._mergelist = []
+ self._world_atoms = None
+ self._deep_system_deps.clear()
+ return
+
+ self._graph_config = graph_config
+ self._pkg_cache = graph_config.pkg_cache
+ self._digraph = graph_config.graph
+ self._mergelist = graph_config.mergelist
+
+ # Generate world atoms while the event loop is not running,
+ # since otherwise portdbapi match calls in the create_world_atom
+ # function could trigger event loop recursion.
+ self._world_atoms = {}
+ for pkg in self._mergelist:
+ if getattr(pkg, 'operation', None) != 'merge':
+ continue
+ atom = create_world_atom(pkg, self._args_set,
+ pkg.root_config, before_install=True)
+ if atom is not None:
+ self._world_atoms[pkg] = atom
+
+ if "--nodeps" in self.myopts or \
+ (self._max_jobs is not True and self._max_jobs < 2):
+ # save some memory
+ self._digraph = None
+ graph_config.graph = None
+ graph_config.pkg_cache.clear()
+ self._deep_system_deps.clear()
+ for pkg in self._mergelist:
+ self._pkg_cache[pkg] = pkg
+ return
+
+ self._find_system_deps()
+ self._prune_digraph()
+ self._prevent_builddir_collisions()
+ if '--debug' in self.myopts:
+ writemsg("\nscheduler digraph:\n\n", noiselevel=-1)
+ self._digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ def _find_system_deps(self):
+ """
+ Find system packages and their deep runtime dependencies. Before being
+ merged, these packages go to merge_wait_queue, to be merged when no
+ other packages are building.
+ NOTE: This can only find deep system deps if the system set has been
+ added to the graph and traversed deeply (the depgraph "complete"
+ parameter will do this, triggered by emerge --complete-graph option).
+ """
+ params = create_depgraph_params(self.myopts, None)
+ if not params["implicit_system_deps"]:
+ return
+
+ deep_system_deps = self._deep_system_deps
+ deep_system_deps.clear()
+ deep_system_deps.update(
+ _find_deep_system_runtime_deps(self._digraph))
+ deep_system_deps.difference_update([pkg for pkg in \
+ deep_system_deps if pkg.operation != "merge"])
+
+ def _prune_digraph(self):
+ """
+ Prune any root nodes that are irrelevant.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+ removed_nodes = set()
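+ # Repeatedly strip irrelevant root nodes until a fixed point is
+ # reached, since removing one layer of roots can expose new ones.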
+ while True:
+ for node in graph.root_nodes():
+ if not isinstance(node, Package) or \
+ (node.installed and node.operation == "nomerge") or \
+ node.onlydeps or \
+ node in completed_tasks:
+ removed_nodes.add(node)
+ if removed_nodes:
+ graph.difference_update(removed_nodes)
+ if not removed_nodes:
+ break
+ removed_nodes.clear()
+
+ def _prevent_builddir_collisions(self):
+ """
+ When building stages, sometimes the same exact cpv needs to be merged
+ to both $ROOTs. Add edges to the digraph in order to avoid collisions
+ in the builddir. Currently, normal file locks would be inappropriate
+ for this purpose since emerge holds all of its build dir locks from
+ the main process.
+ """
+ cpv_map = {}
+ for pkg in self._mergelist:
+ if not isinstance(pkg, Package):
+ # a satisfied blocker
+ continue
+ if pkg.installed:
+ continue
+ if pkg.cpv not in cpv_map:
+ cpv_map[pkg.cpv] = [pkg]
+ continue
+ for earlier_pkg in cpv_map[pkg.cpv]:
+ self._digraph.add(earlier_pkg, pkg,
+ priority=DepPriority(buildtime=True))
+ cpv_map[pkg.cpv].append(pkg)
+
+ class _pkg_failure(portage.exception.PortageException):
+ """
+ An instance of this class is raised by unmerge() when
+ an uninstallation fails.
+ """
+ status = 1
+ def __init__(self, *pargs):
+ portage.exception.PortageException.__init__(self, pargs)
+ if pargs:
+ self.status = pargs[0]
+
+ def _schedule_fetch(self, fetcher):
+ """
+ Schedule a fetcher, in order to control the number of concurrent
+ fetchers. If self._max_jobs is greater than 1 then the fetch
+ queue is bypassed and the fetcher is started immediately,
+ otherwise it is added to the front of the parallel-fetch queue.
+ NOTE: The parallel-fetch queue is currently used to serialize
+ access to the parallel-fetch log, so changes in the log handling
+ would be required before it would be possible to enable
+ concurrent fetching within the parallel-fetch queue.
+ """
+ if self._max_jobs > 1:
+ fetcher.start()
+ else:
+ self._task_queues.fetch.addFront(fetcher)
+
+ def _schedule_setup(self, setup_phase):
+ """
+ Schedule a setup phase on the merge queue, in order to
+ serialize unsandboxed access to the live filesystem.
+ """
+ if self._task_queues.merge.max_jobs > 1 and \
+ "ebuild-locks" in self.settings.features:
+ # Use a separate queue for ebuild-locks when the merge
+ # queue allows more than 1 job (due to parallel-install),
+ # since the portage.locks module does not behave as desired
+ # if we try to lock the same file multiple times
+ # concurrently from the same process.
+ self._task_queues.ebuild_locks.add(setup_phase)
+ else:
+ self._task_queues.merge.add(setup_phase)
+ self._schedule()
+
+ def _schedule_unpack(self, unpack_phase):
+ """
+ Schedule an unpack phase on the unpack queue, in order
+ to serialize $DISTDIR access for live ebuilds.
+ """
+ self._task_queues.unpack.add(unpack_phase)
+
+ def _find_blockers(self, new_pkg):
+ """
+ Returns a callable.
+ """
+ def get_blockers():
+ return self._find_blockers_impl(new_pkg)
+ return get_blockers
+
+ def _find_blockers_impl(self, new_pkg):
+ if self._opts_ignore_blockers.intersection(self.myopts):
+ return None
+
+ blocker_db = self._blocker_db[new_pkg.root]
+
+ blocked_pkgs = []
+ for blocking_pkg in blocker_db.findInstalledBlockers(new_pkg):
+ if new_pkg.slot_atom == blocking_pkg.slot_atom:
+ continue
+ if new_pkg.cpv == blocking_pkg.cpv:
+ continue
+ blocked_pkgs.append(blocking_pkg)
+
+ return blocked_pkgs
+
+ def _generate_digests(self):
+ """
+ Generate digests if necessary for --digests or FEATURES=digest.
+ In order to avoid interference, this must be done before parallel
+ tasks are started.
+ """
+
+ digest = '--digest' in self.myopts
+ if not digest:
+ for pkgsettings in self.pkgsettings.values():
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if 'digest' in pkgsettings.features:
+ digest = True
+ break
+
+ if not digest:
+ return os.EX_OK
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != 'ebuild' or \
+ x.operation != 'merge':
+ continue
+ pkgsettings = self.pkgsettings[x.root]
+ if pkgsettings.mycpv is not None:
+ # ensure that we are using global features
+ # settings rather than those from package.env
+ pkgsettings.reset()
+ if '--digest' not in self.myopts and \
+ 'digest' not in pkgsettings.features:
+ continue
+ portdb = x.root_config.trees['porttree'].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ pkgsettings['O'] = os.path.dirname(ebuild_path)
+ if not digestgen(mysettings=pkgsettings, myportdb=portdb):
+ writemsg_level(
+ "!!! Unable to generate manifest for '%s'.\n" \
+ % x.cpv, level=logging.ERROR, noiselevel=-1)
+ return FAILURE
+
+ return os.EX_OK
+
+ def _check_manifests(self):
+ # Verify all the manifests now so that the user is notified of failure
+ # as soon as possible.
+ if "strict" not in self.settings.features or \
+ "--fetchonly" in self.myopts or \
+ "--fetch-all-uri" in self.myopts:
+ return os.EX_OK
+
+ shown_verifying_msg = False
+ quiet_settings = {}
+ for myroot, pkgsettings in self.pkgsettings.items():
+ quiet_config = portage.config(clone=pkgsettings)
+ quiet_config["PORTAGE_QUIET"] = "1"
+ quiet_config.backup_changes("PORTAGE_QUIET")
+ quiet_settings[myroot] = quiet_config
+ del quiet_config
+
+ failures = 0
+
+ for x in self._mergelist:
+ if not isinstance(x, Package) or \
+ x.type_name != "ebuild":
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
+ if not shown_verifying_msg:
+ shown_verifying_msg = True
+ self._status_msg("Verifying ebuild manifests")
+
+ root_config = x.root_config
+ portdb = root_config.trees["porttree"].dbapi
+ quiet_config = quiet_settings[root_config.root]
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ quiet_config["O"] = os.path.dirname(ebuild_path)
+ if not digestcheck([], quiet_config, strict=True):
+ failures |= 1
+
+ if failures:
+ return FAILURE
+ return os.EX_OK
+
+ def _add_prefetchers(self):
+
+ if not self._parallel_fetch:
+ return
+
+ prefetchers = self._prefetchers
+
+ for pkg in self._mergelist:
+ # mergelist can contain solved Blocker instances
+ if not isinstance(pkg, Package) or pkg.operation == "uninstall":
+ continue
+ prefetcher = self._create_prefetcher(pkg)
+ if prefetcher is not None:
+ # This will start the first prefetcher immediately, so that
+ # self._task() won't discard it. This avoids a case where
+ # the first prefetcher is discarded, causing the second
+ # prefetcher to occupy the fetch queue before the first
+ # fetcher has an opportunity to execute.
+ prefetchers[pkg] = prefetcher
+ self._task_queues.fetch.add(prefetcher)
+
+ def _create_prefetcher(self, pkg):
+ """
+ @return: a prefetcher, or None if not applicable
+ """
+ prefetcher = None
+
+ if not isinstance(pkg, Package):
+ pass
+
+ elif pkg.type_name == "ebuild":
+
+ prefetcher = EbuildFetcher(background=True,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ fetchonly=1, fetchall=self._build_opts.fetch_all_uri,
+ logfile=self._fetch_log,
+ pkg=pkg, prefetch=True, scheduler=self._sched_iface)
+
+ elif pkg.type_name == "binary" and \
+ "--getbinpkg" in self.myopts and \
+ pkg.root_config.trees["bintree"].isremote(pkg.cpv):
+
+ prefetcher = BinpkgPrefetcher(background=True,
+ pkg=pkg, scheduler=self._sched_iface)
+
+ return prefetcher
+
+ def _run_pkg_pretend(self):
+ """
+ Since pkg_pretend output may be important, this method sends all
+ output directly to stdout (regardless of options like --quiet or
+ --jobs).
+ """
+
+ failures = 0
+ sched_iface = self._sched_iface
+
+ for x in self._mergelist:
+ if not isinstance(x, Package):
+ continue
+
+ if x.operation == "uninstall":
+ continue
+
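+ # The pkg_pretend phase only exists in EAPI 4 and later.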
+ if x.eapi in ("0", "1", "2", "3"):
+ continue
+
+ if "pretend" not in x.defined_phases:
+ continue
+
+ out_str = ">>> Running pre-merge checks for " + colorize("INFORM", x.cpv) + "\n"
+ portage.util.writemsg_stdout(out_str, noiselevel=-1)
+
+ root_config = x.root_config
+ settings = self.pkgsettings[root_config.root]
+ settings.setcpv(x)
+
+ # setcpv/package.env allows for per-package PORTAGE_TMPDIR so we
+ # have to validate it for each package
+ rval = _check_temp_dir(settings)
+ if rval != os.EX_OK:
+ return rval
+
+ build_dir_path = os.path.join(
+ os.path.realpath(settings["PORTAGE_TMPDIR"]),
+ "portage", x.category, x.pf)
+ existing_builddir = os.path.isdir(build_dir_path)
+ settings["PORTAGE_BUILDDIR"] = build_dir_path
+ build_dir = EbuildBuildDir(scheduler=sched_iface,
+ settings=settings)
+ sched_iface.run_until_complete(build_dir.async_lock())
+ current_task = None
+
+ try:
+
+ # Clean up the existing build dir, in case pkg_pretend
+ # checks for available space (bug #390711).
+ if existing_builddir:
+ if x.built:
+ tree = "bintree"
+ infloc = os.path.join(build_dir_path, "build-info")
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError(
+ "ebuild not found for '%s'" % x.cpv)
+ portage.package.ebuild.doebuild.doebuild_environment(
+ ebuild_path, "clean", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface, settings=settings)
+ current_task = clean_phase
+ clean_phase.start()
+ clean_phase.wait()
+
+ if x.built:
+ tree = "bintree"
+ bintree = root_config.trees["bintree"].dbapi.bintree
+ fetched = False
+
+ # Display fetch on stdout, so that it's always clear what
+ # is consuming time here.
+ if bintree.isremote(x.cpv):
+ fetcher = BinpkgFetcher(pkg=x,
+ scheduler=sched_iface)
+ fetcher.start()
+ if fetcher.wait() != os.EX_OK:
+ failures += 1
+ continue
+ fetched = fetcher.pkg_path
+
+ if fetched is False:
+ filename = bintree.getname(x.cpv)
+ else:
+ filename = fetched
+ verifier = BinpkgVerifier(pkg=x,
+ scheduler=sched_iface, _pkg_path=filename)
+ current_task = verifier
+ verifier.start()
+ if verifier.wait() != os.EX_OK:
+ failures += 1
+ continue
+
+ if fetched:
+ bintree.inject(x.cpv, filename=fetched)
+
+ infloc = os.path.join(build_dir_path, "build-info")
+ ensure_dirs(infloc)
+ self._sched_iface.run_until_complete(
+ bintree.dbapi.unpack_metadata(settings, infloc))
+ ebuild_path = os.path.join(infloc, x.pf + ".ebuild")
+ settings.configdict["pkg"]["EMERGE_FROM"] = "binary"
+ settings.configdict["pkg"]["MERGE_TYPE"] = "binary"
+
+ else:
+ tree = "porttree"
+ portdb = root_config.trees["porttree"].dbapi
+ ebuild_path = portdb.findname(x.cpv, myrepo=x.repo)
+ if ebuild_path is None:
+ raise AssertionError("ebuild not found for '%s'" % x.cpv)
+ settings.configdict["pkg"]["EMERGE_FROM"] = "ebuild"
+ if self._build_opts.buildpkgonly:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "buildonly"
+ else:
+ settings.configdict["pkg"]["MERGE_TYPE"] = "source"
+
+ portage.package.ebuild.doebuild.doebuild_environment(ebuild_path,
+ "pretend", settings=settings,
+ db=self.trees[settings['EROOT']][tree].dbapi)
+
+ prepare_build_dirs(root_config.root, settings, cleanup=0)
+
+ vardb = root_config.trees['vartree'].dbapi
+ settings["REPLACING_VERSIONS"] = " ".join(
+ set(portage.versions.cpv_getversion(match) \
+ for match in vardb.match(x.slot_atom) + \
+ vardb.match('='+x.cpv)))
+ pretend_phase = EbuildPhase(
+ phase="pretend", scheduler=sched_iface,
+ settings=settings)
+
+ current_task = pretend_phase
+ pretend_phase.start()
+ ret = pretend_phase.wait()
+ if ret != os.EX_OK:
+ failures += 1
+ portage.elog.elog_process(x.cpv, settings)
+ finally:
+
+ if current_task is not None:
+ if current_task.isAlive():
+ current_task.cancel()
+ current_task.wait()
+ if current_task.returncode == os.EX_OK:
+ clean_phase = EbuildPhase(background=False,
+ phase='clean', scheduler=sched_iface,
+ settings=settings)
+ clean_phase.start()
+ clean_phase.wait()
+
+ sched_iface.run_until_complete(build_dir.async_unlock())
+
+ if failures:
+ return FAILURE
+ return os.EX_OK
+
+ def merge(self):
+ if "--resume" in self.myopts:
+ # We're resuming.
+ portage.writemsg_stdout(
+ colorize("GOOD", "*** Resuming merge...\n"), noiselevel=-1)
+ self._logger.log(" *** Resuming merge...")
+
+ self._save_resume_list()
+
+ try:
+ self._background = self._background_mode()
+ except self._unknown_internal_error:
+ return FAILURE
+
+ rval = self._handle_self_update()
+ if rval != os.EX_OK:
+ return rval
+
+ for root in self.trees:
+ root_config = self.trees[root]["root_config"]
+
+ # Even for --pretend --fetch mode, PORTAGE_TMPDIR is required
+ # since it might spawn pkg_nofetch which requires PORTAGE_BUILDDIR
+ # for ensuring sane $PWD (bug #239560) and storing elog messages.
+ tmpdir = root_config.settings.get("PORTAGE_TMPDIR", "")
+ if not tmpdir or not os.path.isdir(tmpdir):
+ msg = (
+ 'The directory specified in your PORTAGE_TMPDIR variable does not exist:',
+ tmpdir,
+ 'Please create this directory or correct your PORTAGE_TMPDIR setting.',
+ )
+ out = portage.output.EOutput()
+ for l in msg:
+ out.eerror(l)
+ return FAILURE
+
+ if self._background:
+ root_config.settings.unlock()
+ root_config.settings["PORTAGE_BACKGROUND"] = "1"
+ root_config.settings.backup_changes("PORTAGE_BACKGROUND")
+ root_config.settings.lock()
+
+ self.pkgsettings[root] = portage.config(
+ clone=root_config.settings)
+
+ keep_going = "--keep-going" in self.myopts
+ fetchonly = self._build_opts.fetchonly
+ mtimedb = self._mtimedb
+ failed_pkgs = self._failed_pkgs
+
+ rval = self._generate_digests()
+ if rval != os.EX_OK:
+ return rval
+
+ # TODO: Immediately recalculate deps here if --keep-going
+ # is enabled and corrupt manifests are detected.
+ rval = self._check_manifests()
+ if rval != os.EX_OK and not keep_going:
+ return rval
+
+ if not fetchonly:
+ rval = self._run_pkg_pretend()
+ if rval != os.EX_OK:
+ return rval
+
+ while True:
+
+ received_signal = []
+
+ def sighandler(signum, frame):
+ signal.signal(signal.SIGINT, signal.SIG_IGN)
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg("\n\nExiting on signal %(signal)s\n" % \
+ {"signal":signum})
+ self.terminate()
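+ # Follow the shell convention of exiting with 128 + signal number.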
+ received_signal.append(128 + signum)
+
+ earlier_sigint_handler = signal.signal(signal.SIGINT, sighandler)
+ earlier_sigterm_handler = signal.signal(signal.SIGTERM, sighandler)
+ earlier_sigcont_handler = \
+ signal.signal(signal.SIGCONT, self._sigcont_handler)
+ signal.siginterrupt(signal.SIGCONT, False)
+
+ try:
+ rval = self._merge()
+ finally:
+ # Restore previous handlers
+ if earlier_sigint_handler is not None:
+ signal.signal(signal.SIGINT, earlier_sigint_handler)
+ else:
+ signal.signal(signal.SIGINT, signal.SIG_DFL)
+ if earlier_sigterm_handler is not None:
+ signal.signal(signal.SIGTERM, earlier_sigterm_handler)
+ else:
+ signal.signal(signal.SIGTERM, signal.SIG_DFL)
+ if earlier_sigcont_handler is not None:
+ signal.signal(signal.SIGCONT, earlier_sigcont_handler)
+ else:
+ signal.signal(signal.SIGCONT, signal.SIG_DFL)
+
+ self._termination_check()
+ if received_signal:
+ sys.exit(received_signal[0])
+
+ if rval == os.EX_OK or fetchonly or not keep_going:
+ break
+ if "resume" not in mtimedb:
+ break
+ mergelist = self._mtimedb["resume"].get("mergelist")
+ if not mergelist:
+ break
+
+ if not failed_pkgs:
+ break
+
+ for failed_pkg in failed_pkgs:
+ mergelist.remove(list(failed_pkg.pkg))
+
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ if not mergelist:
+ break
+
+ if not self._calc_resume_list():
+ break
+
+ clear_caches(self.trees)
+ if not self._mergelist:
+ break
+
+ self._save_resume_list()
+ self._pkg_count.curval = 0
+ self._pkg_count.maxval = len([x for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"])
+ self._status_display.maxval = self._pkg_count.maxval
+
+ # Cleanup any callbacks that have been registered with the global
+ # event loop by calls to the terminate method.
+ self._cleanup()
+
+ self._logger.log(" *** Finished. Cleaning up...")
+
+ if failed_pkgs:
+ self._failed_pkgs_all.extend(failed_pkgs)
+ del failed_pkgs[:]
+
+ printer = portage.output.EOutput()
+ background = self._background
+ failure_log_shown = False
+ if background and len(self._failed_pkgs_all) == 1 and \
+ self.myopts.get('--quiet-fail', 'n') != 'y':
+ # If only one package failed then just show its
+ # whole log for easy viewing.
+ failed_pkg = self._failed_pkgs_all[-1]
+ log_file = None
+ log_file_real = None
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ try:
+ log_file = open(_unicode_encode(log_path,
+ encoding=_encodings['fs'], errors='strict'), mode='rb')
+ except IOError:
+ pass
+ else:
+ if log_path.endswith('.gz'):
+ log_file_real = log_file
+ log_file = gzip.GzipFile(filename='',
+ mode='rb', fileobj=log_file)
+
+ if log_file is not None:
+ try:
+ for line in log_file:
+ writemsg_level(line, noiselevel=-1)
+ except zlib.error as e:
+ writemsg_level("%s\n" % (e,), level=logging.ERROR,
+ noiselevel=-1)
+ finally:
+ log_file.close()
+ if log_file_real is not None:
+ log_file_real.close()
+ failure_log_shown = True
+
+ # Dump mod_echo output now since it tends to flood the terminal.
+ # This allows us to avoid having more important output, generated
+ # later, from being swept away by the mod_echo output.
+ mod_echo_output = _flush_elog_mod_echo()
+
+ if background and not failure_log_shown and \
+ self._failed_pkgs_all and \
+ self._failed_pkgs_die_msgs and \
+ not mod_echo_output:
+
+ for mysettings, key, logentries in self._failed_pkgs_die_msgs:
+ root_msg = ""
+ if mysettings["ROOT"] != "/":
+ root_msg = " merged to %s" % mysettings["ROOT"]
+ print()
+ printer.einfo("Error messages for package %s%s:" % \
+ (colorize("INFORM", key), root_msg))
+ print()
+ for phase in portage.const.EBUILD_PHASES:
+ if phase not in logentries:
+ continue
+ for msgtype, msgcontent in logentries[phase]:
+ if isinstance(msgcontent, basestring):
+ msgcontent = [msgcontent]
+ for line in msgcontent:
+ printer.eerror(line.strip("\n"))
+
+ if self._post_mod_echo_msgs:
+ for msg in self._post_mod_echo_msgs:
+ msg()
+
+ if len(self._failed_pkgs_all) > 1 or \
+ (self._failed_pkgs_all and keep_going):
+ if len(self._failed_pkgs_all) > 1:
+ msg = "The following %d packages have " % \
+ len(self._failed_pkgs_all) + \
+ "failed to build, install, or execute postinst:"
+ else:
+ msg = "The following package has " + \
+ "failed to build, install, or execute postinst:"
+
+ printer.eerror("")
+ for line in textwrap.wrap(msg, 72):
+ printer.eerror(line)
+ printer.eerror("")
+ for failed_pkg in self._failed_pkgs_all:
+ # Use unicode_literals to force unicode format string so
+ # that Package.__unicode__() is called in python2.
+ msg = " %s" % (failed_pkg.pkg,)
+ if failed_pkg.postinst_failure:
+ msg += " (postinst failed)"
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ printer.eerror(msg)
+ if log_path is not None:
+ printer.eerror(" '%s'" % colorize('INFORM', log_path))
+ printer.eerror("")
+
+ if self._failed_pkgs_all:
+ return FAILURE
+ return os.EX_OK
+
+ def _elog_listener(self, mysettings, key, logentries, fulltext):
+ errors = portage.elog.filter_loglevels(logentries, ["ERROR"])
+ if errors:
+ self._failed_pkgs_die_msgs.append(
+ (mysettings, key, errors))
+
+ def _locate_failure_log(self, failed_pkg):
+
+ log_paths = [failed_pkg.build_log]
+
+ for log_path in log_paths:
+ if not log_path:
+ continue
+
+ try:
+ log_size = os.stat(log_path).st_size
+ except OSError:
+ continue
+
+ if log_size == 0:
+ continue
+
+ return log_path
+
+ return None
+
+ def _add_packages(self):
+ pkg_queue = self._pkg_queue
+ for pkg in self._mergelist:
+ if isinstance(pkg, Package):
+ pkg_queue.append(pkg)
+ elif isinstance(pkg, Blocker):
+ pass
+
+ def _system_merge_started(self, merge):
+ """
+ Add any unsatisfied runtime deps to self._unsatisfied_system_deps.
+ In general, this keeps track of installed system packages with
+ unsatisfied RDEPEND or PDEPEND (circular dependencies). It can be
+ a fragile situation, so we don't execute any unrelated builds until
+ the circular dependencies are built and installed.
+ """
+ graph = self._digraph
+ if graph is None:
+ return
+ pkg = merge.merge.pkg
+
+ # Skip this if $ROOT != / since it shouldn't matter if there
+ # are unsatisfied system runtime deps in this case.
+ if pkg.root_config.settings["ROOT"] != "/":
+ return
+
+ completed_tasks = self._completed_tasks
+ unsatisfied = self._unsatisfied_system_deps
+
+ def ignore_non_runtime_or_satisfied(priority):
+ """
+ Ignore non-runtime and satisfied runtime priorities.
+ """
+ if isinstance(priority, DepPriority) and \
+ not priority.satisfied and \
+ (priority.runtime or priority.runtime_post):
+ return False
+ return True
+
+ # When checking for unsatisfied runtime deps, only check
+ # direct deps since indirect deps are checked when the
+ # corresponding parent is merged.
+ for child in graph.child_nodes(pkg,
+ ignore_priority=ignore_non_runtime_or_satisfied):
+ if not isinstance(child, Package) or \
+ child.operation == 'uninstall':
+ continue
+ if child is pkg:
+ continue
+ if child.operation == 'merge' and \
+ child not in completed_tasks:
+ unsatisfied.add(child)
+
+ def _merge_wait_exit_handler(self, task):
+ self._merge_wait_scheduled.remove(task)
+ self._merge_exit(task)
+
+ def _merge_exit(self, merge):
+ self._running_tasks.pop(id(merge), None)
+ self._do_merge_exit(merge)
+ self._deallocate_config(merge.merge.settings)
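+ # Count only successful merges of new packages toward the display
+ # total; uninstalls (installed packages) are excluded.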
+ if merge.returncode == os.EX_OK and \
+ not merge.merge.pkg.installed:
+ self._status_display.curval += 1
+ self._status_display.merges = len(self._task_queues.merge)
+ self._schedule()
+
+ def _do_merge_exit(self, merge):
+ pkg = merge.merge.pkg
+ if merge.returncode != os.EX_OK:
+ settings = merge.merge.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=pkg,
+ returncode=merge.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
+ self._status_display.failed = len(self._failed_pkgs)
+ return
+
+ if merge.postinst_failure:
+ # Append directly to _failed_pkgs_all for non-critical errors.
+ self._failed_pkgs_all.append(self._failed_pkg(
+ build_dir=merge.merge.settings.get("PORTAGE_BUILDDIR"),
+ build_log=merge.merge.settings.get("PORTAGE_LOG_FILE"),
+ pkg=pkg,
+ postinst_failure=True,
+ returncode=merge.returncode))
+ self._failed_pkg_msg(self._failed_pkgs_all[-1],
+ "execute postinst for", "for")
+
+ self._task_complete(pkg)
+ pkg_to_replace = merge.merge.pkg_to_replace
+ if pkg_to_replace is not None:
+ # When a package is replaced, mark its uninstall
+ # task complete (if any).
+ if self._digraph is not None and \
+ pkg_to_replace in self._digraph:
+ try:
+ self._pkg_queue.remove(pkg_to_replace)
+ except ValueError:
+ pass
+ self._task_complete(pkg_to_replace)
+ else:
+ self._pkg_cache.pop(pkg_to_replace, None)
+
+ if pkg.installed:
+ return
+
+ # Call mtimedb.commit() after each merge so that
+ # --resume still works after being interrupted
+ # by reboot, sigkill or similar.
+ mtimedb = self._mtimedb
+ mtimedb["resume"]["mergelist"].remove(list(pkg))
+ if not mtimedb["resume"]["mergelist"]:
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ def _build_exit(self, build):
+ self._running_tasks.pop(id(build), None)
+ if build.returncode == os.EX_OK and self._terminated_tasks:
+ # We've been interrupted, so we won't
+ # add this to the merge queue.
+ self.curval += 1
+ self._deallocate_config(build.settings)
+ elif build.returncode == os.EX_OK:
+ self.curval += 1
+ merge = PackageMerge(merge=build, scheduler=self._sched_iface)
+ self._running_tasks[id(merge)] = merge
+ if not build.build_opts.buildpkgonly and \
+ build.pkg in self._deep_system_deps:
+ # Since dependencies on system packages are frequently
+ # unspecified, merge them only when no builds are executing.
+ self._merge_wait_queue.append(merge)
+ merge.addStartListener(self._system_merge_started)
+ else:
+ self._task_queues.merge.add(merge)
+ merge.addExitListener(self._merge_exit)
+ self._status_display.merges = len(self._task_queues.merge)
+ else:
+ settings = build.settings
+ build_dir = settings.get("PORTAGE_BUILDDIR")
+ build_log = settings.get("PORTAGE_LOG_FILE")
+
+ self._failed_pkgs.append(self._failed_pkg(
+ build_dir=build_dir, build_log=build_log,
+ pkg=build.pkg,
+ returncode=build.returncode))
+ if not self._terminated_tasks:
+ self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
+ self._status_display.failed = len(self._failed_pkgs)
+ self._deallocate_config(build.settings)
+ self._jobs -= 1
+ self._status_display.running = self._jobs
+ self._schedule()
+
+ def _extract_exit(self, build):
+ self._build_exit(build)
+
+ def _task_complete(self, pkg):
+ self._completed_tasks.add(pkg)
+ self._unsatisfied_system_deps.discard(pkg)
+ self._choose_pkg_return_early = False
+ blocker_db = self._blocker_db[pkg.root]
+ blocker_db.discardBlocker(pkg)
+
+ def _main_loop(self):
+ self._main_exit = self._event_loop.create_future()
+
+ if self._max_load is not None and \
+ self._loadavg_latency is not None and \
+ (self._max_jobs is True or self._max_jobs > 1):
+ # We have to schedule periodically, in case the load
+ # average has changed since the last call.
+ self._main_loadavg_handle = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ self._schedule()
+ self._event_loop.run_until_complete(self._main_exit)
+
+ def _merge(self):
+
+ if self._opts_no_background.intersection(self.myopts):
+ self._set_max_jobs(1)
+
+ self._add_prefetchers()
+ self._add_packages()
+ failed_pkgs = self._failed_pkgs
+ portage.locks._quiet = self._background
+ portage.elog.add_listener(self._elog_listener)
+
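+ # Periodically refresh the status display; the handle attribute
+ # below lets the timer be cancelled during cleanup.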
+ def display_callback():
+ self._status_display.display()
+ display_callback.handle = self._event_loop.call_later(
+ self._max_display_latency, display_callback)
+ display_callback.handle = None
+
+ if self._status_display._isatty and not self._status_display.quiet:
+ display_callback()
+ rval = os.EX_OK
+
+ try:
+ self._main_loop()
+ finally:
+ self._main_loop_cleanup()
+ portage.locks._quiet = False
+ portage.elog.remove_listener(self._elog_listener)
+ if display_callback.handle is not None:
+ display_callback.handle.cancel()
+ if failed_pkgs:
+ rval = failed_pkgs[-1].returncode
+
+ return rval
+
+ def _main_loop_cleanup(self):
+ del self._pkg_queue[:]
+ self._completed_tasks.clear()
+ self._deep_system_deps.clear()
+ self._unsatisfied_system_deps.clear()
+ self._choose_pkg_return_early = False
+ self._status_display.reset()
+ self._digraph = None
+ self._task_queues.fetch.clear()
+ self._prefetchers.clear()
+ self._main_exit = None
+ if self._main_loadavg_handle is not None:
+ self._main_loadavg_handle.cancel()
+ self._main_loadavg_handle = None
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+ self._job_delay_timeout_id = None
+ if self._schedule_merge_wakeup_task is not None:
+ self._schedule_merge_wakeup_task.cancel()
+ self._schedule_merge_wakeup_task = None
+
+ def _choose_pkg(self):
+ """
+ Choose a task that has all its dependencies satisfied. This is used
+ for parallel build scheduling, and ensures that we don't build
+ anything with deep dependencies that have yet to be merged.
+ """
+
+ if self._choose_pkg_return_early:
+ return None
+
+ if self._digraph is None:
+ if self._is_work_scheduled() and \
+ not ("--nodeps" in self.myopts and \
+ (self._max_jobs is True or self._max_jobs > 1)):
+ self._choose_pkg_return_early = True
+ return None
+ return self._pkg_queue.pop(0)
+
+ if not self._is_work_scheduled():
+ return self._pkg_queue.pop(0)
+
+ self._prune_digraph()
+
+ chosen_pkg = None
+
+ # Prefer uninstall operations when available.
+ graph = self._digraph
+ for pkg in self._pkg_queue:
+ if pkg.operation == 'uninstall' and \
+ not graph.child_nodes(pkg):
+ chosen_pkg = pkg
+ break
+
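+ # Otherwise take the first package whose deep dependencies contain no
+ # scheduled merges; packages later in the queue may be ignored since
+ # they would merge after this one anyway.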
+ if chosen_pkg is None:
+ later = set(self._pkg_queue)
+ for pkg in self._pkg_queue:
+ later.remove(pkg)
+ if not self._dependent_on_scheduled_merges(pkg, later):
+ chosen_pkg = pkg
+ break
+
+ if chosen_pkg is not None:
+ self._pkg_queue.remove(chosen_pkg)
+
+ if chosen_pkg is None:
+ # There's no point in searching for a package to
+ # choose until at least one of the existing jobs
+ # completes.
+ self._choose_pkg_return_early = True
+
+ return chosen_pkg
+
+ def _dependent_on_scheduled_merges(self, pkg, later):
+ """
+ Traverse the subgraph of the given package's deep dependencies
+ to see if it contains any scheduled merges.
+ @param pkg: a package to check dependencies for
+ @type pkg: Package
+ @param later: packages for which dependence should be ignored
+ since they will be merged later than pkg anyway and therefore
+ delaying the merge of pkg will not result in a more optimal
+ merge order
+ @type later: set
+ @rtype: bool
+ @return: True if the package is dependent, False otherwise.
+ """
+
+ graph = self._digraph
+ completed_tasks = self._completed_tasks
+
+ dependent = False
+ traversed_nodes = set([pkg])
+ direct_deps = graph.child_nodes(pkg)
+ node_stack = direct_deps
+ direct_deps = frozenset(direct_deps)
+ while node_stack:
+ node = node_stack.pop()
+ if node in traversed_nodes:
+ continue
+ traversed_nodes.add(node)
+ if not ((node.installed and node.operation == "nomerge") or \
+ (node.operation == "uninstall" and \
+ node not in direct_deps) or \
+ node in completed_tasks or \
+ node in later):
+ dependent = True
+ break
+
+ # Don't traverse children of uninstall nodes since
+ # those aren't dependencies in the usual sense.
+ if node.operation != "uninstall":
+ node_stack.extend(graph.child_nodes(node))
+
+ return dependent
+
+ def _allocate_config(self, root):
+ """
+ Allocate a unique config instance for a task in order
+ to prevent interference between parallel tasks.
+ """
+ if self._config_pool[root]:
+ temp_settings = self._config_pool[root].pop()
+ else:
+ temp_settings = portage.config(clone=self.pkgsettings[root])
+ # Since config.setcpv() isn't guaranteed to call config.reset() due to
+ # performance reasons, call it here to make sure all settings from the
+ # previous package get flushed out (such as PORTAGE_LOG_FILE).
+ temp_settings.reload()
+ temp_settings.reset()
+ return temp_settings
+
+ def _deallocate_config(self, settings):
+ self._config_pool[settings['EROOT']].append(settings)
+
+ def _keep_scheduling(self):
+ return bool(not self._terminated.is_set() and self._pkg_queue and \
+ not (self._failed_pkgs and not self._build_opts.fetchonly))
+
+ def _is_work_scheduled(self):
+ return bool(self._running_tasks)
+
+ def _running_job_count(self):
+ return self._jobs
+
+ def _schedule_tasks(self):
+
+ while True:
+
+ state_change = 0
+
+ # When the number of jobs and merges drops to zero,
+ # process a single merge from _merge_wait_queue if
+ # it's not empty. We only process one since these are
+ # special packages and we want to ensure that
+ # parallel-install does not cause more than one of
+ # them to install at the same time.
+ if (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge):
+ task = self._merge_wait_queue.popleft()
+ task.scheduler = self._sched_iface
+ self._merge_wait_scheduled.append(task)
+ self._task_queues.merge.add(task)
+ task.addExitListener(self._merge_wait_exit_handler)
+ self._status_display.merges = len(self._task_queues.merge)
+ state_change += 1
+
+ if self._schedule_tasks_imp():
+ state_change += 1
+
+ self._status_display.display()
+
+ # Cancel prefetchers if they're the only reason
+ # the main poll loop is still running.
+ if self._failed_pkgs and not self._build_opts.fetchonly and \
+ not self._is_work_scheduled() and \
+ self._task_queues.fetch:
+ # Since this happens asynchronously, it doesn't count in
+ # state_change (counting it triggers an infinite loop).
+ self._task_queues.fetch.clear()
+
+ if not (state_change or \
+ (self._merge_wait_queue and not self._jobs and
+ not self._task_queues.merge)):
+ break
+
+ if not (self._is_work_scheduled() or
+ self._keep_scheduling() or self._main_exit.done()):
+ self._main_exit.set_result(None)
+ elif self._main_loadavg_handle is not None:
+ self._main_loadavg_handle.cancel()
+ self._main_loadavg_handle = self._event_loop.call_later(
+ self._loadavg_latency, self._schedule)
+
+ # Failure to schedule *after* self._task_queues.merge becomes
+ # empty will cause the scheduler to hang as in bug 711322.
+ # Do not rely on scheduling which occurs via the _merge_exit
+ # method, since the order of callback invocation may cause
+ # self._task_queues.merge to appear non-empty when it is
+ # about to become empty.
+ if (self._task_queues.merge and (self._schedule_merge_wakeup_task is None
+ or self._schedule_merge_wakeup_task.done())):
+ self._schedule_merge_wakeup_task = asyncio.ensure_future(
+ self._task_queues.merge.wait(), loop=self._event_loop)
+ self._schedule_merge_wakeup_task.add_done_callback(
+ self._schedule_merge_wakeup)
+
+ def _schedule_merge_wakeup(self, future):
+ if not future.cancelled():
+ future.result()
+ if self._main_exit is not None and not self._main_exit.done():
+ self._schedule()
+
+ def _sigcont_handler(self, signum, frame):
+ self._sigcont_time = time.time()
+
+ def _job_delay(self):
+ """
+ @rtype: bool
+ @return: True if job scheduling should be delayed, False otherwise.
+ """
+
+ if self._jobs and self._max_load is not None:
+
+ current_time = time.time()
+
+ if self._sigcont_time is not None:
+
+ elapsed_seconds = current_time - self._sigcont_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and \
+ elapsed_seconds < self._sigcont_delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+
+ self._job_delay_timeout_id = self._event_loop.call_later(
+ self._sigcont_delay - elapsed_seconds,
+ self._schedule)
+ return True
+
+ # Only set this to None after the delay has expired,
+ # since this method may be called again before the
+ # delay has expired.
+ self._sigcont_time = None
+
+ try:
+ avg1, avg5, avg15 = getloadavg()
+ except OSError:
+ return False
+
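+ # Scale the delay linearly with the 1-minute load average relative
+ # to --load-average, capped at _job_delay_max seconds.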
+ delay = self._job_delay_max * avg1 / self._max_load
+ if delay > self._job_delay_max:
+ delay = self._job_delay_max
+ elapsed_seconds = current_time - self._previous_job_start_time
+ # elapsed_seconds < 0 means the system clock has been adjusted
+ if elapsed_seconds > 0 and elapsed_seconds < delay:
+
+ if self._job_delay_timeout_id is not None:
+ self._job_delay_timeout_id.cancel()
+
+ self._job_delay_timeout_id = self._event_loop.call_later(
+ delay - elapsed_seconds, self._schedule)
+ return True
+
+ return False
+
+ def _schedule_tasks_imp(self):
+ """
+ @rtype: bool
+ @return: True if state changed, False otherwise.
+ """
+
+ state_change = 0
+
+ while True:
+
+ if not self._keep_scheduling():
+ return bool(state_change)
+
+ if self._choose_pkg_return_early or \
+ self._merge_wait_scheduled or \
+ (self._jobs and self._unsatisfied_system_deps) or \
+ not self._can_add_job() or \
+ self._job_delay():
+ return bool(state_change)
+
+ pkg = self._choose_pkg()
+ if pkg is None:
+ return bool(state_change)
+
+ state_change += 1
+
+ if not pkg.installed:
+ self._pkg_count.curval += 1
+
+ task = self._task(pkg)
+
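+ # Dispatch by package type: installed packages are uninstall merges,
+ # built packages (binaries) are extracted, and everything else is
+ # built from source.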
+ if pkg.installed:
+ merge = PackageMerge(merge=task, scheduler=self._sched_iface)
+ self._running_tasks[id(merge)] = merge
+ self._task_queues.merge.addFront(merge)
+ merge.addExitListener(self._merge_exit)
+
+ elif pkg.built:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.scheduler = self._sched_iface
+ self._task_queues.jobs.add(task)
+ task.addExitListener(self._extract_exit)
+
+ else:
+ self._jobs += 1
+ self._previous_job_start_time = time.time()
+ self._status_display.running = self._jobs
+ self._running_tasks[id(task)] = task
+ task.scheduler = self._sched_iface
+ self._task_queues.jobs.add(task)
+ task.addExitListener(self._build_exit)
+
+ return bool(state_change)
+
+ def _task(self, pkg):
+
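+ # For anything other than an uninstall, look up the installed
+ # package (same slot, or same cpv in a different SLOT) that this
+ # merge will replace, so it can be passed as pkg_to_replace.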
+ pkg_to_replace = None
+ if pkg.operation != "uninstall":
+ vardb = pkg.root_config.trees["vartree"].dbapi
+ previous_cpv = [x for x in vardb.match(pkg.slot_atom) \
+ if portage.cpv_getkey(x) == pkg.cp]
+ if not previous_cpv and vardb.cpv_exists(pkg.cpv):
+ # same cpv, different SLOT
+ previous_cpv = [pkg.cpv]
+ if previous_cpv:
+ previous_cpv = previous_cpv.pop()
+ pkg_to_replace = self._pkg(previous_cpv,
+ "installed", pkg.root_config, installed=True,
+ operation="uninstall")
+
+ try:
+ prefetcher = self._prefetchers.pop(pkg, None)
+ except KeyError:
+ # KeyError observed with PyPy 1.8, despite None given as default.
+ # Note that PyPy 1.8 has the same WeakValueDictionary code as
+ # CPython 2.7, so it may be possible for CPython to raise KeyError
+ # here as well.
+ prefetcher = None
+ if prefetcher is not None and not prefetcher.isAlive():
+ try:
+ self._task_queues.fetch._task_queue.remove(prefetcher)
+ except ValueError:
+ pass
+ prefetcher = None
+
+ task = MergeListItem(args_set=self._args_set,
+ background=self._background, binpkg_opts=self._binpkg_opts,
+ build_opts=self._build_opts,
+ config_pool=self._ConfigPool(pkg.root,
+ self._allocate_config, self._deallocate_config),
+ emerge_opts=self.myopts,
+ find_blockers=self._find_blockers(pkg), logger=self._logger,
+ mtimedb=self._mtimedb, pkg=pkg, pkg_count=self._pkg_count.copy(),
+ pkg_to_replace=pkg_to_replace,
+ prefetcher=prefetcher,
+ scheduler=self._sched_iface,
+ settings=self._allocate_config(pkg.root),
+ statusMessage=self._status_msg,
+ world_atom=self._world_atom)
+
+ return task
+
+ def _failed_pkg_msg(self, failed_pkg, action, preposition):
+ pkg = failed_pkg.pkg
+ msg = "%s to %s %s" % \
+ (bad("Failed"), action, colorize("INFORM", pkg.cpv))
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " %s %s" % (preposition, pkg.root)
+
+ log_path = self._locate_failure_log(failed_pkg)
+ if log_path is not None:
+ msg += ", Log file:"
+ self._status_msg(msg)
+
+ if log_path is not None:
+ self._status_msg(" '%s'" % (colorize("INFORM", log_path),))
+
+ def _status_msg(self, msg):
+ """
+ Display a brief status message (no newlines) in the status display.
+ This is called by tasks to provide feedback to the user. This
+ delegates the responsibility of generating \r and \n control
+ characters, guaranteeing that lines are created or erased when
+ necessary and appropriate.
+
+ @type msg: str
+ @param msg: a brief status message (no newlines allowed)
+ """
+ if not self._background:
+ writemsg_level("\n")
+ self._status_display.displayMessage(msg)
+
+ def _save_resume_list(self):
+ """
+ Do this before verifying the ebuild Manifests, since it might
+ be possible for the user to use --resume --skipfirst to get past
+ a non-essential package with a broken digest.
+ """
+ mtimedb = self._mtimedb
+
+ mtimedb["resume"] = {}
+ # Stored as a dict starting with portage-2.1.6_rc1, and supported
+ # by >=portage-2.1.3_rc8. Versions <portage-2.1.3_rc8 only support
+ # a list type for options.
+ mtimedb["resume"]["myopts"] = self.myopts.copy()
+
+ # Convert Atom instances to plain str.
+ mtimedb["resume"]["favorites"] = [str(x) for x in self._favorites]
+ mtimedb["resume"]["mergelist"] = [list(x) \
+ for x in self._mergelist \
+ if isinstance(x, Package) and x.operation == "merge"]
+
+ mtimedb.commit()
+
+ def _calc_resume_list(self):
+ """
+ Use the current resume list to calculate a new one,
+ dropping any packages with unsatisfied deps.
+ @rtype: bool
+ @return: True if successful, False otherwise.
+ """
+ print(colorize("GOOD", "*** Resuming merge..."))
+
+ # free some memory before creating
+ # the resume depgraph
+ self._destroy_graph()
+
+ myparams = create_depgraph_params(self.myopts, None)
+ success = False
+ e = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ self.settings, self.trees, self._mtimedb, self.myopts,
+ myparams, self._spinner)
+ except depgraph.UnsatisfiedResumeDep as exc:
+ # rename variable to avoid python-3.0 error:
+ # SyntaxError: can not delete variable 'e' referenced in nested
+ # scope
+ e = exc
+ mydepgraph = e.depgraph
+ dropped_tasks = {}
+
+ if e is not None:
+ def unsatisfied_resume_dep_msg():
+ mydepgraph.display_problems()
+ out = portage.output.EOutput()
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ show_parents = set()
+ for dep in e.value:
+ if dep.parent in show_parents:
+ continue
+ show_parents.add(dep.parent)
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ self._post_mod_echo_msgs.append(unsatisfied_resume_dep_msg)
+ return False
+
+ if success and self._show_list():
+ mydepgraph.display(mydepgraph.altlist(), favorites=self._favorites)
+
+ if not success:
+ self._post_mod_echo_msgs.append(mydepgraph.display_problems)
+ return False
+ mydepgraph.display_problems()
+ self._init_graph(mydepgraph.schedulerGraph())
+
+ msg_width = 75
+ for task, atoms in dropped_tasks.items():
+ if not (isinstance(task, Package) and task.operation == "merge"):
+ continue
+ pkg = task
+ msg = "emerge --keep-going:" + \
+ " %s" % (pkg.cpv,)
+ if pkg.root_config.settings["ROOT"] != "/":
+ msg += " for %s" % (pkg.root,)
+ if not atoms:
+ msg += " dropped because it is masked or unavailable"
+ else:
+ msg += " dropped because it requires %s" % ", ".join(atoms)
+ for line in textwrap.wrap(msg, msg_width):
+ eerror(line, phase="other", key=pkg.cpv)
+ settings = self.pkgsettings[pkg.root]
+ # Ensure that log collection from $T is disabled inside
+ # elog_process(), since any logs that might exist are
+ # not valid here.
+ settings.pop("T", None)
+ portage.elog.elog_process(pkg.cpv, settings)
+ self._failed_pkgs_all.append(self._failed_pkg(pkg=pkg))
+
+ return True
+
+ def _show_list(self):
+ myopts = self.myopts
+ if "--quiet" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts):
+ return True
+ return False
+
+ def _world_atom(self, pkg):
+ """
+ Add the package to, or remove it from, the world file, but only
+ if it's supposed to be added or removed. Otherwise, do nothing.
+ """
+
+ if set(("--buildpkgonly", "--fetchonly",
+ "--fetch-all-uri",
+ "--oneshot", "--onlydeps",
+ "--pretend")).intersection(self.myopts):
+ return
+
+ if pkg.root != self.target_root:
+ return
+
+ args_set = self._args_set
+ if not args_set.findAtomForPackage(pkg):
+ return
+
+ logger = self._logger
+ pkg_count = self._pkg_count
+ root_config = pkg.root_config
+ world_set = root_config.sets["selected"]
+ world_locked = False
+ atom = None
+
+ if pkg.operation != "uninstall":
+ atom = self._world_atoms.get(pkg)
+
+ try:
+
+ if hasattr(world_set, "lock"):
+ world_set.lock()
+ world_locked = True
+
+ if hasattr(world_set, "load"):
+ world_set.load() # maybe it's changed on disk
+
+ if pkg.operation == "uninstall":
+ if hasattr(world_set, "cleanPackage"):
+ world_set.cleanPackage(pkg.root_config.trees["vartree"].dbapi,
+ pkg.cpv)
+ if hasattr(world_set, "remove"):
+ for s in pkg.root_config.setconfig.active:
+ world_set.remove(SETPREFIX+s)
+ else:
+ if atom is not None:
+ if hasattr(world_set, "add"):
+ self._status_msg(('Recording %s in "world" ' + \
+ 'favorites file...') % atom)
+ logger.log(" === (%s of %s) Updating world file (%s)" % \
+ (pkg_count.curval, pkg_count.maxval, pkg.cpv))
+ world_set.add(atom)
+ else:
+ writemsg_level('\n!!! Unable to record %s in "world"\n' % \
+ (atom,), level=logging.WARN, noiselevel=-1)
+ finally:
+ if world_locked:
+ world_set.unlock()
+
+ def _pkg(self, cpv, type_name, root_config, installed=False,
+ operation=None, myrepo=None):
+ """
+ Get a package instance from the cache, or create a new
+ one if necessary. Raises KeyError from aux_get if it
+ fails for some reason (package does not exist or is
+ corrupt).
+ """
+
+ # Reuse existing instance when available.
+ pkg = self._pkg_cache.get(Package._gen_hash_key(cpv=cpv,
+ type_name=type_name, repo_name=myrepo, root_config=root_config,
+ installed=installed, operation=operation))
+
+ if pkg is not None:
+ return pkg
+
+ tree_type = depgraph.pkg_tree_map[type_name]
+ db = root_config.trees[tree_type].dbapi
+ db_keys = list(self.trees[root_config.root][
+ tree_type].dbapi._aux_cache_keys)
+ metadata = zip(db_keys, db.aux_get(cpv, db_keys, myrepo=myrepo))
+ pkg = Package(built=(type_name != "ebuild"),
+ cpv=cpv, installed=installed, metadata=metadata,
+ root_config=root_config, type_name=type_name)
+ self._pkg_cache[pkg] = pkg
+ return pkg
diff --git a/gosbs/_emerge/__init__.py b/gosbs/_emerge/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/gosbs/_emerge/actions.py b/gosbs/_emerge/actions.py
new file mode 100644
index 0000000..db230cc
--- /dev/null
+++ b/gosbs/_emerge/actions.py
@@ -0,0 +1,3378 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import division, print_function, unicode_literals
+
+import collections
+import errno
+import logging
+import operator
+import platform
+import pwd
+import random
+import re
+import signal
+import socket
+import stat
+import subprocess
+import sys
+import tempfile
+import textwrap
+import time
+import warnings
+from itertools import chain
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'portage.dbapi._similar_name_search:similar_name_search',
+ 'portage.debug',
+ 'portage.news:count_unread_news,display_news_notifications',
+ 'portage.util._get_vm_info:get_vm_info',
+ 'portage.util.locale:check_locale',
+ 'portage.emaint.modules.sync.sync:SyncRepos',
+ '_emerge.chk_updated_cfg_files:chk_updated_cfg_files',
+ '_emerge.help:help@emerge_help',
+ '_emerge.post_emerge:display_news_notification,post_emerge',
+ '_emerge.stdout_spinner:stdout_spinner',
+)
+
+from portage import os
+from portage import shutil
+from portage import eapi_is_supported, _encodings, _unicode_decode
+from portage.cache.cache_errors import CacheError
+from portage.const import GLOBAL_CONFIG_PATH, VCS_DIRS, _DEPCLEAN_LIB_CHECK_DEFAULT
+from portage.const import SUPPORTED_BINPKG_FORMATS, TIMESTAMP_FORMAT
+from portage.dbapi.dep_expand import dep_expand
+from portage.dbapi._expand_new_virt import expand_new_virt
+from portage.dbapi.IndexedPortdb import IndexedPortdb
+from portage.dbapi.IndexedVardb import IndexedVardb
+from portage.dep import Atom, _repo_separator, _slot_separator
+from portage.eclass_cache import hashed_path
+from portage.exception import InvalidAtom, InvalidData, ParseError
+from portage.output import blue, colorize, create_color_func, darkgreen, \
+ red, xtermTitle, xtermTitleReset, yellow
+good = create_color_func("GOOD")
+bad = create_color_func("BAD")
+warn = create_color_func("WARN")
+from portage.package.ebuild._ipc.QueryCommand import QueryCommand
+from portage.package.ebuild.doebuild import _check_temp_dir
+from portage.package.ebuild.fetch import _hide_url_passwd
+from portage._sets import load_default_config, SETPREFIX
+from portage._sets.base import InternalPackageSet
+from portage.util import cmp_sort_key, writemsg, varexpand, \
+ writemsg_level, writemsg_stdout
+from portage.util.digraph import digraph
+from portage.util.SlotObject import SlotObject
+from portage.util._async.run_main_scheduler import run_main_scheduler
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+from portage._global_updates import _global_updates
+from portage.sync.old_tree_timestamp import old_tree_timestamp_warn
+from portage.localization import _
+from portage.metadata import action_metadata
+from portage.emaint.main import print_results
+
+from _emerge.clear_caches import clear_caches
+from _emerge.countdown import countdown
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.Dependency import Dependency
+from _emerge.depgraph import backtrack_depgraph, depgraph, resume_depgraph
+from _emerge.DepPrioritySatisfiedRange import DepPrioritySatisfiedRange
+from _emerge.emergelog import emergelog
+from _emerge.is_valid_package_atom import is_valid_package_atom
+from gosbs._emerge.main import profile_check
+from _emerge.MetadataRegen import MetadataRegen
+from _emerge.Package import Package
+from _emerge.ProgressHandler import ProgressHandler
+from _emerge.RootConfig import RootConfig
+from gosbs._emerge.Scheduler import Scheduler
+from _emerge.search import search
+from _emerge.SetArg import SetArg
+from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
+from _emerge.unmerge import unmerge
+from _emerge.UnmergeDepPriority import UnmergeDepPriority
+from _emerge.UseFlagDisplay import pkg_use_display
+from _emerge.UserQuery import UserQuery
+
+from gosbs.builder.wrapper_depgraph import build_mydepgraph
+
+if sys.hexversion >= 0x3000000:
+ long = int
+ _unicode = str
+else:
+ _unicode = unicode
+
+def action_build(emerge_config, build_job, context, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, myopts=DeprecationWarning,
+ myaction=DeprecationWarning, myfiles=DeprecationWarning, spinner=None):
+
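+ # DeprecationWarning doubles as a sentinel default for the legacy
+ # positional parameters: any real value here means the caller is
+ # still using the deprecated calling convention handled below.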
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_build() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, trees=trees, opts=myopts)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ settings, trees, mtimedb = emerge_config
+ myopts = emerge_config.opts
+ myaction = emerge_config.action
+ myfiles = emerge_config.args
+
+ if '--usepkgonly' not in myopts:
+ old_tree_timestamp_warn(settings['PORTDIR'], settings)
+
+ # It's best for config updates in /etc/portage to be processed
+ # before we get here, so warn if they're not (bug #267103).
+ chk_updated_cfg_files(settings['EROOT'], ['/etc/portage'])
+
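+ # With --quickpkg-direct=y, installed packages from the running
+ # root may be used directly as binary packages, so the running
+ # config's vartree dbapi is added as an extra binary repo below.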
+ quickpkg_direct = ("--usepkg" in emerge_config.opts and
+ emerge_config.opts.get('--quickpkg-direct', 'n') == 'y' and
+ emerge_config.target_config is not emerge_config.running_config)
+ if '--getbinpkg' in emerge_config.opts or quickpkg_direct:
+ kwargs = {}
+ if quickpkg_direct:
+ kwargs['add_repos'] = (emerge_config.running_config.trees['vartree'].dbapi,)
+
+ try:
+ emerge_config.target_config.trees['bintree'].populate(
+ getbinpkgs='--getbinpkg' in emerge_config.opts,
+ **kwargs)
+ except ParseError as e:
+ writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
+ % e, noiselevel=-1)
+ return 1
+
+ # validate the state of the resume data
+ # so that we can make assumptions later.
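+ # A usable resume entry is a dict containing a "mergelist" of
+ # [pkg_type, pkg_root, pkg_key, pkg_action] lists, a "myopts"
+ # dict or list, and a "favorites" list; anything else is dropped.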
+ for k in ("resume", "resume_backup"):
+ if k not in mtimedb:
+ continue
+ resume_data = mtimedb[k]
+ if not isinstance(resume_data, dict):
+ del mtimedb[k]
+ continue
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ del mtimedb[k]
+ continue
+ for x in mergelist:
+ if not (isinstance(x, list) and len(x) == 4):
+ continue
+ pkg_type, pkg_root, pkg_key, pkg_action = x
+ if pkg_root not in trees:
+ # Current $ROOT setting differs,
+ # so the list must be stale.
+ mergelist = None
+ break
+ if not mergelist:
+ del mtimedb[k]
+ continue
+ resume_opts = resume_data.get("myopts")
+ if not isinstance(resume_opts, (dict, list)):
+ del mtimedb[k]
+ continue
+ favorites = resume_data.get("favorites")
+ if not isinstance(favorites, list):
+ del mtimedb[k]
+ continue
+
+ resume = False
+ if "--resume" in myopts and \
+ ("resume" in mtimedb or
+ "resume_backup" in mtimedb):
+ resume = True
+ if "resume" not in mtimedb:
+ mtimedb["resume"] = mtimedb["resume_backup"]
+ del mtimedb["resume_backup"]
+ mtimedb.commit()
+ # "myopts" is a list for backward compatibility.
+ resume_opts = mtimedb["resume"].get("myopts", [])
+ if isinstance(resume_opts, list):
+ resume_opts = dict((k,True) for k in resume_opts)
+ for opt in ("--ask", "--color", "--skipfirst", "--tree"):
+ resume_opts.pop(opt, None)
+
+ # Current options always override resume_opts.
+ resume_opts.update(myopts)
+ myopts.clear()
+ myopts.update(resume_opts)
+
+ if "--debug" in myopts:
+ writemsg_level("myopts %s\n" % (myopts,))
+
+ # Adjust config according to options of the command being resumed.
+ for myroot in trees:
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+ del myroot, mysettings
+
+ ldpath_mtimes = mtimedb["ldpath"]
+ favorites=[]
+ buildpkgonly = "--buildpkgonly" in myopts
+ pretend = "--pretend" in myopts
+ fetchonly = "--fetchonly" in myopts or "--fetch-all-uri" in myopts
+ ask = "--ask" in myopts
+ enter_invalid = '--ask-enter-invalid' in myopts
+ nodeps = "--nodeps" in myopts
+ oneshot = "--oneshot" in myopts or "--onlydeps" in myopts
+ tree = "--tree" in myopts
+ if nodeps and tree:
+ tree = False
+ del myopts["--tree"]
+ portage.writemsg(colorize("WARN", " * ") + \
+ "--tree is broken with --nodeps. Disabling...\n")
+ debug = "--debug" in myopts
+ verbose = "--verbose" in myopts
+ quiet = "--quiet" in myopts
+ myparams = create_depgraph_params(myopts, myaction)
+ mergelist_shown = False
+
+ if pretend or fetchonly:
+ # make the mtimedb readonly
+ mtimedb.filename = None
+ if '--digest' in myopts or 'digest' in settings.features:
+ if '--digest' in myopts:
+ msg = "The --digest option"
+ else:
+ msg = "The FEATURES=digest setting"
+
+ msg += " can prevent corruption from being" + \
+ " noticed. The `repoman manifest` command is the preferred" + \
+ " way to generate manifests and it is capable of doing an" + \
+ " entire repository or category at once."
+ prefix = bad(" * ")
+ writemsg(prefix + "\n")
+ for line in textwrap.wrap(msg, 72):
+ writemsg("%s%s\n" % (prefix, line))
+ writemsg(prefix + "\n")
+
+ if resume:
+ favorites = mtimedb["resume"].get("favorites")
+ if not isinstance(favorites, list):
+ favorites = []
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data["mergelist"]
+ if mergelist and "--skipfirst" in myopts:
+ for i, task in enumerate(mergelist):
+ if isinstance(task, list) and \
+ task and task[-1] == "merge":
+ del mergelist[i]
+ break
+
+ success = False
+ mydepgraph = None
+ try:
+ success, mydepgraph, dropped_tasks = resume_depgraph(
+ settings, trees, mtimedb, myopts, myparams, spinner)
+ except (portage.exception.PackageNotFound,
+ depgraph.UnsatisfiedResumeDep) as e:
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ mydepgraph = e.depgraph
+
+ from portage.output import EOutput
+ out = EOutput()
+
+ resume_data = mtimedb["resume"]
+ mergelist = resume_data.get("mergelist")
+ if not isinstance(mergelist, list):
+ mergelist = []
+ if (mergelist and debug) or (verbose and not quiet):
+ out.eerror("Invalid resume list:")
+ out.eerror("")
+ indent = " "
+ for task in mergelist:
+ if isinstance(task, list):
+ out.eerror(indent + str(tuple(task)))
+ out.eerror("")
+
+ if isinstance(e, depgraph.UnsatisfiedResumeDep):
+ out.eerror("One or more packages are either masked or " + \
+ "have missing dependencies:")
+ out.eerror("")
+ indent = " "
+ for dep in e.value:
+ if dep.atom is None:
+ out.eerror(indent + "Masked package:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ else:
+ out.eerror(indent + str(dep.atom) + " pulled in by:")
+ out.eerror(2 * indent + str(dep.parent))
+ out.eerror("")
+ msg = "The resume list contains packages " + \
+ "that are either masked or have " + \
+ "unsatisfied dependencies. " + \
+ "Please restart/continue " + \
+ "the operation manually, or use --skipfirst " + \
+ "to skip the first package in the list and " + \
+ "any other packages that may be " + \
+ "masked or have missing dependencies."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+ elif isinstance(e, portage.exception.PackageNotFound):
+ out.eerror("An expected package is " + \
+ "not available: %s" % str(e))
+ out.eerror("")
+ msg = "The resume list contains one or more " + \
+ "packages that are no longer " + \
+ "available. Please restart/continue " + \
+ "the operation manually."
+ for line in textwrap.wrap(msg, 72):
+ out.eerror(line)
+
+ if success:
+ if dropped_tasks:
+ portage.writemsg("!!! One or more packages have been " + \
+ "dropped due to\n" + \
+ "!!! masking or unsatisfied dependencies:\n\n",
+ noiselevel=-1)
+ for task, atoms in dropped_tasks.items():
+ if not atoms:
+ writemsg(" %s is masked or unavailable\n" %
+ (task,), noiselevel=-1)
+ else:
+ writemsg(" %s requires %s\n" %
+ (task, ", ".join(atoms)), noiselevel=-1)
+
+ portage.writemsg("\n", noiselevel=-1)
+ del dropped_tasks
+ else:
+ if mydepgraph is not None:
+ mydepgraph.display_problems()
+ if not (ask or pretend):
+ # delete the current list and also the backup
+ # since it's probably stale too.
+ for k in ("resume", "resume_backup"):
+ mtimedb.pop(k, None)
+ mtimedb.commit()
+
+ return 1
+ else:
+ if ("--resume" in myopts):
+ print(darkgreen("emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+
+ success, settings, trees, mtimedb, mydepgraph = build_mydepgraph(settings,
+ trees, mtimedb, myopts, myparams, myaction, myfiles, spinner, build_job, context)
+
+ if success and mydepgraph.need_config_reload():
+ load_emerge_config(emerge_config=emerge_config)
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+ settings, trees, mtimedb = emerge_config
+
+ # After config reload, the freshly instantiated binarytree
+ # instances need to load remote metadata if --getbinpkg
+ # is enabled. Use getbinpkg_refresh=False to use cached
+ # metadata, since the cache is already fresh.
+ if "--getbinpkg" in emerge_config.opts or quickpkg_direct:
+ for root_trees in emerge_config.trees.values():
+ kwargs = {}
+ if quickpkg_direct:
+ kwargs['add_repos'] = (emerge_config.running_config.trees['vartree'].dbapi,)
+
+ try:
+ root_trees["bintree"].populate(
+ getbinpkgs=True,
+ getbinpkg_refresh=False,
+ **kwargs)
+ except ParseError as e:
+ writemsg("\n\n!!!%s.\nSee make.conf(5) for more info.\n"
+ % e, noiselevel=-1)
+ return 1
+
+ if "--autounmask-only" in myopts:
+ mydepgraph.display_problems()
+ return 0
+
+ if not success:
+ return 1
+
+ mergecount = None
+ if "--pretend" not in myopts and \
+ ("--ask" in myopts or "--tree" in myopts or \
+ "--verbose" in myopts) and \
+ not ("--quiet" in myopts and "--ask" not in myopts):
+ if "--resume" in myopts:
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ prompt="Would you like to resume merging these packages?"
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ mergecount=0
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ mergecount += 1
+
+ prompt = None
+ if mergecount==0:
+ sets = trees[settings['EROOT']]['root_config'].sets
+ world_candidates = None
+ if "selective" in myparams and \
+ not oneshot and favorites:
+ # Sets that are not world candidates are filtered
+ # out here since the favorites list needs to be
+ # complete for depgraph.loadResumeCommand() to
+ # operate correctly.
+ world_candidates = [x for x in favorites \
+ if not (x.startswith(SETPREFIX) and \
+ not sets[x[1:]].world_candidate)]
+
+ if "selective" in myparams and \
+ not oneshot and world_candidates:
+ # Prompt later, inside saveNomergeFavorites.
+ prompt = None
+ else:
+ print()
+ print("Nothing to merge; quitting.")
+ print()
+ return os.EX_OK
+ elif "--fetchonly" in myopts or "--fetch-all-uri" in myopts:
+ prompt="Would you like to fetch the source files for these packages?"
+ else:
+ prompt="Would you like to merge these packages?"
+ print()
+ uq = UserQuery(myopts)
+ if prompt is not None and "--ask" in myopts and \
+ uq.query(prompt, enter_invalid) == "No":
+ print()
+ print("Quitting.")
+ print()
+ return 128 + signal.SIGINT
+ # Don't ask again (e.g. when auto-cleaning packages after merge)
+ if mergecount != 0:
+ myopts.pop("--ask", None)
+
+ if ("--pretend" in myopts) and not ("--fetchonly" in myopts or "--fetch-all-uri" in myopts):
+ if ("--resume" in myopts):
+ mymergelist = mydepgraph.altlist()
+ if len(mymergelist) == 0:
+ print(colorize("INFORM", "emerge: It seems we have nothing to resume..."))
+ return os.EX_OK
+ favorites = mtimedb["resume"]["favorites"]
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+ else:
+ retval = mydepgraph.display(
+ mydepgraph.altlist(),
+ favorites=favorites)
+ mydepgraph.display_problems()
+ mergelist_shown = True
+ if retval != os.EX_OK:
+ return retval
+
+ else:
+
+ if not mergelist_shown:
+ # If we haven't already shown the merge list above, at
+ # least show warnings about missed updates and such.
+ mydepgraph.display_problems()
+
+
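+ # Decide up front whether the installed-package database (vardb)
+ # and the binary-package database (bindb) need to be writable, so
+ # a read-only filesystem is detected before merging starts.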
+ need_write_vardb = not Scheduler. \
+ _opts_no_self_update.intersection(myopts)
+
+ need_write_bindb = not any(x in myopts for x in
+ ("--fetchonly", "--fetch-all-uri",
+ "--pretend", "--usepkgonly")) and \
+ (any("buildpkg" in trees[eroot]["root_config"].
+ settings.features for eroot in trees) or
+ any("buildsyspkg" in trees[eroot]["root_config"].
+ settings.features for eroot in trees))
+
+ if need_write_bindb or need_write_vardb:
+
+ eroots = set()
+ ebuild_eroots = set()
+ for x in mydepgraph.altlist():
+ if isinstance(x, Package) and x.operation == "merge":
+ eroots.add(x.root)
+ if x.type_name == "ebuild":
+ ebuild_eroots.add(x.root)
+
+ for eroot in eroots:
+ if need_write_vardb and \
+ not trees[eroot]["vartree"].dbapi.writable:
+ writemsg_level("!!! %s\n" %
+ _("Read-only file system: %s") %
+ trees[eroot]["vartree"].dbapi._dbroot,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if need_write_bindb and eroot in ebuild_eroots and \
+ ("buildpkg" in trees[eroot]["root_config"].
+ settings.features or
+ "buildsyspkg" in trees[eroot]["root_config"].
+ settings.features) and \
+ not trees[eroot]["bintree"].dbapi.writable:
+ writemsg_level("!!! %s\n" %
+ _("Read-only file system: %s") %
+ trees[eroot]["bintree"].pkgdir,
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--resume" in myopts):
+ favorites=mtimedb["resume"]["favorites"]
+
+ else:
+ if "resume" in mtimedb and \
+ "mergelist" in mtimedb["resume"] and \
+ len(mtimedb["resume"]["mergelist"]) > 1:
+ mtimedb["resume_backup"] = mtimedb["resume"]
+ del mtimedb["resume"]
+ mtimedb.commit()
+
+ mydepgraph.saveNomergeFavorites()
+
+ if mergecount == 0:
+ retval = os.EX_OK
+ else:
+ mergetask = Scheduler(settings, trees, mtimedb, myopts,
+ spinner, favorites=favorites,
+ graph_config=mydepgraph.schedulerGraph())
+
+ del mydepgraph
+ clear_caches(trees)
+
+ retval = mergetask.merge()
+
+ if retval == os.EX_OK and \
+ not (buildpkgonly or fetchonly or pretend):
+ if "yes" == settings.get("AUTOCLEAN"):
+ portage.writemsg_stdout(">>> Auto-cleaning packages...\n")
+ unmerge(trees[settings['EROOT']]['root_config'],
+ myopts, "clean", [],
+ ldpath_mtimes, autoclean=1)
+ else:
+ portage.writemsg_stdout(colorize("WARN", "WARNING:")
+ + " AUTOCLEAN is disabled. This can cause serious"
+ + " problems due to overlapping packages.\n")
+
+ return retval
+
+def action_config(settings, trees, myopts, myfiles):
+ enter_invalid = '--ask-enter-invalid' in myopts
+ uq = UserQuery(myopts)
+ if len(myfiles) != 1:
+ print(red("!!! config can only take a single package atom at this time\n"))
+ sys.exit(1)
+ if not is_valid_package_atom(myfiles[0], allow_repo=True):
+ portage.writemsg("!!! '%s' is not a valid package atom.\n" % myfiles[0],
+ noiselevel=-1)
+ portage.writemsg("!!! Please check ebuild(5) for full details.\n")
+ portage.writemsg("!!! (Did you specify a version but forget to prefix with '='?)\n")
+ sys.exit(1)
+ print()
+ try:
+ pkgs = trees[settings['EROOT']]['vartree'].dbapi.match(myfiles[0])
+ except portage.exception.AmbiguousPackageName as e:
+ # Multiple matches thrown from cpv_expand
+ pkgs = e.args[0]
+ if len(pkgs) == 0:
+ print("No packages found.\n")
+ sys.exit(0)
+ elif len(pkgs) > 1:
+ if "--ask" in myopts:
+ options = []
+ print("Please select a package to configure:")
+ idx = 0
+ for pkg in pkgs:
+ idx += 1
+ options.append(str(idx))
+ print(options[-1]+") "+pkg)
+ print("X) Cancel")
+ options.append("X")
+ idx = uq.query("Selection?", enter_invalid, responses=options)
+ if idx == "X":
+ sys.exit(128 + signal.SIGINT)
+ pkg = pkgs[int(idx)-1]
+ else:
+ print("The following packages available:")
+ for pkg in pkgs:
+ print("* "+pkg)
+ print("\nPlease use a specific atom or the --ask option.")
+ sys.exit(1)
+ else:
+ pkg = pkgs[0]
+
+ print()
+ if "--ask" in myopts:
+ if uq.query("Ready to configure %s?" % pkg, enter_invalid) == "No":
+ sys.exit(128 + signal.SIGINT)
+ else:
+ print("Configuring pkg...")
+ print()
+ ebuildpath = trees[settings['EROOT']]['vartree'].dbapi.findname(pkg)
+ mysettings = portage.config(clone=settings)
+ vardb = trees[mysettings['EROOT']]['vartree'].dbapi
+ debug = mysettings.get("PORTAGE_DEBUG") == "1"
+ retval = portage.doebuild(ebuildpath, "config", settings=mysettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"), cleanup=True,
+ mydbapi = trees[settings['EROOT']]['vartree'].dbapi, tree="vartree")
+ if retval == os.EX_OK:
+ portage.doebuild(ebuildpath, "clean", settings=mysettings,
+ debug=debug, mydbapi=vardb, tree="vartree")
+ print()
+ return retval
+
+def action_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, myfiles, spinner, scheduler=None):
+ # Remove packages that are neither explicitly merged nor required
+ # as a dependency of another package. The world file is explicit.
+
+ # Global depclean or prune operations are not very safe when there are
+ # missing dependencies since it's unknown how badly incomplete
+ # the dependency graph is, and we might accidentally remove packages
+ # that should have been pulled into the graph. On the other hand, it's
+ # relatively safe to ignore missing deps when only asked to remove
+ # specific packages.
+
+ msg = []
+ if "preserve-libs" not in settings.features and \
+ not myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n":
+ msg.append("Depclean may break link level dependencies. Thus, it is\n")
+ msg.append("recommended to use a tool such as " + good("`revdep-rebuild`") + " (from\n")
+ msg.append("app-portage/gentoolkit) in order to detect such breakage.\n")
+ msg.append("\n")
+ msg.append("Always study the list of packages to be cleaned for any obvious\n")
+ msg.append("mistakes. Packages that are part of the world set will always\n")
+ msg.append("be kept. They can be manually added to this set with\n")
+ msg.append(good("`emerge --noreplace <atom>`") + ". Packages that are listed in\n")
+ msg.append("package.provided (see portage(5)) will be removed by\n")
+ msg.append("depclean, even if they are part of the world set.\n")
+ msg.append("\n")
+ msg.append("As a safety measure, depclean will not remove any packages\n")
+ msg.append("unless *all* required dependencies have been resolved. As a\n")
+ msg.append("consequence of this, it often becomes necessary to run \n")
+ msg.append("%s" % good("`emerge --update --newuse --deep @world`")
+ + " prior to depclean.\n")
+
+ if action == "depclean" and "--quiet" not in myopts and not myfiles:
+ portage.writemsg_stdout("\n")
+ for x in msg:
+ portage.writemsg_stdout(colorize("WARN", " * ") + x)
+
+ root_config = trees[settings['EROOT']]['root_config']
+ vardb = root_config.trees['vartree'].dbapi
+
+ args_set = InternalPackageSet(allow_repo=True)
+ if myfiles:
+ args_set.update(myfiles)
+ matched_packages = False
+ for x in args_set:
+ if vardb.match(x):
+ matched_packages = True
+ else:
+ writemsg_level("--- Couldn't find '%s' to %s.\n" % \
+ (x.replace("null/", ""), action),
+ level=logging.WARN, noiselevel=-1)
+ if not matched_packages:
+ writemsg_level(">>> No packages selected for removal by %s\n" % \
+ action)
+ return 0
+
+ # The calculation is done in a separate function so that depgraph
+ # references go out of scope and the corresponding memory
+ # is freed before we call unmerge().
+ rval, cleanlist, ordered, req_pkg_count, unresolvable = \
+ calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+
+ clear_caches(trees)
+
+ if rval != os.EX_OK:
+ return rval
+
+ if cleanlist:
+ rval = unmerge(root_config, myopts, "unmerge",
+ cleanlist, ldpath_mtimes, ordered=ordered,
+ scheduler=scheduler)
+
+ if action == "prune":
+ return rval
+
+ if not cleanlist and "--quiet" in myopts:
+ return rval
+
+ set_atoms = {}
+ for k in ("profile", "system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ print("Packages installed: " + str(len(vardb.cpv_all())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
+ if set_atoms["profile"]:
+ print("Packages in profile: %d" % len(set_atoms["profile"]))
+ print("Required packages: "+str(req_pkg_count))
+ if "--pretend" in myopts:
+ print("Number to remove: "+str(len(cleanlist)))
+ else:
+ print("Number removed: "+str(len(cleanlist)))
+
+ return rval
+
+
+def calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
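+ # Thin compatibility wrapper: run _calc_depclean() and return its
+ # fields as a plain tuple, dropping the depgraph member.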
+ result = _calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner)
+ return result.returncode, result.cleanlist, result.ordered, result.req_pkg_count, result.unresolvable
+
+
+_depclean_result = collections.namedtuple('_depclean_result',
+ ('returncode', 'cleanlist', 'ordered', 'req_pkg_count', 'depgraph', 'unresolvable'))
+
+
+def _calc_depclean(settings, trees, ldpath_mtimes,
+ myopts, action, args_set, spinner):
+ allow_missing_deps = bool(args_set)
+
+ debug = '--debug' in myopts
+ xterm_titles = "notitles" not in settings.features
+ root_len = len(settings["ROOT"])
+ eroot = settings['EROOT']
+ root_config = trees[eroot]["root_config"]
+ psets = root_config.setconfig.psets
+ deselect = myopts.get('--deselect') != 'n'
+ required_sets = {}
+ required_sets['world'] = psets['world']
+
+ # When removing packages, a temporary version of the world 'selected'
+ # set may be used which excludes packages that are intended to be
+ # eligible for removal.
+ selected_set = psets['selected']
+ required_sets['selected'] = selected_set
+ protected_set = InternalPackageSet()
+ protected_set_name = '____depclean_protected_set____'
+ required_sets[protected_set_name] = protected_set
+
+ set_error = False
+ set_atoms = {}
+ for k in ("profile", "system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound as e:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+ writemsg_level(_("!!! The set '%s' "
+ "contains a non-existent set named '%s'.\n") %
+ (k, e), level=logging.ERROR, noiselevel=-1)
+ set_error = True
+
+ # Support @profile as an alternative to @system.
+ if not (set_atoms["system"] or set_atoms["profile"]):
+ writemsg_level(_("!!! You have no system list.\n"),
+ level=logging.WARNING, noiselevel=-1)
+
+ if not set_atoms["selected"]:
+ writemsg_level(_("!!! You have no world file.\n"),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Suppress world file warnings unless @world is completely empty,
+ # since having an empty world file can be a valid state.
+ try:
+ world_atoms = bool(root_config.setconfig.getSetAtoms('world'))
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(_("!!! The set '%s' "
+ "contains a non-existent set named '%s'.\n") %
+ ("world", e), level=logging.ERROR, noiselevel=-1)
+ set_error = True
+ else:
+ if not world_atoms:
+ writemsg_level(_("!!! Your @world set is empty.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ set_error = True
+
+ if set_error:
+ writemsg_level(_("!!! Aborting due to set configuration "
+ "errors displayed above.\n"),
+ level=logging.ERROR, noiselevel=-1)
+ return _depclean_result(1, [], False, 0, None, [])
+
+ if action == "depclean":
+ emergelog(xterm_titles, " >>> depclean")
+
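+ # Build the dependency resolver in "remove" mode and seed it from
+ # the installed-package database (vardb) rather than the repos.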
+ writemsg_level("\nCalculating dependencies ")
+ resolver_params = create_depgraph_params(myopts, "remove")
+ resolver = depgraph(settings, trees, myopts, resolver_params, spinner)
+ resolver._load_vdb()
+ vardb = resolver._frozen_config.trees[eroot]["vartree"].dbapi
+ real_vardb = trees[eroot]["vartree"].dbapi
+
+ if action == "depclean":
+
+ if args_set:
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed but not matched
+ # by an argument atom since we don't want to clean any
+ # package if something depends on it.
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg, _unicode(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ elif action == "prune":
+
+ if deselect:
+ # Start with an empty set.
+ selected_set = InternalPackageSet()
+ required_sets['selected'] = selected_set
+ # Pull in any sets nested within the selected set.
+ selected_set.update(psets['selected'].getNonAtoms())
+
+ # Pull in everything that's installed, since we don't want
+ # to prune a package if something depends on it.
+ protected_set.update(vardb.cp_all())
+
+ if not args_set:
+
+ # Try to prune everything that's slotted.
+ for cp in vardb.cp_all():
+ if len(vardb.cp_list(cp)) > 1:
+ args_set.add(cp)
+
+ # Remove atoms from world that match installed packages
+ # that are also matched by argument atoms, but do not remove
+ # them if they match the highest installed version.
+ for pkg in vardb:
+ if spinner is not None:
+ spinner.update()
+ pkgs_for_cp = vardb.match_pkgs(Atom(pkg.cp))
+ if not pkgs_for_cp or pkg not in pkgs_for_cp:
+ raise AssertionError("package expected in matches: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ highest_version = pkgs_for_cp[-1]
+ if pkg == highest_version:
+ # pkg is the highest version
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if len(pkgs_for_cp) <= 1:
+ raise AssertionError("more packages expected: " + \
+ "cp = %s, cpv = %s matches = %s" % \
+ (pkg.cp, pkg.cpv, [str(x) for x in pkgs_for_cp]))
+
+ try:
+ if args_set.findAtomForPackage(pkg) is None:
+ protected_set.add("=" + pkg.cpv)
+ continue
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg, _unicode(e))
+ del e
+ protected_set.add("=" + pkg.cpv)
+ continue
+
+ if resolver._frozen_config.excluded_pkgs:
+ excluded_set = resolver._frozen_config.excluded_pkgs
+ required_sets['__excluded__'] = InternalPackageSet()
+
+ for pkg in vardb:
+ if spinner:
+ spinner.update()
+
+ try:
+ if excluded_set.findAtomForPackage(pkg):
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+ except portage.exception.InvalidDependString as e:
+ show_invalid_depstring_notice(pkg, _unicode(e))
+ del e
+ required_sets['__excluded__'].add("=" + pkg.cpv)
+
+ success = resolver._complete_graph(required_sets={eroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+
+ resolver.display_problems()
+
+ if not success:
+ return _depclean_result(1, [], False, 0, resolver, [])
+
+ def unresolved_deps():
+
+ soname_deps = set()
+ unresolvable = set()
+ for dep in resolver._dynamic_config._initially_unsatisfied_deps:
+ if isinstance(dep.parent, Package) and \
+ (dep.priority > UnmergeDepPriority.SOFT):
+ if dep.atom.soname:
+ soname_deps.add((dep.atom, dep.parent.cpv))
+ else:
+ unresolvable.add((dep.atom, dep.parent.cpv))
+
+ if soname_deps:
+ # Generally, broken soname dependencies can safely be
+ # suppressed by a REQUIRES_EXCLUDE setting in the ebuild,
+ # so they should only trigger a warning message.
+ prefix = warn(" * ")
+ msg = []
+ msg.append("Broken soname dependencies found:")
+ msg.append("")
+ for atom, parent in soname_deps:
+ msg.append(" %s required by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ if not unresolvable:
+ return None
+
+ if unresolvable and not allow_missing_deps:
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ resolver._dynamic_config.digraph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ prefix = bad(" * ")
+ msg = []
+ msg.append("Dependencies could not be completely resolved due to")
+ msg.append("the following required packages not being installed:")
+ msg.append("")
+ for atom, parent in unresolvable:
+ # For readability, we want to display the atom with USE
+ # conditionals evaluated whenever possible. However,
+ # there is a very special case where the atom does not
+ # match because the unevaluated form contains one or
+ # more flags for which the target package has missing
+ # IUSE, but due to conditionals those flags are only
+ # visible in the unevaluated form of the atom. In this
+ # case, we must display the unevaluated atom, so that
+ # the user can see the conditional USE deps that would
+ # otherwise be invisible. Use Atom(_unicode(atom)) to
+ # test for a package where this case would matter. This
+ # is not necessarily the same as atom.without_use,
+ # since Atom(_unicode(atom)) may still contain some
+ # USE dependencies that remain after evaluation of
+ # conditionals.
+ if atom.package and atom != atom.unevaluated_atom and \
+ vardb.match(Atom(_unicode(atom))):
+ msg.append(" %s (%s) pulled in by:" %
+ (atom.unevaluated_atom, atom))
+ else:
+ msg.append(" %s pulled in by:" % (atom,))
+ msg.append(" %s" % (parent,))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Have you forgotten to do a complete update prior " + \
+ "to depclean? The most comprehensive command for this " + \
+ "purpose is as follows:", 65
+ ))
+ msg.append("")
+ msg.append(" " + \
+ good("emerge --update --newuse --deep --with-bdeps=y @world"))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Note that the --with-bdeps=y option is not required in " + \
+ "many situations. Refer to the emerge manual page " + \
+ "(run `man emerge`) for more information about " + \
+ "--with-bdeps.", 65
+ ))
+ msg.append("")
+ msg.extend(textwrap.wrap(
+ "Also, note that it may be necessary to manually uninstall " + \
+ "packages that no longer exist in the repository, since " + \
+ "it may not be possible to satisfy their dependencies.", 65
+ ))
+ if action == "prune":
+ msg.append("")
+ msg.append("If you would like to ignore " + \
+ "dependencies then use %s." % good("--nodeps"))
+ writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return unresolvable
+ return None
+
+ unresolvable = unresolved_deps()
+ if unresolvable is not None:
+ return _depclean_result(1, [], False, 0, resolver, unresolvable)
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+
+ def show_parents(child_node):
+ parent_atoms = \
+ resolver._dynamic_config._parent_atoms.get(child_node, [])
+
+ # Never display the special internal protected_set.
+ parent_atoms = [parent_atom for parent_atom in parent_atoms
+ if not (isinstance(parent_atom[0], SetArg) and
+ parent_atom[0].name == protected_set_name)]
+
+ if not parent_atoms:
+ # With --prune, the highest version can be pulled in without any
+ # real parent since all installed packages are pulled in. In that
+ # case there's nothing to show here.
+ return
+ parent_atom_dict = {}
+ for parent, atom in parent_atoms:
+ parent_atom_dict.setdefault(parent, []).append(atom)
+
+ parent_strs = []
+ for parent, atoms in parent_atom_dict.items():
+ # Display package atoms and soname
+ # atoms in separate groups.
+ atoms = sorted(atoms, reverse=True,
+ key=operator.attrgetter('package'))
+ parent_strs.append("%s requires %s" %
+ (getattr(parent, "cpv", parent),
+ ", ".join(_unicode(atom) for atom in atoms)))
+ parent_strs.sort()
+ msg = []
+ msg.append(" %s pulled in by:\n" % (child_node.cpv,))
+ for parent_str in parent_strs:
+ msg.append(" %s\n" % (parent_str,))
+ msg.append("\n")
+ portage.writemsg_stdout("".join(msg), noiselevel=-1)
+
+ def cmp_pkg_cpv(pkg1, pkg2):
+ """Sort Package instances by cpv."""
+ if pkg1.cpv > pkg2.cpv:
+ return 1
+ elif pkg1.cpv == pkg2.cpv:
+ return 0
+ else:
+ return -1
+
+ def create_cleanlist():
+
+ if "--debug" in myopts:
+ writemsg("\ndigraph:\n\n", noiselevel=-1)
+ graph.debug_print()
+ writemsg("\n", noiselevel=-1)
+
+ pkgs_to_remove = []
+
+ if action == "depclean":
+ if args_set:
+
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ arg_atom = None
+ try:
+ arg_atom = args_set.findAtomForPackage(pkg)
+ except portage.exception.InvalidDependString:
+ # this error has already been displayed by now
+ continue
+
+ if arg_atom:
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ else:
+ for pkg in sorted(vardb, key=cmp_sort_key(cmp_pkg_cpv)):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ elif action == "prune":
+
+ for atom in args_set:
+ for pkg in vardb.match_pkgs(atom):
+ if pkg not in graph:
+ pkgs_to_remove.append(pkg)
+ elif "--verbose" in myopts:
+ show_parents(pkg)
+
+ if not pkgs_to_remove:
+ writemsg_level(
+ ">>> No packages selected for removal by %s\n" % action)
+ if "--verbose" not in myopts:
+ writemsg_level(
+ ">>> To see reverse dependencies, use %s\n" % \
+ good("--verbose"))
+ if action == "prune":
+ writemsg_level(
+ ">>> To ignore dependencies, use %s\n" % \
+ good("--nodeps"))
+
+ return pkgs_to_remove
+
+ cleanlist = create_cleanlist()
+ clean_set = set(cleanlist)
+
+ depclean_lib_check = cleanlist and real_vardb._linkmap is not None and \
+ myopts.get("--depclean-lib-check", _DEPCLEAN_LIB_CHECK_DEFAULT) != "n"
+ preserve_libs = "preserve-libs" in settings.features
+ preserve_libs_restrict = False
+
+ if depclean_lib_check and preserve_libs:
+ for pkg in cleanlist:
+ if "preserve-libs" in pkg.restrict:
+ preserve_libs_restrict = True
+ break
+
+ if depclean_lib_check and \
+ (preserve_libs_restrict or not preserve_libs):
+
+ # Check if any of these packages are the sole providers of libraries
+ # with consumers that have not been selected for removal. If so, these
+ # packages and any dependencies need to be added to the graph.
+ linkmap = real_vardb._linkmap
+ consumer_cache = {}
+ provider_cache = {}
+ consumer_map = {}
+
+ writemsg_level(">>> Checking for lib consumers...\n")
+
+ for pkg in cleanlist:
+
+ if preserve_libs and "preserve-libs" not in pkg.restrict:
+ # Any needed libraries will be preserved
+ # when this package is unmerged, so there's
+ # no need to account for it here.
+ continue
+
+ pkg_dblink = real_vardb._dblink(pkg.cpv)
+ consumers = {}
+
+ for lib in pkg_dblink.getcontents():
+ lib = lib[root_len:]
+ lib_key = linkmap._obj_key(lib)
+ lib_consumers = consumer_cache.get(lib_key)
+ if lib_consumers is None:
+ try:
+ lib_consumers = linkmap.findConsumers(lib_key)
+ except KeyError:
+ continue
+ consumer_cache[lib_key] = lib_consumers
+ if lib_consumers:
+ consumers[lib_key] = lib_consumers
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in list(consumers.items()):
+ for consumer_file in list(lib_consumers):
+ if pkg_dblink.isowner(consumer_file):
+ lib_consumers.remove(consumer_file)
+ if not lib_consumers:
+ del consumers[lib]
+
+ if not consumers:
+ continue
+
+ for lib, lib_consumers in consumers.items():
+
+ soname = linkmap.getSoname(lib)
+
+ consumer_providers = []
+ for lib_consumer in lib_consumers:
+ providers = provider_cache.get(lib_consumer)
+ if providers is None:
+ providers = linkmap.findProviders(lib_consumer)
+ provider_cache[lib_consumer] = providers
+ if soname not in providers:
+ # Why does this happen?
+ continue
+ consumer_providers.append(
+ (lib_consumer, providers[soname]))
+
+ consumers[lib] = consumer_providers
+
+ consumer_map[pkg] = consumers
+
+ if consumer_map:
+
+ search_files = set()
+ for consumers in consumer_map.values():
+ for lib, consumer_providers in consumers.items():
+ for lib_consumer, providers in consumer_providers:
+ search_files.add(lib_consumer)
+ search_files.update(providers)
+
+ writemsg_level(">>> Assigning files to packages...\n")
+ file_owners = {}
+ for f in search_files:
+ owner_set = set()
+ for owner in linkmap.getOwners(f):
+ owner_dblink = real_vardb._dblink(owner)
+ if owner_dblink.exists():
+ owner_set.add(owner_dblink)
+ if owner_set:
+ file_owners[f] = owner_set
+
+ for pkg, consumers in list(consumer_map.items()):
+ for lib, consumer_providers in list(consumers.items()):
+ lib_consumers = set()
+
+ for lib_consumer, providers in consumer_providers:
+ owner_set = file_owners.get(lib_consumer)
+ provider_dblinks = set()
+ provider_pkgs = set()
+
+ if len(providers) > 1:
+ for provider in providers:
+ provider_set = file_owners.get(provider)
+ if provider_set is not None:
+ provider_dblinks.update(provider_set)
+
+ if len(provider_dblinks) > 1:
+ for provider_dblink in provider_dblinks:
+ provider_pkg = resolver._pkg(
+ provider_dblink.mycpv, "installed",
+ root_config, installed=True)
+ if provider_pkg not in clean_set:
+ provider_pkgs.add(provider_pkg)
+
+ if provider_pkgs:
+ continue
+
+ if owner_set is not None:
+ lib_consumers.update(owner_set)
+
+ for consumer_dblink in list(lib_consumers):
+ if resolver._pkg(consumer_dblink.mycpv, "installed",
+ root_config, installed=True) in clean_set:
+ lib_consumers.remove(consumer_dblink)
+ continue
+
+ if lib_consumers:
+ consumers[lib] = lib_consumers
+ else:
+ del consumers[lib]
+ if not consumers:
+ del consumer_map[pkg]
+
+ if consumer_map:
+ # TODO: Implement a package set for rebuilding consumer packages.
+
+ msg = "In order to avoid breakage of link level " + \
+ "dependencies, one or more packages will not be removed. " + \
+ "This can be solved by rebuilding " + \
+ "the packages that pulled them in."
+
+ prefix = bad(" * ")
+ writemsg_level("".join(prefix + "%s\n" % line for \
+ line in textwrap.wrap(msg, 70)), level=logging.WARNING, noiselevel=-1)
+
+ msg = []
+ for pkg in sorted(consumer_map, key=cmp_sort_key(cmp_pkg_cpv)):
+ consumers = consumer_map[pkg]
+ consumer_libs = {}
+ for lib, lib_consumers in consumers.items():
+ for consumer in lib_consumers:
+ consumer_libs.setdefault(
+ consumer.mycpv, set()).add(linkmap.getSoname(lib))
+ unique_consumers = set(chain(*consumers.values()))
+ unique_consumers = sorted(consumer.mycpv \
+ for consumer in unique_consumers)
+ msg.append("")
+ msg.append(" %s pulled in by:" % (pkg.cpv,))
+ for consumer in unique_consumers:
+ libs = consumer_libs[consumer]
+ msg.append(" %s needs %s" % \
+ (consumer, ', '.join(sorted(libs))))
+ msg.append("")
+ writemsg_level("".join(prefix + "%s\n" % line for line in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ # Add lib providers to the graph as children of lib consumers,
+ # and also add any dependencies pulled in by the provider.
+ writemsg_level(">>> Adding lib providers to graph...\n")
+
+ for pkg, consumers in consumer_map.items():
+ for consumer_dblink in set(chain(*consumers.values())):
+ consumer_pkg = resolver._pkg(consumer_dblink.mycpv,
+ "installed", root_config, installed=True)
+ if not resolver._add_pkg(pkg,
+ Dependency(parent=consumer_pkg,
+ priority=UnmergeDepPriority(runtime=True,
+ runtime_slot_op=True),
+ root=pkg.root)):
+ resolver.display_problems()
+ return _depclean_result(1, [], False, 0, resolver, [])
+
+ writemsg_level("\nCalculating dependencies ")
+ success = resolver._complete_graph(
+ required_sets={eroot:required_sets})
+ writemsg_level("\b\b... done!\n")
+ resolver.display_problems()
+ if not success:
+ return _depclean_result(1, [], False, 0, resolver, [])
+ unresolvable = unresolved_deps()
+ if unresolvable is not None:
+ return _depclean_result(1, [], False, 0, resolver, unresolvable)
+
+ graph = resolver._dynamic_config.digraph.copy()
+ required_pkgs_total = 0
+ for node in graph:
+ if isinstance(node, Package):
+ required_pkgs_total += 1
+ cleanlist = create_cleanlist()
+ if not cleanlist:
+ return _depclean_result(0, [], False, required_pkgs_total, resolver, [])
+ clean_set = set(cleanlist)
+
+ if clean_set:
+ writemsg_level(">>> Calculating removal order...\n")
+ # Use a topological sort to create an unmerge order such that
+ # each package is unmerged before its dependencies. This is
+ # necessary to avoid breaking things that may need to run
+ # during pkg_prerm or pkg_postrm phases.
+
+ # Create a new graph to account for dependencies between the
+ # packages being unmerged.
+ graph = digraph()
+ del cleanlist[:]
+
+ runtime = UnmergeDepPriority(runtime=True)
+ runtime_post = UnmergeDepPriority(runtime_post=True)
+ buildtime = UnmergeDepPriority(buildtime=True)
+ priority_map = {
+ "RDEPEND": runtime,
+ "PDEPEND": runtime_post,
+ "BDEPEND": buildtime,
+ "DEPEND": buildtime,
+ }
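+ # Each dependency variable maps to the edge priority used in the
+ # unmerge graph; DEPEND and BDEPEND both count as buildtime.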
+
+ for node in clean_set:
+ graph.add(node, None)
+ for dep_type in Package._dep_keys:
+ depstr = node._metadata[dep_type]
+ if not depstr:
+ continue
+ priority = priority_map[dep_type]
+
+ if debug:
+ writemsg_level("\nParent: %s\n"
+ % (node,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level( "Depstring: %s\n"
+ % (depstr,), noiselevel=-1, level=logging.DEBUG)
+ writemsg_level( "Priority: %s\n"
+ % (priority,), noiselevel=-1, level=logging.DEBUG)
+
+ try:
+ atoms = resolver._select_atoms(eroot, depstr,
+ myuse=node.use.enabled, parent=node,
+ priority=priority)[node]
+ except portage.exception.InvalidDependString:
+ # Ignore invalid deps of packages that will
+ # be uninstalled anyway.
+ continue
+
+ if debug:
+ writemsg_level("Candidates: [%s]\n" % \
+ ', '.join("'%s'" % (x,) for x in atoms),
+ noiselevel=-1, level=logging.DEBUG)
+
+ for atom in atoms:
+ if not isinstance(atom, portage.dep.Atom):
+ # Ignore invalid atoms returned from dep_check().
+ continue
+ if atom.blocker:
+ continue
+ matches = vardb.match_pkgs(atom)
+ if not matches:
+ continue
+ for child_node in matches:
+ if child_node in clean_set:
+
+ mypriority = priority.copy()
+ if atom.slot_operator_built:
+ if mypriority.buildtime:
+ mypriority.buildtime_slot_op = True
+ if mypriority.runtime:
+ mypriority.runtime_slot_op = True
+
+ graph.add(child_node, node, priority=mypriority)
+
+ if debug:
+ writemsg_level("\nunmerge digraph:\n\n",
+ noiselevel=-1, level=logging.DEBUG)
+ graph.debug_print()
+ writemsg_level("\n", noiselevel=-1, level=logging.DEBUG)
+
+ ordered = True
+ if len(graph.order) == len(graph.root_nodes()):
+ # If there are no dependencies between packages
+ # let unmerge() group them by cat/pn.
+ ordered = False
+ cleanlist = [pkg.cpv for pkg in graph.order]
+ else:
+ # Order nodes from lowest to highest overall reference count for
+ # optimal root node selection (this can help minimize issues
+ # with unaccounted implicit dependencies).
+ node_refcounts = {}
+ for node in graph.order:
+ node_refcounts[node] = len(graph.parent_nodes(node))
+ def cmp_reference_count(node1, node2):
+ return node_refcounts[node1] - node_refcounts[node2]
+ graph.order.sort(key=cmp_sort_key(cmp_reference_count))
+
+ ignore_priority_range = [None]
+ ignore_priority_range.extend(
+ range(UnmergeDepPriority.MIN, UnmergeDepPriority.MAX + 1))
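+ # Repeatedly pop root nodes (packages nothing else in the graph
+ # depends on); when cycles leave no root nodes, retry while
+ # ignoring dependency edges, weakest priorities first.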
+ while graph:
+ for ignore_priority in ignore_priority_range:
+ nodes = graph.root_nodes(ignore_priority=ignore_priority)
+ if nodes:
+ break
+ if not nodes:
+ raise AssertionError("no root nodes")
+ if ignore_priority is not None:
+ # Some deps have been dropped due to circular dependencies,
+ # so only pop one node in order to minimize the number that
+ # are dropped.
+ del nodes[1:]
+ for node in nodes:
+ graph.remove(node)
+ cleanlist.append(node.cpv)
+
+ return _depclean_result(0, cleanlist, ordered, required_pkgs_total, resolver, [])
+ return _depclean_result(0, [], False, required_pkgs_total, resolver, [])
+
+def action_deselect(settings, trees, opts, atoms):
+ enter_invalid = '--ask-enter-invalid' in opts
+ root_config = trees[settings['EROOT']]['root_config']
+ world_set = root_config.sets['selected']
+ if not hasattr(world_set, 'update'):
+ writemsg_level("World @selected set does not appear to be mutable.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ pretend = '--pretend' in opts
+ locked = False
+ if not pretend and hasattr(world_set, 'lock'):
+ world_set.lock()
+ locked = True
+ try:
+ world_set.load()
+ world_atoms = world_set.getAtoms()
+ vardb = root_config.trees["vartree"].dbapi
+ expanded_atoms = set(atoms)
+
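+ # Expand each plain atom to a slot-qualified atom for every
+ # installed match, so slot-specific world entries are
+ # discarded as well.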
+ for atom in atoms:
+ if not atom.startswith(SETPREFIX):
+ if atom.cp.startswith("null/"):
+ # try to expand category from world set
+ null_cat, pn = portage.catsplit(atom.cp)
+ for world_atom in world_atoms:
+ cat, world_pn = portage.catsplit(world_atom.cp)
+ if pn == world_pn:
+ expanded_atoms.add(
+ Atom(atom.replace("null", cat, 1),
+ allow_repo=True, allow_wildcard=True))
+
+ for cpv in vardb.match(atom):
+ pkg = vardb._pkg_str(cpv, None)
+ expanded_atoms.add(Atom("%s:%s" % (pkg.cp, pkg.slot)))
+
+ discard_atoms = set()
+ for atom in world_set:
+ for arg_atom in expanded_atoms:
+ if arg_atom.startswith(SETPREFIX):
+ if atom.startswith(SETPREFIX) and \
+ arg_atom == atom:
+ discard_atoms.add(atom)
+ break
+ else:
+ if not atom.startswith(SETPREFIX) and \
+ arg_atom.intersects(atom) and \
+ not (arg_atom.slot and not atom.slot) and \
+ not (arg_atom.repo and not atom.repo):
+ discard_atoms.add(atom)
+ break
+ if discard_atoms:
+ for atom in sorted(discard_atoms):
+
+ if pretend:
+ action_desc = "Would remove"
+ else:
+ action_desc = "Removing"
+
+ if atom.startswith(SETPREFIX):
+ filename = "world_sets"
+ else:
+ filename = "world"
+
+ writemsg_stdout(
+ ">>> %s %s from \"%s\" favorites file...\n" %
+ (action_desc, colorize("INFORM", _unicode(atom)),
+ filename), noiselevel=-1)
+
+ if '--ask' in opts:
+ prompt = "Would you like to remove these " + \
+ "packages from your world favorites?"
+ uq = UserQuery(opts)
+ if uq.query(prompt, enter_invalid) == 'No':
+ return 128 + signal.SIGINT
+
+ remaining = set(world_set)
+ remaining.difference_update(discard_atoms)
+ if not pretend:
+ world_set.replace(remaining)
+ else:
+ print(">>> No matching atoms found in \"world\" favorites file...")
+ finally:
+ if locked:
+ world_set.unlock()
+ return os.EX_OK
+
+class _info_pkgs_ver(object):
+ def __init__(self, ver, repo_suffix, provide_suffix):
+ self.ver = ver
+ self.repo_suffix = repo_suffix
+ self.provide_suffix = provide_suffix
+
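+ # Order versions with Portage's vercmp() instead of plain
+ # string comparison.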
+ def __lt__(self, other):
+ return portage.versions.vercmp(self.ver, other.ver) < 0
+
+ def toString(self):
+ """
+ This may return unicode if repo_name contains unicode.
+ Don't use __str__ and str() since unicode triggers compatibility
+ issues between python 2.x and 3.x.
+ """
+ return self.ver + self.repo_suffix + self.provide_suffix
+
+def action_info(settings, trees, myopts, myfiles):
+
+ # See if we can find any packages installed matching the strings
+ # passed on the command line
+ mypkgs = []
+ eroot = settings['EROOT']
+ vardb = trees[eroot]["vartree"].dbapi
+ portdb = trees[eroot]['porttree'].dbapi
+ bindb = trees[eroot]["bintree"].dbapi
+ repos = portdb.settings.repositories
+ for x in myfiles:
+ any_match = False
+ cp_exists = bool(vardb.match(x.cp))
+ installed_match = vardb.match(x)
+ for installed in installed_match:
+ mypkgs.append((installed, "installed"))
+ any_match = True
+
+ if any_match:
+ continue
+
+ for db, pkg_type in ((portdb, "ebuild"), (bindb, "binary")):
+ if pkg_type == "binary" and "--usepkg" not in myopts:
+ continue
+
+ # Use match instead of cp_list, to account for old-style virtuals.
+ if not cp_exists and db.match(x.cp):
+ cp_exists = True
+ # Search for masked packages too.
+ if not cp_exists and hasattr(db, "xmatch") and \
+ db.xmatch("match-all", x.cp):
+ cp_exists = True
+
+ matches = db.match(x)
+ matches.reverse()
+ for match in matches:
+ if pkg_type == "binary":
+ if db.bintree.isremote(match):
+ continue
+ auxkeys = ["EAPI", "DEFINED_PHASES"]
+ metadata = dict(zip(auxkeys, db.aux_get(match, auxkeys)))
+ if metadata["EAPI"] not in ("0", "1", "2", "3") and \
+ "info" in metadata["DEFINED_PHASES"].split():
+ mypkgs.append((match, pkg_type))
+ break
+
+ if not cp_exists:
+ xinfo = '"%s"' % x.unevaluated_atom
+ # Discard null/ from failed cpv_expand category expansion.
+ xinfo = xinfo.replace("null/", "")
+ if settings["ROOT"] != "/":
+ xinfo = "%s for %s" % (xinfo, eroot)
+ writemsg("\nemerge: there are no ebuilds to satisfy %s.\n" %
+ colorize("INFORM", xinfo), noiselevel=-1)
+
+ if myopts.get("--misspell-suggestions", "y") != "n":
+
+ writemsg("\nemerge: searching for similar names..."
+ , noiselevel=-1)
+
+ search_index = myopts.get("--search-index", "y") != "n"
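+ # With the search index enabled, wrap the databases in their
+ # Indexed* variants for faster iteration during the
+ # similar-name search.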
+ dbs = [IndexedVardb(vardb) if search_index else vardb]
+ #if "--usepkgonly" not in myopts:
+ dbs.append(IndexedPortdb(portdb) if search_index else portdb)
+ if "--usepkg" in myopts:
+ dbs.append(bindb)
+
+ matches = similar_name_search(dbs, x)
+
+ if len(matches) == 1:
+ writemsg("\nemerge: Maybe you meant " + matches[0] + "?\n"
+ , noiselevel=-1)
+ elif len(matches) > 1:
+ writemsg(
+ "\nemerge: Maybe you meant any of these: %s?\n" % \
+ (", ".join(matches),), noiselevel=-1)
+ else:
+ # Generally, this would only happen if
+ # all dbapis are empty.
+ writemsg(" nothing similar found.\n"
+ , noiselevel=-1)
+
+ return 1
+
+ output_buffer = []
+ append = output_buffer.append
+ root_config = trees[settings['EROOT']]['root_config']
+ chost = settings.get("CHOST")
+
+ append(getportageversion(settings["PORTDIR"], None,
+ settings.profile_path, chost,
+ trees[settings['EROOT']]["vartree"].dbapi))
+
+ header_width = 65
+ header_title = "System Settings"
+ if myfiles:
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("System uname: %s" % (platform.platform(aliased=1),))
+
+ vm_info = get_vm_info()
+ if "ram.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Mem:", vm_info["ram.total"] // 1024)
+ if "ram.free" in vm_info:
+ line += ",%10d free" % (vm_info["ram.free"] // 1024,)
+ append(line)
+ if "swap.total" in vm_info:
+ line = "%-9s %10d total" % ("KiB Swap:", vm_info["swap.total"] // 1024)
+ if "swap.free" in vm_info:
+ line += ",%10d free" % (vm_info["swap.free"] // 1024,)
+ append(line)
+
+ for repo in repos:
+ last_sync = portage.grabfile(os.path.join(repo.location, "metadata", "timestamp.chk"))
+ head_commit = None
+ if last_sync:
+ append("Timestamp of repository %s: %s" % (repo.name, last_sync[0]))
+ if repo.sync_type:
+ sync = portage.sync.module_controller.get_class(repo.sync_type)()
+ options = { 'repo': repo }
+ try:
+ head_commit = sync.retrieve_head(options=options)
+ except NotImplementedError:
+ head_commit = (1, False)
+ if head_commit and head_commit[0] == os.EX_OK:
+ append("Head commit of repository %s: %s" % (repo.name, head_commit[1]))
+
+ # Searching contents for the /bin/sh provider is somewhat
+ # slow. Therefore, use the basename of the symlink target
+ # to locate the package. If this fails, then only the
+ # basename of the symlink target will be displayed. So,
+ # typical output is something like "sh bash 4.2_p53". Since
+ # realpath is used to resolve symlinks recursively, this
+ # approach is also able to handle multiple levels of symlinks
+ # such as /bin/sh -> bb -> busybox. Note that we do not parse
+ # the output of "/bin/sh --version" because many shells
+ # do not have a --version option.
+ basename = os.path.basename(os.path.realpath(os.path.join(
+ os.sep, portage.const.EPREFIX, "bin", "sh")))
+ try:
+ Atom("null/%s" % basename)
+ except InvalidAtom:
+ matches = None
+ else:
+ try:
+ # Try a match against the basename, which should work for
+ # busybox and most shells.
+ matches = (trees[trees._running_eroot]["vartree"].dbapi.
+ match(basename))
+ except portage.exception.AmbiguousPackageName:
+ # If the name is ambiguous, then restrict our match
+ # to the app-shells category.
+ matches = (trees[trees._running_eroot]["vartree"].dbapi.
+ match("app-shells/%s" % basename))
+
+ if matches:
+ pkg = matches[-1]
+ name = pkg.cp
+ version = pkg.version
+ # Omit app-shells category from the output.
+ if name.startswith("app-shells/"):
+ name = name[len("app-shells/"):]
+ sh_str = "%s %s" % (name, version)
+ else:
+ sh_str = basename
+
+ append("sh %s" % sh_str)
+
+ ld_names = []
+ if chost:
+ ld_names.append(chost + "-ld")
+ ld_names.append("ld")
+ for name in ld_names:
+ try:
+ proc = subprocess.Popen([name, "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ pass
+ else:
+ output = _unicode_decode(proc.communicate()[0]).splitlines()
+ if proc.wait() == os.EX_OK and output:
+ append("ld %s" % (output[0]))
+ break
+
+ try:
+ proc = subprocess.Popen(["distcc", "--version"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
+ if output[0] == os.EX_OK:
+ distcc_str = output[1].split("\n", 1)[0]
+ if "distcc" in settings.features:
+ distcc_str += " [enabled]"
+ else:
+ distcc_str += " [disabled]"
+ append(distcc_str)
+
+ try:
+ proc = subprocess.Popen(["ccache", "-V"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ output = (1, None)
+ else:
+ output = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ output = (proc.wait(), output)
+ if output[0] == os.EX_OK:
+ ccache_str = output[1].split("\n", 1)[0]
+ if "ccache" in settings.features:
+ ccache_str += " [enabled]"
+ else:
+ ccache_str += " [disabled]"
+ append(ccache_str)
+
+ myvars = ["sys-devel/autoconf", "sys-devel/automake", "virtual/os-headers",
+ "sys-devel/binutils", "sys-devel/libtool", "dev-lang/python"]
+ myvars += portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_pkgs")
+ atoms = []
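+ # Expand new-style virtuals so that, for example,
+ # virtual/os-headers resolves to the package(s) that
+ # actually provide it.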
+ for x in myvars:
+ try:
+ x = Atom(x)
+ except InvalidAtom:
+ append("%-20s %s" % (x+":", "[NOT VALID]"))
+ else:
+ for atom in expand_new_virt(vardb, x):
+ if not atom.blocker:
+ atoms.append((x, atom))
+
+ myvars = sorted(set(atoms))
+
+ cp_map = {}
+ cp_max_len = 0
+
+ for orig_atom, x in myvars:
+ pkg_matches = vardb.match(x)
+
+ versions = []
+ for cpv in pkg_matches:
+ matched_cp = portage.versions.cpv_getkey(cpv)
+ ver = portage.versions.cpv_getversion(cpv)
+ ver_map = cp_map.setdefault(matched_cp, {})
+ prev_match = ver_map.get(ver)
+ if prev_match is not None:
+ if prev_match.provide_suffix:
+ # prefer duplicate matches that include
+ # additional virtual provider info
+ continue
+
+ if len(matched_cp) > cp_max_len:
+ cp_max_len = len(matched_cp)
+ repo = vardb.aux_get(cpv, ["repository"])[0]
+ if repo:
+ repo_suffix = _repo_separator + repo
+ else:
+ repo_suffix = _repo_separator + "<unknown repository>"
+
+ if matched_cp == orig_atom.cp:
+ provide_suffix = ""
+ else:
+ provide_suffix = " (%s)" % (orig_atom,)
+
+ ver_map[ver] = _info_pkgs_ver(ver, repo_suffix, provide_suffix)
+
+ for cp in sorted(cp_map):
+ versions = sorted(cp_map[cp].values())
+ versions = ", ".join(ver.toString() for ver in versions)
+ append("%s %s" % \
+ ((cp + ":").ljust(cp_max_len + 1), versions))
+
+ append("Repositories:\n")
+ for repo in repos:
+ append(repo.info_string())
+
+ installed_sets = sorted(s for s in
+ root_config.sets['selected'].getNonAtoms() if s.startswith(SETPREFIX))
+ if installed_sets:
+ sets_line = "Installed sets: "
+ sets_line += ", ".join(installed_sets)
+ append(sets_line)
+
+ if "--verbose" in myopts:
+ myvars = list(settings)
+ else:
+ myvars = ['GENTOO_MIRRORS', 'CONFIG_PROTECT', 'CONFIG_PROTECT_MASK',
+ 'DISTDIR', 'ENV_UNSET', 'PKGDIR', 'PORTAGE_TMPDIR',
+ 'PORTAGE_BINHOST', 'PORTAGE_BUNZIP2_COMMAND',
+ 'PORTAGE_BZIP2_COMMAND',
+ 'USE', 'CHOST', 'CFLAGS', 'CXXFLAGS',
+ 'ACCEPT_KEYWORDS', 'ACCEPT_LICENSE', 'FEATURES',
+ 'EMERGE_DEFAULT_OPTS']
+
+ myvars.extend(portage.util.grabfile(settings["PORTDIR"]+"/profiles/info_vars"))
+
+ myvars_ignore_defaults = {
+ 'PORTAGE_BZIP2_COMMAND' : 'bzip2',
+ }
+
+ skipped_vars = ['PORTAGE_REPOSITORIES']
+ # Deprecated variables
+ skipped_vars.extend(('PORTDIR', 'PORTDIR_OVERLAY', 'SYNC'))
+
+ myvars = set(myvars)
+ myvars.difference_update(skipped_vars)
+ myvars = sorted(myvars)
+
+ use_expand = settings.get('USE_EXPAND', '').split()
+ use_expand.sort()
+ unset_vars = []
+
+ for k in myvars:
+ v = settings.get(k)
+ if v is not None:
+ if k != "USE":
+ default = myvars_ignore_defaults.get(k)
+ if default is not None and \
+ default == v:
+ continue
+
+ v = _hide_url_passwd(v)
+
+ append('%s="%s"' % (k, v))
+ else:
+ use = set(v.split())
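+ # Strip USE_EXPAND-driven flags out of USE and report each
+ # USE_EXPAND variable separately instead.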
+ for varname in use_expand:
+ flag_prefix = varname.lower() + "_"
+ for f in list(use):
+ if f.startswith(flag_prefix):
+ use.remove(f)
+ use = list(use)
+ use.sort()
+ use = ['USE="%s"' % " ".join(use)]
+ for varname in use_expand:
+ myval = settings.get(varname)
+ if myval:
+ use.append('%s="%s"' % (varname, myval))
+ append(" ".join(use))
+ else:
+ unset_vars.append(k)
+ if unset_vars:
+ append("Unset: "+", ".join(unset_vars))
+
+ return False, output_buffer
+
+ # If some packages were found...
+ if mypkgs:
+ # Get our global settings (we only print stuff if it varies from
+ # the current config)
+ mydesiredvars = ['CHOST', 'CFLAGS', 'CXXFLAGS', 'FEATURES', 'LDFLAGS']
+ auxkeys = mydesiredvars + list(vardb._aux_cache_keys)
+ auxkeys.append('DEFINED_PHASES')
+ pkgsettings = portage.config(clone=settings)
+
+ # Loop through each package
+ # Only print settings if they differ from global settings
+ header_title = "Package Settings"
+ append(header_width * "=")
+ append(header_title.rjust(int(header_width/2 + len(header_title)/2)))
+ append(header_width * "=")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ out = portage.output.EOutput()
+ for mypkg in mypkgs:
+ cpv = mypkg[0]
+ pkg_type = mypkg[1]
+ # Get all package specific variables
+ if pkg_type == "installed":
+ metadata = dict(zip(auxkeys, vardb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "ebuild":
+ metadata = dict(zip(auxkeys, portdb.aux_get(cpv, auxkeys)))
+ elif pkg_type == "binary":
+ metadata = dict(zip(auxkeys, bindb.aux_get(cpv, auxkeys)))
+
+ pkg = Package(built=(pkg_type!="ebuild"), cpv=cpv,
+ installed=(pkg_type=="installed"), metadata=zip(Package.metadata_keys,
+ (metadata.get(x, '') for x in Package.metadata_keys)),
+ root_config=root_config, type_name=pkg_type)
+
+ if pkg_type == "installed":
+ append("\n%s was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
+ elif pkg_type == "ebuild":
+ append("\n%s would be built with the following:" % \
+ colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
+ elif pkg_type == "binary":
+ append("\n%s (non-installed binary) was built with the following:" % \
+ colorize("INFORM", str(pkg.cpv + _repo_separator + pkg.repo)))
+
+ append('%s' % pkg_use_display(pkg, myopts))
+ if pkg_type == "installed":
+ for myvar in mydesiredvars:
+ if metadata[myvar].split() != settings.get(myvar, '').split():
+ append("%s=\"%s\"" % (myvar, metadata[myvar]))
+ append("")
+ append("")
+ writemsg_stdout("\n".join(output_buffer),
+ noiselevel=-1)
+ del output_buffer[:]
+
+ if metadata['DEFINED_PHASES']:
+ if 'info' not in metadata['DEFINED_PHASES'].split():
+ continue
+
+ writemsg_stdout(">>> Attempting to run pkg_info() for '%s'\n"
+ % pkg.cpv, noiselevel=-1)
+
+ if pkg_type == "installed":
+ ebuildpath = vardb.findname(pkg.cpv)
+ elif pkg_type == "ebuild":
+ ebuildpath = portdb.findname(pkg.cpv, myrepo=pkg.repo)
+ elif pkg_type == "binary":
+ tbz2_file = bindb.bintree.getname(pkg.cpv)
+ ebuild_file_name = pkg.cpv.split("/")[1] + ".ebuild"
+ ebuild_file_contents = portage.xpak.tbz2(tbz2_file).getfile(ebuild_file_name)
+ tmpdir = tempfile.mkdtemp()
+ ebuildpath = os.path.join(tmpdir, ebuild_file_name)
+ # getfile() returns bytes, so write in binary mode; the
+ # context manager closes the file even if the write fails.
+ with open(ebuildpath, 'wb') as ebuild_file:
+ ebuild_file.write(ebuild_file_contents)
+
+ if not ebuildpath or not os.path.exists(ebuildpath):
+ out.ewarn("No ebuild found for '%s'" % pkg.cpv)
+ continue
+
+ if pkg_type == "installed":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]["vartree"].dbapi,
+ tree="vartree")
+ elif pkg_type == "ebuild":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]['porttree'].dbapi,
+ tree="porttree")
+ elif pkg_type == "binary":
+ portage.doebuild(ebuildpath, "info", settings=pkgsettings,
+ debug=(settings.get("PORTAGE_DEBUG", "") == "1"),
+ mydbapi=trees[settings['EROOT']]["bintree"].dbapi,
+ tree="bintree")
+ shutil.rmtree(tmpdir)
+
+def action_regen(settings, portdb, max_jobs, max_load):
+ xterm_titles = "notitles" not in settings.features
+ emergelog(xterm_titles, " === regen")
+ #regenerate cache entries
+ sys.stdout.flush()
+
+ regen = MetadataRegen(portdb, max_jobs=max_jobs,
+ max_load=max_load, main=True)
+
+ signum = run_main_scheduler(regen)
+ if signum is not None:
+ sys.exit(128 + signum)
+
+ portage.writemsg_stdout("done!\n")
+ return regen.returncode
+
+def action_search(root_config, myopts, myfiles, spinner):
+ if not myfiles:
+ print("emerge: no search terms provided.")
+ else:
+ searchinstance = search(root_config,
+ spinner, "--searchdesc" in myopts,
+ "--quiet" not in myopts, "--usepkg" in myopts,
+ "--usepkgonly" in myopts,
+ search_index=myopts.get("--search-index", "y") != "n",
+ search_similarity=myopts.get("--search-similarity"),
+ fuzzy=myopts.get("--fuzzy-search") != "n",
+ )
+ for mysearch in myfiles:
+ try:
+ searchinstance.execute(mysearch)
+ except re.error as comment:
+ print("\n!!! Regular expression error in \"%s\": %s" % ( mysearch, comment ))
+ sys.exit(1)
+ searchinstance.output()
+
+def action_sync(emerge_config, trees=DeprecationWarning,
+ mtimedb=DeprecationWarning, opts=DeprecationWarning,
+ action=DeprecationWarning):
+
+ if not isinstance(emerge_config, _emerge_config):
+ warnings.warn("_emerge.actions.action_sync() now expects "
+ "an _emerge_config instance as the first parameter",
+ DeprecationWarning, stacklevel=2)
+ emerge_config = load_emerge_config(
+ action=action, args=[], trees=trees, opts=opts)
+
+ syncer = SyncRepos(emerge_config)
+ return_messages = "--quiet" not in emerge_config.opts
+ options = {'return-messages' : return_messages}
+ if emerge_config.args:
+ options['repo'] = emerge_config.args
+ success, msgs = syncer.repo(options=options)
+ else:
+ success, msgs = syncer.auto_sync(options=options)
+ if return_messages:
+ print_results(msgs)
+ elif msgs and not success:
+ writemsg_level("".join("%s\n" % (line,) for line in msgs),
+ level=logging.ERROR, noiselevel=-1)
+
+ return os.EX_OK if success else 1
+
+
+def action_uninstall(settings, trees, ldpath_mtimes,
+ opts, action, files, spinner):
+ # For backward compat, some actions do not require leading '='.
+ ignore_missing_eq = action in ('clean', 'rage-clean', 'unmerge')
+ root = settings['ROOT']
+ eroot = settings['EROOT']
+ vardb = trees[settings['EROOT']]['vartree'].dbapi
+ valid_atoms = []
+ lookup_owners = []
+
+ # Ensure atoms are valid before calling unmerge().
+ # For backward compat, leading '=' is not required.
+ for x in files:
+ if is_valid_package_atom(x, allow_repo=True) or \
+ (ignore_missing_eq and is_valid_package_atom('=' + x)):
+
+ try:
+ atom = dep_expand(x, mydb=vardb, settings=settings)
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ else:
+ if atom.use and atom.use.conditional:
+ writemsg_level(
+ ("\n\n!!! '%s' contains a conditional " + \
+ "which is not allowed.\n") % (x,),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level(
+ "!!! Please check ebuild(5) for full details.\n",
+ level=logging.ERROR)
+ return 1
+ valid_atoms.append(atom)
+
+ elif x.startswith(os.sep):
+ if not x.startswith(eroot):
+ writemsg_level(("!!! '%s' does not start with" + \
+ " $EROOT.\n") % x, level=logging.ERROR, noiselevel=-1)
+ return 1
+ # Queue these up since it's most efficient to handle
+ # multiple files in a single iter_owners() call.
+ lookup_owners.append(x)
+
+ elif x.startswith(SETPREFIX) and action == "deselect":
+ valid_atoms.append(x)
+
+ elif "*" in x:
+ try:
+ ext_atom = Atom(x, allow_repo=True, allow_wildcard=True)
+ except InvalidAtom:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ for cpv in vardb.cpv_all():
+ if portage.match_from_list(ext_atom, [cpv]):
+ require_metadata = False
+ atom = portage.cpv_getkey(cpv)
+ if ext_atom.operator == '=*':
+ atom = "=" + atom + "-" + \
+ portage.versions.cpv_getversion(cpv)
+ if ext_atom.slot:
+ atom += _slot_separator + ext_atom.slot
+ require_metadata = True
+ if ext_atom.repo:
+ atom += _repo_separator + ext_atom.repo
+ require_metadata = True
+
+ atom = Atom(atom, allow_repo=True)
+ if require_metadata:
+ try:
+ cpv = vardb._pkg_str(cpv, ext_atom.repo)
+ except (KeyError, InvalidData):
+ continue
+ if not portage.match_from_list(atom, [cpv]):
+ continue
+
+ valid_atoms.append(atom)
+
+ else:
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if lookup_owners:
+ relative_paths = []
+ search_for_multiple = False
+ if len(lookup_owners) > 1:
+ search_for_multiple = True
+
+ for x in lookup_owners:
+ if not search_for_multiple and os.path.isdir(x):
+ search_for_multiple = True
+ relative_paths.append(x[len(root)-1:])
+
+ owners = set()
+ for pkg, relative_path in \
+ vardb._owners.iter_owners(relative_paths):
+ owners.add(pkg.mycpv)
+ if not search_for_multiple:
+ break
+
+ if owners:
+ for cpv in owners:
+ pkg = vardb._pkg_str(cpv, None)
+ atom = '%s:%s' % (pkg.cp, pkg.slot)
+ valid_atoms.append(portage.dep.Atom(atom))
+ else:
+ writemsg_level(("!!! '%s' is not claimed " + \
+ "by any package.\n") % lookup_owners[0],
+ level=logging.WARNING, noiselevel=-1)
+
+ if files and not valid_atoms:
+ return 1
+
+ if action == 'unmerge' and \
+ '--quiet' not in opts and \
+ '--quiet-unmerge-warn' not in opts:
+ msg = "This action can remove important packages! " + \
+ "In order to be safer, use " + \
+ "`emerge -pv --depclean <atom>` to check for " + \
+ "reverse dependencies before removing packages."
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 72):
+ out.ewarn(line)
+
+ if action == 'deselect':
+ return action_deselect(settings, trees, opts, valid_atoms)
+
+ # Use the same logic as the Scheduler class to trigger redirection
+ # of ebuild pkg_prerm/postrm phase output to logs as appropriate
+ # for options such as --jobs, --quiet and --quiet-build.
+ max_jobs = opts.get("--jobs", 1)
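+ # max_jobs is True when --jobs was given without a number
+ # (no job limit).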
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in opts or opts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
+
+ if background:
+ settings.unlock()
+ settings["PORTAGE_BACKGROUND"] = "1"
+ settings.backup_changes("PORTAGE_BACKGROUND")
+ settings.lock()
+
+ if action in ('clean', 'rage-clean', 'unmerge') or \
+ (action == 'prune' and "--nodeps" in opts):
+ # When given a list of atoms, unmerge them in the order given.
+ ordered = action in ('rage-clean', 'unmerge')
+ rval = unmerge(trees[settings['EROOT']]['root_config'], opts, action,
+ valid_atoms, ldpath_mtimes, ordered=ordered,
+ scheduler=sched_iface)
+ else:
+ rval = action_depclean(settings, trees, ldpath_mtimes,
+ opts, action, valid_atoms, spinner,
+ scheduler=sched_iface)
+
+ return rval
+
+def adjust_configs(myopts, trees):
+ for myroot, mytrees in trees.items():
+ mysettings = trees[myroot]["vartree"].settings
+ mysettings.unlock()
+
+ # For --usepkgonly mode, propagate settings from the binary package
+ # database, so that it's possible to operate without dependence on
+ # a local ebuild repository and profile.
+ if ('--usepkgonly' in myopts and
+ mytrees['bintree']._propagate_config(mysettings)):
+ # Also propagate changes to the portdbapi doebuild_settings
+ # attribute which is used by Package instances for USE
+ # calculations (in support of --binpkg-respect-use).
+ mytrees['porttree'].dbapi.doebuild_settings = \
+ portage.config(clone=mysettings)
+
+ adjust_config(myopts, mysettings)
+ mysettings.lock()
+
+def adjust_config(myopts, settings):
+ """Make emerge specific adjustments to the config."""
+
+ # Kill noauto as it will break merges otherwise.
+ if "noauto" in settings.features:
+ settings.features.remove('noauto')
+
+ fail_clean = myopts.get('--fail-clean')
+ if fail_clean is not None:
+ if fail_clean is True and \
+ 'fail-clean' not in settings.features:
+ settings.features.add('fail-clean')
+ elif fail_clean == 'n' and \
+ 'fail-clean' in settings.features:
+ settings.features.remove('fail-clean')
+
+ CLEAN_DELAY = 5
+ try:
+ CLEAN_DELAY = int(settings.get("CLEAN_DELAY", str(CLEAN_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: CLEAN_DELAY='%s'\n" % \
+ settings["CLEAN_DELAY"], noiselevel=-1)
+ settings["CLEAN_DELAY"] = str(CLEAN_DELAY)
+ settings.backup_changes("CLEAN_DELAY")
+
+ EMERGE_WARNING_DELAY = 10
+ try:
+ EMERGE_WARNING_DELAY = int(settings.get(
+ "EMERGE_WARNING_DELAY", str(EMERGE_WARNING_DELAY)))
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: EMERGE_WARNING_DELAY='%s'\n" % \
+ settings["EMERGE_WARNING_DELAY"], noiselevel=-1)
+ settings["EMERGE_WARNING_DELAY"] = str(EMERGE_WARNING_DELAY)
+ settings.backup_changes("EMERGE_WARNING_DELAY")
+
+ buildpkg = myopts.get("--buildpkg")
+ if buildpkg is True:
+ settings.features.add("buildpkg")
+ elif buildpkg == 'n':
+ settings.features.discard("buildpkg")
+
+ if "--quiet" in myopts:
+ settings["PORTAGE_QUIET"]="1"
+ settings.backup_changes("PORTAGE_QUIET")
+
+ if "--verbose" in myopts:
+ settings["PORTAGE_VERBOSE"] = "1"
+ settings.backup_changes("PORTAGE_VERBOSE")
+
+ # Set so that configs will be merged regardless of remembered status
+ if ("--noconfmem" in myopts):
+ settings["NOCONFMEM"]="1"
+ settings.backup_changes("NOCONFMEM")
+
+ # Set various debug markers... They should be merged somehow.
+ PORTAGE_DEBUG = 0
+ try:
+ PORTAGE_DEBUG = int(settings.get("PORTAGE_DEBUG", str(PORTAGE_DEBUG)))
+ if PORTAGE_DEBUG not in (0, 1):
+ portage.writemsg("!!! Invalid value: PORTAGE_DEBUG='%i'\n" % \
+ PORTAGE_DEBUG, noiselevel=-1)
+ portage.writemsg("!!! PORTAGE_DEBUG must be either 0 or 1\n",
+ noiselevel=-1)
+ PORTAGE_DEBUG = 0
+ except ValueError as e:
+ portage.writemsg("!!! %s\n" % str(e), noiselevel=-1)
+ portage.writemsg("!!! Unable to parse integer: PORTAGE_DEBUG='%s'\n" %\
+ settings["PORTAGE_DEBUG"], noiselevel=-1)
+ del e
+ if "--debug" in myopts:
+ PORTAGE_DEBUG = 1
+ settings["PORTAGE_DEBUG"] = str(PORTAGE_DEBUG)
+ settings.backup_changes("PORTAGE_DEBUG")
+
+ if settings.get("NOCOLOR") not in ("yes","true"):
+ portage.output.havecolor = 1
+
+ # The explicit --color < y | n > option overrides the NOCOLOR environment
+ # variable and stdout auto-detection.
+ if "--color" in myopts:
+ if "y" == myopts["--color"]:
+ portage.output.havecolor = 1
+ settings["NOCOLOR"] = "false"
+ else:
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+ elif settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ portage.output.havecolor = 0
+ settings["NOCOLOR"] = "true"
+ settings.backup_changes("NOCOLOR")
+
+ if "--pkg-format" in myopts:
+ settings["PORTAGE_BINPKG_FORMAT"] = myopts["--pkg-format"]
+ settings.backup_changes("PORTAGE_BINPKG_FORMAT")
+
+def display_missing_pkg_set(root_config, set_name):
+
+ msg = []
+ msg.append(("emerge: There are no sets to satisfy '%s'. " + \
+ "The following sets exist:") % \
+ colorize("INFORM", set_name))
+ msg.append("")
+
+ for s in sorted(root_config.sets):
+ msg.append(" %s" % s)
+ msg.append("")
+
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.ERROR, noiselevel=-1)
+
+def relative_profile_path(portdir, abs_profile):
+ realpath = os.path.realpath(abs_profile)
+ basepath = os.path.realpath(os.path.join(portdir, "profiles"))
+ if realpath.startswith(basepath):
+ profilever = realpath[1 + len(basepath):]
+ else:
+ profilever = None
+ return profilever
+
+def getportageversion(portdir, _unused, profile, chost, vardb):
+ pythonver = 'python %d.%d.%d-%s-%d' % sys.version_info[:]
+ profilever = None
+ repositories = vardb.settings.repositories
+ if profile:
+ profilever = relative_profile_path(portdir, profile)
+ if profilever is None:
+ try:
+ for parent in portage.grabfile(
+ os.path.join(profile, 'parent')):
+ profilever = relative_profile_path(portdir,
+ os.path.join(profile, parent))
+ if profilever is not None:
+ break
+ colon = parent.find(":")
+ if colon != -1:
+ p_repo_name = parent[:colon]
+ try:
+ p_repo_loc = \
+ repositories.get_location_for_name(p_repo_name)
+ except KeyError:
+ pass
+ else:
+ profilever = relative_profile_path(p_repo_loc,
+ os.path.join(p_repo_loc, 'profiles',
+ parent[colon+1:]))
+ if profilever is not None:
+ break
+ except portage.exception.PortageException:
+ pass
+
+ if profilever is None:
+ try:
+ profilever = "!" + os.readlink(profile)
+ except OSError:
+ pass
+
+ if profilever is None:
+ profilever = "unavailable"
+
+ libcver = []
+ libclist = set()
+ for atom in expand_new_virt(vardb, portage.const.LIBC_PACKAGE_ATOM):
+ if not atom.blocker:
+ libclist.update(vardb.match(atom))
+ if libclist:
+ for cpv in sorted(libclist):
+ libc_split = portage.catpkgsplit(cpv)[1:]
+ if libc_split[-1] == "r0":
+ libc_split = libc_split[:-1]
+ libcver.append("-".join(libc_split))
+ else:
+ libcver = ["unavailable"]
+
+ gccver = getgccversion(chost)
+ unameout=platform.release()+" "+platform.machine()
+
+ return "Portage %s (%s, %s, %s, %s, %s)" % \
+ (portage.VERSION, pythonver, profilever, gccver, ",".join(libcver), unameout)
+
+
+class _emerge_config(SlotObject):
+
+ __slots__ = ('action', 'args', 'opts',
+ 'running_config', 'target_config', 'trees')
+
+ # Support unpack as tuple, for load_emerge_config backward compatibility.
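+ # A minimal usage sketch of the tuple form:
+ # settings, trees, mtimedb = load_emerge_config()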
+ def __iter__(self):
+ yield self.target_config.settings
+ yield self.trees
+ yield self.target_config.mtimedb
+
+ def __getitem__(self, index):
+ return list(self)[index]
+
+ def __len__(self):
+ return 3
+
+def load_emerge_config(emerge_config=None, env=None, **kargs):
+
+ if emerge_config is None:
+ emerge_config = _emerge_config(**kargs)
+
+ env = os.environ if env is None else env
+ kwargs = {'env': env}
+ for k, envvar in (("config_root", "PORTAGE_CONFIGROOT"), ("target_root", "ROOT"),
+ ("sysroot", "SYSROOT"), ("eprefix", "EPREFIX")):
+ v = env.get(envvar)
+ if v is not None:
+ kwargs[k] = v
+ emerge_config.trees = portage.create_trees(trees=emerge_config.trees,
+ **kwargs)
+
+ for root_trees in emerge_config.trees.values():
+ settings = root_trees["vartree"].settings
+ settings._init_dirs()
+ setconfig = load_default_config(settings, root_trees)
+ root_config = RootConfig(settings, root_trees, setconfig)
+ if "root_config" in root_trees:
+ # Propagate changes to the existing instance,
+ # which may be referenced by a depgraph.
+ root_trees["root_config"].update(root_config)
+ else:
+ root_trees["root_config"] = root_config
+
+ target_eroot = emerge_config.trees._target_eroot
+ emerge_config.target_config = \
+ emerge_config.trees[target_eroot]['root_config']
+ emerge_config.target_config.mtimedb = portage.MtimeDB(
+ os.path.join(target_eroot, portage.CACHE_PATH, "mtimedb"))
+ emerge_config.running_config = emerge_config.trees[
+ emerge_config.trees._running_eroot]['root_config']
+ QueryCommand._db = emerge_config.trees
+
+ return emerge_config
+
+def getgccversion(chost=None):
+ """
+ rtype: C{str}
+ return: the current in-use gcc version
+ """
+
+ gcc_ver_command = ['gcc', '-dumpversion']
+ gcc_ver_prefix = 'gcc-'
+
+ gcc_not_found_error = red(
+ "!!! No gcc found. You probably need to 'source /etc/profile'\n" +
+ "!!! to update the environment of this terminal and possibly\n" +
+ "!!! other terminals also.\n"
+ )
+
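+ # Detection cascades: gcc-config -c first, then the
+ # CHOST-prefixed gcc, and finally a plain gcc before
+ # reporting failure.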
+ if chost:
+ try:
+ proc = subprocess.Popen(["gcc-config", "-c"],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK and myoutput.startswith(chost + "-"):
+ return myoutput.replace(chost + "-", gcc_ver_prefix, 1)
+
+ try:
+ proc = subprocess.Popen(
+ [chost + "-" + gcc_ver_command[0]] + gcc_ver_command[1:],
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ try:
+ proc = subprocess.Popen(gcc_ver_command,
+ stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
+ except OSError:
+ myoutput = None
+ mystatus = 1
+ else:
+ myoutput = _unicode_decode(proc.communicate()[0]).rstrip("\n")
+ mystatus = proc.wait()
+ if mystatus == os.EX_OK:
+ return gcc_ver_prefix + myoutput
+
+ portage.writemsg(gcc_not_found_error, noiselevel=-1)
+ return "[unavailable]"
+
+# Warn about features that may confuse users and
+# lead them to report invalid bugs.
+_emerge_features_warn = frozenset(['keeptemp', 'keepwork'])
+
+def validate_ebuild_environment(trees):
+ features_warn = set()
+ for myroot in trees:
+ settings = trees[myroot]["vartree"].settings
+ settings.validate()
+ features_warn.update(
+ _emerge_features_warn.intersection(settings.features))
+
+ if features_warn:
+ msg = "WARNING: The FEATURES variable contains one " + \
+ "or more values that should be disabled under " + \
+ "normal circumstances: %s" % " ".join(features_warn)
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 65):
+ out.ewarn(line)
+
+ check_locale()
+
+def check_procfs():
+ procfs_path = '/proc'
+ if platform.system() not in ("Linux",) or \
+ os.path.ismount(procfs_path):
+ return os.EX_OK
+ msg = "It seems that %s is not mounted. You have been warned." % procfs_path
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+def config_protect_check(trees):
+ for root, root_trees in trees.items():
+ settings = root_trees["root_config"].settings
+ if not settings.get("CONFIG_PROTECT"):
+ msg = "!!! CONFIG_PROTECT is empty"
+ if settings["ROOT"] != "/":
+ msg += " for '%s'" % root
+ msg += "\n"
+ writemsg_level(msg, level=logging.WARN, noiselevel=-1)
+
+def apply_priorities(settings):
+ ionice(settings)
+ nice(settings)
+
+def nice(settings):
+ try:
+ os.nice(int(settings.get("PORTAGE_NICENESS", "0")))
+ except (OSError, ValueError) as e:
+ out = portage.output.EOutput()
+ out.eerror("Failed to change nice value to '%s'" % \
+ settings.get("PORTAGE_NICENESS", "0"))
+ out.eerror("%s\n" % str(e))
+
+def ionice(settings):
+
+ ionice_cmd = settings.get("PORTAGE_IONICE_COMMAND")
+ if ionice_cmd:
+ ionice_cmd = portage.util.shlex_split(ionice_cmd)
+ if not ionice_cmd:
+ return
+
+ variables = {"PID" : str(os.getpid())}
+ cmd = [varexpand(x, mydict=variables) for x in ionice_cmd]
+
+ try:
+ rval = portage.process.spawn(cmd, env=os.environ)
+ except portage.exception.CommandNotFound:
+ # The OS kernel probably doesn't support ionice,
+ # so return silently.
+ return
+
+ if rval != os.EX_OK:
+ out = portage.output.EOutput()
+ out.eerror("PORTAGE_IONICE_COMMAND returned %d" % (rval,))
+ out.eerror("See the make.conf(5) man page for PORTAGE_IONICE_COMMAND usage instructions.")
+
+def setconfig_fallback(root_config):
+ setconfig = root_config.setconfig
+ setconfig._create_default_config()
+ setconfig._parse(update=True)
+ root_config.sets = setconfig.getSets()
+
+def get_missing_sets(root_config):
+ # emerge requires existence of "world", "selected", and "system"
+ missing_sets = []
+
+ for s in ("selected", "system", "world",):
+ if s not in root_config.sets:
+ missing_sets.append(s)
+
+ return missing_sets
+
+def missing_sets_warning(root_config, missing_sets):
+ if len(missing_sets) > 2:
+ missing_sets_str = ", ".join('"%s"' % s for s in missing_sets[:-1])
+ missing_sets_str += ', and "%s"' % missing_sets[-1]
+ elif len(missing_sets) == 2:
+ missing_sets_str = '"%s" and "%s"' % tuple(missing_sets)
+ else:
+ missing_sets_str = '"%s"' % missing_sets[-1]
+ msg = ["emerge: incomplete set configuration, " + \
+ "missing set(s): %s" % missing_sets_str]
+ if root_config.sets:
+ msg.append(" sets defined: %s" % ", ".join(root_config.sets))
+ global_config_path = portage.const.GLOBAL_CONFIG_PATH
+ if portage.const.EPREFIX:
+ global_config_path = os.path.join(portage.const.EPREFIX,
+ portage.const.GLOBAL_CONFIG_PATH.lstrip(os.sep))
+ msg.append(" This usually means that '%s'" % \
+ (os.path.join(global_config_path, "sets/portage.conf"),))
+ msg.append(" is missing or corrupt.")
+ msg.append(" Falling back to default world and system set configuration!!!")
+ for line in msg:
+ writemsg_level(line + "\n", level=logging.ERROR, noiselevel=-1)
+
+def ensure_required_sets(trees):
+ warning_shown = False
+ for root_trees in trees.values():
+ missing_sets = get_missing_sets(root_trees["root_config"])
+ if missing_sets and not warning_shown:
+ warning_shown = True
+ missing_sets_warning(root_trees["root_config"], missing_sets)
+ if missing_sets:
+ setconfig_fallback(root_trees["root_config"])
+
+def expand_set_arguments(myfiles, myaction, root_config):
+ retval = os.EX_OK
+ setconfig = root_config.setconfig
+
+ sets = setconfig.getSets()
+
+ # In order to know exactly which atoms/sets should be added to the
+ # world file, the depgraph performs set expansion later. It will get
+ # confused about where the atoms came from if it's not allowed to
+ # expand them itself.
+ do_not_expand = myaction is None
+ newargs = []
+ for a in myfiles:
+ if a in ("system", "world"):
+ newargs.append(SETPREFIX+a)
+ else:
+ newargs.append(a)
+ myfiles = newargs
+ del newargs
+ newargs = []
+
+ # separators for set arguments
+ ARG_START = "{"
+ ARG_END = "}"
+
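+ # A set argument may carry options in braces, e.g.
+ # @someset{key=value,flag} (hypothetical set name); those
+ # options are parsed here and handed to setconfig before
+ # expansion.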
+ for i in range(0, len(myfiles)):
+ if myfiles[i].startswith(SETPREFIX):
+ start = 0
+ end = 0
+ x = myfiles[i][len(SETPREFIX):]
+ newset = ""
+ while x:
+ start = x.find(ARG_START)
+ end = x.find(ARG_END)
+ if start > 0 and start < end:
+ namepart = x[:start]
+ argpart = x[start+1:end]
+
+ # TODO: implement proper quoting
+ args = argpart.split(",")
+ options = {}
+ for a in args:
+ if "=" in a:
+ k, v = a.split("=", 1)
+ options[k] = v
+ else:
+ options[a] = "True"
+ setconfig.update(namepart, options)
+ newset += (x[:start-len(namepart)]+namepart)
+ x = x[end+len(ARG_END):]
+ else:
+ newset += x
+ x = ""
+ myfiles[i] = SETPREFIX+newset
+
+ sets = setconfig.getSets()
+
+ # display errors that occurred while loading the SetConfig instance
+ for e in setconfig.errors:
+ print(colorize("BAD", "Error during set creation: %s" % e))
+
+ unmerge_actions = ("unmerge", "prune", "clean", "depclean", "rage-clean")
+
+ for a in myfiles:
+ if a.startswith(SETPREFIX):
+ s = a[len(SETPREFIX):]
+ if s not in sets:
+ display_missing_pkg_set(root_config, s)
+ return (None, 1)
+ if s == "installed":
+ msg = ("The @installed set is not recommended when "
+ "updating packages because it will often "
+ "introduce unsolved blocker conflicts. Please "
+ "refer to bug #387059 for details.")
+ out = portage.output.EOutput()
+ for line in textwrap.wrap(msg, 57):
+ out.ewarn(line)
+ setconfig.active.append(s)
+
+ if do_not_expand:
+ # Loading sets can be slow, so skip it here, in order
+ # to allow the depgraph to indicate progress with the
+ # spinner while sets are loading (bug #461412).
+ newargs.append(a)
+ continue
+
+ try:
+ set_atoms = setconfig.getSetAtoms(s)
+ except portage.exception.PackageSetNotFound as e:
+ writemsg_level(("emerge: the given set '%s' " + \
+ "contains a non-existent set named '%s'.\n") % \
+ (s, e), level=logging.ERROR, noiselevel=-1)
+ if s in ('world', 'selected') and \
+ SETPREFIX + e.value in sets['selected']:
+ writemsg_level(("Use `emerge --deselect %s%s` to "
+ "remove this set from world_sets.\n") %
+ (SETPREFIX, e,), level=logging.ERROR,
+ noiselevel=-1)
+ return (None, 1)
+ if myaction in unmerge_actions and \
+ not sets[s].supportsOperation("unmerge"):
+ writemsg_level("emerge: the given set '%s' does " % s + \
+ "not support unmerge operations\n",
+ level=logging.ERROR, noiselevel=-1)
+ retval = 1
+ elif not set_atoms:
+ writemsg_level("emerge: '%s' is an empty set\n" % s,
+ level=logging.INFO, noiselevel=-1)
+ else:
+ newargs.extend(set_atoms)
+ for error_msg in sets[s].errors:
+ writemsg_level("%s\n" % (error_msg,),
+ level=logging.ERROR, noiselevel=-1)
+ else:
+ newargs.append(a)
+ return (newargs, retval)
+
+def repo_name_check(trees):
+ missing_repo_names = set()
+ for root_trees in trees.values():
+ porttree = root_trees.get("porttree")
+ if porttree:
+ portdb = porttree.dbapi
+ missing_repo_names.update(portdb.getMissingRepoNames())
+
+ # Skip warnings about missing repo_name entries for
+ # /usr/local/portage (see bug #248603).
+ try:
+ missing_repo_names.remove('/usr/local/portage')
+ except KeyError:
+ pass
+
+ if missing_repo_names:
+ msg = []
+ msg.append("WARNING: One or more repositories " + \
+ "have missing repo_name entries:")
+ msg.append("")
+ for p in missing_repo_names:
+ msg.append("\t%s/profiles/repo_name" % (p,))
+ msg.append("")
+ msg.extend(textwrap.wrap("NOTE: Each repo_name entry " + \
+ "should be a plain text file containing a unique " + \
+ "name for the repository on the first line.", 70))
+ msg.append("\n")
+ writemsg_level("".join("%s\n" % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(missing_repo_names)
+
+def repo_name_duplicate_check(trees):
+ ignored_repos = {}
+ for root, root_trees in trees.items():
+ if 'porttree' in root_trees:
+ portdb = root_trees['porttree'].dbapi
+ if portdb.settings.get('PORTAGE_REPO_DUPLICATE_WARN') != '0':
+ for repo_name, paths in portdb.getIgnoredRepos():
+ k = (root, repo_name, portdb.getRepositoryPath(repo_name))
+ ignored_repos.setdefault(k, []).extend(paths)
+
+ if ignored_repos:
+ msg = []
+ msg.append('WARNING: One or more repositories ' + \
+ 'have been ignored due to duplicate')
+ msg.append(' profiles/repo_name entries:')
+ msg.append('')
+ for k in sorted(ignored_repos):
+ msg.append(' %s overrides' % ", ".join(k))
+ for path in ignored_repos[k]:
+ msg.append(' %s' % (path,))
+ msg.append('')
+ msg.extend(' ' + x for x in textwrap.wrap(
+ "All profiles/repo_name entries must be unique in order " + \
+ "to avoid having duplicates ignored. " + \
+ "Set PORTAGE_REPO_DUPLICATE_WARN=\"0\" in " + \
+ "/etc/portage/make.conf if you would like to disable this warning."))
+ msg.append("\n")
+ writemsg_level(''.join('%s\n' % l for l in msg),
+ level=logging.WARNING, noiselevel=-1)
+
+ return bool(ignored_repos)
+
+def run_action(emerge_config, build_job, context):
+
+ # skip global updates prior to sync, since it's called after sync
+ if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
+ emerge_config.opts.get('--package-moves') != 'n' and \
+ _global_updates(emerge_config.trees,
+ emerge_config.target_config.mtimedb["updates"],
+ quiet=("--quiet" in emerge_config.opts)):
+ emerge_config.target_config.mtimedb.commit()
+ # Reload the whole config from scratch.
+ load_emerge_config(emerge_config=emerge_config)
+
+ xterm_titles = "notitles" not in \
+ emerge_config.target_config.settings.features
+ if xterm_titles:
+ xtermTitle("emerge")
+
+ if "--digest" in emerge_config.opts:
+ os.environ["FEATURES"] = os.environ.get("FEATURES","") + " digest"
+ # Reload the whole config from scratch so that the portdbapi internal
+ # config is updated with new FEATURES.
+ load_emerge_config(emerge_config=emerge_config)
+
+ # NOTE: adjust_configs() can map options to FEATURES, so any relevant
+ # options adjustments should be made prior to calling adjust_configs().
+ if "--buildpkgonly" in emerge_config.opts:
+ emerge_config.opts["--buildpkg"] = True
+
+ if "getbinpkg" in emerge_config.target_config.settings.features:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--getbinpkg"] = True
+
+ if "--getbinpkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkgonly"] = True
+
+ if "--getbinpkg" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ if "--usepkgonly" in emerge_config.opts:
+ emerge_config.opts["--usepkg"] = True
+
+ # Populate the bintree with current --getbinpkg setting.
+ # This needs to happen before:
+ # * expand_set_arguments, in case any sets use the bintree
+ # * adjust_configs and profile_check, in order to propagate
+ # implicit IUSE and USE_EXPAND settings from the binhost(s)
+ if (emerge_config.action in ('search', None) and
+ '--usepkg' in emerge_config.opts):
+ for mytrees in emerge_config.trees.values():
+ kwargs = {}
+ if (mytrees is emerge_config.target_config.trees and
+ emerge_config.target_config is not emerge_config.running_config and
+ emerge_config.opts.get('--quickpkg-direct', 'n') == 'y'):
+ kwargs['add_repos'] = (emerge_config.running_config.trees['vartree'].dbapi,)
+
+ try:
+ mytrees['bintree'].populate(
+ getbinpkgs='--getbinpkg' in emerge_config.opts,
+ **kwargs)
+ except ParseError as e:
+ writemsg('\n\n!!!%s.\nSee make.conf(5) for more info.\n'
+ % (e,), noiselevel=-1)
+ return 1
+
+ adjust_configs(emerge_config.opts, emerge_config.trees)
+
+ if "--changelog" in emerge_config.opts:
+ writemsg_level(
+ " %s The emerge --changelog (or -l) option is deprecated\n" %
+ warn("*"), level=logging.WARNING, noiselevel=-1)
+
+ if profile_check(emerge_config.trees, emerge_config.action) != os.EX_OK:
+ return 1
+
+ apply_priorities(emerge_config.target_config.settings)
+
+ if ("--autounmask-continue" in emerge_config.opts and
+ emerge_config.opts.get("--autounmask") == "n"):
+ writemsg_level(
+ " %s --autounmask-continue has been disabled by --autounmask=n\n" %
+ warn("*"), level=logging.WARNING, noiselevel=-1)
+
+ for fmt in emerge_config.target_config.settings.get("PORTAGE_BINPKG_FORMAT", "").split():
+ if fmt not in portage.const.SUPPORTED_BINPKG_FORMATS:
+ if "--pkg-format" in emerge_config.opts:
+ problematic="--pkg-format"
+ else:
+ problematic="PORTAGE_BINPKG_FORMAT"
+
+ writemsg_level(("emerge: %s is not set correctly. Format " + \
+ "'%s' is not supported.\n") % (problematic, fmt),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if emerge_config.action == 'version':
+ writemsg_stdout(getportageversion(
+ emerge_config.target_config.settings["PORTDIR"],
+ None,
+ emerge_config.target_config.settings.profile_path,
+ emerge_config.target_config.settings.get("CHOST"),
+ emerge_config.target_config.trees['vartree'].dbapi) + '\n',
+ noiselevel=-1)
+ return 0
+ elif emerge_config.action == 'help':
+ emerge_help()
+ return 0
+
+ spinner = stdout_spinner()
+ if "candy" in emerge_config.target_config.settings.features:
+ spinner.update = spinner.update_scroll
+
+ if "--quiet" not in emerge_config.opts:
+ portage.deprecated_profile_check(
+ settings=emerge_config.target_config.settings)
+ repo_name_check(emerge_config.trees)
+ repo_name_duplicate_check(emerge_config.trees)
+ config_protect_check(emerge_config.trees)
+ check_procfs()
+
+ for mytrees in emerge_config.trees.values():
+ mydb = mytrees["porttree"].dbapi
+ # Freeze the portdbapi for performance (memoize all xmatch results).
+ mydb.freeze()
+
+ del mytrees, mydb
+
+ for x in emerge_config.args:
+ if x.endswith((".ebuild", ".tbz2")) and \
+ os.path.exists(os.path.abspath(x)):
+ print(colorize("BAD", "\n*** emerging by path is broken "
+ "and may not always work!!!\n"))
+ break
+
+ if emerge_config.action == "list-sets":
+ writemsg_stdout("".join("%s\n" % s for s in
+ sorted(emerge_config.target_config.sets)))
+ return os.EX_OK
+ elif emerge_config.action == "check-news":
+ news_counts = count_unread_news(
+ emerge_config.target_config.trees["porttree"].dbapi,
+ emerge_config.target_config.trees["vartree"].dbapi)
+ if any(news_counts.values()):
+ display_news_notifications(news_counts)
+ elif "--quiet" not in emerge_config.opts:
+ print("", colorize("GOOD", "*"), "No news items were found.")
+ return os.EX_OK
+
+ ensure_required_sets(emerge_config.trees)
+
+ if emerge_config.action is None and \
+ "--resume" in emerge_config.opts and emerge_config.args:
+ writemsg("emerge: unexpected argument(s) for --resume: %s\n" %
+ " ".join(emerge_config.args), noiselevel=-1)
+ return 1
+
+ # only expand sets for actions taking package arguments
+ oldargs = emerge_config.args[:]
+ if emerge_config.action in ("clean", "config", "depclean",
+ "info", "prune", "unmerge", "rage-clean", None):
+ newargs, retval = expand_set_arguments(
+ emerge_config.args, emerge_config.action,
+ emerge_config.target_config)
+ if retval != os.EX_OK:
+ return retval
+
+ # Need to handle empty sets specially, otherwise emerge will react
+ # with the help message for empty argument lists
+ if oldargs and not newargs:
+ print("emerge: no targets left after set expansion")
+ return 0
+
+ emerge_config.args = newargs
+
+ if "--tree" in emerge_config.opts and \
+ "--columns" in emerge_config.opts:
+ print("emerge: can't specify both of \"--tree\" and \"--columns\".")
+ return 1
+
+ if '--emptytree' in emerge_config.opts and \
+ '--noreplace' in emerge_config.opts:
+ writemsg_level("emerge: can't specify both of " + \
+ "\"--emptytree\" and \"--noreplace\".\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ if ("--quiet" in emerge_config.opts):
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = -1
+
+ if "--fetch-all-uri" in emerge_config.opts:
+ emerge_config.opts["--fetchonly"] = True
+
+ if "--skipfirst" in emerge_config.opts and \
+ "--resume" not in emerge_config.opts:
+ emerge_config.opts["--resume"] = True
+
+ # Allow -p to remove --ask
+ if "--pretend" in emerge_config.opts:
+ emerge_config.opts.pop("--ask", None)
+
+ # forbid --ask when not in a terminal
+ # note: this breaks `emerge --ask | tee logfile`, but that doesn't work anyway.
+ if ("--ask" in emerge_config.opts) and (not sys.stdin.isatty()):
+ portage.writemsg("!!! \"--ask\" should only be used in a terminal. Exiting.\n",
+ noiselevel=-1)
+ return 1
+
+ if emerge_config.target_config.settings.get("PORTAGE_DEBUG", "") == "1":
+ spinner.update = spinner.update_quiet
+ portage.util.noiselimit = 0
+ if "python-trace" in emerge_config.target_config.settings.features:
+ portage.debug.set_trace(True)
+
+ if not ("--quiet" in emerge_config.opts):
+ if '--nospinner' in emerge_config.opts or \
+ emerge_config.target_config.settings.get('TERM') == 'dumb' or \
+ not sys.stdout.isatty():
+ spinner.update = spinner.update_basic
+
+ if "--debug" in emerge_config.opts:
+ print("myaction", emerge_config.action)
+ print("myopts", emerge_config.opts)
+
+ if not emerge_config.action and not emerge_config.args and \
+ "--resume" not in emerge_config.opts:
+ emerge_help()
+ return 1
+
+ pretend = "--pretend" in emerge_config.opts
+ fetchonly = "--fetchonly" in emerge_config.opts or \
+ "--fetch-all-uri" in emerge_config.opts
+ buildpkgonly = "--buildpkgonly" in emerge_config.opts
+
+ # check if root user is the current user for the actions where emerge needs this
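+ # portage.data.secpass is 2 for root, 1 for portage group
+ # members, and 0 otherwise.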
+ if portage.data.secpass < 2:
+ # We've already allowed "--version" and "--help" above.
+ if "--pretend" not in emerge_config.opts and \
+ emerge_config.action not in ("search", "info"):
+ need_superuser = emerge_config.action in ('clean', 'depclean',
+ 'deselect', 'prune', 'unmerge', "rage-clean") or not \
+ (fetchonly or \
+ (buildpkgonly and portage.data.secpass >= 1) or \
+ emerge_config.action in ("metadata", "regen", "sync"))
+ if portage.data.secpass < 1 or \
+ need_superuser:
+ if need_superuser:
+ access_desc = "superuser"
+ else:
+ access_desc = "portage group"
+ # Always show portage_group_warning() when only portage group
+ # access is required but the user is not in the portage group.
+ if "--ask" in emerge_config.opts:
+ writemsg_stdout("This action requires %s access...\n" % \
+ (access_desc,), noiselevel=-1)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ uq = UserQuery(emerge_config.opts)
+ if uq.query("Would you like to add --pretend to options?",
+ "--ask-enter-invalid" in emerge_config.opts) == "No":
+ return 128 + signal.SIGINT
+ emerge_config.opts["--pretend"] = True
+ emerge_config.opts.pop("--ask")
+ else:
+ sys.stderr.write(("emerge: %s access is required\n") \
+ % access_desc)
+ if portage.data.secpass < 1 and not need_superuser:
+ portage.data.portage_group_warning()
+ return 1
+
+ # Disable emergelog for everything except build or unmerge operations.
+ # This helps minimize parallel emerge.log entries that can confuse log
+ # parsers like genlop.
+ disable_emergelog = False
+ for x in ("--pretend", "--fetchonly", "--fetch-all-uri"):
+ if x in emerge_config.opts:
+ disable_emergelog = True
+ break
+ if disable_emergelog:
+ pass
+ elif emerge_config.action in ("search", "info"):
+ disable_emergelog = True
+ elif portage.data.secpass < 1:
+ disable_emergelog = True
+
+ import _emerge.emergelog
+ _emerge.emergelog._disable = disable_emergelog
+
+ if not disable_emergelog:
+ emerge_log_dir = \
+ emerge_config.target_config.settings.get('EMERGE_LOG_DIR')
+ if emerge_log_dir:
+ try:
+ # At least the parent needs to exist for the lock file.
+ portage.util.ensure_dirs(emerge_log_dir)
+ except portage.exception.PortageException as e:
+ writemsg_level("!!! Error creating directory for " + \
+ "EMERGE_LOG_DIR='%s':\n!!! %s\n" % \
+ (emerge_log_dir, e),
+ noiselevel=-1, level=logging.ERROR)
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+ else:
+ _emerge.emergelog._emerge_log_dir = emerge_log_dir
+ else:
+ _emerge.emergelog._emerge_log_dir = os.path.join(os.sep,
+ portage.const.EPREFIX.lstrip(os.sep), "var", "log")
+ portage.util.ensure_dirs(_emerge.emergelog._emerge_log_dir)
+
+ if not "--pretend" in emerge_config.opts:
+ time_fmt = "%b %d, %Y %H:%M:%S"
+ if sys.hexversion < 0x3000000:
+ time_fmt = portage._unicode_encode(time_fmt)
+ time_str = time.strftime(time_fmt, time.localtime(time.time()))
+ # Avoid potential UnicodeDecodeError in Python 2, since strftime
+ # returns bytes in Python 2, and %b may contain non-ascii chars.
+ time_str = _unicode_decode(time_str,
+ encoding=_encodings['content'], errors='replace')
+ emergelog(xterm_titles, "Started emerge on: %s" % time_str)
+ myelogstr=""
+ if emerge_config.opts:
+ opt_list = []
+ for opt, arg in emerge_config.opts.items():
+ if arg is True:
+ opt_list.append(opt)
+ elif isinstance(arg, list):
+ # arguments like --exclude that use 'append' action
+ for x in arg:
+ opt_list.append("%s=%s" % (opt, x))
+ else:
+ opt_list.append("%s=%s" % (opt, arg))
+ myelogstr=" ".join(opt_list)
+ if emerge_config.action:
+ myelogstr += " --" + emerge_config.action
+ if oldargs:
+ myelogstr += " " + " ".join(oldargs)
+ emergelog(xterm_titles, " *** emerge " + myelogstr)
+
+ oldargs = None
+
+ def emergeexitsig(signum, frame):
+ signal.signal(signal.SIGTERM, signal.SIG_IGN)
+ portage.util.writemsg(
+ "\n\nExiting on signal %(signal)s\n" % {"signal":signum})
+ sys.exit(128 + signum)
+
+ signal.signal(signal.SIGTERM, emergeexitsig)
+
+ def emergeexit():
+ """This gets out final log message in before we quit."""
+ if "--pretend" not in emerge_config.opts:
+ emergelog(xterm_titles, " *** terminating.")
+ if xterm_titles:
+ xtermTitleReset()
+ portage.atexit_register(emergeexit)
+
+ if emerge_config.action in ("config", "metadata", "regen", "sync"):
+ if "--pretend" in emerge_config.opts:
+ sys.stderr.write(("emerge: The '%s' action does " + \
+ "not support '--pretend'.\n") % emerge_config.action)
+ return 1
+
+ if "sync" == emerge_config.action:
+ return action_sync(emerge_config)
+ elif "metadata" == emerge_config.action:
+ action_metadata(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts)
+ elif emerge_config.action=="regen":
+ validate_ebuild_environment(emerge_config.trees)
+ return action_regen(emerge_config.target_config.settings,
+ emerge_config.target_config.trees['porttree'].dbapi,
+ emerge_config.opts.get("--jobs"),
+ emerge_config.opts.get("--load-average"))
+ # HELP action
+ elif "config" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ return action_config(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, emerge_config.args)
+
+ # SEARCH action
+ elif "search" == emerge_config.action:
+ validate_ebuild_environment(emerge_config.trees)
+ action_search(emerge_config.target_config,
+ emerge_config.opts, emerge_config.args, spinner)
+
+ elif emerge_config.action in \
+ ('clean', 'depclean', 'deselect', 'prune', 'unmerge', 'rage-clean'):
+ validate_ebuild_environment(emerge_config.trees)
+ rval = action_uninstall(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.target_config.mtimedb["ldpath"],
+ emerge_config.opts, emerge_config.action,
+ emerge_config.args, spinner)
+ if not (emerge_config.action == 'deselect' or
+ buildpkgonly or fetchonly or pretend):
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, rval)
+ return rval
+
+ elif emerge_config.action == 'info':
+
+ # Ensure atoms are valid before calling unmerge().
+ vardb = emerge_config.target_config.trees['vartree'].dbapi
+ portdb = emerge_config.target_config.trees['porttree'].dbapi
+ bindb = emerge_config.target_config.trees['bintree'].dbapi
+ valid_atoms = []
+ for x in emerge_config.args:
+ if is_valid_package_atom(x, allow_repo=True):
+ try:
+ #look at the installed files first, if there is no match
+ #look at the ebuilds, since EAPI 4 allows running pkg_info
+ #on non-installed packages
+ valid_atom = dep_expand(x, mydb=vardb)
+ if valid_atom.cp.split("/")[0] == "null":
+ valid_atom = dep_expand(x, mydb=portdb)
+
+ if valid_atom.cp.split("/")[0] == "null" and \
+ "--usepkg" in emerge_config.opts:
+ valid_atom = dep_expand(x, mydb=bindb)
+
+ valid_atoms.append(valid_atom)
+
+ except portage.exception.AmbiguousPackageName as e:
+ msg = "The short ebuild name \"" + x + \
+ "\" is ambiguous. Please specify " + \
+ "one of the following " + \
+ "fully-qualified ebuild names instead:"
+ for line in textwrap.wrap(msg, 70):
+ writemsg_level("!!! %s\n" % (line,),
+ level=logging.ERROR, noiselevel=-1)
+ for i in e.args[0]:
+ writemsg_level(" %s\n" % colorize("INFORM", i),
+ level=logging.ERROR, noiselevel=-1)
+ writemsg_level("\n", level=logging.ERROR, noiselevel=-1)
+ return 1
+ continue
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ return action_info(emerge_config.target_config.settings,
+ emerge_config.trees, emerge_config.opts, valid_atoms)
+
+ # "update", "system", or just process files:
+ else:
+ validate_ebuild_environment(emerge_config.trees)
+
+ for x in emerge_config.args:
+ if x.startswith(SETPREFIX) or \
+ is_valid_package_atom(x, allow_repo=True):
+ continue
+ if x[:1] == os.sep:
+ continue
+ try:
+ os.lstat(x)
+ continue
+ except OSError:
+ pass
+ msg = []
+ msg.append("'%s' is not a valid package atom." % (x,))
+ msg.append("Please check ebuild(5) for full details.")
+ writemsg_level("".join("!!! %s\n" % line for line in msg),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # GLEP 42 says to display news *after* an emerge --pretend
+ if "--pretend" not in emerge_config.opts:
+ uq = UserQuery(emerge_config.opts)
+ if display_news_notification(emerge_config.target_config,
+ emerge_config.opts) \
+ and "--ask" in emerge_config.opts \
+ and "--read-news" in emerge_config.opts \
+ and uq.query("Would you like to read the news items while " \
+ "calculating dependencies?",
+ '--ask-enter-invalid' in emerge_config.opts) == "Yes":
+ try:
+ subprocess.call(['eselect', 'news', 'read'])
+ # If eselect is not installed, Python <3.3 will throw an
+ # OSError. >=3.3 will throw a FileNotFoundError, which is a
+ # subclass of OSError.
+ except OSError:
+ writemsg("Please install eselect to use this feature.\n",
+ noiselevel=-1)
+ retval = action_build(emerge_config, build_job, context, spinner=spinner)
+ post_emerge(emerge_config.action, emerge_config.opts,
+ emerge_config.args, emerge_config.target_config.root,
+ emerge_config.trees, emerge_config.target_config.mtimedb, retval)
+
+ return retval
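+
+# Note: this gosbs variant of run_action() threads the build_job dict and the
+# request context through to action_build(), where stock emerge passes only
+# the emerge_config and the spinner.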
diff --git a/gosbs/_emerge/main.py b/gosbs/_emerge/main.py
new file mode 100644
index 0000000..e3cddfa
--- /dev/null
+++ b/gosbs/_emerge/main.py
@@ -0,0 +1,1317 @@
+# Copyright 1999-2019 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from __future__ import print_function
+
+import argparse
+import locale
+import platform
+import sys
+
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'logging',
+ 'portage.dep:Atom',
+ 'portage.util:writemsg_level',
+ 'textwrap',
+ 'gosbs._emerge.actions:load_emerge_config,run_action,' + \
+ 'validate_ebuild_environment',
+ '_emerge.help:help@emerge_help',
+ '_emerge.is_valid_package_atom:insert_category_into_atom'
+)
+from portage import os
+from portage.sync import _SUBMODULE_PATH_MAP
+
+if sys.hexversion >= 0x3000000:
+ long = int
+
+options=[
+"--alphabetical",
+"--ask-enter-invalid",
+"--buildpkgonly",
+"--changed-use",
+"--changelog", "--columns",
+"--debug",
+"--digest",
+"--emptytree",
+"--verbose-conflicts",
+"--fetchonly", "--fetch-all-uri",
+"--ignore-default-opts",
+"--noconfmem",
+"--newrepo",
+"--newuse",
+"--nodeps", "--noreplace",
+"--nospinner", "--oneshot",
+"--onlydeps", "--pretend",
+"--quiet-repo-display",
+"--quiet-unmerge-warn",
+"--resume",
+"--searchdesc",
+"--skipfirst",
+"--tree",
+"--unordered-display",
+"--update",
+]
+
+shortmapping={
+"1":"--oneshot",
+"B":"--buildpkgonly",
+"c":"--depclean",
+"C":"--unmerge",
+"d":"--debug",
+"e":"--emptytree",
+"f":"--fetchonly", "F":"--fetch-all-uri",
+"h":"--help",
+"l":"--changelog",
+"n":"--noreplace", "N":"--newuse",
+"o":"--onlydeps", "O":"--nodeps",
+"p":"--pretend", "P":"--prune",
+"r":"--resume",
+"s":"--search", "S":"--searchdesc",
+"t":"--tree",
+"u":"--update", "U":"--changed-use",
+"V":"--version"
+}
+
+COWSAY_MOO = r"""
+
+  Larry loves Gentoo (%s)
+
+ _______________________
+< Have you mooed today? >
+ -----------------------
+        \   ^__^
+         \  (oo)\_______
+            (__)\       )\/\
+                ||----w |
+                ||     ||
+
+"""
+
+def multiple_actions(action1, action2):
+ sys.stderr.write("\n!!! Multiple actions requested... Please choose one only.\n")
+ sys.stderr.write("!!! '%s' or '%s'\n\n" % (action1, action2))
+ sys.exit(1)
+
+def insert_optional_args(args):
+ """
+ Parse optional arguments and insert a value if one has
+ not been provided. This is done before feeding the args
+ to the optparse parser since that parser does not support
+ this feature natively.
+ """
+
+ class valid_integers(object):
+ def __contains__(self, s):
+ try:
+ return int(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_integers = valid_integers()
+
+ class valid_floats(object):
+ def __contains__(self, s):
+ try:
+ return float(s) >= 0
+ except (ValueError, OverflowError):
+ return False
+
+ valid_floats = valid_floats()
+
+ y_or_n = ('y', 'n',)
+
+ new_args = []
+
+ default_arg_opts = {
+ '--alert' : y_or_n,
+ '--ask' : y_or_n,
+ '--autounmask' : y_or_n,
+ '--autounmask-continue' : y_or_n,
+ '--autounmask-only' : y_or_n,
+ '--autounmask-keep-keywords' : y_or_n,
+ '--autounmask-keep-masks': y_or_n,
+ '--autounmask-unrestricted-atoms' : y_or_n,
+ '--autounmask-write' : y_or_n,
+ '--binpkg-changed-deps' : y_or_n,
+ '--buildpkg' : y_or_n,
+ '--changed-deps' : y_or_n,
+ '--changed-slot' : y_or_n,
+ '--changed-deps-report' : y_or_n,
+ '--complete-graph' : y_or_n,
+ '--deep' : valid_integers,
+ '--depclean-lib-check' : y_or_n,
+ '--deselect' : y_or_n,
+ '--binpkg-respect-use' : y_or_n,
+ '--fail-clean' : y_or_n,
+ '--fuzzy-search' : y_or_n,
+ '--getbinpkg' : y_or_n,
+ '--getbinpkgonly' : y_or_n,
+ '--ignore-world' : y_or_n,
+ '--jobs' : valid_integers,
+ '--keep-going' : y_or_n,
+ '--load-average' : valid_floats,
+ '--onlydeps-with-rdeps' : y_or_n,
+ '--package-moves' : y_or_n,
+ '--quiet' : y_or_n,
+ '--quiet-build' : y_or_n,
+ '--quiet-fail' : y_or_n,
+ '--read-news' : y_or_n,
+ '--rebuild-if-new-slot': y_or_n,
+ '--rebuild-if-new-rev' : y_or_n,
+ '--rebuild-if-new-ver' : y_or_n,
+ '--rebuild-if-unbuilt' : y_or_n,
+ '--rebuilt-binaries' : y_or_n,
+ '--root-deps' : ('rdeps',),
+ '--select' : y_or_n,
+ '--selective' : y_or_n,
+ "--use-ebuild-visibility": y_or_n,
+ '--usepkg' : y_or_n,
+ '--usepkgonly' : y_or_n,
+ '--verbose' : y_or_n,
+ '--verbose-slot-rebuilds': y_or_n,
+ '--with-test-deps' : y_or_n,
+ }
+
+ short_arg_opts = {
+ 'D' : valid_integers,
+ 'j' : valid_integers,
+ }
+
+ # Don't make things like "-kn" expand to "-k n"
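+# Illustrative example of the return value (arguments assumed):
+#   parse_opts(['--pretend', '-v', 'app-misc/foo'])
+#   -> (None, {'--pretend': True, '--verbose': True}, ['app-misc/foo'])
+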
+ # since existence of -n makes it too ambiguous.
+ short_arg_opts_n = {
+ 'a' : y_or_n,
+ 'A' : y_or_n,
+ 'b' : y_or_n,
+ 'g' : y_or_n,
+ 'G' : y_or_n,
+ 'k' : y_or_n,
+ 'K' : y_or_n,
+ 'q' : y_or_n,
+ 'v' : y_or_n,
+ 'w' : y_or_n,
+ }
+
+ arg_stack = args[:]
+ arg_stack.reverse()
+ while arg_stack:
+ arg = arg_stack.pop()
+
+ default_arg_choices = default_arg_opts.get(arg)
+ if default_arg_choices is not None:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in default_arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ if arg[:1] != "-" or arg[:2] == "--":
+ new_args.append(arg)
+ continue
+
+ match = None
+ for k, arg_choices in short_arg_opts.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ for k, arg_choices in short_arg_opts_n.items():
+ if k in arg:
+ match = k
+ break
+
+ if match is None:
+ new_args.append(arg)
+ continue
+
+ if len(arg) == 2:
+ new_args.append(arg)
+ if arg_stack and arg_stack[-1] in arg_choices:
+ new_args.append(arg_stack.pop())
+ else:
+ # insert default argument
+ new_args.append('True')
+ continue
+
+ # Insert an empty placeholder in order to
+ # satisfy the requirements of optparse.
+
+ new_args.append("-" + match)
+ opt_arg = None
+ saved_opts = None
+
+ if arg[1:2] == match:
+ if match not in short_arg_opts_n and arg[2:] in arg_choices:
+ opt_arg = arg[2:]
+ else:
+ saved_opts = arg[2:]
+ opt_arg = "True"
+ else:
+ saved_opts = arg[1:].replace(match, "")
+ opt_arg = "True"
+
+ if opt_arg is None and arg_stack and \
+ arg_stack[-1] in arg_choices:
+ opt_arg = arg_stack.pop()
+
+ if opt_arg is None:
+ new_args.append("True")
+ else:
+ new_args.append(opt_arg)
+
+ if saved_opts is not None:
+ # Recycle these on arg_stack since they
+ # might contain another match.
+ arg_stack.append("-" + saved_opts)
+
+ return new_args
+
+def _find_bad_atoms(atoms, less_strict=False):
+ """
+ Declares all atoms as invalid that have an operator,
+ a use dependency, a blocker or a repo spec.
+ It accepts atoms with wildcards.
+ In less_strict mode it accepts operators and repo specs.
+ """
+ bad_atoms = []
+ for x in ' '.join(atoms).split():
+ atom = x
+ if "/" not in x.split(":")[0]:
+ x_cat = insert_category_into_atom(x, 'dummy-category')
+ if x_cat is not None:
+ atom = x_cat
+
+ bad_atom = False
+ try:
+ atom = Atom(atom, allow_wildcard=True, allow_repo=less_strict)
+ except portage.exception.InvalidAtom:
+ bad_atom = True
+
+ if bad_atom or (atom.operator and not less_strict) or atom.blocker or atom.use:
+ bad_atoms.append(x)
+ return bad_atoms
+
+
+def parse_opts(tmpcmdline, silent=False):
+ myaction=None
+ myopts = {}
+
+ actions = frozenset([
+ "clean", "check-news", "config", "depclean", "help",
+ "info", "list-sets", "metadata", "moo",
+ "prune", "rage-clean", "regen", "search",
+ "sync", "unmerge", "version",
+ ])
+
+ longopt_aliases = {"--cols":"--columns", "--skip-first":"--skipfirst"}
+ y_or_n = ("y", "n")
+ true_y_or_n = ("True", "y", "n")
+ true_y = ("True", "y")
+ argument_options = {
+
+ "--alert": {
+ "shortopt" : "-A",
+ "help" : "alert (terminal bell) on prompts",
+ "choices" : true_y_or_n
+ },
+
+ "--ask": {
+ "shortopt" : "-a",
+ "help" : "prompt before performing any actions",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask": {
+ "help" : "automatically unmask packages",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-backtrack": {
+ "help": ("continue backtracking when there are autounmask "
+ "configuration changes"),
+ "choices":("y", "n")
+ },
+
+ "--autounmask-continue": {
+ "help" : "write autounmask changes and continue",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-only": {
+ "help" : "only perform --autounmask",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-license": {
+ "help" : "allow autounmask to change package.license",
+ "choices" : y_or_n
+ },
+
+ "--autounmask-unrestricted-atoms": {
+ "help" : "write autounmask changes with >= atoms if possible",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-use": {
+ "help" : "allow autounmask to change package.use",
+ "choices" : y_or_n
+ },
+
+ "--autounmask-keep-keywords": {
+ "help" : "don't add package.accept_keywords entries",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-keep-masks": {
+ "help" : "don't add package.unmask entries",
+ "choices" : true_y_or_n
+ },
+
+ "--autounmask-write": {
+ "help" : "write changes made by --autounmask to disk",
+ "choices" : true_y_or_n
+ },
+
+ "--accept-properties": {
+ "help":"temporarily override ACCEPT_PROPERTIES",
+ "action":"store"
+ },
+
+ "--accept-restrict": {
+ "help":"temporarily override ACCEPT_RESTRICT",
+ "action":"store"
+ },
+
+ "--backtrack": {
+
+ "help" : "Specifies how many times to backtrack if dependency " + \
+ "calculation fails ",
+
+ "action" : "store"
+ },
+
+ "--binpkg-changed-deps": {
+ "help" : ("reject binary packages with outdated "
+ "dependencies"),
+ "choices" : true_y_or_n
+ },
+
+ "--buildpkg": {
+ "shortopt" : "-b",
+ "help" : "build binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--buildpkg-exclude": {
+ "help" :"A space separated list of package atoms for which " + \
+ "no binary packages should be built. This option overrides all " + \
+ "possible ways to enable building of binary packages.",
+
+ "action" : "append"
+ },
+
+ "--changed-deps": {
+ "help" : ("replace installed packages with "
+ "outdated dependencies"),
+ "choices" : true_y_or_n
+ },
+
+ "--changed-deps-report": {
+ "help" : ("report installed packages with "
+ "outdated dependencies"),
+ "choices" : true_y_or_n
+ },
+
+ "--changed-slot": {
+ "help" : ("replace installed packages with "
+ "outdated SLOT metadata"),
+ "choices" : true_y_or_n
+ },
+
+ "--config-root": {
+ "help":"specify the location for portage configuration files",
+ "action":"store"
+ },
+ "--color": {
+ "help":"enable or disable color output",
+ "choices":("y", "n")
+ },
+
+ "--complete-graph": {
+ "help" : "completely account for all known dependencies",
+ "choices" : true_y_or_n
+ },
+
+ "--complete-graph-if-new-use": {
+ "help" : "trigger --complete-graph behavior if USE or IUSE will change for an installed package",
+ "choices" : y_or_n
+ },
+
+ "--complete-graph-if-new-ver": {
+ "help" : "trigger --complete-graph behavior if an installed package version will change (upgrade or downgrade)",
+ "choices" : y_or_n
+ },
+
+ "--deep": {
+
+ "shortopt" : "-D",
+
+ "help" : "Specifies how deep to recurse into dependencies " + \
+ "of packages given as arguments. If no argument is given, " + \
+ "depth is unlimited. Default behavior is to skip " + \
+ "dependencies of installed packages.",
+
+ "action" : "store"
+ },
+
+ "--depclean-lib-check": {
+ "help" : "check for consumers of libraries before removing them",
+ "choices" : true_y_or_n
+ },
+
+ "--deselect": {
+ "help" : "remove atoms/sets from the world file",
+ "choices" : true_y_or_n
+ },
+
+ "--dynamic-deps": {
+ "help": "substitute the dependencies of installed packages with the dependencies of unbuilt ebuilds",
+ "choices": y_or_n
+ },
+
+ "--exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge won't install any ebuild or binary package that " + \
+ "matches any of the given package atoms.",
+
+ "action" : "append"
+ },
+
+ "--fail-clean": {
+ "help" : "clean temp files after build failure",
+ "choices" : true_y_or_n
+ },
+
+ "--fuzzy-search": {
+ "help": "Enable or disable fuzzy search",
+ "choices": true_y_or_n
+ },
+
+ "--ignore-built-slot-operator-deps": {
+ "help": "Ignore the slot/sub-slot := operator parts of dependencies that have "
+ "been recorded when packages where built. This option is intended "
+ "only for debugging purposes, and it only affects built packages "
+ "that specify slot/sub-slot := operator dependencies using the "
+ "experimental \"4-slot-abi\" EAPI.",
+ "choices": y_or_n
+ },
+
+ "--ignore-soname-deps": {
+ "help": "Ignore the soname dependencies of binary and "
+ "installed packages. This option is enabled by "
+ "default, since soname dependencies are relatively "
+ "new, and the required metadata is not guaranteed to "
+ "exist for binary and installed packages built with "
+ "older versions of portage.",
+ "choices": y_or_n
+ },
+
+ "--ignore-world": {
+ "help" : "ignore the @world package set and its dependencies",
+ "choices" : true_y_or_n
+ },
+
+ "--implicit-system-deps": {
+ "help": "Assume that packages may have implicit dependencies on"
+ "packages which belong to the @system set",
+ "choices": y_or_n
+ },
+
+ "--jobs": {
+
+ "shortopt" : "-j",
+
+ "help" : "Specifies the number of packages to build " + \
+ "simultaneously.",
+
+ "action" : "store"
+ },
+
+ "--keep-going": {
+ "help" : "continue as much as possible after an error",
+ "choices" : true_y_or_n
+ },
+
+ "--load-average": {
+
+ "help" :"Specifies that no new builds should be started " + \
+ "if there are other builds running and the load average " + \
+ "is at least LOAD (a floating-point number).",
+
+ "action" : "store"
+ },
+
+ "--misspell-suggestions": {
+ "help" : "enable package name misspell suggestions",
+ "choices" : ("y", "n")
+ },
+
+ "--with-bdeps": {
+ "help":"include unnecessary build time dependencies",
+ "choices":("y", "n")
+ },
+ "--with-bdeps-auto": {
+ "help":("automatically enable --with-bdeps for installation"
+ " actions, unless --usepkg is enabled"),
+ "choices":("y", "n")
+ },
+ "--reinstall": {
+ "help":"specify conditions to trigger package reinstallation",
+ "choices":["changed-use"]
+ },
+
+ "--reinstall-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will treat matching packages as if they are not " + \
+ "installed, and reinstall them if necessary. Implies --deep.",
+
+ "action" : "append",
+ },
+
+ "--binpkg-respect-use": {
+ "help" : "discard binary packages if their use flags \
+ don't match the current configuration",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkg": {
+ "shortopt" : "-g",
+ "help" : "fetch binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--getbinpkgonly": {
+ "shortopt" : "-G",
+ "help" : "fetch binary packages only",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkg-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will ignore matching binary packages. ",
+
+ "action" : "append",
+ },
+
+ "--onlydeps-with-rdeps": {
+ "help" : "modify interpretation of depedencies",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-exclude": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild these packages due to the " + \
+ "--rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--rebuild-ignore": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will not rebuild packages that depend on matching " + \
+ "packages due to the --rebuild flag. ",
+
+ "action" : "append",
+ },
+
+ "--package-moves": {
+ "help" : "perform package moves when necessary",
+ "choices" : true_y_or_n
+ },
+
+ "--prefix": {
+ "help" : "specify the installation prefix",
+ "action" : "store"
+ },
+
+ "--pkg-format": {
+ "help" : "format of result binary package",
+ "action" : "store",
+ },
+
+ "--quickpkg-direct": {
+ "help": "Enable use of installed packages directly as binary packages",
+ "choices": y_or_n
+ },
+
+ "--quiet": {
+ "shortopt" : "-q",
+ "help" : "reduced or condensed output",
+ "choices" : true_y_or_n
+ },
+
+ "--quiet-build": {
+ "help" : "redirect build output to logs",
+ "choices" : true_y_or_n,
+ },
+
+ "--quiet-fail": {
+ "help" : "suppresses display of the build log on stdout",
+ "choices" : true_y_or_n,
+ },
+
+ "--read-news": {
+ "help" : "offer to read unread news via eselect",
+ "choices" : true_y_or_n
+ },
+
+
+ "--rebuild-if-new-slot": {
+ "help" : ("Automatically rebuild or reinstall packages when slot/sub-slot := "
+ "operator dependencies can be satisfied by a newer slot, so that "
+ "older packages slots will become eligible for removal by the "
+ "--depclean action as soon as possible."),
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-rev": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version and revision.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-new-ver": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built, " + \
+ "if the dependency is not already installed with the " + \
+ "same version. Revision numbers are ignored.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuild-if-unbuilt": {
+ "help" : "Rebuild packages when dependencies that are " + \
+ "used at both build-time and run-time are built.",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries": {
+ "help" : "replace installed packages with binary " + \
+ "packages that have been rebuilt",
+ "choices" : true_y_or_n
+ },
+
+ "--rebuilt-binaries-timestamp": {
+ "help" : "use only binaries that are newer than this " + \
+ "timestamp for --rebuilt-binaries",
+ "action" : "store"
+ },
+
+ "--root": {
+ "help" : "specify the target root filesystem for merging packages",
+ "action" : "store"
+ },
+
+ "--root-deps": {
+ "help" : "modify interpretation of depedencies",
+ "choices" :("True", "rdeps")
+ },
+
+ "--search-index": {
+ "help": "Enable or disable indexed search (enabled by default)",
+ "choices": y_or_n
+ },
+
+ "--search-similarity": {
+ "help": ("Set minimum similarity percentage for fuzzy seach "
+ "(a floating-point number between 0 and 100)"),
+ "action": "store"
+ },
+
+ "--select": {
+ "shortopt" : "-w",
+ "help" : "add specified packages to the world set " + \
+ "(inverse of --oneshot)",
+ "choices" : true_y_or_n
+ },
+
+ "--selective": {
+ "help" : "identical to --noreplace",
+ "choices" : true_y_or_n
+ },
+
+ "--sync-submodule": {
+ "help" : ("Restrict sync to the specified submodule(s)."
+ " (--sync action only)"),
+ "choices" : tuple(_SUBMODULE_PATH_MAP),
+ "action" : "append",
+ },
+
+ "--sysroot": {
+ "help":"specify the location for build dependencies specified in DEPEND",
+ "action":"store"
+ },
+
+ "--use-ebuild-visibility": {
+ "help" : "use unbuilt ebuild metadata for visibility checks on built packages",
+ "choices" : true_y_or_n
+ },
+
+ "--useoldpkg-atoms": {
+ "help" :"A space separated list of package names or slot atoms. " + \
+ "Emerge will prefer matching binary packages over newer unbuilt packages. ",
+
+ "action" : "append",
+ },
+
+ "--usepkg": {
+ "shortopt" : "-k",
+ "help" : "use binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--usepkgonly": {
+ "shortopt" : "-K",
+ "help" : "use only binary packages",
+ "choices" : true_y_or_n
+ },
+
+ "--verbose": {
+ "shortopt" : "-v",
+ "help" : "verbose output",
+ "choices" : true_y_or_n
+ },
+ "--verbose-slot-rebuilds": {
+ "help" : "verbose slot rebuild output",
+ "choices" : true_y_or_n
+ },
+ "--with-test-deps": {
+ "help" : "pull in test deps for packages " + \
+ "matched by arguments",
+ "choices" : true_y_or_n
+ },
+ }
+
+ parser = argparse.ArgumentParser(add_help=False)
+
+ for action_opt in actions:
+ parser.add_argument("--" + action_opt, action="store_true",
+ dest=action_opt.replace("-", "_"), default=False)
+ for myopt in options:
+ parser.add_argument(myopt, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+ for shortopt, longopt in shortmapping.items():
+ parser.add_argument("-" + shortopt, action="store_true",
+ dest=longopt.lstrip("--").replace("-", "_"), default=False)
+ for myalias, myopt in longopt_aliases.items():
+ parser.add_argument(myalias, action="store_true",
+ dest=myopt.lstrip("--").replace("-", "_"), default=False)
+
+ for myopt, kwargs in argument_options.items():
+ shortopt = kwargs.pop("shortopt", None)
+ args = [myopt]
+ if shortopt is not None:
+ args.append(shortopt)
+ parser.add_argument(dest=myopt.lstrip("--").replace("-", "_"),
+ *args, **kwargs)
+
+ parser.add_argument('positional_args', nargs='*')
+
+ tmpcmdline = insert_optional_args(tmpcmdline)
+
+ myoptions = parser.parse_args(args=tmpcmdline)
+
+ if myoptions.alert in true_y:
+ myoptions.alert = True
+ else:
+ myoptions.alert = None
+
+ if myoptions.ask in true_y:
+ myoptions.ask = True
+ else:
+ myoptions.ask = None
+
+ if myoptions.autounmask in true_y:
+ myoptions.autounmask = True
+
+ if myoptions.autounmask_continue in true_y:
+ myoptions.autounmask_continue = True
+
+ if myoptions.autounmask_only in true_y:
+ myoptions.autounmask_only = True
+ else:
+ myoptions.autounmask_only = None
+
+ if myoptions.autounmask_unrestricted_atoms in true_y:
+ myoptions.autounmask_unrestricted_atoms = True
+
+ if myoptions.autounmask_keep_keywords in true_y:
+ myoptions.autounmask_keep_keywords = True
+
+ if myoptions.autounmask_keep_masks in true_y:
+ myoptions.autounmask_keep_masks = True
+
+ if myoptions.autounmask_write in true_y:
+ myoptions.autounmask_write = True
+
+ if myoptions.binpkg_changed_deps is not None:
+ if myoptions.binpkg_changed_deps in true_y:
+ myoptions.binpkg_changed_deps = 'y'
+ else:
+ myoptions.binpkg_changed_deps = 'n'
+
+ if myoptions.buildpkg in true_y:
+ myoptions.buildpkg = True
+
+ if myoptions.buildpkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.buildpkg_exclude, less_strict=True)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --buildpkg-exclude parameter: '%s'\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.changed_deps is not None:
+ if myoptions.changed_deps in true_y:
+ myoptions.changed_deps = 'y'
+ else:
+ myoptions.changed_deps = 'n'
+
+ if myoptions.changed_deps_report is not None:
+ if myoptions.changed_deps_report in true_y:
+ myoptions.changed_deps_report = 'y'
+ else:
+ myoptions.changed_deps_report = 'n'
+
+ if myoptions.changed_slot is not None:
+ if myoptions.changed_slot in true_y:
+ myoptions.changed_slot = True
+ else:
+ myoptions.changed_slot = None
+
+ if myoptions.changed_use is not False:
+ myoptions.reinstall = "changed-use"
+ myoptions.changed_use = False
+
+ if myoptions.deselect in true_y:
+ myoptions.deselect = True
+
+ if myoptions.binpkg_respect_use is not None:
+ if myoptions.binpkg_respect_use in true_y:
+ myoptions.binpkg_respect_use = 'y'
+ else:
+ myoptions.binpkg_respect_use = 'n'
+
+ if myoptions.complete_graph in true_y:
+ myoptions.complete_graph = True
+ else:
+ myoptions.complete_graph = None
+
+ if myoptions.depclean_lib_check in true_y:
+ myoptions.depclean_lib_check = True
+
+ if myoptions.exclude:
+ bad_atoms = _find_bad_atoms(myoptions.exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.reinstall_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.reinstall_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --reinstall-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.rebuild_ignore:
+ bad_atoms = _find_bad_atoms(myoptions.rebuild_ignore)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --rebuild-ignore parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.usepkg_exclude:
+ bad_atoms = _find_bad_atoms(myoptions.usepkg_exclude)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --usepkg-exclude parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.useoldpkg_atoms:
+ bad_atoms = _find_bad_atoms(myoptions.useoldpkg_atoms)
+ if bad_atoms and not silent:
+ parser.error("Invalid Atom(s) in --useoldpkg-atoms parameter: '%s' (only package names and slot atoms (with wildcards) allowed)\n" % \
+ (",".join(bad_atoms),))
+
+ if myoptions.fail_clean in true_y:
+ myoptions.fail_clean = True
+
+ if myoptions.fuzzy_search in true_y:
+ myoptions.fuzzy_search = True
+
+ if myoptions.getbinpkg in true_y:
+ myoptions.getbinpkg = True
+ else:
+ myoptions.getbinpkg = None
+
+ if myoptions.getbinpkgonly in true_y:
+ myoptions.getbinpkgonly = True
+ else:
+ myoptions.getbinpkgonly = None
+
+ if myoptions.ignore_world in true_y:
+ myoptions.ignore_world = True
+
+ if myoptions.keep_going in true_y:
+ myoptions.keep_going = True
+ else:
+ myoptions.keep_going = None
+
+ if myoptions.package_moves in true_y:
+ myoptions.package_moves = True
+
+ if myoptions.quiet in true_y:
+ myoptions.quiet = True
+ else:
+ myoptions.quiet = None
+
+ if myoptions.quiet_build in true_y:
+ myoptions.quiet_build = 'y'
+
+ if myoptions.quiet_fail in true_y:
+ myoptions.quiet_fail = 'y'
+
+ if myoptions.read_news in true_y:
+ myoptions.read_news = True
+ else:
+ myoptions.read_news = None
+
+
+ if myoptions.rebuild_if_new_slot in true_y:
+ myoptions.rebuild_if_new_slot = 'y'
+
+ if myoptions.rebuild_if_new_ver in true_y:
+ myoptions.rebuild_if_new_ver = True
+ else:
+ myoptions.rebuild_if_new_ver = None
+
+ if myoptions.rebuild_if_new_rev in true_y:
+ myoptions.rebuild_if_new_rev = True
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_new_rev = None
+
+ if myoptions.rebuild_if_unbuilt in true_y:
+ myoptions.rebuild_if_unbuilt = True
+ myoptions.rebuild_if_new_rev = None
+ myoptions.rebuild_if_new_ver = None
+ else:
+ myoptions.rebuild_if_unbuilt = None
+
+ if myoptions.rebuilt_binaries in true_y:
+ myoptions.rebuilt_binaries = True
+
+ if myoptions.root_deps in true_y:
+ myoptions.root_deps = True
+
+ if myoptions.select in true_y:
+ myoptions.select = True
+ myoptions.oneshot = False
+ elif myoptions.select == "n":
+ myoptions.oneshot = True
+
+ if myoptions.selective in true_y:
+ myoptions.selective = True
+
+ if myoptions.backtrack is not None:
+
+ try:
+ backtrack = int(myoptions.backtrack)
+ except (OverflowError, ValueError):
+ backtrack = -1
+
+ if backtrack < 0:
+ backtrack = None
+ if not silent:
+ parser.error("Invalid --backtrack parameter: '%s'\n" % \
+ (myoptions.backtrack,))
+
+ myoptions.backtrack = backtrack
+
+ if myoptions.deep is not None:
+ deep = None
+ if myoptions.deep == "True":
+ deep = True
+ else:
+ try:
+ deep = int(myoptions.deep)
+ except (OverflowError, ValueError):
+ deep = -1
+
+ if deep is not True and deep < 0:
+ deep = None
+ if not silent:
+ parser.error("Invalid --deep parameter: '%s'\n" % \
+ (myoptions.deep,))
+
+ myoptions.deep = deep
+
+ if myoptions.jobs:
+ jobs = None
+ if myoptions.jobs == "True":
+ jobs = True
+ else:
+ try:
+ jobs = int(myoptions.jobs)
+ except ValueError:
+ jobs = -1
+
+ if jobs is not True and \
+ jobs < 1:
+ jobs = None
+ if not silent:
+ parser.error("Invalid --jobs parameter: '%s'\n" % \
+ (myoptions.jobs,))
+
+ myoptions.jobs = jobs
+
+ if myoptions.load_average == "True":
+ myoptions.load_average = None
+
+ if myoptions.load_average:
+ try:
+ load_average = float(myoptions.load_average)
+ except ValueError:
+ load_average = 0.0
+
+ if load_average <= 0.0:
+ load_average = None
+ if not silent:
+ parser.error("Invalid --load-average parameter: '%s'\n" % \
+ (myoptions.load_average,))
+
+ myoptions.load_average = load_average
+
+ if myoptions.rebuilt_binaries_timestamp:
+ try:
+ rebuilt_binaries_timestamp = int(myoptions.rebuilt_binaries_timestamp)
+ except ValueError:
+ rebuilt_binaries_timestamp = -1
+
+ if rebuilt_binaries_timestamp < 0:
+ rebuilt_binaries_timestamp = 0
+ if not silent:
+ parser.error("Invalid --rebuilt-binaries-timestamp parameter: '%s'\n" % \
+ (myoptions.rebuilt_binaries_timestamp,))
+
+ myoptions.rebuilt_binaries_timestamp = rebuilt_binaries_timestamp
+
+ if myoptions.search_similarity:
+ try:
+ search_similarity = float(myoptions.search_similarity)
+ except ValueError:
+ parser.error("Invalid --search-similarity parameter "
+ "(not a number): '{}'\n".format(
+ myoptions.search_similarity))
+
+ if search_similarity < 0 or search_similarity > 100:
+ parser.error("Invalid --search-similarity parameter "
+ "(not between 0 and 100): '{}'\n".format(
+ myoptions.search_similarity))
+
+ myoptions.search_similarity = search_similarity
+
+ if myoptions.use_ebuild_visibility in true_y:
+ myoptions.use_ebuild_visibility = True
+ else:
+ # None or "n"
+ pass
+
+ if myoptions.usepkg in true_y:
+ myoptions.usepkg = True
+ else:
+ myoptions.usepkg = None
+
+ if myoptions.usepkgonly in true_y:
+ myoptions.usepkgonly = True
+ else:
+ myoptions.usepkgonly = None
+
+ if myoptions.verbose in true_y:
+ myoptions.verbose = True
+ else:
+ myoptions.verbose = None
+
+ if myoptions.with_test_deps in true_y:
+ myoptions.with_test_deps = True
+ else:
+ myoptions.with_test_deps = None
+
+ for myopt in options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"))
+ if v:
+ myopts[myopt] = True
+
+ for myopt in argument_options:
+ v = getattr(myoptions, myopt.lstrip("--").replace("-", "_"), None)
+ if v is not None:
+ myopts[myopt] = v
+
+ if myoptions.searchdesc:
+ myoptions.search = True
+
+ for action_opt in actions:
+ v = getattr(myoptions, action_opt.replace("-", "_"))
+ if v:
+ if myaction:
+ multiple_actions(myaction, action_opt)
+ sys.exit(1)
+ myaction = action_opt
+
+ if myaction is None and myoptions.deselect is True:
+ myaction = 'deselect'
+
+ return myaction, myopts, myoptions.positional_args
+
+def profile_check(trees, myaction):
+ if myaction in ("help", "info", "search", "sync", "version"):
+ return os.EX_OK
+ for root_trees in trees.values():
+ if (root_trees["root_config"].settings.profiles and
+ 'ARCH' in root_trees["root_config"].settings):
+ continue
+ # generate some profile related warning messages
+ validate_ebuild_environment(trees)
+ msg = ("Your current profile is invalid. If you have just changed "
+ "your profile configuration, you should revert back to the "
+ "previous configuration. Allowed actions are limited to "
+ "--help, --info, --search, --sync, and --version.")
+ writemsg_level("".join("!!! %s\n" % l for l in textwrap.wrap(msg, 70)),
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ return os.EX_OK
+
+def emerge_main(context, build_job, args=None):
+ """
+ @param args: command arguments (default: sys.argv[1:])
+ @type args: list
+ """
+ if args is None:
+ args = sys.argv[1:]
+ if build_job is None:
+ build_job = {}
+
+ args = portage._decode_argv(args)
+
+ # Use system locale.
+ try:
+ locale.setlocale(locale.LC_ALL, "")
+ except locale.Error as e:
+ writemsg_level("setlocale: %s\n" % e, level=logging.WARN)
+
+ # Disable color until we're sure that it should be enabled (after
+ # EMERGE_DEFAULT_OPTS has been parsed).
+ portage.output.havecolor = 0
+
+ # This first pass is just for options that need to be known as early as
+ # possible, such as --config-root. They will be parsed again later,
+ # together with EMERGE_DEFAULT_OPTS (which may vary depending on the
+ # the value of --config-root).
+ myaction, myopts, myfiles = parse_opts(args, silent=True)
+ if "--debug" in myopts:
+ os.environ["PORTAGE_DEBUG"] = "1"
+ if "--config-root" in myopts:
+ os.environ["PORTAGE_CONFIGROOT"] = myopts["--config-root"]
+ if "--sysroot" in myopts:
+ os.environ["SYSROOT"] = myopts["--sysroot"]
+ if "--root" in myopts:
+ os.environ["ROOT"] = myopts["--root"]
+ if "--prefix" in myopts:
+ os.environ["EPREFIX"] = myopts["--prefix"]
+ if "--accept-properties" in myopts:
+ os.environ["ACCEPT_PROPERTIES"] = myopts["--accept-properties"]
+ if "--accept-restrict" in myopts:
+ os.environ["ACCEPT_RESTRICT"] = myopts["--accept-restrict"]
+
+ # optimize --help (no need to load config / EMERGE_DEFAULT_OPTS)
+ if myaction == "help":
+ emerge_help()
+ return os.EX_OK
+ elif myaction == "moo":
+ print(COWSAY_MOO % platform.system())
+ return os.EX_OK
+ elif myaction == "sync":
+ # need to set this to True now in order for the repository config
+ # loading to allow new repos with non-existent directories
+ portage._sync_mode = True
+
+ # Verify that /dev/null exists and is a device file as a cheap early
+ # filter for obviously broken /dev/s.
+ try:
+ if os.stat(os.devnull).st_rdev == 0:
+ writemsg_level("Failed to validate a sane '/dev'.\n"
+ "'/dev/null' is not a device file.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+ except OSError:
+ writemsg_level("Failed to validate a sane '/dev'.\n"
+ "'/dev/null' does not exist.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # Verify that BASH process substitution works as another cheap early
+ # filter. Process substitution uses '/dev/fd'.
+ with open(os.devnull, 'r+b') as dev_null:
+ fd_pipes = {
+ 0: dev_null.fileno(),
+ 1: dev_null.fileno(),
+ 2: dev_null.fileno(),
+ }
+ if portage.process.spawn_bash("[[ $(< <(echo foo) ) == foo ]]",
+ fd_pipes=fd_pipes) != 0:
+ writemsg_level("Failed to validate a sane '/dev'.\n"
+ "bash process substitution doesn't work; this may be an "
+ "indication of a broken '/dev/fd'.\n",
+ level=logging.ERROR, noiselevel=-1)
+ return 1
+
+ # Portage needs to ensure a sane umask for the files it creates.
+ os.umask(0o22)
+ emerge_config = load_emerge_config(
+ action=myaction, args=myfiles, opts=myopts)
+
+ # Make locale variables from configuration files (make.defaults, make.conf) affect locale of emerge process.
+ for locale_var_name in ("LANGUAGE", "LC_ALL", "LC_ADDRESS", "LC_COLLATE", "LC_CTYPE",
+ "LC_IDENTIFICATION", "LC_MEASUREMENT", "LC_MESSAGES", "LC_MONETARY",
+ "LC_NAME", "LC_NUMERIC", "LC_PAPER", "LC_TELEPHONE", "LC_TIME", "LANG"):
+ locale_var_value = emerge_config.running_config.settings.get(locale_var_name)
+ if locale_var_value is not None:
+ os.environ.setdefault(locale_var_name, locale_var_value)
+ try:
+ locale.setlocale(locale.LC_ALL, "")
+ except locale.Error as e:
+ writemsg_level("setlocale: %s\n" % e, level=logging.WARN)
+
+ tmpcmdline = []
+ if "--ignore-default-opts" not in myopts:
+ tmpcmdline.extend(portage.util.shlex_split(
+ emerge_config.target_config.settings.get(
+ "EMERGE_DEFAULT_OPTS", "")))
+ tmpcmdline.extend(args)
+ emerge_config.action, emerge_config.opts, emerge_config.args = \
+ parse_opts(tmpcmdline)
+
+ try:
+ return run_action(emerge_config, build_job, context)
+ finally:
+ # Call destructors for our portdbapi instances.
+ for x in emerge_config.trees.values():
+ if "porttree" in x.lazy_items:
+ continue
+ x["porttree"].dbapi.close_caches()
diff --git a/gosbs/baserpc.py b/gosbs/baserpc.py
index b57f44c..13d5359 100644
--- a/gosbs/baserpc.py
+++ b/gosbs/baserpc.py
@@ -14,8 +14,6 @@
# under the License.
#
-# Origin https://github.com/openstack/nova/blob/master/nova/baserpc.py
-
"""
Base RPC client and server common to all services.
"""
diff --git a/gosbs/builder/depclean.py b/gosbs/builder/depclean.py
new file mode 100644
index 0000000..0e17a5f
--- /dev/null
+++ b/gosbs/builder/depclean.py
@@ -0,0 +1,82 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.clear_caches import clear_caches
+from _emerge.main import parse_opts
+from _emerge.unmerge import unmerge
+import portage
+from portage._sets.base import InternalPackageSet
+from portage.util._async.SchedulerInterface import SchedulerInterface
+from portage.util._eventloop.global_event_loop import global_event_loop
+
+from oslo_log import log as logging
+from gosbs._emerge.actions import load_emerge_config, action_depclean, calc_depclean
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def do_depclean(context):
+ mysettings, mytrees, mtimedb = load_emerge_config()
+ root_config = mytrees[mysettings['EROOT']]['root_config']
+ vardb = root_config.trees['vartree'].dbapi
+ psets = root_config.setconfig.psets
+ args_set = InternalPackageSet(allow_repo=True)
+ spinner=None
+ tmpcmdline = []
+ tmpcmdline.append("--depclean")
+ tmpcmdline.append("--pretend")
+ print("depclean",tmpcmdline)
+ myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
+ if myfiles:
+ args_set.update(myfiles)
+ matched_packages = False
+ for x in args_set:
+ if vardb.match(x):
+ matched_packages = True
+ if not matched_packages:
+ return 0
+
+ rval, cleanlist, ordered, req_pkg_count, unresolvable = calc_depclean(mysettings, mytrees, mtimedb["ldpath"], myopts, myaction, args_set, spinner)
+ print('rval, cleanlist, ordered, req_pkg_count, unresolvable', rval, cleanlist, ordered, req_pkg_count, unresolvable)
+ clear_caches(mytrees)
+ if unresolvable != []:
+ return True
+ if cleanlist != []:
+ conflict_package_list = []
+ max_jobs = myopts.get("--jobs", 1)
+ background = (max_jobs is True or max_jobs > 1 or
+ "--quiet" in myopts or myopts.get("--quiet-build") == "y")
+ sched_iface = SchedulerInterface(global_event_loop(),
+ is_background=lambda: background)
+
+ if background:
+ mysettings.unlock()
+ mysettings["PORTAGE_BACKGROUND"] = "1"
+ mysettings.backup_changes("PORTAGE_BACKGROUND")
+ mysettings.lock()
+
+ for depclean_cpv in cleanlist:
+ if portage.versions.cpv_getkey(depclean_cpv) in list(psets["system"]):
+ conflict_package_list.append(depclean_cpv)
+ if portage.versions.cpv_getkey(depclean_cpv) in list(psets['selected']):
+ conflict_package_list.append(depclean_cpv)
+ print('conflict_package_list', conflict_package_list)
+ if conflict_package_list == []:
+ rval = unmerge(root_config, myopts, "unmerge", cleanlist, mtimedb["ldpath"], ordered=ordered, scheduler=sched_iface)
+ set_atoms = {}
+ for k in ("profile", "system", "selected"):
+ try:
+ set_atoms[k] = root_config.setconfig.getSetAtoms(k)
+ except portage.exception.PackageSetNotFound:
+ # A nested set could not be resolved, so ignore nested sets.
+ set_atoms[k] = root_config.sets[k].getAtoms()
+
+ print("Packages installed: " + str(len(vardb.cpv_all())))
+ print("Packages in world: %d" % len(set_atoms["selected"]))
+ print("Packages in system: %d" % len(set_atoms["system"]))
+ if set_atoms["profile"]:
+ print("Packages in profile: %d" % len(set_atoms["profile"]))
+ print("Required packages: "+str(req_pkg_count))
+ print("Number removed: "+str(len(cleanlist)))
+ return True
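+
+# do_depclean() first performs the equivalent of `emerge --depclean --pretend`
+# via calc_depclean(), bails out if anything is unresolvable, and only
+# unmerges the cleanlist when no @system or @world (selected) package would
+# be removed.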
diff --git a/gosbs/builder/manager.py b/gosbs/builder/manager.py
index 950122b..f0e741f 100644
--- a/gosbs/builder/manager.py
+++ b/gosbs/builder/manager.py
@@ -102,3 +102,13 @@ class BuilderManager(manager.Manager):
'service_uuid' : self.service_ref.uuid,
}
run_task(context, filters, self.service_ref)
+
+ @periodic_task.periodic_task
+ def build_pkg_task(self, context):
+ task_name = 'build_pkg'
+ LOG.debug("Runing task %s", task_name)
+ filters = { 'status' : 'waiting',
+ 'name' : task_name,
+ 'service_uuid' : self.service_ref.uuid,
+ }
+ run_task(context, filters, self.service_ref)
diff --git a/gosbs/builder/wrapper_depgraph.py b/gosbs/builder/wrapper_depgraph.py
new file mode 100644
index 0000000..3d73805
--- /dev/null
+++ b/gosbs/builder/wrapper_depgraph.py
@@ -0,0 +1,61 @@
+# Copyright 1998-2020 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
+
+from _emerge.create_depgraph_params import create_depgraph_params
+from _emerge.depgraph import backtrack_depgraph
+import portage
+portage.proxy.lazyimport.lazyimport(globals(),
+ 'gosbs._emerge.actions:display_missing_pkg_set,load_emerge_config',
+)
+from portage.exception import PackageSetNotFound
+
+from oslo_log import log as logging
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def build_mydepgraph(settings, trees, mtimedb, myopts, myparams, myaction, myfiles, spinner, build_job, context):
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings["ROOT"]]["root_config"]
+ display_missing_pkg_set(root_config, e.value)
+ LOG.error('Dependencies fail')
+ else:
+ if not success:
+ repeat = True
+ repeat_times = 0
+ while repeat:
+ if mydepgraph._dynamic_config._needed_p_mask_changes:
+ LOG.debug('Mask package or dep')
+ elif mydepgraph._dynamic_config._needed_use_config_changes:
+ mydepgraph._display_autounmask()
+ LOG.debug('Need use change')
+ elif mydepgraph._dynamic_config._slot_conflict_handler:
+ LOG.debug('Slot blocking')
+ elif mydepgraph._dynamic_config._circular_deps_for_display:
+ LOG.debug('Circular Deps')
+ elif mydepgraph._dynamic_config._unsolvable_blockers:
+ LOG.debug('Blocking packages')
+ else:
+ LOG.debug('Dep calc fail')
+ mydepgraph.display_problems()
+ if repeat_times == 2:
+ repeat = False
+ else:
+ repeat_times = repeat_times + 1
+ settings, trees, mtimedb = load_emerge_config()
+ myparams = create_depgraph_params(myopts, myaction)
+ try:
+ success, mydepgraph, favorites = backtrack_depgraph(
+ settings, trees, myopts, myparams, myaction, myfiles, spinner)
+ except portage.exception.PackageSetNotFound as e:
+ root_config = trees[settings["ROOT"]]["root_config"]
+ display_missing_pkg_set(root_config, e.value)
+ if success:
+ repeat = False
+
+ return success, settings, trees, mtimedb, mydepgraph
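+
+# build_mydepgraph() retries a failed dependency calculation up to three
+# attempts in all, reloading the emerge configuration between attempts, and
+# logs which class of problem (mask, USE change, slot conflict, ...) blocked
+# the graph.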
diff --git a/gosbs/cmd/__init__.py b/gosbs/cmd/__init__.py
index 7970311..e69de29 100644
--- a/gosbs/cmd/__init__.py
+++ b/gosbs/cmd/__init__.py
@@ -1,20 +0,0 @@
-# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
-# All Rights Reserved.
-#
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
-# Origin https://github.com/openstack/nova/blob/master/nova/cmd/__init__.py
-
-from gosbs import utils
-
-utils.monkey_patch()
diff --git a/gosbs/common/binary.py b/gosbs/common/binary.py
new file mode 100644
index 0000000..7d6ac46
--- /dev/null
+++ b/gosbs/common/binary.py
@@ -0,0 +1,32 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+
+from oslo_log import log as logging
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def destroy_local_binary(context, build_job, project_db, service_uuid, mysettings):
+ filters = {
+ 'ebuild_uuid' : build_job['ebuild'].uuid,
+ 'project_uuid' : project_db.uuid,
+ 'service_uuid' : service_uuid,
+ }
+ for local_binary_db in objects.local_binary.LocalBinaryList.get_all(context, filters=filters):
+ local_binary_db.destroy(context)
+ binfile = mysettings['PKGDIR'] + "/" + build_job['cpv'] + ".tbz2"
+ try:
+ os.remove(binfile)
+ except OSError:
+ LOG.error("Package file was not removed or not found: %s" % binfile)
+
+def destroy_objectstor_binary(context, build_job, project_db):
+ filters = {
+ 'ebuild_uuid' : build_job['ebuild'].uuid,
+ 'project_uuid' : project_db.uuid,
+ }
+ for objectstor_binary_db in objects.objectstor_binary.ObjectStorBinaryList.get_all(context, filters=filters):
+ objectstor_binary_db.destroy(context)
+ # Fixme: remove the file on ObjectStor
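+
+# Both helpers expect a build_job dict carrying an 'ebuild' object (with a
+# uuid) and, for the local case, a 'cpv' string; they drop the matching DB
+# rows, and destroy_local_binary() also unlinks ${PKGDIR}/<cpv>.tbz2.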
diff --git a/gosbs/common/flags.py b/gosbs/common/flags.py
index 2e844f0..142ec96 100644
--- a/gosbs/common/flags.py
+++ b/gosbs/common/flags.py
@@ -1,9 +1,6 @@
# Copyright 1999-2020 Gentoo Authors
# Distributed under the terms of the GNU General Public License v2
-
# Origin https://gitweb.gentoo.org/proj/portage.git/tree/pym/portage/api/flag.py?h=public_api
-# Fix so we can use mysettings and myportdb.
-# Add filtring of api, python and ruby.
"""Provides support functions for USE flag settings and analysis"""
@@ -209,3 +206,17 @@ def get_use_flag_dict(portdir):
#debug.dprint(data[0].strip())
#debug.dprint(item[index:])
return use_dict
+
+def get_build_use(cpv, mysettings, myportdb):
+ (final_use, use_expand_hidden, usemasked, useforced) = get_all_cpv_use(cpv, myportdb, mysettings)
+ iuse_flags = filter_flags(get_iuse(cpv, myportdb), use_expand_hidden, usemasked, useforced, mysettings)
+ final_flags = filter_flags(final_use, use_expand_hidden, usemasked, useforced, mysettings)
+ iuse_flags2 = reduce_flags(iuse_flags)
+ iuse_flags_list = list(set(iuse_flags2))
+ use_disable = list(set(iuse_flags_list).difference(set(final_flags)))
+ use_flagsDict = {}
+ for x in final_flags:
+ use_flagsDict[x] = True
+ for x in use_disable:
+ use_flagsDict[x] = False
+ return use_flagsDict, usemasked
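+
+# Illustrative use: use_flags, usemasked = get_build_use(cpv, mysettings, myportdb)
+# yields something like ({'ssl': True, 'doc': False, ...}, usemasked), i.e.
+# every visible IUSE flag mapped to whether it is enabled in the final USE
+# for the given cpv.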
diff --git a/gosbs/common/git.py b/gosbs/common/git.py
index b22435d..a72aead 100644
--- a/gosbs/common/git.py
+++ b/gosbs/common/git.py
@@ -1,24 +1,16 @@
-# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Copyright 1998-2019 Gentoo Foundation
+# Distributed under the terms of the GNU General Public License v2
import re
import git
import os
from oslo_log import log as logging
+from gosbs import objects
import gosbs.conf
CONF = gosbs.conf.CONF
+
LOG = logging.getLogger(__name__)
def fetch(repo):
@@ -43,7 +35,7 @@ def update_git_repo_db(repo_dict):
if repo_uptodate:
return True, cp_list
# We check for dir changes and add the package to a list
- repo_diff = repo.git.diff('origin', '--name-only'
+ repo_diff = repo.git.diff('origin', '--name-only')
#write_log(session, 'Git dir diff:\n%s' % (repo_diff,), "debug", config_id, 'sync.git_sync_main')
for diff_line in repo_diff.splitlines():
find_search = True
@@ -73,7 +65,7 @@ def create_git_repo(repo_dict):
try:
os.mkdir(repo_dict['repo_path'])
except OSError:
- LOG.error("Creation of the directory %s failed" % repo_dict['repo_path'])
+ print ("Creation of the directory %s failed" % repo_dict['repo_path'])
return False
try:
if not repo_dict['history']:
@@ -89,7 +81,7 @@ def check_git_repo_db(repo_dict):
succes = create_git_repo(repo_dict)
return succes, None
succes, cp_list = update_git_repo_db(repo_dict)
- return succes, cp_list
+ return succes , cp_list
def check_git_repo(repo_dict):
if not os.path.isdir(repo_dict['repo_path']):
diff --git a/gosbs/common/task.py b/gosbs/common/task.py
index 4fb98cb..b3bbda3 100644
--- a/gosbs/common/task.py
+++ b/gosbs/common/task.py
@@ -1,16 +1,3 @@
-# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
-
from datetime import datetime
from dateutil.relativedelta import relativedelta
import pytz
@@ -27,6 +14,10 @@ def time_to_run_task(task_db):
task_time_when = task_time_when + relativedelta(days=+(task_db.run.day -1))
task_time_when = task_time_when + relativedelta(hours=+task_db.run.hour)
task_time_when = task_time_when + relativedelta(minutes=+task_db.run.minute)
+ print(task_db.run)
+ print(task_db.run.minute)
+ print(task_time_when)
+ print(task_time_now)
if task_time_when < task_time_now:
return True
else:
@@ -41,6 +32,7 @@ def create_task_db(context, name, run, repet, service_uuid):
task_db.repet = repet
task_db.status = 'waiting'
task_db.last = datetime.now().replace(tzinfo=pytz.UTC)
+ print(task_db)
task_db.create(context)
return task_db
@@ -57,6 +49,7 @@ def check_task_db(context, name, run, repet, service_uuid):
def run_task(context, filters, service_ref):
for task_db in objects.task.TaskList.get_all(context, filters=filters, sort_key='priority'):
+ print(task_db)
if time_to_run_task(task_db):
task_db.status = 'in-progress'
task_db.save(context)
diff --git a/gosbs/config.py b/gosbs/config.py
index 6defbc2..bb7887c 100644
--- a/gosbs/config.py
+++ b/gosbs/config.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/config.py
-
from oslo_log import log
from oslo_utils import importutils
diff --git a/gosbs/context.py b/gosbs/context.py
index 20df39c..11ef81a 100644
--- a/gosbs/context.py
+++ b/gosbs/context.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/context.py
-
"""RequestContext: context for requests that persist through all of nova."""
from contextlib import contextmanager
diff --git a/gosbs/db/sqlalchemy/models.py b/gosbs/db/sqlalchemy/models.py
index 119369c..f42dacd 100644
--- a/gosbs/db/sqlalchemy/models.py
+++ b/gosbs/db/sqlalchemy/models.py
@@ -157,7 +157,18 @@ class ProjectsRepos(BASE, NovaBase):
repoman = Column(Boolean(), default=False)
qa = Column(Boolean(), default=False)
auto = Column(Boolean(), default=False)
+
+class ProjectsOptions(BASE, NovaBase):
+ """Represents an image in the datastore."""
+ __tablename__ = 'projects_options'
+ __table_args__ = (
+ )
+ id = Column(Integer, primary_key=True)
+ project_uuid = Column(String(36), ForeignKey('projects.uuid'),
+ default=lambda: str(uuid.uuid4()))
depclean = Column(Boolean(), default=False)
+ oneshot = Column(Boolean(), default=False)
+ removebin = Column(Boolean(), default=False)
class ProjectsBuilds(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
"""Represents an image in the datastore."""
@@ -424,3 +435,37 @@ class ServicesRepos(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixi
auto = Column(Boolean(), default=False)
status = Column(Enum('failed', 'completed', 'in-progress', 'waiting', 'update_db', 'rebuild_db'),
nullable=True)
+
+class LocalBinarys(BASE, NovaBase, models.TimestampMixin):
+ """Represents an image in the datastore."""
+ __tablename__ = 'local_binarys'
+ __table_args__ = (
+ )
+
+ uuid = Column(String(36), primary_key=True,
+ default=lambda: str(uuid.uuid4()))
+ name = Column(String(255))
+ project_uuid = Column(String(36), ForeignKey('projects.uuid'), nullable=False,
+ default=lambda: str(uuid.uuid4()))
+ ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'), nullable=False,
+ default=lambda: str(uuid.uuid4()))
+ service_uuid = Column(String(36), ForeignKey('services.uuid'),
+ default=lambda: str(uuid.uuid4()))
+ checksum = Column(String(200))
+ looked = Column(Boolean(), default=False)
+
+class ObjectStorBinarys(BASE, NovaBase, models.TimestampMixin):
+ """Represents an image in the datastore."""
+ __tablename__ = 'objectstor_binarys'
+ __table_args__ = (
+ )
+
+ uuid = Column(String(36), primary_key=True,
+ default=lambda: str(uuid.uuid4()))
+ name = Column(String(255))
+ project_uuid = Column(String(36), ForeignKey('projects.uuid'), nullable=False,
+ default=lambda: str(uuid.uuid4()))
+ ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'), nullable=False,
+ default=lambda: str(uuid.uuid4()))
+ checksum = Column(String(200))
+ looked = Column(Boolean(), default=False)
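
The three tables added above (projects_options, local_binarys, objectstor_binarys) follow the same declarative pattern as the rest of models.py. A trimmed, self-contained sketch of the local_binarys model; the in-memory sqlite engine is illustrative only and the NovaBase/TimestampMixin plumbing is omitted:

    import uuid

    from sqlalchemy import Boolean, Column, String, create_engine
    from sqlalchemy.orm import Session, declarative_base

    Base = declarative_base()

    class LocalBinarys(Base):
        """Trimmed copy of the model above, without the Nova mixins."""
        __tablename__ = 'local_binarys'
        uuid = Column(String(36), primary_key=True,
                      default=lambda: str(uuid.uuid4()))
        name = Column(String(255))
        project_uuid = Column(String(36), nullable=False)
        ebuild_uuid = Column(String(36), nullable=False)
        service_uuid = Column(String(36))
        checksum = Column(String(200))
        looked = Column(Boolean(), default=False)

    engine = create_engine('sqlite://')
    Base.metadata.create_all(engine)
    with Session(engine) as session:
        session.add(LocalBinarys(name='app-misc/foo-1.0',
                                 project_uuid=str(uuid.uuid4()),
                                 ebuild_uuid=str(uuid.uuid4()),
                                 checksum='0' * 40))
        session.commit()
        print(session.query(LocalBinarys).count())  # 1
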
diff --git a/gosbs/debugger.py b/gosbs/debugger.py
index 0cda17c..a6c8a4f 100644
--- a/gosbs/debugger.py
+++ b/gosbs/debugger.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/debugger.py
-
# NOTE(markmc): this is imported before monkey patching in nova.cmd
# so we avoid extra imports here
diff --git a/gosbs/exception.py b/gosbs/exception.py
index 4759034..09b626f 100644
--- a/gosbs/exception.py
+++ b/gosbs/exception.py
@@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/exception.py
-
"""Nova base exception handling.
Includes decorator for re-raising Nova-type exceptions.
diff --git a/gosbs/i18n.py b/gosbs/i18n.py
index f0a769d..123d895 100644
--- a/gosbs/i18n.py
+++ b/gosbs/i18n.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/i18n.py
-
"""oslo.i18n integration module.
See https://docs.openstack.org/oslo.i18n/latest/user/index.html .
diff --git a/gosbs/manager.py b/gosbs/manager.py
index b8e1dda..19a5e4d 100644
--- a/gosbs/manager.py
+++ b/gosbs/manager.py
@@ -14,8 +14,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/manager.py
-
"""Base Manager class.
Managers are responsible for a certain aspect of the system. It is a logical
@@ -58,7 +56,7 @@ import six
import gosbs.conf
from gosbs.db import base
-#from gosbs import profiler
+from gosbs import profiler
from gosbs import rpc
diff --git a/gosbs/middleware.py b/gosbs/middleware.py
index a796014..3c34686 100644
--- a/gosbs/middleware.py
+++ b/gosbs/middleware.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/middleware.py
-
from oslo_middleware import cors
diff --git a/gosbs/objects/__init__.py b/gosbs/objects/__init__.py
index e67fba4..50e2e38 100644
--- a/gosbs/objects/__init__.py
+++ b/gosbs/objects/__init__.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/__init__.py
-
# NOTE(comstud): You may scratch your head as you see code that imports
# this module and then accesses attributes for objects such as Instance,
# etc, yet you do not see these attributes in here. Never fear, there is
@@ -37,6 +35,8 @@ def register_all():
__import__('gosbs.objects.email')
__import__('gosbs.objects.keyword')
__import__('gosbs.objects.package')
+    __import__('gosbs.objects.local_binary')
+    __import__('gosbs.objects.objectstor_binary')
__import__('gosbs.objects.package_metadata')
__import__('gosbs.objects.package_email')
__import__('gosbs.objects.project')
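
register_all() works by importing each object module so that its @base.NovaObjectRegistry.register class decorator runs as an import side effect, which is why the two new entries must name the modules that actually exist. A toy illustration of that registration pattern (registry and register are invented for the example):

    registry = {}

    def register(cls):
        # decorator: record the class under its name at import time
        registry[cls.__name__] = cls
        return cls

    @register
    class LocalBinary:
        pass

    @register
    class ObjectStorBinary:
        pass

    print(sorted(registry))  # ['LocalBinary', 'ObjectStorBinary']
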
diff --git a/gosbs/objects/build_iuse.py b/gosbs/objects/build_iuse.py
index 4aa5342..b550fc5 100644
--- a/gosbs/objects/build_iuse.py
+++ b/gosbs/objects/build_iuse.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -230,6 +226,9 @@ def _build_iuse_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
if 'status' in filters:
query = query.filter(
models.BuildsIUses.status == filters['status'])
+ if 'build_uuid' in filters:
+ query = query.filter(
+ models.BuildsIUses.build_uuid == filters['build_uuid'])
marker_row = None
if marker is not None:
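
The new build_uuid filter chains onto the query the same way as the existing status filter: each recognised key narrows the result set before pagination, and unrecognised keys are ignored. The same narrowing pattern, reduced to a list of dicts instead of a SQLAlchemy query:

    rows = [{'status': 'completed', 'build_uuid': 'u1'},
            {'status': 'waiting', 'build_uuid': 'u2'}]

    def get_all(rows, filters):
        # mirror the query narrowing in _build_iuse_get_all_from_db
        if 'status' in filters:
            rows = [r for r in rows if r['status'] == filters['status']]
        if 'build_uuid' in filters:
            rows = [r for r in rows if r['build_uuid'] == filters['build_uuid']]
        return rows

    print(get_all(rows, {'build_uuid': 'u2'}))
    # [{'status': 'waiting', 'build_uuid': 'u2'}]
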
diff --git a/gosbs/objects/category.py b/gosbs/objects/category.py
index c7659c5..3535b18 100644
--- a/gosbs/objects/category.py
+++ b/gosbs/objects/category.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/category_metadata.py b/gosbs/objects/category_metadata.py
index 76eff64..3d74a65 100644
--- a/gosbs/objects/category_metadata.py
+++ b/gosbs/objects/category_metadata.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/ebuild.py b/gosbs/objects/ebuild.py
index 4be3c64..7bc6c19 100644
--- a/gosbs/objects/ebuild.py
+++ b/gosbs/objects/ebuild.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/ebuild_iuse.py b/gosbs/objects/ebuild_iuse.py
index 6a8c568..bf63688 100644
--- a/gosbs/objects/ebuild_iuse.py
+++ b/gosbs/objects/ebuild_iuse.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/ebuild_keyword.py b/gosbs/objects/ebuild_keyword.py
index dfde6e7..35238f5 100644
--- a/gosbs/objects/ebuild_keyword.py
+++ b/gosbs/objects/ebuild_keyword.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/ebuild_metadata.py b/gosbs/objects/ebuild_metadata.py
index 9a886ee..04f2d55 100644
--- a/gosbs/objects/ebuild_metadata.py
+++ b/gosbs/objects/ebuild_metadata.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/ebuild_restriction.py b/gosbs/objects/ebuild_restriction.py
index e3e046c..679d2d2 100644
--- a/gosbs/objects/ebuild_restriction.py
+++ b/gosbs/objects/ebuild_restriction.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -227,8 +223,8 @@ def _ebuild_restriction_get_all_from_db(context, inactive, filters, sort_key, so
query = EbuildRestriction._ebuild_restriction_get_query_from_db(context)
if 'ebuild_uuid' in filters:
- query = query.filter(
- models.EbuildsRestrictions.ebuild_uuid == filters['ebuild_uuid'])
+ query = query.filter(models.EbuildsRestrictions.ebuild_uuid == filters['ebuild_uuid'])
+
if not query:
return None
diff --git a/gosbs/objects/email.py b/gosbs/objects/email.py
index 375c429..c7a6fd9 100644
--- a/gosbs/objects/email.py
+++ b/gosbs/objects/email.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it fit what we need.
-# I need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -70,7 +66,7 @@ def _email_destroy(context, email_id=None, emailid=None):
# TODO(berrange): Remove NovaObjectDictCompat
# TODO(mriedem): Remove NovaPersistentObject in version 2.0
@base.NovaObjectRegistry.register
-class Email(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+class Email(base.NovaObject, base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
diff --git a/gosbs/objects/fields.py b/gosbs/objects/fields.py
index 02dd297..e4e8e0c 100644
--- a/gosbs/objects/fields.py
+++ b/gosbs/objects/fields.py
@@ -12,13 +12,11 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/fields.py
-
import os
import re
-from cursive import signature_utils
-from oslo_serialization import jsonutils
+#from cursive import signature_utils
+#from oslo_serialization import jsonutils
from oslo_versionedobjects import fields
import six
diff --git a/gosbs/objects/flavor.py b/gosbs/objects/flavor.py
index 28739a8..ecf0dd4 100644
--- a/gosbs/objects/flavor.py
+++ b/gosbs/objects/flavor.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/image.py b/gosbs/objects/image.py
index 45d48eb..bf5379c 100644
--- a/gosbs/objects/image.py
+++ b/gosbs/objects/image.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/keyword.py b/gosbs/objects/keyword.py
index 4cf2e0c..f98abdc 100644
--- a/gosbs/objects/keyword.py
+++ b/gosbs/objects/keyword.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -70,7 +66,7 @@ def _keyword_destroy(context, keyword_id=None, keywordid=None):
# TODO(berrange): Remove NovaObjectDictCompat
# TODO(mriedem): Remove NovaPersistentObject in version 2.0
@base.NovaObjectRegistry.register
-class Keyword(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2):
+class Keyword(base.NovaObject, base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
diff --git a/gosbs/objects/project_metadata.py b/gosbs/objects/local_binary.py
similarity index 50%
copy from gosbs/objects/project_metadata.py
copy to gosbs/objects/local_binary.py
index 4edbb55..d930638 100644
--- a/gosbs/objects/project_metadata.py
+++ b/gosbs/objects/local_binary.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -35,157 +31,157 @@ from gosbs.objects import fields
CONF = gosbs.conf.CONF
-def _dict_with_extra_specs(projectmetadata_model):
+def _dict_with_extra_specs(model):
extra_specs = {}
- return dict(projectmetadata_model, extra_specs=extra_specs)
+ return dict(model, extra_specs=extra_specs)
@db_api.main_context_manager.writer
-def _projectmetadata_create(context, values):
- db_projectmetadata = models.ProjectsMetadata()
- db_projectmetadata.update(values)
+def _local_binary_create(context, values):
+ db_local_binary = models.LocalBinarys()
+ db_local_binary.update(values)
try:
- db_projectmetadata.save(context.session)
+ db_local_binary.save(context.session)
except db_exc.DBDuplicateEntry as e:
- if 'projectmetadataid' in e.columns:
- raise exception.ImagesIdExists(projectmetadata_id=values['projectmetadataid'])
+ if 'local_binaryid' in e.columns:
+ raise exception.ImagesIdExists(local_binary_id=values['local_binaryid'])
raise exception.ImagesExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
- return _dict_with_extra_specs(db_projectmetadata)
+ return _dict_with_extra_specs(db_local_binary)
@db_api.main_context_manager.writer
-def _projectmetadata_destroy(context, projectmetadata_id=None, projectmetadataid=None):
- query = context.session.query(models.ProjectsMetadata)
+def _local_binary_destroy(context, local_binary_id=None, local_binaryid=None):
+ query = context.session.query(models.LocalBinarys)
- if projectmetadata_id is not None:
- query.filter(models.ProjectsMetadata.id == projectmetadata_id).delete()
+ if local_binary_id is not None:
+ query.filter(models.LocalBinarys.id == local_binary_id).delete()
else:
- query.filter(models.ProjectsMetadata.id == projectmetadataid).delete()
+ query.filter(models.LocalBinarys.id == local_binaryid).delete()
# TODO(berrange): Remove NovaObjectDictCompat
# TODO(mriedem): Remove NovaPersistentObject in version 2.0
@base.NovaObjectRegistry.register
-class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
+class LocalBinary(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2):
# Version 1.0: Initial version
VERSION = '1.0'
fields = {
- 'id': fields.IntegerField(),
+ 'uuid': fields.UUIDField(),
'project_uuid': fields.UUIDField(),
- 'titel': fields.StringField(),
- 'description' : fields.StringField(),
- 'project_repo_uuid': fields.UUIDField(),
- 'project_profile' : fields.StringField(),
- 'project_profile_repo_uuid': fields.UUIDField(),
+ 'service_uuid': fields.UUIDField(),
+ 'name': fields.StringField(),
+ 'checksum' : fields.StringField(),
+ 'ebuild_uuid': fields.UUIDField(),
+ 'looked' : fields.BooleanField(),
}
def __init__(self, *args, **kwargs):
- super(ProjectMetadata, self).__init__(*args, **kwargs)
+ super(LocalBinary, self).__init__(*args, **kwargs)
self._orig_extra_specs = {}
- self._orig_projectmetadatas = []
+ self._orig_local_binary = []
def obj_make_compatible(self, primitive, target_version):
- super(ProjectMetadata, self).obj_make_compatible(primitive, target_version)
+ super(LocalBinary, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
@staticmethod
- def _from_db_object(context, projectmetadata, db_projectmetadata, expected_attrs=None):
+ def _from_db_object(context, local_binary, db_local_binary, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
- projectmetadata._context = context
- for name, field in projectmetadata.fields.items():
- value = db_projectmetadata[name]
+ local_binary._context = context
+ for name, field in local_binary.fields.items():
+ value = db_local_binary[name]
if isinstance(field, fields.IntegerField):
value = value if value is not None else 0
- projectmetadata[name] = value
+ local_binary[name] = value
- projectmetadata.obj_reset_changes()
- return projectmetadata
+ local_binary.obj_reset_changes()
+ return local_binary
@staticmethod
@db_api.main_context_manager.reader
- def _projectmetadata_get_query_from_db(context):
- query = context.session.query(models.ProjectsMetadata)
+ def _local_binary_get_query_from_db(context):
+ query = context.session.query(models.LocalBinarys)
return query
@staticmethod
@require_context
- def _projectmetadata_get_from_db(context, id):
- """Returns a dict describing specific projectmetadatas."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ def _local_binary_get_from_db(context, id):
+ """Returns a dict describing specific local_binarys."""
+ result = LocalBinary._local_binary_get_query_from_db(context).\
filter_by(id=id).\
first()
if not result:
- raise exception.ImagesNotFound(projectmetadata_id=id)
+ raise exception.ImagesNotFound(local_binary_id=id)
return result
@staticmethod
@require_context
- def _projectmetadata_get_from_db(context, id):
- """Returns a dict describing specific projectmetadatas."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ def _local_binary_get_from_db(context, id):
+ """Returns a dict describing specific local_binaryss."""
+ result = LocalBinary._local_binary_get_query_from_db(context).\
filter_by(id=id).\
first()
if not result:
- raise exception.ImagesNotFound(projectmetadata_id=id)
+ raise exception.ImagesNotFound(local_binary_id=id)
return result
@staticmethod
@require_context
- def _projectmetadata_get_by_name_from_db(context, name):
+ def _local_binarys_get_by_name_from_db(context, name):
"""Returns a dict describing specific flavor."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ result = LocalBinary._local_binary_get_query_from_db(context).\
filter_by(name=name).\
first()
if not result:
- raise exception.FlavorNotFoundByName(projectmetadatas_name=name)
+ raise exception.FlavorNotFoundByName(local_binarys_name=name)
return _dict_with_extra_specs(result)
@staticmethod
@require_context
- def _projectmetadata_get_by_uuid_from_db(context, uuid):
+ def _local_binary_get_by_uuid_from_db(context, uuid):
"""Returns a dict describing specific flavor."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ result = LocalBinary._local_binary_get_query_from_db(context).\
filter_by(project_uuid=uuid).\
first()
if not result:
- raise exception.FlavorNotFoundByName(projectmetadatas_name=name)
+            raise exception.FlavorNotFoundByName(local_binarys_name=uuid)
return _dict_with_extra_specs(result)
def obj_reset_changes(self, fields=None, recursive=False):
- super(ProjectMetadata, self).obj_reset_changes(fields=fields,
+ super(LocalBinary, self).obj_reset_changes(fields=fields,
recursive=recursive)
def obj_what_changed(self):
- changes = super(ProjectMetadata, self).obj_what_changed()
+ changes = super(LocalBinary, self).obj_what_changed()
return changes
@base.remotable_classmethod
def get_by_id(cls, context, id):
- db_projectmetadata = cls._projectmetadata_get_from_db(context, id)
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_local_binary = cls._local_binary_get_from_db(context, id)
+ return cls._from_db_object(context, cls(context), db_local_binary,
expected_attrs=[])
@base.remotable_classmethod
def get_by_name(cls, context, name):
- db_projectmetadata = cls._projectmetadata_get_by_name_from_db(context, name)
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_local_binary = cls._local_binary_get_by_name_from_db(context, name)
+ return cls._from_db_object(context, cls(context), db_local_binary,
expected_attrs=[])
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
- db_projectmetadata = cls._projectmetadata_get_by_uuid_from_db(context, uuid)
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_local_binary = cls._local_binary_get_by_uuid_from_db(context, uuid)
+ return cls._from_db_object(context, cls(context), db_local_binary,
expected_attrs=[])
@staticmethod
- def _projectmetadata_create(context, updates):
- return _projectmetadata_create(context, updates)
+ def _local_binary_create(context, updates):
+ return _local_binary_create(context, updates)
#@base.remotable
def create(self, context):
@@ -193,22 +189,22 @@ class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
# raise exception.ObjectActionError(action='create',
#reason='already created')
updates = self.obj_get_changes()
- db_projectmetadata = self._projectmetadata_create(context, updates)
- self._from_db_object(context, self, db_projectmetadata)
+ db_local_binary = self._local_binary_create(context, updates)
+ self._from_db_object(context, self, db_local_binary)
# NOTE(mriedem): This method is not remotable since we only expect the API
- # to be able to make updates to a projectmetadatas.
+    # to be able to make updates to a local_binary.
@db_api.main_context_manager.writer
def _save(self, context, values):
- db_projectmetadata = context.session.query(models.ProjectsMetadata).\
+ db_local_binary = context.session.query(models.LocalBinarys).\
filter_by(id=self.id).first()
- if not db_projectmetadata:
- raise exception.ImagesNotFound(projectmetadata_id=self.id)
- db_projectmetadata.update(values)
- db_projectmetadata.save(context.session)
+ if not db_local_binary:
+ raise exception.ImagesNotFound(local_binary_id=self.id)
+ db_local_binary.update(values)
+ db_local_binary.save(context.session)
# Refresh ourselves from the DB object so we get the new updated_at.
- self._from_db_object(context, self, db_projectmetadata)
+ self._from_db_object(context, self, db_local_binary)
self.obj_reset_changes()
def save(self, context):
@@ -217,92 +213,98 @@ class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
self._save(context, updates)
@staticmethod
- def _projectmetadata_destroy(context, projectmetadata_id=None, projectmetadataid=None):
- _projectmetadata_destroy(context, projectmetadata_id=projectmetadata_id, projectmetadataid=projectmetadataid)
+ def _local_binary_destroy(context, local_binary_id=None, local_binaryid=None):
+ _local_binary_destroy(context, local_binary_id=local_binary_id, local_binaryid=local_binaryid)
#@base.remotable
def destroy(self, context):
- # NOTE(danms): Historically the only way to delete a projectmetadatas
+        # NOTE(danms): Historically the only way to delete a local_binary
# is via name, which is not very precise. We need to be able to
- # support the light construction of a projectmetadatas object and subsequent
+        # support the light construction of a local_binary object and subsequent
# delete request with only our name filled out. However, if we have
# our id property, we should instead delete with that since it's
# far more specific.
if 'id' in self:
- self._projectmetadata_destroy(context, projectmetadata_id=self.id)
+ self._local_binary_destroy(context, local_binary_id=self.id)
else:
- self._projectmetadata_destroy(context, projectmetadataid=self.projectmetadataid)
- #self._from_db_object(context, self, db_projectmetadata)
+ self._local_binary_destroy(context, local_binaryid=self.local_binaryid)
+ #self._from_db_object(context, self, db_local_binary)
@base.remotable_classmethod
def get_by_filters_first(cls, context, filters=None):
filters = filters or {}
- db_projectmetadata = ProjectMetadata._projectmetadata_get_query_from_db(context)
+ db_local_binary = LocalBinary._local_binary_get_query_from_db(context)
if 'status' in filters:
- db_projectmetadata = db_projectmetadata.filter(
- models.ProjectsMetadata.status == filters['status']).first()
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_local_binary = db_local_binary.filter(
+ models.LocalBinarys.status == filters['status']).first()
+ return cls._from_db_object(context, cls(context), db_local_binary,
expected_attrs=[])
@db_api.main_context_manager
-def _projectmetadata_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+def _local_binary_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
limit, marker):
- """Returns all projectmetadatass.
+ """Returns all local_binarys.
"""
filters = filters or {}
- query = ProjectMetadata._projectmetadata_get_query_from_db(context)
+ query = LocalBinary._local_binary_get_query_from_db(context)
- if 'status' in filters:
+ if 'ebuild_uuid' in filters:
+ query = query.filter(
+ models.LocalBinarys.ebuild_uuid == filters['ebuild_uuid'])
+ if 'project_uuid' in filters:
+ query = query.filter(
+ models.LocalBinarys.project_uuid == filters['project_uuid'])
+ if 'service_uuid' in filters:
query = query.filter(
- models.ProjectsMetadata.status == filters['status'])
+ models.LocalBinarys.service_uuid == filters['service_uuid'])
marker_row = None
if marker is not None:
- marker_row = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ marker_row = LocalBinary._local_binary_get_query_from_db(context).\
filter_by(id=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker=marker)
- query = sqlalchemyutils.paginate_query(query, models.ProjectsMetadata,
+ query = sqlalchemyutils.paginate_query(query, models.LocalBinarys,
limit,
- [sort_key, 'id'],
+ [sort_key, 'uuid'],
marker=marker_row,
sort_dir=sort_dir)
return [_dict_with_extra_specs(i) for i in query.all()]
@base.NovaObjectRegistry.register
-class ProjectMetadataList(base.ObjectListBase, base.NovaObject):
+class LocalBinaryList(base.ObjectListBase, base.NovaObject):
VERSION = '1.0'
fields = {
- 'objects': fields.ListOfObjectsField('ProjectMetadata'),
+ 'objects': fields.ListOfObjectsField('LocalBinary'),
}
@base.remotable_classmethod
def get_all(cls, context, inactive=False, filters=None,
- sort_key='id', sort_dir='asc', limit=None, marker=None):
- db_projectmetadatas = _projectmetadata_get_all_from_db(context,
+ sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+ db_local_binarys = _local_binary_get_all_from_db(context,
inactive=inactive,
filters=filters,
sort_key=sort_key,
sort_dir=sort_dir,
limit=limit,
marker=marker)
- return base.obj_make_list(context, cls(context), objects.projectmetadata.ProjectMetadata,
- db_projectmetadatas,
+ return base.obj_make_list(context, cls(context), objects.local_binary.LocalBinary,
+ db_local_binarys,
expected_attrs=[])
@db_api.main_context_manager.writer
def destroy_all(context):
- context.session.query(models.ProjectsMetadata).delete()
+ context.session.query(models.LocalBinarys).delete()
@db_api.main_context_manager.writer
def update_all(context):
values = {'status': 'waiting', }
- db_projectmetadata = context.session.query(models.ProjectsMetadata).filter_by(auto=True)
- db_projectmetadata.update(values)
+ db_local_binary = context.session.query(models.LocalBinarys).filter_by(auto=True)
+ db_local_binary.update(values)
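
LocalBinary._from_db_object() above copies every declared field from the DB row into the versioned object, coercing a missing integer to 0 before resetting the change tracker. The same loop, reduced to plain dicts for illustration:

    fields = {'uuid': str, 'looked': bool, 'id': int}
    db_row = {'uuid': 'abc-123', 'looked': True, 'id': None}

    obj = {}
    for name, ftype in fields.items():
        value = db_row[name]
        if ftype is int:  # mirrors the IntegerField None -> 0 coercion
            value = value if value is not None else 0
        obj[name] = value

    print(obj)  # {'uuid': 'abc-123', 'looked': True, 'id': 0}
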
diff --git a/gosbs/objects/objectstor_binary.py b/gosbs/objects/objectstor_binary.py
new file mode 100644
index 0000000..994e7fc
--- /dev/null
+++ b/gosbs/objects/objectstor_binary.py
@@ -0,0 +1,306 @@
+# Copyright 2013 Red Hat, Inc
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(model):
+ extra_specs = {}
+ return dict(model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _objectstor_binary_create(context, values):
+ db_objectstor_binary = models.ObjectStorBinarys()
+ db_objectstor_binary.update(values)
+
+ try:
+ db_objectstor_binary.save(context.session)
+ except db_exc.DBDuplicateEntry as e:
+ if 'objectstor_binaryid' in e.columns:
+ raise exception.ImagesIdExists(objectstor_binary_id=values['objectstor_binaryid'])
+ raise exception.ImagesExists(name=values['name'])
+ except Exception as e:
+ raise db_exc.DBError(e)
+
+ return _dict_with_extra_specs(db_objectstor_binary)
+
+
+@db_api.main_context_manager.writer
+def _objectstor_binary_destroy(context, objectstor_binary_id=None, objectstor_binaryid=None):
+ query = context.session.query(models.ObjectStorBinarys)
+
+ if objectstor_binary_id is not None:
+ query.filter(models.ObjectStorBinarys.id == objectstor_binary_id).delete()
+ else:
+ query.filter(models.ObjectStorBinarys.id == objectstor_binaryid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class ObjectStorBinary(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2):
+ # Version 1.0: Initial version
+
+ VERSION = '1.0'
+
+ fields = {
+ 'uuid': fields.UUIDField(),
+ 'project_uuid': fields.UUIDField(),
+ 'name': fields.StringField(),
+ 'checksum' : fields.StringField(),
+ 'ebuild_uuid': fields.UUIDField(),
+ 'looked' : fields.BooleanField(),
+ }
+
+ def __init__(self, *args, **kwargs):
+ super(ObjectStorBinary, self).__init__(*args, **kwargs)
+ self._orig_extra_specs = {}
+ self._orig_objectstor_binarys = []
+
+ def obj_make_compatible(self, primitive, target_version):
+ super(ObjectStorBinary, self).obj_make_compatible(primitive, target_version)
+ target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+ @staticmethod
+ def _from_db_object(context, objectstor_binary, db_objectstor_binary, expected_attrs=None):
+ if expected_attrs is None:
+ expected_attrs = []
+ objectstor_binary._context = context
+ for name, field in objectstor_binary.fields.items():
+ value = db_objectstor_binary[name]
+ if isinstance(field, fields.IntegerField):
+ value = value if value is not None else 0
+ objectstor_binary[name] = value
+
+ objectstor_binary.obj_reset_changes()
+ return objectstor_binary
+
+ @staticmethod
+ @db_api.main_context_manager.reader
+ def _objectstor_binary_get_query_from_db(context):
+ query = context.session.query(models.ObjectStorBinarys)
+ return query
+
+ @staticmethod
+ @require_context
+ def _objectstor_binary_get_from_db(context, id):
+ """Returns a dict describing specific objectstor_binarys."""
+ result = ObjectStorBinary._objectstor_binary_get_query_from_db(context).\
+ filter_by(id=id).\
+ first()
+ if not result:
+ raise exception.ImagesNotFound(objectstor_binary_id=id)
+ return result
+
+
+ @staticmethod
+ @require_context
+ def _objectstor_binarys_get_by_name_from_db(context, name):
+ """Returns a dict describing specific flavor."""
+ result = ObjectStorBinary._objectstor_binary_get_query_from_db(context).\
+ filter_by(name=name).\
+ first()
+ if not result:
+ raise exception.FlavorNotFoundByName(objectstor_binarys_name=name)
+ return _dict_with_extra_specs(result)
+
+ @staticmethod
+ @require_context
+ def _objectstor_binary_get_by_uuid_from_db(context, uuid):
+ """Returns a dict describing specific flavor."""
+ result = ObjectStorBinary._objectstor_binary_get_query_from_db(context).\
+ filter_by(project_uuid=uuid).\
+ first()
+ if not result:
+            raise exception.FlavorNotFoundByName(objectstor_binarys_name=uuid)
+ return _dict_with_extra_specs(result)
+
+ def obj_reset_changes(self, fields=None, recursive=False):
+ super(ObjectStorBinary, self).obj_reset_changes(fields=fields,
+ recursive=recursive)
+
+ def obj_what_changed(self):
+ changes = super(ObjectStorBinary, self).obj_what_changed()
+ return changes
+
+ @base.remotable_classmethod
+ def get_by_id(cls, context, id):
+ db_objectstor_binary = cls._objectstor_binary_get_from_db(context, id)
+ return cls._from_db_object(context, cls(context), db_objectstor_binary,
+ expected_attrs=[])
+ @base.remotable_classmethod
+ def get_by_name(cls, context, name):
+ db_objectstor_binary = cls._objectstor_binary_get_by_name_from_db(context, name)
+ return cls._from_db_object(context, cls(context), db_objectstor_binary,
+ expected_attrs=[])
+ @base.remotable_classmethod
+ def get_by_uuid(cls, context, uuid):
+ db_objectstor_binary = cls._objectstor_binary_get_by_uuid_from_db(context, uuid)
+ return cls._from_db_object(context, cls(context), db_objectstor_binary,
+ expected_attrs=[])
+
+ @staticmethod
+ def _objectstor_binary_create(context, updates):
+ return _objectstor_binary_create(context, updates)
+
+ #@base.remotable
+ def create(self, context):
+ #if self.obj_attr_is_set('id'):
+ # raise exception.ObjectActionError(action='create',
+ #reason='already created')
+ updates = self.obj_get_changes()
+ db_objectstor_binary = self._objectstor_binary_create(context, updates)
+ self._from_db_object(context, self, db_objectstor_binary)
+
+
+ # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an objectstor_binary.
+ @db_api.main_context_manager.writer
+ def _save(self, context, values):
+ db_objectstor_binary = context.session.query(models.ObjectStorBinarys).\
+ filter_by(id=self.id).first()
+ if not db_objectstor_binary:
+ raise exception.ImagesNotFound(objectstor_binary_id=self.id)
+ db_objectstor_binary.update(values)
+ db_objectstor_binary.save(context.session)
+ # Refresh ourselves from the DB object so we get the new updated_at.
+ self._from_db_object(context, self, db_objectstor_binary)
+ self.obj_reset_changes()
+
+ def save(self, context):
+ updates = self.obj_get_changes()
+ if updates:
+ self._save(context, updates)
+
+ @staticmethod
+ def _objectstor_binary_destroy(context, objectstor_binary_id=None, objectstor_binaryid=None):
+ _objectstor_binary_destroy(context, objectstor_binary_id=objectstor_binary_id, objectstor_binaryid=objectstor_binaryid)
+
+ #@base.remotable
+ def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an objectstor_binary
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an objectstor_binary object and subsequent
+ # delete request with only our name filled out. However, if we have
+ # our id property, we should instead delete with that since it's
+ # far more specific.
+ if 'id' in self:
+ self._objectstor_binary_destroy(context, objectstor_binary_id=self.id)
+ else:
+ self._objectstor_binary_destroy(context, objectstor_binaryid=self.objectstor_binaryid)
+ #self._from_db_object(context, self, db_objectstor_binary)
+
+ @base.remotable_classmethod
+ def get_by_filters_first(cls, context, filters=None):
+ filters = filters or {}
+ db_objectstor_binary = ObjectStorBinary._objectstor_binary_get_query_from_db(context)
+
+ if 'status' in filters:
+ db_objectstor_binary = db_objectstor_binary.filter(
+ models.ObjectStorBinarys.status == filters['status']).first()
+ return cls._from_db_object(context, cls(context), db_objectstor_binary,
+ expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _objectstor_binary_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+ limit, marker):
+ """Returns all objectstor_binarys.
+ """
+ filters = filters or {}
+
+ query = ObjectStorBinary._objectstor_binary_get_query_from_db(context)
+
+ if 'ebuild_uuid' in filters:
+ query = query.filter(
+ models.ObjectStorBinarys.ebuild_uuid == filters['ebuild_uuid'])
+ if 'project_uuid' in filters:
+ query = query.filter(
+ models.ObjectStorBinarys.project_uuid == filters['project_uuid'])
+
+ marker_row = None
+ if marker is not None:
+ marker_row = ObjectStorBinary._objectstor_binary_get_query_from_db(context).\
+ filter_by(id=marker).\
+ first()
+ if not marker_row:
+ raise exception.MarkerNotFound(marker=marker)
+
+ query = sqlalchemyutils.paginate_query(query, models.ObjectStorBinarys,
+ limit,
+ [sort_key, 'uuid'],
+ marker=marker_row,
+ sort_dir=sort_dir)
+ return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class ObjectStorBinaryList(base.ObjectListBase, base.NovaObject):
+ VERSION = '1.0'
+
+ fields = {
+ 'objects': fields.ListOfObjectsField('ObjectStorBinary'),
+ }
+
+ @base.remotable_classmethod
+ def get_all(cls, context, inactive=False, filters=None,
+ sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+ db_objectstor_binarys = _objectstor_binary_get_all_from_db(context,
+ inactive=inactive,
+ filters=filters,
+ sort_key=sort_key,
+ sort_dir=sort_dir,
+ limit=limit,
+ marker=marker)
+ return base.obj_make_list(context, cls(context), objects.objectstor_binary.ObjectStorBinary,
+ db_objectstor_binarys,
+ expected_attrs=[])
+
+ @db_api.main_context_manager.writer
+ def destroy_all(context):
+ context.session.query(models.ObjectStorBinarys).delete()
+
+ @db_api.main_context_manager.writer
+ def update_all(context):
+ values = {'status': 'waiting', }
+ db_objectstor_binary = context.session.query(models.ObjectStorBinarys).filter_by(auto=True)
+ db_objectstor_binary.update(values)
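
ObjectStorBinaryList.get_all() pages with a marker row: the marker is looked up first (MarkerNotFound if it is missing) and paginate_query then returns rows strictly after it in (sort_key, uuid) order. A self-contained toy equivalent of that keyset pagination:

    rows = [{'uuid': 'a', 'name': 'pkg1'},
            {'uuid': 'b', 'name': 'pkg2'},
            {'uuid': 'c', 'name': 'pkg3'}]

    def paginate(rows, limit=None, marker=None, sort_key='name'):
        rows = sorted(rows, key=lambda r: (r[sort_key], r['uuid']))
        if marker is not None:
            # the real code raises MarkerNotFound; here a missing
            # marker raises ValueError instead
            idx = [r['uuid'] for r in rows].index(marker)
            rows = rows[idx + 1:]  # rows strictly after the marker row
        return rows[:limit] if limit is not None else rows

    print(paginate(rows, limit=1, marker='a'))  # [{'uuid': 'b', 'name': 'pkg2'}]
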
diff --git a/gosbs/objects/package.py b/gosbs/objects/package.py
index 7f3ac6b..c99c362 100644
--- a/gosbs/objects/package.py
+++ b/gosbs/objects/package.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/package_email.py b/gosbs/objects/package_email.py
index c6a5f2d..acb3c28 100644
--- a/gosbs/objects/package_email.py
+++ b/gosbs/objects/package_email.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/package_metadata.py b/gosbs/objects/package_metadata.py
index 5f6270d..6b7af16 100644
--- a/gosbs/objects/package_metadata.py
+++ b/gosbs/objects/package_metadata.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/project.py b/gosbs/objects/project.py
index 1e1917f..7d50ef8 100644
--- a/gosbs/objects/project.py
+++ b/gosbs/objects/project.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/project_build.py b/gosbs/objects/project_build.py
index e8f1885..be251f3 100644
--- a/gosbs/objects/project_build.py
+++ b/gosbs/objects/project_build.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -50,7 +46,7 @@ def _project_build_create(context, values):
db_project_build.save(context.session)
except db_exc.DBDuplicateEntry as e:
if 'project_buildid' in e.columns:
- raise exception.ImagesIdExists(project_build_id=values['project_buildid'])
+ raise exception.ImagesIdExists(project_build_uuid=values['project_buildid'])
raise exception.ImagesExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
@@ -62,7 +58,7 @@ def _project_build_create(context, values):
def _project_build_destroy(context, project_build_uuid=None, project_builduuid=None):
query = context.session.query(models.ProjectsBuilds)
- if project_build_id is not None:
+ if project_build_uuid is not None:
query.filter(models.ProjectsBuilds.uuid == project_build_uuid).delete()
else:
query.filter(models.ProjectsBuilds.uuid == project_builduuid).delete()
@@ -174,9 +170,9 @@ class ProjectBuild(base.NovaObject, base.NovaObjectDictCompat, ):
@db_api.main_context_manager.writer
def _save(self, context, values):
db_project_build = context.session.query(models.ProjectsBuilds).\
- filter_by(id=self.id).first()
+ filter_by(uuid=self.uuid).first()
if not db_project_build:
- raise exception.ImagesNotFound(project_build_id=self.id)
+ raise exception.ImagesNotFound(project_build_uuid=self.uuid)
db_project_build.update(values)
db_project_build.save(context.session)
# Refresh ourselves from the DB object so we get the new updated_at.
@@ -189,8 +185,8 @@ class ProjectBuild(base.NovaObject, base.NovaObjectDictCompat, ):
self._save(context, updates)
@staticmethod
- def _project_build_destroy(context, project_build_id=None, project_buildid=None):
- _project_build_destroy(context, project_build_id=project_build_id, project_buildid=project_buildid)
+    def _project_build_destroy(context, project_build_uuid=None, project_builduuid=None):
+ _project_build_destroy(context, project_build_uuid=project_build_uuid, project_builduuid=project_builduuid)
#@base.remotable
def destroy(self, context):
@@ -200,10 +196,10 @@ class ProjectBuild(base.NovaObject, base.NovaObjectDictCompat, ):
# delete request with only our name filled out. However, if we have
# our id property, we should instead delete with that since it's
# far more specific.
- if 'id' in self:
- self._project_build_destroy(context, project_build_id=self.id)
+ if 'uuid' in self:
+ self._project_build_destroy(context, project_build_uuid=self.uuid)
else:
- self._project_build_destroy(context, project_buildid=self.project_buildid)
+ self._project_build_destroy(context, project_builduuid=self.project_builduuid)
#self._from_db_object(context, self, db_project_build)
@base.remotable_classmethod
@@ -214,9 +210,9 @@ class ProjectBuild(base.NovaObject, base.NovaObjectDictCompat, ):
if 'project_uuid' in filters:
db_project_build = db_project_build.filter(
models.ProjectsBuilds.project_uuid == filters['project_uuid'])
- if 'repo_uuid' in filters:
+ if 'status' in filters:
db_project_build = db_project_build.filter(
- models.ProjectsBuilds.repo_uuid == filters['repo_uuid'])
+ models.ProjectsBuilds.status == filters['status'])
db_project_build = db_project_build.first()
if not db_project_build:
return None
@@ -247,7 +243,7 @@ def _project_build_get_all_from_db(context, inactive, filters, sort_key, sort_di
query = sqlalchemyutils.paginate_query(query, models.ProjectsBuilds,
limit,
- [sort_key, 'id'],
+ [sort_key, 'uuid'],
marker=marker_row,
sort_dir=sort_dir)
return [_dict_with_extra_specs(i) for i in query.all()]
@@ -263,7 +259,7 @@ class ProjectBuildList(base.ObjectListBase, base.NovaObject):
@base.remotable_classmethod
def get_all(cls, context, inactive=False, filters=None,
- sort_key='id', sort_dir='asc', limit=None, marker=None):
+ sort_key='uuid', sort_dir='asc', limit=None, marker=None):
db_project_builds = _project_build_get_all_from_db(context,
inactive=inactive,
filters=filters,
diff --git a/gosbs/objects/project_metadata.py b/gosbs/objects/project_metadata.py
index 4edbb55..4555066 100644
--- a/gosbs/objects/project_metadata.py
+++ b/gosbs/objects/project_metadata.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/project_metadata.py b/gosbs/objects/project_option.py
similarity index 54%
copy from gosbs/objects/project_metadata.py
copy to gosbs/objects/project_option.py
index 4edbb55..051c9d7 100644
--- a/gosbs/objects/project_metadata.py
+++ b/gosbs/objects/project_option.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -35,42 +31,42 @@ from gosbs.objects import fields
CONF = gosbs.conf.CONF
-def _dict_with_extra_specs(projectmetadata_model):
+def _dict_with_extra_specs(project_option_model):
extra_specs = {}
- return dict(projectmetadata_model, extra_specs=extra_specs)
+ return dict(project_option_model, extra_specs=extra_specs)
@db_api.main_context_manager.writer
-def _projectmetadata_create(context, values):
- db_projectmetadata = models.ProjectsMetadata()
- db_projectmetadata.update(values)
+def _project_option_create(context, values):
+ db_project_option = models.ProjectsOptions()
+ db_project_option.update(values)
try:
- db_projectmetadata.save(context.session)
+ db_project_option.save(context.session)
except db_exc.DBDuplicateEntry as e:
- if 'projectmetadataid' in e.columns:
- raise exception.ImagesIdExists(projectmetadata_id=values['projectmetadataid'])
+ if 'project_optionid' in e.columns:
+ raise exception.ImagesIdExists(project_option_id=values['project_optionid'])
raise exception.ImagesExists(name=values['name'])
except Exception as e:
raise db_exc.DBError(e)
- return _dict_with_extra_specs(db_projectmetadata)
+ return _dict_with_extra_specs(db_project_option)
@db_api.main_context_manager.writer
-def _projectmetadata_destroy(context, projectmetadata_id=None, projectmetadataid=None):
- query = context.session.query(models.ProjectsMetadata)
+def _project_option_destroy(context, project_option_id=None, project_optionid=None):
+ query = context.session.query(models.ProjectsOptions)
- if projectmetadata_id is not None:
- query.filter(models.ProjectsMetadata.id == projectmetadata_id).delete()
+ if project_option_id is not None:
+ query.filter(models.ProjectsOptions.id == project_option_id).delete()
else:
- query.filter(models.ProjectsMetadata.id == projectmetadataid).delete()
+ query.filter(models.ProjectsOptions.id == project_optionid).delete()
# TODO(berrange): Remove NovaObjectDictCompat
# TODO(mriedem): Remove NovaPersistentObject in version 2.0
@base.NovaObjectRegistry.register
-class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
+class ProjectOption(base.NovaObject, base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
@@ -78,114 +74,112 @@ class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
fields = {
'id': fields.IntegerField(),
'project_uuid': fields.UUIDField(),
- 'titel': fields.StringField(),
- 'description' : fields.StringField(),
- 'project_repo_uuid': fields.UUIDField(),
- 'project_profile' : fields.StringField(),
- 'project_profile_repo_uuid': fields.UUIDField(),
+ 'oneshot' : fields.BooleanField(),
+ 'removebin' : fields.BooleanField(),
+ 'depclean' : fields.BooleanField(),
}
def __init__(self, *args, **kwargs):
- super(ProjectMetadata, self).__init__(*args, **kwargs)
+ super(ProjectOption, self).__init__(*args, **kwargs)
self._orig_extra_specs = {}
- self._orig_projectmetadatas = []
+ self._orig_project_options = []
def obj_make_compatible(self, primitive, target_version):
- super(ProjectMetadata, self).obj_make_compatible(primitive, target_version)
+ super(ProjectOption, self).obj_make_compatible(primitive, target_version)
target_version = versionutils.convert_version_to_tuple(target_version)
@staticmethod
- def _from_db_object(context, projectmetadata, db_projectmetadata, expected_attrs=None):
+ def _from_db_object(context, project_option, db_project_option, expected_attrs=None):
if expected_attrs is None:
expected_attrs = []
- projectmetadata._context = context
- for name, field in projectmetadata.fields.items():
- value = db_projectmetadata[name]
+ project_option._context = context
+ for name, field in project_option.fields.items():
+ value = db_project_option[name]
if isinstance(field, fields.IntegerField):
value = value if value is not None else 0
- projectmetadata[name] = value
+ project_option[name] = value
- projectmetadata.obj_reset_changes()
- return projectmetadata
+ project_option.obj_reset_changes()
+ return project_option
@staticmethod
@db_api.main_context_manager.reader
- def _projectmetadata_get_query_from_db(context):
- query = context.session.query(models.ProjectsMetadata)
+ def _project_option_get_query_from_db(context):
+ query = context.session.query(models.ProjectsOptions)
return query
@staticmethod
@require_context
- def _projectmetadata_get_from_db(context, id):
- """Returns a dict describing specific projectmetadatas."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ def _project_option_get_from_db(context, id):
+ """Returns a dict describing specific project_options."""
+ result = ProjectOption._project_option_get_query_from_db(context).\
filter_by(id=id).\
first()
if not result:
- raise exception.ImagesNotFound(projectmetadata_id=id)
+ raise exception.ImagesNotFound(project_option_id=id)
return result
@staticmethod
@require_context
- def _projectmetadata_get_from_db(context, id):
- """Returns a dict describing specific projectmetadatas."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ def _project_option_get_from_db(context, id):
+ """Returns a dict describing specific project_options."""
+ result = ProjectOption._project_option_get_query_from_db(context).\
filter_by(id=id).\
first()
if not result:
- raise exception.ImagesNotFound(projectmetadata_id=id)
+ raise exception.ImagesNotFound(project_option_id=id)
return result
@staticmethod
@require_context
- def _projectmetadata_get_by_name_from_db(context, name):
+ def _project_option_get_by_name_from_db(context, name):
"""Returns a dict describing specific flavor."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ result = ProjectOption._project_option_get_query_from_db(context).\
filter_by(name=name).\
first()
if not result:
- raise exception.FlavorNotFoundByName(projectmetadatas_name=name)
+ raise exception.FlavorNotFoundByName(project_options_name=name)
return _dict_with_extra_specs(result)
@staticmethod
@require_context
- def _projectmetadata_get_by_uuid_from_db(context, uuid):
+ def _project_option_get_by_uuid_from_db(context, uuid):
"""Returns a dict describing specific flavor."""
- result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ result = ProjectOption._project_option_get_query_from_db(context).\
filter_by(project_uuid=uuid).\
first()
if not result:
- raise exception.FlavorNotFoundByName(projectmetadatas_name=name)
+            raise exception.FlavorNotFoundByName(project_options_name=uuid)
return _dict_with_extra_specs(result)
def obj_reset_changes(self, fields=None, recursive=False):
- super(ProjectMetadata, self).obj_reset_changes(fields=fields,
+ super(ProjectOption, self).obj_reset_changes(fields=fields,
recursive=recursive)
def obj_what_changed(self):
- changes = super(ProjectMetadata, self).obj_what_changed()
+ changes = super(ProjectOption, self).obj_what_changed()
return changes
@base.remotable_classmethod
def get_by_id(cls, context, id):
- db_projectmetadata = cls._projectmetadata_get_from_db(context, id)
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_project_option = cls._project_option_get_from_db(context, id)
+ return cls._from_db_object(context, cls(context), db_project_option,
expected_attrs=[])
@base.remotable_classmethod
def get_by_name(cls, context, name):
- db_projectmetadata = cls._projectmetadata_get_by_name_from_db(context, name)
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_project_option = cls._project_option_get_by_name_from_db(context, name)
+ return cls._from_db_object(context, cls(context), db_project_option,
expected_attrs=[])
@base.remotable_classmethod
def get_by_uuid(cls, context, uuid):
- db_projectmetadata = cls._projectmetadata_get_by_uuid_from_db(context, uuid)
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_project_option = cls._project_option_get_by_uuid_from_db(context, uuid)
+ return cls._from_db_object(context, cls(context), db_project_option,
expected_attrs=[])
@staticmethod
- def _projectmetadata_create(context, updates):
- return _projectmetadata_create(context, updates)
+ def _project_option_create(context, updates):
+ return _project_option_create(context, updates)
#@base.remotable
def create(self, context):
@@ -193,22 +187,22 @@ class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
# raise exception.ObjectActionError(action='create',
#reason='already created')
updates = self.obj_get_changes()
- db_projectmetadata = self._projectmetadata_create(context, updates)
- self._from_db_object(context, self, db_projectmetadata)
+ db_project_option = self._project_option_create(context, updates)
+ self._from_db_object(context, self, db_project_option)
# NOTE(mriedem): This method is not remotable since we only expect the API
- # to be able to make updates to a projectmetadatas.
+    # to be able to make updates to a project_option.
@db_api.main_context_manager.writer
def _save(self, context, values):
- db_projectmetadata = context.session.query(models.ProjectsMetadata).\
+ db_project_option = context.session.query(models.ProjectsOptions).\
filter_by(id=self.id).first()
- if not db_projectmetadata:
- raise exception.ImagesNotFound(projectmetadata_id=self.id)
- db_projectmetadata.update(values)
- db_projectmetadata.save(context.session)
+ if not db_project_option:
+ raise exception.ImagesNotFound(project_option_id=self.id)
+ db_project_option.update(values)
+ db_project_option.save(context.session)
# Refresh ourselves from the DB object so we get the new updated_at.
- self._from_db_object(context, self, db_projectmetadata)
+ self._from_db_object(context, self, db_project_option)
self.obj_reset_changes()
def save(self, context):
@@ -217,57 +211,57 @@ class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
self._save(context, updates)
@staticmethod
- def _projectmetadata_destroy(context, projectmetadata_id=None, projectmetadataid=None):
- _projectmetadata_destroy(context, projectmetadata_id=projectmetadata_id, projectmetadataid=projectmetadataid)
+ def _project_option_destroy(context, project_option_id=None, project_optionid=None):
+ _project_option_destroy(context, project_option_id=project_option_id, project_optionid=project_optionid)
#@base.remotable
def destroy(self, context):
- # NOTE(danms): Historically the only way to delete a projectmetadatas
+ # NOTE(danms): Historically the only way to delete a project option
# is via name, which is not very precise. We need to be able to
- # support the light construction of a projectmetadatas object and subsequent
+ # support the light construction of a project option object and subsequent
# delete request with only our name filled out. However, if we have
# our id property, we should instead delete with that since it's
# far more specific.
if 'id' in self:
- self._projectmetadata_destroy(context, projectmetadata_id=self.id)
+ self._project_option_destroy(context, project_option_id=self.id)
else:
- self._projectmetadata_destroy(context, projectmetadataid=self.projectmetadataid)
- #self._from_db_object(context, self, db_projectmetadata)
+ self._project_option_destroy(context, project_optionid=self.project_optionid)
+ #self._from_db_object(context, self, db_project_option)
@base.remotable_classmethod
def get_by_filters_first(cls, context, filters=None):
filters = filters or {}
- db_projectmetadata = ProjectMetadata._projectmetadata_get_query_from_db(context)
+ db_project_option = ProjectOption._project_option_get_query_from_db(context)
if 'status' in filters:
- db_projectmetadata = db_projectmetadata.filter(
- models.ProjectsMetadata.status == filters['status']).first()
- return cls._from_db_object(context, cls(context), db_projectmetadata,
+ db_project_option = db_project_option.filter(
+ models.ProjectsOptions.status == filters['status']).first()
+ return cls._from_db_object(context, cls(context), db_project_option,
expected_attrs=[])
@db_api.main_context_manager
-def _projectmetadata_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+def _project_option_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
limit, marker):
- """Returns all projectmetadatass.
+ """Returns all project_optionss.
"""
filters = filters or {}
- query = ProjectMetadata._projectmetadata_get_query_from_db(context)
+ query = ProjectOption._project_option_get_query_from_db(context)
if 'status' in filters:
query = query.filter(
- models.ProjectsMetadata.status == filters['status'])
+ models.ProjectsOptions.status == filters['status'])
marker_row = None
if marker is not None:
- marker_row = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+ marker_row = ProjectOption._project_option_get_query_from_db(context).\
filter_by(id=marker).\
first()
if not marker_row:
raise exception.MarkerNotFound(marker=marker)
- query = sqlalchemyutils.paginate_query(query, models.ProjectsMetadata,
+ query = sqlalchemyutils.paginate_query(query, models.ProjectsOptions,
limit,
[sort_key, 'id'],
marker=marker_row,
@@ -276,33 +270,27 @@ def _projectmetadata_get_all_from_db(context, inactive, filters, sort_key, sort_
@base.NovaObjectRegistry.register
-class ProjectMetadataList(base.ObjectListBase, base.NovaObject):
+class ProjectOptionList(base.ObjectListBase, base.NovaObject):
VERSION = '1.0'
fields = {
- 'objects': fields.ListOfObjectsField('ProjectMetadata'),
+ 'objects': fields.ListOfObjectsField('ProjectOption'),
}
@base.remotable_classmethod
def get_all(cls, context, inactive=False, filters=None,
sort_key='id', sort_dir='asc', limit=None, marker=None):
- db_projectmetadatas = _projectmetadata_get_all_from_db(context,
+ db_project_options = _project_option_get_all_from_db(context,
inactive=inactive,
filters=filters,
sort_key=sort_key,
sort_dir=sort_dir,
limit=limit,
marker=marker)
- return base.obj_make_list(context, cls(context), objects.projectmetadata.ProjectMetadata,
- db_projectmetadatas,
+ return base.obj_make_list(context, cls(context), objects.project_option.ProjectOption,
+ db_project_options,
expected_attrs=[])
@db_api.main_context_manager.writer
def destroy_all(context):
- context.session.query(models.ProjectsMetadata).delete()
-
- @db_api.main_context_manager.writer
- def update_all(context):
- values = {'status': 'waiting', }
- db_projectmetadata = context.session.query(models.ProjectsMetadata).filter_by(auto=True)
- db_projectmetadata.update(values)
+ context.session.query(models.ProjectsOptions).delete()
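The rename keeps the flavor-style object API intact, so callers only swap the module and class names. A minimal usage sketch, assuming the registry path shown above (illustration, not part of the commit):

    from gosbs import objects

    def load_project_options(context, project_uuid):
        # get_by_uuid() filters ProjectsOptions rows on project_uuid and
        # raises FlavorNotFoundByName (naming inherited from the nova
        # flavor code this object derives from) when no row matches.
        return objects.project_option.ProjectOption.get_by_uuid(
            context, project_uuid)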
diff --git a/gosbs/objects/project_repo.py b/gosbs/objects/project_repo.py
index 85e458d..bbee237 100644
--- a/gosbs/objects/project_repo.py
+++ b/gosbs/objects/project_repo.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -83,7 +79,6 @@ class ProjectRepo(base.NovaObject, base.NovaObjectDictCompat, ):
'build' : fields.BooleanField(),
'test' : fields.BooleanField(),
'qa' : fields.BooleanField(),
- 'depclean' : fields.BooleanField(),
'repoman' : fields.BooleanField(),
}
diff --git a/gosbs/objects/repo.py b/gosbs/objects/repo.py
index 747016d..24b6d41 100644
--- a/gosbs/objects/repo.py
+++ b/gosbs/objects/repo.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/restriction.py b/gosbs/objects/restriction.py
index 4aede29..4c22a6e 100644
--- a/gosbs/objects/restriction.py
+++ b/gosbs/objects/restriction.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -70,7 +66,7 @@ def _restriction_destroy(context, restriction_id=None, restrictionid=None):
# TODO(berrange): Remove NovaObjectDictCompat
# TODO(mriedem): Remove NovaPersistentObject in version 2.0
@base.NovaObjectRegistry.register
-class Restriction(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+class Restriction(base.NovaObject, base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
diff --git a/gosbs/objects/service.py b/gosbs/objects/service.py
index 1fc0c4f..397f29f 100644
--- a/gosbs/objects/service.py
+++ b/gosbs/objects/service.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/service.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_log import log as logging
from oslo_utils import uuidutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/service_repo.py b/gosbs/objects/service_repo.py
index f4761aa..c4e1a78 100644
--- a/gosbs/objects/service_repo.py
+++ b/gosbs/objects/service_repo.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/objects/task.py b/gosbs/objects/task.py
index 83f9c5c..3a6975a 100644
--- a/gosbs/objects/task.py
+++ b/gosbs/objects/task.py
@@ -12,9 +12,7 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
+import pdb
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
diff --git a/gosbs/objects/use.py b/gosbs/objects/use.py
index dd71073..e80a18e 100644
--- a/gosbs/objects/use.py
+++ b/gosbs/objects/use.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
@@ -70,7 +66,7 @@ def _use_destroy(context, use_id=None, useid=None):
# TODO(berrange): Remove NovaObjectDictCompat
# TODO(mriedem): Remove NovaPersistentObject in version 2.0
@base.NovaObjectRegistry.register
-class Use(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+class Use(base.NovaObject, base.NovaObjectDictCompat):
# Version 1.0: Initial version
VERSION = '1.0'
diff --git a/gosbs/objects/user.py b/gosbs/objects/user.py
index f4d6783..f43dec2 100644
--- a/gosbs/objects/user.py
+++ b/gosbs/objects/user.py
@@ -12,10 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
from oslo_db import exception as db_exc
from oslo_db.sqlalchemy import utils as sqlalchemyutils
from oslo_utils import versionutils
diff --git a/gosbs/policy.py b/gosbs/policy.py
index f8f8659..1040830 100644
--- a/gosbs/policy.py
+++ b/gosbs/policy.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/policy.py
-
"""Policy Engine For Nova."""
import copy
import re
diff --git a/gosbs/profiler.py b/gosbs/profiler.py
index de9ed9b..57f04b5 100644
--- a/gosbs/profiler.py
+++ b/gosbs/profiler.py
@@ -13,8 +13,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/profiler.py
-
from oslo_utils import importutils
import gosbs.conf
diff --git a/gosbs/rpc.py b/gosbs/rpc.py
index 73154ea..c1e1246 100644
--- a/gosbs/rpc.py
+++ b/gosbs/rpc.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/rpc.py
-
import functools
from oslo_log import log as logging
diff --git a/gosbs/scheduler/category.py b/gosbs/scheduler/category.py
index c9b98e7..4ac1f84 100644
--- a/gosbs/scheduler/category.py
+++ b/gosbs/scheduler/category.py
@@ -1,18 +1,9 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
import re
import os
+import pdb
from portage.xml.metadata import MetaDataXML
from portage.checksum import perform_checksum
@@ -22,6 +13,7 @@ from gosbs import objects
import gosbs.conf
CONF = gosbs.conf.CONF
+
LOG = logging.getLogger(__name__)
def get_category_metadata_tree(c_path):
diff --git a/gosbs/scheduler/ebuild.py b/gosbs/scheduler/ebuild.py
index 646a6c1..c802e60 100644
--- a/gosbs/scheduler/ebuild.py
+++ b/gosbs/scheduler/ebuild.py
@@ -1,20 +1,11 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
import os
import git
import re
import portage
+import pdb
from portage.checksum import perform_checksum
from oslo_log import log as logging
@@ -23,6 +14,7 @@ from gosbs import objects
import gosbs.conf
CONF = gosbs.conf.CONF
+
LOG = logging.getLogger(__name__)
def get_all_cpv_from_package(myportdb, cp, repo_path):
@@ -84,6 +76,8 @@ def check_use_db(context, use):
def create_cpv_use_db(context, ebuild_version_uuid, ebuild_version_metadata_tree):
for iuse in ebuild_version_metadata_tree[10].split():
+ print('iuse')
+ print(iuse)
status = False
if iuse[0] in ["+"]:
iuse = iuse[1:]
@@ -139,6 +133,8 @@ def check_restriction_db(context, restriction):
def create_cpv_restriction_db(context, ebuild_version_uuid, ebuild_version_metadata_tree):
for restriction in ebuild_version_metadata_tree[4].split():
+ print('restriction')
+ print(restriction)
if restriction in ["!"]:
restriction = restriction[1:]
if restriction in ["?"]:
@@ -154,6 +150,7 @@ def create_cpv_metadata_db(context, myportdb, cpv, ebuild_file, ebuild_version_d
repo_path = CONF.repopath + '/' + repo_db.name + '.git'
git_commit, git_commit_msg = get_git_log_ebuild(repo_path, ebuild_file)
ebuild_version_metadata_tree = get_ebuild_metadata(myportdb, cpv, repo_db.name)
+ print(ebuild_version_metadata_tree)
ebuild_metadata_db = objects.ebuild_metadata.EbuildMetadata()
ebuild_metadata_db.ebuild_uuid = ebuild_version_db.uuid
ebuild_metadata_db.commit = git_commit
@@ -176,8 +173,10 @@ def check_cpv_db(context, myportdb, cp, repo_db, package_uuid):
ebuild_version_tree_list = []
ebuild_version_tree_dict_new = {}
succes = True
+ #pdb.set_trace()
for cpv in sorted(get_all_cpv_from_package(myportdb, cp, repo_path)):
LOG.debug("Checking %s", cpv)
+ print(cpv)
ebuild_version_tree = portage.versions.cpv_getversion(cpv)
package = portage.versions.catpkgsplit(cpv)[1]
ebuild_file = repo_path + "/" + cp + "/" + package + "-" + ebuild_version_tree + ".ebuild"
@@ -196,6 +195,7 @@ def check_cpv_db(context, myportdb, cp, repo_db, package_uuid):
succes = create_cpv_metadata_db(context, myportdb, cpv, ebuild_file, ebuild_version_db, repo_db)
ebuild_version_tree_dict_new[cpv] = ebuild_version_db.uuid
ebuild_version_tree_list.append(ebuild_version_tree)
+ print('check old ebuilds')
for ebuild_db in objects.ebuild.EbuildList.get_all(context, filters=filters):
if not ebuild_db.version in ebuild_version_tree_list:
LOG.info("Deleting %s in the database", ebuild_db.version)
diff --git a/gosbs/scheduler/email.py b/gosbs/scheduler/email.py
index 581b22a..4b7ca90 100644
--- a/gosbs/scheduler/email.py
+++ b/gosbs/scheduler/email.py
@@ -1,15 +1,5 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
from oslo_log import log as logging
from gosbs import objects
@@ -29,8 +19,4 @@ def check_email_db(context, email):
email_db = objects.email.Email.get_by_name(context, email)
if email_db is None:
email_db = create_email(context, email)
- else:
- if email_db.deleted is True:
- email_db.deleted = False
- email_db.save(context)
return email_db.id
diff --git a/gosbs/scheduler/manager.py b/gosbs/scheduler/manager.py
index fea9923..9e2a7a0 100644
--- a/gosbs/scheduler/manager.py
+++ b/gosbs/scheduler/manager.py
@@ -15,10 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/compute/manager.py
-# We have change the code so it will fit what we need.
-# It need more cleaning.
-
"""Handles all processes relating to instances (guest vms).
The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
diff --git a/gosbs/scheduler/package.py b/gosbs/scheduler/package.py
index 07b467c..9f33415 100644
--- a/gosbs/scheduler/package.py
+++ b/gosbs/scheduler/package.py
@@ -1,15 +1,5 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
import os
import git
@@ -27,6 +17,7 @@ from gosbs.scheduler.ebuild import check_cpv_db, deleted_cpv_db
import gosbs.conf
CONF = gosbs.conf.CONF
+
LOG = logging.getLogger(__name__)
def get_all_cp_from_repo(myportdb, repopath):
@@ -132,8 +123,10 @@ def deleted_cp_db(context, package_db):
filters = { 'deleted' : False,
'package_uuid' : package_db.uuid,
}
+ print(package_db.uuid)
for ebuild_db in objects.ebuild.EbuildList.get_all(context, filters=filters):
LOG.info("Deleting %s in the database", ebuild_db.version)
+ print(ebuild_db.package_uuid)
deleted_cpv_db(context, ebuild_db.uuid)
package_db.deleted = True
package_db.status = 'completed'
diff --git a/gosbs/scheduler/rpcapi.py b/gosbs/scheduler/rpcapi.py
index 3af4067..4b6abf5 100644
--- a/gosbs/scheduler/rpcapi.py
+++ b/gosbs/scheduler/rpcapi.py
@@ -11,10 +11,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/compute/rpcapi.py
-# We have change the code so it will fit what we need.
-# It need more cleaning and work.
-
"""
Client side of the scheduler RPC API.
"""
diff --git a/gosbs/service.py b/gosbs/service.py
index ea67f81..7deb686 100644
--- a/gosbs/service.py
+++ b/gosbs/service.py
@@ -15,9 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/service.py
-# we removed class WSGIService
-
"""Generic Node base class for all workers that run on hosts."""
import os
@@ -56,7 +53,7 @@ LOG = logging.getLogger(__name__)
CONF = gosbs.conf.CONF
SERVICE_MANAGERS = {
- 'gosbs-gitmirror': 'gosbs.gitmirror.manager.GitMirrorManager',
+ 'gosbs-builder': 'gosbs.builder.manager.BuilderManager',
'gosbs-scheduler': 'gosbs.scheduler.manager.SchedulerManager',
}
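A sketch of how the new builder entry would resolve at service startup, assuming the importutils-based lookup used by the nova-derived Service class (hypothetical illustration, not part of the commit):

    from oslo_utils import importutils

    # 'gosbs-builder' maps to 'gosbs.builder.manager.BuilderManager'
    manager_cls = importutils.import_class(SERVICE_MANAGERS['gosbs-builder'])
    manager = manager_cls()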
diff --git a/gosbs/tasks/builder/__init__.py b/gosbs/tasks/builder/__init__.py
index bb93406..47b803f 100644
--- a/gosbs/tasks/builder/__init__.py
+++ b/gosbs/tasks/builder/__init__.py
@@ -21,4 +21,5 @@ LOG = logging.getLogger(__name__)
def activete_all_tasks(context, service_uuid):
# Tasks
- check_task_db(context, 'update_git', datetime(1, 1, 1, 0, 5, 0, 0), True, service_uuid)
+ check_task_db(context, 'update_git', datetime(1, 1, 1, 0, 1, 0, 0), True, service_uuid)
+ check_task_db(context, 'build_pkg', datetime(1, 1, 1, 0, 3, 0, 0), True, service_uuid)
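The year-1 datetime arguments appear to encode repeat intervals (one minute for update_git, three minutes for build_pkg). Assuming check_task_db treats them as offsets from datetime.min, the interval can be recovered as a timedelta (illustrative sketch only):

    from datetime import datetime

    def as_interval(dt):
        # datetime(1, 1, 1, 0, 3) - datetime.min == timedelta(minutes=3)
        return dt - datetime.min

    print(as_interval(datetime(1, 1, 1, 0, 3, 0, 0)))  # 0:03:00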
diff --git a/gosbs/tasks/builder/build_pkg.py b/gosbs/tasks/builder/build_pkg.py
new file mode 100644
index 0000000..bb86a05
--- /dev/null
+++ b/gosbs/tasks/builder/build_pkg.py
@@ -0,0 +1,164 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+import os
+from portage.checksum import perform_checksum
+
+from oslo_log import log as logging
+from gosbs import objects
+from gosbs._emerge.actions import load_emerge_config
+from gosbs._emerge.main import emerge_main
+from gosbs.common.flags import get_build_use
+from gosbs.common.binary import destroy_local_binary, destroy_objectstor_binary
+from gosbs.builder.depclean import do_depclean
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def get_build_job(context, project_db):
+ filters = {
+ 'project_uuid' : project_db.uuid,
+ 'status' : 'waiting',
+ 'deleted' : False,
+ }
+ project_build_db = objects.project_build.ProjectBuild.get_by_filters(context, filters)
+ if project_build_db is None:
+ return True, {}
+ ebuild_db = objects.ebuild.Ebuild.get_by_uuid(context, project_build_db.ebuild_uuid)
+ package_db = objects.package.Package.get_by_uuid(context, ebuild_db.package_uuid)
+ repo_db = objects.repo.Repo.get_by_uuid(context, package_db.repo_uuid)
+ category_db = objects.category.Category.get_by_uuid(context, package_db.category_uuid)
+ cp = category_db.name + '/' + package_db.name
+ cpv = cp + '-' + ebuild_db.version
+ repo_path = CONF.repopath + '/' + repo_db.name + '.git'
+ ebuild_file = repo_path + "/" + cp + "/" + package_db.name + "-" + ebuild_db.version + ".ebuild"
+ if not os.path.isfile(ebuild_file):
+ LOG.error("File %s is not found for %s", ebuild_file, cpv)
+ project_build_db.status = 'failed'
+ project_build_db.save(context)
+ return False, {}
+ ebuild_tree_checksum = perform_checksum(ebuild_file, "SHA256")[0]
+ if ebuild_tree_checksum != ebuild_db.checksum:
+ LOG.error("File %s with wrong checksum", ebuild_file)
+ project_build_db.status = 'failed'
+ project_build_db.save(context)
+ return False, {}
+ build_job = {}
+ build_job['ebuild'] = ebuild_db
+ build_job['build'] = project_build_db
+ build_job['cpv'] = cpv
+ build_job['repo'] = repo_db
+ build_job['category'] = category_db
+ build_job['package'] = package_db
+ return True, build_job
+
+def check_build_use(context, build_job, mysettings, myportdb):
+ build_tree_use, usemasked = get_build_use(build_job['cpv'], mysettings, myportdb)
+ filters = {
+ 'build_uuid' : build_job['build'].uuid,
+ }
+ build_db_use = {}
+ for build_use_db in objects.build_iuse.BuildIUseList.get_all(context, filters=filters):
+ use_db = objects.use.Use.get_by_id(context, build_use_db.use_id)
+ build_db_use[use_db.flag] = build_use_db.status
+ build_use_flags = {}
+ for k, v in build_tree_use.items():
+ if build_db_use[k] != v:
+ build_use_flags[k] = bool(build_db_use[k])
+ return build_use_flags
+
+def set_features_packages(context, build_job, build_use, mysettings):
+ restrictions = {}
+ filters = {
+ 'ebuild_uuid' : build_job['ebuild'].uuid,
+ }
+ for ebuild_restriction_db in objects.ebuild_restriction.EbuildRestrictionList.get_all(context, filters=filters):
+ restriction_db = objects.restriction.Restriction.get_by_id(context, ebuild_restriction_db.restriction_id)
+ restrictions[restriction_db.restriction] = True
+ if 'test' in restrictions or "test" in build_use or "test" in mysettings.features:
+ enable_test_features = False
+ disable_test_features = False
+ if 'test' in restrictions and 'test' in mysettings.features:
+ disable_test_features = True
+ if "test" in build_use:
+ if build_use['test'] is False and "test" in mysettings.features:
+ disable_test_features = True
+ if build_use['test'] is True and not disable_test_features and "test" not in mysettings.features:
+ enable_test_features = True
+ if disable_test_features or enable_test_features:
+ filetext = '=' + build_job['cpv'] + ' '
+ if disable_test_features:
+ filetext = filetext + 'notest.conf'
+ if enable_test_features:
+ filetext = filetext + 'test.conf'
+ LOG.debug("features filetext: %s" % filetext)
+ with open("/etc/portage/package.env/99_env", "a") as f:
+ f.write(filetext)
+ f.write('\n')
+ return True
+
+def set_use_packages(context, build_job, build_use):
+ if build_use != {}:
+ filetext = '=' + build_job['cpv']
+ for k, v in build_use.items():
+ if v:
+ filetext = filetext + ' ' + k
+ else:
+ filetext = filetext + ' -' + k
+ LOG.debug("use filetext: %s" % filetext)
+ with open("/etc/portage/package.use/99_autounmask", "a") as f:
+ f.write(filetext)
+ f.write('\n')
+ return True
+
+def destroy_binary(context, build_job, mysettings, project_db, service_uuid):
+ destroy_local_binary(context, build_job, project_db, service_uuid, mysettings)
+ destroy_objectstor_binary(context, build_job, project_db)
+
+def emeerge_cmd_options(context, build_job, project_options_db):
+ argscmd = []
+ if project_options_db.oneshot:
+ argscmd.append('--oneshot')
+ argscmd.append('=' + build_job['cpv'])
+ return argscmd
+
+def touch_package_settings():
+ # Recreate the per-build override files as empty files. Opening in
+ # 'w' mode truncates them, so no separate os.remove() is needed, and
+ # the with-statement closes the file. Catch OSError instead of a
+ # bare except so unrelated errors are not silently swallowed.
+ for path in ("/etc/portage/package.use/99_autounmask",
+ "/etc/portage/package.env/99_env"):
+ try:
+ with open(path, "w"):
+ pass
+ except OSError:
+ pass
+
+def task(context, service_uuid):
+ project_db = objects.project.Project.get_by_name(context, CONF.builder.project)
+ project_metadata_db = objects.project_metadata.ProjectMetadata.get_by_uuid(context, project_db.uuid)
+ project_options_db = objects.project_option.ProjectOption.get_by_uuid(context, project_db.uuid)
+ succes, build_job = get_build_job(context, project_db)
+ # Whether the lookup failed or there was no waiting build request,
+ # an empty build_job means there is nothing to do.
+ if build_job == {}:
+ return
+ if succes:
+ mysettings, trees, mtimedb = load_emerge_config()
+ myportdb = trees[mysettings["ROOT"]]["porttree"].dbapi
+ build_use = check_build_use(context, build_job, mysettings, myportdb)
+ succes = set_features_packages(context, build_job, build_use, mysettings)
+ succes = set_use_packages(context, build_job, build_use)
+ if project_options_db.removebin:
+ destroy_binary(context, build_job, mysettings, project_db, service_uuid)
+ argscmd = emeerge_cmd_options(context, build_job, project_options_db)
+ build_fail = emerge_main(context, build_job, args=argscmd)
+ if project_options_db.depclean:
+ depclean_fail = do_depclean(context)
+ touch_package_settings()
+ return
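For context, set_use_packages() and set_features_packages() stage per-build portage overrides by appending one line per package to the two files that touch_package_settings() truncates once the build is done. With a hypothetical job for app-editors/vim-8.2 that enables the python USE flag, disables perl, and carries a test restriction, the appended lines would be:

    # /etc/portage/package.use/99_autounmask
    =app-editors/vim-8.2 python -perl

    # /etc/portage/package.env/99_env
    =app-editors/vim-8.2 notest.conf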
diff --git a/gosbs/tasks/scheduler/rebuild_db.py b/gosbs/tasks/scheduler/rebuild_db.py
index 2eee2b8..a96a18d 100644
--- a/gosbs/tasks/scheduler/rebuild_db.py
+++ b/gosbs/tasks/scheduler/rebuild_db.py
@@ -1,15 +1,5 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
from oslo_log import log as logging
diff --git a/gosbs/tasks/scheduler/update_db.py b/gosbs/tasks/scheduler/update_db.py
index 0800e28..49ed367 100644
--- a/gosbs/tasks/scheduler/update_db.py
+++ b/gosbs/tasks/scheduler/update_db.py
@@ -1,16 +1,7 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
+import pdb
from oslo_log import log as logging
from gosbs.common.portage_settings import get_portage_settings, clean_portage_settings
diff --git a/gosbs/tasks/scheduler/update_git.py b/gosbs/tasks/scheduler/update_git.py
index ac63966..f04e29c 100644
--- a/gosbs/tasks/scheduler/update_git.py
+++ b/gosbs/tasks/scheduler/update_git.py
@@ -1,15 +1,5 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
import sys
@@ -85,6 +75,13 @@ def update_repo_git_thread(context, service_uuid, repo_db):
package_db.save(context)
service_repo_db.status = 'update_db'
service_repo_db.save(context)
+ filters = {
+ 'repo_uuid' : repo_db.uuid,
+ }
+ for service_repo_db in objects.service_repo.ServiceRepoList.get_all(context, filters=filters):
+ if service_repo_db.service_uuid != service_uuid:
+ service_repo_db.status = 'waiting'
+ service_repo_db.save(context)
return True
def task(context, service_uuid):
diff --git a/gosbs/tasks/scheduler/update_repos.py b/gosbs/tasks/scheduler/update_repos.py
index a28689a..9d1d456 100644
--- a/gosbs/tasks/scheduler/update_repos.py
+++ b/gosbs/tasks/scheduler/update_repos.py
@@ -1,15 +1,5 @@
# Copyright 1999-2020 Gentoo Authors
-# Licensed under the Apache License, Version 2.0 (the "License"); you may
-# not use this file except in compliance with the License. You may obtain
-# a copy of the License at
-#
-# http://www.apache.org/licenses/LICENSE-2.0
-#
-# Unless required by applicable law or agreed to in writing, software
-# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
-# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
-# License for the specific language governing permissions and limitations
-# under the License.
+# Distributed under the terms of the GNU General Public License v2
from oslo_log import log as logging
from gosbs import objects
diff --git a/gosbs/utils.py b/gosbs/utils.py
index 11ee94d..386e702 100644
--- a/gosbs/utils.py
+++ b/gosbs/utils.py
@@ -15,8 +15,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/utils.py
-
"""Utilities and helper functions."""
import contextlib
diff --git a/gosbs/version.py b/gosbs/version.py
index 268086c..6dbd9d9 100644
--- a/gosbs/version.py
+++ b/gosbs/version.py
@@ -12,8 +12,6 @@
# License for the specific language governing permissions and limitations
# under the License.
-# Origin https://github.com/openstack/nova/blob/master/nova/version.py
-
import pbr.version
from gosbs.i18n import _LE