public inbox for gentoo-commits@lists.gentoo.org
* [gentoo-commits] proj/tinderbox-cluster:master commit in: etc/, pym/tbc/, gosbs/tasks/, gosbs/db/, gosbs/, gosbs/cmd/, bin/, conf/, ...
@ 2020-04-04 22:53 Magnus Granberg
From: Magnus Granberg @ 2020-04-04 22:53 UTC
  To: gentoo-commits

commit:     be80f7418991180567f0882902ca4151a635e42d
Author:     Magnus Granberg <zorry <AT> gentoo <DOT> org>
AuthorDate: Sat Apr  4 22:34:22 2020 +0000
Commit:     Magnus Granberg <zorry <AT> gentoo <DOT> org>
CommitDate: Sat Apr  4 22:44:59 2020 +0000
URL:        https://gitweb.gentoo.org/proj/tinderbox-cluster.git/commit/?id=be80f741

Rework the code to use OpenStack's libs
We use OpenStack's libs and Nova code as the base for the POC.
Most of the code to populate the db is done.

Signed-off-by: Magnus Granberg <zorry <AT> gentoo.org>
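
As a rough illustration of the OpenStack-style service layout this commit
moves to, a minimal entry point built on oslo.config, oslo.log and
oslo.service could look like the sketch below. The 'repopath' option is
taken from etc/gosbs.conf; the class names and the way the option is
registered are assumptions for illustration only and do not mirror
gosbs/cmd/scheduler.py line for line.

import sys

from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import service

CONF = cfg.CONF

# 'repopath' is set in etc/gosbs.conf; registering it here is an assumption
# about how the gosbs/conf/* modules wire it up.
CONF.register_opts([
    cfg.StrOpt('repopath', default='/home/repos',
               help='Directory where the ebuild repos are checked out.'),
])

LOG = logging.getLogger(__name__)


class SchedulerService(service.Service):
    """Stand-in for the real scheduler manager in gosbs/scheduler/."""

    def start(self):
        LOG.info("scheduler started, repopath=%s", CONF.repopath)


def main():
    # Register the oslo.log options before parsing the config files.
    logging.register_options(CONF)
    # Reads /etc/gosbs/gosbs.conf (and the command line) into CONF.
    CONF(sys.argv[1:], project='gosbs')
    logging.setup(CONF, 'gosbs')
    launcher = service.launch(CONF, SchedulerService(), workers=1)
    launcher.wait()


if __name__ == '__main__':
    main()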

 README.txt                                         |   17 +
 bin/tbc_guest_jobs                                 |   55 -
 bin/tbc_host_jobs                                  |   34 -
 conf/tbc.conf                                      |   15 -
 ebuild/tbc-9999.ebuild                             |   52 -
 etc/gosbs.conf                                     | 2583 +++++++++
 gosbs/__init__.py                                  |   35 +
 gosbs/baserpc.py                                   |   81 +
 gosbs/cmd/__init__.py                              |   20 +
 gosbs/cmd/scheduler.py                             |   45 +
 {pym/tbc => gosbs/common}/__init__.py              |    0
 gosbs/common/flags.py                              |  211 +
 gosbs/common/git.py                                |   99 +
 gosbs/common/portage_settings.py                   |   48 +
 gosbs/common/task.py                               |   70 +
 gosbs/conf/__init__.py                             |   47 +
 gosbs/conf/base.py                                 |   43 +
 gosbs/conf/database.py                             |  183 +
 gosbs/conf/keystone.py                             |   72 +
 gosbs/conf/netconf.py                              |   94 +
 gosbs/conf/notifications.py                        |  118 +
 gosbs/conf/opts.py                                 |   79 +
 gosbs/conf/paths.py                                |  106 +
 gosbs/conf/rpc.py                                  |   46 +
 gosbs/conf/scheduler.py                            |   37 +
 gosbs/conf/service.py                              |  169 +
 gosbs/conf/upgrade_levels.py                       |  210 +
 gosbs/conf/utils.py                                |   91 +
 gosbs/config.py                                    |   50 +
 gosbs/context.py                                   |  562 ++
 gosbs/db/__init__.py                               |   13 +
 gosbs/db/api.py                                    | 1891 +++++++
 gosbs/db/base.py                                   |   29 +
 gosbs/db/constants.py                              |   25 +
 {pym/tbc => gosbs/db/sqlalchemy}/__init__.py       |    0
 gosbs/db/sqlalchemy/api.py                         | 5897 ++++++++++++++++++++
 gosbs/db/sqlalchemy/models.py                      |  430 ++
 gosbs/db/sqlalchemy/types.py                       |   74 +
 gosbs/db/sqlalchemy/utils.py                       |  118 +
 gosbs/debugger.py                                  |   62 +
 gosbs/exception.py                                 | 2394 ++++++++
 gosbs/i18n.py                                      |   48 +
 gosbs/manager.py                                   |  149 +
 gosbs/middleware.py                                |   39 +
 gosbs/objects/__init__.py                          |   52 +
 gosbs/objects/base.py                              |  361 ++
 gosbs/objects/build_iuse.py                        |  280 +
 gosbs/objects/category.py                          |  278 +
 gosbs/objects/category_metadata.py                 |  278 +
 gosbs/objects/ebuild.py                            |  288 +
 gosbs/objects/ebuild_iuse.py                       |  280 +
 gosbs/objects/ebuild_keyword.py                    |  280 +
 gosbs/objects/ebuild_metadata.py                   |  282 +
 gosbs/objects/ebuild_restriction.py                |  281 +
 gosbs/objects/email.py                             |  269 +
 gosbs/objects/fields.py                            |   70 +
 gosbs/objects/flavor.py                            |  228 +
 gosbs/objects/image.py                             |  204 +
 gosbs/objects/keyword.py                           |  277 +
 gosbs/objects/package.py                           |  300 +
 gosbs/objects/package_email.py                     |  301 +
 gosbs/objects/package_metadata.py                  |  279 +
 gosbs/objects/project.py                           |  279 +
 gosbs/objects/project_build.py                     |  286 +
 gosbs/objects/project_metadata.py                  |  308 +
 gosbs/objects/project_repo.py                      |  295 +
 gosbs/objects/repo.py                              |  290 +
 gosbs/objects/restriction.py                       |  281 +
 gosbs/objects/service.py                           |  486 ++
 gosbs/objects/service_repo.py                      |  296 +
 gosbs/objects/task.py                              |  291 +
 gosbs/objects/use.py                               |  278 +
 gosbs/objects/user.py                              |  278 +
 gosbs/policies/__init__.py                         |   26 +
 gosbs/policies/base.py                             |   41 +
 gosbs/policies/hosts.py                            |   63 +
 gosbs/policies/services.py                         |   69 +
 gosbs/policy.py                                    |  246 +
 gosbs/profiler.py                                  |   51 +
 gosbs/rpc.py                                       |  448 ++
 gosbs/safe_utils.py                                |   41 +
 {pym/tbc => gosbs/scheduler}/__init__.py           |    0
 gosbs/scheduler/category.py                        |  113 +
 gosbs/scheduler/ebuild.py                          |  203 +
 gosbs/scheduler/email.py                           |   36 +
 gosbs/scheduler/manager.py                         |  141 +
 gosbs/scheduler/package.py                         |  180 +
 gosbs/scheduler/project.py                         |   35 +
 gosbs/scheduler/rpcapi.py                          |  127 +
 gosbs/service.py                                   |  331 ++
 {pym/tbc => gosbs/tasks}/__init__.py               |    0
 gosbs/tasks/scheduler/__init__.py                  |   18 +
 gosbs/tasks/scheduler/rebuild_db.py                |   52 +
 {pym/tbc => gosbs/tasks/scheduler/sub}/__init__.py |    0
 gosbs/tasks/scheduler/sub/build.py                 |   73 +
 gosbs/tasks/scheduler/update_db.py                 |   60 +
 gosbs/tasks/scheduler/update_git.py                |  103 +
 gosbs/tasks/scheduler/update_repos.py              |   23 +
 gosbs/utils.py                                     | 1358 +++++
 gosbs/version.py                                   |   90 +
 licenses/Apache-2.0                                |  201 +
 licenses/GPL-2                                     |  339 ++
 patches/portage.patch                              |  292 -
 patches/repoman.patch                              |   42 -
 pym/tbc/ConnectionManager.py                       |   22 -
 pym/tbc/build_depgraph.py                          |   65 -
 pym/tbc/build_job.py                               |  202 -
 pym/tbc/build_log.py                               |  383 --
 pym/tbc/buildquerydb.py                            |  102 -
 pym/tbc/check_setup.py                             |   74 -
 pym/tbc/db_mapping.py                              |  306 -
 pym/tbc/depclean.py                                |   53 -
 pym/tbc/flags.py                                   |  222 -
 pym/tbc/irk.py                                     |   28 -
 pym/tbc/jobs.py                                    |   80 -
 pym/tbc/log.py                                     |   38 -
 pym/tbc/old_cpv.py                                 |   43 -
 pym/tbc/package.py                                 |  409 --
 pym/tbc/qachecks.py                                |  100 -
 pym/tbc/readconf.py                                |   45 -
 pym/tbc/sqlquerys.py                               |  665 ---
 pym/tbc/sync.py                                    |  122 -
 pym/tbc/text.py                                    |   49 -
 pym/tbc/updatedb.py                                |  173 -
 requirements.txt                                   |   18 +
 setup.cfg                                          |   35 +
 setup.py                                           |   48 +-
 sql/data_dump.sql                                  |  193 -
 sql/structure_dump.sql                             | 1538 ++---
 129 files changed, 29264 insertions(+), 4948 deletions(-)

diff --git a/README.txt b/README.txt
new file mode 100644
index 0000000..da77900
--- /dev/null
+++ b/README.txt
@@ -0,0 +1,17 @@
+=============================
+Gentoo OpenStack Build System
+=============================
+
+This is a POC.
+We use OpenStack's libs and Nova as the code base.
+https://github.com/openstack/nova
+For building and for populating the db we use Portage's API.
+https://gitweb.gentoo.org/proj/portage.git
+Thanks to the communities of those teams.
+
+The code still needs a lot of work.
+We need to clean out the Nova code that is not needed.
+Most of the code to populate the db is done.
+The code for the build worker still needs to be done.
+
+https://wiki.gentoo.org/wiki/Project:Tinderbox-cluster
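
The README points at Portage's API for populating the db; the real
implementation lives in gosbs/objects/ and gosbs/scheduler/ in this commit.
A generic, hedged sketch of walking the tree with Portage's public dbapi
(the metadata keys roughly mirror the ebuild_keyword/ebuild_iuse/
ebuild_restriction objects above, but none of these function names come
from gosbs itself):

import portage

def walk_repo():
    # portdbapi for the ebuild repositories configured on this host
    portdb = portage.db[portage.root]["porttree"].dbapi
    for cp in portdb.cp_all():            # e.g. "dev-lang/python"
        for cpv in portdb.cp_list(cp):    # e.g. "dev-lang/python-3.6.10"
            keywords, iuse, restrict = portdb.aux_get(
                cpv, ["KEYWORDS", "IUSE", "RESTRICT"])
            yield cpv, keywords.split(), iuse.split(), restrict.split()

if __name__ == "__main__":
    # Print each version with its KEYWORDS; a real task would write rows
    # to the gosbs database instead.
    for cpv, keywords, iuse, restrict in walk_repo():
        print(cpv, keywords)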

diff --git a/bin/tbc_guest_jobs b/bin/tbc_guest_jobs
deleted file mode 100755
index 5614f07..0000000
--- a/bin/tbc_guest_jobs
+++ /dev/null
@@ -1,55 +0,0 @@
-#!/usr/bin/python3.4
-#
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-from tbc.readconf import read_config_settings
-from tbc.ConnectionManager import NewConnection
-from tbc.sqlquerys import get_config_id_fqdn, check_host_updatedb, update_deamon_status
-from tbc.check_setup import check_configure_guest
-from tbc.build_job import build_job_action
-from tbc.jobs import jobs_main
-from tbc.log import setup_logger, write_log
-from sqlalchemy.orm import sessionmaker
-import portage
-import sys
-import os
-import time
-
-def main():
-	repeat = True
-	tbc_settings = read_config_settings()
-
-	# setup the logger
-	logger = setup_logger(tbc_settings)
-
-	Session = sessionmaker(bind=NewConnection(tbc_settings))
-	session = Session()
-	config_id = get_config_id_fqdn(session, tbc_settings['hostname'])
-	write_log(session, "Job and build deamon started.", "info", config_id, 'main')
-	Status = 'Waiting'
-	update_deamon_status(session, Status, config_id)
-	msg = 'Status: %s Host: %s' % (Status, tbc_settings['hostname'],)
-	write_log(session, msg, "info", config_id, 'main')
-	init_build_job = build_job_action(config_id, session)
-	while repeat:
-		jobs_main(session, config_id)
-		if not check_configure_guest(session, config_id) or check_host_updatedb(session):
-			time.sleep(60)
-			continue
-		else:
-			Status = 'Runing'
-			update_deamon_status(session, Status, config_id)
-			msg = 'Status: %s Host: %s' % (Status, tbc_settings['hostname'],)
-			write_log(session, msg, "info", config_id, 'main')
-			init_build_job.procces_build_jobs()
-			Status = 'Waiting'
-			update_deamon_status(session, Status, config_id)
-			msg = 'Status: %s Host: %s' % (Status, tbc_settings['hostname'],)
-			write_log(session, msg, "info", config_id, 'main')
-			time.sleep(60)
-	conn.close
-
-if __name__ == "__main__":
-  main()

diff --git a/bin/tbc_host_jobs b/bin/tbc_host_jobs
deleted file mode 100755
index 50dfe76..0000000
--- a/bin/tbc_host_jobs
+++ /dev/null
@@ -1,34 +0,0 @@
-#!/usr/bin/python3.4
-#
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-
-from tbc.readconf import read_config_settings
-from tbc.jobs import jobs_main
-from tbc.ConnectionManager import NewConnection
-from tbc.sqlquerys import get_config_id_fqdn
-from tbc.log import setup_logger, write_log
-from sqlalchemy.orm import sessionmaker
-import time
-
-def main():
-	# Main
-	tbc_settings = read_config_settings()
-	# setup the logger
-	logger = setup_logger(tbc_settings)
-
-	Session = sessionmaker(bind=NewConnection(tbc_settings))
-	session = Session()
-	config_id = get_config_id_fqdn(session, tbc_settings['hostname'])
-	write_log(session, "Job deamon started", "info", config_id, 'main')
-	repeat = True
-	while repeat:
-		jobs_main(session, config_id)
-		repeat = False
-		time.sleep(60)
-	write_log(session, "Job deamon stoped", "info", config_id, 'main')
-
-if __name__ == "__main__":
-	main()
\ No newline at end of file

diff --git a/conf/tbc.conf b/conf/tbc.conf
deleted file mode 100644
index d1254f3..0000000
--- a/conf/tbc.conf
+++ /dev/null
@@ -1,15 +0,0 @@
-# The Sql Backend
-SQLBACKEND=mysql
-# The Sql Datebase
-SQLDB=tbc
-# Sql Host
-SQLHOST=localhost
-# Sql user
-SQLUSER=buildhost
-# Sql Password
-SQLPASSWD=buildhost
-# loging level
-#LOG=INFO
-# logfile
-#LOGFILE=/var/log/tbc.log
-

diff --git a/ebuild/tbc-9999.ebuild b/ebuild/tbc-9999.ebuild
deleted file mode 100644
index e705155..0000000
--- a/ebuild/tbc-9999.ebuild
+++ /dev/null
@@ -1,52 +0,0 @@
-# Copyright 1999-2016 Gentoo Foundation
-# Distributed under theterms of the GNU General Public License v2
-# $Id$
-
-EAPI="5"
-PYTHON_COMPAT=( python{2_7,3_4,3_5})
-
-inherit distutils-r1 git-2
-
-DESCRIPTION="TBC"
-HOMEPAGE="https://wiki.gentoo.org/wiki/Project:Tinderbox-cluster"
-SRC_URI=""
-LICENSE="GPL-2"
-KEYWORDS="~amd64"
-SLOT="0"
-IUSE="+mysql"
-
-RDEPEND=">=sys-apps/portage-2.3.2[${PYTHON_USEDEP}]
-	>=app-portage/repoman-2.3.0-r2[${PYTHON_USEDEP}]
-        dev-python/sqlalchemy[${PYTHON_USEDEP}]
-        dev-python/git-python[${PYTHON_USEDEP}]
-        mysql? ( dev-python/mysql-connector-python[${PYTHON_USEDEP}] )"
-
-DEPEND="${RDEPEND}
-        dev-python/setuptools[${PYTHON_USEDEP}]"
-
-EGIT_REPO_URI="https://anongit.gentoo.org/git/proj/tinderbox-cluster.git"
-PATCHES=(
-        "${S}"/patches/portage.patch
-        "${S}"/patches/repoman.patch
-)
-
-python_prepare_all() {
-        python_setup
-        einfo "Copying needed files from portage"
-        cp $(python_get_sitedir)/_emerge/actions.py ${S}/pym/tbc
-        cp $(python_get_sitedir)/_emerge/main.py ${S}/pym/tbc
-        cp $(python_get_sitedir)/_emerge/Scheduler.py ${S}/pym/tbc
-        cp $(python_get_sitedir)/repoman/main.py ${S}/pym/tbc/repoman.py
-        einfo "Done."
-        distutils-r1_python_prepare_all
-}
-
-src_install() {
-        dodir etc/tbc
-        insinto /etc/tbc
-        doins ${S}/conf/tbc.conf
-        dosbin ${S}/bin/tbc_host_jobs
-        dosbin  ${S}/bin/tbc_guest_jobs
-
-        distutils-r1_src_install
-}

diff --git a/etc/gosbs.conf b/etc/gosbs.conf
new file mode 100644
index 0000000..0d53587
--- /dev/null
+++ b/etc/gosbs.conf
@@ -0,0 +1,2583 @@
+[DEFAULT]
+
+#
+# From gosbs.conf
+#
+
+#
+# The IP address which the host is using to connect to the management network.
+#
+# Possible values:
+#
+# * String with valid IP address. Default is IPv4 address of this host.
+#
+# Related options:
+#
+# * metadata_host
+# * my_block_storage_ip
+# * routing_source_ip
+# * vpn_ip
+#  (string value)
+my_ip = 192.168.1.2
+
+#
+# Hostname, FQDN or IP address of this host.
+#
+# Used as:
+#
+# * the oslo.messaging queue name for nova-compute worker
+# * we use this value for the binding_host sent to neutron. This means if you
+# use
+#   a neutron agent, it should have the same value for host.
+# * cinder host attachment information
+#
+# Must be valid within AMQP key.
+#
+# Possible values:
+#
+# * String with hostname, FQDN or IP address. Default is hostname of this host.
+#  (string value)
+#host = <current_hostname>
+
+#
+# The directory where the Nova python modules are installed.
+#
+# This directory is used to store template files for networking and remote
+# console access. It is also the default path for other config options which
+# need to persist Nova internal data. It is very unlikely that you need to
+# change this option from its default value.
+#
+# Possible values:
+#
+# * The full path to a directory.
+#
+# Related options:
+#
+# * ``state_path``
+#  (string value)
+#pybasedir = <Path>
+
+#
+# The directory where the Nova binaries are installed.
+#
+# This option is only relevant if the networking capabilities from Nova are
+# used (see services below). Nova's networking capabilities are targeted to
+# be fully replaced by Neutron in the future. It is very unlikely that you need
+# to change this option from its default value.
+#
+# Possible values:
+#
+# * The full path to a directory.
+#  (string value)
+#bindir = /home/mthode/dev/openstack/openstack/nova/.tox/shared/local/bin
+
+#
+# The top-level directory for maintaining Nova's state.
+#
+# This directory is used to store Nova's internal state. It is used by a
+# variety of other config options which derive from this. In some scenarios
+# (for example migrations) it makes sense to use a storage location which is
+# shared between multiple compute hosts (for example via NFS). Unless the
+# option ``instances_path`` gets overwritten, this directory can grow very
+# large.
+#
+# Possible values:
+#
+# * The full path to a directory. Defaults to value provided in ``pybasedir``.
+#  (string value)
+#state_path = $pybasedir
+
+repopath = /home/repos
+
+#
+# This option allows setting an alternate timeout value for RPC calls
+# that have the potential to take a long time. If set, RPC calls to
+# other services will use this value for the timeout (in seconds)
+# instead of the global rpc_response_timeout value.
+#
+# Operations with RPC calls that utilize this value:
+#
+# * live migration
+#
+# Related options:
+#
+# * rpc_response_timeout
+#  (integer value)
+#long_rpc_timeout = 1800
+
+#
+# Maximum time in seconds since last check-in for up service
+#
+# Each compute node periodically updates their database status based on the
+# specified report interval. If the compute node hasn't updated the status
+# for more than service_down_time, then the compute node is considered down.
+#
+# Related Options:
+#
+# * report_interval (service_down_time should not be less than report_interval)
+# * scheduler.periodic_task_interval
+#  (integer value)
+#service_down_time = 60
+
+#
+# Enable periodic tasks.
+#
+# If set to true, this option allows services to periodically run tasks
+# on the manager.
+#
+# In case of running multiple schedulers or conductors you may want to run
+# periodic tasks on only one host - in this case disable this option for all
+# hosts but one.
+#  (boolean value)
+#periodic_enable = true
+
+#
+# Number of seconds to randomly delay when starting the periodic task
+# scheduler to reduce stampeding.
+#
+# When compute workers are restarted in unison across a cluster,
+# they all end up running the periodic tasks at the same time
+# causing problems for the external services. To mitigate this
+# behavior, periodic_fuzzy_delay option allows you to introduce a
+# random initial delay when starting the periodic task scheduler.
+#
+# Possible Values:
+#
+# * Any positive integer (in seconds)
+# * 0 : disable the random delay
+#  (integer value)
+# Minimum value: 0
+#periodic_fuzzy_delay = 60
+
+#
+# Number of workers for OpenStack API service. The default will be the number
+# of CPUs available.
+#
+# OpenStack API services can be configured to run as multi-process (workers).
+# This overcomes the problem of reduction in throughput when API request
+# concurrency increases. OpenStack API service will run in the specified
+# number of processes.
+#
+# Possible Values:
+#
+# * Any positive integer
+# * None (default value)
+#  (integer value)
+# Minimum value: 1
+#osapi_compute_workers = <None>
+
+#
+# Number of workers for metadata service. If not specified the number of
+# available CPUs will be used.
+#
+# The metadata service can be configured to run as multi-process (workers).
+# This overcomes the problem of reduction in throughput when API request
+# concurrency increases. The metadata service will run in the specified
+# number of processes.
+#
+# Possible Values:
+#
+# * Any positive integer
+# * None (default value)
+#  (integer value)
+# Minimum value: 1
+#metadata_workers = <None>
+
+#
+# This option specifies the driver to be used for the servicegroup service.
+#
+# ServiceGroup API in nova enables checking status of a compute node. When a
+# compute worker running the nova-compute daemon starts, it calls the join API
+# to join the compute group. Services like nova scheduler can query the
+# ServiceGroup API to check if a node is alive. Internally, the ServiceGroup
+# client driver automatically updates the compute worker status. There are
+# multiple backend implementations for this service: Database ServiceGroup
+# driver
+# and Memcache ServiceGroup driver.
+#
+# Possible Values:
+#
+#     * db : Database ServiceGroup driver
+#     * mc : Memcache ServiceGroup driver
+#
+# Related Options:
+#
+#     * service_down_time (maximum time since last check-in for up service)
+#  (string value)
+# Possible values:
+# db - <No description provided>
+# mc - <No description provided>
+#servicegroup_driver = db
+
+#
+# From oslo.log
+#
+
+# If set to true, the logging level will be set to DEBUG instead of the default
+# INFO level. (boolean value)
+# Note: This option can be changed without restarting.
+debug = true
+
+# The name of a logging configuration file. This file is appended to any
+# existing logging configuration files. For details about logging configuration
+# files, see the Python logging module documentation. Note that when logging
+# configuration files are used then all logging configuration is set in the
+# configuration file and other logging configuration options are ignored (for
+# example, logging_context_format_string). (string value)
+# Note: This option can be changed without restarting.
+# Deprecated group/name - [DEFAULT]/log_config
+#log_config_append = <None>
+
+# Defines the format string for %%(asctime)s in log records. Default:
+# %(default)s . This option is ignored if log_config_append is set. (string
+# value)
+#log_date_format = %Y-%m-%d %H:%M:%S
+
+# (Optional) Name of log file to send logging output to. If no default is set,
+# logging will go to stderr as defined by use_stderr. This option is ignored if
+# log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logfile
+#log_file = test.log
+
+# (Optional) The base directory used for relative log_file  paths. This option
+# is ignored if log_config_append is set. (string value)
+# Deprecated group/name - [DEFAULT]/logdir
+#log_dir = /var/log/nova
+
+# Uses logging handler designed to watch file system. When log file is moved or
+# removed this handler will open a new log file with specified path
+# instantaneously. It makes sense only if log_file option is specified and Linux
+# platform is used. This option is ignored if log_config_append is set. (boolean
+# value)
+#watch_log_file = false
+
+# Use syslog for logging. Existing syslog format is DEPRECATED and will be
+# changed later to honor RFC5424. This option is ignored if log_config_append is
+# set. (boolean value)
+#use_syslog = false
+
+# Enable journald for logging. If running in a systemd environment you may wish
+# to enable journal support. Doing so will use the journal native protocol which
+# includes structured metadata in addition to log messages.This option is
+# ignored if log_config_append is set. (boolean value)
+#use_journal = false
+
+# Syslog facility to receive log lines. This option is ignored if
+# log_config_append is set. (string value)
+#syslog_log_facility = LOG_USER
+
+# Use JSON formatting for logging. This option is ignored if log_config_append
+# is set. (boolean value)
+#use_json = false
+
+# Log output to standard error. This option is ignored if log_config_append is
+# set. (boolean value)
+use_stderr = true
+
+# Format string to use for log messages with context. (string value)
+#logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+# Format string to use for log messages when context is undefined. (string
+# value)
+#logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+# Additional data to append to log message when logging level for the message is
+# DEBUG. (string value)
+# logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+# Prefix each line of exception output with this format. (string value)
+#logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+# Defines the format string for %(user_identity)s that is used in
+# logging_context_format_string. (string value)
+#logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+# List of package logging levels in logger=LEVEL pairs. This option is ignored
+# if log_config_append is set. (list value)
+#default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,oslo_messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+# Enables or disables publication of error events. (boolean value)
+#publish_errors = false
+
+# The format for an instance that is passed with the log message. (string value)
+#instance_format = "[instance: %(uuid)s] "
+
+# The format for an instance UUID that is passed with the log message. (string
+# value)
+#instance_uuid_format = "[instance: %(uuid)s] "
+
+# Interval, number of seconds, of log rate limiting. (integer value)
+#rate_limit_interval = 0
+
+# Maximum number of logged messages per rate_limit_interval. (integer value)
+#rate_limit_burst = 0
+
+# Log level name used by rate limiting: CRITICAL, ERROR, INFO, WARNING, DEBUG or
+# empty string. Logs with level greater or equal to rate_limit_except_level are
+# not filtered. An empty string means that all levels are filtered. (string
+# value)
+#rate_limit_except_level = CRITICAL
+
+# Enables or disables fatal status of deprecations. (boolean value)
+#fatal_deprecations = false
+
+#
+# From oslo.messaging
+#
+
+# Size of RPC connection pool. (integer value)
+#rpc_conn_pool_size = 30
+
+# The pool size limit for connections expiration policy (integer value)
+#conn_pool_min_size = 2
+
+# The time-to-live in sec of idle connections in the pool (integer value)
+#conn_pool_ttl = 1200
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Possible values:
+# redis - <No description provided>
+# sentinel - <No description provided>
+# dummy - <No description provided>
+#rpc_zmq_matchmaker = redis
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Number of seconds to wait before all pending messages will be sent after
+# closing a socket. The default value of -1 specifies an infinite linger period.
+# The value of 0 specifies no linger period. Pending messages shall be discarded
+# immediately when the socket is closed. Positive values specify an upper bound
+# for the linger period. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
+#zmq_linger = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target (
+# < 0 means no timeout). (integer value)
+#zmq_target_expire = 300
+
+# Update period in seconds of a name service record about existing target.
+# (integer value)
+#zmq_target_update = 180
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = false
+
+# Use ROUTER remote proxy. (boolean value)
+#use_router_proxy = false
+
+# This option makes direct connections dynamic or static. It makes sense only
+# with use_router_proxy=False which means to use direct connections for direct
+# message types (ignored otherwise). (boolean value)
+#use_dynamic_connections = false
+
+# How many additional connections to a host will be made for failover reasons.
+# This option is actual only in dynamic connections mode. (integer value)
+#zmq_failover_connections = 2
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49153
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#rpc_zmq_serialization = json
+
+# This option configures round-robin mode in zmq socket. True means not keeping
+# a queue when server side disconnects. False means to keep queue and messages
+# even if server is disconnected, when the server appears we send all
+# accumulated messages to it. (boolean value)
+#zmq_immediate = true
+
+# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any
+# other negative value) means to skip any overrides and leave it to OS default;
+# 0 and 1 (or any other positive value) mean to disable and enable the option
+# respectively. (integer value)
+#zmq_tcp_keepalive = -1
+
+# The duration between two keepalive transmissions in idle condition. The unit
+# is platform dependent, for example, seconds in Linux, milliseconds in Windows
+# etc. The default value of -1 (or any other negative value and 0) means to skip
+# any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_idle = -1
+
+# The number of retransmissions to be carried out before declaring that remote
+# end is not available. The default value of -1 (or any other negative value and
+# 0) means to skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_cnt = -1
+
+# The duration between two successive keepalive retransmissions, if
+# acknowledgement to the previous keepalive transmission is not received. The
+# unit is platform dependent, for example, seconds in Linux, milliseconds in
+# Windows etc. The default value of -1 (or any other negative value and 0) means
+# to skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_intvl = -1
+
+# Maximum number of (green) threads to work concurrently. (integer value)
+#rpc_thread_pool_size = 100
+
+# Expiration timeout in seconds of a sent/received message after which it is not
+# tracked anymore by a client/server. (integer value)
+#rpc_message_ttl = 300
+
+# Wait for message acknowledgements from receivers. This mechanism works only
+# via proxy without PUB/SUB. (boolean value)
+#rpc_use_acks = false
+
+# Number of seconds to wait for an ack from a cast/call. After each retry
+# attempt this timeout is multiplied by some specified multiplier. (integer
+# value)
+#rpc_ack_timeout_base = 15
+
+# Number to multiply base ack timeout by after each retry attempt. (integer
+# value)
+#rpc_ack_timeout_multiplier = 2
+
+# Default number of message sending attempts in case of any problems occurred:
+# positive value N means at most N retries, 0 means no retries, None or -1 (or
+# any other negative values) mean to retry forever. This option is used only if
+# acknowledgments are enabled. (integer value)
+#rpc_retry_attempts = 3
+
+# List of publisher hosts SubConsumer can subscribe on. This option has higher
+# priority then the default publishers list taken from the matchmaker. (list
+# value)
+#subscribe_on =
+
+# Size of executor thread pool when executor is threading or eventlet. (integer
+# value)
+# Deprecated group/name - [DEFAULT]/rpc_thread_pool_size
+#executor_thread_pool_size = 64
+
+# Seconds to wait for a response from a call. (integer value)
+#rpc_response_timeout = 60
+
+# The network address and optional user credentials for connecting to the
+# messaging backend, in URL format. The expected format is:
+#
+# driver://[user:pass@]host:port[,[userN:passN@]hostN:portN]/virtual_host?query
+#
+# Example: rabbit://rabbitmq:password@127.0.0.1:5672//
+#
+# For full details on the fields in the URL see the documentation of
+# oslo_messaging.TransportURL at
+# https://docs.openstack.org/oslo.messaging/latest/reference/transport.html
+# (string value)
+transport_url = rabbit://gobsmq:GobsMQ20.@controller
+
+
+# The default exchange under which topics are scoped. May be overridden by an
+# exchange name specified in the transport_url option. (string value)
+#control_exchange = gobs
+
+#
+# From oslo.service.periodic_task
+#
+
+# Some periodic tasks can be run in a separate process. Should we run them here?
+# (boolean value)
+#run_external_periodic_tasks = true
+
+#
+# From oslo.service.service
+#
+
+# Enable eventlet backdoor.  Acceptable values are 0, <port>, and <start>:<end>,
+# where 0 results in listening on a random tcp port number; <port> results in
+# listening on the specified port number (and not enabling backdoor if that port
+# is in use); and <start>:<end> results in listening on the smallest unused port
+# number within the specified range of port numbers.  The chosen port is
+# displayed in the service's log file. (string value)
+#backdoor_port = <None>
+
+# Enable eventlet backdoor, using the provided path as a unix socket that can
+# receive connections. This option is mutually exclusive with 'backdoor_port' in
+# that only one should be provided. If both are provided then the existence of
+# this option overrides the usage of that option. (string value)
+#backdoor_socket = <None>
+
+# Enables or disables logging values of all registered options when starting a
+# service (at DEBUG level). (boolean value)
+log_options = true
+
+# Specify a timeout after which a gracefully shutdown server will exit. Zero
+# value means endless wait. (integer value)
+#graceful_shutdown_timeout = 60
+
+
+[api]
+#
+# Options under this group are used to define Nova API.
+
+#
+# From nova.conf
+#
+
+#
+# This determines the strategy to use for authentication: keystone or noauth2.
+# 'noauth2' is designed for testing only, as it does no actual credential
+# checking. 'noauth2' provides administrative credentials only if 'admin' is
+# specified as the username.
+#  (string value)
+# Possible values:
+# keystone - <No description provided>
+# noauth2 - <No description provided>
+auth_strategy = keystone
+
+#
+# When True, the 'X-Forwarded-For' header is treated as the canonical remote
+# address. When False (the default), the 'remote_address' header is used.
+#
+# You should only enable this if you have an HTML sanitizing proxy.
+#  (boolean value)
+#use_forwarded_for = false
+
+#
+# When gathering the existing metadata for a config drive, the EC2-style
+# metadata is returned for all versions that don't appear in this option.
+# As of the Liberty release, the available versions are:
+#
+# * 1.0
+# * 2007-01-19
+# * 2007-03-01
+# * 2007-08-29
+# * 2007-10-10
+# * 2007-12-15
+# * 2008-02-01
+# * 2008-09-01
+# * 2009-04-04
+#
+# The option is in the format of a single string, with each version separated
+# by a space.
+#
+# Possible values:
+#
+# * Any string that represents zero or more versions, separated by spaces.
+#  (string value)
+#config_drive_skip_versions = 1.0 2007-01-19 2007-03-01 2007-08-29 2007-10-10 2007-12-15 2008-02-01 2008-09-01
+
+#
+# A list of vendordata providers.
+#
+# vendordata providers are how deployers can provide metadata via configdrive
+# and metadata that is specific to their deployment. There are currently two
+# supported providers: StaticJSON and DynamicJSON.
+#
+# StaticJSON reads a JSON file configured by the flag vendordata_jsonfile_path
+# and places the JSON from that file into vendor_data.json and
+# vendor_data2.json.
+#
+# DynamicJSON is configured via the vendordata_dynamic_targets flag, which is
+# documented separately. For each of the endpoints specified in that flag, a
+# section is added to the vendor_data2.json.
+#
+# For more information on the requirements for implementing a vendordata
+# dynamic endpoint, please see the vendordata.rst file in the nova developer
+# reference.
+#
+# Possible values:
+#
+# * A list of vendordata providers, with StaticJSON and DynamicJSON being
+#   current options.
+#
+# Related options:
+#
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+#  (list value)
+#vendordata_providers = StaticJSON
+
+#
+# A list of targets for the dynamic vendordata provider. These targets are of
+# the form <name>@<url>.
+#
+# The dynamic vendordata provider collects metadata by contacting external REST
+# services and querying them for information about the instance. This behaviour
+# is documented in the vendordata.rst file in the nova developer reference.
+#  (list value)
+#vendordata_dynamic_targets =
+
+#
+# Path to an optional certificate file or CA bundle to verify dynamic
+# vendordata REST services ssl certificates against.
+#
+# Possible values:
+#
+# * An empty string, or a path to a valid certificate file
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+#  (string value)
+#vendordata_dynamic_ssl_certfile =
+
+#
+# Maximum wait time for an external REST service to connect.
+#
+# Possible values:
+#
+# * Any integer with a value greater than three (the TCP packet retransmission
+#   timeout). Note that instance start may be blocked during this wait time,
+#   so this value should be kept small.
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_read_timeout
+# * vendordata_dynamic_failure_fatal
+#  (integer value)
+# Minimum value: 3
+#vendordata_dynamic_connect_timeout = 5
+
+#
+# Maximum wait time for an external REST service to return data once connected.
+#
+# Possible values:
+#
+# * Any integer. Note that instance start is blocked during this wait time,
+#   so this value should be kept small.
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_failure_fatal
+#  (integer value)
+# Minimum value: 0
+#vendordata_dynamic_read_timeout = 5
+
+#
+# Should failures to fetch dynamic vendordata be fatal to instance boot?
+#
+# Related options:
+#
+# * vendordata_providers
+# * vendordata_dynamic_targets
+# * vendordata_dynamic_ssl_certfile
+# * vendordata_dynamic_connect_timeout
+# * vendordata_dynamic_read_timeout
+#  (boolean value)
+#vendordata_dynamic_failure_fatal = false
+
+#
+# This option is the time (in seconds) to cache metadata. When set to 0,
+# metadata caching is disabled entirely; this is generally not recommended for
+# performance reasons. Increasing this setting should improve response times
+# of the metadata API when under heavy load. Higher values may increase memory
+# usage, and result in longer times for host metadata changes to take effect.
+#  (integer value)
+# Minimum value: 0
+#metadata_cache_expiration = 15
+
+#
+# Cloud providers may store custom data in vendor data file that will then be
+# available to the instances via the metadata service, and to the rendering of
+# config-drive. The default class for this, JsonFileVendorData, loads this
+# information from a JSON file, whose path is configured by this option. If
+# there is no path set by this option, the class returns an empty dictionary.
+#
+# Possible values:
+#
+# * Any string representing the path to the data file, or an empty string
+#     (default).
+#  (string value)
+#vendordata_jsonfile_path = <None>
+
+#
+# As a query can potentially return many thousands of items, you can limit the
+# maximum number of items in a single response by setting this option.
+#  (integer value)
+# Minimum value: 0
+# Deprecated group/name - [DEFAULT]/osapi_max_limit
+#max_limit = 1000
+
+#
+# This string is prepended to the normal URL that is returned in links to the
+# OpenStack Compute API. If it is empty (the default), the URLs are returned
+# unchanged.
+#
+# Possible values:
+#
+# * Any string, including an empty string (the default).
+#  (string value)
+# Deprecated group/name - [DEFAULT]/osapi_compute_link_prefix
+#compute_link_prefix = <None>
+
+#
+# This string is prepended to the normal URL that is returned in links to
+# Glance resources. If it is empty (the default), the URLs are returned
+# unchanged.
+#
+# Possible values:
+#
+# * Any string, including an empty string (the default).
+#  (string value)
+# Deprecated group/name - [DEFAULT]/osapi_glance_link_prefix
+#glance_link_prefix = <None>
+
+#
+# When enabled, this will cause the API to only query cell databases
+# in which the tenant has mapped instances. This requires an additional
+# (fast) query in the API database before each list, but also
+# (potentially) limits the number of cell databases that must be queried
+# to provide the result. If you have a small number of cells, or tenants
+# are likely to have instances in all cells, then this should be
+# False. If you have many cells, especially if you confine tenants to a
+# small subset of those cells, this should be True.
+#  (boolean value)
+#instance_list_per_project_cells = false
+
+
+[cache]
+
+#
+# From nova.conf
+#
+
+# Prefix for building the configuration dictionary for the cache region. This
+# should not need to be changed unless there is another dogpile.cache region
+# with the same configuration name. (string value)
+#config_prefix = cache.oslo
+
+# Default TTL, in seconds, for any cached item in the dogpile.cache region. This
+# applies to any cached method that doesn't have an explicit cache expiration
+# time defined for it. (integer value)
+#expiration_time = 600
+
+# Cache backend module. For eventlet-based or environments with hundreds of
+# threaded servers, Memcache with pooling (oslo_cache.memcache_pool) is
+# recommended. For environments with less than 100 threaded servers, Memcached
+# (dogpile.cache.memcached) or Redis (dogpile.cache.redis) is recommended. Test
+# environments with a single instance of the server can use the
+# dogpile.cache.memory backend. (string value)
+# Possible values:
+# oslo_cache.memcache_pool - <No description provided>
+# oslo_cache.dict - <No description provided>
+# oslo_cache.mongo - <No description provided>
+# oslo_cache.etcd3gw - <No description provided>
+# dogpile.cache.memcached - <No description provided>
+# dogpile.cache.pylibmc - <No description provided>
+# dogpile.cache.bmemcached - <No description provided>
+# dogpile.cache.dbm - <No description provided>
+# dogpile.cache.redis - <No description provided>
+# dogpile.cache.memory - <No description provided>
+# dogpile.cache.memory_pickle - <No description provided>
+# dogpile.cache.null - <No description provided>
+#backend = dogpile.cache.null
+
+# Arguments supplied to the backend module. Specify this option once per
+# argument to be passed to the dogpile.cache backend. Example format:
+# "<argname>:<value>". (multi valued)
+#backend_argument =
+
+# Proxy classes to import that will affect the way the dogpile.cache backend
+# functions. See the dogpile.cache documentation on changing-backend-behavior.
+# (list value)
+#proxies =
+
+# Global toggle for caching. (boolean value)
+#enabled = false
+
+# Extra debugging from the cache backend (cache keys, get/set/delete/etc calls).
+# This is only really useful if you need to see the specific cache-backend
+# get/set/delete calls with the keys/values.  Typically this should be left set
+# to false. (boolean value)
+#debug_cache_backend = false
+
+# Memcache servers in the format of "host:port". (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (list value)
+#memcache_servers = localhost:11211
+
+# Number of seconds memcached server is considered dead before it is tried
+# again. (dogpile.cache.memcache and oslo_cache.memcache_pool backends only).
+# (integer value)
+#memcache_dead_retry = 300
+
+# Timeout in seconds for every call to a server. (dogpile.cache.memcache and
+# oslo_cache.memcache_pool backends only). (floating point value)
+#memcache_socket_timeout = 3.0
+
+# Max total number of open connections to every memcached server.
+# (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_maxsize = 10
+
+# Number of seconds a connection to memcached is held unused in the pool before
+# it is closed. (oslo_cache.memcache_pool backend only). (integer value)
+#memcache_pool_unused_timeout = 60
+
+# Number of seconds that an operation will wait to get a memcache client
+# connection. (integer value)
+#memcache_pool_connection_get_timeout = 10
+
+[database]
+
+#
+# From oslo.db
+#
+
+# If True, SQLite uses synchronous mode. (boolean value)
+#sqlite_synchronous = true
+
+# The back end to use for the database. (string value)
+# Deprecated group/name - [DEFAULT]/db_backend
+backend = sqlalchemy
+
+# The SQLAlchemy connection string to use to connect to the database. (string
+# value)
+# Deprecated group/name - [DEFAULT]/sql_connection
+# Deprecated group/name - [DATABASE]/sql_connection
+# Deprecated group/name - [sql]/connection
+connection = mysql+pymysql://gosbs:SGmA1GiDzZcVc9WA@controller/gosbs
+
+# The SQLAlchemy connection string to use to connect to the slave database.
+# (string value)
+#slave_connection = <None>
+
+# The SQL mode to be used for MySQL sessions. This option, including the
+# default, overrides any server-set SQL mode. To use whatever SQL mode is set by
+# the server configuration, set this to no value. Example: mysql_sql_mode=
+# (string value)
+#mysql_sql_mode = TRADITIONAL
+
+# If True, transparently enables support for handling MySQL Cluster (NDB).
+# (boolean value)
+#mysql_enable_ndb = false
+
+# Connections which have been present in the connection pool longer than this
+# number of seconds will be replaced with a new one the next time they are
+# checked out from the pool. (integer value)
+# Deprecated group/name - [DATABASE]/idle_timeout
+# Deprecated group/name - [database]/idle_timeout
+# Deprecated group/name - [DEFAULT]/sql_idle_timeout
+# Deprecated group/name - [DATABASE]/sql_idle_timeout
+# Deprecated group/name - [sql]/idle_timeout
+#connection_recycle_time = 3600
+
+
+# Maximum number of SQL connections to keep open in a pool. Setting a value of 0
+# indicates no limit. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_pool_size
+# Deprecated group/name - [DATABASE]/sql_max_pool_size
+#max_pool_size = 5
+
+# Maximum number of database connection retries during startup. Set to -1 to
+# specify an infinite retry count. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_retries
+# Deprecated group/name - [DATABASE]/sql_max_retries
+#max_retries = 10
+
+# Interval between retries of opening a SQL connection. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_retry_interval
+# Deprecated group/name - [DATABASE]/reconnect_interval
+#retry_interval = 10
+
+# If set, use this value for max_overflow with SQLAlchemy. (integer value)
+# Deprecated group/name - [DEFAULT]/sql_max_overflow
+# Deprecated group/name - [DATABASE]/sqlalchemy_max_overflow
+#max_overflow = 50
+
+# Verbosity of SQL debugging information: 0=None, 100=Everything. (integer
+# value)
+# Minimum value: 0
+# Maximum value: 100
+# Deprecated group/name - [DEFAULT]/sql_connection_debug
+#connection_debug = 0
+
+# Add Python stack traces to SQL as comment strings. (boolean value)
+# Deprecated group/name - [DEFAULT]/sql_connection_trace
+#connection_trace = false
+
+# If set, use this value for pool_timeout with SQLAlchemy. (integer value)
+# Deprecated group/name - [DATABASE]/sqlalchemy_pool_timeout
+#pool_timeout = <None>
+
+# Enable the experimental use of database reconnect on connection lost. (boolean
+# value)
+#use_db_reconnect = false
+
+# Seconds between retries of a database transaction. (integer value)
+#db_retry_interval = 1
+
+# If True, increases the interval between retries of a database operation up to
+# db_max_retry_interval. (boolean value)
+#db_inc_retry_interval = true
+
+# If db_inc_retry_interval is set, the maximum seconds between retries of a
+# database operation. (integer value)
+#db_max_retry_interval = 10
+
+# Maximum retries in case of connection error or deadlock error before error is
+# raised. Set to -1 to specify an infinite retry count. (integer value)
+#db_max_retries = 20
+
+# Optional URL parameters to append onto the connection URL at connect time;
+# specify as param1=value1&param2=value2&... (string value)
+#connection_parameters =
+
+[healthcheck]
+
+#
+# From oslo.middleware
+#
+
+# Show more detailed information as part of the response (boolean value)
+#detailed = false
+
+# Additional backends that can perform health checks and report that information
+# back as part of a request. (list value)
+#backends =
+
+# Check the presence of a file to determine if an application is running on a
+# port. Used by DisableByFileHealthcheck plugin. (string value)
+#disable_by_file_path = <None>
+
+# Check the presence of a file based on a port to determine if an application is
+# running on a port. Expects a "port:path" list of strings. Used by
+# DisableByFilesPortsHealthcheck plugin. (list value)
+#disable_by_file_paths =
+
+
+[key_manager]
+
+#
+# From nova.conf
+#
+
+#
+# Fixed key returned by key manager, specified in hex.
+#
+# Possible values:
+#
+# * Empty string or a key in hex value
+#  (string value)
+#fixed_key = <None>
+
+# Specify the key manager implementation. Options are "barbican" and "vault".
+# Default is  "barbican". Will support the  values earlier set using
+# [key_manager]/api_class for some time. (string value)
+# Deprecated group/name - [key_manager]/api_class
+#backend = barbican
+
+# The type of authentication credential to create. Possible values are 'token',
+# 'password', 'keystone_token', and 'keystone_password'. Required if no context
+# is passed to the credential factory. (string value)
+#auth_type = <None>
+
+# Token for authentication. Required for 'token' and 'keystone_token' auth_type
+# if no context is passed to the credential factory. (string value)
+#token = <None>
+
+# Username for authentication. Required for 'password' auth_type. Optional for
+# the 'keystone_password' auth_type. (string value)
+#username = <None>
+
+# Password for authentication. Required for 'password' and 'keystone_password'
+# auth_type. (string value)
+#password = <None>
+
+# Use this endpoint to connect to Keystone. (string value)
+#auth_url = <None>
+
+# User ID for authentication. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#user_id = <None>
+
+# User's domain ID for authentication. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#user_domain_id = <None>
+
+# User's domain name for authentication. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#user_domain_name = <None>
+
+# Trust ID for trust scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#trust_id = <None>
+
+# Domain ID for domain scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#domain_id = <None>
+
+# Domain name for domain scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#domain_name = <None>
+
+# Project ID for project scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_id = <None>
+
+# Project name for project scoping. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_name = <None>
+
+# Project's domain ID for project. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_domain_id = <None>
+
+# Project's domain name for project. Optional for 'keystone_token' and
+# 'keystone_password' auth_type. (string value)
+#project_domain_name = <None>
+
+# Allow fetching a new token if the current one is going to expire. Optional for
+# 'keystone_token' and 'keystone_password' auth_type. (boolean value)
+#reauthenticate = true
+
+
+[keystone]
+# Configuration options for the identity service
+
+#
+# From nova.conf
+#
+
+# PEM encoded Certificate Authority to use when verifying HTTPs connections.
+# (string value)
+#cafile = <None>
+
+# PEM encoded client certificate cert file (string value)
+#certfile = <None>
+
+# PEM encoded client certificate key file (string value)
+#keyfile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# Timeout value for http requests (integer value)
+#timeout = <None>
+
+# Collect per-API call timing information. (boolean value)
+#collect_timing = false
+
+# Log requests to multiple loggers. (boolean value)
+#split_loggers = false
+
+# The default service_type for endpoint URL discovery. (string value)
+#service_type = identity
+
+# The default service_name for endpoint URL discovery. (string value)
+#service_name = <None>
+
+# List of interfaces, in order of preference, for endpoint URL. (list value)
+#valid_interfaces = internal
+
+# The default region_name for endpoint URL discovery. (string value)
+region_name = RegionOne
+
+# API version of the admin Identity API endpoint. (string value)
+auth_version = 2
+
+identity_interface = internal
+# Always use this endpoint URL for requests for this client. NOTE: The
+# unversioned endpoint should be specified here; to request a particular API
+# version, use the `version`, `min-version`, and/or `max-version` options.
+# (string value)
+#endpoint_override = <None>
+auth_url = http://controller:35357
+auth_type = password
+project_domain_name = default
+user_domain_name = default
+project_id = b422264d56ae485f8e8e7abbe83ae781
+username = gobs
+password = gobs20.
+
+[keystone_authtoken]
+
+#
+# From keystonemiddleware.auth_token
+#
+
+# Complete "public" Identity API endpoint. This endpoint should not be an
+# "admin" endpoint, as it should be accessible by all end users. Unauthenticated
+# clients are redirected to this endpoint to authenticate. Although this
+# endpoint should ideally be unversioned, client support in the wild varies. If
+# you're using a versioned v2 endpoint here, then this should *not* be the same
+# endpoint the service user utilizes for validating tokens, because normal end
+# users may not be able to reach that endpoint. (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_uri
+#www_authenticate_uri = <None>
+
+# DEPRECATED: Complete "public" Identity API endpoint. This endpoint should not
+# be an "admin" endpoint, as it should be accessible by all end users.
+# Unauthenticated clients are redirected to this endpoint to authenticate.
+# Although this endpoint should ideally be unversioned, client support in the
+# wild varies. If you're using a versioned v2 endpoint here, then this should
+# *not* be the same endpoint the service user utilizes for validating tokens,
+# because normal end users may not be able to reach that endpoint. This option
+# is deprecated in favor of www_authenticate_uri and will be removed in the S
+# release. (string value)
+# This option is deprecated for removal since Queens.
+# Its value may be silently ignored in the future.
+# Reason: The auth_uri option is deprecated in favor of www_authenticate_uri and
+# will be removed in the S release.
+#auth_uri = <None>
+
+# API version of the admin Identity API endpoint. (string value)
+#auth_version = <None>
+
+# Do not handle authorization requests within the middleware, but delegate the
+# authorization decision to downstream WSGI components. (boolean value)
+#delay_auth_decision = false
+
+# Request timeout value for communicating with Identity API server. (integer
+# value)
+#http_connect_timeout = <None>
+
+# How many times to retry the request when communicating with the Identity API
+# server. (integer value)
+#http_request_max_retries = 3
+
+# Request environment key where the Swift cache object is stored. When
+# auth_token middleware is deployed with a Swift cache, use this option to have
+# the middleware share a caching backend with swift. Otherwise, use the
+# ``memcached_servers`` option instead. (string value)
+#cache = <None>
+
+# Required if identity server requires client certificate (string value)
+#certfile = <None>
+
+# Required if identity server requires client certificate (string value)
+#keyfile = <None>
+
+# A PEM encoded Certificate Authority to use when verifying HTTPS connections.
+# Defaults to system CAs. (string value)
+#cafile = <None>
+
+# Verify HTTPS connections. (boolean value)
+#insecure = false
+
+# The region in which the identity server can be found. (string value)
+#region_name = <None>
+
+# Optionally specify a list of memcached server(s) to use for caching. If left
+# undefined, tokens will instead be cached in-process. (list value)
+# Deprecated group/name - [keystone_authtoken]/memcache_servers
+#memcached_servers = <None>
+
+# In order to prevent excessive effort spent validating tokens, the middleware
+# caches previously-seen tokens for a configurable duration (in seconds). Set to
+# -1 to disable caching completely. (integer value)
+#token_cache_time = 300
+
+# (Optional) If defined, indicate whether token data should be authenticated or
+# authenticated and encrypted. If MAC, token data is authenticated (with HMAC)
+# in the cache. If ENCRYPT, token data is encrypted and authenticated in the
+# cache. If the value is not one of these options or empty, auth_token will
+# raise an exception on initialization. (string value)
+# Possible values:
+# None - <No description provided>
+# MAC - <No description provided>
+# ENCRYPT - <No description provided>
+#memcache_security_strategy = None
+
+# (Optional, mandatory if memcache_security_strategy is defined) This string is
+# used for key derivation. (string value)
+#memcache_secret_key = <None>
+
+# (Optional) Number of seconds memcached server is considered dead before it is
+# tried again. (integer value)
+#memcache_pool_dead_retry = 300
+
+# (Optional) Maximum total number of open connections to every memcached server.
+# (integer value)
+#memcache_pool_maxsize = 10
+
+# (Optional) Socket timeout in seconds for communicating with a memcached
+# server. (integer value)
+#memcache_pool_socket_timeout = 3
+
+# (Optional) Number of seconds a connection to memcached is held unused in the
+# pool before it is closed. (integer value)
+#memcache_pool_unused_timeout = 60
+
+# (Optional) Number of seconds that an operation will wait to get a memcached
+# client connection from the pool. (integer value)
+#memcache_pool_conn_get_timeout = 10
+
+# (Optional) Use the advanced (eventlet safe) memcached client pool. The
+# advanced pool will only work under python 2.x. (boolean value)
+#memcache_use_advanced_pool = false
+
+# (Optional) Indicate whether to set the X-Service-Catalog header. If False,
+# middleware will not ask for service catalog on token validation and will not
+# set the X-Service-Catalog header. (boolean value)
+#include_service_catalog = true
+
+# Used to control the use and type of token binding. Can be set to: "disabled"
+# to not check token binding. "permissive" (default) to validate binding
+# information if the bind type is of a form known to the server and ignore it if
+# not. "strict" like "permissive" but if the bind type is unknown the token will
+# be rejected. "required" any form of token binding is needed to be allowed.
+# Finally the name of a binding method that must be present in tokens. (string
+# value)
+#enforce_token_bind = permissive
+
+# A choice of roles that must be present in a service token. Service tokens are
+# allowed to request that an expired token can be used, so this check should
+# tightly control that only actual services send this token. Roles here are
+# applied as an ANY check, so at least one role in this list must be present.
+# For backwards compatibility reasons this currently only affects the
+# allow_expired check. (list value)
+#service_token_roles = service
+
+# For backwards compatibility reasons, valid service tokens that fail the
+# service_token_roles check are still treated as valid. Setting this to true
+# will become the default in a future release and should be enabled if possible.
+# (boolean value)
+service_token_roles_required = true
+
+# Authentication type to load (string value)
+# Deprecated group/name - [keystone_authtoken]/auth_plugin
+#auth_type = <None>
+
+# Config Section from which to load plugin specific options (string value)
+#auth_section = <None>
+#auth_uri = http://controller:5001
+#auth_url = http://controller:35357
+#auth_type = password
+#project_domain_name = default
+#user_domain_name = default
+#project_name = gobsproject
+#username = gobs
+#password = gobs20.
+
+[notifications]
+#
+# Most of the actions in Nova which manipulate the system state generate
+# notifications which are posted to the messaging component (e.g. RabbitMQ) and
+# can be consumed by any service outside of OpenStack. More technical details
+# at https://docs.openstack.org/nova/latest/reference/notifications.html
+
+#
+# From nova.conf
+#
+
+#
+# If set, send compute.instance.update notifications on
+# instance state changes.
+#
+# Please refer to
+# https://docs.openstack.org/nova/latest/reference/notifications.html for
+# additional information on notifications.
+#
+# Possible values:
+#
+# * None - no notifications
+# * "vm_state" - notifications are sent with VM state transition information in
+#   the ``old_state`` and ``state`` fields. The ``old_task_state`` and
+#   ``new_task_state`` fields will be set to the current task_state of the
+#   instance.
+# * "vm_and_task_state" - notifications are sent with VM and task state
+#   transition information.
+#  (string value)
+# Possible values:
+# <None> - <No description provided>
+# vm_state - <No description provided>
+# vm_and_task_state - <No description provided>
+#notify_on_state_change = <None>
+
+# Default notification level for outgoing notifications. (string value)
+# Possible values:
+# DEBUG - <No description provided>
+# INFO - <No description provided>
+# WARN - <No description provided>
+# ERROR - <No description provided>
+# CRITICAL - <No description provided>
+# Deprecated group/name - [DEFAULT]/default_notification_level
+#default_level = INFO
+
+#
+# Specifies which notification format shall be used by nova.
+#
+# The default value is fine for most deployments and rarely needs to be changed.
+# This value can be set to 'versioned' once the infrastructure moves closer to
+# consuming the newer format of notifications. After this occurs, this option
+# will be removed.
+#
+# Note that notifications can be completely disabled by setting ``driver=noop``
+# in the ``[oslo_messaging_notifications]`` group.
+#
+# Possible values:
+#
+# * unversioned: Only the legacy unversioned notifications are emitted.
+# * versioned: Only the new versioned notifications are emitted.
+# * both: Both the legacy unversioned and the new versioned notifications are
+#   emitted. (Default)
+#
+# The list of versioned notifications is visible in
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+#  (string value)
+# Possible values:
+# unversioned - <No description provided>
+# versioned - <No description provided>
+# both - <No description provided>
+#notification_format = versioned
+
+#
+# Specifies the topics for the versioned notifications issued by nova.
+#
+# The default value is fine for most deployments and rarely needs to be changed.
+# However, if you have a third-party service that consumes versioned
+# notifications, it might be worth getting a topic for that service.
+# Nova will send a message containing a versioned notification payload to each
+# topic queue in this list.
+#
+# The list of versioned notifications is visible in
+# https://docs.openstack.org/nova/latest/reference/notifications.html
+#  (list value)
+#versioned_notifications_topics = versioned_notifications
+
+#
+# If enabled, include block device information in the versioned notification
+# payload. Sending block device information is disabled by default as providing
+# that information can incur some overhead on the system since the information
+# may need to be loaded from the database.
+#  (boolean value)
+#bdms_in_notifications = false
+
+[oslo_concurrency]
+
+#
+# From oslo.concurrency
+#
+
+# Enables or disables inter-process locks. (boolean value)
+#disable_process_locking = false
+
+# Directory to use for lock files.  For security, the specified directory should
+# only be writable by the user running the processes that need locking. Defaults
+# to environment variable OSLO_LOCK_PATH. If external locks are used, a lock
+# path must be set. (string value)
+#lock_path = <None>
+
+
+[oslo_messaging_amqp]
+
+#
+# From oslo.messaging
+#
+
+# Name for the AMQP container. Must be globally unique. Defaults to a generated
+# UUID (string value)
+#container_name = <None>
+
+# Timeout for inactive connections (in seconds) (integer value)
+#idle_timeout = 0
+
+# Debug: dump AMQP frames to stdout (boolean value)
+#trace = false
+
+# Attempt to connect via SSL. If no other ssl-related parameters are given, it
+# will use the system's CA-bundle to verify the server's certificate. (boolean
+# value)
+#ssl = false
+
+# CA certificate PEM file used to verify the server's certificate (string value)
+#ssl_ca_file =
+
+# Self-identifying certificate PEM file for client authentication (string value)
+#ssl_cert_file =
+
+# Private key PEM file used to sign ssl_cert_file certificate (optional) (string
+# value)
+#ssl_key_file =
+
+# Password for decrypting ssl_key_file (if encrypted) (string value)
+#ssl_key_password = <None>
+
+# By default SSL checks that the name in the server's certificate matches the
+# hostname in the transport_url. In some configurations it may be preferable to
+# use the virtual hostname instead, for example if the server uses the Server
+# Name Indication TLS extension (rfc6066) to provide a certificate per virtual
+# host. Set ssl_verify_vhost to True if the server's SSL certificate uses the
+# virtual host name instead of the DNS name. (boolean value)
+#ssl_verify_vhost = false
+
+# DEPRECATED: Accept clients using either SSL or plain TCP (boolean value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Not applicable - not a SSL server
+#allow_insecure_clients = false
+
+# Space separated list of acceptable SASL mechanisms (string value)
+#sasl_mechanisms =
+
+# Path to directory that contains the SASL configuration (string value)
+#sasl_config_dir =
+
+# Name of configuration file (without .conf suffix) (string value)
+#sasl_config_name =
+
+# SASL realm to use if no realm present in username (string value)
+#sasl_default_realm =
+
+# DEPRECATED: User name for message broker authentication (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use configuration option transport_url to provide the username.
+#username =
+
+# DEPRECATED: Password for message broker authentication (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Should use configuration option transport_url to provide the password.
+#password =
+
+# Seconds to pause before attempting to re-connect. (integer value)
+# Minimum value: 1
+#connection_retry_interval = 1
+
+# Increase the connection_retry_interval by this many seconds after each
+# unsuccessful failover attempt. (integer value)
+# Minimum value: 0
+#connection_retry_backoff = 2
+
+# Maximum limit for connection_retry_interval + connection_retry_backoff
+# (integer value)
+# Minimum value: 1
+#connection_retry_interval_max = 30
+
+# Time to pause between re-connecting an AMQP 1.0 link that failed due to a
+# recoverable error. (integer value)
+# Minimum value: 1
+#link_retry_delay = 10
+
+# The maximum number of attempts to re-send a reply message which failed due to
+# a recoverable error. (integer value)
+# Minimum value: -1
+#default_reply_retry = 0
+
+# The deadline for an rpc reply message delivery. (integer value)
+# Minimum value: 5
+#default_reply_timeout = 30
+
+# The deadline for an rpc cast or call message delivery. Only used when caller
+# does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_send_timeout = 30
+
+# The deadline for a sent notification message delivery. Only used when caller
+# does not provide a timeout expiry. (integer value)
+# Minimum value: 5
+#default_notify_timeout = 30
+
+# The duration to schedule a purge of idle sender links. Detach link after
+# expiry. (integer value)
+# Minimum value: 1
+#default_sender_link_timeout = 600
+
+# Indicates the addressing mode used by the driver.
+# Permitted values:
+# 'legacy'   - use legacy non-routable addressing
+# 'routable' - use routable addresses
+# 'dynamic'  - use legacy addresses if the message bus does not support routing,
+# otherwise use routable addressing (string value)
+#addressing_mode = dynamic
+
+# Enable virtual host support for those message buses that do not natively
+# support virtual hosting (such as qpidd). When set to true the virtual host
+# name will be added to all message bus addresses, effectively creating a
+# private 'subnet' per virtual host. Set to False if the message bus supports
+# virtual hosting using the 'hostname' field in the AMQP 1.0 Open performative
+# as the name of the virtual host. (boolean value)
+#pseudo_vhost = true
+
+# address prefix used when sending to a specific server (string value)
+#server_request_prefix = exclusive
+
+# address prefix used when broadcasting to all servers (string value)
+#broadcast_prefix = broadcast
+
+# address prefix when sending to any server in group (string value)
+#group_request_prefix = unicast
+
+# Address prefix for all generated RPC addresses (string value)
+#rpc_address_prefix = openstack.org/om/rpc
+
+# Address prefix for all generated Notification addresses (string value)
+#notify_address_prefix = openstack.org/om/notify
+
+# Appended to the address prefix when sending a fanout message. Used by the
+# message bus to identify fanout messages. (string value)
+#multicast_address = multicast
+
+# Appended to the address prefix when sending to a particular RPC/Notification
+# server. Used by the message bus to identify messages sent to a single
+# destination. (string value)
+#unicast_address = unicast
+
+# Appended to the address prefix when sending to a group of consumers. Used by
+# the message bus to identify messages that should be delivered in a round-robin
+# fashion across consumers. (string value)
+#anycast_address = anycast
+
+# Exchange name used in notification addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_notification_exchange if set
+# else control_exchange if set
+# else 'notify' (string value)
+#default_notification_exchange = <None>
+
+# Exchange name used in RPC addresses.
+# Exchange name resolution precedence:
+# Target.exchange if set
+# else default_rpc_exchange if set
+# else control_exchange if set
+# else 'rpc' (string value)
+#default_rpc_exchange = <None>
+
+# Window size for incoming RPC Reply messages. (integer value)
+# Minimum value: 1
+#reply_link_credit = 200
+
+# Window size for incoming RPC Request messages (integer value)
+# Minimum value: 1
+#rpc_server_credit = 100
+
+# Window size for incoming Notification messages (integer value)
+# Minimum value: 1
+#notify_server_credit = 100
+
+# Send messages of this type pre-settled.
+# Pre-settled messages will not receive acknowledgement
+# from the peer. Note well: pre-settled messages may be
+# silently discarded if the delivery fails.
+# Permitted values:
+# 'rpc-call' - send RPC Calls pre-settled
+# 'rpc-reply'- send RPC Replies pre-settled
+# 'rpc-cast' - Send RPC Casts pre-settled
+# 'notify'   - Send Notifications pre-settled
+#  (multi valued)
+#pre_settled = rpc-cast
+#pre_settled = rpc-reply
+
+
+[oslo_messaging_kafka]
+
+#
+# From oslo.messaging
+#
+
+# DEPRECATED: Default Kafka broker Host (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#kafka_default_host = localhost
+
+# DEPRECATED: Default Kafka broker Port (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#kafka_default_port = 9092
+
+# Max fetch bytes of Kafka consumer (integer value)
+#kafka_max_fetch_bytes = 1048576
+
+# Default timeout(s) for Kafka consumers (floating point value)
+#kafka_consumer_timeout = 1.0
+
+# DEPRECATED: Pool Size for Kafka Consumers (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#pool_size = 10
+
+# DEPRECATED: The pool size limit for connections expiration policy (integer
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#conn_pool_min_size = 2
+
+# DEPRECATED: The time-to-live in sec of idle connections in the pool (integer
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Driver no longer uses connection pool.
+#conn_pool_ttl = 1200
+
+# Group id for Kafka consumer. Consumers in one group will coordinate message
+# consumption (string value)
+#consumer_group = oslo_messaging_consumer
+
+# Upper bound on the delay for KafkaProducer batching in seconds (floating point
+# value)
+#producer_batch_timeout = 0.0
+
+# Size of batch for the producer async send (integer value)
+#producer_batch_size = 16384
+
+# Enable asynchronous consumer commits (boolean value)
+#enable_auto_commit = false
+
+# The maximum number of records returned in a poll call (integer value)
+#max_poll_records = 500
+
+# Protocol used to communicate with brokers (string value)
+# Possible values:
+# PLAINTEXT - <No description provided>
+# SASL_PLAINTEXT - <No description provided>
+# SSL - <No description provided>
+# SASL_SSL - <No description provided>
+#security_protocol = PLAINTEXT
+
+# Mechanism when security protocol is SASL (string value)
+#sasl_mechanism = PLAIN
+
+# CA certificate PEM file used to verify the server certificate (string value)
+#ssl_cafile =
+
+
+[oslo_messaging_notifications]
+
+#
+# From oslo.messaging
+#
+
+# The driver(s) to handle sending notifications. Possible values are messaging,
+# messagingv2, routing, log, test, noop (multi valued)
+# Deprecated group/name - [DEFAULT]/notification_driver
+#driver =
+
+# A URL representing the messaging driver to use for notifications. If not set,
+# we fall back to the same configuration used for RPC. (string value)
+# Deprecated group/name - [DEFAULT]/notification_transport_url
+#transport_url = <None>
+
+# AMQP topic used for OpenStack notifications. (list value)
+# Deprecated group/name - [rpc_notifier2]/topics
+# Deprecated group/name - [DEFAULT]/notification_topics
+#topics = notifications
+
+# The maximum number of attempts to re-send a notification message which failed
+# to be delivered due to a recoverable error. 0 - No retry, -1 - indefinite
+# (integer value)
+#retry = -1
+
+
+[oslo_messaging_rabbit]
+
+#
+# From oslo.messaging
+#
+
+# Use durable queues in AMQP. (boolean value)
+# Deprecated group/name - [DEFAULT]/amqp_durable_queues
+# Deprecated group/name - [DEFAULT]/rabbit_durable_queues
+#amqp_durable_queues = false
+
+# Auto-delete queues in AMQP. (boolean value)
+#amqp_auto_delete = false
+
+# Connect over SSL. (boolean value)
+# Deprecated group/name - [oslo_messaging_rabbit]/rabbit_use_ssl
+#ssl = false
+
+# SSL version to use (valid only if SSL enabled). Valid values are TLSv1 and
+# SSLv23. SSLv2, SSLv3, TLSv1_1, and TLSv1_2 may be available on some
+# distributions. (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_version
+#ssl_version =
+
+# SSL key file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_keyfile
+#ssl_key_file =
+
+# SSL cert file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_certfile
+#ssl_cert_file =
+
+# SSL certification authority file (valid only if SSL enabled). (string value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_ssl_ca_certs
+#ssl_ca_file =
+
+# How long to wait before reconnecting in response to an AMQP consumer cancel
+# notification. (floating point value)
+#kombu_reconnect_delay = 1.0
+
+# EXPERIMENTAL: Possible values are: gzip, bz2. If not set compression will not
+# be used. This option may not be available in future versions. (string value)
+#kombu_compression = <None>
+
+# How long to wait for a missing client before abandoning the attempt to send
+# it its replies. This value should not be longer than rpc_response_timeout.
+# (integer value)
+# Deprecated group/name - [oslo_messaging_rabbit]/kombu_reconnect_timeout
+#kombu_missing_consumer_retry_timeout = 60
+
+# Determines how the next RabbitMQ node is chosen in case the one we are
+# currently connected to becomes unavailable. Takes effect only if more than one
+# RabbitMQ node is provided in config. (string value)
+# Possible values:
+# round-robin - <No description provided>
+# shuffle - <No description provided>
+#kombu_failover_strategy = round-robin
+
+# DEPRECATED: The RabbitMQ broker address where a single node is used. (string
+# value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_host = localhost
+
+# DEPRECATED: The RabbitMQ broker port where a single node is used. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_port = 5672
+
+# DEPRECATED: RabbitMQ HA cluster host:port pairs. (list value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_hosts = $rabbit_host:$rabbit_port
+
+# DEPRECATED: The RabbitMQ userid. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_userid = guest
+
+# DEPRECATED: The RabbitMQ password. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_password = guest
+
+# The RabbitMQ login method. (string value)
+# Possible values:
+# PLAIN - <No description provided>
+# AMQPLAIN - <No description provided>
+# RABBIT-CR-DEMO - <No description provided>
+#rabbit_login_method = AMQPLAIN
+
+# DEPRECATED: The RabbitMQ virtual host. (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+# Reason: Replaced by [DEFAULT]/transport_url
+#rabbit_virtual_host = /
+
+# How frequently to retry connecting with RabbitMQ. (integer value)
+#rabbit_retry_interval = 1
+
+# How long to back off between retries when connecting to RabbitMQ. (integer
+# value)
+#rabbit_retry_backoff = 2
+
+# Maximum interval of RabbitMQ connection retries. Default is 30 seconds.
+# (integer value)
+#rabbit_interval_max = 30
+
+# DEPRECATED: Maximum number of RabbitMQ connection retries. Default is 0
+# (infinite retry count). (integer value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#rabbit_max_retries = 0
+
+# Try to use HA queues in RabbitMQ (x-ha-policy: all). If you change this
+# option, you must wipe the RabbitMQ database. In RabbitMQ 3.0, queue mirroring
+# is no longer controlled by the x-ha-policy argument when declaring a queue. If
+# you just want to make sure that all queues (except those with auto-generated
+# names) are mirrored across all nodes, run: "rabbitmqctl set_policy HA
+# '^(?!amq\.).*' '{"ha-mode": "all"}' " (boolean value)
+#rabbit_ha_queues = false
+
+# Positive integer representing duration in seconds for queue TTL (x-expires).
+# Queues which are unused for the duration of the TTL are automatically deleted.
+# The parameter affects only reply and fanout queues. (integer value)
+# Minimum value: 1
+#rabbit_transient_queues_ttl = 1800
+
+# Specifies the number of messages to prefetch. Setting to zero allows unlimited
+# messages. (integer value)
+#rabbit_qos_prefetch_count = 0
+
+# Number of seconds after which the Rabbit broker is considered down if
+# heartbeat's keep-alive fails (0 disables the heartbeat). EXPERIMENTAL (integer
+# value)
+#heartbeat_timeout_threshold = 60
+
+# How many times per heartbeat_timeout_threshold interval the heartbeat is
+# checked. (integer value)
+#heartbeat_rate = 2
+
+
+[oslo_messaging_zmq]
+
+#
+# From oslo.messaging
+#
+
+# ZeroMQ bind address. Should be a wildcard (*), an ethernet interface, or IP.
+# The "host" option should point or resolve to this address. (string value)
+#rpc_zmq_bind_address = *
+
+# MatchMaker driver. (string value)
+# Possible values:
+# redis - <No description provided>
+# sentinel - <No description provided>
+# dummy - <No description provided>
+#rpc_zmq_matchmaker = redis
+
+# Number of ZeroMQ contexts, defaults to 1. (integer value)
+#rpc_zmq_contexts = 1
+
+# Maximum number of ingress messages to locally buffer per topic. Default is
+# unlimited. (integer value)
+#rpc_zmq_topic_backlog = <None>
+
+# Directory for holding IPC sockets. (string value)
+#rpc_zmq_ipc_dir = /var/run/openstack
+
+# Name of this node. Must be a valid hostname, FQDN, or IP address. Must match
+# "host" option, if running Nova. (string value)
+#rpc_zmq_host = localhost
+
+# Number of seconds to wait before all pending messages will be sent after
+# closing a socket. The default value of -1 specifies an infinite linger period.
+# The value of 0 specifies no linger period. Pending messages shall be discarded
+# immediately when the socket is closed. Positive values specify an upper bound
+# for the linger period. (integer value)
+# Deprecated group/name - [DEFAULT]/rpc_cast_timeout
+#zmq_linger = -1
+
+# The default number of seconds that poll should wait. Poll raises timeout
+# exception when timeout expired. (integer value)
+#rpc_poll_timeout = 1
+
+# Expiration timeout in seconds of a name service record about existing target (
+# < 0 means no timeout). (integer value)
+#zmq_target_expire = 300
+
+# Update period in seconds of a name service record about existing target.
+# (integer value)
+#zmq_target_update = 180
+
+# Use PUB/SUB pattern for fanout methods. PUB/SUB always uses proxy. (boolean
+# value)
+#use_pub_sub = false
+
+# Use ROUTER remote proxy. (boolean value)
+#use_router_proxy = false
+
+# This option makes direct connections dynamic or static. It makes sense only
+# with use_router_proxy=False which means to use direct connections for direct
+# message types (ignored otherwise). (boolean value)
+#use_dynamic_connections = false
+
+# How many additional connections to a host will be made for failover reasons.
+# This option only applies in dynamic connections mode. (integer value)
+#zmq_failover_connections = 2
+
+# Minimal port number for random ports range. (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#rpc_zmq_min_port = 49153
+
+# Maximal port number for random ports range. (integer value)
+# Minimum value: 1
+# Maximum value: 65536
+#rpc_zmq_max_port = 65536
+
+# Number of retries to find free port number before fail with ZMQBindError.
+# (integer value)
+#rpc_zmq_bind_port_retries = 100
+
+# Default serialization mechanism for serializing/deserializing
+# outgoing/incoming messages (string value)
+# Possible values:
+# json - <No description provided>
+# msgpack - <No description provided>
+#rpc_zmq_serialization = json
+
+# This option configures round-robin mode in the zmq socket. True means no
+# queue is kept when the server side disconnects. False means the queue and
+# messages are kept even if the server is disconnected; when the server
+# reappears, all accumulated messages are sent to it. (boolean value)
+#zmq_immediate = true
+
+# Enable/disable TCP keepalive (KA) mechanism. The default value of -1 (or any
+# other negative value) means to skip any overrides and leave it to OS default;
+# 0 and 1 (or any other positive value) mean to disable and enable the option
+# respectively. (integer value)
+#zmq_tcp_keepalive = -1
+
+# The duration between two keepalive transmissions in idle condition. The unit
+# is platform dependent, for example, seconds in Linux, milliseconds in Windows
+# etc. The default value of -1 (or any other negative value and 0) means to skip
+# any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_idle = -1
+
+# The number of retransmissions to be carried out before declaring that remote
+# end is not available. The default value of -1 (or any other negative value and
+# 0) means to skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_cnt = -1
+
+# The duration between two successive keepalive retransmissions, if
+# acknowledgement to the previous keepalive transmission is not received. The
+# unit is platform dependent, for example, seconds in Linux, milliseconds in
+# Windows etc. The default value of -1 (or any other negative value and 0) means
+# to skip any overrides and leave it to OS default. (integer value)
+#zmq_tcp_keepalive_intvl = -1
+
+# Maximum number of (green) threads to work concurrently. (integer value)
+#rpc_thread_pool_size = 100
+
+# Expiration timeout in seconds of a sent/received message after which it is not
+# tracked anymore by a client/server. (integer value)
+#rpc_message_ttl = 300
+
+# Wait for message acknowledgements from receivers. This mechanism works only
+# via proxy without PUB/SUB. (boolean value)
+#rpc_use_acks = false
+
+# Number of seconds to wait for an ack from a cast/call. After each retry
+# attempt this timeout is multiplied by some specified multiplier. (integer
+# value)
+#rpc_ack_timeout_base = 15
+
+# Number to multiply base ack timeout by after each retry attempt. (integer
+# value)
+#rpc_ack_timeout_multiplier = 2
+
+# Default number of message sending attempts in case of problems: a
+# positive value N means at most N retries, 0 means no retries, None or -1 (or
+# any other negative values) mean to retry forever. This option is used only if
+# acknowledgments are enabled. (integer value)
+#rpc_retry_attempts = 3
+
+# List of publisher hosts SubConsumer can subscribe on. This option has higher
+# priority than the default publishers list taken from the matchmaker. (list
+# value)
+#subscribe_on =
+
+
+[oslo_middleware]
+
+#
+# From oslo.middleware
+#
+
+# The maximum body size for each request, in bytes. (integer value)
+# Deprecated group/name - [DEFAULT]/osapi_max_request_body_size
+# Deprecated group/name - [DEFAULT]/max_request_body_size
+#max_request_body_size = 114688
+
+# DEPRECATED: The HTTP Header that will be used to determine what the original
+# request protocol scheme was, even if it was hidden by a SSL termination proxy.
+# (string value)
+# This option is deprecated for removal.
+# Its value may be silently ignored in the future.
+#secure_proxy_ssl_header = X-Forwarded-Proto
+
+# Whether the application is behind a proxy or not. This determines if the
+# middleware should parse the headers or not. (boolean value)
+#enable_proxy_headers_parsing = false
+
+
+[oslo_policy]
+
+#
+# From oslo.policy
+#
+
+# This option controls whether or not to enforce scope when evaluating policies.
+# If ``True``, the scope of the token used in the request is compared to the
+# ``scope_types`` of the policy being enforced. If the scopes do not match, an
+# ``InvalidScope`` exception will be raised. If ``False``, a message will be
+# logged informing operators that policies are being invoked with mismatching
+# scope. (boolean value)
+#enforce_scope = false
+
+# The file that defines policies. (string value)
+#policy_file = policy.json
+
+# Default rule. Enforced when a requested rule is not found. (string value)
+#policy_default_rule = default
+
+# Directories where policy configuration files are stored. They can be relative
+# to any directory in the search path defined by the config_dir option, or
+# absolute paths. The file defined by policy_file must exist for these
+# directories to be searched. Missing or empty directories are ignored. (multi
+# valued)
+#policy_dirs = policy.d
+
+# Content Type to send and receive data for REST based policy check (string
+# value)
+# Possible values:
+# application/x-www-form-urlencoded - <No description provided>
+# application/json - <No description provided>
+#remote_content_type = application/x-www-form-urlencoded
+
+# server identity verification for REST based policy check (boolean value)
+#remote_ssl_verify_server_crt = false
+
+# Absolute path to ca cert file for REST based policy check (string value)
+#remote_ssl_ca_crt_file = <None>
+
+# Absolute path to client cert for REST based policy check (string value)
+#remote_ssl_client_crt_file = <None>
+
+# Absolute path client key file REST based policy check (string value)
+#remote_ssl_client_key_file = <None>
+
+
+[profiler]
+
+#
+# From osprofiler
+#
+
+#
+# Enable the profiling for all services on this node.
+#
+# Default value is False (fully disable the profiling feature).
+#
+# Possible values:
+#
+# * True: Enables the feature
+# * False: Disables the feature. The profiling cannot be started via this
+#   project's operations. If the profiling is triggered by another project,
+#   this project's part will be empty.
+#  (boolean value)
+# Deprecated group/name - [profiler]/profiler_enabled
+#enabled = false
+
+#
+# Enable SQL requests profiling in services.
+#
+# Default value is False (SQL requests won't be traced).
+#
+# Possible values:
+#
+# * True: Enables SQL requests profiling. Each SQL query will be part of the
+#   trace and can then be analyzed by how much time was spent on it.
+# * False: Disables SQL requests profiling. The spent time is only shown on a
+#   higher level of operations. Single SQL queries cannot be analyzed this way.
+#  (boolean value)
+#trace_sqlalchemy = false
+
+#
+# Secret key(s) to use for encrypting context data for performance profiling.
+#
+# This string value should have the following format: <key1>[,<key2>,...<keyn>],
+# where each key is some random string. A user who triggers the profiling via
+# the REST API has to set one of these keys in the headers of the REST API call
+# to include profiling results of this node for this particular project.
+#
+# Both "enabled" flag and "hmac_keys" config options should be set to enable
+# profiling. Also, to generate correct profiling information across all services
+# at least one key needs to be consistent between OpenStack projects. This
+# ensures it can be used from client side to generate the trace, containing
+# information from all possible resources.
+#  (string value)
+#hmac_keys = SECRET_KEY
+
+#
+# Connection string for a notifier backend.
+#
+# Default value is ``messaging://`` which sets the notifier to oslo_messaging.
+#
+# Examples of possible values:
+#
+# * ``messaging://`` - use oslo_messaging driver for sending spans.
+# * ``redis://127.0.0.1:6379`` - use redis driver for sending spans.
+# * ``mongodb://127.0.0.1:27017`` - use mongodb driver for sending spans.
+# * ``elasticsearch://127.0.0.1:9200`` - use elasticsearch driver for sending
+#   spans.
+# * ``jaeger://127.0.0.1:6831`` - use jaeger tracing as driver for sending
+# spans.
+#  (string value)
+#connection_string = messaging://
+
+#
+# Document type for notification indexing in elasticsearch.
+#  (string value)
+#es_doc_type = notification
+
+#
+# This parameter is a time value parameter (for example: es_scroll_time=2m),
+# indicating for how long the nodes that participate in the search will maintain
+# relevant resources in order to continue and support it.
+#  (string value)
+#es_scroll_time = 2m
+
+#
+# Elasticsearch splits large requests in batches. This parameter defines
+# maximum size of each batch (for example: es_scroll_size=10000).
+#  (integer value)
+#es_scroll_size = 10000
+
+#
+# Redissentinel provides a timeout option on the connections.
+# This parameter defines that timeout (for example: socket_timeout=0.1).
+#  (floating point value)
+#socket_timeout = 0.1
+
+#
+# Redissentinel uses a service name to identify a master redis service.
+# This parameter defines the name (for example:
+# ``sentinel_service_name=mymaster``).
+#  (string value)
+#sentinel_service_name = mymaster
+
+#
+# Enable filter traces that contain error/exception to a separated place.
+#
+# Default value is set to False.
+#
+# Possible values:
+#
+# * True: Enable filter traces that contain error/exception.
+# * False: Disable the filter.
+#  (boolean value)
+#filter_error_trace = false
+
+
+[remote_debug]
+
+#
+# From nova.conf
+#
+
+#
+# Debug host (IP or name) to connect to. This command line parameter is used
+# when you want to connect to a nova service via a debugger running on a
+# different host.
+#
+# Note that using the remote debug option changes how Nova uses the eventlet
+# library to support async IO. This could result in failures that do not occur
+# under normal operation. Use at your own risk.
+#
+# Possible Values:
+#
+#    * IP address of a remote host as a command line parameter
+#      to a nova service. For Example:
+#
+#     /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
+#     --remote_debug-host <IP address where the debugger is running>
+#  (host address value)
+#host = <None>
+
+#
+# Debug port to connect to. This command line parameter allows you to specify
+# the port you want to use to connect to a nova service via a debugger running
+# on a different host.
+#
+# Note that using the remote debug option changes how Nova uses the eventlet
+# library to support async IO. This could result in failures that do not occur
+# under normal operation. Use at your own risk.
+#
+# Possible Values:
+#
+#    * Port number you want to use as a command line parameter
+#      to a nova service. For Example:
+#
+#     /usr/local/bin/nova-compute --config-file /etc/nova/nova.conf
+#     --remote_debug-host <IP address where the debugger is running>
+#     --remote_debug-port <port it's listening on>.
+#  (port value)
+# Minimum value: 0
+# Maximum value: 65535
+#port = <None>
+
+
+[scheduler]
+
+#
+# From nova.conf
+#
+
+baseproject = gosbsbase
+
+#
+# The class of the driver used by the scheduler. This should be chosen from one
+# of the entrypoints under the namespace 'nova.scheduler.driver' of file
+# 'setup.cfg'. If nothing is specified in this option, the 'filter_scheduler' is
+# used.
+#
+# Other options are:
+#
+# * 'caching_scheduler' which aggressively caches the system state for better
+#   individual scheduler performance at the risk of more retries when running
+#   multiple schedulers. [DEPRECATED]
+# * 'chance_scheduler' which simply picks a host at random. [DEPRECATED]
+# * 'fake_scheduler' which is used for testing.
+#
+# Possible values:
+#
+# * Any of the drivers included in Nova:
+#
+#   * filter_scheduler
+#   * caching_scheduler
+#   * chance_scheduler
+#   * fake_scheduler
+#
+# * You may also set this to the entry point name of a custom scheduler driver,
+#   but you will be responsible for creating and maintaining it in your
+#   setup.cfg file.
+#
+# Related options:
+#
+# * workers
+#  (string value)
+# Deprecated group/name - [DEFAULT]/scheduler_driver
+#driver = filter_scheduler
+
+#
+# Periodic task interval.
+#
+# This value controls how often (in seconds) to run periodic tasks in the
+# scheduler. The specific tasks that are run for each period are determined by
+# the particular scheduler being used. Currently the only in-tree scheduler
+# driver that uses this option is the ``caching_scheduler``.
+#
+# If this is larger than the nova-service 'service_down_time' setting, the
+# ComputeFilter (if enabled) may think the compute service is down. As each
+# scheduler can work a little differently than the others, be sure to test this
+# with your selected scheduler.
+#
+# Possible values:
+#
+# * An integer, where the integer corresponds to periodic task interval in
+#   seconds. 0 uses the default interval (60 seconds). A negative value disables
+#   periodic tasks.
+#
+# Related options:
+#
+# * ``nova-service service_down_time``
+#  (integer value)
+#periodic_task_interval = 60
+
+#
+# This is the maximum number of attempts that will be made for a given instance
+# build/move operation. It limits the number of alternate hosts returned by the
+# scheduler. When that list of hosts is exhausted, a MaxRetriesExceeded
+# exception is raised and the instance is set to an error state.
+#
+# Possible values:
+#
+# * A positive integer, where the integer corresponds to the max number of
+#   attempts that can be made when building or moving an instance.
+#  (integer value)
+# Minimum value: 1
+# Deprecated group/name - [DEFAULT]/scheduler_max_attempts
+#max_attempts = 3
+
+#
+# Periodic task interval.
+#
+# This value controls how often (in seconds) the scheduler should attempt
+# to discover new hosts that have been added to cells. If negative (the
+# default), no automatic discovery will occur.
+#
+# Deployments where compute nodes come and go frequently may want this
+# enabled, whereas others may prefer to manually discover hosts when one
+# is added to avoid any overhead from constantly checking. If enabled,
+# each run will select any unmapped hosts out of each cell database.
+#  (integer value)
+# Minimum value: -1
+#discover_hosts_in_cells_interval = -1
+
+#
+# This setting determines the maximum limit on results received from the
+# placement service during a scheduling operation. It effectively limits
+# the number of hosts that may be considered for scheduling requests that
+# match a large number of candidates.
+#
+# A value of 1 (the minimum) will effectively defer scheduling to the placement
+# service strictly on "will it fit" grounds. A higher value will put an upper
+# cap on the number of results the scheduler will consider during the filtering
+# and weighing process. Large deployments may need to set this lower than the
+# total number of hosts available to limit memory consumption, network traffic,
+# etc. of the scheduler.
+#
+# This option is only used by the FilterScheduler; if you use a different
+# scheduler, this option has no effect.
+#  (integer value)
+# Minimum value: 1
+#max_placement_results = 1000
+
+#
+# Number of workers for the nova-scheduler service. The default will be the
+# number of CPUs available if using the "filter_scheduler" scheduler driver,
+# otherwise the default will be 1.
+#  (integer value)
+# Minimum value: 0
+#workers = <None>
+
+#
+# This setting causes the scheduler to look up a host aggregate with the
+# metadata key of `filter_tenant_id` set to the project of an incoming
+# request, and request results from placement be limited to that aggregate.
+# Multiple tenants may be added to a single aggregate by appending a serial
+# number to the key, such as `filter_tenant_id:123`.
+#
+# The matching aggregate UUID must be mirrored in placement for proper
+# operation. If no host aggregate with the tenant id is found, or that
+# aggregate does not match one in placement, the result will be the same
+# as not finding any suitable hosts for the request.
+#
+# See also the placement_aggregate_required_for_tenants option.
+#  (boolean value)
+#limit_tenants_to_placement_aggregate = false
+
+#
+# This setting, when limit_tenants_to_placement_aggregate=True, will control
+# whether or not a tenant with no aggregate affinity will be allowed to schedule
+# to any available node. If aggregates are used to limit some tenants but
+# not all, then this should be False. If all tenants should be confined via
+# aggregate, then this should be True to prevent them from receiving
+# unrestricted scheduling to any available node.
+#
+# See also the limit_tenants_to_placement_aggregate option.
+#  (boolean value)
+#placement_aggregate_required_for_tenants = false
+
+#
+# This setting causes the scheduler to look up a host aggregate with the
+# metadata key of `availability_zone` set to the value provided by an
+# incoming request, and request results from placement be limited to that
+# aggregate.
+#
+# The matching aggregate UUID must be mirrored in placement for proper
+# operation. If no host aggregate with the `availability_zone` key is
+# found, or that aggregate does not match one in placement, the result will
+# be the same as not finding any suitable hosts.
+#
+# Note that if you enable this flag, you can disable the (less efficient)
+# AvailabilityZoneFilter in the scheduler.
+#  (boolean value)
+#query_placement_for_availability_zone = false
+
+
+[upgrade_levels]
+#
+# upgrade_levels options are used to set version cap for RPC
+# messages sent between different nova services.
+#
+# By default all services send messages using the latest version
+# they know about.
+#
+# The compute upgrade level is an important part of rolling upgrades
+# where old and new nova-compute services run side by side.
+#
+# The other options can largely be ignored, and are only kept to
+# help with a possible future backport issue.
+
+#
+# From nova.conf
+#
+
+#
+# Compute RPC API version cap.
+#
+# By default, we always send messages using the most recent version
+# the client knows about.
+#
+# Where you have old and new compute services running, you should set
+# this to the lowest deployed version. This is to guarantee that all
+# services never send messages that one of the compute nodes can't
+# understand. Note that we only support upgrading from release N to
+# release N+1.
+#
+# Set this option to "auto" if you want to let the compute RPC module
+# automatically determine what version to use based on the service
+# versions in the deployment.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * 'auto': Automatically determines what version to use based on
+#   the service versions in the deployment.
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#compute = <None>
+
+#
+# Cells RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#cells = <None>
+
+#
+# Intercell RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#intercell = <None>
+
+# DEPRECATED:
+# Cert RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+# This option is deprecated for removal since 18.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# The nova-cert service was removed in 16.0.0 (Pike) so this option
+# is no longer used.
+#cert = <None>
+
+#
+# Scheduler RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#scheduler = <None>
+
+#
+# Conductor RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#conductor = <None>
+
+#
+# Console RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#console = <None>
+
+# DEPRECATED:
+# Consoleauth RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+# This option is deprecated for removal since 18.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# The nova-consoleauth service was deprecated in 18.0.0 (Rocky) and will be
+# removed in an upcoming release.
+#consoleauth = <None>
+
+# DEPRECATED:
+# Network RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+# This option is deprecated for removal since 18.0.0.
+# Its value may be silently ignored in the future.
+# Reason:
+# The nova-network service was deprecated in 14.0.0 (Newton) and will be
+# removed in an upcoming release.
+#network = <None>
+
+#
+# Base API RPC API version cap.
+#
+# Possible values:
+#
+# * By default send the latest version the client knows about
+# * A string representing a version number in the format 'N.N';
+#   for example, possible values might be '1.12' or '2.0'.
+# * An OpenStack release name, in lower case, such as 'mitaka' or
+#   'liberty'.
+#  (string value)
+#baseapi = <None>
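
The [keystone] section above carries concrete credentials (auth_url, project_id,
username, password) that the scheduler uses against the Identity service. As a
rough illustration only, and not something shipped in this commit, the following
keystoneauth1 sketch shows how such values map onto an authenticated session; it
assumes the v3 identity API (the domain-name options are v3 concepts, so the
auth_version = 2 value above looks stale) and uses a placeholder password.

    # Illustrative sketch only; assumes keystoneauth1 and a v3 identity endpoint.
    from keystoneauth1.identity import v3
    from keystoneauth1 import session

    auth = v3.Password(
        auth_url='http://controller:35357/v3',  # [keystone] auth_url plus /v3
        username='gobs',
        password='CHANGE_ME',  # placeholder; the real value lives in gosbs.conf
        project_id='b422264d56ae485f8e8e7abbe83ae781',
        user_domain_name='default',
        project_domain_name='default',
    )
    sess = session.Session(auth=auth)
    print(sess.get_token())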

diff --git a/gosbs/__init__.py b/gosbs/__init__.py
new file mode 100644
index 0000000..228c89d
--- /dev/null
+++ b/gosbs/__init__.py
@@ -0,0 +1,35 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+:mod:`nova` -- Cloud IaaS Platform
+===================================
+
+.. automodule:: nova
+   :platform: Unix
+   :synopsis: Infrastructure-as-a-Service Cloud platform.
+"""
+
+import os
+
+os.environ['EVENTLET_NO_GREENDNS'] = 'yes'
+
+# NOTE(rpodolyaka): import oslo_service first, so that it makes eventlet hub
+# use a monotonic clock to avoid issues with drifts of system time (see
+# LP 1510234 for details)
+import oslo_service  # noqa
+
+import eventlet  # noqa

diff --git a/gosbs/baserpc.py b/gosbs/baserpc.py
new file mode 100644
index 0000000..b57f44c
--- /dev/null
+++ b/gosbs/baserpc.py
@@ -0,0 +1,81 @@
+#
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+#
+
+# Origin https://github.com/openstack/nova/blob/master/nova/baserpc.py
+
+"""
+Base RPC client and server common to all services.
+"""
+
+import oslo_messaging as messaging
+from oslo_serialization import jsonutils
+
+import gosbs.conf
+from gosbs import rpc
+
+
+CONF = gosbs.conf.CONF
+
+_NAMESPACE = 'baseapi'
+
+
+class BaseAPI(object):
+    """Client side of the base rpc API.
+
+    API version history:
+
+        1.0 - Initial version.
+        1.1 - Add get_backdoor_port
+    """
+
+    VERSION_ALIASES = {
+        # baseapi was added in havana
+    }
+
+    def __init__(self, topic):
+        super(BaseAPI, self).__init__()
+        target = messaging.Target(topic=topic,
+                                  namespace=_NAMESPACE,
+                                  version='1.0')
+        version_cap = self.VERSION_ALIASES.get(CONF.upgrade_levels.baseapi,
+                                               CONF.upgrade_levels.baseapi)
+        self.client = rpc.get_client(target, version_cap=version_cap)
+
+    def ping(self, context, arg, timeout=None):
+        arg_p = jsonutils.to_primitive(arg)
+        cctxt = self.client.prepare(timeout=timeout)
+        return cctxt.call(context, 'ping', arg=arg_p)
+
+    def get_backdoor_port(self, context, host):
+        cctxt = self.client.prepare(server=host, version='1.1')
+        return cctxt.call(context, 'get_backdoor_port')
+
+
+class BaseRPCAPI(object):
+    """Server side of the base RPC API."""
+
+    target = messaging.Target(namespace=_NAMESPACE, version='1.1')
+
+    def __init__(self, service_name, backdoor_port):
+        self.service_name = service_name
+        self.backdoor_port = backdoor_port
+
+    def ping(self, context, arg):
+        resp = {'service': self.service_name, 'arg': arg}
+        return jsonutils.to_primitive(resp)
+
+    def get_backdoor_port(self, context):
+        return self.backdoor_port
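
BaseAPI/BaseRPCAPI above mirror nova's baserpc module: a small 'baseapi'
namespace with ping() and get_backdoor_port() shared by all services. A rough
usage sketch follows; it is illustrative only and assumes the RPC layer has
already been initialised through gosbs.rpc (e.g. rpc.init(CONF)) and that a
request context is available, neither of which appears in this hunk.

    # Illustrative sketch only; assumes gosbs.rpc is initialised elsewhere.
    from gosbs import baserpc

    def ping_service(context, topic):
        client = baserpc.BaseAPI(topic)  # RPC client for the 'baseapi' namespace
        # Round-trips the argument through the remote BaseRPCAPI.ping()
        return client.ping(context, 'hello', timeout=10)

    # The server side answers with: {'service': '<service_name>', 'arg': 'hello'}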

diff --git a/gosbs/cmd/__init__.py b/gosbs/cmd/__init__.py
new file mode 100644
index 0000000..7970311
--- /dev/null
+++ b/gosbs/cmd/__init__.py
@@ -0,0 +1,20 @@
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+# All Rights Reserved.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/cmd/__init__.py
+
+from gosbs import utils
+
+utils.monkey_patch()

diff --git a/gosbs/cmd/scheduler.py b/gosbs/cmd/scheduler.py
new file mode 100644
index 0000000..db5d612
--- /dev/null
+++ b/gosbs/cmd/scheduler.py
@@ -0,0 +1,45 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/cmd/compute.py
+# Removed code that was not needed
+
+"""Starter script for Gosbs Scheduler."""
+
+import sys
+
+from oslo_log import log as logging
+
+from gosbs.scheduler import rpcapi as scheduler_rpcapi
+import gosbs.conf
+from gosbs import config
+from gosbs import objects
+from gosbs.objects import base as objects_base
+from gosbs import service
+
+CONF = gosbs.conf.CONF
+
+def main():
+    config.parse_args(sys.argv)
+    logging.setup(CONF, 'gosbs')
+    objects.register_all()
+
+    objects.Service.enable_min_version_cache()
+    server = service.Service.create(binary='gosbs-scheduler',
+                                    topic=scheduler_rpcapi.RPC_TOPIC)
+    service.serve(server)
+    service.wait()
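
For illustration only (not part of the patch): a console script for the 'gosbs-scheduler' binary would only need to call main(), e.g.:

    # Hypothetical launcher; the packaging entry point is not part of this commit.
    from gosbs.cmd.scheduler import main

    if __name__ == '__main__':
        main()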

diff --git a/pym/tbc/__init__.py b/gosbs/common/__init__.py
similarity index 100%
copy from pym/tbc/__init__.py
copy to gosbs/common/__init__.py

diff --git a/gosbs/common/flags.py b/gosbs/common/flags.py
new file mode 100644
index 0000000..2e844f0
--- /dev/null
+++ b/gosbs/common/flags.py
@@ -0,0 +1,211 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+# Origin https://gitweb.gentoo.org/proj/portage.git/tree/pym/portage/api/flag.py?h=public_api
+# Fix so we can use mysettings and myportdb.
+# Add filtering of abi, python and ruby USE flags.
+
+"""Provides support functions for USE flag settings and analysis"""
+
+
+__all__ = (
+    'get_iuse',
+    'get_installed_use',
+    'reduce_flag',
+    'reduce_flags',
+    'filter_flags',
+    'get_all_cpv_use',
+    'get_flags'
+)
+
+import portage
+
+
+def get_iuse(cpv, portdb):
+    """Gets the current IUSE flags from the tree
+
+    To be used when a gentoolkit package object is not needed
+    @type cpv: string
+    @param cpv: cat/pkg-ver
+    @type portdb: portage.portdbapi
+    @param portdb: the portage tree database to query
+    @rtype list
+    @returns [] or the list of IUSE flags
+    """
+    return portdb.aux_get(cpv, ["IUSE"])[0].split()
+
+
+def reduce_flag(flag):
+    """Absolute value function for a USE flag
+
+    @type flag: string
+    @param flag: the use flag to absolute.
+    @rtype: string
+    @return absolute USE flag
+    """
+    if flag[0] in ["+","-"]:
+        return flag[1:]
+    else:
+        return flag
+
+
+def reduce_flags(the_list):
+    """Absolute value function for a USE flag list
+
+    @type the_list: list
+    @param the_list: the use flags to absolute.
+    @rtype: list
+    @return absolute USE flags
+    """
+    reduced = []
+    for member in the_list:
+        reduced.append(reduce_flag(member))
+    return reduced
+
+
+def filter_flags(use, use_expand_hidden, usemasked,
+        useforced, settings):
+    """Filter function to remove hidden or otherwise not normally
+    visible USE flags from a list.
+
+    @type use: list
+    @param use: the USE flag list to be filtered.
+    @type use_expand_hidden: list
+    @param  use_expand_hidden: list of flags hidden.
+    @type usemasked: list
+    @param usemasked: list of masked USE flags.
+    @type useforced: list
+    @param useforced: the forced USE flags.
+    @param settings: portage config settings instance
+        (used to read PORTAGE_ARCHLIST)
+    @rtype: list
+    @return the filtered USE flags.
+    """
+    # clean out some environment flags, since they will most probably
+    # be confusing for the user
+    for flag in use_expand_hidden:
+        flag = flag.lower() + "_"
+        # iterate over a copy so removals do not skip elements
+        for expander in use[:]:
+            if flag in expander:
+                use.remove(expander)
+    # clean out any arch's
+    archlist = settings["PORTAGE_ARCHLIST"].split()
+    for key in use[:]:
+        if key in archlist:
+            use.remove(key)
+    # double check if any flags from usemasked or useforced are still there
+    masked = usemasked + useforced
+    for flag in use[:]:
+        if flag in masked:
+            use.remove(flag)
+    # clean out any abi_ flag
+    for a in use[:]:
+        if a.startswith("abi_"):
+            use.remove(a)
+    # clean out any python_ flag
+    for a in use[:]:
+        if a.startswith("python_"):
+            use.remove(a)
+    # clean out any ruby_targets_ flag
+    for a in use[:]:
+        if a.startswith("ruby_targets_"):
+            use.remove(a)
+    return use
+
+
+def get_all_cpv_use(cpv, portdb, settings):
+    """Uses portage to determine final USE flags and settings for an emerge
+
+    @type cpv: string
+    @param cpv: eg cat/pkg-ver
+    @type portdb: portage.portdbapi
+    @param portdb: the portage tree database to query
+    @param settings: portage config settings instance
+        (unlocked, set to cpv and reset again by this function)
+    @rtype: lists
+    @return  use, use_expand_hidden, usemask, useforce
+    """
+    use = None
+    settings.unlock()
+    try:
+        settings.setcpv(cpv, use_cache=None, mydb=portdb)
+        use = settings['PORTAGE_USE'].split()
+        use_expand_hidden = settings["USE_EXPAND_HIDDEN"].split()
+        usemask = list(settings.usemask)
+        useforce = list(settings.useforce)
+    except KeyError:
+        settings.reset()
+        settings.lock()
+        return [], [], [], []
+    # reset cpv filter
+    settings.reset()
+    settings.lock()
+    return use, use_expand_hidden, usemask, useforce
+
+
+def get_flags(cpv, portdb, settings, final_setting=False):
+    """Retrieves all information needed to filter out hidden, masked, etc.
+    USE flags for a given package.
+
+    @type cpv: string
+    @param cpv: eg. cat/pkg-ver
+    @type final_setting: boolean
+    @param final_setting: if True, also determine the final
+        environment USE flag settings and return them as well.
+    @type portdb: portage.portdbapi
+    @param portdb: the portage tree database to query
+    @param settings: portage config settings instance passed on to
+        get_all_cpv_use() and filter_flags()
+    @rtype: list or list, list
+    @return IUSE or IUSE, final_flags
+    """
+    (final_use, use_expand_hidden, usemasked, useforced) = \
+        get_all_cpv_use(cpv, portdb, settings)
+    iuse_flags = filter_flags(get_iuse(cpv, portdb), use_expand_hidden,
+        usemasked, useforced, settings)
+    if final_setting:
+        final_flags = filter_flags(final_use,  use_expand_hidden,
+            usemasked, useforced, settings)
+        return iuse_flags, final_flags
+    return iuse_flags
+
+
+def get_use_flag_dict(portdir):
+    """ Get all the use flags and return them as a dictionary
+    
+    @param portdir: the path to the repository
+    @rtype dictionary of:
+        key = use flag forced to lowercase
+        data = list[0] = 'local' or 'global'
+            list[1] = 'package-name'
+            list[2] = description of flag
+    """
+    use_dict = {}
+
+    # process standard use flags
+
+    _list = portage.grabfile(portdir + '/profiles/use.desc')
+    for item in _list:
+        index = item.find(' - ')
+        use_dict[item[:index].strip().lower()] = ['global', '', item[index+3:]]
+
+    # process local (package specific) use flags
+
+    _list = portage.grabfile(portdir + '/profiles/use.local.desc')
+    for item in _list:
+        index = item.find(' - ')
+        data = item[:index].lower().split(':')
+        try:
+            use_dict[data[1].strip()] = ['local', data[0].strip(), item[index + 3:]]
+        except IndexError:
+            # malformed line in use.local.desc, skip it
+            pass
+    return use_dict
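
For illustration only (not part of the patch), a usage sketch that assumes the regular system Portage configuration is usable; the cpv is an example and must exist in the tree:

    # Illustration only: resolve visible IUSE and final USE flags for one package.
    import portage
    from gosbs.common.flags import get_flags

    mysettings = portage.config(config_root='/')
    myportdb = portage.portdbapi(mysettings=mysettings)
    cpv = 'dev-lang/python-3.9.9'   # example cpv
    iuse_flags, final_flags = get_flags(cpv, myportdb, mysettings, final_setting=True)
    print(iuse_flags)
    print(final_flags)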

diff --git a/gosbs/common/git.py b/gosbs/common/git.py
new file mode 100644
index 0000000..b22435d
--- /dev/null
+++ b/gosbs/common/git.py
@@ -0,0 +1,99 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import re
+import git
+import os
+
+from oslo_log import log as logging
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def fetch(repo):
+    remote = git.remote.Remote(repo, 'origin')
+    info_list = remote.fetch()
+    local_commit = repo.commit()
+    remote_commit = info_list[0].commit
+    if local_commit.hexsha != remote_commit.hexsha:
+        return info_list, False
+    return info_list, True
+
+def merge(repo, info):
+    repo.git.merge(info.commit)
+
+def update_git_repo_db(repo_dict):
+    # check the git diff for which packages got updated and pass that on to the Packages objects
+    # fetch and merge the repo
+    search_list = [ '^metadata', '^eclass', '^licenses', '^profiles', '^scripts', '^skel.', '^header.txt']
+    repo = git.Repo(repo_dict['repo_path'])
+    cp_list = []
+    info_list, repo_uptodate = fetch(repo)
+    if repo_uptodate:
+        return True, cp_list
+    # We check for dir changes and add the package to a list
+    repo_diff = repo.git.diff('origin', '--name-only')
+    LOG.debug("Git dir diff:\n%s", repo_diff)
+    for diff_line in repo_diff.splitlines():
+        find_search = True
+        for search_line in search_list:
+            if re.search(search_line, diff_line):
+                find_search = False
+        if find_search:
+            split_diff_line = diff_line.split('/')
+            c = split_diff_line[0]
+            p = split_diff_line[1]
+            cp = c + '/' + p
+            if cp not in cp_list:
+                cp_list.append(cp)
+            LOG.debug("Git CP diff: %s", cp_list)
+    merge(repo, info_list[0])
+    return True, cp_list
+
+def update_git_repo(repo_dict):
+    repo = git.Repo(repo_dict['repo_path'])
+    try:
+        repo.git.pull()
+    except git.GitCommandError:
+        LOG.error("Failed to pull %s", repo_dict['repo_path'])
+        return False
+    return True
+
+def create_git_repo(repo_dict):
+    try:
+        os.mkdir(repo_dict['repo_path'])
+    except OSError:
+        LOG.error("Creation of the directory %s failed", repo_dict['repo_path'])
+        return False
+    try:
+        if not repo_dict['history']:
+            # shallow clone when the full history is not needed
+            git.Repo.clone_from(repo_dict['repo_url'], repo_dict['repo_path'], depth=1)
+        else:
+            git.Repo.clone_from(repo_dict['repo_url'], repo_dict['repo_path'])
+    except git.GitCommandError:
+        LOG.error("Failed to clone %s", repo_dict['repo_url'])
+        return False
+    return True
+
+def check_git_repo_db(repo_dict):
+    if not os.path.isdir(repo_dict['repo_path']):
+        success = create_git_repo(repo_dict)
+        return success, None
+    success, cp_list = update_git_repo_db(repo_dict)
+    return success, cp_list
+
+def check_git_repo(repo_dict):
+    if not os.path.isdir(repo_dict['repo_path']):
+        success = create_git_repo(repo_dict)
+    else:
+        success = update_git_repo(repo_dict)
+    return success
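
For illustration only (not part of the patch): the dict keys this module expects; the path and URL below are placeholders:

    # Illustration only: sync a repo and collect the changed cat/pkg entries.
    from gosbs.common.git import check_git_repo_db

    repo_dict = {
        'repo_path': '/var/cache/gosbs/gentoo.git',                   # placeholder
        'repo_url': 'https://anongit.gentoo.org/git/repo/gentoo.git',
        'history': False,   # False -> shallow clone on first checkout
    }
    success, cp_list = check_git_repo_db(repo_dict)
    if success and cp_list:
        print('changed packages:', cp_list)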

diff --git a/gosbs/common/portage_settings.py b/gosbs/common/portage_settings.py
new file mode 100644
index 0000000..e5a58f5
--- /dev/null
+++ b/gosbs/common/portage_settings.py
@@ -0,0 +1,48 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+from pathlib import Path
+
+import portage
+
+from oslo_log import log as logging
+
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def check_profile(context, project_repopath, project_metadata_db):
+    profile_repo_db = objects.repo.Repo.get_by_uuid(context, project_metadata_db.project_profile_repo_uuid)
+    profile_repopath = CONF.repopath + '/' + profile_repo_db.name + '.git/' + 'profiles/'
+    profile_link = Path(project_repopath + 'etc/portage/make.profile')
+    profile_target = profile_repopath + project_metadata_db.project_profile
+    if not profile_link.is_symlink():
+        profile_link.symlink_to(profile_target)
+    else:
+        if str(profile_link.resolve()) != profile_target:
+            # FIXME: the profile symlink points somewhere else; not handled yet
+            pass
+
+def get_portage_settings(context, project_metadata_db, project_repo_name):
+    settings_repo_db = objects.repo.Repo.get_by_uuid(context, project_metadata_db.project_repo_uuid)
+    project_repopath = CONF.repopath + '/' + settings_repo_db.name + '.git/' + project_repo_name + '/'
+    if Path(project_repopath).exists():
+        check_profile(context, project_repopath, project_metadata_db)
+        # Set config_root (PORTAGE_CONFIGROOT) to project_repopath
+        mysettings = portage.config(config_root = project_repopath)
+        myportdb = portage.portdbapi(mysettings=mysettings)
+        return mysettings, myportdb
+
+def clean_portage_settings(myportdb):
+    myportdb.close_caches()
+    portage.portdbapi.portdbapi_instances.remove(myportdb)

diff --git a/gosbs/common/task.py b/gosbs/common/task.py
new file mode 100644
index 0000000..4fb98cb
--- /dev/null
+++ b/gosbs/common/task.py
@@ -0,0 +1,70 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from datetime import datetime
+from dateutil.relativedelta import relativedelta
+import pytz
+from importlib import import_module
+
+from oslo_utils import uuidutils
+
+from gosbs import objects
+
+def time_to_run_task(task_db):
+    task_time_now = datetime.now().replace(tzinfo=pytz.UTC)
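+    # NOTE: task_db.run is a datetime treated as an interval relative to
+    # task_db.last (year, month and day start counting at 1), e.g.
+    # 0001-01-01 00:30 makes the task due 30 minutes after its last run.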
+    task_time_when = task_db.last + relativedelta(years=+(task_db.run.year - 1))
+    task_time_when = task_time_when + relativedelta(months=+(task_db.run.month - 1))
+    task_time_when = task_time_when + relativedelta(days=+(task_db.run.day - 1))
+    task_time_when = task_time_when + relativedelta(hours=+task_db.run.hour)
+    task_time_when = task_time_when + relativedelta(minutes=+task_db.run.minute)
+    return task_time_when < task_time_now
+
+def create_task_db(context, name, run, repet, service_uuid):
+    task_db = objects.task.Task()
+    task_db.uuid = uuidutils.generate_uuid()
+    task_db.name = name
+    task_db.service_uuid = service_uuid
+    task_db.run = run
+    task_db.repet = repet
+    task_db.status = 'waiting'
+    task_db.last = datetime.now().replace(tzinfo=pytz.UTC)
+    task_db.create(context)
+    return task_db
+
+def check_task_db(context, name, run, repet, service_uuid):
+    filters = {
+        'name': name,
+        'repet': repet,
+    }
+    task_db = objects.task.Task.get_by_server_uuid(context, service_uuid, filters=filters)
+    if task_db is None:
+        task_db = create_task_db(context, name, run, repet, service_uuid)
+    task_db.status = 'waiting'
+    task_db.save(context)
+
+def run_task(context, filters, service_ref):
+    for task_db in objects.task.TaskList.get_all(context, filters=filters, sort_key='priority'):
+        if time_to_run_task(task_db):
+            task_db.status = 'in-progress'
+            task_db.save(context)
+            module_to_run = import_module('.' + task_db.name, 'gosbs.tasks.' + service_ref.topic)
+            module_to_run.task(context, service_ref.uuid)
+            if task_db.repet:
+                task_db.status = 'waiting'
+                task_db.last = datetime.now().replace(tzinfo=pytz.UTC)
+                task_db.save(context)
+            else:
+                task_db.destroy(context)

diff --git a/gosbs/conf/__init__.py b/gosbs/conf/__init__.py
new file mode 100644
index 0000000..05b2f36
--- /dev/null
+++ b/gosbs/conf/__init__.py
@@ -0,0 +1,47 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# This package got introduced during the Mitaka cycle in 2015 to
+# have a central place where the config options of Nova can be maintained.
+# For more background see the blueprint "centralize-config-options"
+
+# Origin https://github.com/openstack/nova/blob/master/nova/conf/__init__.py
+# Import only what we need on gosbs
+
+from oslo_config import cfg
+
+from gosbs.conf import base
+from gosbs.conf import database
+from gosbs.conf import keystone
+from gosbs.conf import netconf
+from gosbs.conf import notifications
+from gosbs.conf import paths
+from gosbs.conf import rpc
+from gosbs.conf import scheduler
+from gosbs.conf import service
+from gosbs.conf import upgrade_levels
+
+CONF = cfg.CONF
+
+base.register_opts(CONF)
+database.register_opts(CONF)
+keystone.register_opts(CONF)
+netconf.register_opts(CONF)
+notifications.register_opts(CONF)
+paths.register_opts(CONF)
+rpc.register_opts(CONF)
+scheduler.register_opts(CONF)
+service.register_opts(CONF)
+upgrade_levels.register_opts(CONF)
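
For illustration only (not part of the patch): importing this package registers every option on the global CONF object; values are read after the configuration is parsed, for example:

    # Illustration only: options registered above become attributes of CONF.
    import sys

    import gosbs.conf
    from gosbs import config

    CONF = gosbs.conf.CONF
    config.parse_args(sys.argv)             # same call the services use
    print(CONF.repopath)                    # [DEFAULT], from gosbs.conf.paths
    print(CONF.scheduler.git_mirror_url)    # [scheduler] group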

diff --git a/gosbs/conf/base.py b/gosbs/conf/base.py
new file mode 100644
index 0000000..4390ad3
--- /dev/null
+++ b/gosbs/conf/base.py
@@ -0,0 +1,43 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+base_options = [
+    cfg.StrOpt("auth_strategy",
+        default="keystone",
+        choices=[
+            ("keystone", "Use keystone for authentication."),
+            ("noauth2", "Designed for testing only, as it does no actual "
+             "credential checking. 'noauth2' provides administrative "
+             "credentials only if 'admin' is specified as the username."),
+        ],
+        help="""
+Determine the strategy to use for authentication.
+"""),
+    cfg.StrOpt(
+        'tempdir',
+        help='Explicitly specify the temporary working directory.'),
+]
+
+
+def register_opts(conf):
+    conf.register_opts(base_options)
+
+
+def list_opts():
+    return {'DEFAULT': base_options}

diff --git a/gosbs/conf/database.py b/gosbs/conf/database.py
new file mode 100644
index 0000000..a82c5e4
--- /dev/null
+++ b/gosbs/conf/database.py
@@ -0,0 +1,183 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/conf/database.py
+
+from oslo_config import cfg
+from oslo_db import options as oslo_db_options
+
+from gosbs.conf import paths
+
+_DEFAULT_SQL_CONNECTION = 'sqlite:///' + paths.state_path_def('gosbs.sqlite')
+_ENRICHED = False
+
+
+# NOTE(markus_z): We cannot simply do:
+# conf.register_opts(oslo_db_options.database_opts, 'api_database')
+# If we reuse a db config option for two different groups ("api_database"
+# and "database") and deprecate or rename a config option in one of these
+# groups, "oslo.config" cannot correctly determine which one to update.
+# That's why we copied & pasted these config options for the "api_database"
+# group here. See commit ba407e3 ("Add support for multiple database engines")
+# for more details.
+api_db_group = cfg.OptGroup('api_database',
+    title='API Database Options',
+    help="""
+The *Nova API Database* is a separate database which is used for information
+which is used across *cells*. This database is mandatory since the Mitaka
+release (13.0.0).
+""")
+
+api_db_opts = [
+    # TODO(markus_z): This should probably have a required=True attribute
+    cfg.StrOpt('connection',
+        secret=True,
+        help=''),
+    cfg.StrOpt('connection_parameters',
+        default='',
+        help=''),
+    cfg.BoolOpt('sqlite_synchronous',
+        default=True,
+        help=''),
+    cfg.StrOpt('slave_connection',
+        secret=True,
+        help=''),
+    cfg.StrOpt('mysql_sql_mode',
+        default='TRADITIONAL',
+        help=''),
+    cfg.IntOpt('connection_recycle_time',
+        default=3600,
+        deprecated_name='idle_timeout',
+        help=''),
+    # TODO(markus_z): We should probably default this to 5 to not rely on the
+    # SQLAlchemy default. Otherwise we wouldn't provide a stable default.
+    cfg.IntOpt('max_pool_size',
+        help=''),
+    cfg.IntOpt('max_retries',
+        default=10,
+        help=''),
+    # TODO(markus_z): This should have a minimum attribute of 0
+    cfg.IntOpt('retry_interval',
+        default=10,
+        help=''),
+    # TODO(markus_z): We should probably default this to 10 to not rely on the
+    # SQLAlchemy default. Otherwise we wouldn't provide a stable default.
+    cfg.IntOpt('max_overflow',
+        help=''),
+    # TODO(markus_z): This should probably make use of the "choices" attribute.
+    # "oslo.db" uses only the values [<0, 0, 50, 100] see module
+    # /oslo_db/sqlalchemy/engines.py method "_setup_logging"
+    cfg.IntOpt('connection_debug',
+        default=0,
+        help=''),
+    cfg.BoolOpt('connection_trace',
+        default=False,
+        help=''),
+    # TODO(markus_z): We should probably default this to 30 to not rely on the
+    # SQLAlchemy default. Otherwise we wouldn't provide a stable default.
+    cfg.IntOpt('pool_timeout',
+        help='')
+]  # noqa
+
+
+def enrich_help_text(alt_db_opts):
+
+    def get_db_opts():
+        for group_name, db_opts in oslo_db_options.list_opts():
+            if group_name == 'database':
+                return db_opts
+        return []
+
+    for db_opt in get_db_opts():
+        for alt_db_opt in alt_db_opts:
+            if alt_db_opt.name == db_opt.name:
+                # NOTE(markus_z): We can append alternative DB specific help
+                # texts here if needed.
+                alt_db_opt.help = db_opt.help + alt_db_opt.help
+
+# NOTE(cdent): See the note above on api_db_group. The same issues
+# apply here.
+
+placement_db_group = cfg.OptGroup('placement_database',
+                                  title='Placement API database options',
+                                  help="""
+The *Placement API Database* is a separate database which can be used with the
+placement service. This database is optional: if the connection option is not
+set, the nova api database will be used instead.
+""")
+
+placement_db_opts = [
+    cfg.StrOpt('connection',
+        help='',
+        secret=True),
+    cfg.StrOpt('connection_parameters',
+        default='',
+        help=''),
+    cfg.BoolOpt('sqlite_synchronous',
+        default=True,
+        help=''),
+    cfg.StrOpt('slave_connection',
+        secret=True,
+        help=''),
+    cfg.StrOpt('mysql_sql_mode',
+        default='TRADITIONAL',
+        help=''),
+    cfg.IntOpt('connection_recycle_time',
+        default=3600,
+        help=''),
+    cfg.IntOpt('max_pool_size',
+        help=''),
+    cfg.IntOpt('max_retries',
+        default=10,
+        help=''),
+    cfg.IntOpt('retry_interval',
+        default=10,
+        help=''),
+    cfg.IntOpt('max_overflow',
+        help=''),
+    cfg.IntOpt('connection_debug',
+        default=0,
+        help=''),
+    cfg.BoolOpt('connection_trace',
+        default=False,
+        help=''),
+    cfg.IntOpt('pool_timeout',
+        help=''),
+]  # noqa
+
+
+def register_opts(conf):
+    oslo_db_options.set_defaults(conf, connection=_DEFAULT_SQL_CONNECTION)
+    conf.register_opts(api_db_opts, group=api_db_group)
+    conf.register_opts(placement_db_opts, group=placement_db_group)
+
+
+def list_opts():
+    # NOTE(markus_z): 2016-04-04: If we list the oslo_db_options here, they
+    # get emitted twice(!) in the "sample.conf" file. First under the
+    # namespace "nova.conf" and second under the namespace "oslo.db". This
+    # is due to the setting in file "etc/nova/nova-config-generator.conf".
+    # As I think it is useful to have the "oslo.db" namespace information
+    # in the "sample.conf" file, I omit the listing of the "oslo_db_options"
+    # here.
+    global _ENRICHED
+    if not _ENRICHED:
+        enrich_help_text(api_db_opts)
+        enrich_help_text(placement_db_opts)
+        _ENRICHED = True
+    return {
+        api_db_group: api_db_opts,
+        placement_db_group: placement_db_opts,
+    }
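
For illustration only (not part of the patch): after register_opts() the stock oslo.db [database] options pick up the sqlite default defined above, while the extra groups stay unset until configured:

    # Illustration only: inspect the registered database options.
    import gosbs.conf

    CONF = gosbs.conf.CONF
    print(CONF.database.connection)       # sqlite file under state_path by default
    print(CONF.api_database.connection)   # None until set in the config file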

diff --git a/gosbs/conf/keystone.py b/gosbs/conf/keystone.py
new file mode 100644
index 0000000..4a7dd57
--- /dev/null
+++ b/gosbs/conf/keystone.py
@@ -0,0 +1,72 @@
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/conf/keystone.py
+
+from keystoneauth1 import loading as ks_loading
+from oslo_config import cfg
+
+from gosbs.conf import utils as confutils
+
+
+DEFAULT_SERVICE_TYPE = 'identity'
+
+keystone_group = cfg.OptGroup(
+    'keystone',
+    title='Keystone Options',
+    help='Configuration options for the identity service')
+
+keystone_opts = [
+    cfg.StrOpt('auth_version',
+               default='3',
+               help='API version of the admin Identity API endpoint. (string value)'),
+    cfg.StrOpt('identity_interface',
+               default='',
+               help=''),
+    cfg.StrOpt('auth_url',
+               default='',
+               help=''),
+    cfg.StrOpt('project_domain_name',
+               default='',
+               help=''),
+    cfg.StrOpt('user_domain_name',
+               default='',
+               help=''),
+    cfg.StrOpt('project_id',
+               default='',
+               help=''),
+    cfg.StrOpt('username',
+               default='',
+               help=''),
+    cfg.StrOpt('password',
+               secret=True,
+               default='',
+               help=''),
+]
+
+
+def register_opts(conf):
+    conf.register_group(keystone_group)
+    confutils.register_ksa_opts(conf, keystone_group.name,
+                                DEFAULT_SERVICE_TYPE, include_auth=True)
+    conf.register_opts(keystone_opts, group=keystone_group)
+
+
+def list_opts():
+    return {
+        keystone_group: (
+            ks_loading.get_session_conf_options() +
+            confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE) +
+            keystone_opts
+        )
+    }

diff --git a/gosbs/conf/netconf.py b/gosbs/conf/netconf.py
new file mode 100644
index 0000000..5fe0f90
--- /dev/null
+++ b/gosbs/conf/netconf.py
@@ -0,0 +1,94 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import socket
+
+from oslo_config import cfg
+from oslo_utils import netutils
+
+
+netconf_opts = [
+    cfg.StrOpt("my_ip",
+        default=netutils.get_my_ipv4(),
+        sample_default='<host_ipv4>',
+        help="""
+The IP address which the host is using to connect to the management network.
+
+Possible values:
+
+* String with valid IP address. Default is IPv4 address of this host.
+
+Related options:
+
+* my_block_storage_ip
+"""),
+    cfg.StrOpt("my_block_storage_ip",
+        default="$my_ip",
+        help="""
+The IP address which is used to connect to the block storage network.
+
+Possible values:
+
+* String with valid IP address. Default is IP address of this host.
+
+Related options:
+
+* my_ip - if my_block_storage_ip is not set, then my_ip value is used.
+"""),
+    cfg.StrOpt("host",
+        default=socket.gethostname(),
+        sample_default='<current_hostname>',
+        help="""
+Hostname, FQDN or IP address of this host.
+
+Used as:
+
+* the oslo.messaging queue name for nova-compute worker
+* we use this value for the binding_host sent to neutron. This means if you use
+  a neutron agent, it should have the same value for host.
+* cinder host attachment information
+
+Must be valid within AMQP key.
+
+Possible values:
+
+* String with hostname, FQDN or IP address. Default is hostname of this host.
+"""),
+    # TODO(sfinucan): This option is tied into the XenAPI, VMWare and Libvirt
+    # drivers.
+    # We should remove this dependency by either adding a new opt for each
+    # driver or simply removing the offending code. Until then we cannot
+    # deprecate this option.
+    cfg.BoolOpt("flat_injected",
+        default=False,
+        help="""
+This option determines whether the network setup information is injected into
+the VM before it is booted. While it was originally designed to be used only
+by nova-network, it is also used by the vmware and xenapi virt drivers to
+control whether network information is injected into a VM. The libvirt virt
+driver also uses it when we use config_drive to configure network to control
+whether network information is injected into a VM.
+"""),
+]
+
+
+def register_opts(conf):
+    conf.register_opts(netconf_opts)
+
+
+def list_opts():
+    return {'DEFAULT': netconf_opts}

diff --git a/gosbs/conf/notifications.py b/gosbs/conf/notifications.py
new file mode 100644
index 0000000..a5946dd
--- /dev/null
+++ b/gosbs/conf/notifications.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2016 Intel, Inc.
+# Copyright (c) 2013 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+notifications_group = cfg.OptGroup(
+    name='notifications',
+    title='Notifications options',
+    help="""
+Most of the actions in Nova which manipulate the system state generate
+notifications which are posted to the messaging component (e.g. RabbitMQ) and
+can be consumed by any service outside the OpenStack. More technical details
+at https://docs.openstack.org/nova/latest/reference/notifications.html
+""")
+
+ALL_OPTS = [
+    cfg.StrOpt(
+        'notify_on_state_change',
+        choices=[
+            (None, 'no notifications'),
+            ('vm_state', 'Notifications are sent with VM state transition '
+             'information in the ``old_state`` and ``state`` fields. The '
+             '``old_task_state`` and ``new_task_state`` fields will be set to '
+             'the current task_state of the instance'),
+            ('vm_and_task_state', 'Notifications are sent with VM and task '
+             'state transition information'),
+        ],
+        deprecated_group='DEFAULT',
+        help="""
+If set, send compute.instance.update notifications on
+instance state changes.
+
+Please refer to
+https://docs.openstack.org/nova/latest/reference/notifications.html for
+additional information on notifications.
+"""),
+
+    cfg.StrOpt(
+        'default_level',
+        default='INFO',
+        choices=('DEBUG', 'INFO', 'WARN', 'ERROR', 'CRITICAL'),
+        deprecated_group='DEFAULT',
+        deprecated_name='default_notification_level',
+        help="Default notification level for outgoing notifications."),
+    cfg.StrOpt(
+        'notification_format',
+        default='unversioned',
+        choices=[
+            ('both', 'Both the legacy unversioned and the new versioned '
+             'notifications are emitted'),
+            ('versioned', 'Only the new versioned notifications are emitted'),
+            ('unversioned', 'Only the legacy unversioned notifications are '
+             'emitted'),
+        ],
+        deprecated_group='DEFAULT',
+        help="""
+Specifies which notification format shall be emitted by nova.
+
+The versioned notification interface is in feature parity with the legacy
+interface and is actively developed, so new consumers should use the
+versioned interface.
+
+However, the legacy interface is heavily used by ceilometer and other mature
+OpenStack components so it remains the default.
+
+Note that notifications can be completely disabled by setting ``driver=noop``
+in the ``[oslo_messaging_notifications]`` group.
+
+The list of versioned notifications is visible in
+https://docs.openstack.org/nova/latest/reference/notifications.html
+"""),
+    cfg.ListOpt(
+        'versioned_notifications_topics',
+        default=['versioned_notifications'],
+        help="""
+Specifies the topics for the versioned notifications issued by nova.
+
+The default value is fine for most deployments and rarely needs to be changed.
+However, if you have a third-party service that consumes versioned
+notifications, it might be worth getting a topic for that service.
+Nova will send a message containing a versioned notification payload to each
+topic queue in this list.
+
+The list of versioned notifications is visible in
+https://docs.openstack.org/nova/latest/reference/notifications.html
+"""),
+    cfg.BoolOpt(
+        'bdms_in_notifications',
+        default=False,
+        help="""
+If enabled, include block device information in the versioned notification
+payload. Sending block device information is disabled by default as providing
+that information can incur some overhead on the system since the information
+may need to be loaded from the database.
+""")
+]
+
+
+def register_opts(conf):
+    conf.register_group(notifications_group)
+    conf.register_opts(ALL_OPTS, group=notifications_group)
+
+
+def list_opts():
+    return {notifications_group: ALL_OPTS}

diff --git a/gosbs/conf/opts.py b/gosbs/conf/opts.py
new file mode 100644
index 0000000..9aed6b2
--- /dev/null
+++ b/gosbs/conf/opts.py
@@ -0,0 +1,79 @@
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""
+This is the single point of entry to generate the sample configuration
+file for Gosbs. It collects all the necessary info from the other modules
+in this package. It is assumed that:
+
+* every other module in this package has a 'list_opts' function which
+  return a dict where
+  * the keys are strings which are the group names
+  * the value of each key is a list of config options for that group
+* the gosbs.conf package doesn't have further packages with config options
+* this module is only used in the context of sample file generation
+"""
+
+import collections
+import importlib
+import os
+import pkgutil
+
+LIST_OPTS_FUNC_NAME = "list_opts"
+
+
+def _tupleize(dct):
+    """Take the dict of options and convert to the 2-tuple format."""
+    return [(key, val) for key, val in dct.items()]
+
+
+def list_opts():
+    opts = collections.defaultdict(list)
+    module_names = _list_module_names()
+    imported_modules = _import_modules(module_names)
+    _append_config_options(imported_modules, opts)
+    return _tupleize(opts)
+
+
+def _list_module_names():
+    module_names = []
+    package_path = os.path.dirname(os.path.abspath(__file__))
+    for _, modname, ispkg in pkgutil.iter_modules(path=[package_path]):
+        if modname == "opts" or ispkg:
+            continue
+        else:
+            module_names.append(modname)
+    return module_names
+
+
+def _import_modules(module_names):
+    imported_modules = []
+    for modname in module_names:
+        mod = importlib.import_module("gosbs.conf." + modname)
+        if not hasattr(mod, LIST_OPTS_FUNC_NAME):
+            msg = "The module 'gosbs.conf.%s' should have a '%s' "\
+                  "function which returns the config options." % \
+                  (modname, LIST_OPTS_FUNC_NAME)
+            raise Exception(msg)
+        else:
+            imported_modules.append(mod)
+    return imported_modules
+
+
+def _append_config_options(imported_modules, config_options):
+    for mod in imported_modules:
+        configs = mod.list_opts()
+        for key, val in configs.items():
+            config_options[key].extend(val)
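
For illustration only (not part of the patch): each sibling module is expected to expose list_opts() in this shape, for example the scheduler options added elsewhere in this commit:

    # Illustration only: the per-module contract that list_opts() above relies on.
    from gosbs.conf import scheduler

    for group, options in scheduler.list_opts().items():
        print(group.name, [opt.name for opt in options])
    # -> scheduler ['git_mirror_url', 'db_project_repo']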

diff --git a/gosbs/conf/paths.py b/gosbs/conf/paths.py
new file mode 100644
index 0000000..f4b7f6c
--- /dev/null
+++ b/gosbs/conf/paths.py
@@ -0,0 +1,106 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/conf/paths.py
+# Add repopath
+
+import os
+import sys
+
+from oslo_config import cfg
+
+ALL_OPTS = [
+    cfg.StrOpt('pybasedir',
+        default=os.path.abspath(os.path.join(os.path.dirname(__file__),
+                                             '../../')),
+        sample_default='<Path>',
+        help="""
+The directory where the Nova python modules are installed.
+
+This directory is used to store template files for networking and remote
+console access. It is also the default path for other config options which
+need to persist Nova internal data. It is very unlikely that you need to
+change this option from its default value.
+
+Possible values:
+
+* The full path to a directory.
+
+Related options:
+
+* ``state_path``
+"""),
+    cfg.StrOpt('bindir',
+        default=os.path.join(sys.prefix, 'local', 'bin'),
+        help="""
+The directory where the Nova binaries are installed.
+
+This option is only relevant if the networking capabilities from Nova are
+used (see services below). Nova's networking capabilities are targeted to
+be fully replaced by Neutron in the future. It is very unlikely that you need
+to change this option from its default value.
+
+Possible values:
+
+* The full path to a directory.
+"""),
+
+    cfg.StrOpt('state_path',
+        default='$pybasedir',
+        help="""
+The top-level directory for maintaining Nova's state.
+
+This directory is used to store Nova's internal state. It is used by a
+variety of other config options which derive from this. In some scenarios
+(for example migrations) it makes sense to use a storage location which is
+shared between multiple compute hosts (for example via NFS). Unless the
+option ``instances_path`` gets overwritten, this directory can grow very
+large.
+
+Possible values:
+
+* The full path to a directory. Defaults to value provided in ``pybasedir``.
+"""),
+    cfg.StrOpt(
+        'repopath',
+        help="""
+Explicitly specify the repos working directory.
+"""),
+]
+
+
+def basedir_def(*args):
+    """Return an uninterpolated path relative to $pybasedir."""
+    return os.path.join('$pybasedir', *args)
+
+
+def bindir_def(*args):
+    """Return an uninterpolated path relative to $bindir."""
+    return os.path.join('$bindir', *args)
+
+
+def state_path_def(*args):
+    """Return an uninterpolated path relative to $state_path."""
+    return os.path.join('$state_path', *args)
+
+
+def register_opts(conf):
+    conf.register_opts(ALL_OPTS)
+
+
+def list_opts():
+    return {"DEFAULT": ALL_OPTS}
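
For illustration only (not part of the patch): the helpers return uninterpolated templates that oslo.config expands when the option is read:

    # Illustration only.
    from gosbs.conf import paths

    print(paths.state_path_def('gosbs.sqlite'))   # '$state_path/gosbs.sqlite'
    print(paths.basedir_def('repos'))             # '$pybasedir/repos'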

diff --git a/gosbs/conf/rpc.py b/gosbs/conf/rpc.py
new file mode 100644
index 0000000..a74ef10
--- /dev/null
+++ b/gosbs/conf/rpc.py
@@ -0,0 +1,46 @@
+# Copyright 2018 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+rpc_opts = [
+    cfg.IntOpt("long_rpc_timeout",
+        default=1800,
+        help="""
+This option allows setting an alternate timeout value for RPC calls
+that have the potential to take a long time. If set, RPC calls to
+other services will use this value for the timeout (in seconds)
+instead of the global rpc_response_timeout value.
+
+Operations with RPC calls that utilize this value:
+
+* live migration
+
+Related options:
+
+* rpc_response_timeout
+"""),
+]
+
+
+ALL_OPTS = rpc_opts
+
+
+def register_opts(conf):
+    conf.register_opts(ALL_OPTS)
+
+
+def list_opts():
+    return {'DEFAULT': ALL_OPTS}

diff --git a/gosbs/conf/scheduler.py b/gosbs/conf/scheduler.py
new file mode 100644
index 0000000..7b38d53
--- /dev/null
+++ b/gosbs/conf/scheduler.py
@@ -0,0 +1,37 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+scheduler_group = cfg.OptGroup(name="scheduler",
+                               title="Scheduler configuration")
+
+scheduler_opts = [
+    cfg.StrOpt(
+        'git_mirror_url',
+        help="""
+Explicitly specify the mirror git url.
+"""),
+    cfg.StrOpt(
+        'db_project_repo',
+        help="""
+Explicitly specify the database project repo.
+"""),
+]
+
+def register_opts(conf):
+    conf.register_group(scheduler_group)
+    conf.register_opts(scheduler_opts, group=scheduler_group)
+
+def list_opts():
+    return {scheduler_group: scheduler_opts}

diff --git a/gosbs/conf/service.py b/gosbs/conf/service.py
new file mode 100644
index 0000000..6a57efa
--- /dev/null
+++ b/gosbs/conf/service.py
@@ -0,0 +1,169 @@
+# needs:check_deprecation_status
+
+
+# Copyright 2015 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+service_opts = [
+    # TODO(johngarbutt) we need a better default and minimum, in a backwards
+    # compatible way for report_interval
+    cfg.IntOpt('report_interval',
+               default=10,
+               help="""
+Number of seconds indicating how frequently the state of services on a
+given hypervisor is reported. Nova needs to know this to determine the
+overall health of the deployment.
+
+Related Options:
+
+* service_down_time
+  report_interval should be less than service_down_time. If service_down_time
+  is less than report_interval, services will routinely be considered down,
+  because they report in too rarely.
+"""),
+    # TODO(johngarbutt) the code enforces the min value here, but we could
+    # do to add some min value here, once we sort out report_interval
+    cfg.IntOpt('service_down_time',
+               default=60,
+               help="""
+Maximum time in seconds since last check-in for up service
+
+Each compute node periodically updates their database status based on the
+specified report interval. If the compute node hasn't updated the status
+for more than service_down_time, then the compute node is considered down.
+
+Related Options:
+
+* report_interval (service_down_time should not be less than report_interval)
+* scheduler.periodic_task_interval
+"""),
+    cfg.BoolOpt('periodic_enable',
+               default=True,
+               help="""
+Enable periodic tasks.
+
+If set to true, this option allows services to periodically run tasks
+on the manager.
+
+In case of running multiple schedulers or conductors you may want to run
+periodic tasks on only one host - in this case disable this option for all
+hosts but one.
+"""),
+    cfg.IntOpt('periodic_fuzzy_delay',
+               default=60,
+               min=0,
+               help="""
+Number of seconds to randomly delay when starting the periodic task
+scheduler to reduce stampeding.
+
+When compute workers are restarted in unison across a cluster,
+they all end up running the periodic tasks at the same time
+causing problems for the external services. To mitigate this
+behavior, periodic_fuzzy_delay option allows you to introduce a
+random initial delay when starting the periodic task scheduler.
+
+Possible Values:
+
+* Any positive integer (in seconds)
+* 0 : disable the random delay
+"""),
+    cfg.ListOpt('enabled_apis',
+                item_type=cfg.types.String(choices=['osapi_compute',
+                                                    'metadata']),
+                default=['osapi_compute', 'metadata'],
+                help="List of APIs to be enabled by default."),
+    cfg.ListOpt('enabled_ssl_apis',
+                default=[],
+                help="""
+List of APIs with enabled SSL.
+
+Nova provides SSL support for the API servers. enabled_ssl_apis option
+allows configuring the SSL support.
+"""),
+    cfg.StrOpt('osapi_compute_listen',
+               default="0.0.0.0",
+               help="""
+IP address on which the OpenStack API will listen.
+
+The OpenStack API service listens on this IP address for incoming
+requests.
+"""),
+    cfg.PortOpt('osapi_compute_listen_port',
+               default=8774,
+               help="""
+Port on which the OpenStack API will listen.
+
+The OpenStack API service listens on this port number for incoming
+requests.
+"""),
+    cfg.IntOpt('osapi_compute_workers',
+               min=1,
+               help="""
+Number of workers for OpenStack API service. The default will be the number
+of CPUs available.
+
+OpenStack API services can be configured to run as multi-process (workers).
+This overcomes the problem of reduction in throughput when API request
+concurrency increases. OpenStack API service will run in the specified
+number of processes.
+
+Possible Values:
+
+* Any positive integer
+* None (default value)
+"""),
+    cfg.StrOpt('metadata_listen',
+               default="0.0.0.0",
+               help="""
+IP address on which the metadata API will listen.
+
+The metadata API service listens on this IP address for incoming
+requests.
+"""),
+    cfg.PortOpt('metadata_listen_port',
+               default=8775,
+               help="""
+Port on which the metadata API will listen.
+
+The metadata API service listens on this port number for incoming
+requests.
+"""),
+    cfg.IntOpt('metadata_workers',
+               min=1,
+               help="""
+Number of workers for metadata service. If not specified the number of
+available CPUs will be used.
+
+The metadata service can be configured to run as multi-process (workers).
+This overcomes the problem of reduction in throughput when API request
+concurrency increases. The metadata service will run in the specified
+number of processes.
+
+Possible Values:
+
+* Any positive integer
+* None (default value)
+"""),
+]
+
+
+def register_opts(conf):
+    conf.register_opts(service_opts)
+
+
+def list_opts():
+    return {'DEFAULT': service_opts}

diff --git a/gosbs/conf/upgrade_levels.py b/gosbs/conf/upgrade_levels.py
new file mode 100644
index 0000000..767ff59
--- /dev/null
+++ b/gosbs/conf/upgrade_levels.py
@@ -0,0 +1,210 @@
+# Copyright 2016 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_config import cfg
+
+upgrade_group = cfg.OptGroup('upgrade_levels',
+        title='Upgrade levels Options',
+        help="""
+upgrade_levels options are used to set version cap for RPC
+messages sent between different nova services.
+
+By default all services send messages using the latest version
+they know about.
+
+The compute upgrade level is an important part of rolling upgrades
+where old and new nova-compute services run side by side.
+
+The other options can largely be ignored, and are only kept to
+help with a possible future backport issue.
+""")
+
+# TODO(sneti): Add default=auto for compute
+upgrade_levels_opts = [
+    cfg.StrOpt('compute',
+        help="""
+Compute RPC API version cap.
+
+By default, we always send messages using the most recent version
+the client knows about.
+
+Where you have old and new compute services running, you should set
+this to the lowest deployed version. This is to guarantee that all
+services never send messages that one of the compute nodes can't
+understand. Note that we only support upgrading from release N to
+release N+1.
+
+Set this option to "auto" if you want to let the compute RPC module
+automatically determine what version to use based on the service
+versions in the deployment.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* 'auto': Automatically determines what version to use based on
+  the service versions in the deployment.
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('cells',
+        help="""
+Cells RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('intercell',
+        help="""
+Intercell RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt("cert",
+        deprecated_for_removal=True,
+        deprecated_since='18.0.0',
+        deprecated_reason="""
+The nova-cert service was removed in 16.0.0 (Pike) so this option
+is no longer used.
+""",
+        help="""
+Cert RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt("scheduler",
+        help="""
+Scheduler RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt("updater",
+        help="""
+Updater RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('conductor',
+        help="""
+Conductor RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('console',
+        help="""
+Console RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('consoleauth',
+        deprecated_for_removal=True,
+        deprecated_since='18.0.0',
+        deprecated_reason="""
+The nova-consoleauth service was deprecated in 18.0.0 (Rocky) and will be
+removed in an upcoming release.
+""",
+        help="""
+Consoleauth RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('network',
+        deprecated_for_removal=True,
+        deprecated_since='18.0.0',
+        deprecated_reason="""
+The nova-network service was deprecated in 14.0.0 (Newton) and will be
+removed in an upcoming release.
+""",
+        help="""
+Network RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+"""),
+    cfg.StrOpt('baseapi',
+        help="""
+Base API RPC API version cap.
+
+Possible values:
+
+* By default send the latest version the client knows about
+* A string representing a version number in the format 'N.N';
+  for example, possible values might be '1.12' or '2.0'.
+* An OpenStack release name, in lower case, such as 'mitaka' or
+  'liberty'.
+""")
+]
+
+
+def register_opts(conf):
+    conf.register_group(upgrade_group)
+    conf.register_opts(upgrade_levels_opts, group=upgrade_group)
+
+
+def list_opts():
+    return {upgrade_group: upgrade_levels_opts}
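
Because the group is named 'upgrade_levels', these caps map to an
[upgrade_levels] section in gosbs.conf. A minimal sketch of reading and
overriding a cap, assuming gosbs.conf registers this module at import time the
way nova.conf does:

    import gosbs.conf

    CONF = gosbs.conf.CONF

    # Unset means "send the newest RPC version the client knows about".
    cap = CONF.upgrade_levels.scheduler  # None, a version such as '1.12',
                                         # or a release name such as 'mitaka'
    # Tests or tooling can pin a cap programmatically:
    CONF.set_override('scheduler', '1.0', group='upgrade_levels')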

diff --git a/gosbs/conf/utils.py b/gosbs/conf/utils.py
new file mode 100644
index 0000000..da96444
--- /dev/null
+++ b/gosbs/conf/utils.py
@@ -0,0 +1,91 @@
+# Copyright 2017 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""Common utilities for conf providers.
+
+This module does not provide any actual conf options.
+"""
+from keystoneauth1 import loading as ks_loading
+from oslo_config import cfg
+
+
+_ADAPTER_VERSION_OPTS = ('version', 'min_version', 'max_version')
+
+
+def get_ksa_adapter_opts(default_service_type, deprecated_opts=None):
+    """Get auth, Session, and Adapter conf options from keystonauth1.loading.
+
+    :param default_service_type: Default for the service_type conf option on
+                                 the Adapter.
+    :param deprecated_opts: dict of deprecated opts to register with the ksa
+                            Adapter opts.  Works the same as the
+                            deprecated_opts kwarg to:
+                    keystoneauth1.loading.session.Session.register_conf_options
+    :return: List of cfg.Opts.
+    """
+    opts = ks_loading.get_adapter_conf_options(include_deprecated=False,
+                                               deprecated_opts=deprecated_opts)
+
+    for opt in opts[:]:
+        # Remove version-related opts.  Required/supported versions are
+        # something the code knows about, not the operator.
+        if opt.dest in _ADAPTER_VERSION_OPTS:
+            opts.remove(opt)
+
+    # Override defaults that make sense for nova
+    cfg.set_defaults(opts,
+                     valid_interfaces=['internal', 'public'],
+                     service_type=default_service_type)
+    return opts
+
+
+def _dummy_opt(name):
+    # A config option that can't be set by the user, so it behaves as if it's
+    # ignored; but consuming code may expect it to be present in a conf group.
+    return cfg.Opt(name, type=lambda x: None)
+
+
+def register_ksa_opts(conf, group, default_service_type, include_auth=True,
+                      deprecated_opts=None):
+    """Register keystoneauth auth, Session, and Adapter opts.
+
+    :param conf: oslo_config.cfg.CONF in which to register the options
+    :param group: Conf group, or string name thereof, in which to register the
+                  options.
+    :param default_service_type: Default for the service_type conf option on
+                                 the Adapter.
+    :param include_auth: For service types where Nova is acting on behalf of
+                         the user, auth should come from the user context.
+                         In those cases, set this arg to False to avoid
+                         registering ksa auth options.
+    :param deprecated_opts: dict of deprecated opts to register with the ksa
+                            Session or Adapter opts.  See docstring for
+                            the deprecated_opts param of:
+                    keystoneauth1.loading.session.Session.register_conf_options
+    """
+    # ksa register methods need the group name as a string.  oslo doesn't care.
+    group = getattr(group, 'name', group)
+    ks_loading.register_session_conf_options(
+        conf, group, deprecated_opts=deprecated_opts)
+    if include_auth:
+        ks_loading.register_auth_conf_options(conf, group)
+    conf.register_opts(get_ksa_adapter_opts(
+        default_service_type, deprecated_opts=deprecated_opts), group=group)
+    # Have to register dummies for the version-related opts we removed
+    for name in _ADAPTER_VERSION_OPTS:
+        conf.register_opt(_dummy_opt(name), group=group)
+
+
+# NOTE(efried): Required for docs build.
+def list_opts():
+    return {}
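
A hedged sketch of how a gosbs.conf module might build on register_ksa_opts()
above; the 'keystone' group name and the 'identity' service type are
assumptions for illustration, not values taken from this commit:

    from oslo_config import cfg

    from gosbs.conf import utils as confutils

    DEFAULT_SERVICE_TYPE = 'identity'  # assumed example
    keystone_group = cfg.OptGroup('keystone', title='Keystone Options')

    def register_opts(conf):
        conf.register_group(keystone_group)
        confutils.register_ksa_opts(conf, keystone_group, DEFAULT_SERVICE_TYPE)

    def list_opts():
        # Adapter opts only; the ksa auth/session opts are registered
        # dynamically by register_ksa_opts().
        return {keystone_group:
                confutils.get_ksa_adapter_opts(DEFAULT_SERVICE_TYPE)}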

diff --git a/gosbs/config.py b/gosbs/config.py
new file mode 100644
index 0000000..6defbc2
--- /dev/null
+++ b/gosbs/config.py
@@ -0,0 +1,50 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+# Copyright 2012 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/config.py
+
+from oslo_log import log
+from oslo_utils import importutils
+
+from gosbs import middleware
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as sqlalchemy_api
+from gosbs import rpc
+from gosbs import version
+
+CONF = gosbs.conf.CONF
+
+def parse_args(argv, default_config_files=None, configure_db=True,
+               init_rpc=True):
+    log.register_options(CONF)
+    extra_default_log_levels = []
+
+    log.set_defaults(default_log_levels=log.get_default_log_levels() +
+                     extra_default_log_levels)
+    rpc.set_defaults(control_exchange='gosbs')
+    middleware.set_defaults()
+
+    CONF(argv[1:],
+         project='gosbs',
+         version=version.version_string(),
+         default_config_files=default_config_files)
+
+    if init_rpc:
+        rpc.init(CONF)
+
+    if configure_db:
+        sqlalchemy_api.configure(CONF)
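
parse_args() is the one-stop setup call for a gosbs process: logging defaults,
the 'gosbs' RPC control exchange, config parsing, RPC initialization and DB
configuration. A minimal sketch of an entry point using it; the way the actual
service object is created and started is not shown:

    import sys

    from oslo_log import log as logging

    from gosbs import config

    def main():
        config.parse_args(sys.argv)
        logging.setup(config.CONF, 'gosbs')
        # ... create and start the scheduler service here ...

    if __name__ == '__main__':
        main()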

diff --git a/gosbs/context.py b/gosbs/context.py
new file mode 100644
index 0000000..20df39c
--- /dev/null
+++ b/gosbs/context.py
@@ -0,0 +1,562 @@
+# Copyright 2011 OpenStack Foundation
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/context.py
+
+"""RequestContext: context for requests that persist through all of nova."""
+
+from contextlib import contextmanager
+import copy
+import warnings
+
+import eventlet.queue
+import eventlet.timeout
+from keystoneauth1.access import service_catalog as ksa_service_catalog
+from keystoneauth1 import plugin
+from oslo_context import context
+from oslo_db.sqlalchemy import enginefacade
+from oslo_log import log as logging
+from oslo_utils import timeutils
+import six
+
+from gosbs import exception
+from gosbs.i18n import _
+from gosbs import objects
+from gosbs import policy
+from gosbs import utils
+
+LOG = logging.getLogger(__name__)
+# TODO(melwitt): This cache should be cleared whenever WSGIService receives a
+# SIGHUP and periodically based on an expiration time. Currently, none of the
+# cell caches are purged, so neither is this one, for now.
+CELL_CACHE = {}
+# NOTE(melwitt): Used for the scatter-gather utility to indicate we timed out
+# waiting for a result from a cell.
+did_not_respond_sentinel = object()
+# FIXME(danms): Keep a global cache of the cells we find the
+# first time we look. This needs to be refreshed on a timer or
+# trigger.
+CELLS = []
+# Timeout value for waiting for cells to respond
+CELL_TIMEOUT = 60
+
+
+class _ContextAuthPlugin(plugin.BaseAuthPlugin):
+    """A keystoneauth auth plugin that uses the values from the Context.
+
+    Ideally we would use the plugin provided by the auth_token middleware;
+    however, that plugin isn't serialized yet, so we construct one from the
+    serialized auth data.
+    """
+
+    def __init__(self, auth_token, sc):
+        super(_ContextAuthPlugin, self).__init__()
+
+        self.auth_token = auth_token
+        self.service_catalog = ksa_service_catalog.ServiceCatalogV2(sc)
+
+    def get_token(self, *args, **kwargs):
+        return self.auth_token
+
+    def get_endpoint(self, session, service_type=None, interface=None,
+                     region_name=None, service_name=None, **kwargs):
+        return self.service_catalog.url_for(service_type=service_type,
+                                            service_name=service_name,
+                                            interface=interface,
+                                            region_name=region_name)
+
+
+@enginefacade.transaction_context_provider
+class RequestContext(context.RequestContext):
+    """Security context and request information.
+
+    Represents the user taking a given action within the system.
+
+    """
+
+    def __init__(self, user_id=None, project_id=None, is_admin=None,
+                 read_deleted="no", remote_address=None, timestamp=None,
+                 quota_class=None, service_catalog=None,
+                 user_auth_plugin=None, **kwargs):
+        """:param read_deleted: 'no' indicates deleted records are hidden,
+                'yes' indicates deleted records are visible,
+                'only' indicates that *only* deleted records are visible.
+
+           :param overwrite: Set to False to ensure that the greenthread local
+                copy of the index is not overwritten.
+
+           :param instance_lock_checked: This is not used and will be removed
+                in a future release.
+
+           :param user_auth_plugin: The auth plugin for the current request's
+                authentication data.
+        """
+        if user_id:
+            kwargs['user_id'] = user_id
+        if project_id:
+            kwargs['project_id'] = project_id
+
+        if kwargs.pop('instance_lock_checked', None) is not None:
+            # TODO(mriedem): Let this be a hard failure in 19.0.0 (S).
+            warnings.warn("The 'instance_lock_checked' kwarg to "
+                          "nova.context.RequestContext is no longer used and "
+                          "will be removed in a future version.")
+
+        super(RequestContext, self).__init__(is_admin=is_admin, **kwargs)
+
+        self.read_deleted = read_deleted
+        self.remote_address = remote_address
+        if not timestamp:
+            timestamp = timeutils.utcnow()
+        if isinstance(timestamp, six.string_types):
+            timestamp = timeutils.parse_strtime(timestamp)
+        self.timestamp = timestamp
+
+        if service_catalog:
+            # Only include required parts of service_catalog
+            self.service_catalog = [s for s in service_catalog
+                if s.get('type') in ('image', 'block-storage', 'volumev3',
+                                     'key-manager', 'placement', 'network')]
+        else:
+            # if list is empty or none
+            self.service_catalog = []
+
+        # NOTE(markmc): this attribute is currently only used by the
+        # rs_limits turnstile pre-processor.
+        # See https://lists.launchpad.net/openstack/msg12200.html
+        self.quota_class = quota_class
+
+        # NOTE(dheeraj): The following attributes are used by cellsv2 to store
+        # connection information for connecting to the target cell.
+        # It is only manipulated using the target_cell contextmanager
+        # provided by this module
+        self.db_connection = None
+        self.mq_connection = None
+        self.cell_uuid = None
+
+        self.user_auth_plugin = user_auth_plugin
+        if self.is_admin is None:
+            self.is_admin = policy.check_is_admin(self)
+
+    def get_auth_plugin(self):
+        if self.user_auth_plugin:
+            return self.user_auth_plugin
+        else:
+            return _ContextAuthPlugin(self.auth_token, self.service_catalog)
+
+    def _get_read_deleted(self):
+        return self._read_deleted
+
+    def _set_read_deleted(self, read_deleted):
+        if read_deleted not in ('no', 'yes', 'only'):
+            raise ValueError(_("read_deleted can only be one of 'no', "
+                               "'yes' or 'only', not %r") % read_deleted)
+        self._read_deleted = read_deleted
+
+    def _del_read_deleted(self):
+        del self._read_deleted
+
+    read_deleted = property(_get_read_deleted, _set_read_deleted,
+                            _del_read_deleted)
+
+    def to_dict(self):
+        values = super(RequestContext, self).to_dict()
+        # FIXME(dims): defensive hasattr() checks need to be
+        # removed once we figure out why we are seeing stack
+        # traces
+        values.update({
+            'user_id': getattr(self, 'user_id', None),
+            'project_id': getattr(self, 'project_id', None),
+            'is_admin': getattr(self, 'is_admin', None),
+            'read_deleted': getattr(self, 'read_deleted', 'no'),
+            'remote_address': getattr(self, 'remote_address', None),
+            'timestamp': utils.strtime(self.timestamp) if hasattr(
+                self, 'timestamp') else None,
+            'request_id': getattr(self, 'request_id', None),
+            'quota_class': getattr(self, 'quota_class', None),
+            'user_name': getattr(self, 'user_name', None),
+            'service_catalog': getattr(self, 'service_catalog', None),
+            'project_name': getattr(self, 'project_name', None),
+        })
+        # NOTE(tonyb): This can be removed once we're certain that
+        # RequestContext contains 'is_admin_project'. We can only get away
+        # with this because we "know" the default value of
+        # 'is_admin_project', which is very fragile.
+        values.update({
+            'is_admin_project': getattr(self, 'is_admin_project', True),
+        })
+        return values
+
+    @classmethod
+    def from_dict(cls, values):
+        return super(RequestContext, cls).from_dict(
+            values,
+            user_id=values.get('user_id'),
+            project_id=values.get('project_id'),
+            # TODO(sdague): oslo.context has show_deleted, if
+            # possible, we should migrate to that in the future so we
+            # don't need to be different here.
+            read_deleted=values.get('read_deleted', 'no'),
+            remote_address=values.get('remote_address'),
+            timestamp=values.get('timestamp'),
+            quota_class=values.get('quota_class'),
+            service_catalog=values.get('service_catalog'),
+        )
+
+    def elevated(self, read_deleted=None):
+        """Return a version of this context with admin flag set."""
+        context = copy.copy(self)
+        # context.roles must be deepcopied to leave original roles
+        # without changes
+        context.roles = copy.deepcopy(self.roles)
+        context.is_admin = True
+
+        if 'admin' not in context.roles:
+            context.roles.append('admin')
+
+        if read_deleted is not None:
+            context.read_deleted = read_deleted
+
+        return context
+
+    def can(self, action, target=None, fatal=True):
+        """Verifies that the given action is valid on the target in this context.
+
+        :param action: string representing the action to be checked.
+        :param target: dictionary representing the object of the action;
+            for object creation this should be a dictionary representing the
+            location of the object, e.g. ``{'project_id': context.project_id}``.
+            If None, this default target is used:
+            ``{'project_id': self.project_id, 'user_id': self.user_id}``
+        :param fatal: if False, will return False when an exception.Forbidden
+           occurs.
+
+        :raises gosbs.exception.Forbidden: if verification fails and fatal is
+            True.
+
+        :return: returns a non-False value (not necessarily "True") if
+            authorized and False if not authorized and fatal is False.
+        """
+        if target is None:
+            target = {'project_id': self.project_id,
+                      'user_id': self.user_id}
+
+        try:
+            return policy.authorize(self, action, target)
+        except exception.Forbidden:
+            if fatal:
+                raise
+            return False
+
+    def to_policy_values(self):
+        policy = super(RequestContext, self).to_policy_values()
+        policy['is_admin'] = self.is_admin
+        return policy
+
+    def __str__(self):
+        return "<Context %s>" % self.to_dict()
+
+
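+# A hedged illustration of the policy helpers above; the rule name is an
+# example only, not one defined by this commit:
+#
+#     ctxt = get_admin_context()
+#     if ctxt.can('gosbs:services:update', fatal=False):
+#         ...
+#
+# elevated() returns an admin copy of a user context, and read_deleted='yes'
+# additionally makes soft-deleted rows visible to DB queries.
+
+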
+def get_context():
+    """A helper method to get a blank context.
+
+    Note that overwrite is False here so this context will not update the
+    greenthread-local stored context that is used when logging.
+    """
+    return RequestContext(user_id=None,
+                          project_id=None,
+                          is_admin=False,
+                          overwrite=False)
+
+
+def get_admin_context(read_deleted="no"):
+    # NOTE(alaski): This method should only be used when an admin context is
+    # necessary for the entirety of the context lifetime. If that's not the
+    # case please use get_context(), or create the RequestContext manually, and
+    # use context.elevated() where necessary. Some periodic tasks may use
+    # get_admin_context so that their database calls are not filtered on
+    # project_id.
+    return RequestContext(user_id=None,
+                          project_id=None,
+                          is_admin=True,
+                          read_deleted=read_deleted,
+                          overwrite=False)
+
+
+def is_user_context(context):
+    """Indicates if the request context is a normal user."""
+    if not context:
+        return False
+    if context.is_admin:
+        return False
+    if not context.user_id or not context.project_id:
+        return False
+    return True
+
+
+def require_context(ctxt):
+    """Raise exception.Forbidden() if context is not a user or an
+    admin context.
+    """
+    if not ctxt.is_admin and not is_user_context(ctxt):
+        raise exception.Forbidden()
+
+
+def authorize_project_context(context, project_id):
+    """Ensures a request has permission to access the given project."""
+    if is_user_context(context):
+        if not context.project_id:
+            raise exception.Forbidden()
+        elif context.project_id != project_id:
+            raise exception.Forbidden()
+
+
+def authorize_user_context(context, user_id):
+    """Ensures a request has permission to access the given user."""
+    if is_user_context(context):
+        if not context.user_id:
+            raise exception.Forbidden()
+        elif context.user_id != user_id:
+            raise exception.Forbidden()
+
+
+def authorize_quota_class_context(context, class_name):
+    """Ensures a request has permission to access the given quota class."""
+    if is_user_context(context):
+        if not context.quota_class:
+            raise exception.Forbidden()
+        elif context.quota_class != class_name:
+            raise exception.Forbidden()
+
+
+def set_target_cell(context, cell_mapping):
+    """Adds database connection information to the context
+    for communicating with the given target_cell.
+
+    This is used for permanently targeting a cell in a context.
+    Use this when you want all subsequent code to target a cell.
+
+    Passing None for cell_mapping will untarget the context.
+
+    :param context: The RequestContext to add connection information
+    :param cell_mapping: An objects.CellMapping object or None
+    """
+    global CELL_CACHE
+    if cell_mapping is not None:
+        # avoid circular import
+        from gosbs.db import api as db
+        from gosbs import rpc
+
+        # Synchronize access to the cache by multiple API workers.
+        @utils.synchronized(cell_mapping.uuid)
+        def get_or_set_cached_cell_and_set_connections():
+            try:
+                cell_tuple = CELL_CACHE[cell_mapping.uuid]
+            except KeyError:
+                db_connection_string = cell_mapping.database_connection
+                context.db_connection = db.create_context_manager(
+                    db_connection_string)
+                if not cell_mapping.transport_url.startswith('none'):
+                    context.mq_connection = rpc.create_transport(
+                        cell_mapping.transport_url)
+                context.cell_uuid = cell_mapping.uuid
+                CELL_CACHE[cell_mapping.uuid] = (context.db_connection,
+                                                 context.mq_connection)
+            else:
+                context.db_connection = cell_tuple[0]
+                context.mq_connection = cell_tuple[1]
+                context.cell_uuid = cell_mapping.uuid
+
+        get_or_set_cached_cell_and_set_connections()
+    else:
+        context.db_connection = None
+        context.mq_connection = None
+        context.cell_uuid = None
+
+
+@contextmanager
+def target_cell(context, cell_mapping):
+    """Yields a new context with connection information for a specific cell.
+
+    This function yields a copy of the provided context, which is targeted to
+    the referenced cell for MQ and DB connections.
+
+    Passing None for cell_mapping will yield an untargeted copy of the context.
+
+    :param context: The RequestContext to add connection information
+    :param cell_mapping: An objects.CellMapping object or None
+    """
+    # Create a sanitized copy of context by serializing and deserializing it
+    # (like we would do over RPC). This helps ensure that we have a clean
+    # copy of the context with all the tracked attributes, but without any
+    # of the hidden/private things we cache on a context. We do this to avoid
+    # unintentional sharing of cached thread-local data across threads.
+    # Specifically, this won't include any oslo_db-set transaction context, or
+    # any existing cell targeting.
+    cctxt = RequestContext.from_dict(context.to_dict())
+    set_target_cell(cctxt, cell_mapping)
+    yield cctxt
+
+
+def scatter_gather_cells(context, cell_mappings, timeout, fn, *args, **kwargs):
+    """Target cells in parallel and return their results.
+
+    The first parameter in the signature of the function to call for each cell
+    should be of type RequestContext.
+
+    :param context: The RequestContext for querying cells
+    :param cell_mappings: The CellMappings to target in parallel
+    :param timeout: The total time in seconds to wait for all the results to be
+                    gathered
+    :param fn: The function to call for each cell
+    :param args: The args for the function to call for each cell, not including
+                 the RequestContext
+    :param kwargs: The kwargs for the function to call for each cell
+    :returns: A dict {cell_uuid: result} containing the joined results. The
+              did_not_respond_sentinel will be returned if a cell did not
+              respond within the timeout. The exception object will
+              be returned if the call to a cell raised an exception. The
+              exception will be logged.
+    """
+    greenthreads = []
+    queue = eventlet.queue.LightQueue()
+    results = {}
+
+    def gather_result(cell_mapping, fn, context, *args, **kwargs):
+        cell_uuid = cell_mapping.uuid
+        try:
+            with target_cell(context, cell_mapping) as cctxt:
+                result = fn(cctxt, *args, **kwargs)
+        except Exception as e:
+            LOG.exception('Error gathering result from cell %s', cell_uuid)
+            result = e.__class__(e.args)
+        # The queue is already synchronized.
+        queue.put((cell_uuid, result))
+
+    for cell_mapping in cell_mappings:
+        greenthreads.append((cell_mapping.uuid,
+                             utils.spawn(gather_result, cell_mapping,
+                                         fn, context, *args, **kwargs)))
+
+    with eventlet.timeout.Timeout(timeout, exception.CellTimeout):
+        try:
+            while len(results) != len(greenthreads):
+                cell_uuid, result = queue.get()
+                results[cell_uuid] = result
+        except exception.CellTimeout:
+            # NOTE(melwitt): We'll fill in did_not_respond_sentinels at the
+            # same time we kill/wait for the green threads.
+            pass
+
+    # Kill the green threads still pending and wait on those we know are done.
+    for cell_uuid, greenthread in greenthreads:
+        if cell_uuid not in results:
+            greenthread.kill()
+            results[cell_uuid] = did_not_respond_sentinel
+            LOG.warning('Timed out waiting for response from cell %s',
+                        cell_uuid)
+        else:
+            greenthread.wait()
+
+    return results
+
+
+def load_cells():
+    global CELLS
+    if not CELLS:
+        CELLS = objects.CellMappingList.get_all(get_admin_context())
+        LOG.debug('Found %(count)i cells: %(cells)s',
+                  dict(count=len(CELLS),
+                       cells=','.join([c.identity for c in CELLS])))
+
+    if not CELLS:
+        LOG.error('No cells are configured, unable to continue')
+
+
+def is_cell_failure_sentinel(record):
+    return (record is did_not_respond_sentinel or
+            isinstance(record, Exception))
+
+
+def scatter_gather_skip_cell0(context, fn, *args, **kwargs):
+    """Target all cells except cell0 in parallel and return their results.
+
+    The first parameter in the signature of the function to call for
+    each cell should be of type RequestContext. There is a timeout for
+    waiting on all results to be gathered.
+
+    :param context: The RequestContext for querying cells
+    :param fn: The function to call for each cell
+    :param args: The args for the function to call for each cell, not including
+                 the RequestContext
+    :param kwargs: The kwargs for the function to call for each cell
+    :returns: A dict {cell_uuid: result} containing the joined results. The
+              did_not_respond_sentinel will be returned if a cell did not
+              respond within the timeout. The exception object will
+              be returned if the call to a cell raised an exception. The
+              exception will be logged.
+    """
+    load_cells()
+    cell_mappings = [cell for cell in CELLS if not cell.is_cell0()]
+    return scatter_gather_cells(context, cell_mappings, CELL_TIMEOUT,
+                                fn, *args, **kwargs)
+
+
+def scatter_gather_single_cell(context, cell_mapping, fn, *args, **kwargs):
+    """Target the provided cell and return its results or sentinels in case of
+    failure.
+
+    The first parameter in the signature of the function to call for each cell
+    should be of type RequestContext.
+
+    :param context: The RequestContext for querying cells
+    :param cell_mapping: The CellMapping to target
+    :param fn: The function to call for each cell
+    :param args: The args for the function to call for each cell, not including
+                 the RequestContext
+    :param kwargs: The kwargs for the function to call for this cell
+    :returns: A dict {cell_uuid: result} containing the joined results. The
+              did_not_respond_sentinel will be returned if the cell did not
+              respond within the timeout. The exception object will
+              be returned if the call to the cell raised an exception. The
+              exception will be logged.
+    """
+    return scatter_gather_cells(context, [cell_mapping], CELL_TIMEOUT, fn,
+                                *args, **kwargs)
+
+
+def scatter_gather_all_cells(context, fn, *args, **kwargs):
+    """Target all cells in parallel and return their results.
+
+    The first parameter in the signature of the function to call for
+    each cell should be of type RequestContext. There is a timeout for
+    waiting on all results to be gathered.
+
+    :param context: The RequestContext for querying cells
+    :param fn: The function to call for each cell
+    :param args: The args for the function to call for each cell, not including
+                 the RequestContext
+    :param kwargs: The kwargs for the function to call for each cell
+    :returns: A dict {cell_uuid: result} containing the joined results. The
+              did_not_respond_sentinel will be returned if a cell did not
+              respond within the timeout. The exception object will
+              be returned if the call to a cell raised an exception. The
+              exception will be logged.
+    """
+    load_cells()
+    return scatter_gather_cells(context, CELLS, CELL_TIMEOUT,
+                                fn, *args, **kwargs)
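
The cell-targeting and scatter-gather helpers are the core of this module. A
hedged sketch of how a caller might use them; service_get_all() is one of the
DB API wrappers added later in this commit, and the cell_mapping variable is
assumed to exist:

    from gosbs import context as gosbs_context
    from gosbs.db import api as db

    ctxt = gosbs_context.get_admin_context()

    # Pin a copy of the context to one cell for a block of DB/MQ work.
    with gosbs_context.target_cell(ctxt, cell_mapping) as cctxt:
        services = db.service_get_all(cctxt)

    # Or fan the same call out to every cell and collect per-cell results.
    results = gosbs_context.scatter_gather_all_cells(ctxt, db.service_get_all)
    for cell_uuid, result in results.items():
        if gosbs_context.is_cell_failure_sentinel(result):
            continue  # timed out or raised; the exception is already logged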

diff --git a/gosbs/db/__init__.py b/gosbs/db/__init__.py
new file mode 100644
index 0000000..8c26ee8
--- /dev/null
+++ b/gosbs/db/__init__.py
@@ -0,0 +1,13 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""Use nova.db.api instead. In the past this file imported * from there,
+which led to unwanted imports."""

diff --git a/gosbs/db/api.py b/gosbs/db/api.py
new file mode 100644
index 0000000..f8294a6
--- /dev/null
+++ b/gosbs/db/api.py
@@ -0,0 +1,1891 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# origin https://github.com/openstack/nova/blob/master/nova/db/api.py
+
+"""Defines interface for DB access.
+
+Functions in this module make up the public DB API. Call them through the
+gosbs.db.api namespace; gosbs.db no longer re-exports them (see
+gosbs/db/__init__.py).
+
+All functions in this module return objects that implement a dictionary-like
+interface. Currently, many of these objects are sqlalchemy objects that
+implement a dictionary interface. However, a future goal is to have all of
+these objects be simple dictionaries.
+
+"""
+
+from oslo_db import concurrency
+from oslo_log import log as logging
+
+import gosbs.conf
+from gosbs.db import constants
+
+
+CONF = gosbs.conf.CONF
+# NOTE(cdent): These constants are re-defined in this module to preserve
+# existing references to them.
+MAX_INT = constants.MAX_INT
+SQL_SP_FLOAT_MAX = constants.SQL_SP_FLOAT_MAX
+
+_BACKEND_MAPPING = {'sqlalchemy': 'gosbs.db.sqlalchemy.api'}
+
+
+IMPL = concurrency.TpoolDbapiWrapper(CONF, backend_mapping=_BACKEND_MAPPING)
+
+LOG = logging.getLogger(__name__)
+
+
+###################
+
+
+def constraint(**conditions):
+    """Return a constraint object suitable for use with some updates."""
+    return IMPL.constraint(**conditions)
+
+
+def equal_any(*values):
+    """Return an equality condition object suitable for use in a constraint.
+
+    Equal_any conditions require that a model object's attribute equal any
+    one of the given values.
+    """
+    return IMPL.equal_any(*values)
+
+
+def not_equal(*values):
+    """Return an inequality condition object suitable for use in a constraint.
+
+    Not_equal conditions require that a model object's attribute differs from
+    all of the given values.
+    """
+    return IMPL.not_equal(*values)
+
+
+def create_context_manager(connection):
+    """Return a context manager for a cell database connection."""
+    return IMPL.create_context_manager(connection=connection)
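+
+
+# A hedged illustration of how the constraint helpers above compose; the
+# column names are examples only, not a statement about the gosbs models.
+# A caller that has imported gosbs.db.api as db would do roughly:
+#
+#     db.instance_destroy(ctxt, instance_uuid,
+#                         constraint=db.constraint(
+#                             host=db.equal_any('builder1', 'builder2'),
+#                             task_state=db.not_equal('migrating')))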
+
+
+###################
+
+
+def select_db_reader_mode(f):
+    """Decorator to select synchronous or asynchronous reader mode.
+
+    The keyword argument 'use_slave' selects the reader mode: the asynchronous
+    reader is used if 'use_slave' is True, the synchronous reader otherwise.
+    """
+    return IMPL.select_db_reader_mode(f)
+
+
+###################
+
+
+def service_destroy(context, service_id):
+    """Destroy the service or raise if it does not exist."""
+    return IMPL.service_destroy(context, service_id)
+
+
+def service_get(context, service_id):
+    """Get a service or raise if it does not exist."""
+    return IMPL.service_get(context, service_id)
+
+
+def service_get_by_uuid(context, service_uuid):
+    """Get a service by it's uuid or raise ServiceNotFound if it does not
+    exist.
+    """
+    return IMPL.service_get_by_uuid(context, service_uuid)
+
+
+def service_get_minimum_version(context, binary):
+    """Get the minimum service version in the database."""
+    return IMPL.service_get_minimum_version(context, binary)
+
+
+def service_get_by_host_and_topic(context, host, topic):
+    """Get a service by hostname and topic it listens to."""
+    return IMPL.service_get_by_host_and_topic(context, host, topic)
+
+
+def service_get_by_topic(context, topic):
+    """Get a service by hostname and topic it listens to."""
+    return IMPL.service_get_by_topic(context, topic)
+
+
+def service_get_by_host_and_binary(context, host, binary):
+    """Get a service by hostname and binary."""
+    return IMPL.service_get_by_host_and_binary(context, host, binary)
+
+
+def service_get_all(context, disabled=None):
+    """Get all services."""
+    return IMPL.service_get_all(context, disabled)
+
+
+def service_get_all_by_topic(context, topic):
+    """Get all services for a given topic."""
+    return IMPL.service_get_all_by_topic(context, topic)
+
+
+def service_get_all_by_binary(context, binary, include_disabled=False):
+    """Get services for a given binary.
+
+    Includes disabled services if 'include_disabled' parameter is True
+    """
+    return IMPL.service_get_all_by_binary(context, binary,
+                                          include_disabled=include_disabled)
+
+
+def service_get_all_computes_by_hv_type(context, hv_type,
+                                        include_disabled=False):
+    """Get all compute services for a given hypervisor type.
+
+    Includes disabled services if 'include_disabled' parameter is True.
+    """
+    return IMPL.service_get_all_computes_by_hv_type(context, hv_type,
+        include_disabled=include_disabled)
+
+
+def service_get_all_by_host(context, host):
+    """Get all services for a given host."""
+    return IMPL.service_get_all_by_host(context, host)
+
+
+def service_get_by_compute_host(context, host):
+    """Get the service entry for a given compute host.
+
+    Returns the service entry joined with the compute_node entry.
+    """
+    return IMPL.service_get_by_compute_host(context, host)
+
+
+def service_create(context, values):
+    """Create a service from the values dictionary."""
+    return IMPL.service_create(context, values)
+
+
+def service_update(context, service_id, values):
+    """Set the given properties on a service and update it.
+
+    Raises NotFound if service does not exist.
+
+    """
+    return IMPL.service_update(context, service_id, values)
+
+
+###################
+
+
+def compute_node_get(context, compute_id):
+    """Get a compute node by its id.
+
+    :param context: The security context
+    :param compute_id: ID of the compute node
+
+    :returns: Dictionary-like object containing properties of the compute node
+
+    Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
+    """
+    return IMPL.compute_node_get(context, compute_id)
+
+
+# TODO(edleafe): remove once the compute node resource provider migration is
+# complete, and this distinction is no longer necessary.
+def compute_node_get_model(context, compute_id):
+    """Get a compute node sqlalchemy model object by its id.
+
+    :param context: The security context
+    :param compute_id: ID of the compute node
+
+    :returns: Sqlalchemy model object containing properties of the compute node
+
+    Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
+    """
+    return IMPL.compute_node_get_model(context, compute_id)
+
+
+def compute_nodes_get_by_service_id(context, service_id):
+    """Get a list of compute nodes by their associated service id.
+
+    :param context: The security context
+    :param service_id: ID of the associated service
+
+    :returns: List of dictionary-like objects, each containing properties of
+              the compute node, including its corresponding service and
+              statistics
+
+    Raises ServiceNotFound if service with the given ID doesn't exist.
+    """
+    return IMPL.compute_nodes_get_by_service_id(context, service_id)
+
+
+def compute_node_get_by_host_and_nodename(context, host, nodename):
+    """Get a compute node by its associated host and nodename.
+
+    :param context: The security context (admin)
+    :param host: Name of the host
+    :param nodename: Name of the node
+
+    :returns: Dictionary-like object containing properties of the compute node,
+              including its statistics
+
+    Raises ComputeHostNotFound if host with the given name doesn't exist.
+    """
+    return IMPL.compute_node_get_by_host_and_nodename(context, host, nodename)
+
+
+def compute_node_get_all(context):
+    """Get all computeNodes.
+
+    :param context: The security context
+
+    :returns: List of dictionaries each containing compute node properties
+    """
+    return IMPL.compute_node_get_all(context)
+
+
+def compute_node_get_all_mapped_less_than(context, mapped_less_than):
+    """Get all ComputeNode objects with specific mapped values.
+
+    :param context: The security context
+    :param mapped_less_than: Get compute nodes with mapped less than this
+                             value
+
+    :returns: List of dictionaries each containing compute node properties
+    """
+    return IMPL.compute_node_get_all_mapped_less_than(context,
+                                                      mapped_less_than)
+
+
+def compute_node_get_all_by_pagination(context, limit=None, marker=None):
+    """Get compute nodes by pagination.
+    :param context: The security context
+    :param limit: Maximum number of items to return
+    :param marker: The last item of the previous page, the next results after
+                   this value will be returned
+
+    :returns: List of dictionaries each containing compute node properties
+    """
+    return IMPL.compute_node_get_all_by_pagination(context,
+                                                   limit=limit, marker=marker)
+
+
+def compute_node_get_all_by_host(context, host):
+    """Get compute nodes by host name
+
+    :param context: The security context (admin)
+    :param host: Name of the host
+
+    :returns: List of dictionaries each containing compute node properties
+    """
+    return IMPL.compute_node_get_all_by_host(context, host)
+
+
+def compute_node_search_by_hypervisor(context, hypervisor_match):
+    """Get compute nodes by hypervisor hostname.
+
+    :param context: The security context
+    :param hypervisor_match: The hypervisor hostname
+
+    :returns: List of dictionary-like objects each containing compute node
+              properties
+    """
+    return IMPL.compute_node_search_by_hypervisor(context, hypervisor_match)
+
+
+def compute_node_create(context, values):
+    """Create a compute node from the values dictionary.
+
+    :param context: The security context
+    :param values: Dictionary containing compute node properties
+
+    :returns: Dictionary-like object containing the properties of the created
+              node, including its corresponding service and statistics
+    """
+    return IMPL.compute_node_create(context, values)
+
+
+def compute_node_update(context, compute_id, values):
+    """Set the given properties on a compute node and update it.
+
+    :param context: The security context
+    :param compute_id: ID of the compute node
+    :param values: Dictionary containing compute node properties to be updated
+
+    :returns: Dictionary-like object containing the properties of the updated
+              compute node, including its corresponding service and statistics
+
+    Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
+    """
+    return IMPL.compute_node_update(context, compute_id, values)
+
+
+def compute_node_delete(context, compute_id):
+    """Delete a compute node from the database.
+
+    :param context: The security context
+    :param compute_id: ID of the compute node
+
+    Raises ComputeHostNotFound if compute node with the given ID doesn't exist.
+    """
+    return IMPL.compute_node_delete(context, compute_id)
+
+
+def compute_node_statistics(context):
+    """Get aggregate statistics over all compute nodes.
+
+    :param context: The security context
+
+    :returns: Dictionary containing compute node characteristics summed up
+              over all the compute nodes, e.g. 'vcpus', 'free_ram_mb' etc.
+    """
+    return IMPL.compute_node_statistics(context)
+
+
+###################
+
+
+def certificate_create(context, values):
+    """Create a certificate from the values dictionary."""
+    return IMPL.certificate_create(context, values)
+
+
+def certificate_get_all_by_project(context, project_id):
+    """Get all certificates for a project."""
+    return IMPL.certificate_get_all_by_project(context, project_id)
+
+
+def certificate_get_all_by_user(context, user_id):
+    """Get all certificates for a user."""
+    return IMPL.certificate_get_all_by_user(context, user_id)
+
+
+def certificate_get_all_by_user_and_project(context, user_id, project_id):
+    """Get all certificates for a user and project."""
+    return IMPL.certificate_get_all_by_user_and_project(context,
+                                                        user_id,
+                                                        project_id)
+
+
+###################
+
+def floating_ip_get(context, id):
+    return IMPL.floating_ip_get(context, id)
+
+
+def floating_ip_get_pools(context):
+    """Returns a list of floating IP pools."""
+    return IMPL.floating_ip_get_pools(context)
+
+
+def floating_ip_allocate_address(context, project_id, pool,
+                                 auto_assigned=False):
+    """Allocate free floating IP from specified pool and return the address.
+
+    Raises if one is not available.
+
+    """
+    return IMPL.floating_ip_allocate_address(context, project_id, pool,
+                                             auto_assigned)
+
+
+def floating_ip_bulk_create(context, ips, want_result=True):
+    """Create a lot of floating IPs from the values dictionary.
+        :param want_result: If set to True, return floating IPs inserted
+    """
+    return IMPL.floating_ip_bulk_create(context, ips, want_result=want_result)
+
+
+def floating_ip_bulk_destroy(context, ips):
+    """Destroy a lot of floating IPs from the values dictionary."""
+    return IMPL.floating_ip_bulk_destroy(context, ips)
+
+
+def floating_ip_create(context, values):
+    """Create a floating IP from the values dictionary."""
+    return IMPL.floating_ip_create(context, values)
+
+
+def floating_ip_deallocate(context, address):
+    """Deallocate a floating IP by address."""
+    return IMPL.floating_ip_deallocate(context, address)
+
+
+def floating_ip_destroy(context, address):
+    """Destroy the floating_ip or raise if it does not exist."""
+    return IMPL.floating_ip_destroy(context, address)
+
+
+def floating_ip_disassociate(context, address):
+    """Disassociate a floating IP from a fixed IP by address.
+
+    :returns: the fixed IP record joined to network record or None
+              if the floating IP was not associated with a fixed IP.
+
+    """
+    return IMPL.floating_ip_disassociate(context, address)
+
+
+def floating_ip_fixed_ip_associate(context, floating_address,
+                                   fixed_address, host):
+    """Associate a floating IP to a fixed_ip by address.
+
+    :returns: the fixed IP record joined to network record or None
+              if the floating IP was already associated with the fixed IP.
+    """
+
+    return IMPL.floating_ip_fixed_ip_associate(context,
+                                               floating_address,
+                                               fixed_address,
+                                               host)
+
+
+def floating_ip_get_all(context):
+    """Get all floating IPs."""
+    return IMPL.floating_ip_get_all(context)
+
+
+def floating_ip_get_all_by_host(context, host):
+    """Get all floating IPs by host."""
+    return IMPL.floating_ip_get_all_by_host(context, host)
+
+
+def floating_ip_get_all_by_project(context, project_id):
+    """Get all floating IPs by project."""
+    return IMPL.floating_ip_get_all_by_project(context, project_id)
+
+
+def floating_ip_get_by_address(context, address):
+    """Get a floating IP by address or raise if it doesn't exist."""
+    return IMPL.floating_ip_get_by_address(context, address)
+
+
+def floating_ip_get_by_fixed_address(context, fixed_address):
+    """Get a floating IPs by fixed address."""
+    return IMPL.floating_ip_get_by_fixed_address(context, fixed_address)
+
+
+def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
+    """Get a floating IPs by fixed address."""
+    return IMPL.floating_ip_get_by_fixed_ip_id(context, fixed_ip_id)
+
+
+def floating_ip_update(context, address, values):
+    """Update a floating IP by address or raise if it doesn't exist."""
+    return IMPL.floating_ip_update(context, address, values)
+
+
+def dnsdomain_get_all(context):
+    """Get a list of all dnsdomains in our database."""
+    return IMPL.dnsdomain_get_all(context)
+
+
+def dnsdomain_register_for_zone(context, fqdomain, zone):
+    """Associated a DNS domain with an availability zone."""
+    return IMPL.dnsdomain_register_for_zone(context, fqdomain, zone)
+
+
+def dnsdomain_register_for_project(context, fqdomain, project):
+    """Associated a DNS domain with a project id."""
+    return IMPL.dnsdomain_register_for_project(context, fqdomain, project)
+
+
+def dnsdomain_unregister(context, fqdomain):
+    """Purge associations for the specified DNS zone."""
+    return IMPL.dnsdomain_unregister(context, fqdomain)
+
+
+def dnsdomain_get(context, fqdomain):
+    """Get the db record for the specified domain."""
+    return IMPL.dnsdomain_get(context, fqdomain)
+
+
+####################
+
+
+def migration_update(context, id, values):
+    """Update a migration instance."""
+    return IMPL.migration_update(context, id, values)
+
+
+def migration_create(context, values):
+    """Create a migration record."""
+    return IMPL.migration_create(context, values)
+
+
+def migration_get(context, migration_id):
+    """Finds a migration by the id."""
+    return IMPL.migration_get(context, migration_id)
+
+
+def migration_get_by_uuid(context, migration_uuid):
+    """Finds a migration by the migration uuid."""
+    return IMPL.migration_get_by_uuid(context, migration_uuid)
+
+
+def migration_get_by_id_and_instance(context, migration_id, instance_uuid):
+    """Finds a migration by the migration id and the instance uuid."""
+    return IMPL.migration_get_by_id_and_instance(context,
+                                                 migration_id,
+                                                 instance_uuid)
+
+
+def migration_get_by_instance_and_status(context, instance_uuid, status):
+    """Finds a migration by the instance uuid its migrating."""
+    return IMPL.migration_get_by_instance_and_status(context, instance_uuid,
+            status)
+
+
+def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
+        dest_compute):
+    """Finds all unconfirmed migrations within the confirmation window for
+    a specific destination compute host.
+    """
+    return IMPL.migration_get_unconfirmed_by_dest_compute(context,
+            confirm_window, dest_compute)
+
+
+def migration_get_in_progress_by_host_and_node(context, host, node):
+    """Finds all migrations for the given host + node  that are not yet
+    confirmed or reverted.
+    """
+    return IMPL.migration_get_in_progress_by_host_and_node(context, host, node)
+
+
+def migration_get_all_by_filters(context, filters, sort_keys=None,
+                                 sort_dirs=None, limit=None, marker=None):
+    """Finds all migrations using the provided filters."""
+    return IMPL.migration_get_all_by_filters(context, filters,
+                                             sort_keys=sort_keys,
+                                             sort_dirs=sort_dirs,
+                                             limit=limit, marker=marker)
+
+
+def migration_get_in_progress_by_instance(context, instance_uuid,
+                                          migration_type=None):
+    """Finds all migrations of an instance in progress."""
+    return IMPL.migration_get_in_progress_by_instance(context, instance_uuid,
+                                                      migration_type)
+
+
+def migration_get_by_sort_filters(context, sort_keys, sort_dirs, values):
+    """Get the uuid of the first migration in a sort order.
+
+    Return the first migration (uuid) of the set where each column value
+    is greater than or equal to the matching one in @values, for each key
+    in @sort_keys.
+    """
+    return IMPL.migration_get_by_sort_filters(context, sort_keys, sort_dirs,
+                                              values)
+
+
+####################
+
+
+def fixed_ip_associate(context, address, instance_uuid, network_id=None,
+                       reserved=False, virtual_interface_id=None):
+    """Associate fixed IP to instance.
+
+    Raises if fixed IP is not available.
+
+    """
+    return IMPL.fixed_ip_associate(context, address, instance_uuid, network_id,
+                                   reserved, virtual_interface_id)
+
+
+def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
+                            host=None, virtual_interface_id=None):
+    """Find free IP in network and associate it to instance or host.
+
+    Raises if one is not available.
+
+    """
+    return IMPL.fixed_ip_associate_pool(context, network_id,
+                                        instance_uuid, host,
+                                        virtual_interface_id)
+
+
+def fixed_ip_create(context, values):
+    """Create a fixed IP from the values dictionary."""
+    return IMPL.fixed_ip_create(context, values)
+
+
+def fixed_ip_bulk_create(context, ips):
+    """Create a lot of fixed IPs from the values dictionary."""
+    return IMPL.fixed_ip_bulk_create(context, ips)
+
+
+def fixed_ip_disassociate(context, address):
+    """Disassociate a fixed IP from an instance by address."""
+    return IMPL.fixed_ip_disassociate(context, address)
+
+
+def fixed_ip_disassociate_all_by_timeout(context, host, time):
+    """Disassociate old fixed IPs from host."""
+    return IMPL.fixed_ip_disassociate_all_by_timeout(context, host, time)
+
+
+def fixed_ip_get(context, id, get_network=False):
+    """Get fixed IP by id or raise if it does not exist.
+
+    If get_network is true, also return the associated network.
+    """
+    return IMPL.fixed_ip_get(context, id, get_network)
+
+
+def fixed_ip_get_all(context):
+    """Get all defined fixed IPs."""
+    return IMPL.fixed_ip_get_all(context)
+
+
+def fixed_ip_get_by_address(context, address, columns_to_join=None):
+    """Get a fixed IP by address or raise if it does not exist."""
+    return IMPL.fixed_ip_get_by_address(context, address,
+                                        columns_to_join=columns_to_join)
+
+
+def fixed_ip_get_by_floating_address(context, floating_address):
+    """Get a fixed IP by a floating address."""
+    return IMPL.fixed_ip_get_by_floating_address(context, floating_address)
+
+
+def fixed_ip_get_by_instance(context, instance_uuid):
+    """Get fixed IPs by instance or raise if none exist."""
+    return IMPL.fixed_ip_get_by_instance(context, instance_uuid)
+
+
+def fixed_ip_get_by_host(context, host):
+    """Get fixed IPs by compute host."""
+    return IMPL.fixed_ip_get_by_host(context, host)
+
+
+def fixed_ip_get_by_network_host(context, network_uuid, host):
+    """Get fixed IP for a host in a network."""
+    return IMPL.fixed_ip_get_by_network_host(context, network_uuid, host)
+
+
+def fixed_ips_by_virtual_interface(context, vif_id):
+    """Get fixed IPs by virtual interface or raise if none exist."""
+    return IMPL.fixed_ips_by_virtual_interface(context, vif_id)
+
+
+def fixed_ip_update(context, address, values):
+    """Create a fixed IP from the values dictionary."""
+    return IMPL.fixed_ip_update(context, address, values)
+
+
+####################
+
+
+def virtual_interface_create(context, values):
+    """Create a virtual interface record in the database."""
+    return IMPL.virtual_interface_create(context, values)
+
+
+def virtual_interface_update(context, address, values):
+    """Create a virtual interface record in the database."""
+    return IMPL.virtual_interface_update(context, address, values)
+
+
+def virtual_interface_get(context, vif_id):
+    """Gets a virtual interface from the table."""
+    return IMPL.virtual_interface_get(context, vif_id)
+
+
+def virtual_interface_get_by_address(context, address):
+    """Gets a virtual interface from the table filtering on address."""
+    return IMPL.virtual_interface_get_by_address(context, address)
+
+
+def virtual_interface_get_by_uuid(context, vif_uuid):
+    """Gets a virtual interface from the table filtering on vif uuid."""
+    return IMPL.virtual_interface_get_by_uuid(context, vif_uuid)
+
+
+def virtual_interface_get_by_instance(context, instance_id):
+    """Gets all virtual_interfaces for instance."""
+    return IMPL.virtual_interface_get_by_instance(context, instance_id)
+
+
+def virtual_interface_get_by_instance_and_network(context, instance_id,
+                                                           network_id):
+    """Gets all virtual interfaces for instance."""
+    return IMPL.virtual_interface_get_by_instance_and_network(context,
+                                                              instance_id,
+                                                              network_id)
+
+
+def virtual_interface_delete_by_instance(context, instance_id):
+    """Delete virtual interface records associated with instance."""
+    return IMPL.virtual_interface_delete_by_instance(context, instance_id)
+
+
+def virtual_interface_delete(context, id):
+    """Delete virtual interface by id."""
+    return IMPL.virtual_interface_delete(context, id)
+
+
+def virtual_interface_get_all(context):
+    """Gets all virtual interfaces from the table."""
+    return IMPL.virtual_interface_get_all(context)
+
+
+####################
+
+
+def instance_create(context, values):
+    """Create an instance from the values dictionary."""
+    return IMPL.instance_create(context, values)
+
+
+def instance_destroy(context, instance_uuid, constraint=None):
+    """Destroy the instance or raise if it does not exist."""
+    return IMPL.instance_destroy(context, instance_uuid, constraint)
+
+
+def instance_get_by_uuid(context, uuid, columns_to_join=None):
+    """Get an instance or raise if it does not exist."""
+    return IMPL.instance_get_by_uuid(context, uuid, columns_to_join)
+
+
+def instance_get(context, instance_id, columns_to_join=None):
+    """Get an instance or raise if it does not exist."""
+    return IMPL.instance_get(context, instance_id,
+                             columns_to_join=columns_to_join)
+
+
+def instance_get_all(context, columns_to_join=None):
+    """Get all instances."""
+    return IMPL.instance_get_all(context, columns_to_join=columns_to_join)
+
+
+def instance_get_all_uuids_by_host(context, host):
+    """Get a list of instance uuids on host."""
+    return IMPL.instance_get_all_uuids_by_host(context, host)
+
+
+def instance_get_all_by_filters(context, filters, sort_key='created_at',
+                                sort_dir='desc', limit=None, marker=None,
+                                columns_to_join=None):
+    """Get all instances that match all filters."""
+    # Note: This function exists for backwards compatibility since calls to
+    # the instance layer coming in over RPC may specify the single sort
+    # key/direction values; in this case, this function is invoked instead
+    # of the 'instance_get_all_by_filters_sort' function.
+    return IMPL.instance_get_all_by_filters(context, filters, sort_key,
+                                            sort_dir, limit=limit,
+                                            marker=marker,
+                                            columns_to_join=columns_to_join)
+
+
+def instance_get_all_by_filters_sort(context, filters, limit=None,
+                                     marker=None, columns_to_join=None,
+                                     sort_keys=None, sort_dirs=None):
+    """Get all instances that match all filters sorted by multiple keys.
+
+    sort_keys and sort_dirs must be a list of strings.
+    """
+    return IMPL.instance_get_all_by_filters_sort(
+        context, filters, limit=limit, marker=marker,
+        columns_to_join=columns_to_join, sort_keys=sort_keys,
+        sort_dirs=sort_dirs)
+
+
+def instance_get_by_sort_filters(context, sort_keys, sort_dirs, values):
+    """Get the uuid of the first instance in a sort order.
+
+    Return the first instance (uuid) of the set where each column value
+    is greater than or equal to the matching one in @values, for each key
+    in @sort_keys.
+    """
+    return IMPL.instance_get_by_sort_filters(context, sort_keys, sort_dirs,
+                                             values)
+
+
+def instance_get_active_by_window_joined(context, begin, end=None,
+                                         project_id=None, host=None,
+                                         columns_to_join=None, limit=None,
+                                         marker=None):
+    """Get instances and joins active during a certain time window.
+
+    Specifying a project_id will filter for a certain project.
+    Specifying a host will filter for instances on a given compute host.
+    """
+    return IMPL.instance_get_active_by_window_joined(context, begin, end,
+                                              project_id, host,
+                                              columns_to_join=columns_to_join,
+                                              limit=limit, marker=marker)
+
+
+def instance_get_all_by_host(context, host, columns_to_join=None):
+    """Get all instances belonging to a host."""
+    return IMPL.instance_get_all_by_host(context, host, columns_to_join)
+
+
+def instance_get_all_by_host_and_node(context, host, node,
+                                      columns_to_join=None):
+    """Get all instances belonging to a node."""
+    return IMPL.instance_get_all_by_host_and_node(
+        context, host, node, columns_to_join=columns_to_join)
+
+
+def instance_get_all_by_host_and_not_type(context, host, type_id=None):
+    """Get all instances belonging to a host with a different type_id."""
+    return IMPL.instance_get_all_by_host_and_not_type(context, host, type_id)
+
+
+def instance_get_all_by_grantee_security_groups(context, group_ids):
+    """Get instances with rules granted to them by a list of secgroups ids."""
+    return IMPL.instance_get_all_by_grantee_security_groups(context, group_ids)
+
+
+def instance_floating_address_get_all(context, instance_uuid):
+    """Get all floating IP addresses of an instance."""
+    return IMPL.instance_floating_address_get_all(context, instance_uuid)
+
+
+# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
+def instance_get_all_hung_in_rebooting(context, reboot_window):
+    """Get all instances stuck in a rebooting state."""
+    return IMPL.instance_get_all_hung_in_rebooting(context, reboot_window)
+
+
+def instance_update(context, instance_uuid, values, expected=None):
+    """Set the given properties on an instance and update it.
+
+    Raises NotFound if instance does not exist.
+
+    """
+    return IMPL.instance_update(context, instance_uuid, values,
+                                expected=expected)
+
+
+def instance_update_and_get_original(context, instance_uuid, values,
+                                     columns_to_join=None, expected=None):
+    """Set the given properties on an instance and update it. Return
+    a shallow copy of the original instance reference, as well as the
+    updated one.
+
+    :param context: = request context object
+    :param instance_uuid: = instance id or uuid
+    :param values: = dict containing column values
+
+    :returns: a tuple of the form (old_instance_ref, new_instance_ref)
+
+    Raises NotFound if instance does not exist.
+    """
+    rv = IMPL.instance_update_and_get_original(context, instance_uuid, values,
+                                               columns_to_join=columns_to_join,
+                                               expected=expected)
+    return rv
+
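+# Illustrative call of the stub above (variable names are placeholders, not
+# part of this commit): the helper returns both the pre-update and the
+# post-update instance references.
+#
+#     old_ref, new_ref = instance_update_and_get_original(
+#         ctxt, instance_uuid, {'task_state': None})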
+
+def instance_add_security_group(context, instance_id, security_group_id):
+    """Associate the given security group with the given instance."""
+    return IMPL.instance_add_security_group(context, instance_id,
+                                            security_group_id)
+
+
+def instance_remove_security_group(context, instance_id, security_group_id):
+    """Disassociate the given security group from the given instance."""
+    return IMPL.instance_remove_security_group(context, instance_id,
+                                            security_group_id)
+
+
+####################
+
+
+def instance_info_cache_get(context, instance_uuid):
+    """Gets an instance info cache from the table.
+
+    :param instance_uuid: = uuid of the info cache's instance
+    """
+    return IMPL.instance_info_cache_get(context, instance_uuid)
+
+
+def instance_info_cache_update(context, instance_uuid, values):
+    """Update an instance info cache record in the table.
+
+    :param instance_uuid: = uuid of info cache's instance
+    :param values: = dict containing column values to update
+    """
+    return IMPL.instance_info_cache_update(context, instance_uuid, values)
+
+
+def instance_info_cache_delete(context, instance_uuid):
+    """Deletes an existing instance_info_cache record
+
+    :param instance_uuid: = uuid of the instance tied to the cache record
+    """
+    return IMPL.instance_info_cache_delete(context, instance_uuid)
+
+
+###################
+
+
+def instance_extra_get_by_instance_uuid(context, instance_uuid, columns=None):
+    """Get the instance extra record
+
+    :param instance_uuid: = uuid of the instance tied to the topology record
+    :param columns: A list of the columns to load, or None for 'all of them'
+    """
+    return IMPL.instance_extra_get_by_instance_uuid(
+        context, instance_uuid, columns=columns)
+
+
+def instance_extra_update_by_uuid(context, instance_uuid, updates):
+    """Update the instance extra record by instance uuid
+
+    :param instance_uuid: = uuid of the instance tied to the record
+    :param updates: A dict of updates to apply
+    """
+    return IMPL.instance_extra_update_by_uuid(context, instance_uuid,
+                                              updates)
+
+
+###################
+
+
+def key_pair_create(context, values):
+    """Create a key_pair from the values dictionary."""
+    return IMPL.key_pair_create(context, values)
+
+
+def key_pair_destroy(context, user_id, name):
+    """Destroy the key_pair or raise if it does not exist."""
+    return IMPL.key_pair_destroy(context, user_id, name)
+
+
+def key_pair_get(context, user_id, name):
+    """Get a key_pair or raise if it does not exist."""
+    return IMPL.key_pair_get(context, user_id, name)
+
+
+def key_pair_get_all_by_user(context, user_id, limit=None, marker=None):
+    """Get all key_pairs by user."""
+    return IMPL.key_pair_get_all_by_user(
+        context, user_id, limit=limit, marker=marker)
+
+
+def key_pair_count_by_user(context, user_id):
+    """Count number of key pairs for the given user ID."""
+    return IMPL.key_pair_count_by_user(context, user_id)
+
+
+####################
+
+
+def network_associate(context, project_id, network_id=None, force=False):
+    """Associate a free network to a project."""
+    return IMPL.network_associate(context, project_id, network_id, force)
+
+
+def network_count_reserved_ips(context, network_id):
+    """Return the number of reserved IPs in the network."""
+    return IMPL.network_count_reserved_ips(context, network_id)
+
+
+def network_create_safe(context, values):
+    """Create a network from the values dict.
+
+    The network is only returned if the create succeeds. If the create violates
+    constraints because the network already exists, no exception is raised.
+
+    """
+    return IMPL.network_create_safe(context, values)
+
+
+def network_delete_safe(context, network_id):
+    """Delete network with key network_id.
+
+    This method assumes that the network is not associated with any project
+
+    """
+    return IMPL.network_delete_safe(context, network_id)
+
+
+def network_disassociate(context, network_id, disassociate_host=True,
+                         disassociate_project=True):
+    """Disassociate the network from project or host
+
+    Raises if it does not exist.
+    """
+    return IMPL.network_disassociate(context, network_id, disassociate_host,
+                                     disassociate_project)
+
+
+def network_get(context, network_id, project_only="allow_none"):
+    """Get a network or raise if it does not exist."""
+    return IMPL.network_get(context, network_id, project_only=project_only)
+
+
+def network_get_all(context, project_only="allow_none"):
+    """Return all defined networks."""
+    return IMPL.network_get_all(context, project_only)
+
+
+def network_get_all_by_uuids(context, network_uuids,
+                             project_only="allow_none"):
+    """Return networks by ids."""
+    return IMPL.network_get_all_by_uuids(context, network_uuids,
+                                         project_only=project_only)
+
+
+def network_in_use_on_host(context, network_id, host=None):
+    """Indicates if a network is currently in use on host."""
+    return IMPL.network_in_use_on_host(context, network_id, host)
+
+
+def network_get_associated_fixed_ips(context, network_id, host=None):
+    """Get all network's IPs that have been associated."""
+    return IMPL.network_get_associated_fixed_ips(context, network_id, host)
+
+
+def network_get_by_uuid(context, uuid):
+    """Get a network by uuid or raise if it does not exist."""
+    return IMPL.network_get_by_uuid(context, uuid)
+
+
+def network_get_by_cidr(context, cidr):
+    """Get a network by cidr or raise if it does not exist."""
+    return IMPL.network_get_by_cidr(context, cidr)
+
+
+def network_get_all_by_host(context, host):
+    """All networks for which the given host is the network host."""
+    return IMPL.network_get_all_by_host(context, host)
+
+
+def network_set_host(context, network_id, host_id):
+    """Safely set the host for network."""
+    return IMPL.network_set_host(context, network_id, host_id)
+
+
+def network_update(context, network_id, values):
+    """Set the given properties on a network and update it.
+
+    Raises NotFound if network does not exist.
+
+    """
+    return IMPL.network_update(context, network_id, values)
+
+
+###############
+
+
+def quota_create(context, project_id, resource, limit, user_id=None):
+    """Create a quota for the given project and resource."""
+    return IMPL.quota_create(context, project_id, resource, limit,
+                             user_id=user_id)
+
+
+def quota_get(context, project_id, resource, user_id=None):
+    """Retrieve a quota or raise if it does not exist."""
+    return IMPL.quota_get(context, project_id, resource, user_id=user_id)
+
+
+def quota_get_all_by_project_and_user(context, project_id, user_id):
+    """Retrieve all quotas associated with a given project and user."""
+    return IMPL.quota_get_all_by_project_and_user(context, project_id, user_id)
+
+
+def quota_get_all_by_project(context, project_id):
+    """Retrieve all quotas associated with a given project."""
+    return IMPL.quota_get_all_by_project(context, project_id)
+
+
+def quota_get_per_project_resources():
+    """Retrieve the names of resources whose quotas are calculated on a
+       per-project rather than a per-user basis.
+    """
+    return IMPL.quota_get_per_project_resources()
+
+
+def quota_get_all(context, project_id):
+    """Retrieve all user quotas associated with a given project."""
+    return IMPL.quota_get_all(context, project_id)
+
+
+def quota_update(context, project_id, resource, limit, user_id=None):
+    """Update a quota or raise if it does not exist."""
+    return IMPL.quota_update(context, project_id, resource, limit,
+                             user_id=user_id)
+
+
+###################
+
+
+def quota_class_create(context, class_name, resource, limit):
+    """Create a quota class for the given name and resource."""
+    return IMPL.quota_class_create(context, class_name, resource, limit)
+
+
+def quota_class_get(context, class_name, resource):
+    """Retrieve a quota class or raise if it does not exist."""
+    return IMPL.quota_class_get(context, class_name, resource)
+
+
+def quota_class_get_default(context):
+    """Retrieve all default quotas."""
+    return IMPL.quota_class_get_default(context)
+
+
+def quota_class_get_all_by_name(context, class_name):
+    """Retrieve all quotas associated with a given quota class."""
+    return IMPL.quota_class_get_all_by_name(context, class_name)
+
+
+def quota_class_update(context, class_name, resource, limit):
+    """Update a quota class or raise if it does not exist."""
+    return IMPL.quota_class_update(context, class_name, resource, limit)
+
+
+###################
+
+
+def quota_destroy_all_by_project_and_user(context, project_id, user_id):
+    """Destroy all quotas associated with a given project and user."""
+    return IMPL.quota_destroy_all_by_project_and_user(context,
+                                                      project_id, user_id)
+
+
+def quota_destroy_all_by_project(context, project_id):
+    """Destroy all quotas associated with a given project."""
+    return IMPL.quota_destroy_all_by_project(context, project_id)
+
+
+###################
+
+
+def ec2_volume_create(context, volume_id, forced_id=None):
+    """Create the ec2 id to volume uuid mapping on demand."""
+    return IMPL.ec2_volume_create(context, volume_id, forced_id)
+
+
+def ec2_volume_get_by_id(context, volume_id):
+    """Get the ec2 id to volume uuid mapping by ec2 id."""
+    return IMPL.ec2_volume_get_by_id(context, volume_id)
+
+
+def ec2_volume_get_by_uuid(context, volume_uuid):
+    """Get the ec2 id to volume uuid mapping by volume uuid."""
+    return IMPL.ec2_volume_get_by_uuid(context, volume_uuid)
+
+
+def ec2_snapshot_create(context, snapshot_id, forced_id=None):
+    """Create the ec2 id to snapshot uuid mapping on demand."""
+    return IMPL.ec2_snapshot_create(context, snapshot_id, forced_id)
+
+
+def ec2_snapshot_get_by_ec2_id(context, ec2_id):
+    """Get the ec2 id to snapshot uuid mapping by ec2 id."""
+    return IMPL.ec2_snapshot_get_by_ec2_id(context, ec2_id)
+
+
+def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
+    """Get the ec2 id to snapshot uuid mapping by snapshot uuid."""
+    return IMPL.ec2_snapshot_get_by_uuid(context, snapshot_uuid)
+
+
+####################
+
+
+def block_device_mapping_create(context, values, legacy=True):
+    """Create an entry of block device mapping."""
+    return IMPL.block_device_mapping_create(context, values, legacy)
+
+
+def block_device_mapping_update(context, bdm_id, values, legacy=True):
+    """Update an entry of block device mapping."""
+    return IMPL.block_device_mapping_update(context, bdm_id, values, legacy)
+
+
+def block_device_mapping_update_or_create(context, values, legacy=True):
+    """Update an entry of block device mapping.
+
+    If it does not exist, create a new entry.
+    """
+    return IMPL.block_device_mapping_update_or_create(context, values, legacy)
+
+
+def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
+    """Get all block device mapping belonging to a list of instances."""
+    return IMPL.block_device_mapping_get_all_by_instance_uuids(context,
+                                                               instance_uuids)
+
+
+def block_device_mapping_get_all_by_instance(context, instance_uuid):
+    """Get all block device mapping belonging to an instance."""
+    return IMPL.block_device_mapping_get_all_by_instance(context,
+                                                         instance_uuid)
+
+
+def block_device_mapping_get_all_by_volume_id(context, volume_id,
+        columns_to_join=None):
+    """Get block device mapping for a given volume."""
+    return IMPL.block_device_mapping_get_all_by_volume_id(context, volume_id,
+            columns_to_join)
+
+
+def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
+                                                       instance_uuid,
+                                                       columns_to_join=None):
+    """Get block device mapping for a given volume ID and instance UUID."""
+    return IMPL.block_device_mapping_get_by_instance_and_volume_id(
+        context, volume_id, instance_uuid, columns_to_join)
+
+
+def block_device_mapping_destroy(context, bdm_id):
+    """Destroy the block device mapping."""
+    return IMPL.block_device_mapping_destroy(context, bdm_id)
+
+
+def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
+                                                        device_name):
+    """Destroy the block device mapping."""
+    return IMPL.block_device_mapping_destroy_by_instance_and_device(
+        context, instance_uuid, device_name)
+
+
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
+                                                        volume_id):
+    """Destroy the block device mapping."""
+    return IMPL.block_device_mapping_destroy_by_instance_and_volume(
+        context, instance_uuid, volume_id)
+
+
+####################
+
+
+def security_group_get_all(context):
+    """Get all security groups."""
+    return IMPL.security_group_get_all(context)
+
+
+def security_group_get(context, security_group_id, columns_to_join=None):
+    """Get security group by its id."""
+    return IMPL.security_group_get(context, security_group_id,
+                                   columns_to_join)
+
+
+def security_group_get_by_name(context, project_id, group_name,
+                               columns_to_join=None):
+    """Returns a security group with the specified name from a project."""
+    return IMPL.security_group_get_by_name(context, project_id, group_name,
+                                           columns_to_join=columns_to_join)
+
+
+def security_group_get_by_project(context, project_id):
+    """Get all security groups belonging to a project."""
+    return IMPL.security_group_get_by_project(context, project_id)
+
+
+def security_group_get_by_instance(context, instance_uuid):
+    """Get security groups to which the instance is assigned."""
+    return IMPL.security_group_get_by_instance(context, instance_uuid)
+
+
+def security_group_in_use(context, group_id):
+    """Indicates if a security group is currently in use."""
+    return IMPL.security_group_in_use(context, group_id)
+
+
+def security_group_create(context, values):
+    """Create a new security group."""
+    return IMPL.security_group_create(context, values)
+
+
+def security_group_update(context, security_group_id, values,
+                          columns_to_join=None):
+    """Update a security group."""
+    return IMPL.security_group_update(context, security_group_id, values,
+                                      columns_to_join=columns_to_join)
+
+
+def security_group_ensure_default(context):
+    """Ensure default security group exists for a project_id.
+
+    Returns a tuple with the first element being a bool indicating
+    if the default security group previously existed. Second
+    element is the dict used to create the default security group.
+    """
+    return IMPL.security_group_ensure_default(context)
+
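+# Illustrative call of the stub above (variable names are placeholders):
+#
+#     existed, default_group = security_group_ensure_default(ctxt)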
+
+def security_group_destroy(context, security_group_id):
+    """Deletes a security group."""
+    return IMPL.security_group_destroy(context, security_group_id)
+
+
+####################
+
+
+def security_group_rule_create(context, values):
+    """Create a new security group."""
+    return IMPL.security_group_rule_create(context, values)
+
+
+def security_group_rule_get_by_security_group(context, security_group_id,
+                                              columns_to_join=None):
+    """Get all rules for a given security group."""
+    return IMPL.security_group_rule_get_by_security_group(
+        context, security_group_id, columns_to_join=columns_to_join)
+
+
+def security_group_rule_get_by_instance(context, instance_uuid):
+    """Get all rules for a given instance."""
+    return IMPL.security_group_rule_get_by_instance(context, instance_uuid)
+
+
+def security_group_rule_destroy(context, security_group_rule_id):
+    """Deletes a security group rule."""
+    return IMPL.security_group_rule_destroy(context, security_group_rule_id)
+
+
+def security_group_rule_get(context, security_group_rule_id):
+    """Gets a security group rule."""
+    return IMPL.security_group_rule_get(context, security_group_rule_id)
+
+
+def security_group_rule_count_by_group(context, security_group_id):
+    """Count rules in a given security group."""
+    return IMPL.security_group_rule_count_by_group(context, security_group_id)
+
+
+###################
+
+
+def security_group_default_rule_get(context, security_group_rule_default_id):
+    """Get a default security group rule by its id."""
+    return IMPL.security_group_default_rule_get(context,
+                                                security_group_rule_default_id)
+
+
+def security_group_default_rule_destroy(context,
+                                        security_group_rule_default_id):
+    """Delete a default security group rule."""
+    return IMPL.security_group_default_rule_destroy(
+        context, security_group_rule_default_id)
+
+
+def security_group_default_rule_create(context, values):
+    """Create a default security group rule from the values dictionary."""
+    return IMPL.security_group_default_rule_create(context, values)
+
+
+def security_group_default_rule_list(context):
+    """Get all default security group rules."""
+    return IMPL.security_group_default_rule_list(context)
+
+
+###################
+
+
+def provider_fw_rule_create(context, rule):
+    """Add a firewall rule at the provider level (all hosts & instances)."""
+    return IMPL.provider_fw_rule_create(context, rule)
+
+
+def provider_fw_rule_get_all(context):
+    """Get all provider-level firewall rules."""
+    return IMPL.provider_fw_rule_get_all(context)
+
+
+def provider_fw_rule_destroy(context, rule_id):
+    """Delete a provider firewall rule from the database."""
+    return IMPL.provider_fw_rule_destroy(context, rule_id)
+
+
+###################
+
+
+def project_get_networks(context, project_id, associate=True):
+    """Return the network associated with the project.
+
+    If associate is true, it will attempt to associate a new
+    network if one is not found, otherwise it returns None.
+
+    """
+    return IMPL.project_get_networks(context, project_id, associate)
+
+
+###################
+
+
+def console_pool_create(context, values):
+    """Create console pool."""
+    return IMPL.console_pool_create(context, values)
+
+
+def console_pool_get_by_host_type(context, compute_host, proxy_host,
+                                  console_type):
+    """Fetch a console pool for a given proxy host, compute host, and type."""
+    return IMPL.console_pool_get_by_host_type(context,
+                                              compute_host,
+                                              proxy_host,
+                                              console_type)
+
+
+def console_pool_get_all_by_host_type(context, host, console_type):
+    """Fetch all pools for given proxy host and type."""
+    return IMPL.console_pool_get_all_by_host_type(context,
+                                                  host,
+                                                  console_type)
+
+
+def console_create(context, values):
+    """Create a console."""
+    return IMPL.console_create(context, values)
+
+
+def console_delete(context, console_id):
+    """Delete a console."""
+    return IMPL.console_delete(context, console_id)
+
+
+def console_get_by_pool_instance(context, pool_id, instance_uuid):
+    """Get console entry for a given instance and pool."""
+    return IMPL.console_get_by_pool_instance(context, pool_id, instance_uuid)
+
+
+def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
+    """Get consoles for a given instance."""
+    return IMPL.console_get_all_by_instance(context, instance_uuid,
+                                            columns_to_join)
+
+
+def console_get(context, console_id, instance_uuid=None):
+    """Get a specific console (possibly on a given instance)."""
+    return IMPL.console_get(context, console_id, instance_uuid)
+
+##################
+
+
+def pci_device_get_by_addr(context, node_id, dev_addr):
+    """Get PCI device by address."""
+    return IMPL.pci_device_get_by_addr(context, node_id, dev_addr)
+
+
+def pci_device_get_by_id(context, id):
+    """Get PCI device by id."""
+    return IMPL.pci_device_get_by_id(context, id)
+
+
+def pci_device_get_all_by_node(context, node_id):
+    """Get all PCI devices for one host."""
+    return IMPL.pci_device_get_all_by_node(context, node_id)
+
+
+def pci_device_get_all_by_instance_uuid(context, instance_uuid):
+    """Get PCI devices allocated to instance."""
+    return IMPL.pci_device_get_all_by_instance_uuid(context, instance_uuid)
+
+
+def pci_device_get_all_by_parent_addr(context, node_id, parent_addr):
+    """Get all PCI devices by parent address."""
+    return IMPL.pci_device_get_all_by_parent_addr(context, node_id,
+                                                  parent_addr)
+
+
+def pci_device_destroy(context, node_id, address):
+    """Delete a PCI device record."""
+    return IMPL.pci_device_destroy(context, node_id, address)
+
+
+def pci_device_update(context, node_id, address, value):
+    """Update a pci device."""
+    return IMPL.pci_device_update(context, node_id, address, value)
+
+
+###################
+
+def cell_create(context, values):
+    """Create a new child Cell entry."""
+    return IMPL.cell_create(context, values)
+
+
+def cell_update(context, cell_name, values):
+    """Update a child Cell entry."""
+    return IMPL.cell_update(context, cell_name, values)
+
+
+def cell_delete(context, cell_name):
+    """Delete a child Cell."""
+    return IMPL.cell_delete(context, cell_name)
+
+
+def cell_get(context, cell_name):
+    """Get a specific child Cell."""
+    return IMPL.cell_get(context, cell_name)
+
+
+def cell_get_all(context):
+    """Get all child Cells."""
+    return IMPL.cell_get_all(context)
+
+
+####################
+
+
+def instance_metadata_get(context, instance_uuid):
+    """Get all metadata for an instance."""
+    return IMPL.instance_metadata_get(context, instance_uuid)
+
+
+def instance_metadata_delete(context, instance_uuid, key):
+    """Delete the given metadata item."""
+    IMPL.instance_metadata_delete(context, instance_uuid, key)
+
+
+def instance_metadata_update(context, instance_uuid, metadata, delete):
+    """Update metadata if it exists, otherwise create it."""
+    return IMPL.instance_metadata_update(context, instance_uuid,
+                                         metadata, delete)
+
+
+####################
+
+
+def instance_system_metadata_get(context, instance_uuid):
+    """Get all system metadata for an instance."""
+    return IMPL.instance_system_metadata_get(context, instance_uuid)
+
+
+def instance_system_metadata_update(context, instance_uuid, metadata, delete):
+    """Update metadata if it exists, otherwise create it."""
+    IMPL.instance_system_metadata_update(
+            context, instance_uuid, metadata, delete)
+
+
+####################
+
+
+def agent_build_create(context, values):
+    """Create a new agent build entry."""
+    return IMPL.agent_build_create(context, values)
+
+
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+    """Get agent build by hypervisor/OS/architecture triple."""
+    return IMPL.agent_build_get_by_triple(context, hypervisor, os,
+            architecture)
+
+
+def agent_build_get_all(context, hypervisor=None):
+    """Get all agent builds."""
+    return IMPL.agent_build_get_all(context, hypervisor)
+
+
+def agent_build_destroy(context, agent_update_id):
+    """Destroy agent build entry."""
+    IMPL.agent_build_destroy(context, agent_update_id)
+
+
+def agent_build_update(context, agent_build_id, values):
+    """Update agent build entry."""
+    IMPL.agent_build_update(context, agent_build_id, values)
+
+
+####################
+
+
+def bw_usage_get(context, uuid, start_period, mac):
+    """Return bw usage for instance and mac in a given audit period."""
+    return IMPL.bw_usage_get(context, uuid, start_period, mac)
+
+
+def bw_usage_get_by_uuids(context, uuids, start_period):
+    """Return bw usages for instance(s) in a given audit period."""
+    return IMPL.bw_usage_get_by_uuids(context, uuids, start_period)
+
+
+def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
+                    last_ctr_in, last_ctr_out, last_refreshed=None,
+                    update_cells=True):
+    """Update cached bandwidth usage for an instance's network based on mac
+    address.  Creates new record if needed.
+    """
+    rv = IMPL.bw_usage_update(context, uuid, mac, start_period, bw_in,
+            bw_out, last_ctr_in, last_ctr_out, last_refreshed=last_refreshed)
+    if update_cells:
+        try:
+            cells_rpcapi.CellsAPI().bw_usage_update_at_top(context,
+                    uuid, mac, start_period, bw_in, bw_out,
+                    last_ctr_in, last_ctr_out, last_refreshed)
+        except Exception:
+            LOG.exception("Failed to notify cells of bw_usage update")
+    return rv
+
+
+###################
+
+
+def vol_get_usage_by_time(context, begin):
+    """Return volumes usage that have been updated after a specified time."""
+    return IMPL.vol_get_usage_by_time(context, begin)
+
+
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+                     instance_id, project_id, user_id, availability_zone,
+                     update_totals=False):
+    """Update cached volume usage for a volume
+
+       Creates new record if needed.
+    """
+    return IMPL.vol_usage_update(context, id, rd_req, rd_bytes, wr_req,
+                                 wr_bytes, instance_id, project_id, user_id,
+                                 availability_zone,
+                                 update_totals=update_totals)
+
+
+###################
+
+
+def s3_image_get(context, image_id):
+    """Find local s3 image represented by the provided id."""
+    return IMPL.s3_image_get(context, image_id)
+
+
+def s3_image_get_by_uuid(context, image_uuid):
+    """Find local s3 image represented by the provided uuid."""
+    return IMPL.s3_image_get_by_uuid(context, image_uuid)
+
+
+def s3_image_create(context, image_uuid):
+    """Create local s3 image represented by provided uuid."""
+    return IMPL.s3_image_create(context, image_uuid)
+
+
+####################
+
+
+def instance_fault_create(context, values):
+    """Create a new Instance Fault."""
+    return IMPL.instance_fault_create(context, values)
+
+
+def instance_fault_get_by_instance_uuids(context, instance_uuids,
+                                         latest=False):
+    """Get all instance faults for the provided instance_uuids."""
+    return IMPL.instance_fault_get_by_instance_uuids(context, instance_uuids,
+                                                     latest=latest)
+
+
+####################
+
+
+def action_start(context, values):
+    """Start an action for an instance."""
+    return IMPL.action_start(context, values)
+
+
+def action_finish(context, values):
+    """Finish an action for an instance."""
+    return IMPL.action_finish(context, values)
+
+
+def actions_get(context, instance_uuid, limit=None, marker=None,
+                filters=None):
+    """Get all instance actions for the provided instance and filters."""
+    return IMPL.actions_get(context, instance_uuid, limit, marker, filters)
+
+
+def action_get_by_request_id(context, uuid, request_id):
+    """Get the action by request_id and given instance."""
+    return IMPL.action_get_by_request_id(context, uuid, request_id)
+
+
+def action_event_start(context, values):
+    """Start an event on an instance action."""
+    return IMPL.action_event_start(context, values)
+
+
+def action_event_finish(context, values):
+    """Finish an event on an instance action."""
+    return IMPL.action_event_finish(context, values)
+
+
+def action_events_get(context, action_id):
+    """Get the events by action id."""
+    return IMPL.action_events_get(context, action_id)
+
+
+def action_event_get_by_id(context, action_id, event_id):
+    """Get an action event by action id and event id."""
+    return IMPL.action_event_get_by_id(context, action_id, event_id)
+
+
+####################
+
+
+def get_instance_uuid_by_ec2_id(context, ec2_id):
+    """Get uuid through ec2 id from instance_id_mappings table."""
+    return IMPL.get_instance_uuid_by_ec2_id(context, ec2_id)
+
+
+def ec2_instance_create(context, instance_uuid, id=None):
+    """Create the ec2 id to instance uuid mapping on demand."""
+    return IMPL.ec2_instance_create(context, instance_uuid, id)
+
+
+def ec2_instance_get_by_uuid(context, instance_uuid):
+    """Get the ec2 id to instance uuid mapping by instance uuid."""
+    return IMPL.ec2_instance_get_by_uuid(context, instance_uuid)
+
+
+def ec2_instance_get_by_id(context, instance_id):
+    """Get the ec2 id to instance uuid mapping by ec2 id."""
+    return IMPL.ec2_instance_get_by_id(context, instance_id)
+
+
+####################
+
+
+def task_log_end_task(context, task_name,
+                        period_beginning,
+                        period_ending,
+                        host,
+                        errors,
+                        message=None):
+    """Mark a task as complete for a given host/time period."""
+    return IMPL.task_log_end_task(context, task_name,
+                                  period_beginning,
+                                  period_ending,
+                                  host,
+                                  errors,
+                                  message)
+
+
+def task_log_begin_task(context, task_name,
+                        period_beginning,
+                        period_ending,
+                        host,
+                        task_items=None,
+                        message=None):
+    """Mark a task as started for a given host/time period."""
+    return IMPL.task_log_begin_task(context, task_name,
+                                    period_beginning,
+                                    period_ending,
+                                    host,
+                                    task_items,
+                                    message)
+
+
+def task_log_get_all(context, task_name, period_beginning,
+                     period_ending, host=None, state=None):
+    """Get all task logs for a given task name and time period."""
+    return IMPL.task_log_get_all(context, task_name, period_beginning,
+                                 period_ending, host, state)
+
+
+def task_log_get(context, task_name, period_beginning,
+                 period_ending, host, state=None):
+    """Get the task log for a given task name, time period and host."""
+    return IMPL.task_log_get(context, task_name, period_beginning,
+                             period_ending, host, state)
+
+
+####################
+
+
+def archive_deleted_rows(max_rows=None):
+    """Move up to max_rows rows from production tables to corresponding shadow
+    tables.
+
+    :returns: dict that maps table name to number of rows archived from that
+              table, for example:
+
+    ::
+
+        {
+            'instances': 5,
+            'block_device_mapping': 5,
+            'pci_devices': 2,
+        }
+
+    """
+    return IMPL.archive_deleted_rows(max_rows=max_rows)
+
+
+def pcidevice_online_data_migration(context, max_count):
+    """Run the PCI device online data migration for up to max_count rows."""
+    return IMPL.pcidevice_online_data_migration(context, max_count)
+
+
+def service_uuids_online_data_migration(context, max_count):
+    """Backfill service uuids online, processing at most max_count rows."""
+    return IMPL.service_uuids_online_data_migration(context, max_count)
+
+
+####################
+
+
+def instance_tag_add(context, instance_uuid, tag):
+    """Add tag to the instance."""
+    return IMPL.instance_tag_add(context, instance_uuid, tag)
+
+
+def instance_tag_set(context, instance_uuid, tags):
+    """Replace all of the instance tags with specified list of tags."""
+    return IMPL.instance_tag_set(context, instance_uuid, tags)
+
+
+def instance_tag_get_by_instance_uuid(context, instance_uuid):
+    """Get all tags for a given instance."""
+    return IMPL.instance_tag_get_by_instance_uuid(context, instance_uuid)
+
+
+def instance_tag_delete(context, instance_uuid, tag):
+    """Delete specified tag from the instance."""
+    return IMPL.instance_tag_delete(context, instance_uuid, tag)
+
+
+def instance_tag_delete_all(context, instance_uuid):
+    """Delete all tags from the instance."""
+    return IMPL.instance_tag_delete_all(context, instance_uuid)
+
+
+def instance_tag_exists(context, instance_uuid, tag):
+    """Check if specified tag exist on the instance."""
+    return IMPL.instance_tag_exists(context, instance_uuid, tag)
+
+
+####################
+
+
+def console_auth_token_create(context, values):
+    """Create a console authorization."""
+    return IMPL.console_auth_token_create(context, values)
+
+
+def console_auth_token_get_valid(context, token_hash, instance_uuid=None):
+    """Get a valid console authorization by token_hash and instance_uuid.
+
+    The console authorizations expire at the time specified by their
+    'expires' column. An expired console auth token will not be returned
+    to the caller - it is treated as if it does not exist.
+
+    If instance_uuid is specified, the token is validated against both
+    expiry and instance_uuid.
+
+    If instance_uuid is not specified, the token is validated against
+    expiry only.
+    """
+    return IMPL.console_auth_token_get_valid(context,
+                                             token_hash,
+                                             instance_uuid=instance_uuid)
+
+
+def console_auth_token_destroy_all_by_instance(context, instance_uuid):
+    """Delete all console authorizations belonging to the instance."""
+    return IMPL.console_auth_token_destroy_all_by_instance(context,
+                                                           instance_uuid)
+
+
+def console_auth_token_destroy_expired_by_host(context, host):
+    """Delete expired console authorizations belonging to the host.
+
+    The console authorizations expire at the time specified by their
+    'expires' column. This function is used to garbage collect expired
+    tokens associated with the given host.
+    """
+    return IMPL.console_auth_token_destroy_expired_by_host(context, host)

diff --git a/gosbs/db/base.py b/gosbs/db/base.py
new file mode 100644
index 0000000..9d61239
--- /dev/null
+++ b/gosbs/db/base.py
@@ -0,0 +1,29 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/db/base.py
+
+"""Base class for classes that need database access."""
+
+import gosbs.db.api
+
+
+class Base(object):
+    """DB driver is injected in the init method."""
+
+    def __init__(self):
+        super(Base, self).__init__()
+        self.db = gosbs.db.api

diff --git a/gosbs/db/constants.py b/gosbs/db/constants.py
new file mode 100644
index 0000000..a082fba
--- /dev/null
+++ b/gosbs/db/constants.py
@@ -0,0 +1,25 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+"""Useful db-related constants. In their own file so they can be imported
+cleanly."""
+
+# The maximum value a signed INT type may have
+MAX_INT = 0x7FFFFFFF
+
+# NOTE(dosaboy): This is supposed to represent the maximum value that we can
+# place into a SQL single precision float so that we can check whether values
+# are oversize. Postgres and MySQL both define this as their max, whereas
+# SQLite uses dynamic typing, so it does not apply there. Different databases
+# react to oversize values in different ways, e.g. PostgreSQL will raise an
+# exception while MySQL will round off the value. Nevertheless, we may still
+# want to know prior to insert whether the value is oversize or not.
+SQL_SP_FLOAT_MAX = 3.40282e+38

diff --git a/pym/tbc/__init__.py b/gosbs/db/sqlalchemy/__init__.py
similarity index 100%
copy from pym/tbc/__init__.py
copy to gosbs/db/sqlalchemy/__init__.py

diff --git a/gosbs/db/sqlalchemy/api.py b/gosbs/db/sqlalchemy/api.py
new file mode 100644
index 0000000..6ce8589
--- /dev/null
+++ b/gosbs/db/sqlalchemy/api.py
@@ -0,0 +1,5897 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/api.py
+
+"""Implementation of SQLAlchemy backend."""
+
+import collections
+import copy
+import datetime
+import functools
+import inspect
+import sys
+
+from oslo_db import api as oslo_db_api
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import enginefacade
+from oslo_db.sqlalchemy import update_match
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_log import log as logging
+from oslo_utils import importutils
+from oslo_utils import timeutils
+from oslo_utils import uuidutils
+import six
+from six.moves import range
+import sqlalchemy as sa
+from sqlalchemy import and_
+from sqlalchemy import Boolean
+from sqlalchemy.exc import NoSuchTableError
+from sqlalchemy.ext.compiler import compiles
+from sqlalchemy import Integer
+from sqlalchemy import MetaData
+from sqlalchemy import or_
+from sqlalchemy.orm import aliased
+from sqlalchemy.orm import contains_eager
+from sqlalchemy.orm import joinedload
+from sqlalchemy.orm import joinedload_all
+from sqlalchemy.orm import noload
+from sqlalchemy.orm import undefer
+from sqlalchemy.schema import Table
+from sqlalchemy import sql
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql.expression import cast
+from sqlalchemy.sql.expression import desc
+from sqlalchemy.sql.expression import UpdateBase
+from sqlalchemy.sql import false
+from sqlalchemy.sql import func
+from sqlalchemy.sql import null
+from sqlalchemy.sql import true
+
+import gosbs.conf
+import gosbs.context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs.i18n import _
+from gosbs import safe_utils
+
+profiler_sqlalchemy = importutils.try_import('osprofiler.sqlalchemy')
+
+CONF = gosbs.conf.CONF
+
+
+LOG = logging.getLogger(__name__)
+
+main_context_manager = enginefacade.transaction_context()
+api_context_manager = enginefacade.transaction_context()
+
+
+def _get_db_conf(conf_group, connection=None):
+    kw = dict(conf_group.items())
+    if connection is not None:
+        kw['connection'] = connection
+    return kw
+
+
+def _context_manager_from_context(context):
+    if context:
+        try:
+            return context.db_connection
+        except AttributeError:
+            pass
+
+
+def configure(conf):
+    main_context_manager.configure(**_get_db_conf(conf.database))
+    api_context_manager.configure(**_get_db_conf(conf.api_database))
+
+    if profiler_sqlalchemy and CONF.profiler.enabled \
+            and CONF.profiler.trace_sqlalchemy:
+
+        main_context_manager.append_on_engine_create(
+            lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db"))
+        api_context_manager.append_on_engine_create(
+            lambda eng: profiler_sqlalchemy.add_tracing(sa, eng, "db"))
+
+
+def create_context_manager(connection=None):
+    """Create a database context manager object.
+
+    :param connection: The database connection string
+    """
+    ctxt_mgr = enginefacade.transaction_context()
+    ctxt_mgr.configure(**_get_db_conf(CONF.database, connection=connection))
+    return ctxt_mgr
+
+
+def get_context_manager(context):
+    """Get a database context manager object.
+
+    :param context: The request context that can contain a context manager
+    """
+    return _context_manager_from_context(context) or main_context_manager
+
+
+def get_engine(use_slave=False, context=None):
+    """Get a database engine object.
+
+    :param use_slave: Whether to use the slave connection
+    :param context: The request context that can contain a context manager
+    """
+    ctxt_mgr = get_context_manager(context)
+    if use_slave:
+        return ctxt_mgr.reader.get_engine()
+    return ctxt_mgr.writer.get_engine()
+
+
+def get_api_engine():
+    return api_context_manager.writer.get_engine()
+
+
+_SHADOW_TABLE_PREFIX = 'shadow_'
+_DEFAULT_QUOTA_NAME = 'default'
+PER_PROJECT_QUOTAS = ['fixed_ips', 'floating_ips', 'networks']
+
+
+def get_backend():
+    """The backend is this module itself."""
+    return sys.modules[__name__]
+
+
+def require_context(f):
+    """Decorator to require *any* user or admin context.
+
+    This does no authorization for user or project access matching, see
+    :py:func:`nova.context.authorize_project_context` and
+    :py:func:`nova.context.authorize_user_context`.
+
+    The first argument to the wrapped function must be the context.
+
+    """
+
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        gosbs.context.require_context(args[0])
+        return f(*args, **kwargs)
+    return wrapper
+
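+# Illustrative use of the decorator above (the function name is hypothetical):
+# it is typically stacked on top of one of the context manager decorators
+# defined further down in this module.
+#
+#     @require_context
+#     @pick_context_manager_reader
+#     def example_get_something(context, something_id):
+#         ...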
+
+def require_instance_exists_using_uuid(f):
+    """Decorator to require the specified instance to exist.
+
+    Requires the wrapped function to use context and instance_uuid as
+    their first two arguments.
+    """
+    @functools.wraps(f)
+    def wrapper(context, instance_uuid, *args, **kwargs):
+        instance_get_by_uuid(context, instance_uuid)
+        return f(context, instance_uuid, *args, **kwargs)
+
+    return wrapper
+
+
+def select_db_reader_mode(f):
+    """Decorator to select synchronous or asynchronous reader mode.
+
+    The keyword argument 'use_slave' defines the reader mode. The asynchronous
+    reader will be used if 'use_slave' is True and the synchronous reader
+    otherwise. If 'use_slave' is not specified, the default value False is
+    used.
+
+    Wrapped function must have a context in the arguments.
+    """
+
+    @functools.wraps(f)
+    def wrapper(*args, **kwargs):
+        wrapped_func = safe_utils.get_wrapped_function(f)
+        keyed_args = inspect.getcallargs(wrapped_func, *args, **kwargs)
+
+        context = keyed_args['context']
+        use_slave = keyed_args.get('use_slave', False)
+
+        if use_slave:
+            reader_mode = get_context_manager(context).async_
+        else:
+            reader_mode = get_context_manager(context).reader
+
+        with reader_mode.using(context):
+            return f(*args, **kwargs)
+    return wrapper
+
+
+def pick_context_manager_writer(f):
+    """Decorator to use a writer db context manager.
+
+    The db context manager will be picked from the RequestContext.
+
+    Wrapped function must have a RequestContext in the arguments.
+    """
+    @functools.wraps(f)
+    def wrapped(context, *args, **kwargs):
+        ctxt_mgr = get_context_manager(context)
+        with ctxt_mgr.writer.using(context):
+            return f(context, *args, **kwargs)
+    return wrapped
+
+
+def pick_context_manager_reader(f):
+    """Decorator to use a reader db context manager.
+
+    The db context manager will be picked from the RequestContext.
+
+    Wrapped function must have a RequestContext in the arguments.
+    """
+    @functools.wraps(f)
+    def wrapped(context, *args, **kwargs):
+        ctxt_mgr = get_context_manager(context)
+        with ctxt_mgr.reader.using(context):
+            return f(context, *args, **kwargs)
+    return wrapped
+
+
+def pick_context_manager_reader_allow_async(f):
+    """Decorator to use a reader.allow_async db context manager.
+
+    The db context manager will be picked from the RequestContext.
+
+    Wrapped function must have a RequestContext in the arguments.
+    """
+    @functools.wraps(f)
+    def wrapped(context, *args, **kwargs):
+        ctxt_mgr = get_context_manager(context)
+        with ctxt_mgr.reader.allow_async.using(context):
+            return f(context, *args, **kwargs)
+    return wrapped
+
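+# Illustrative sketch (the function name is hypothetical): combining the
+# reader-mode selector with the allow_async context manager lets callers opt
+# into the slave connection via the 'use_slave' keyword argument.
+#
+#     @select_db_reader_mode
+#     @pick_context_manager_reader_allow_async
+#     def example_get_all_by_host(context, host, use_slave=False):
+#         ...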
+
+def model_query(context, model,
+                args=None,
+                read_deleted=None,
+                project_only=False):
+    """Query helper that accounts for context's `read_deleted` field.
+
+    :param context:     RequestContext of the query.
+    :param model:       Model to query. Must be a subclass of ModelBase.
+    :param args:        Arguments to query. If None - model is used.
+    :param read_deleted: If not None, overrides context's read_deleted field.
+                        Permitted values are 'no', which does not return
+                        deleted values; 'only', which only returns deleted
+                        values; and 'yes', which does not filter deleted
+                        values.
+    :param project_only: If set and context is user-type, then restrict
+                        query to match the context's project_id. If set to
+                        'allow_none', restriction includes project_id = None.
+    """
+
+    if read_deleted is None:
+        read_deleted = context.read_deleted
+
+    query_kwargs = {}
+    if 'no' == read_deleted:
+        query_kwargs['deleted'] = False
+    elif 'only' == read_deleted:
+        query_kwargs['deleted'] = True
+    elif 'yes' == read_deleted:
+        pass
+    else:
+        raise ValueError(_("Unrecognized read_deleted value '%s'")
+                           % read_deleted)
+
+    query = sqlalchemyutils.model_query(
+        model, context.session, args, **query_kwargs)
+
+    # We can't use oslo.db model_query's project_id here, as it doesn't allow
+    # us to return both our projects and unowned projects.
+    if gosbs.context.is_user_context(context) and project_only:
+        if project_only == 'allow_none':
+            query = query.\
+                filter(or_(model.project_id == context.project_id,
+                           model.project_id == null()))
+        else:
+            query = query.filter_by(project_id=context.project_id)
+
+    return query
+
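+# Illustrative use of model_query (compare service_get() further down in this
+# module): the helper applies the context's read_deleted policy before any
+# caller-supplied filters.
+#
+#     @pick_context_manager_reader
+#     def example_service_get(context, service_id):
+#         return model_query(context, models.Service).\
+#             filter_by(id=service_id).\
+#             first()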
+
+def convert_objects_related_datetimes(values, *datetime_keys):
+    if not datetime_keys:
+        datetime_keys = ('created_at', 'deleted_at', 'updated_at')
+
+    for key in datetime_keys:
+        if key in values and values[key]:
+            if isinstance(values[key], six.string_types):
+                try:
+                    values[key] = timeutils.parse_strtime(values[key])
+                except ValueError:
+                    # Try alternate parsing since parse_strtime will fail
+                    # on e.g. '2015-05-28T19:59:38+00:00'
+                    values[key] = timeutils.parse_isotime(values[key])
+            # NOTE(danms): Strip UTC timezones from datetimes, since they're
+            # stored that way in the database
+            values[key] = values[key].replace(tzinfo=None)
+    return values
+
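+# Illustrative sketch (not part of the original patch), using the example
+# value mentioned in the comment above:
+#
+#     values = {'created_at': '2015-05-28T19:59:38+00:00'}
+#     convert_objects_related_datetimes(values, 'created_at')
+#     # values['created_at'] is now the naive datetime 2015-05-28 19:59:38
+#     # (parsed with parse_isotime, then stripped of its UTC tzinfo).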
+
+###################
+
+
+def constraint(**conditions):
+    return Constraint(conditions)
+
+
+def equal_any(*values):
+    return EqualityCondition(values)
+
+
+def not_equal(*values):
+    return InequalityCondition(values)
+
+
+class Constraint(object):
+
+    def __init__(self, conditions):
+        self.conditions = conditions
+
+    def apply(self, model, query):
+        for key, condition in self.conditions.items():
+            for clause in condition.clauses(getattr(model, key)):
+                query = query.filter(clause)
+        return query
+
+
+class EqualityCondition(object):
+
+    def __init__(self, values):
+        self.values = values
+
+    def clauses(self, field):
+        # The method signature requires us to return an iterable, even though
+        # for the OR operator this will actually be a single clause.
+        return [or_(*[field == value for value in self.values])]
+
+
+class InequalityCondition(object):
+
+    def __init__(self, values):
+        self.values = values
+
+    def clauses(self, field):
+        return [field != value for value in self.values]
+
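+# Illustrative sketch (not part of the original patch): these helpers are
+# consumed by callers such as instance_destroy() further down, which applies
+# the Constraint to its delete query; 'builder01'/'builder02' are
+# hypothetical host names:
+#
+#     cons = constraint(host=equal_any('builder01', 'builder02'))
+#     instance_destroy(context, instance_uuid, constraint=cons)
+#     # Raises exception.ConstraintNotMet if the instance's host no longer
+#     # matches either value at delete time.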
+
+class DeleteFromSelect(UpdateBase):
+    def __init__(self, table, select, column):
+        self.table = table
+        self.select = select
+        self.column = column
+
+
+# NOTE(guochbo): some versions of MySQL don't yet support subqueries with
+# 'LIMIT & IN/ALL/ANY/SOME'. We need to work around this with a nested select.
+@compiles(DeleteFromSelect)
+def visit_delete_from_select(element, compiler, **kw):
+    return "DELETE FROM %s WHERE %s in (SELECT T1.%s FROM (%s) as T1)" % (
+        compiler.process(element.table, asfrom=True),
+        compiler.process(element.column),
+        element.column.name,
+        compiler.process(element.select))
+
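+# Illustrative note (not part of the original patch): for a table t,
+# DeleteFromSelect(t, some_select, t.c.id) compiles to roughly
+#
+#     DELETE FROM t WHERE id in (SELECT T1.id FROM (<some_select>) as T1)
+#
+# which avoids the MySQL limitation described in the NOTE above.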
+###################
+
+
+@pick_context_manager_writer
+def service_destroy(context, service_id):
+    service = service_get(context, service_id)
+
+    model_query(context, models.Service).\
+                filter_by(id=service_id).\
+                soft_delete(synchronize_session=False)
+
+    # TODO(sbauza): Remove the service_id filter in a later release
+    # once we are sure that all compute nodes report the host field
+    model_query(context, models.ComputeNode).\
+                filter(or_(models.ComputeNode.service_id == service_id,
+                           models.ComputeNode.host == service['host'])).\
+                soft_delete(synchronize_session=False)
+
+
+@pick_context_manager_reader
+def service_get(context, service_id):
+    query = model_query(context, models.Service).filter_by(id=service_id)
+
+    result = query.first()
+    if not result:
+        raise exception.ServiceNotFound(service_id=service_id)
+
+    return result
+
+
+@pick_context_manager_reader
+def service_get_by_uuid(context, service_uuid):
+    query = model_query(context, models.Service).filter_by(uuid=service_uuid)
+
+    result = query.first()
+    if not result:
+        raise exception.ServiceNotFound(service_id=service_uuid)
+
+    return result
+
+
+@pick_context_manager_reader_allow_async
+def service_get_minimum_version(context, binaries):
+    min_versions = context.session.query(
+        models.Service.binary,
+        func.min(models.Service.version)).\
+                         filter(models.Service.binary.in_(binaries)).\
+                         filter(models.Service.deleted == 0).\
+                         filter(models.Service.forced_down == false()).\
+                         group_by(models.Service.binary)
+    return dict(min_versions)
+
+
+@pick_context_manager_reader
+def service_get_all(context, disabled=None):
+    query = model_query(context, models.Service)
+
+    if disabled is not None:
+        query = query.filter_by(disabled=disabled)
+
+    return query.all()
+
+
+@pick_context_manager_reader
+def service_get_all_by_topic(context, topic):
+    return model_query(context, models.Service, read_deleted="no").\
+                filter_by(disabled=False).\
+                filter_by(topic=topic).\
+                all()
+
+
+@pick_context_manager_reader
+def service_get_by_host_and_topic(context, host, topic):
+    return model_query(context, models.Service, read_deleted="no").\
+                filter_by(disabled=False).\
+                filter_by(host=host).\
+                filter_by(topic=topic).\
+                first()
+
+
+@pick_context_manager_reader
+def service_get_by_topic(context, topic):
+    return model_query(context, models.Service, read_deleted="no").\
+                filter_by(disabled=False).\
+                filter_by(topic=topic).\
+                first()
+
+
+@pick_context_manager_reader
+def service_get_all_by_binary(context, binary, include_disabled=False):
+    query = model_query(context, models.Service, read_deleted="no").\
+                    filter_by(binary=binary)
+    if not include_disabled:
+        query = query.filter_by(disabled=False)
+    return query.all()
+
+
+@pick_context_manager_reader
+def service_get_all_computes_by_hv_type(context, hv_type,
+                                        include_disabled=False):
+    query = model_query(context, models.Service, read_deleted="no").\
+                    filter_by(binary='gosbs-scheduler')
+    if not include_disabled:
+        query = query.filter_by(disabled=False)
+    query = query.join(models.ComputeNode,
+                       models.Service.host == models.ComputeNode.host).\
+                  filter(models.ComputeNode.hypervisor_type == hv_type).\
+                  distinct('host')
+    return query.all()
+
+
+@pick_context_manager_reader
+def service_get_by_host_and_binary(context, host, binary):
+    result = model_query(context, models.Service, read_deleted="no").\
+                    filter_by(host=host).\
+                    filter_by(binary=binary).\
+                    first()
+
+    if not result:
+        raise exception.HostBinaryNotFound(host=host, binary=binary)
+
+    return result
+
+
+@pick_context_manager_reader
+def service_get_all_by_host(context, host):
+    return model_query(context, models.Service, read_deleted="no").\
+                filter_by(host=host).\
+                all()
+
+
+@pick_context_manager_reader_allow_async
+def service_get_by_compute_host(context, host):
+    result = model_query(context, models.Service, read_deleted="no").\
+                filter_by(host=host).\
+                filter_by(binary='gosbs-scheduler').\
+                first()
+
+    if not result:
+        raise exception.ComputeHostNotFound(host=host)
+
+    return result
+
+
+@pick_context_manager_writer
+def service_create(context, values):
+    service_ref = models.Service()
+    service_ref.update(values)
+    try:
+        service_ref.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'binary' in e.columns:
+            raise exception.ServiceBinaryExists(host=values.get('host'),
+                        binary=values.get('binary'))
+        raise exception.ServiceTopicExists(host=values.get('host'),
+                        topic=values.get('topic'))
+    return service_ref
+
+
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def service_update(context, service_id, values):
+    service_ref = service_get(context, service_id)
+    # Only servicegroup.drivers.db.DbDriver._report_state() updates
+    # 'report_count', so if that value changes then store the timestamp
+    # as the last time we got a state report.
+    if 'report_count' in values:
+        if values['report_count'] > service_ref.report_count:
+            service_ref.last_seen_up = timeutils.utcnow()
+    service_ref.update(values)
+
+    return service_ref
+
+
+###################
+
+
+def _compute_node_select(context, filters=None, limit=None, marker=None):
+    if filters is None:
+        filters = {}
+
+    cn_tbl = sa.alias(models.ComputeNode.__table__, name='cn')
+    select = sa.select([cn_tbl])
+
+    if context.read_deleted == "no":
+        select = select.where(cn_tbl.c.deleted == 0)
+    if "compute_id" in filters:
+        select = select.where(cn_tbl.c.id == filters["compute_id"])
+    if "service_id" in filters:
+        select = select.where(cn_tbl.c.service_id == filters["service_id"])
+    if "host" in filters:
+        select = select.where(cn_tbl.c.host == filters["host"])
+    if "hypervisor_hostname" in filters:
+        hyp_hostname = filters["hypervisor_hostname"]
+        select = select.where(cn_tbl.c.hypervisor_hostname == hyp_hostname)
+    if "mapped" in filters:
+        select = select.where(cn_tbl.c.mapped < filters['mapped'])
+    if marker is not None:
+        try:
+            compute_node_get(context, marker)
+        except exception.ComputeHostNotFound:
+            raise exception.MarkerNotFound(marker=marker)
+        select = select.where(cn_tbl.c.id > marker)
+    if limit is not None:
+        select = select.limit(limit)
+    # Explicitly order by id, so we're not dependent on the native sort
+    # order of the underlying DB.
+    select = select.order_by(asc("id"))
+    return select
+
+
+def _compute_node_fetchall(context, filters=None, limit=None, marker=None):
+    select = _compute_node_select(context, filters, limit=limit, marker=marker)
+    engine = get_engine(context=context)
+    conn = engine.connect()
+
+    results = conn.execute(select).fetchall()
+
+    # Callers expect dict-like objects, not SQLAlchemy RowProxy objects...
+    results = [dict(r) for r in results]
+    conn.close()
+    return results
+
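+# Illustrative sketch (not part of the original patch): limit/marker give
+# simple keyset pagination over compute node ids; process() is a hypothetical
+# consumer:
+#
+#     page = _compute_node_fetchall(context, limit=100)
+#     while page:
+#         process(page)
+#         page = _compute_node_fetchall(context, limit=100,
+#                                       marker=page[-1]['id'])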
+
+@pick_context_manager_reader
+def compute_node_get(context, compute_id):
+    results = _compute_node_fetchall(context, {"compute_id": compute_id})
+    if not results:
+        raise exception.ComputeHostNotFound(host=compute_id)
+    return results[0]
+
+
+@pick_context_manager_reader
+def compute_node_get_model(context, compute_id):
+    # TODO(edleafe): remove once the compute node resource provider migration
+    # is complete, and this distinction is no longer necessary.
+    result = model_query(context, models.ComputeNode).\
+            filter_by(id=compute_id).\
+            first()
+    if not result:
+        raise exception.ComputeHostNotFound(host=compute_id)
+    return result
+
+
+@pick_context_manager_reader
+def compute_nodes_get_by_service_id(context, service_id):
+    results = _compute_node_fetchall(context, {"service_id": service_id})
+    if not results:
+        raise exception.ServiceNotFound(service_id=service_id)
+    return results
+
+
+@pick_context_manager_reader
+def compute_node_get_by_host_and_nodename(context, host, nodename):
+    results = _compute_node_fetchall(context,
+            {"host": host, "hypervisor_hostname": nodename})
+    if not results:
+        raise exception.ComputeHostNotFound(host=host)
+    return results[0]
+
+
+@pick_context_manager_reader_allow_async
+def compute_node_get_all_by_host(context, host):
+    results = _compute_node_fetchall(context, {"host": host})
+    if not results:
+        raise exception.ComputeHostNotFound(host=host)
+    return results
+
+
+@pick_context_manager_reader
+def compute_node_get_all(context):
+    return _compute_node_fetchall(context)
+
+
+@pick_context_manager_reader
+def compute_node_get_all_mapped_less_than(context, mapped_less_than):
+    return _compute_node_fetchall(context,
+                                  {'mapped': mapped_less_than})
+
+
+@pick_context_manager_reader
+def compute_node_get_all_by_pagination(context, limit=None, marker=None):
+    return _compute_node_fetchall(context, limit=limit, marker=marker)
+
+
+@pick_context_manager_reader
+def compute_node_search_by_hypervisor(context, hypervisor_match):
+    field = models.ComputeNode.hypervisor_hostname
+    return model_query(context, models.ComputeNode).\
+            filter(field.like('%%%s%%' % hypervisor_match)).\
+            all()
+
+
+@pick_context_manager_writer
+def compute_node_create(context, values):
+    """Creates a new ComputeNode and populates the capacity fields
+    with the most recent data.
+    """
+    convert_objects_related_datetimes(values)
+
+    compute_node_ref = models.ComputeNode()
+    compute_node_ref.update(values)
+    compute_node_ref.save(context.session)
+
+    return compute_node_ref
+
+
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def compute_node_update(context, compute_id, values):
+    """Updates the ComputeNode record with the most recent data."""
+
+    compute_ref = compute_node_get_model(context, compute_id)
+    # Always update this, even if there's going to be no other
+    # changes in data.  This ensures that we invalidate the
+    # scheduler cache of compute node data in case of races.
+    values['updated_at'] = timeutils.utcnow()
+    convert_objects_related_datetimes(values)
+    compute_ref.update(values)
+
+    return compute_ref
+
+
+@pick_context_manager_writer
+def compute_node_delete(context, compute_id):
+    """Delete a ComputeNode record."""
+    result = model_query(context, models.ComputeNode).\
+             filter_by(id=compute_id).\
+             soft_delete(synchronize_session=False)
+
+    if not result:
+        raise exception.ComputeHostNotFound(host=compute_id)
+
+
+@pick_context_manager_reader
+def compute_node_statistics(context):
+    """Compute statistics over all compute nodes."""
+    engine = get_engine(context=context)
+    services_tbl = models.Service.__table__
+
+    inner_sel = sa.alias(_compute_node_select(context), name='inner_sel')
+
+    # TODO(sbauza): Remove the service_id filter in a later release
+    # once we are sure that all compute nodes report the host field
+    j = sa.join(
+        inner_sel, services_tbl,
+        sql.and_(
+            sql.or_(
+                inner_sel.c.host == services_tbl.c.host,
+                inner_sel.c.service_id == services_tbl.c.id
+            ),
+            services_tbl.c.disabled == false(),
+            services_tbl.c.binary == 'gosbs-scheduler',
+            services_tbl.c.deleted == 0
+        )
+    )
+
+    # NOTE(jaypipes): This COALESCE() stuff is temporary while the data
+    # migration to the new resource providers inventories and allocations
+    # tables is completed.
+    agg_cols = [
+        func.count().label('count'),
+        sql.func.sum(
+            inner_sel.c.vcpus
+        ).label('vcpus'),
+        sql.func.sum(
+            inner_sel.c.memory_mb
+        ).label('memory_mb'),
+        sql.func.sum(
+            inner_sel.c.local_gb
+        ).label('local_gb'),
+        sql.func.sum(
+            inner_sel.c.vcpus_used
+        ).label('vcpus_used'),
+        sql.func.sum(
+            inner_sel.c.memory_mb_used
+        ).label('memory_mb_used'),
+        sql.func.sum(
+            inner_sel.c.local_gb_used
+        ).label('local_gb_used'),
+        sql.func.sum(
+            inner_sel.c.free_ram_mb
+        ).label('free_ram_mb'),
+        sql.func.sum(
+            inner_sel.c.free_disk_gb
+        ).label('free_disk_gb'),
+        sql.func.sum(
+            inner_sel.c.current_workload
+        ).label('current_workload'),
+        sql.func.sum(
+            inner_sel.c.running_vms
+        ).label('running_vms'),
+        sql.func.sum(
+            inner_sel.c.disk_available_least
+        ).label('disk_available_least'),
+    ]
+    select = sql.select(agg_cols).select_from(j)
+    conn = engine.connect()
+
+    results = conn.execute(select).fetchone()
+
+    # Build a dict of the info--making no assumptions about result
+    fields = ('count', 'vcpus', 'memory_mb', 'local_gb', 'vcpus_used',
+              'memory_mb_used', 'local_gb_used', 'free_ram_mb', 'free_disk_gb',
+              'current_workload', 'running_vms', 'disk_available_least')
+    results = {field: int(results[idx] or 0)
+               for idx, field in enumerate(fields)}
+    conn.close()
+    return results
+
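+# Illustrative note (not part of the original patch): the return value is a
+# flat dict of ints keyed by the field names above, with 0 substituted for
+# columns that aggregate to NULL, e.g.:
+#
+#     stats = compute_node_statistics(context)
+#     # {'count': 2, 'vcpus': 32, ..., 'disk_available_least': 900}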
+
+###################
+
+
+@pick_context_manager_writer
+def certificate_create(context, values):
+    certificate_ref = models.Certificate()
+    for (key, value) in values.items():
+        certificate_ref[key] = value
+    certificate_ref.save(context.session)
+    return certificate_ref
+
+
+@pick_context_manager_reader
+def certificate_get_all_by_project(context, project_id):
+    return model_query(context, models.Certificate, read_deleted="no").\
+                   filter_by(project_id=project_id).\
+                   all()
+
+
+@pick_context_manager_reader
+def certificate_get_all_by_user(context, user_id):
+    return model_query(context, models.Certificate, read_deleted="no").\
+                   filter_by(user_id=user_id).\
+                   all()
+
+
+@pick_context_manager_reader
+def certificate_get_all_by_user_and_project(context, user_id, project_id):
+    return model_query(context, models.Certificate, read_deleted="no").\
+                   filter_by(user_id=user_id).\
+                   filter_by(project_id=project_id).\
+                   all()
+
+
+###################
+
+
+@require_context
+@pick_context_manager_reader
+def floating_ip_get(context, id):
+    try:
+        result = model_query(context, models.FloatingIp, project_only=True).\
+                     filter_by(id=id).\
+                     options(joinedload_all('fixed_ip.instance')).\
+                     first()
+
+        if not result:
+            raise exception.FloatingIpNotFound(id=id)
+    except db_exc.DBError:
+        LOG.warning("Invalid floating IP ID %s in request", id)
+        raise exception.InvalidID(id=id)
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def floating_ip_get_pools(context):
+    pools = []
+    for result in model_query(context, models.FloatingIp,
+                              (models.FloatingIp.pool,)).distinct():
+        pools.append({'name': result[0]})
+    return pools
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def floating_ip_allocate_address(context, project_id, pool,
+                                 auto_assigned=False):
+    gosbs.context.authorize_project_context(context, project_id)
+    floating_ip_ref = model_query(context, models.FloatingIp,
+                                  read_deleted="no").\
+        filter_by(fixed_ip_id=None).\
+        filter_by(project_id=None).\
+        filter_by(pool=pool).\
+        first()
+
+    if not floating_ip_ref:
+        raise exception.NoMoreFloatingIps()
+
+    params = {'project_id': project_id, 'auto_assigned': auto_assigned}
+
+    rows_update = model_query(context, models.FloatingIp, read_deleted="no").\
+        filter_by(id=floating_ip_ref['id']).\
+        filter_by(fixed_ip_id=None).\
+        filter_by(project_id=None).\
+        filter_by(pool=pool).\
+        update(params, synchronize_session='evaluate')
+
+    if not rows_update:
+        LOG.debug('The row was updated in a concurrent transaction, '
+                  'we will fetch another one')
+        raise db_exc.RetryRequest(exception.FloatingIpAllocateFailed())
+
+    return floating_ip_ref['address']
+
+
+@require_context
+@pick_context_manager_writer
+def floating_ip_bulk_create(context, ips, want_result=True):
+    try:
+        tab = models.FloatingIp().__table__
+        context.session.execute(tab.insert(), ips)
+    except db_exc.DBDuplicateEntry as e:
+        raise exception.FloatingIpExists(address=e.value)
+
+    if want_result:
+        return model_query(context, models.FloatingIp).filter(
+            models.FloatingIp.address.in_(
+                [ip['address'] for ip in ips])).all()
+
+
+def _ip_range_splitter(ips, block_size=256):
+    """Yields blocks of IPs no more than block_size elements long."""
+    out = []
+    count = 0
+    for ip in ips:
+        out.append(ip['address'])
+        count += 1
+
+        if count > block_size - 1:
+            yield out
+            out = []
+            count = 0
+
+    if out:
+        yield out
+
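+# Illustrative sketch (not part of the original patch): the splitter consumes
+# dicts with an 'address' key and yields lists of at most block_size
+# addresses:
+#
+#     ips = [{'address': '10.0.0.%d' % i} for i in range(1, 601)]
+#     blocks = list(_ip_range_splitter(ips))
+#     # -> three blocks of 256, 256 and 88 addresses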
+
+@require_context
+@pick_context_manager_writer
+def floating_ip_bulk_destroy(context, ips):
+    project_id_to_quota_count = collections.defaultdict(int)
+    for ip_block in _ip_range_splitter(ips):
+        # Find any floating IPs that were not auto_assigned and
+        # thus need quota released.
+        query = model_query(context, models.FloatingIp).\
+            filter(models.FloatingIp.address.in_(ip_block)).\
+            filter_by(auto_assigned=False)
+        for row in query.all():
+            # The count is negative since we release quota by
+            # reserving negative quota.
+            project_id_to_quota_count[row['project_id']] -= 1
+        # Delete the floating IPs.
+        model_query(context, models.FloatingIp).\
+            filter(models.FloatingIp.address.in_(ip_block)).\
+            soft_delete(synchronize_session='fetch')
+
+
+@require_context
+@pick_context_manager_writer
+def floating_ip_create(context, values):
+    floating_ip_ref = models.FloatingIp()
+    floating_ip_ref.update(values)
+    try:
+        floating_ip_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.FloatingIpExists(address=values['address'])
+    return floating_ip_ref
+
+
+def _floating_ip_count_by_project(context, project_id):
+    gosbs.context.authorize_project_context(context, project_id)
+    # TODO(tr3buchet): why leave auto_assigned floating IPs out?
+    return model_query(context, models.FloatingIp, read_deleted="no").\
+                   filter_by(project_id=project_id).\
+                   filter_by(auto_assigned=False).\
+                   count()
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def floating_ip_fixed_ip_associate(context, floating_address,
+                                   fixed_address, host):
+    fixed_ip_ref = model_query(context, models.FixedIp).\
+                     filter_by(address=fixed_address).\
+                     options(joinedload('network')).\
+                     first()
+    if not fixed_ip_ref:
+        raise exception.FixedIpNotFoundForAddress(address=fixed_address)
+    rows = model_query(context, models.FloatingIp).\
+                filter_by(address=floating_address).\
+                filter(models.FloatingIp.project_id ==
+                       context.project_id).\
+                filter(or_(models.FloatingIp.fixed_ip_id ==
+                           fixed_ip_ref['id'],
+                           models.FloatingIp.fixed_ip_id.is_(None))).\
+                update({'fixed_ip_id': fixed_ip_ref['id'], 'host': host})
+
+    if not rows:
+        raise exception.FloatingIpAssociateFailed(address=floating_address)
+
+    return fixed_ip_ref
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def floating_ip_deallocate(context, address):
+    return model_query(context, models.FloatingIp).\
+        filter_by(address=address).\
+        filter(and_(models.FloatingIp.project_id != null()),
+                    models.FloatingIp.fixed_ip_id == null()).\
+        update({'project_id': None,
+                'host': None,
+                'auto_assigned': False},
+               synchronize_session=False)
+
+
+@require_context
+@pick_context_manager_writer
+def floating_ip_destroy(context, address):
+    model_query(context, models.FloatingIp).\
+            filter_by(address=address).\
+            delete()
+
+
+@require_context
+@pick_context_manager_writer
+def floating_ip_disassociate(context, address):
+    floating_ip_ref = model_query(context,
+                                  models.FloatingIp).\
+                        filter_by(address=address).\
+                        first()
+    if not floating_ip_ref:
+        raise exception.FloatingIpNotFoundForAddress(address=address)
+
+    fixed_ip_ref = model_query(context, models.FixedIp).\
+        filter_by(id=floating_ip_ref['fixed_ip_id']).\
+        options(joinedload('network')).\
+        first()
+    floating_ip_ref.fixed_ip_id = None
+    floating_ip_ref.host = None
+
+    return fixed_ip_ref
+
+
+def _floating_ip_get_all(context):
+    return model_query(context, models.FloatingIp, read_deleted="no")
+
+
+@pick_context_manager_reader
+def floating_ip_get_all(context):
+    floating_ip_refs = _floating_ip_get_all(context).\
+                       options(joinedload('fixed_ip')).\
+                       all()
+    if not floating_ip_refs:
+        raise exception.NoFloatingIpsDefined()
+    return floating_ip_refs
+
+
+@pick_context_manager_reader
+def floating_ip_get_all_by_host(context, host):
+    floating_ip_refs = _floating_ip_get_all(context).\
+                       filter_by(host=host).\
+                       options(joinedload('fixed_ip')).\
+                       all()
+    if not floating_ip_refs:
+        raise exception.FloatingIpNotFoundForHost(host=host)
+    return floating_ip_refs
+
+
+@require_context
+@pick_context_manager_reader
+def floating_ip_get_all_by_project(context, project_id):
+    gosbs.context.authorize_project_context(context, project_id)
+    # TODO(tr3buchet): why do we not want auto_assigned floating IPs here?
+    return _floating_ip_get_all(context).\
+                         filter_by(project_id=project_id).\
+                         filter_by(auto_assigned=False).\
+                         options(joinedload_all('fixed_ip.instance')).\
+                         all()
+
+
+@require_context
+@pick_context_manager_reader
+def floating_ip_get_by_address(context, address):
+    return _floating_ip_get_by_address(context, address)
+
+
+def _floating_ip_get_by_address(context, address):
+
+    # if address string is empty explicitly set it to None
+    if not address:
+        address = None
+    try:
+        result = model_query(context, models.FloatingIp).\
+                    filter_by(address=address).\
+                    options(joinedload_all('fixed_ip.instance')).\
+                    first()
+
+        if not result:
+            raise exception.FloatingIpNotFoundForAddress(address=address)
+    except db_exc.DBError:
+        msg = _("Invalid floating IP %s in request") % address
+        LOG.warning(msg)
+        raise exception.InvalidIpAddressError(msg)
+
+    # If the floating IP has a project ID set, check to make sure
+    # the non-admin user has access.
+    if result.project_id and gosbs.context.is_user_context(context):
+        gosbs.context.authorize_project_context(context, result.project_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def floating_ip_get_by_fixed_address(context, fixed_address):
+    return model_query(context, models.FloatingIp).\
+                       outerjoin(models.FixedIp,
+                                 models.FixedIp.id ==
+                                 models.FloatingIp.fixed_ip_id).\
+                       filter(models.FixedIp.address == fixed_address).\
+                       all()
+
+
+@require_context
+@pick_context_manager_reader
+def floating_ip_get_by_fixed_ip_id(context, fixed_ip_id):
+    return model_query(context, models.FloatingIp).\
+                filter_by(fixed_ip_id=fixed_ip_id).\
+                all()
+
+
+@require_context
+@pick_context_manager_writer
+def floating_ip_update(context, address, values):
+    float_ip_ref = _floating_ip_get_by_address(context, address)
+    float_ip_ref.update(values)
+    try:
+        float_ip_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.FloatingIpExists(address=values['address'])
+    return float_ip_ref
+
+
+###################
+
+
+@require_context
+@pick_context_manager_reader
+def dnsdomain_get(context, fqdomain):
+    return model_query(context, models.DNSDomain, read_deleted="no").\
+               filter_by(domain=fqdomain).\
+               with_lockmode('update').\
+               first()
+
+
+def _dnsdomain_get_or_create(context, fqdomain):
+    domain_ref = dnsdomain_get(context, fqdomain)
+    if not domain_ref:
+        dns_ref = models.DNSDomain()
+        dns_ref.update({'domain': fqdomain,
+                        'availability_zone': None,
+                        'project_id': None})
+        return dns_ref
+
+    return domain_ref
+
+
+@pick_context_manager_writer
+def dnsdomain_register_for_zone(context, fqdomain, zone):
+    domain_ref = _dnsdomain_get_or_create(context, fqdomain)
+    domain_ref.scope = 'private'
+    domain_ref.availability_zone = zone
+    context.session.add(domain_ref)
+
+
+@pick_context_manager_writer
+def dnsdomain_register_for_project(context, fqdomain, project):
+    domain_ref = _dnsdomain_get_or_create(context, fqdomain)
+    domain_ref.scope = 'public'
+    domain_ref.project_id = project
+    context.session.add(domain_ref)
+
+
+@pick_context_manager_writer
+def dnsdomain_unregister(context, fqdomain):
+    model_query(context, models.DNSDomain).\
+                 filter_by(domain=fqdomain).\
+                 delete()
+
+
+@pick_context_manager_reader
+def dnsdomain_get_all(context):
+    return model_query(context, models.DNSDomain, read_deleted="no").all()
+
+
+###################
+
+
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def fixed_ip_associate(context, address, instance_uuid, network_id=None,
+                       reserved=False, virtual_interface_id=None):
+    """Keyword arguments:
+    reserved -- should be a boolean value(True or False), exact value will be
+    used to filter on the fixed IP address
+    """
+    if not uuidutils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(uuid=instance_uuid)
+
+    network_or_none = or_(models.FixedIp.network_id == network_id,
+                          models.FixedIp.network_id == null())
+    fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
+                           filter(network_or_none).\
+                           filter_by(reserved=reserved).\
+                           filter_by(address=address).\
+                           first()
+
+    if fixed_ip_ref is None:
+        raise exception.FixedIpNotFoundForNetwork(address=address,
+                                        network_uuid=network_id)
+    if fixed_ip_ref.instance_uuid:
+        raise exception.FixedIpAlreadyInUse(address=address,
+                                            instance_uuid=instance_uuid)
+
+    params = {'instance_uuid': instance_uuid,
+              'allocated': virtual_interface_id is not None}
+    if not fixed_ip_ref.network_id:
+        params['network_id'] = network_id
+    if virtual_interface_id:
+        params['virtual_interface_id'] = virtual_interface_id
+
+    rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
+                            filter_by(id=fixed_ip_ref.id).\
+                            filter(network_or_none).\
+                            filter_by(reserved=reserved).\
+                            filter_by(address=address).\
+                            update(params, synchronize_session='evaluate')
+
+    if not rows_updated:
+        LOG.debug('The row was updated in a concurrent transaction, '
+                  'we will fetch another row')
+        raise db_exc.RetryRequest(
+            exception.FixedIpAssociateFailed(net=network_id))
+
+    return fixed_ip_ref
+
+
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def fixed_ip_associate_pool(context, network_id, instance_uuid=None,
+                            host=None, virtual_interface_id=None):
+    """allocate a fixed ip out of a fixed ip network pool.
+
+    This allocates an unallocated fixed ip out of a specified
+    network. We sort by updated_at to hand out the oldest address in
+    the list.
+
+    """
+    if instance_uuid and not uuidutils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(uuid=instance_uuid)
+
+    network_or_none = or_(models.FixedIp.network_id == network_id,
+                          models.FixedIp.network_id == null())
+    fixed_ip_ref = model_query(context, models.FixedIp, read_deleted="no").\
+                           filter(network_or_none).\
+                           filter_by(reserved=False).\
+                           filter_by(instance_uuid=None).\
+                           filter_by(host=None).\
+                           filter_by(leased=False).\
+                           order_by(asc(models.FixedIp.updated_at)).\
+                           first()
+
+    if not fixed_ip_ref:
+        raise exception.NoMoreFixedIps(net=network_id)
+
+    params = {'allocated': virtual_interface_id is not None}
+    if fixed_ip_ref['network_id'] is None:
+        params['network_id'] = network_id
+    if instance_uuid:
+        params['instance_uuid'] = instance_uuid
+    if host:
+        params['host'] = host
+    if virtual_interface_id:
+        params['virtual_interface_id'] = virtual_interface_id
+
+    rows_updated = model_query(context, models.FixedIp, read_deleted="no").\
+        filter_by(id=fixed_ip_ref['id']).\
+        filter_by(network_id=fixed_ip_ref['network_id']).\
+        filter_by(reserved=False).\
+        filter_by(instance_uuid=None).\
+        filter_by(host=None).\
+        filter_by(leased=False).\
+        filter_by(address=fixed_ip_ref['address']).\
+        update(params, synchronize_session='evaluate')
+
+    if not rows_updated:
+        LOG.debug('The row was updated in a concurrent transaction, '
+                  'we will fetch another row')
+        raise db_exc.RetryRequest(
+            exception.FixedIpAssociateFailed(net=network_id))
+
+    return fixed_ip_ref
+
+
+@require_context
+@pick_context_manager_writer
+def fixed_ip_create(context, values):
+    fixed_ip_ref = models.FixedIp()
+    fixed_ip_ref.update(values)
+    try:
+        fixed_ip_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.FixedIpExists(address=values['address'])
+    return fixed_ip_ref
+
+
+@require_context
+@pick_context_manager_writer
+def fixed_ip_bulk_create(context, ips):
+    try:
+        tab = models.FixedIp.__table__
+        context.session.execute(tab.insert(), ips)
+    except db_exc.DBDuplicateEntry as e:
+        raise exception.FixedIpExists(address=e.value)
+
+
+@require_context
+@pick_context_manager_writer
+def fixed_ip_disassociate(context, address):
+    _fixed_ip_get_by_address(context, address).update(
+        {'instance_uuid': None,
+         'virtual_interface_id': None})
+
+
+@pick_context_manager_writer
+def fixed_ip_disassociate_all_by_timeout(context, host, time):
+    # NOTE(vish): only update fixed ips that "belong" to this
+    #             host; i.e. the network host or the instance
+    #             host matches. Two queries necessary because
+    #             join with update doesn't work.
+    host_filter = or_(and_(models.Instance.host == host,
+                           models.Network.multi_host == true()),
+                      models.Network.host == host)
+    result = model_query(context, models.FixedIp, (models.FixedIp.id,),
+                         read_deleted="no").\
+            filter(models.FixedIp.allocated == false()).\
+            filter(models.FixedIp.updated_at < time).\
+            join((models.Network,
+                  models.Network.id == models.FixedIp.network_id)).\
+            join((models.Instance,
+                  models.Instance.uuid == models.FixedIp.instance_uuid)).\
+            filter(host_filter).\
+            all()
+    fixed_ip_ids = [fip[0] for fip in result]
+    if not fixed_ip_ids:
+        return 0
+    result = model_query(context, models.FixedIp).\
+                         filter(models.FixedIp.id.in_(fixed_ip_ids)).\
+                         update({'instance_uuid': None,
+                                 'leased': False,
+                                 'updated_at': timeutils.utcnow()},
+                                synchronize_session='fetch')
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def fixed_ip_get(context, id, get_network=False):
+    query = model_query(context, models.FixedIp).filter_by(id=id)
+    if get_network:
+        query = query.options(joinedload('network'))
+    result = query.first()
+    if not result:
+        raise exception.FixedIpNotFound(id=id)
+
+    # FIXME(sirp): shouldn't we just use project_only here to restrict the
+    # results?
+    if (gosbs.context.is_user_context(context) and
+            result['instance_uuid'] is not None):
+        instance = instance_get_by_uuid(context.elevated(read_deleted='yes'),
+                                        result['instance_uuid'])
+        gosbs.context.authorize_project_context(context, instance.project_id)
+
+    return result
+
+
+@pick_context_manager_reader
+def fixed_ip_get_all(context):
+    result = model_query(context, models.FixedIp, read_deleted="yes").all()
+    if not result:
+        raise exception.NoFixedIpsDefined()
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def fixed_ip_get_by_address(context, address, columns_to_join=None):
+    return _fixed_ip_get_by_address(context, address,
+                                    columns_to_join=columns_to_join)
+
+
+def _fixed_ip_get_by_address(context, address, columns_to_join=None):
+    if columns_to_join is None:
+        columns_to_join = []
+
+    try:
+        result = model_query(context, models.FixedIp)
+        for column in columns_to_join:
+            result = result.options(joinedload_all(column))
+        result = result.filter_by(address=address).first()
+        if not result:
+            raise exception.FixedIpNotFoundForAddress(address=address)
+    except db_exc.DBError:
+        msg = _("Invalid fixed IP Address %s in request") % address
+        LOG.warning(msg)
+        raise exception.FixedIpInvalid(msg)
+
+    # NOTE(sirp): shouldn't we just use project_only here to restrict the
+    # results?
+    if (gosbs.context.is_user_context(context) and
+            result['instance_uuid'] is not None):
+        instance = _instance_get_by_uuid(
+            context.elevated(read_deleted='yes'),
+            result['instance_uuid'])
+        gosbs.context.authorize_project_context(context,
+                                                instance.project_id)
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def fixed_ip_get_by_floating_address(context, floating_address):
+    return model_query(context, models.FixedIp).\
+                       join(models.FloatingIp,
+                            models.FloatingIp.fixed_ip_id ==
+                            models.FixedIp.id).\
+                       filter(models.FloatingIp.address == floating_address).\
+                       first()
+    # NOTE(tr3buchet) please don't invent an exception here, None is fine
+
+
+@require_context
+@pick_context_manager_reader
+def fixed_ip_get_by_instance(context, instance_uuid):
+    if not uuidutils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(uuid=instance_uuid)
+
+    vif_and = and_(models.VirtualInterface.id ==
+                   models.FixedIp.virtual_interface_id,
+                   models.VirtualInterface.deleted == 0)
+    result = model_query(context, models.FixedIp, read_deleted="no").\
+                 filter_by(instance_uuid=instance_uuid).\
+                 outerjoin(models.VirtualInterface, vif_and).\
+                 options(contains_eager("virtual_interface")).\
+                 options(joinedload('network')).\
+                 options(joinedload('floating_ips')).\
+                 order_by(asc(models.VirtualInterface.created_at),
+                          asc(models.VirtualInterface.id)).\
+                 all()
+
+    if not result:
+        raise exception.FixedIpNotFoundForInstance(instance_uuid=instance_uuid)
+
+    return result
+
+
+@pick_context_manager_reader
+def fixed_ip_get_by_host(context, host):
+    instance_uuids = _instance_get_all_uuids_by_host(context, host)
+    if not instance_uuids:
+        return []
+
+    return model_query(context, models.FixedIp).\
+             filter(models.FixedIp.instance_uuid.in_(instance_uuids)).\
+             all()
+
+
+@require_context
+@pick_context_manager_reader
+def fixed_ip_get_by_network_host(context, network_id, host):
+    result = model_query(context, models.FixedIp, read_deleted="no").\
+                 filter_by(network_id=network_id).\
+                 filter_by(host=host).\
+                 first()
+
+    if not result:
+        raise exception.FixedIpNotFoundForNetworkHost(network_id=network_id,
+                                                      host=host)
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def fixed_ips_by_virtual_interface(context, vif_id):
+    result = model_query(context, models.FixedIp, read_deleted="no").\
+                 filter_by(virtual_interface_id=vif_id).\
+                 options(joinedload('network')).\
+                 options(joinedload('floating_ips')).\
+                 all()
+
+    return result
+
+
+@require_context
+@pick_context_manager_writer
+def fixed_ip_update(context, address, values):
+    _fixed_ip_get_by_address(context, address).update(values)
+
+
+def _fixed_ip_count_by_project(context, project_id):
+    gosbs.context.authorize_project_context(context, project_id)
+    return model_query(context, models.FixedIp, (models.FixedIp.id,),
+                       read_deleted="no").\
+                join((models.Instance,
+                      models.Instance.uuid == models.FixedIp.instance_uuid)).\
+                filter(models.Instance.project_id == project_id).\
+                count()
+
+
+###################
+
+
+@require_context
+@pick_context_manager_writer
+def virtual_interface_create(context, values):
+    """Create a new virtual interface record in the database.
+
+    :param values: = dict containing column values
+    """
+    try:
+        vif_ref = models.VirtualInterface()
+        vif_ref.update(values)
+        vif_ref.save(context.session)
+    except db_exc.DBError:
+        LOG.exception("VIF creation failed with a database error.")
+        raise exception.VirtualInterfaceCreateException()
+
+    return vif_ref
+
+
+def _virtual_interface_query(context):
+    return model_query(context, models.VirtualInterface, read_deleted="no")
+
+
+@require_context
+@pick_context_manager_writer
+def virtual_interface_update(context, address, values):
+    vif_ref = virtual_interface_get_by_address(context, address)
+    vif_ref.update(values)
+    vif_ref.save(context.session)
+    return vif_ref
+
+
+@require_context
+@pick_context_manager_reader
+def virtual_interface_get(context, vif_id):
+    """Gets a virtual interface from the table.
+
+    :param vif_id: = id of the virtual interface
+    """
+    vif_ref = _virtual_interface_query(context).\
+                      filter_by(id=vif_id).\
+                      first()
+    return vif_ref
+
+
+@require_context
+@pick_context_manager_reader
+def virtual_interface_get_by_address(context, address):
+    """Gets a virtual interface from the table.
+
+    :param address: = the address of the interface you're looking to get
+    """
+    try:
+        vif_ref = _virtual_interface_query(context).\
+                          filter_by(address=address).\
+                          first()
+    except db_exc.DBError:
+        msg = _("Invalid virtual interface address %s in request") % address
+        LOG.warning(msg)
+        raise exception.InvalidIpAddressError(msg)
+    return vif_ref
+
+
+@require_context
+@pick_context_manager_reader
+def virtual_interface_get_by_uuid(context, vif_uuid):
+    """Gets a virtual interface from the table.
+
+    :param vif_uuid: the uuid of the interface you're looking to get
+    """
+    vif_ref = _virtual_interface_query(context).\
+                      filter_by(uuid=vif_uuid).\
+                      first()
+    return vif_ref
+
+
+@require_context
+@require_instance_exists_using_uuid
+@pick_context_manager_reader_allow_async
+def virtual_interface_get_by_instance(context, instance_uuid):
+    """Gets all virtual interfaces for instance.
+
+    :param instance_uuid: = uuid of the instance to retrieve vifs for
+    """
+    vif_refs = _virtual_interface_query(context).\
+                       filter_by(instance_uuid=instance_uuid).\
+                       order_by(asc("created_at"), asc("id")).\
+                       all()
+    return vif_refs
+
+
+@require_context
+@pick_context_manager_reader
+def virtual_interface_get_by_instance_and_network(context, instance_uuid,
+                                                  network_id):
+    """Gets virtual interface for instance that's associated with network."""
+    vif_ref = _virtual_interface_query(context).\
+                      filter_by(instance_uuid=instance_uuid).\
+                      filter_by(network_id=network_id).\
+                      first()
+    return vif_ref
+
+
+@require_context
+@pick_context_manager_writer
+def virtual_interface_delete_by_instance(context, instance_uuid):
+    """Delete virtual interface records that are associated
+    with the instance given by instance_id.
+
+    :param instance_uuid: = uuid of instance
+    """
+    _virtual_interface_query(context).\
+           filter_by(instance_uuid=instance_uuid).\
+           soft_delete()
+
+
+@require_context
+@pick_context_manager_writer
+def virtual_interface_delete(context, id):
+    """Delete virtual interface records.
+
+    :param id: id of the interface
+    """
+    _virtual_interface_query(context).\
+        filter_by(id=id).\
+        soft_delete()
+
+
+@require_context
+@pick_context_manager_reader
+def virtual_interface_get_all(context):
+    """Get all vifs."""
+    vif_refs = _virtual_interface_query(context).all()
+    return vif_refs
+
+
+###################
+
+
+def _metadata_refs(metadata_dict, meta_class):
+    metadata_refs = []
+    if metadata_dict:
+        for k, v in metadata_dict.items():
+            metadata_ref = meta_class()
+            metadata_ref['key'] = k
+            metadata_ref['value'] = v
+            metadata_refs.append(metadata_ref)
+    return metadata_refs
+
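+# Illustrative sketch (not part of the original patch); 'role'/'builder' are
+# arbitrary example values:
+#
+#     refs = _metadata_refs({'role': 'builder'}, models.InstanceMetadata)
+#     # refs[0]['key'] == 'role' and refs[0]['value'] == 'builder'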
+
+def _validate_unique_server_name(context, name):
+    if not CONF.osapi_compute_unique_server_name_scope:
+        return
+
+    lowername = name.lower()
+    base_query = model_query(context, models.Instance, read_deleted='no').\
+            filter(func.lower(models.Instance.hostname) == lowername)
+
+    if CONF.osapi_compute_unique_server_name_scope == 'project':
+        instance_with_same_name = base_query.\
+                        filter_by(project_id=context.project_id).\
+                        count()
+
+    elif CONF.osapi_compute_unique_server_name_scope == 'global':
+        instance_with_same_name = base_query.count()
+
+    else:
+        return
+
+    if instance_with_same_name > 0:
+        raise exception.InstanceExists(name=lowername)
+
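+# Illustrative note (not part of the original patch): with the scope set to
+# 'project', two instances in the same project cannot share a hostname
+# (matching is case-insensitive); with 'global' the hostname must be unique
+# across all projects; any other value disables the check.  A hypothetical
+# configuration snippet:
+#
+#     [DEFAULT]
+#     osapi_compute_unique_server_name_scope = project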
+
+def _handle_objects_related_type_conversions(values):
+    """Make sure that certain things in values (which may have come from
+    an objects.instance.Instance object) are in suitable form for the
+    database.
+    """
+    # NOTE(danms): Make sure IP addresses are passed as strings to
+    # the database engine
+    for key in ('access_ip_v4', 'access_ip_v6'):
+        if key in values and values[key] is not None:
+            values[key] = str(values[key])
+
+    datetime_keys = ('created_at', 'deleted_at', 'updated_at',
+                     'launched_at', 'terminated_at')
+    convert_objects_related_datetimes(values, *datetime_keys)
+
+
+def _check_instance_exists_in_project(context, instance_uuid):
+    if not model_query(context, models.Instance, read_deleted="no",
+                       project_only=True).filter_by(
+                       uuid=instance_uuid).first():
+        raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def instance_create(context, values):
+    """Create a new Instance record in the database.
+
+    context - request context object
+    values - dict containing column values.
+    """
+
+    security_group_ensure_default(context)
+
+    values = values.copy()
+    values['metadata'] = _metadata_refs(
+            values.get('metadata'), models.InstanceMetadata)
+
+    values['system_metadata'] = _metadata_refs(
+            values.get('system_metadata'), models.InstanceSystemMetadata)
+    _handle_objects_related_type_conversions(values)
+
+    instance_ref = models.Instance()
+    if not values.get('uuid'):
+        values['uuid'] = uuidutils.generate_uuid()
+    instance_ref['info_cache'] = models.InstanceInfoCache()
+    info_cache = values.pop('info_cache', None)
+    if info_cache is not None:
+        instance_ref['info_cache'].update(info_cache)
+    security_groups = values.pop('security_groups', [])
+    instance_ref['extra'] = models.InstanceExtra()
+    instance_ref['extra'].update(
+        {'numa_topology': None,
+         'pci_requests': None,
+         'vcpu_model': None,
+         'trusted_certs': None,
+         })
+    instance_ref['extra'].update(values.pop('extra', {}))
+    instance_ref.update(values)
+
+    def _get_sec_group_models(security_groups):
+        models = []
+        default_group = _security_group_ensure_default(context)
+        if 'default' in security_groups:
+            models.append(default_group)
+            # Generate a new list, so we don't modify the original
+            security_groups = [x for x in security_groups if x != 'default']
+        if security_groups:
+            models.extend(_security_group_get_by_names(
+                context, security_groups))
+        return models
+
+    if 'hostname' in values:
+        _validate_unique_server_name(context, values['hostname'])
+    instance_ref.security_groups = _get_sec_group_models(security_groups)
+    context.session.add(instance_ref)
+
+    # create the instance uuid to ec2_id mapping entry for instance
+    ec2_instance_create(context, instance_ref['uuid'])
+
+    # Parity with the return value of instance_get_all_by_filters_sort()
+    # Obviously a newly-created instance record can't already have a fault
+    # record because of the FK constraint, so this is fine.
+    instance_ref.fault = None
+
+    return instance_ref
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def instance_destroy(context, instance_uuid, constraint=None):
+    if uuidutils.is_uuid_like(instance_uuid):
+        instance_ref = _instance_get_by_uuid(context, instance_uuid)
+    else:
+        raise exception.InvalidUUID(instance_uuid)
+
+    query = model_query(context, models.Instance).\
+                    filter_by(uuid=instance_uuid)
+    if constraint is not None:
+        query = constraint.apply(models.Instance, query)
+    count = query.soft_delete()
+    if count == 0:
+        raise exception.ConstraintNotMet()
+    model_query(context, models.SecurityGroupInstanceAssociation).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.InstanceInfoCache).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.InstanceMetadata).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.InstanceFault).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.InstanceExtra).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.InstanceSystemMetadata).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.BlockDeviceMapping).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.Migration).\
+            filter_by(instance_uuid=instance_uuid).\
+            soft_delete()
+    model_query(context, models.InstanceIdMapping).filter_by(
+        uuid=instance_uuid).soft_delete()
+    # NOTE(snikitin): We can't use model_query here, because there is no
+    # column 'deleted' in 'tags' or 'console_auth_tokens' tables.
+    context.session.query(models.Tag).filter_by(
+        resource_id=instance_uuid).delete()
+    context.session.query(models.ConsoleAuthToken).filter_by(
+        instance_uuid=instance_uuid).delete()
+    # NOTE(cfriesen): We intentionally do not soft-delete entries in the
+    # instance_actions or instance_actions_events tables because they
+    # can be used by operators to find out what actions were performed on a
+    # deleted instance.  Both of these tables are special-cased in
+    # _archive_deleted_rows_for_table().
+
+    return instance_ref
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def instance_get_by_uuid(context, uuid, columns_to_join=None):
+    return _instance_get_by_uuid(context, uuid,
+                                 columns_to_join=columns_to_join)
+
+
+def _instance_get_by_uuid(context, uuid, columns_to_join=None):
+    result = _build_instance_get(context, columns_to_join=columns_to_join).\
+                filter_by(uuid=uuid).\
+                first()
+
+    if not result:
+        raise exception.InstanceNotFound(instance_id=uuid)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def instance_get(context, instance_id, columns_to_join=None):
+    try:
+        result = _build_instance_get(context, columns_to_join=columns_to_join
+                                     ).filter_by(id=instance_id).first()
+
+        if not result:
+            raise exception.InstanceNotFound(instance_id=instance_id)
+
+        return result
+    except db_exc.DBError:
+        # NOTE(sdague): catch all in case the db engine chokes on the
+        # id because it's too long of an int to store.
+        LOG.warning("Invalid instance id %s in request", instance_id)
+        raise exception.InvalidID(id=instance_id)
+
+
+def _build_instance_get(context, columns_to_join=None):
+    query = model_query(context, models.Instance, project_only=True).\
+            options(joinedload_all('security_groups.rules')).\
+            options(joinedload('info_cache'))
+    if columns_to_join is None:
+        columns_to_join = ['metadata', 'system_metadata']
+    for column in columns_to_join:
+        if column in ['info_cache', 'security_groups']:
+            # Already always joined above
+            continue
+        if 'extra.' in column:
+            query = query.options(undefer(column))
+        else:
+            query = query.options(joinedload(column))
+    # NOTE(alaski) Stop lazy loading of columns not needed.
+    for col in ['metadata', 'system_metadata']:
+        if col not in columns_to_join:
+            query = query.options(noload(col))
+    return query
+
+
+def _instances_fill_metadata(context, instances, manual_joins=None):
+    """Selectively fill instances with manually-joined metadata. Note that
+    instance will be converted to a dict.
+
+    :param context: security context
+    :param instances: list of instances to fill
+    :param manual_joins: list of tables to manually join (can be any
+                         combination of 'metadata' and 'system_metadata' or
+                         None to take the default of both)
+    """
+    uuids = [inst['uuid'] for inst in instances]
+
+    if manual_joins is None:
+        manual_joins = ['metadata', 'system_metadata']
+
+    meta = collections.defaultdict(list)
+    if 'metadata' in manual_joins:
+        for row in _instance_metadata_get_multi(context, uuids):
+            meta[row['instance_uuid']].append(row)
+
+    sys_meta = collections.defaultdict(list)
+    if 'system_metadata' in manual_joins:
+        for row in _instance_system_metadata_get_multi(context, uuids):
+            sys_meta[row['instance_uuid']].append(row)
+
+    pcidevs = collections.defaultdict(list)
+    if 'pci_devices' in manual_joins:
+        for row in _instance_pcidevs_get_multi(context, uuids):
+            pcidevs[row['instance_uuid']].append(row)
+
+    if 'fault' in manual_joins:
+        faults = instance_fault_get_by_instance_uuids(context, uuids,
+                                                      latest=True)
+    else:
+        faults = {}
+
+    filled_instances = []
+    for inst in instances:
+        inst = dict(inst)
+        inst['system_metadata'] = sys_meta[inst['uuid']]
+        inst['metadata'] = meta[inst['uuid']]
+        if 'pci_devices' in manual_joins:
+            inst['pci_devices'] = pcidevs[inst['uuid']]
+        inst_faults = faults.get(inst['uuid'])
+        inst['fault'] = inst_faults and inst_faults[0] or None
+        filled_instances.append(inst)
+
+    return filled_instances
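+
+# Example (illustrative, hypothetical variables): calling
+# _instances_fill_metadata(ctxt, instances, manual_joins=['metadata', 'fault'])
+# returns plain dicts where inst['metadata'] holds the metadata rows and
+# inst['fault'] the latest fault (or None); 'system_metadata' comes back as an
+# empty list and 'pci_devices' is left out because they were not requested.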
+
+
+def _manual_join_columns(columns_to_join):
+    """Separate manually joined columns from columns_to_join
+
+    If columns_to_join contains 'metadata', 'system_metadata', 'fault', or
+    'pci_devices' those columns are removed from columns_to_join and added
+    to a manual_joins list to be used with the _instances_fill_metadata method.
+
+    The columns_to_join formal parameter is copied and not modified; the
+    returned tuple has the modified columns_to_join list to be used with
+    joinedload in a model query.
+
+    :param columns_to_join: List of columns to join in a model query.
+    :return: tuple of (manual_joins, columns_to_join)
+    """
+    manual_joins = []
+    columns_to_join_new = copy.copy(columns_to_join)
+    for column in ('metadata', 'system_metadata', 'pci_devices', 'fault'):
+        if column in columns_to_join_new:
+            columns_to_join_new.remove(column)
+            manual_joins.append(column)
+    return manual_joins, columns_to_join_new
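+
+# Example (illustrative): _manual_join_columns(['metadata', 'info_cache'])
+# returns (['metadata'], ['info_cache']); 'metadata' is filled in later by
+# _instances_fill_metadata(), while 'info_cache' stays a joinedload column.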
+
+
+@require_context
+@pick_context_manager_reader
+def instance_get_all(context, columns_to_join=None):
+    if columns_to_join is None:
+        columns_to_join_new = ['info_cache', 'security_groups']
+        manual_joins = ['metadata', 'system_metadata']
+    else:
+        manual_joins, columns_to_join_new = (
+            _manual_join_columns(columns_to_join))
+    query = model_query(context, models.Instance)
+    for column in columns_to_join_new:
+        query = query.options(joinedload(column))
+    if not context.is_admin:
+        # If we're not in an admin context, add the appropriate filter.
+        if context.project_id:
+            query = query.filter_by(project_id=context.project_id)
+        else:
+            query = query.filter_by(user_id=context.user_id)
+    instances = query.all()
+    return _instances_fill_metadata(context, instances, manual_joins)
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def instance_get_all_by_filters(context, filters, sort_key, sort_dir,
+                                limit=None, marker=None, columns_to_join=None):
+    """Return instances matching all filters sorted by the primary key.
+
+    See instance_get_all_by_filters_sort for more information.
+    """
+    # Invoke the API with the multiple sort keys and directions using the
+    # single sort key/direction
+    return instance_get_all_by_filters_sort(context, filters, limit=limit,
+                                            marker=marker,
+                                            columns_to_join=columns_to_join,
+                                            sort_keys=[sort_key],
+                                            sort_dirs=[sort_dir])
+
+
+def _get_query_nova_resource_by_changes_time(query, filters, model_object):
+    """Filter resources by changes-since or changes-before.
+
+    Special keys are used to tweak the query further::
+
+    |   'changes-since' - only return resources updated after the given time
+    |   'changes-before' - only return resources updated before the given time
+
+    Return query results.
+
+    :param query: query to apply filters to.
+    :param filters: dictionary of filters with regex values.
+    :param model_object: object of the operation target.
+    """
+    for change_filter in ['changes-since', 'changes-before']:
+        if filters and filters.get(change_filter):
+            changes_filter_time = timeutils.normalize_time(
+                filters.get(change_filter))
+            updated_at = getattr(model_object, 'updated_at')
+            if change_filter == 'changes-since':
+                query = query.filter(updated_at >= changes_filter_time)
+            else:
+                query = query.filter(updated_at <= changes_filter_time)
+    return query
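+
+# Example (illustrative, hypothetical timestamp): with
+# filters={'changes-since': <a datetime>} the query is narrowed to rows whose
+# updated_at >= that time; 'changes-before' adds the mirrored upper bound
+# (updated_at <= the given time).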
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def instance_get_all_by_filters_sort(context, filters, limit=None, marker=None,
+                                     columns_to_join=None, sort_keys=None,
+                                     sort_dirs=None):
+    """Return instances that match all filters sorted by the given keys.
+    Deleted instances will be returned by default, unless there's a filter that
+    says otherwise.
+
+    Depending on the name of a filter, matching for that filter is
+    performed using either exact matching or regular expression
+    matching. Exact matching is applied for the following filters::
+
+    |   ['project_id', 'user_id', 'image_ref',
+    |    'vm_state', 'instance_type_id', 'uuid',
+    |    'metadata', 'host', 'system_metadata']
+
+
+    A third type of filter (also using exact matching) filters
+    based on instance metadata tags when they are supplied under a
+    special key named 'filter'::
+
+    |   filters = {
+    |       'filter': [
+    |           {'name': 'tag-key', 'value': '<metakey>'},
+    |           {'name': 'tag-value', 'value': '<metaval>'},
+    |           {'name': 'tag:<metakey>', 'value': '<metaval>'}
+    |       ]
+    |   }
+
+    Special keys are used to tweak the query further::
+
+    |   'changes-since' - only return instances updated after the given time
+    |   'changes-before' - only return instances updated before the given time
+    |   'deleted' - only return (or exclude) deleted instances
+    |   'soft_deleted' - modify behavior of 'deleted' to either
+    |                    include or exclude instances whose
+    |                    vm_state is SOFT_DELETED.
+
+    A fourth type of filter (also using exact matching) filters
+    based on instance tags (not metadata tags). There are four kinds
+    of these tag filters:
+
+    `tags` -- One or more strings that will be used to filter results
+            in an AND expression: T1 AND T2
+
+    `tags-any` -- One or more strings that will be used to filter results in
+            an OR expression: T1 OR T2
+
+    `not-tags` -- One or more strings that will be used to filter results in
+            a NOT AND expression: NOT (T1 AND T2)
+
+    `not-tags-any` -- One or more strings that will be used to filter results
+            in a NOT OR expression: NOT (T1 OR T2)
+
+    Tags should be represented as a list::
+
+    |    filters = {
+    |        'tags': [some-tag, some-another-tag],
+    |        'tags-any': [some-any-tag, some-another-any-tag],
+    |        'not-tags': [some-not-tag, some-another-not-tag],
+    |        'not-tags-any': [some-not-any-tag, some-another-not-any-tag]
+    |    }
+
+    """
+    # NOTE(mriedem): If the limit is 0 there is no point in even going
+    # to the database since nothing is going to be returned anyway.
+    if limit == 0:
+        return []
+
+    sort_keys, sort_dirs = process_sort_params(sort_keys,
+                                               sort_dirs,
+                                               default_dir='desc')
+
+    if columns_to_join is None:
+        columns_to_join_new = ['info_cache', 'security_groups']
+        manual_joins = ['metadata', 'system_metadata']
+    else:
+        manual_joins, columns_to_join_new = (
+            _manual_join_columns(columns_to_join))
+
+    query_prefix = context.session.query(models.Instance)
+    for column in columns_to_join_new:
+        if 'extra.' in column:
+            query_prefix = query_prefix.options(undefer(column))
+        else:
+            query_prefix = query_prefix.options(joinedload(column))
+
+    # Note: order_by is applied by sqlalchemyutils.paginate_query() below;
+    # no need to do it here as well.
+
+    # Make a copy of the filters dictionary to use going forward, as we'll
+    # be modifying it and we shouldn't affect the caller's use of it.
+    filters = copy.deepcopy(filters)
+
+    model_object = models.Instance
+    query_prefix = _get_query_nova_resource_by_changes_time(query_prefix,
+                                                            filters,
+                                                            model_object)
+
+    if 'deleted' in filters:
+        # Instances can be soft or hard deleted and the query needs to
+        # include or exclude both
+        deleted = filters.pop('deleted')
+        if deleted:
+            if filters.pop('soft_deleted', True):
+                delete = or_(
+                    models.Instance.deleted == models.Instance.id,
+                    models.Instance.vm_state == vm_states.SOFT_DELETED
+                    )
+                query_prefix = query_prefix.\
+                    filter(delete)
+            else:
+                query_prefix = query_prefix.\
+                    filter(models.Instance.deleted == models.Instance.id)
+        else:
+            query_prefix = query_prefix.\
+                    filter_by(deleted=0)
+            if not filters.pop('soft_deleted', False):
+                # It would be better to have vm_state not be nullable
+                # but until then we test it explicitly as a workaround.
+                not_soft_deleted = or_(
+                    models.Instance.vm_state != vm_states.SOFT_DELETED,
+                    models.Instance.vm_state == null()
+                    )
+                query_prefix = query_prefix.filter(not_soft_deleted)
+
+    if 'cleaned' in filters:
+        cleaned = 1 if filters.pop('cleaned') else 0
+        query_prefix = query_prefix.filter(models.Instance.cleaned == cleaned)
+
+    if 'tags' in filters:
+        tags = filters.pop('tags')
+        # We build a JOIN ladder expression for each tag, JOIN'ing
+        # the first tag to the instances table, and each subsequent
+        # tag to the last JOIN'd tags table
+        first_tag = tags.pop(0)
+        query_prefix = query_prefix.join(models.Instance.tags)
+        query_prefix = query_prefix.filter(models.Tag.tag == first_tag)
+
+        for tag in tags:
+            tag_alias = aliased(models.Tag)
+            query_prefix = query_prefix.join(tag_alias,
+                                             models.Instance.tags)
+            query_prefix = query_prefix.filter(tag_alias.tag == tag)
+
+    if 'tags-any' in filters:
+        tags = filters.pop('tags-any')
+        tag_alias = aliased(models.Tag)
+        query_prefix = query_prefix.join(tag_alias, models.Instance.tags)
+        query_prefix = query_prefix.filter(tag_alias.tag.in_(tags))
+
+    if 'not-tags' in filters:
+        tags = filters.pop('not-tags')
+        first_tag = tags.pop(0)
+        subq = query_prefix.session.query(models.Tag.resource_id)
+        subq = subq.join(models.Instance.tags)
+        subq = subq.filter(models.Tag.tag == first_tag)
+
+        for tag in tags:
+            tag_alias = aliased(models.Tag)
+            subq = subq.join(tag_alias, models.Instance.tags)
+            subq = subq.filter(tag_alias.tag == tag)
+
+        query_prefix = query_prefix.filter(~models.Instance.uuid.in_(subq))
+
+    if 'not-tags-any' in filters:
+        tags = filters.pop('not-tags-any')
+        query_prefix = query_prefix.filter(~models.Instance.tags.any(
+            models.Tag.tag.in_(tags)))
+
+    if not context.is_admin:
+        # If we're not in an admin context, add the appropriate filter.
+        if context.project_id:
+            filters['project_id'] = context.project_id
+        else:
+            filters['user_id'] = context.user_id
+
+    # Filters for exact matches that we can do along with the SQL query...
+    # For other filters that don't match this, we will do regexp matching
+    exact_match_filter_names = ['project_id', 'user_id', 'image_ref',
+                                'vm_state', 'instance_type_id', 'uuid',
+                                'metadata', 'host', 'task_state',
+                                'system_metadata']
+
+    # Filter the query
+    query_prefix = _exact_instance_filter(query_prefix,
+                                filters, exact_match_filter_names)
+    if query_prefix is None:
+        return []
+    query_prefix = _regex_instance_filter(query_prefix, filters)
+
+    # paginate query
+    if marker is not None:
+        try:
+            marker = _instance_get_by_uuid(
+                    context.elevated(read_deleted='yes'), marker)
+        except exception.InstanceNotFound:
+            raise exception.MarkerNotFound(marker=marker)
+    try:
+        query_prefix = sqlalchemyutils.paginate_query(query_prefix,
+                               models.Instance, limit,
+                               sort_keys,
+                               marker=marker,
+                               sort_dirs=sort_dirs)
+    except db_exc.InvalidSortKey:
+        raise exception.InvalidSortKey()
+
+    return _instances_fill_metadata(context, query_prefix.all(), manual_joins)
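+
+# Example (illustrative sketch; the filter values and 'ctxt' are hypothetical):
+# fetch the calling project's non-deleted instances, newest first, in pages
+# of 50:
+#
+#     instances = instance_get_all_by_filters_sort(
+#         ctxt, {'deleted': False, 'project_id': ctxt.project_id},
+#         limit=50, sort_keys=['created_at'], sort_dirs=['desc'])
+#
+# 'metadata' and 'system_metadata' are joined manually through
+# _instances_fill_metadata() unless a different columns_to_join is passed.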
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def instance_get_by_sort_filters(context, sort_keys, sort_dirs, values):
+    """Attempt to get a single instance based on a combination of sort
+    keys, directions and filter values. This is used to try to find a
+    marker instance when we don't have a marker uuid.
+
+    This returns just a uuid of the instance that matched.
+    """
+
+    model = models.Instance
+    return _model_get_uuid_by_sort_filters(context, model, sort_keys,
+                                           sort_dirs, values)
+
+
+def _model_get_uuid_by_sort_filters(context, model, sort_keys, sort_dirs,
+                                    values):
+    query = context.session.query(model.uuid)
+
+    # NOTE(danms): Below is a re-implementation of our
+    # oslo_db.sqlalchemy.utils.paginate_query() utility. We can't use that
+    # directly because it does not return the marker and we need it to.
+    # The below is basically the same algorithm, stripped down to just what
+    # we need, and augmented with the filter criteria required for us to
+    # get back the instance that would correspond to our query.
+
+    # This is our position in sort_keys,sort_dirs,values for the loop below
+    key_index = 0
+
+    # We build a list of criteria to apply to the query, which looks
+    # approximately like this (assuming all ascending):
+    #
+    #  OR(row.key1 > val1,
+    #     AND(row.key1 == val1, row.key2 > val2),
+    #     AND(row.key1 == val1, row.key2 == val2, row.key3 >= val3),
+    #  )
+    #
+    # The final key is compared with the "or equal" variant so that
+    # a completely matching instance is still returned.
+    criteria = []
+
+    for skey, sdir, val in zip(sort_keys, sort_dirs, values):
+        # Apply ordering to our query for the key and direction we're
+        # processing
+        if sdir == 'desc':
+            query = query.order_by(desc(getattr(model, skey)))
+        else:
+            query = query.order_by(asc(getattr(model, skey)))
+
+        # Build a list of equivalence requirements on keys we've already
+        # processed through the loop. In other words, if we're adding
+        # key2 > val2, make sure that key1 == val1
+        crit_attrs = []
+        for equal_attr in range(0, key_index):
+            crit_attrs.append(
+                (getattr(model, sort_keys[equal_attr]) == values[equal_attr]))
+
+        model_attr = getattr(model, skey)
+        if isinstance(model_attr.type, Boolean):
+            model_attr = cast(model_attr, Integer)
+            val = int(val)
+
+        if skey == sort_keys[-1]:
+            # If we are the last key, then we should use or-equal to
+            # allow a complete match to be returned
+            if sdir == 'asc':
+                crit = (model_attr >= val)
+            else:
+                crit = (model_attr <= val)
+        else:
+            # If we're not the last key, then use strict greater-than or
+            # less-than so we order strictly.
+            if sdir == 'asc':
+                crit = (model_attr > val)
+            else:
+                crit = (model_attr < val)
+
+        # AND together all the above
+        crit_attrs.append(crit)
+        criteria.append(and_(*crit_attrs))
+        key_index += 1
+
+    # OR together all the ANDs
+    query = query.filter(or_(*criteria))
+
+    # We can't raise InstanceNotFound because we don't have a uuid to
+    # be looking for, so just return nothing if no match.
+    result = query.limit(1).first()
+    if result:
+        # We're querying for a single column, which means we get back a
+        # tuple of one thing. Strip that out and just return the uuid
+        # for our caller.
+        return result[0]
+    else:
+        return result
+
+
+def _db_connection_type(db_connection):
+    """Returns a lowercase symbol for the db type.
+
+    This is useful when we need to change what we are doing per DB
+    (like handling regexes). In a CellsV2 world it probably needs to
+    do something better than use the database configuration string.
+    """
+
+    db_string = db_connection.split(':')[0].split('+')[0]
+    return db_string.lower()
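+
+# Example (illustrative connection strings):
+# _db_connection_type('mysql+pymysql://user:pw@host/gosbs') returns 'mysql',
+# and _db_connection_type('sqlite:///test.db') returns 'sqlite'.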
+
+
+def _safe_regex_mysql(raw_string):
+    """Make a regex safe for MySQL.
+
+    Certain items like '|' are interpreted raw by MySQL REGEXP. If you
+    search for a single | then you trigger an error because it's
+    expecting content on either side.
+
+    For consistency's sake we escape all '|'. This does mean we don't
+    support something like foo|bar to match two completely different
+    things; however, one can argue that putting such a complicated regex
+    into a name search probably means you are doing it wrong.
+    """
+    return raw_string.replace('|', '\\|')
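+
+# Example (illustrative): _safe_regex_mysql('web|api') returns 'web\\|api', so
+# MySQL REGEXP treats the '|' as a literal instead of an alternation.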
+
+
+def _get_regexp_ops(connection):
+    """Return the regex safety filter and db operator for the connection."""
+    regexp_op_map = {
+        'postgresql': '~',
+        'mysql': 'REGEXP',
+        'sqlite': 'REGEXP'
+    }
+    regex_safe_filters = {
+        'mysql': _safe_regex_mysql
+    }
+    db_type = _db_connection_type(connection)
+
+    return (regex_safe_filters.get(db_type, lambda x: x),
+            regexp_op_map.get(db_type, 'LIKE'))
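+
+# Example (illustrative): for a MySQL connection this returns
+# (_safe_regex_mysql, 'REGEXP'); for an unrecognized backend it falls back to
+# (an identity function, 'LIKE'), which _regex_instance_filter() below wraps
+# in '%...%'.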
+
+
+def _regex_instance_filter(query, filters):
+
+    """Applies regular expression filtering to an Instance query.
+
+    Returns the updated query.
+
+    :param query: query to apply filters to
+    :param filters: dictionary of filters with regex values
+    """
+
+    model = models.Instance
+    safe_regex_filter, db_regexp_op = _get_regexp_ops(CONF.database.connection)
+    for filter_name in filters:
+        try:
+            column_attr = getattr(model, filter_name)
+        except AttributeError:
+            continue
+        if 'property' == type(column_attr).__name__:
+            continue
+        filter_val = filters[filter_name]
+        # Sometimes the REGEX filter value is not a string
+        if not isinstance(filter_val, six.string_types):
+            filter_val = str(filter_val)
+        if db_regexp_op == 'LIKE':
+            query = query.filter(column_attr.op(db_regexp_op)(
+                                 u'%' + filter_val + u'%'))
+        else:
+            filter_val = safe_regex_filter(filter_val)
+            query = query.filter(column_attr.op(db_regexp_op)(
+                                 filter_val))
+    return query
+
+
+def _exact_instance_filter(query, filters, legal_keys):
+    """Applies exact match filtering to an Instance query.
+
+    Returns the updated query.  Modifies filters argument to remove
+    filters consumed.
+
+    :param query: query to apply filters to
+    :param filters: dictionary of filters; values that are lists,
+                    tuples, sets, or frozensets cause an 'IN' test to
+                    be performed, while exact matching ('==' operator)
+                    is used for other values
+    :param legal_keys: list of keys to apply exact filtering to
+    """
+
+    filter_dict = {}
+    model = models.Instance
+
+    # Walk through all the keys
+    for key in legal_keys:
+        # Skip ones we're not filtering on
+        if key not in filters:
+            continue
+
+        # OK, filtering on this key; what value do we search for?
+        value = filters.pop(key)
+
+        if key in ('metadata', 'system_metadata'):
+            column_attr = getattr(model, key)
+            if isinstance(value, list):
+                for item in value:
+                    for k, v in item.items():
+                        query = query.filter(column_attr.any(key=k))
+                        query = query.filter(column_attr.any(value=v))
+
+            else:
+                for k, v in value.items():
+                    query = query.filter(column_attr.any(key=k))
+                    query = query.filter(column_attr.any(value=v))
+        elif isinstance(value, (list, tuple, set, frozenset)):
+            if not value:
+                return None  # empty IN-predicate; short circuit
+            # Looking for values in a list; apply to query directly
+            column_attr = getattr(model, key)
+            query = query.filter(column_attr.in_(value))
+        else:
+            # OK, simple exact match; save for later
+            filter_dict[key] = value
+
+    # Apply simple exact matches
+    if filter_dict:
+        query = query.filter(*[getattr(models.Instance, k) == v
+                               for k, v in filter_dict.items()])
+    return query
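+
+# Example (illustrative, hypothetical values): with
+# filters={'vm_state': ['active', 'stopped'], 'host': 'node1'} and
+# legal_keys=['vm_state', 'host'], the query gets an IN clause for vm_state
+# and an equality test for host; both keys are popped from the filters dict.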
+
+
+def process_sort_params(sort_keys, sort_dirs,
+                        default_keys=['created_at', 'id'],
+                        default_dir='asc'):
+    """Process the sort parameters to include default keys.
+
+    Creates a list of sort keys and a list of sort directions. Adds the default
+    keys to the end of the list if they are not already included.
+
+    When adding the default keys to the sort keys list, the associated
+    direction is:
+    1) The first element in the 'sort_dirs' list (if specified), else
+    2) 'default_dir' value (Note that 'asc' is the default value since this is
+    the default in sqlalchemy.utils.paginate_query)
+
+    :param sort_keys: List of sort keys to include in the processed list
+    :param sort_dirs: List of sort directions to include in the processed list
+    :param default_keys: List of sort keys that need to be included in the
+                         processed list; they are added at the end of the list
+                         if not already specified.
+    :param default_dir: Sort direction associated with each of the default
+                        keys that are not supplied, used when they are added
+                        to the processed list
+    :returns: list of sort keys, list of sort directions
+    :raise exception.InvalidInput: If more sort directions than sort keys
+                                   are specified or if an invalid sort
+                                   direction is specified
+    """
+    # Determine direction to use for when adding default keys
+    if sort_dirs and len(sort_dirs) != 0:
+        default_dir_value = sort_dirs[0]
+    else:
+        default_dir_value = default_dir
+
+    # Create list of keys (do not modify the input list)
+    if sort_keys:
+        result_keys = list(sort_keys)
+    else:
+        result_keys = []
+
+    # If a list of directions is not provided, use the default sort direction
+    # for all provided keys
+    if sort_dirs:
+        result_dirs = []
+        # Verify sort direction
+        for sort_dir in sort_dirs:
+            if sort_dir not in ('asc', 'desc'):
+                msg = _("Unknown sort direction, must be 'desc' or 'asc'")
+                raise exception.InvalidInput(reason=msg)
+            result_dirs.append(sort_dir)
+    else:
+        result_dirs = [default_dir_value for _sort_key in result_keys]
+
+    # Ensure that the key and direction length match
+    while len(result_dirs) < len(result_keys):
+        result_dirs.append(default_dir_value)
+    # Unless more directions are specified, which is an error
+    if len(result_dirs) > len(result_keys):
+        msg = _("Sort direction size exceeds sort key size")
+        raise exception.InvalidInput(reason=msg)
+
+    # Ensure defaults are included
+    for key in default_keys:
+        if key not in result_keys:
+            result_keys.append(key)
+            result_dirs.append(default_dir_value)
+
+    return result_keys, result_dirs
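+
+# Example (illustrative): process_sort_params(['display_name'], None) returns
+# (['display_name', 'created_at', 'id'], ['asc', 'asc', 'asc']); the default
+# keys are appended with the default direction.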
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def instance_get_active_by_window_joined(context, begin, end=None,
+                                         project_id=None, host=None,
+                                         columns_to_join=None, limit=None,
+                                         marker=None):
+    """Return instances and joins that were active during window."""
+    query = context.session.query(models.Instance)
+
+    if columns_to_join is None:
+        columns_to_join_new = ['info_cache', 'security_groups']
+        manual_joins = ['metadata', 'system_metadata']
+    else:
+        manual_joins, columns_to_join_new = (
+            _manual_join_columns(columns_to_join))
+
+    for column in columns_to_join_new:
+        if 'extra.' in column:
+            query = query.options(undefer(column))
+        else:
+            query = query.options(joinedload(column))
+
+    query = query.filter(or_(models.Instance.terminated_at == null(),
+                             models.Instance.terminated_at > begin))
+    if end:
+        query = query.filter(models.Instance.launched_at < end)
+    if project_id:
+        query = query.filter_by(project_id=project_id)
+    if host:
+        query = query.filter_by(host=host)
+
+    if marker is not None:
+        try:
+            marker = _instance_get_by_uuid(
+                context.elevated(read_deleted='yes'), marker)
+        except exception.InstanceNotFound:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(
+        query, models.Instance, limit, ['project_id', 'uuid'], marker=marker)
+
+    return _instances_fill_metadata(context, query.all(), manual_joins)
+
+
+def _instance_get_all_query(context, project_only=False, joins=None):
+    if joins is None:
+        joins = ['info_cache', 'security_groups']
+
+    query = model_query(context,
+                        models.Instance,
+                        project_only=project_only)
+    for column in joins:
+        if 'extra.' in column:
+            query = query.options(undefer(column))
+        else:
+            query = query.options(joinedload(column))
+    return query
+
+
+@pick_context_manager_reader_allow_async
+def instance_get_all_by_host(context, host, columns_to_join=None):
+    query = _instance_get_all_query(context, joins=columns_to_join)
+    return _instances_fill_metadata(context,
+                                    query.filter_by(host=host).all(),
+                                    manual_joins=columns_to_join)
+
+
+def _instance_get_all_uuids_by_host(context, host):
+    """Return a list of the instance uuids on a given host.
+
+    Returns a list of UUIDs, not Instance model objects.
+    """
+    uuids = []
+    for tuple in model_query(context, models.Instance, (models.Instance.uuid,),
+                             read_deleted="no").\
+                filter_by(host=host).\
+                all():
+        uuids.append(tuple[0])
+    return uuids
+
+
+@pick_context_manager_reader
+def instance_get_all_uuids_by_host(context, host):
+    return _instance_get_all_uuids_by_host(context, host)
+
+
+@pick_context_manager_reader
+def instance_get_all_by_host_and_node(context, host, node,
+                                      columns_to_join=None):
+    if columns_to_join is None:
+        manual_joins = []
+    else:
+        candidates = ['system_metadata', 'metadata']
+        manual_joins = [x for x in columns_to_join if x in candidates]
+        columns_to_join = list(set(columns_to_join) - set(candidates))
+    return _instances_fill_metadata(context,
+            _instance_get_all_query(
+                context,
+                joins=columns_to_join).filter_by(host=host).
+                filter_by(node=node).all(), manual_joins=manual_joins)
+
+
+@pick_context_manager_reader
+def instance_get_all_by_host_and_not_type(context, host, type_id=None):
+    return _instances_fill_metadata(context,
+        _instance_get_all_query(context).filter_by(host=host).
+                   filter(models.Instance.instance_type_id != type_id).all())
+
+
+@pick_context_manager_reader
+def instance_get_all_by_grantee_security_groups(context, group_ids):
+    if not group_ids:
+        return []
+    return _instances_fill_metadata(context,
+        _instance_get_all_query(context).
+            join(models.Instance.security_groups).
+            filter(models.SecurityGroup.rules.any(
+                models.SecurityGroupIngressRule.group_id.in_(group_ids))).
+            all())
+
+
+@require_context
+@pick_context_manager_reader
+def instance_floating_address_get_all(context, instance_uuid):
+    if not uuidutils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(uuid=instance_uuid)
+
+    floating_ips = model_query(context,
+                               models.FloatingIp,
+                               (models.FloatingIp.address,)).\
+        join(models.FloatingIp.fixed_ip).\
+        filter_by(instance_uuid=instance_uuid)
+
+    return [floating_ip.address for floating_ip in floating_ips]
+
+
+# NOTE(hanlind): This method can be removed as conductor RPC API moves to v2.0.
+@pick_context_manager_reader
+def instance_get_all_hung_in_rebooting(context, reboot_window):
+    reboot_window = (timeutils.utcnow() -
+                     datetime.timedelta(seconds=reboot_window))
+
+    # NOTE(danms): this is only used in the _poll_rebooting_instances()
+    # call in compute/manager, so we can avoid the metadata lookups
+    # explicitly
+    return _instances_fill_metadata(context,
+        model_query(context, models.Instance).
+            filter(models.Instance.updated_at <= reboot_window).
+            filter_by(task_state=task_states.REBOOTING).all(),
+        manual_joins=[])
+
+
+def _retry_instance_update():
+    """Wrap with oslo_db_api.wrap_db_retry, and also retry on
+    UnknownInstanceUpdateConflict.
+    """
+    exception_checker = \
+        lambda exc: isinstance(exc, (exception.UnknownInstanceUpdateConflict,))
+    return oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True,
+                                     exception_checker=exception_checker)
+
+
+@require_context
+@_retry_instance_update()
+@pick_context_manager_writer
+def instance_update(context, instance_uuid, values, expected=None):
+    return _instance_update(context, instance_uuid, values, expected)
+
+
+@require_context
+@_retry_instance_update()
+@pick_context_manager_writer
+def instance_update_and_get_original(context, instance_uuid, values,
+                                     columns_to_join=None, expected=None):
+    """Set the given properties on an instance and update it. Return
+    a shallow copy of the original instance reference, as well as the
+    updated one.
+
+    :param context: = request context object
+    :param instance_uuid: = instance uuid
+    :param values: = dict containing column values
+
+    If "expected_task_state" exists in values, the update can only happen
+    when the task state before the update matches expected_task_state.
+    Otherwise an UnexpectedTaskStateError is raised.
+
+    :returns: a tuple of the form (old_instance_ref, new_instance_ref)
+
+    Raises NotFound if instance does not exist.
+    """
+    instance_ref = _instance_get_by_uuid(context, instance_uuid,
+                                         columns_to_join=columns_to_join)
+    return (copy.copy(instance_ref), _instance_update(
+        context, instance_uuid, values, expected, original=instance_ref))
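+
+# Example (illustrative sketch; 'ctxt' and the states are sample values): only
+# flip an instance to ACTIVE if it is still spawning, otherwise an
+# UnexpectedTaskStateError is raised:
+#
+#     old_ref, new_ref = instance_update_and_get_original(
+#         ctxt, instance_uuid,
+#         {'vm_state': vm_states.ACTIVE, 'task_state': None,
+#          'expected_task_state': task_states.SPAWNING})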
+
+
+# NOTE(danms): This updates the instance's metadata list in-place and in
+# the database to avoid stale data and refresh issues. It assumes the
+# delete=True behavior of instance_metadata_update(...)
+def _instance_metadata_update_in_place(context, instance, metadata_type, model,
+                                       metadata):
+    metadata = dict(metadata)
+    to_delete = []
+    for keyvalue in instance[metadata_type]:
+        key = keyvalue['key']
+        if key in metadata:
+            keyvalue['value'] = metadata.pop(key)
+        elif key not in metadata:
+            to_delete.append(keyvalue)
+
+    # NOTE: we have to hard-delete here, otherwise we will get more than one
+    # system_metadata record when we read deleted records for an instance;
+    # regular metadata doesn't have the same problem because we don't
+    # allow reading deleted regular metadata anywhere.
+    if metadata_type == 'system_metadata':
+        for condemned in to_delete:
+            context.session.delete(condemned)
+            instance[metadata_type].remove(condemned)
+    else:
+        for condemned in to_delete:
+            condemned.soft_delete(context.session)
+
+    for key, value in metadata.items():
+        newitem = model()
+        newitem.update({'key': key, 'value': value,
+                        'instance_uuid': instance['uuid']})
+        context.session.add(newitem)
+        instance[metadata_type].append(newitem)
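+
+# Example (illustrative, hypothetical values): calling
+# _instance_metadata_update_in_place(ctxt, instance, 'metadata',
+# models.InstanceMetadata, {'foo': 'bar'}) updates or adds the 'foo' key and
+# (soft-)deletes every other metadata row of the instance, mirroring the
+# delete=True behavior described above.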
+
+
+def _instance_update(context, instance_uuid, values, expected, original=None):
+    if not uuidutils.is_uuid_like(instance_uuid):
+        raise exception.InvalidUUID(instance_uuid)
+
+    if expected is None:
+        expected = {}
+    else:
+        # Coerce all single values to singleton lists
+        expected = {k: [None] if v is None else sqlalchemyutils.to_list(v)
+                       for (k, v) in expected.items()}
+
+    # Extract 'expected_' values from values dict, as these aren't actually
+    # updates
+    for field in ('task_state', 'vm_state'):
+        expected_field = 'expected_%s' % field
+        if expected_field in values:
+            value = values.pop(expected_field, None)
+            # Coerce all single values to singleton lists
+            if value is None:
+                expected[field] = [None]
+            else:
+                expected[field] = sqlalchemyutils.to_list(value)
+
+    # Values which need to be updated separately
+    metadata = values.pop('metadata', None)
+    system_metadata = values.pop('system_metadata', None)
+
+    _handle_objects_related_type_conversions(values)
+
+    # Hostname is potentially unique, but this is enforced in code rather
+    # than the DB. The query below races, but the number of users of
+    # osapi_compute_unique_server_name_scope is small, and a robust fix
+    # will be complex. This is intentionally left as is for the moment.
+    if 'hostname' in values:
+        _validate_unique_server_name(context, values['hostname'])
+
+    compare = models.Instance(uuid=instance_uuid, **expected)
+    try:
+        instance_ref = model_query(context, models.Instance,
+                                   project_only=True).\
+                       update_on_match(compare, 'uuid', values)
+    except update_match.NoRowsMatched:
+        # Update failed. Try to find why and raise a specific error.
+
+        # We should get here only because our expected values were not current
+        # when update_on_match executed. Having failed, we now have a hint that
+        # the values are out of date and should check them.
+
+        # This code is made more complex because we are using repeatable reads.
+        # If we have previously read the original instance in the current
+        # transaction, reading it again will return the same data, even though
+        # the above update failed because it has changed: it is not possible to
+        # determine what has changed in this transaction. In this case we raise
+        # UnknownInstanceUpdateConflict, which will cause the operation to be
+        # retried in a new transaction.
+
+        # Because of the above, if we have previously read the instance in the
+        # current transaction it will have been passed as 'original', and there
+        # is no point refreshing it. If we have not previously read the
+        # instance, we can fetch it here and we will get fresh data.
+        if original is None:
+            original = _instance_get_by_uuid(context, instance_uuid)
+
+        conflicts_expected = {}
+        conflicts_actual = {}
+        for (field, expected_values) in expected.items():
+            actual = original[field]
+            if actual not in expected_values:
+                conflicts_expected[field] = expected_values
+                conflicts_actual[field] = actual
+
+        # Exception properties
+        exc_props = {
+            'instance_uuid': instance_uuid,
+            'expected': conflicts_expected,
+            'actual': conflicts_actual
+        }
+
+        # There was a conflict, but something (probably the MySQL read view,
+        # but possibly an exceptionally unlikely second race) is preventing us
+        # from seeing what it is. When we go round again we'll get a fresh
+        # transaction and a fresh read view.
+        if len(conflicts_actual) == 0:
+            raise exception.UnknownInstanceUpdateConflict(**exc_props)
+
+        # Task state gets special handling for convenience. We raise the
+        # specific error UnexpectedDeletingTaskStateError or
+        # UnexpectedTaskStateError as appropriate
+        if 'task_state' in conflicts_actual:
+            conflict_task_state = conflicts_actual['task_state']
+            if conflict_task_state == task_states.DELETING:
+                exc = exception.UnexpectedDeletingTaskStateError
+            else:
+                exc = exception.UnexpectedTaskStateError
+
+        # Everything else is an InstanceUpdateConflict
+        else:
+            exc = exception.InstanceUpdateConflict
+
+        raise exc(**exc_props)
+
+    if metadata is not None:
+        _instance_metadata_update_in_place(context, instance_ref,
+                                           'metadata',
+                                           models.InstanceMetadata,
+                                           metadata)
+
+    if system_metadata is not None:
+        _instance_metadata_update_in_place(context, instance_ref,
+                                           'system_metadata',
+                                           models.InstanceSystemMetadata,
+                                           system_metadata)
+
+    return instance_ref
+
+
+@pick_context_manager_writer
+def instance_add_security_group(context, instance_uuid, security_group_id):
+    """Associate the given security group with the given instance."""
+    sec_group_ref = models.SecurityGroupInstanceAssociation()
+    sec_group_ref.update({'instance_uuid': instance_uuid,
+                          'security_group_id': security_group_id})
+    sec_group_ref.save(context.session)
+
+
+@require_context
+@pick_context_manager_writer
+def instance_remove_security_group(context, instance_uuid, security_group_id):
+    """Disassociate the given security group from the given instance."""
+    model_query(context, models.SecurityGroupInstanceAssociation).\
+                filter_by(instance_uuid=instance_uuid).\
+                filter_by(security_group_id=security_group_id).\
+                soft_delete()
+
+
+###################
+
+
+@require_context
+@pick_context_manager_reader
+def instance_info_cache_get(context, instance_uuid):
+    """Gets an instance info cache from the table.
+
+    :param instance_uuid: = uuid of the info cache's instance
+    """
+    return model_query(context, models.InstanceInfoCache).\
+                         filter_by(instance_uuid=instance_uuid).\
+                         first()
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def instance_info_cache_update(context, instance_uuid, values):
+    """Update an instance info cache record in the table.
+
+    :param instance_uuid: = uuid of info cache's instance
+    :param values: = dict containing column values to update
+    """
+    convert_objects_related_datetimes(values)
+
+    info_cache = model_query(context, models.InstanceInfoCache).\
+                     filter_by(instance_uuid=instance_uuid).\
+                     first()
+    needs_create = False
+    if info_cache and info_cache['deleted']:
+        raise exception.InstanceInfoCacheNotFound(
+                instance_uuid=instance_uuid)
+    elif not info_cache:
+        # NOTE(tr3buchet): just in case someone blows away an instance's
+        #                  cache entry, re-create it.
+        values['instance_uuid'] = instance_uuid
+        info_cache = models.InstanceInfoCache(**values)
+        needs_create = True
+
+    try:
+        with get_context_manager(context).writer.savepoint.using(context):
+            if needs_create:
+                info_cache.save(context.session)
+            else:
+                info_cache.update(values)
+    except db_exc.DBDuplicateEntry:
+        # NOTE(sirp): Possible race if two greenthreads attempt to
+        # recreate the instance cache entry at the same time. First one
+        # wins.
+        pass
+
+    return info_cache
+
+
+@require_context
+@pick_context_manager_writer
+def instance_info_cache_delete(context, instance_uuid):
+    """Deletes an existing instance_info_cache record
+
+    :param instance_uuid: = uuid of the instance tied to the cache record
+    """
+    model_query(context, models.InstanceInfoCache).\
+                         filter_by(instance_uuid=instance_uuid).\
+                         soft_delete()
+
+
+###################
+
+
+def _instance_extra_create(context, values):
+    inst_extra_ref = models.InstanceExtra()
+    inst_extra_ref.update(values)
+    inst_extra_ref.save(context.session)
+    return inst_extra_ref
+
+
+@pick_context_manager_writer
+def instance_extra_update_by_uuid(context, instance_uuid, values):
+    rows_updated = model_query(context, models.InstanceExtra).\
+        filter_by(instance_uuid=instance_uuid).\
+        update(values)
+    if not rows_updated:
+        LOG.debug("Created instance_extra for %s", instance_uuid)
+        create_values = copy.copy(values)
+        create_values["instance_uuid"] = instance_uuid
+        _instance_extra_create(context, create_values)
+        rows_updated = 1
+    return rows_updated
+
+
+@pick_context_manager_reader
+def instance_extra_get_by_instance_uuid(context, instance_uuid,
+                                        columns=None):
+    query = model_query(context, models.InstanceExtra).\
+        filter_by(instance_uuid=instance_uuid)
+    if columns is None:
+        columns = ['numa_topology', 'pci_requests', 'flavor', 'vcpu_model',
+                   'trusted_certs', 'migration_context']
+    for column in columns:
+        query = query.options(undefer(column))
+    instance_extra = query.first()
+    return instance_extra
+
+
+###################
+
+
+@require_context
+@pick_context_manager_writer
+def key_pair_create(context, values):
+    try:
+        key_pair_ref = models.KeyPair()
+        key_pair_ref.update(values)
+        key_pair_ref.save(context.session)
+        return key_pair_ref
+    except db_exc.DBDuplicateEntry:
+        raise exception.KeyPairExists(key_name=values['name'])
+
+
+@require_context
+@pick_context_manager_writer
+def key_pair_destroy(context, user_id, name):
+    result = model_query(context, models.KeyPair).\
+                         filter_by(user_id=user_id).\
+                         filter_by(name=name).\
+                         soft_delete()
+    if not result:
+        raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+
+@require_context
+@pick_context_manager_reader
+def key_pair_get(context, user_id, name):
+    result = model_query(context, models.KeyPair).\
+                     filter_by(user_id=user_id).\
+                     filter_by(name=name).\
+                     first()
+
+    if not result:
+        raise exception.KeypairNotFound(user_id=user_id, name=name)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def key_pair_get_all_by_user(context, user_id, limit=None, marker=None):
+    marker_row = None
+    if marker is not None:
+        marker_row = model_query(context, models.KeyPair, read_deleted="no").\
+            filter_by(name=marker).filter_by(user_id=user_id).first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = model_query(context, models.KeyPair, read_deleted="no").\
+        filter_by(user_id=user_id)
+
+    query = sqlalchemyutils.paginate_query(
+        query, models.KeyPair, limit, ['name'], marker=marker_row)
+
+    return query.all()
+
+
+@require_context
+@pick_context_manager_reader
+def key_pair_count_by_user(context, user_id):
+    return model_query(context, models.KeyPair, read_deleted="no").\
+                   filter_by(user_id=user_id).\
+                   count()
+
+
+###################
+
+@pick_context_manager_writer
+def network_associate(context, project_id, network_id=None, force=False):
+    """Associate a project with a network.
+
+    Called by project_get_networks under certain conditions and by the
+    network manager's add_network_to_project().
+
+    Only associate if the project doesn't already have a network,
+    or if force is True.
+
+    force solves a race condition where a fresh project has multiple
+    instance builds simultaneously picked up by multiple network hosts,
+    which then attempt to associate the project with multiple networks.
+    force should only be used as a direct consequence of a user request;
+    automated requests should never use force.
+    """
+    def network_query(project_filter, id=None):
+        filter_kwargs = {'project_id': project_filter}
+        if id is not None:
+            filter_kwargs['id'] = id
+        return model_query(context, models.Network, read_deleted="no").\
+                       filter_by(**filter_kwargs).\
+                       with_lockmode('update').\
+                       first()
+
+    if not force:
+        # find out if project has a network
+        network_ref = network_query(project_id)
+
+    if force or not network_ref:
+        # in force mode, or the project doesn't have a network, so
+        # associate it with a new network
+
+        # get new network
+        network_ref = network_query(None, network_id)
+        if not network_ref:
+            raise exception.NoMoreNetworks()
+
+        # associate with network
+        # NOTE(vish): if with_lockmode isn't supported, as in sqlite,
+        #             then this has concurrency issues
+        network_ref['project_id'] = project_id
+        context.session.add(network_ref)
+    return network_ref
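+
+# Example (illustrative): network_associate(ctxt, project_id) only picks a new
+# network if the project has none yet, while network_associate(ctxt,
+# project_id, force=True) always associates a fresh network and, as the
+# docstring above notes, should only be used for an explicit user request.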
+
+
+def _network_ips_query(context, network_id):
+    return model_query(context, models.FixedIp, read_deleted="no").\
+                   filter_by(network_id=network_id)
+
+
+@pick_context_manager_reader
+def network_count_reserved_ips(context, network_id):
+    return _network_ips_query(context, network_id).\
+                    filter_by(reserved=True).\
+                    count()
+
+
+@pick_context_manager_writer
+def network_create_safe(context, values):
+    network_ref = models.Network()
+    network_ref['uuid'] = uuidutils.generate_uuid()
+    network_ref.update(values)
+
+    try:
+        network_ref.save(context.session)
+        return network_ref
+    except db_exc.DBDuplicateEntry:
+        raise exception.DuplicateVlan(vlan=values['vlan'])
+
+
+@pick_context_manager_writer
+def network_delete_safe(context, network_id):
+    result = model_query(context, models.FixedIp, read_deleted="no").\
+                     filter_by(network_id=network_id).\
+                     filter_by(allocated=True).\
+                     count()
+    if result != 0:
+        raise exception.NetworkInUse(network_id=network_id)
+    network_ref = _network_get(context, network_id=network_id)
+
+    model_query(context, models.FixedIp, read_deleted="no").\
+            filter_by(network_id=network_id).\
+            soft_delete()
+
+    context.session.delete(network_ref)
+
+
+@pick_context_manager_writer
+def network_disassociate(context, network_id, disassociate_host,
+                         disassociate_project):
+    net_update = {}
+    if disassociate_project:
+        net_update['project_id'] = None
+    if disassociate_host:
+        net_update['host'] = None
+    network_update(context, network_id, net_update)
+
+
+def _network_get(context, network_id, project_only='allow_none'):
+    result = model_query(context, models.Network, project_only=project_only).\
+                    filter_by(id=network_id).\
+                    first()
+
+    if not result:
+        raise exception.NetworkNotFound(network_id=network_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def network_get(context, network_id, project_only='allow_none'):
+    return _network_get(context, network_id, project_only=project_only)
+
+
+@require_context
+@pick_context_manager_reader
+def network_get_all(context, project_only):
+    result = model_query(context, models.Network, read_deleted="no",
+                         project_only=project_only).all()
+
+    if not result:
+        raise exception.NoNetworksFound()
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def network_get_all_by_uuids(context, network_uuids, project_only):
+    result = model_query(context, models.Network, read_deleted="no",
+                         project_only=project_only).\
+                filter(models.Network.uuid.in_(network_uuids)).\
+                all()
+
+    if not result:
+        raise exception.NoNetworksFound()
+
+    # check if the result contains all the networks
+    # we are looking for
+    for network_uuid in network_uuids:
+        for network in result:
+            if network['uuid'] == network_uuid:
+                break
+        else:
+            if project_only:
+                raise exception.NetworkNotFoundForProject(
+                      network_uuid=network_uuid, project_id=context.project_id)
+            raise exception.NetworkNotFound(network_id=network_uuid)
+
+    return result
+
+
+def _get_associated_fixed_ips_query(context, network_id, host=None):
+    # NOTE(vish): The ugly joins here are to solve a performance issue and
+    #             should be removed once we can add and remove leases
+    #             without regenerating the whole list
+    vif_and = and_(models.VirtualInterface.id ==
+                   models.FixedIp.virtual_interface_id,
+                   models.VirtualInterface.deleted == 0)
+    inst_and = and_(models.Instance.uuid == models.FixedIp.instance_uuid,
+                    models.Instance.deleted == 0)
+    # NOTE(vish): This subquery left joins the minimum interface id for each
+    #             instance. If the join succeeds (i.e. the 11th column is not
+    #             null), then the fixed ip is on the first interface.
+    subq = context.session.query(
+        func.min(models.VirtualInterface.id).label("id"),
+        models.VirtualInterface.instance_uuid).\
+        group_by(models.VirtualInterface.instance_uuid).subquery()
+    subq_and = and_(subq.c.id == models.FixedIp.virtual_interface_id,
+            subq.c.instance_uuid == models.VirtualInterface.instance_uuid)
+    query = context.session.query(
+        models.FixedIp.address,
+        models.FixedIp.instance_uuid,
+        models.FixedIp.network_id,
+        models.FixedIp.virtual_interface_id,
+        models.VirtualInterface.address,
+        models.Instance.hostname,
+        models.Instance.updated_at,
+        models.Instance.created_at,
+        models.FixedIp.allocated,
+        models.FixedIp.leased,
+        subq.c.id).\
+        filter(models.FixedIp.deleted == 0).\
+        filter(models.FixedIp.network_id == network_id).\
+        join((models.VirtualInterface, vif_and)).\
+        join((models.Instance, inst_and)).\
+        outerjoin((subq, subq_and)).\
+        filter(models.FixedIp.instance_uuid != null()).\
+        filter(models.FixedIp.virtual_interface_id != null())
+    if host:
+        query = query.filter(models.Instance.host == host)
+    return query
+
+
+@pick_context_manager_reader
+def network_get_associated_fixed_ips(context, network_id, host=None):
+    # FIXME(sirp): since this returns fixed_ips, this would be better named
+    # fixed_ip_get_all_by_network.
+    query = _get_associated_fixed_ips_query(context, network_id, host)
+    result = query.all()
+    data = []
+    for datum in result:
+        cleaned = {}
+        cleaned['address'] = datum[0]
+        cleaned['instance_uuid'] = datum[1]
+        cleaned['network_id'] = datum[2]
+        cleaned['vif_id'] = datum[3]
+        cleaned['vif_address'] = datum[4]
+        cleaned['instance_hostname'] = datum[5]
+        cleaned['instance_updated'] = datum[6]
+        cleaned['instance_created'] = datum[7]
+        cleaned['allocated'] = datum[8]
+        cleaned['leased'] = datum[9]
+        # NOTE(vish): default_route is True if this fixed ip is on the first
+        #             interface of its instance.
+        cleaned['default_route'] = datum[10] is not None
+        data.append(cleaned)
+    return data
+
+
+@pick_context_manager_reader
+def network_in_use_on_host(context, network_id, host):
+    query = _get_associated_fixed_ips_query(context, network_id, host)
+    return query.count() > 0
+
+
+def _network_get_query(context):
+    return model_query(context, models.Network, read_deleted="no")
+
+
+@pick_context_manager_reader
+def network_get_by_uuid(context, uuid):
+    result = _network_get_query(context).filter_by(uuid=uuid).first()
+
+    if not result:
+        raise exception.NetworkNotFoundForUUID(uuid=uuid)
+
+    return result
+
+
+@pick_context_manager_reader
+def network_get_by_cidr(context, cidr):
+    result = _network_get_query(context).\
+                filter(or_(models.Network.cidr == cidr,
+                           models.Network.cidr_v6 == cidr)).\
+                first()
+
+    if not result:
+        raise exception.NetworkNotFoundForCidr(cidr=cidr)
+
+    return result
+
+
+@pick_context_manager_reader
+def network_get_all_by_host(context, host):
+    fixed_host_filter = or_(models.FixedIp.host == host,
+            and_(models.FixedIp.instance_uuid != null(),
+                 models.Instance.host == host))
+    fixed_ip_query = model_query(context, models.FixedIp,
+                                 (models.FixedIp.network_id,)).\
+                     outerjoin((models.Instance,
+                                models.Instance.uuid ==
+                                models.FixedIp.instance_uuid)).\
+                     filter(fixed_host_filter)
+    # NOTE(vish): return networks that have host set
+    #             or that have a fixed ip with host set
+    #             or that have an instance with host set
+    host_filter = or_(models.Network.host == host,
+                      models.Network.id.in_(fixed_ip_query.subquery()))
+    return _network_get_query(context).filter(host_filter).all()
+
+
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def network_set_host(context, network_id, host_id):
+    network_ref = _network_get_query(context).\
+        filter_by(id=network_id).\
+        first()
+
+    if not network_ref:
+        raise exception.NetworkNotFound(network_id=network_id)
+
+    if network_ref.host:
+        return None
+
+    rows_updated = _network_get_query(context).\
+        filter_by(id=network_id).\
+        filter_by(host=None).\
+        update({'host': host_id})
+
+    if not rows_updated:
+        LOG.debug('The row was updated in a concurrent transaction; '
+                  'we will fetch another row')
+        raise db_exc.RetryRequest(
+            exception.NetworkSetHostFailed(network_id=network_id))
+
+
+@require_context
+@pick_context_manager_writer
+def network_update(context, network_id, values):
+    network_ref = _network_get(context, network_id)
+    network_ref.update(values)
+    try:
+        network_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.DuplicateVlan(vlan=values['vlan'])
+    return network_ref
+
+
+###################
+
+
+@require_context
+@pick_context_manager_reader
+def quota_get(context, project_id, resource, user_id=None):
+    model = models.ProjectUserQuota if user_id else models.Quota
+    query = model_query(context, model).\
+                    filter_by(project_id=project_id).\
+                    filter_by(resource=resource)
+    if user_id:
+        query = query.filter_by(user_id=user_id)
+
+    result = query.first()
+    if not result:
+        if user_id:
+            raise exception.ProjectUserQuotaNotFound(project_id=project_id,
+                                                     user_id=user_id)
+        else:
+            raise exception.ProjectQuotaNotFound(project_id=project_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def quota_get_all_by_project_and_user(context, project_id, user_id):
+    user_quotas = model_query(context, models.ProjectUserQuota,
+                              (models.ProjectUserQuota.resource,
+                               models.ProjectUserQuota.hard_limit)).\
+                   filter_by(project_id=project_id).\
+                   filter_by(user_id=user_id).\
+                   all()
+
+    result = {'project_id': project_id, 'user_id': user_id}
+    for user_quota in user_quotas:
+        result[user_quota.resource] = user_quota.hard_limit
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def quota_get_all_by_project(context, project_id):
+    rows = model_query(context, models.Quota, read_deleted="no").\
+                   filter_by(project_id=project_id).\
+                   all()
+
+    result = {'project_id': project_id}
+    for row in rows:
+        result[row.resource] = row.hard_limit
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def quota_get_all(context, project_id):
+    result = model_query(context, models.ProjectUserQuota).\
+                   filter_by(project_id=project_id).\
+                   all()
+
+    return result
+
+
+def quota_get_per_project_resources():
+    return PER_PROJECT_QUOTAS
+
+
+@pick_context_manager_writer
+def quota_create(context, project_id, resource, limit, user_id=None):
+    per_user = user_id and resource not in PER_PROJECT_QUOTAS
+    quota_ref = models.ProjectUserQuota() if per_user else models.Quota()
+    if per_user:
+        quota_ref.user_id = user_id
+    quota_ref.project_id = project_id
+    quota_ref.resource = resource
+    quota_ref.hard_limit = limit
+    try:
+        quota_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.QuotaExists(project_id=project_id, resource=resource)
+    return quota_ref
+
+
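+# Hedged usage sketch (illustrative only): quota_create() stores a per-user
+# row only when a user_id is given and the resource is not listed in
+# PER_PROJECT_QUOTAS; otherwise a project-wide Quota row is written.  The
+# context `ctxt`, project/user ids and limits below are assumed examples.
+def _example_seed_quotas(ctxt):
+    # Per-user quota ('instances' assumed not per-project-only here).
+    quota_create(ctxt, 'demo-project', 'instances', 10, user_id='demo-user')
+    # Project-wide quota: no user_id, so a models.Quota row is created.
+    quota_create(ctxt, 'demo-project', 'fixed_ips', 64)
+
+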
+@pick_context_manager_writer
+def quota_update(context, project_id, resource, limit, user_id=None):
+    per_user = user_id and resource not in PER_PROJECT_QUOTAS
+    model = models.ProjectUserQuota if per_user else models.Quota
+    query = model_query(context, model).\
+                filter_by(project_id=project_id).\
+                filter_by(resource=resource)
+    if per_user:
+        query = query.filter_by(user_id=user_id)
+
+    result = query.update({'hard_limit': limit})
+    if not result:
+        if per_user:
+            raise exception.ProjectUserQuotaNotFound(project_id=project_id,
+                                                     user_id=user_id)
+        else:
+            raise exception.ProjectQuotaNotFound(project_id=project_id)
+
+
+###################
+
+
+@require_context
+@pick_context_manager_reader
+def quota_class_get(context, class_name, resource):
+    result = model_query(context, models.QuotaClass, read_deleted="no").\
+                     filter_by(class_name=class_name).\
+                     filter_by(resource=resource).\
+                     first()
+
+    if not result:
+        raise exception.QuotaClassNotFound(class_name=class_name)
+
+    return result
+
+
+@pick_context_manager_reader
+def quota_class_get_default(context):
+    rows = model_query(context, models.QuotaClass, read_deleted="no").\
+                   filter_by(class_name=_DEFAULT_QUOTA_NAME).\
+                   all()
+
+    result = {'class_name': _DEFAULT_QUOTA_NAME}
+    for row in rows:
+        result[row.resource] = row.hard_limit
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def quota_class_get_all_by_name(context, class_name):
+    rows = model_query(context, models.QuotaClass, read_deleted="no").\
+                   filter_by(class_name=class_name).\
+                   all()
+
+    result = {'class_name': class_name}
+    for row in rows:
+        result[row.resource] = row.hard_limit
+
+    return result
+
+
+@pick_context_manager_writer
+def quota_class_create(context, class_name, resource, limit):
+    quota_class_ref = models.QuotaClass()
+    quota_class_ref.class_name = class_name
+    quota_class_ref.resource = resource
+    quota_class_ref.hard_limit = limit
+    quota_class_ref.save(context.session)
+    return quota_class_ref
+
+
+@pick_context_manager_writer
+def quota_class_update(context, class_name, resource, limit):
+    result = model_query(context, models.QuotaClass, read_deleted="no").\
+                     filter_by(class_name=class_name).\
+                     filter_by(resource=resource).\
+                     update({'hard_limit': limit})
+
+    if not result:
+        raise exception.QuotaClassNotFound(class_name=class_name)
+
+
+###################
+
+
+@pick_context_manager_writer
+def quota_destroy_all_by_project_and_user(context, project_id, user_id):
+    model_query(context, models.ProjectUserQuota, read_deleted="no").\
+        filter_by(project_id=project_id).\
+        filter_by(user_id=user_id).\
+        soft_delete(synchronize_session=False)
+
+
+@pick_context_manager_writer
+def quota_destroy_all_by_project(context, project_id):
+    model_query(context, models.Quota, read_deleted="no").\
+        filter_by(project_id=project_id).\
+        soft_delete(synchronize_session=False)
+
+    model_query(context, models.ProjectUserQuota, read_deleted="no").\
+        filter_by(project_id=project_id).\
+        soft_delete(synchronize_session=False)
+
+
+###################
+
+
+def _ec2_volume_get_query(context):
+    return model_query(context, models.VolumeIdMapping, read_deleted='yes')
+
+
+def _ec2_snapshot_get_query(context):
+    return model_query(context, models.SnapshotIdMapping, read_deleted='yes')
+
+
+@require_context
+@pick_context_manager_writer
+def ec2_volume_create(context, volume_uuid, id=None):
+    """Create ec2 compatible volume by provided uuid."""
+    ec2_volume_ref = models.VolumeIdMapping()
+    ec2_volume_ref.update({'uuid': volume_uuid})
+    if id is not None:
+        ec2_volume_ref.update({'id': id})
+
+    ec2_volume_ref.save(context.session)
+
+    return ec2_volume_ref
+
+
+@require_context
+@pick_context_manager_reader
+def ec2_volume_get_by_uuid(context, volume_uuid):
+    result = _ec2_volume_get_query(context).\
+                    filter_by(uuid=volume_uuid).\
+                    first()
+
+    if not result:
+        raise exception.VolumeNotFound(volume_id=volume_uuid)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def ec2_volume_get_by_id(context, volume_id):
+    result = _ec2_volume_get_query(context).\
+                    filter_by(id=volume_id).\
+                    first()
+
+    if not result:
+        raise exception.VolumeNotFound(volume_id=volume_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_writer
+def ec2_snapshot_create(context, snapshot_uuid, id=None):
+    """Create ec2 compatible snapshot by provided uuid."""
+    ec2_snapshot_ref = models.SnapshotIdMapping()
+    ec2_snapshot_ref.update({'uuid': snapshot_uuid})
+    if id is not None:
+        ec2_snapshot_ref.update({'id': id})
+
+    ec2_snapshot_ref.save(context.session)
+
+    return ec2_snapshot_ref
+
+
+@require_context
+@pick_context_manager_reader
+def ec2_snapshot_get_by_ec2_id(context, ec2_id):
+    result = _ec2_snapshot_get_query(context).\
+                    filter_by(id=ec2_id).\
+                    first()
+
+    if not result:
+        raise exception.SnapshotNotFound(snapshot_id=ec2_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def ec2_snapshot_get_by_uuid(context, snapshot_uuid):
+    result = _ec2_snapshot_get_query(context).\
+                    filter_by(uuid=snapshot_uuid).\
+                    first()
+
+    if not result:
+        raise exception.SnapshotNotFound(snapshot_id=snapshot_uuid)
+
+    return result
+
+
+###################
+
+
+def _block_device_mapping_get_query(context, columns_to_join=None):
+    if columns_to_join is None:
+        columns_to_join = []
+
+    query = model_query(context, models.BlockDeviceMapping)
+
+    for column in columns_to_join:
+        query = query.options(joinedload(column))
+
+    return query
+
+
+def _scrub_empty_str_values(dct, keys_to_scrub):
+    """Remove any keys found in sequence keys_to_scrub from the dict
+    if they have the value ''.
+    """
+    for key in keys_to_scrub:
+        if key in dct and dct[key] == '':
+            del dct[key]
+
+
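+# Hedged illustration (not part of the API): _scrub_empty_str_values() mutates
+# the dict in place and drops only keys whose value is the empty string.  The
+# sample values are made up for the example.
+def _example_scrub_empty_values():
+    bdm_values = {'volume_size': '', 'device_name': '/dev/vda'}
+    _scrub_empty_str_values(bdm_values, ['volume_size'])
+    assert bdm_values == {'device_name': '/dev/vda'}
+
+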
+def _from_legacy_values(values, legacy, allow_updates=False):
+    if legacy:
+        if allow_updates and block_device.is_safe_for_update(values):
+            return values
+        else:
+            return block_device.BlockDeviceDict.from_legacy(values)
+    else:
+        return values
+
+
+def _set_or_validate_uuid(values):
+    uuid = values.get('uuid')
+
+    # values doesn't contain uuid, or it's blank
+    if not uuid:
+        values['uuid'] = uuidutils.generate_uuid()
+
+    # values contains a uuid
+    else:
+        if not uuidutils.is_uuid_like(uuid):
+            raise exception.InvalidUUID(uuid=uuid)
+
+
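+# Hedged illustration (not part of the API): _set_or_validate_uuid() fills in
+# a generated uuid when none is present and rejects malformed ones.  The dicts
+# below are assumed example values.
+def _example_uuid_handling():
+    values = {'device_name': '/dev/vdb'}
+    _set_or_validate_uuid(values)          # a uuid is generated in place
+    assert uuidutils.is_uuid_like(values['uuid'])
+    try:
+        _set_or_validate_uuid({'uuid': 'not-a-uuid'})
+    except exception.InvalidUUID:
+        pass                               # malformed uuids are rejected
+
+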
+@require_context
+@pick_context_manager_writer
+def block_device_mapping_create(context, values, legacy=True):
+    _scrub_empty_str_values(values, ['volume_size'])
+    values = _from_legacy_values(values, legacy)
+    convert_objects_related_datetimes(values)
+
+    _set_or_validate_uuid(values)
+
+    bdm_ref = models.BlockDeviceMapping()
+    bdm_ref.update(values)
+    bdm_ref.save(context.session)
+    return bdm_ref
+
+
+@require_context
+@pick_context_manager_writer
+def block_device_mapping_update(context, bdm_id, values, legacy=True):
+    _scrub_empty_str_values(values, ['volume_size'])
+    values = _from_legacy_values(values, legacy, allow_updates=True)
+    convert_objects_related_datetimes(values)
+
+    query = _block_device_mapping_get_query(context).filter_by(id=bdm_id)
+    query.update(values)
+    return query.first()
+
+
+@pick_context_manager_writer
+def block_device_mapping_update_or_create(context, values, legacy=True):
+    # TODO(mdbooth): Remove this method entirely. Callers should know whether
+    # they require update or create, and call the appropriate method.
+
+    _scrub_empty_str_values(values, ['volume_size'])
+    values = _from_legacy_values(values, legacy, allow_updates=True)
+    convert_objects_related_datetimes(values)
+
+    result = None
+    # NOTE(xqueralt,danms): Only update a BDM when device_name or
+    # uuid was provided. Prefer the uuid, if available, but fall
+    # back to device_name if no uuid is provided, which can happen
+    # for BDMs created before we had a uuid. We allow empty device
+    # names so they will be set later by the manager.
+    if 'uuid' in values:
+        query = _block_device_mapping_get_query(context)
+        result = query.filter_by(instance_uuid=values['instance_uuid'],
+                                 uuid=values['uuid']).one_or_none()
+
+    if not result and values['device_name']:
+        query = _block_device_mapping_get_query(context)
+        result = query.filter_by(instance_uuid=values['instance_uuid'],
+                                 device_name=values['device_name']).first()
+
+    if result:
+        result.update(values)
+    else:
+        # Either the device_name or uuid doesn't exist in the database yet, or
+        # neither was provided. Both cases mean creating a new BDM.
+        _set_or_validate_uuid(values)
+        result = models.BlockDeviceMapping(**values)
+        result.save(context.session)
+
+    # NOTE(xqueralt): Prevent from having multiple swap devices for the
+    # same instance. This will delete all the existing ones.
+    if block_device.new_format_is_swap(values):
+        query = _block_device_mapping_get_query(context)
+        query = query.filter_by(instance_uuid=values['instance_uuid'],
+                                source_type='blank', guest_format='swap')
+        query = query.filter(models.BlockDeviceMapping.id != result.id)
+        query.soft_delete()
+
+    return result
+
+
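+# Hedged caller sketch (illustrative only): per the NOTE above, a BDM carrying
+# a 'uuid' is matched on (instance_uuid, uuid) first, then on device_name.
+# The values dict below is an assumed example; with no 'uuid' and an empty
+# device_name it falls through to the create branch and a fresh uuid is
+# generated by _set_or_validate_uuid().
+def _example_attach_volume_bdm(ctxt, instance_uuid, volume_id):
+    values = {'instance_uuid': instance_uuid,
+              'source_type': 'volume',
+              'destination_type': 'volume',
+              'volume_id': volume_id,
+              'device_name': None}
+    return block_device_mapping_update_or_create(ctxt, values, legacy=False)
+
+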
+@require_context
+@pick_context_manager_reader_allow_async
+def block_device_mapping_get_all_by_instance_uuids(context, instance_uuids):
+    if not instance_uuids:
+        return []
+    return _block_device_mapping_get_query(context).filter(
+        models.BlockDeviceMapping.instance_uuid.in_(instance_uuids)).all()
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def block_device_mapping_get_all_by_instance(context, instance_uuid):
+    return _block_device_mapping_get_query(context).\
+                 filter_by(instance_uuid=instance_uuid).\
+                 all()
+
+
+@require_context
+@pick_context_manager_reader
+def block_device_mapping_get_all_by_volume_id(context, volume_id,
+        columns_to_join=None):
+    return _block_device_mapping_get_query(context,
+            columns_to_join=columns_to_join).\
+                 filter_by(volume_id=volume_id).\
+                 all()
+
+
+@require_context
+@pick_context_manager_reader
+def block_device_mapping_get_by_instance_and_volume_id(context, volume_id,
+                                                       instance_uuid,
+                                                       columns_to_join=None):
+    return _block_device_mapping_get_query(context,
+            columns_to_join=columns_to_join).\
+                 filter_by(volume_id=volume_id).\
+                 filter_by(instance_uuid=instance_uuid).\
+                 first()
+
+
+@require_context
+@pick_context_manager_writer
+def block_device_mapping_destroy(context, bdm_id):
+    _block_device_mapping_get_query(context).\
+            filter_by(id=bdm_id).\
+            soft_delete()
+
+
+@require_context
+@pick_context_manager_writer
+def block_device_mapping_destroy_by_instance_and_volume(context, instance_uuid,
+                                                        volume_id):
+    _block_device_mapping_get_query(context).\
+            filter_by(instance_uuid=instance_uuid).\
+            filter_by(volume_id=volume_id).\
+            soft_delete()
+
+
+@require_context
+@pick_context_manager_writer
+def block_device_mapping_destroy_by_instance_and_device(context, instance_uuid,
+                                                        device_name):
+    _block_device_mapping_get_query(context).\
+            filter_by(instance_uuid=instance_uuid).\
+            filter_by(device_name=device_name).\
+            soft_delete()
+
+
+###################
+
+
+@require_context
+@pick_context_manager_writer
+def security_group_create(context, values):
+    security_group_ref = models.SecurityGroup()
+    # FIXME(devcamcar): Unless I do this, rules fail with a lazy load
+    # exception once save() is called.  This will get cleaned up in the
+    # next ORM pass.
+    security_group_ref.rules
+    security_group_ref.update(values)
+    try:
+        with get_context_manager(context).writer.savepoint.using(context):
+            security_group_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.SecurityGroupExists(
+                project_id=values['project_id'],
+                security_group_name=values['name'])
+    return security_group_ref
+
+
+def _security_group_get_query(context, read_deleted=None,
+                              project_only=False, join_rules=True):
+    query = model_query(context, models.SecurityGroup,
+            read_deleted=read_deleted, project_only=project_only)
+    if join_rules:
+        query = query.options(joinedload_all('rules.grantee_group'))
+    return query
+
+
+def _security_group_get_by_names(context, group_names):
+    """Get security group models for a project by a list of names.
+    Raise SecurityGroupNotFoundForProject for a name not found.
+    """
+    query = _security_group_get_query(context, read_deleted="no",
+                                      join_rules=False).\
+            filter_by(project_id=context.project_id).\
+            filter(models.SecurityGroup.name.in_(group_names))
+    sg_models = query.all()
+    if len(sg_models) == len(group_names):
+        return sg_models
+    # Find the first one missing and raise
+    group_names_from_models = [x.name for x in sg_models]
+    for group_name in group_names:
+        if group_name not in group_names_from_models:
+            raise exception.SecurityGroupNotFoundForProject(
+                project_id=context.project_id, security_group_id=group_name)
+    # Not Reached
+
+
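+# Hedged sketch (illustrative only): _security_group_get_by_names() returns
+# one model per requested name or raises for the first missing one, so the
+# lengths always match when it returns.  `ctxt` is an assumed RequestContext
+# whose project_id scopes the lookup.
+def _example_resolve_group_ids(ctxt, group_names):
+    sg_models = _security_group_get_by_names(ctxt, group_names)
+    return {sg.name: sg.id for sg in sg_models}
+
+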
+@require_context
+@pick_context_manager_reader
+def security_group_get_all(context):
+    return _security_group_get_query(context).all()
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_get(context, security_group_id, columns_to_join=None):
+    join_rules = columns_to_join and 'rules' in columns_to_join
+    if join_rules:
+        columns_to_join.remove('rules')
+    query = _security_group_get_query(context, project_only=True,
+                                      join_rules=join_rules).\
+                    filter_by(id=security_group_id)
+
+    if columns_to_join is None:
+        columns_to_join = []
+    for column in columns_to_join:
+        if column.startswith('instances'):
+            query = query.options(joinedload_all(column))
+
+    result = query.first()
+    if not result:
+        raise exception.SecurityGroupNotFound(
+                security_group_id=security_group_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_get_by_name(context, project_id, group_name,
+                               columns_to_join=None):
+    query = _security_group_get_query(context,
+                                      read_deleted="no", join_rules=False).\
+            filter_by(project_id=project_id).\
+            filter_by(name=group_name)
+
+    if columns_to_join is None:
+        columns_to_join = ['instances', 'rules.grantee_group']
+
+    for column in columns_to_join:
+        query = query.options(joinedload_all(column))
+
+    result = query.first()
+    if not result:
+        raise exception.SecurityGroupNotFoundForProject(
+                project_id=project_id, security_group_id=group_name)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_get_by_project(context, project_id):
+    return _security_group_get_query(context, read_deleted="no").\
+                        filter_by(project_id=project_id).\
+                        all()
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_get_by_instance(context, instance_uuid):
+    return _security_group_get_query(context, read_deleted="no").\
+                   join(models.SecurityGroup.instances).\
+                   filter_by(uuid=instance_uuid).\
+                   all()
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_in_use(context, group_id):
+    # Are there any instances that haven't been deleted
+    # that include this group?
+    inst_assoc = model_query(context,
+                             models.SecurityGroupInstanceAssociation,
+                             read_deleted="no").\
+                    filter_by(security_group_id=group_id).\
+                    all()
+    for ia in inst_assoc:
+        num_instances = model_query(context, models.Instance,
+                                    read_deleted="no").\
+                    filter_by(uuid=ia.instance_uuid).\
+                    count()
+        if num_instances:
+            return True
+
+    return False
+
+
+@require_context
+@pick_context_manager_writer
+def security_group_update(context, security_group_id, values,
+                          columns_to_join=None):
+    query = model_query(context, models.SecurityGroup).filter_by(
+        id=security_group_id)
+    if columns_to_join:
+        for column in columns_to_join:
+            query = query.options(joinedload_all(column))
+    security_group_ref = query.first()
+
+    if not security_group_ref:
+        raise exception.SecurityGroupNotFound(
+                security_group_id=security_group_id)
+    security_group_ref.update(values)
+    name = security_group_ref['name']
+    project_id = security_group_ref['project_id']
+    try:
+        security_group_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.SecurityGroupExists(
+                project_id=project_id,
+                security_group_name=name)
+    return security_group_ref
+
+
+def security_group_ensure_default(context):
+    """Ensure default security group exists for a project_id."""
+
+    try:
+        # NOTE(rpodolyaka): create the default security group, if it doesn't
+        # exist. This must be done in a separate transaction, so that
+        # this one is not aborted in case a concurrent one succeeds first
+        # and the unique constraint for security group names is violated
+        # by a concurrent INSERT
+        with get_context_manager(context).writer.independent.using(context):
+            return _security_group_ensure_default(context)
+    except exception.SecurityGroupExists:
+        # NOTE(rpodolyaka): a concurrent transaction has succeeded first,
+        # suppress the error and proceed
+        return security_group_get_by_name(context, context.project_id,
+                                          'default')
+
+
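+# Hedged usage sketch (not part of the API): thanks to the independent
+# transaction plus the SecurityGroupExists fallback above, this call is safe
+# to repeat from concurrent workers and always yields the project's 'default'
+# group.  `ctxt` is an assumed RequestContext with project_id and user_id set.
+def _example_default_group_id(ctxt):
+    return security_group_ensure_default(ctxt).id
+
+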
+@pick_context_manager_writer
+def _security_group_ensure_default(context):
+    try:
+        default_group = _security_group_get_by_names(context, ['default'])[0]
+    except exception.NotFound:
+        values = {'name': 'default',
+                  'description': 'default',
+                  'user_id': context.user_id,
+                  'project_id': context.project_id}
+        default_group = security_group_create(context, values)
+
+        default_rules = _security_group_rule_get_default_query(context).all()
+        for default_rule in default_rules:
+            # This is suboptimal; the values of the default_rule should be
+            # determined programmatically.
+            rule_values = {'protocol': default_rule.protocol,
+                           'from_port': default_rule.from_port,
+                           'to_port': default_rule.to_port,
+                           'cidr': default_rule.cidr,
+                           'parent_group_id': default_group.id,
+            }
+            _security_group_rule_create(context, rule_values)
+    return default_group
+
+
+@require_context
+@pick_context_manager_writer
+def security_group_destroy(context, security_group_id):
+    model_query(context, models.SecurityGroup).\
+            filter_by(id=security_group_id).\
+            soft_delete()
+    model_query(context, models.SecurityGroupInstanceAssociation).\
+            filter_by(security_group_id=security_group_id).\
+            soft_delete()
+    model_query(context, models.SecurityGroupIngressRule).\
+            filter_by(group_id=security_group_id).\
+            soft_delete()
+    model_query(context, models.SecurityGroupIngressRule).\
+            filter_by(parent_group_id=security_group_id).\
+            soft_delete()
+
+
+def _security_group_count_by_project_and_user(context, project_id, user_id):
+    nova.context.authorize_project_context(context, project_id)
+    return model_query(context, models.SecurityGroup, read_deleted="no").\
+                   filter_by(project_id=project_id).\
+                   filter_by(user_id=user_id).\
+                   count()
+
+
+###################
+
+
+def _security_group_rule_create(context, values):
+    security_group_rule_ref = models.SecurityGroupIngressRule()
+    security_group_rule_ref.update(values)
+    security_group_rule_ref.save(context.session)
+    return security_group_rule_ref
+
+
+def _security_group_rule_get_query(context):
+    return model_query(context, models.SecurityGroupIngressRule)
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_rule_get(context, security_group_rule_id):
+    result = (_security_group_rule_get_query(context).
+                         filter_by(id=security_group_rule_id).
+                         first())
+
+    if not result:
+        raise exception.SecurityGroupNotFoundForRule(
+                                               rule_id=security_group_rule_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_rule_get_by_security_group(context, security_group_id,
+                                              columns_to_join=None):
+    if columns_to_join is None:
+        columns_to_join = ['grantee_group.instances.system_metadata',
+                           'grantee_group.instances.info_cache']
+    query = (_security_group_rule_get_query(context).
+             filter_by(parent_group_id=security_group_id))
+    for column in columns_to_join:
+        query = query.options(joinedload_all(column))
+    return query.all()
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_rule_get_by_instance(context, instance_uuid):
+    return (_security_group_rule_get_query(context).
+            join('parent_group', 'instances').
+            filter_by(uuid=instance_uuid).
+            options(joinedload('grantee_group')).
+            all())
+
+
+@require_context
+@pick_context_manager_writer
+def security_group_rule_create(context, values):
+    return _security_group_rule_create(context, values)
+
+
+@require_context
+@pick_context_manager_writer
+def security_group_rule_destroy(context, security_group_rule_id):
+    count = (_security_group_rule_get_query(context).
+                    filter_by(id=security_group_rule_id).
+                    soft_delete())
+    if count == 0:
+        raise exception.SecurityGroupNotFoundForRule(
+                                            rule_id=security_group_rule_id)
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_rule_count_by_group(context, security_group_id):
+    return (model_query(context, models.SecurityGroupIngressRule,
+                   read_deleted="no").
+                   filter_by(parent_group_id=security_group_id).
+                   count())
+
+
+###################
+
+
+def _security_group_rule_get_default_query(context):
+    return model_query(context, models.SecurityGroupIngressDefaultRule)
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_default_rule_get(context, security_group_rule_default_id):
+    result = _security_group_rule_get_default_query(context).\
+                        filter_by(id=security_group_rule_default_id).\
+                        first()
+
+    if not result:
+        raise exception.SecurityGroupDefaultRuleNotFound(
+                                        rule_id=security_group_rule_default_id)
+
+    return result
+
+
+@pick_context_manager_writer
+def security_group_default_rule_destroy(context,
+                                        security_group_rule_default_id):
+    count = _security_group_rule_get_default_query(context).\
+                        filter_by(id=security_group_rule_default_id).\
+                        soft_delete()
+    if count == 0:
+        raise exception.SecurityGroupDefaultRuleNotFound(
+                                    rule_id=security_group_rule_default_id)
+
+
+@pick_context_manager_writer
+def security_group_default_rule_create(context, values):
+    security_group_default_rule_ref = models.SecurityGroupIngressDefaultRule()
+    security_group_default_rule_ref.update(values)
+    security_group_default_rule_ref.save(context.session)
+    return security_group_default_rule_ref
+
+
+@require_context
+@pick_context_manager_reader
+def security_group_default_rule_list(context):
+    return _security_group_rule_get_default_query(context).all()
+
+
+###################
+
+
+@pick_context_manager_writer
+def provider_fw_rule_create(context, rule):
+    fw_rule_ref = models.ProviderFirewallRule()
+    fw_rule_ref.update(rule)
+    fw_rule_ref.save(context.session)
+    return fw_rule_ref
+
+
+@pick_context_manager_reader
+def provider_fw_rule_get_all(context):
+    return model_query(context, models.ProviderFirewallRule).all()
+
+
+@pick_context_manager_writer
+def provider_fw_rule_destroy(context, rule_id):
+    context.session.query(models.ProviderFirewallRule).\
+        filter_by(id=rule_id).\
+        soft_delete()
+
+
+###################
+
+
+@require_context
+@pick_context_manager_writer
+def project_get_networks(context, project_id, associate=True):
+    # NOTE(tr3buchet): as before, this function will associate a project
+    # with a network if it doesn't have one and associate is True
+    result = model_query(context, models.Network, read_deleted="no").\
+                     filter_by(project_id=project_id).\
+                     all()
+
+    if not result:
+        if not associate:
+            return []
+
+        return [network_associate(context, project_id)]
+
+    return result
+
+
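+# Hedged sketch (illustrative only): with associate=False the call above is a
+# pure lookup and returns [] for projects without a network, which suits
+# read-only tooling.  `ctxt` and the project id are assumed example values.
+def _example_project_network_uuids(ctxt, project_id):
+    networks = project_get_networks(ctxt, project_id, associate=False)
+    return [net.uuid for net in networks]
+
+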
+###################
+
+
+@pick_context_manager_writer
+def migration_create(context, values):
+    migration = models.Migration()
+    migration.update(values)
+    migration.save(context.session)
+    return migration
+
+
+@pick_context_manager_writer
+def migration_update(context, id, values):
+    migration = migration_get(context, id)
+    migration.update(values)
+
+    return migration
+
+
+@pick_context_manager_reader
+def migration_get(context, id):
+    result = model_query(context, models.Migration, read_deleted="yes").\
+                     filter_by(id=id).\
+                     first()
+
+    if not result:
+        raise exception.MigrationNotFound(migration_id=id)
+
+    return result
+
+
+@pick_context_manager_reader
+def migration_get_by_uuid(context, migration_uuid):
+    result = model_query(context, models.Migration, read_deleted="yes").\
+                     filter_by(uuid=migration_uuid).\
+                     first()
+
+    if not result:
+        raise exception.MigrationNotFound(migration_id=migration_uuid)
+
+    return result
+
+
+@pick_context_manager_reader
+def migration_get_by_id_and_instance(context, id, instance_uuid):
+    result = model_query(context, models.Migration).\
+                     filter_by(id=id).\
+                     filter_by(instance_uuid=instance_uuid).\
+                     first()
+
+    if not result:
+        raise exception.MigrationNotFoundForInstance(migration_id=id,
+                                                     instance_id=instance_uuid)
+
+    return result
+
+
+@pick_context_manager_reader
+def migration_get_by_instance_and_status(context, instance_uuid, status):
+    result = model_query(context, models.Migration, read_deleted="yes").\
+                     filter_by(instance_uuid=instance_uuid).\
+                     filter_by(status=status).\
+                     first()
+
+    if not result:
+        raise exception.MigrationNotFoundByStatus(instance_id=instance_uuid,
+                                                  status=status)
+
+    return result
+
+
+@pick_context_manager_reader_allow_async
+def migration_get_unconfirmed_by_dest_compute(context, confirm_window,
+                                              dest_compute):
+    confirm_window = (timeutils.utcnow() -
+                      datetime.timedelta(seconds=confirm_window))
+
+    return model_query(context, models.Migration, read_deleted="yes").\
+             filter(models.Migration.updated_at <= confirm_window).\
+             filter_by(status="finished").\
+             filter_by(dest_compute=dest_compute).\
+             all()
+
+
+@pick_context_manager_reader
+def migration_get_in_progress_by_host_and_node(context, host, node):
+    # TODO(mriedem): Tracking what various code flows set for
+    # migration status is nutty, since it happens all over the place
+    # and several of the statuses are redundant (done and completed).
+    # We need to define these in an enum somewhere and just update
+    # that one central place that defines what "in progress" means.
+    # NOTE(mriedem): The 'finished' status is not in this list because
+    # 'finished' means a resize is finished on the destination host
+    # and the instance is in VERIFY_RESIZE state, so the end state
+    # for a resize is actually 'confirmed' or 'reverted'.
+    return model_query(context, models.Migration).\
+            filter(or_(and_(models.Migration.source_compute == host,
+                            models.Migration.source_node == node),
+                       and_(models.Migration.dest_compute == host,
+                            models.Migration.dest_node == node))).\
+            filter(~models.Migration.status.in_(['accepted', 'confirmed',
+                                                 'reverted', 'error',
+                                                 'failed', 'completed',
+                                                 'cancelled', 'done'])).\
+            options(joinedload_all('instance.system_metadata')).\
+            all()
+
+
+@pick_context_manager_reader
+def migration_get_in_progress_by_instance(context, instance_uuid,
+                                          migration_type=None):
+    # TODO(Shaohe Feng): we should share the in-progress status list and
+    # also consolidate all statuses into a new MigrationStatus class.
+    query = model_query(context, models.Migration).\
+            filter_by(instance_uuid=instance_uuid).\
+            filter(models.Migration.status.in_(['queued', 'preparing',
+                                                'running',
+                                                'post-migrating']))
+    if migration_type:
+        query = query.filter(models.Migration.migration_type == migration_type)
+
+    return query.all()
+
+
+@pick_context_manager_reader
+def migration_get_all_by_filters(context, filters,
+                                 sort_keys=None, sort_dirs=None,
+                                 limit=None, marker=None):
+    if limit == 0:
+        return []
+
+    query = model_query(context, models.Migration)
+    if "uuid" in filters:
+        # The uuid filter is here for the MigrationLister and multi-cell
+        # paging support in the compute API.
+        uuid = filters["uuid"]
+        uuid = [uuid] if isinstance(uuid, six.string_types) else uuid
+        query = query.filter(models.Migration.uuid.in_(uuid))
+
+    model_object = models.Migration
+    query = _get_query_nova_resource_by_changes_time(query,
+                                                     filters,
+                                                     model_object)
+
+    if "status" in filters:
+        status = filters["status"]
+        status = [status] if isinstance(status, six.string_types) else status
+        query = query.filter(models.Migration.status.in_(status))
+    if "host" in filters:
+        host = filters["host"]
+        query = query.filter(or_(models.Migration.source_compute == host,
+                                 models.Migration.dest_compute == host))
+    elif "source_compute" in filters:
+        host = filters['source_compute']
+        query = query.filter(models.Migration.source_compute == host)
+    if "migration_type" in filters:
+        migtype = filters["migration_type"]
+        query = query.filter(models.Migration.migration_type == migtype)
+    if "hidden" in filters:
+        hidden = filters["hidden"]
+        query = query.filter(models.Migration.hidden == hidden)
+    if "instance_uuid" in filters:
+        instance_uuid = filters["instance_uuid"]
+        query = query.filter(models.Migration.instance_uuid == instance_uuid)
+    if marker:
+        try:
+            marker = migration_get_by_uuid(context, marker)
+        except exception.MigrationNotFound:
+            raise exception.MarkerNotFound(marker=marker)
+    if limit or marker or sort_keys or sort_dirs:
+        # Default sort by desc(['created_at', 'id'])
+        sort_keys, sort_dirs = process_sort_params(sort_keys, sort_dirs,
+                                                   default_dir='desc')
+        return sqlalchemyutils.paginate_query(query,
+                                              models.Migration,
+                                              limit=limit,
+                                              sort_keys=sort_keys,
+                                              marker=marker,
+                                              sort_dirs=sort_dirs).all()
+    else:
+        return query.all()
+
+
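+# Hedged caller sketch (illustrative only): the filter keys mirror the ones
+# handled above ('status', 'host', 'migration_type', ...).  The host name and
+# status value are assumed examples; passing sort_keys triggers the paginated
+# code path.
+def _example_running_migrations_for_host(ctxt, host):
+    filters = {'status': 'running', 'host': host}
+    return migration_get_all_by_filters(ctxt, filters,
+                                        sort_keys=['created_at'],
+                                        sort_dirs=['desc'])
+
+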
+@require_context
+@pick_context_manager_reader_allow_async
+def migration_get_by_sort_filters(context, sort_keys, sort_dirs, values):
+    """Attempt to get a single migration based on a combination of sort
+    keys, directions and filter values. This is used to try to find a
+    marker migration when we don't have a marker uuid.
+
+    This returns just a uuid of the migration that matched.
+    """
+    model = models.Migration
+    return _model_get_uuid_by_sort_filters(context, model, sort_keys,
+                                           sort_dirs, values)
+
+
+@pick_context_manager_writer
+def migration_migrate_to_uuid(context, count):
+    # Avoid circular import
+    from nova import objects
+
+    db_migrations = model_query(context, models.Migration).filter_by(
+        uuid=None).limit(count).all()
+
+    done = 0
+    for db_migration in db_migrations:
+        mig = objects.Migration(context)
+        mig._from_db_object(context, mig, db_migration)
+        done += 1
+
+    # We don't have any situation where we can (detectably) not
+    # migrate a thing, so report anything that matched as "completed".
+    return done, done
+
+
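+# Hedged sketch (illustrative only): a caller backfilling legacy rows would
+# loop in batches until the "matched" count drops to zero; per the comment
+# above both returned counters are equal.  The batch size is an assumed value.
+def _example_backfill_migration_uuids(ctxt, batch_size=50):
+    total = 0
+    while True:
+        matched, done = migration_migrate_to_uuid(ctxt, batch_size)
+        total += done
+        if not matched:
+            break
+    return total
+
+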
+##################
+
+
+@pick_context_manager_writer
+def console_pool_create(context, values):
+    pool = models.ConsolePool()
+    pool.update(values)
+    try:
+        pool.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.ConsolePoolExists(
+            host=values["host"],
+            console_type=values["console_type"],
+            compute_host=values["compute_host"],
+        )
+    return pool
+
+
+@pick_context_manager_reader
+def console_pool_get_by_host_type(context, compute_host, host,
+                                  console_type):
+
+    result = model_query(context, models.ConsolePool, read_deleted="no").\
+                   filter_by(host=host).\
+                   filter_by(console_type=console_type).\
+                   filter_by(compute_host=compute_host).\
+                   options(joinedload('consoles')).\
+                   first()
+
+    if not result:
+        raise exception.ConsolePoolNotFoundForHostType(
+                host=host, console_type=console_type,
+                compute_host=compute_host)
+
+    return result
+
+
+@pick_context_manager_reader
+def console_pool_get_all_by_host_type(context, host, console_type):
+    return model_query(context, models.ConsolePool, read_deleted="no").\
+                   filter_by(host=host).\
+                   filter_by(console_type=console_type).\
+                   options(joinedload('consoles')).\
+                   all()
+
+
+##################
+
+
+@pick_context_manager_writer
+def console_create(context, values):
+    console = models.Console()
+    console.update(values)
+    console.save(context.session)
+    return console
+
+
+@pick_context_manager_writer
+def console_delete(context, console_id):
+    # NOTE(mdragon): consoles are meant to be transient.
+    context.session.query(models.Console).\
+        filter_by(id=console_id).\
+        delete()
+
+
+@pick_context_manager_reader
+def console_get_by_pool_instance(context, pool_id, instance_uuid):
+    result = model_query(context, models.Console, read_deleted="yes").\
+                   filter_by(pool_id=pool_id).\
+                   filter_by(instance_uuid=instance_uuid).\
+                   options(joinedload('pool')).\
+                   first()
+
+    if not result:
+        raise exception.ConsoleNotFoundInPoolForInstance(
+                pool_id=pool_id, instance_uuid=instance_uuid)
+
+    return result
+
+
+@pick_context_manager_reader
+def console_get_all_by_instance(context, instance_uuid, columns_to_join=None):
+    query = model_query(context, models.Console, read_deleted="yes").\
+                filter_by(instance_uuid=instance_uuid)
+    if columns_to_join:
+        for column in columns_to_join:
+            query = query.options(joinedload(column))
+    return query.all()
+
+
+@pick_context_manager_reader
+def console_get(context, console_id, instance_uuid=None):
+    query = model_query(context, models.Console, read_deleted="yes").\
+                    filter_by(id=console_id).\
+                    options(joinedload('pool'))
+
+    if instance_uuid is not None:
+        query = query.filter_by(instance_uuid=instance_uuid)
+
+    result = query.first()
+
+    if not result:
+        if instance_uuid:
+            raise exception.ConsoleNotFoundForInstance(
+                    instance_uuid=instance_uuid)
+        else:
+            raise exception.ConsoleNotFound(console_id=console_id)
+
+    return result
+
+
+##################
+
+
+@pick_context_manager_writer
+def cell_create(context, values):
+    cell = models.Cell()
+    cell.update(values)
+    try:
+        cell.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.CellExists(name=values['name'])
+    return cell
+
+
+def _cell_get_by_name_query(context, cell_name):
+    return model_query(context, models.Cell).filter_by(name=cell_name)
+
+
+@pick_context_manager_writer
+def cell_update(context, cell_name, values):
+    cell_query = _cell_get_by_name_query(context, cell_name)
+    if not cell_query.update(values):
+        raise exception.CellNotFound(cell_name=cell_name)
+    cell = cell_query.first()
+    return cell
+
+
+@pick_context_manager_writer
+def cell_delete(context, cell_name):
+    return _cell_get_by_name_query(context, cell_name).soft_delete()
+
+
+@pick_context_manager_reader
+def cell_get(context, cell_name):
+    result = _cell_get_by_name_query(context, cell_name).first()
+    if not result:
+        raise exception.CellNotFound(cell_name=cell_name)
+    return result
+
+
+@pick_context_manager_reader
+def cell_get_all(context):
+    return model_query(context, models.Cell, read_deleted="no").all()
+
+
+########################
+# User-provided metadata
+
+def _instance_metadata_get_multi(context, instance_uuids):
+    if not instance_uuids:
+        return []
+    return model_query(context, models.InstanceMetadata).filter(
+        models.InstanceMetadata.instance_uuid.in_(instance_uuids))
+
+
+def _instance_metadata_get_query(context, instance_uuid):
+    return model_query(context, models.InstanceMetadata, read_deleted="no").\
+                    filter_by(instance_uuid=instance_uuid)
+
+
+@require_context
+@pick_context_manager_reader
+def instance_metadata_get(context, instance_uuid):
+    rows = _instance_metadata_get_query(context, instance_uuid).all()
+    return {row['key']: row['value'] for row in rows}
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def instance_metadata_delete(context, instance_uuid, key):
+    _instance_metadata_get_query(context, instance_uuid).\
+        filter_by(key=key).\
+        soft_delete()
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def instance_metadata_update(context, instance_uuid, metadata, delete):
+    all_keys = metadata.keys()
+    if delete:
+        _instance_metadata_get_query(context, instance_uuid).\
+            filter(~models.InstanceMetadata.key.in_(all_keys)).\
+            soft_delete(synchronize_session=False)
+
+    already_existing_keys = []
+    meta_refs = _instance_metadata_get_query(context, instance_uuid).\
+        filter(models.InstanceMetadata.key.in_(all_keys)).\
+        all()
+
+    for meta_ref in meta_refs:
+        already_existing_keys.append(meta_ref.key)
+        meta_ref.update({"value": metadata[meta_ref.key]})
+
+    new_keys = set(all_keys) - set(already_existing_keys)
+    for key in new_keys:
+        meta_ref = models.InstanceMetadata()
+        meta_ref.update({"key": key, "value": metadata[key],
+                         "instance_uuid": instance_uuid})
+        context.session.add(meta_ref)
+
+    return metadata
+
+
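+# Hedged sketch (illustrative only): with delete=True the update is a full
+# replacement -- keys missing from the new metadata are soft-deleted, existing
+# keys are updated in place and new keys are inserted.  The key/value pairs
+# below are assumed examples.
+def _example_replace_instance_metadata(ctxt, instance_uuid):
+    return instance_metadata_update(ctxt, instance_uuid,
+                                    {'role': 'builder', 'tier': 'ci'},
+                                    delete=True)
+
+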
+#######################
+# System-owned metadata
+
+
+def _instance_system_metadata_get_multi(context, instance_uuids):
+    if not instance_uuids:
+        return []
+    return model_query(context, models.InstanceSystemMetadata,
+                       read_deleted='yes').filter(
+        models.InstanceSystemMetadata.instance_uuid.in_(instance_uuids))
+
+
+def _instance_system_metadata_get_query(context, instance_uuid):
+    return model_query(context, models.InstanceSystemMetadata).\
+                    filter_by(instance_uuid=instance_uuid)
+
+
+@require_context
+@pick_context_manager_reader
+def instance_system_metadata_get(context, instance_uuid):
+    rows = _instance_system_metadata_get_query(context, instance_uuid).all()
+    return {row['key']: row['value'] for row in rows}
+
+
+@require_context
+@pick_context_manager_writer
+def instance_system_metadata_update(context, instance_uuid, metadata, delete):
+    all_keys = metadata.keys()
+    if delete:
+        _instance_system_metadata_get_query(context, instance_uuid).\
+            filter(~models.InstanceSystemMetadata.key.in_(all_keys)).\
+            soft_delete(synchronize_session=False)
+
+    already_existing_keys = []
+    meta_refs = _instance_system_metadata_get_query(context, instance_uuid).\
+        filter(models.InstanceSystemMetadata.key.in_(all_keys)).\
+        all()
+
+    for meta_ref in meta_refs:
+        already_existing_keys.append(meta_ref.key)
+        meta_ref.update({"value": metadata[meta_ref.key]})
+
+    new_keys = set(all_keys) - set(already_existing_keys)
+    for key in new_keys:
+        meta_ref = models.InstanceSystemMetadata()
+        meta_ref.update({"key": key, "value": metadata[key],
+                         "instance_uuid": instance_uuid})
+        context.session.add(meta_ref)
+
+    return metadata
+
+
+####################
+
+
+@pick_context_manager_writer
+def agent_build_create(context, values):
+    agent_build_ref = models.AgentBuild()
+    agent_build_ref.update(values)
+    try:
+        agent_build_ref.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.AgentBuildExists(hypervisor=values['hypervisor'],
+                        os=values['os'], architecture=values['architecture'])
+    return agent_build_ref
+
+
+@pick_context_manager_reader
+def agent_build_get_by_triple(context, hypervisor, os, architecture):
+    return model_query(context, models.AgentBuild, read_deleted="no").\
+                   filter_by(hypervisor=hypervisor).\
+                   filter_by(os=os).\
+                   filter_by(architecture=architecture).\
+                   first()
+
+
+@pick_context_manager_reader
+def agent_build_get_all(context, hypervisor=None):
+    if hypervisor:
+        return model_query(context, models.AgentBuild, read_deleted="no").\
+                   filter_by(hypervisor=hypervisor).\
+                   all()
+    else:
+        return model_query(context, models.AgentBuild, read_deleted="no").\
+                   all()
+
+
+@pick_context_manager_writer
+def agent_build_destroy(context, agent_build_id):
+    rows_affected = model_query(context, models.AgentBuild).filter_by(
+                                        id=agent_build_id).soft_delete()
+    if rows_affected == 0:
+        raise exception.AgentBuildNotFound(id=agent_build_id)
+
+
+@pick_context_manager_writer
+def agent_build_update(context, agent_build_id, values):
+    rows_affected = model_query(context, models.AgentBuild).\
+                   filter_by(id=agent_build_id).\
+                   update(values)
+    if rows_affected == 0:
+        raise exception.AgentBuildNotFound(id=agent_build_id)
+
+
+####################
+
+@require_context
+@pick_context_manager_reader_allow_async
+def bw_usage_get(context, uuid, start_period, mac):
+    values = {'start_period': start_period}
+    values = convert_objects_related_datetimes(values, 'start_period')
+    return model_query(context, models.BandwidthUsage, read_deleted="yes").\
+                           filter_by(start_period=values['start_period']).\
+                           filter_by(uuid=uuid).\
+                           filter_by(mac=mac).\
+                           first()
+
+
+@require_context
+@pick_context_manager_reader_allow_async
+def bw_usage_get_by_uuids(context, uuids, start_period):
+    values = {'start_period': start_period}
+    values = convert_objects_related_datetimes(values, 'start_period')
+    return (
+        model_query(context, models.BandwidthUsage, read_deleted="yes").
+        filter(models.BandwidthUsage.uuid.in_(uuids)).
+        filter_by(start_period=values['start_period']).
+        all()
+    )
+
+
+@require_context
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def bw_usage_update(context, uuid, mac, start_period, bw_in, bw_out,
+                    last_ctr_in, last_ctr_out, last_refreshed=None):
+
+    if last_refreshed is None:
+        last_refreshed = timeutils.utcnow()
+
+    # NOTE(comstud): More often than not, we'll be updating records vs
+    # creating records.  Optimize accordingly, trying to update existing
+    # records.  Fall back to creation when no rows are updated.
+    ts_values = {'last_refreshed': last_refreshed,
+                 'start_period': start_period}
+    ts_keys = ('start_period', 'last_refreshed')
+    ts_values = convert_objects_related_datetimes(ts_values, *ts_keys)
+    values = {'last_refreshed': ts_values['last_refreshed'],
+              'last_ctr_in': last_ctr_in,
+              'last_ctr_out': last_ctr_out,
+              'bw_in': bw_in,
+              'bw_out': bw_out}
+    # NOTE(pkholkin): order_by() is needed here to ensure that the
+    # same record is updated every time. It can be removed after adding
+    # unique constraint to this model.
+    bw_usage = model_query(context, models.BandwidthUsage,
+            read_deleted='yes').\
+                    filter_by(start_period=ts_values['start_period']).\
+                    filter_by(uuid=uuid).\
+                    filter_by(mac=mac).\
+                    order_by(asc(models.BandwidthUsage.id)).first()
+
+    if bw_usage:
+        bw_usage.update(values)
+        return bw_usage
+
+    bwusage = models.BandwidthUsage()
+    bwusage.start_period = ts_values['start_period']
+    bwusage.uuid = uuid
+    bwusage.mac = mac
+    bwusage.last_refreshed = ts_values['last_refreshed']
+    bwusage.bw_in = bw_in
+    bwusage.bw_out = bw_out
+    bwusage.last_ctr_in = last_ctr_in
+    bwusage.last_ctr_out = last_ctr_out
+    bwusage.save(context.session)
+
+    return bwusage
+
+
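+# Hedged usage sketch (not part of the API): per the NOTE(comstud) above, the
+# same call both updates an existing (uuid, mac, start_period) row and creates
+# it on first use, so a periodic poller can call it unconditionally.  All
+# literal values below are assumed examples.
+def _example_record_bandwidth_sample(ctxt, instance_uuid, start_period):
+    return bw_usage_update(ctxt, instance_uuid, 'fa:16:3e:00:00:01',
+                           start_period, bw_in=1024, bw_out=2048,
+                           last_ctr_in=10, last_ctr_out=20)
+
+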
+####################
+
+
+@require_context
+@pick_context_manager_reader
+def vol_get_usage_by_time(context, begin):
+    """Return volumes usage that have been updated after a specified time."""
+    return model_query(context, models.VolumeUsage, read_deleted="yes").\
+                   filter(or_(models.VolumeUsage.tot_last_refreshed == null(),
+                              models.VolumeUsage.tot_last_refreshed > begin,
+                              models.VolumeUsage.curr_last_refreshed == null(),
+                              models.VolumeUsage.curr_last_refreshed > begin,
+                              )).all()
+
+
+@require_context
+@pick_context_manager_writer
+def vol_usage_update(context, id, rd_req, rd_bytes, wr_req, wr_bytes,
+                     instance_id, project_id, user_id, availability_zone,
+                     update_totals=False):
+
+    refreshed = timeutils.utcnow()
+
+    values = {}
+    # NOTE(dricco): We will be mostly updating current usage records vs
+    # updating total or creating records. Optimize accordingly.
+    if not update_totals:
+        values = {'curr_last_refreshed': refreshed,
+                  'curr_reads': rd_req,
+                  'curr_read_bytes': rd_bytes,
+                  'curr_writes': wr_req,
+                  'curr_write_bytes': wr_bytes,
+                  'instance_uuid': instance_id,
+                  'project_id': project_id,
+                  'user_id': user_id,
+                  'availability_zone': availability_zone}
+    else:
+        values = {'tot_last_refreshed': refreshed,
+                  'tot_reads': models.VolumeUsage.tot_reads + rd_req,
+                  'tot_read_bytes': models.VolumeUsage.tot_read_bytes +
+                                    rd_bytes,
+                  'tot_writes': models.VolumeUsage.tot_writes + wr_req,
+                  'tot_write_bytes': models.VolumeUsage.tot_write_bytes +
+                                     wr_bytes,
+                  'curr_reads': 0,
+                  'curr_read_bytes': 0,
+                  'curr_writes': 0,
+                  'curr_write_bytes': 0,
+                  'instance_uuid': instance_id,
+                  'project_id': project_id,
+                  'user_id': user_id,
+                  'availability_zone': availability_zone}
+
+    current_usage = model_query(context, models.VolumeUsage,
+                        read_deleted="yes").\
+                        filter_by(volume_id=id).\
+                        first()
+    if current_usage:
+        if (rd_req < current_usage['curr_reads'] or
+            rd_bytes < current_usage['curr_read_bytes'] or
+            wr_req < current_usage['curr_writes'] or
+                wr_bytes < current_usage['curr_write_bytes']):
+            LOG.info("Volume(%s) has lower stats then what is in "
+                     "the database. Instance must have been rebooted "
+                     "or crashed. Updating totals.", id)
+            if not update_totals:
+                values['tot_reads'] = (models.VolumeUsage.tot_reads +
+                                       current_usage['curr_reads'])
+                values['tot_read_bytes'] = (
+                    models.VolumeUsage.tot_read_bytes +
+                    current_usage['curr_read_bytes'])
+                values['tot_writes'] = (models.VolumeUsage.tot_writes +
+                                        current_usage['curr_writes'])
+                values['tot_write_bytes'] = (
+                    models.VolumeUsage.tot_write_bytes +
+                    current_usage['curr_write_bytes'])
+            else:
+                values['tot_reads'] = (models.VolumeUsage.tot_reads +
+                                       current_usage['curr_reads'] +
+                                       rd_req)
+                values['tot_read_bytes'] = (
+                    models.VolumeUsage.tot_read_bytes +
+                    current_usage['curr_read_bytes'] + rd_bytes)
+                values['tot_writes'] = (models.VolumeUsage.tot_writes +
+                                        current_usage['curr_writes'] +
+                                        wr_req)
+                values['tot_write_bytes'] = (
+                    models.VolumeUsage.tot_write_bytes +
+                    current_usage['curr_write_bytes'] + wr_bytes)
+
+        current_usage.update(values)
+        current_usage.save(context.session)
+        context.session.refresh(current_usage)
+        return current_usage
+
+    vol_usage = models.VolumeUsage()
+    vol_usage.volume_id = id
+    vol_usage.instance_uuid = instance_id
+    vol_usage.project_id = project_id
+    vol_usage.user_id = user_id
+    vol_usage.availability_zone = availability_zone
+
+    if not update_totals:
+        vol_usage.curr_last_refreshed = refreshed
+        vol_usage.curr_reads = rd_req
+        vol_usage.curr_read_bytes = rd_bytes
+        vol_usage.curr_writes = wr_req
+        vol_usage.curr_write_bytes = wr_bytes
+    else:
+        vol_usage.tot_last_refreshed = refreshed
+        vol_usage.tot_reads = rd_req
+        vol_usage.tot_read_bytes = rd_bytes
+        vol_usage.tot_writes = wr_req
+        vol_usage.tot_write_bytes = wr_bytes
+
+    vol_usage.save(context.session)
+
+    return vol_usage
+
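A minimal, database-free sketch of the rollover rule implemented above: if the
counters reported now are lower than the stored current counters, the guest is
assumed to have rebooted, so the stored current values are folded into the
totals before the new readings are recorded. All names here are illustrative.

    def roll_over(stored, rd_req, rd_bytes, wr_req, wr_bytes):
        rebooted = (rd_req < stored['curr_reads'] or
                    rd_bytes < stored['curr_read_bytes'] or
                    wr_req < stored['curr_writes'] or
                    wr_bytes < stored['curr_write_bytes'])
        if rebooted:
            # Fold the pre-reboot counters into the running totals.
            stored['tot_reads'] += stored['curr_reads']
            stored['tot_read_bytes'] += stored['curr_read_bytes']
            stored['tot_writes'] += stored['curr_writes']
            stored['tot_write_bytes'] += stored['curr_write_bytes']
        stored.update(curr_reads=rd_req, curr_read_bytes=rd_bytes,
                      curr_writes=wr_req, curr_write_bytes=wr_bytes)
        return stored

    usage = {'curr_reads': 100, 'curr_read_bytes': 4096,
             'curr_writes': 50, 'curr_write_bytes': 2048,
             'tot_reads': 0, 'tot_read_bytes': 0,
             'tot_writes': 0, 'tot_write_bytes': 0}
    # Lower readings than what is stored, so the old counters move to totals.
    roll_over(usage, 10, 512, 5, 256)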
+
+####################
+
+
+@pick_context_manager_reader
+def s3_image_get(context, image_id):
+    """Find local s3 image represented by the provided id."""
+    result = model_query(context, models.S3Image, read_deleted="yes").\
+                 filter_by(id=image_id).\
+                 first()
+
+    if not result:
+        raise exception.ImageNotFound(image_id=image_id)
+
+    return result
+
+
+@pick_context_manager_reader
+def s3_image_get_by_uuid(context, image_uuid):
+    """Find local s3 image represented by the provided uuid."""
+    result = model_query(context, models.S3Image, read_deleted="yes").\
+                 filter_by(uuid=image_uuid).\
+                 first()
+
+    if not result:
+        raise exception.ImageNotFound(image_id=image_uuid)
+
+    return result
+
+
+@pick_context_manager_writer
+def s3_image_create(context, image_uuid):
+    """Create local s3 image represented by provided uuid."""
+    try:
+        s3_image_ref = models.S3Image()
+        s3_image_ref.update({'uuid': image_uuid})
+        s3_image_ref.save(context.session)
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return s3_image_ref
+
+
+####################
+
+
+@pick_context_manager_writer
+def instance_fault_create(context, values):
+    """Create a new InstanceFault."""
+    fault_ref = models.InstanceFault()
+    fault_ref.update(values)
+    fault_ref.save(context.session)
+    return dict(fault_ref)
+
+
+@pick_context_manager_reader
+def instance_fault_get_by_instance_uuids(context, instance_uuids,
+                                         latest=False):
+    """Get all instance faults for the provided instance_uuids.
+
+    :param instance_uuids: List of UUIDs of instances to grab faults for
+    :param latest: Optional boolean indicating we should only return the latest
+                   fault for the instance
+    """
+    if not instance_uuids:
+        return {}
+
+    faults_tbl = models.InstanceFault.__table__
+    # NOTE(rpodolyaka): filtering by instance_uuids is performed in both
+    # code branches below for the sake of a better query plan. On change,
+    # make sure to update the other one as well.
+    query = model_query(context, models.InstanceFault,
+                        [faults_tbl],
+                        read_deleted='no')
+
+    if latest:
+        # NOTE(jaypipes): We join instance_faults to a derived table of the
+        # latest faults per instance UUID. The SQL produced below looks like
+        # this:
+        #
+        #  SELECT instance_faults.*
+        #  FROM instance_faults
+        #  JOIN (
+        #    SELECT instance_uuid, MAX(id) AS max_id
+        #    FROM instance_faults
+        #    WHERE instance_uuid IN ( ... )
+        #    AND deleted = 0
+        #    GROUP BY instance_uuid
+        #  ) AS latest_faults
+        #    ON instance_faults.id = latest_faults.max_id;
+        latest_faults = model_query(
+            context, models.InstanceFault,
+            [faults_tbl.c.instance_uuid,
+             sql.func.max(faults_tbl.c.id).label('max_id')],
+            read_deleted='no'
+        ).filter(
+            faults_tbl.c.instance_uuid.in_(instance_uuids)
+        ).group_by(
+            faults_tbl.c.instance_uuid
+        ).subquery(name="latest_faults")
+
+        query = query.join(latest_faults,
+                           faults_tbl.c.id == latest_faults.c.max_id)
+    else:
+        query = query.filter(models.InstanceFault.instance_uuid.in_(
+                                        instance_uuids)).order_by(desc("id"))
+
+    output = {}
+    for instance_uuid in instance_uuids:
+        output[instance_uuid] = []
+
+    for row in query:
+        output[row.instance_uuid].append(row._asdict())
+
+    return output
+
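A hedged usage sketch of the helper above (the context object and UUID are
placeholders): with latest=True each instance maps to at most one fault row.

    faults = instance_fault_get_by_instance_uuids(
        ctxt, ['11111111-2222-3333-4444-555555555555'], latest=True)
    for instance_uuid, fault_list in faults.items():
        print(instance_uuid, len(fault_list))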
+
+##################
+
+
+@pick_context_manager_writer
+def action_start(context, values):
+    convert_objects_related_datetimes(values, 'start_time', 'updated_at')
+    action_ref = models.InstanceAction()
+    action_ref.update(values)
+    action_ref.save(context.session)
+    return action_ref
+
+
+@pick_context_manager_writer
+def action_finish(context, values):
+    convert_objects_related_datetimes(values, 'start_time', 'finish_time',
+                                      'updated_at')
+    query = model_query(context, models.InstanceAction).\
+                        filter_by(instance_uuid=values['instance_uuid']).\
+                        filter_by(request_id=values['request_id'])
+    if query.update(values) != 1:
+        raise exception.InstanceActionNotFound(
+                                    request_id=values['request_id'],
+                                    instance_uuid=values['instance_uuid'])
+    return query.one()
+
+
+@pick_context_manager_reader
+def actions_get(context, instance_uuid, limit=None, marker=None,
+                filters=None):
+    """Get all instance actions for the provided uuid and filters."""
+    if limit == 0:
+        return []
+
+    sort_keys = ['created_at', 'id']
+    sort_dirs = ['desc', 'desc']
+
+    query_prefix = model_query(context, models.InstanceAction).\
+        filter_by(instance_uuid=instance_uuid)
+
+    model_object = models.InstanceAction
+    query_prefix = _get_query_nova_resource_by_changes_time(query_prefix,
+                                                            filters,
+                                                            model_object)
+
+    if marker is not None:
+        marker = action_get_by_request_id(context, instance_uuid, marker)
+        if not marker:
+            raise exception.MarkerNotFound(marker=marker)
+    actions = sqlalchemyutils.paginate_query(query_prefix,
+                                             models.InstanceAction, limit,
+                                             sort_keys, marker=marker,
+                                             sort_dirs=sort_dirs).all()
+    return actions
+
+
+@pick_context_manager_reader
+def action_get_by_request_id(context, instance_uuid, request_id):
+    """Get the action by request_id and given instance."""
+    action = _action_get_by_request_id(context, instance_uuid, request_id)
+    return action
+
+
+def _action_get_by_request_id(context, instance_uuid, request_id):
+    result = model_query(context, models.InstanceAction).\
+                         filter_by(instance_uuid=instance_uuid).\
+                         filter_by(request_id=request_id).\
+                         order_by(desc("created_at"), desc("id")).\
+                         first()
+    return result
+
+
+def _action_get_last_created_by_instance_uuid(context, instance_uuid):
+    result = (model_query(context, models.InstanceAction).
+                     filter_by(instance_uuid=instance_uuid).
+                     order_by(desc("created_at"), desc("id")).
+                     first())
+    return result
+
+
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def action_event_start(context, values):
+    """Start an event on an instance action."""
+    convert_objects_related_datetimes(values, 'start_time')
+    action = _action_get_by_request_id(context, values['instance_uuid'],
+                                       values['request_id'])
+    # When nova-compute restarts, the context is generated again in the
+    # init_host workflow, so the request_id differs from the request_id
+    # recorded in InstanceAction and we cannot look up the original record
+    # by request_id. Try to get the last created action so that
+    # init_instance can continue to finish the recovery action, like:
+    # powering_off, unpausing, and so on.
+    update_action = True
+    if not action and not context.project_id:
+        action = _action_get_last_created_by_instance_uuid(
+            context, values['instance_uuid'])
+        # If we couldn't find an action by the request_id, we don't want to
+        # update this action since it likely represents an inactive action.
+        update_action = False
+
+    if not action:
+        raise exception.InstanceActionNotFound(
+                                    request_id=values['request_id'],
+                                    instance_uuid=values['instance_uuid'])
+
+    values['action_id'] = action['id']
+
+    event_ref = models.InstanceActionEvent()
+    event_ref.update(values)
+    context.session.add(event_ref)
+
+    # Update action updated_at.
+    if update_action:
+        action.update({'updated_at': values['start_time']})
+        action.save(context.session)
+
+    return event_ref
+
+
+# NOTE: We need the retry_on_deadlock decorator for cases like resize where
+# a lot of events are happening at once between multiple hosts trying to
+# update the same action record in a small time window.
+@oslo_db_api.wrap_db_retry(max_retries=5, retry_on_deadlock=True)
+@pick_context_manager_writer
+def action_event_finish(context, values):
+    """Finish an event on an instance action."""
+    convert_objects_related_datetimes(values, 'start_time', 'finish_time')
+    action = _action_get_by_request_id(context, values['instance_uuid'],
+                                       values['request_id'])
+    # When nova-compute restarts, the context is generated again in the
+    # init_host workflow, so the request_id differs from the request_id
+    # recorded in InstanceAction and we cannot look up the original record
+    # by request_id. Try to get the last created action so that
+    # init_instance can continue to finish the recovery action, like:
+    # powering_off, unpausing, and so on.
+    update_action = True
+    if not action and not context.project_id:
+        action = _action_get_last_created_by_instance_uuid(
+            context, values['instance_uuid'])
+        # If we couldn't find an action by the request_id, we don't want to
+        # update this action since it likely represents an inactive action.
+        update_action = False
+
+    if not action:
+        raise exception.InstanceActionNotFound(
+                                    request_id=values['request_id'],
+                                    instance_uuid=values['instance_uuid'])
+
+    event_ref = model_query(context, models.InstanceActionEvent).\
+                            filter_by(action_id=action['id']).\
+                            filter_by(event=values['event']).\
+                            first()
+
+    if not event_ref:
+        raise exception.InstanceActionEventNotFound(action_id=action['id'],
+                                                    event=values['event'])
+    event_ref.update(values)
+
+    if values['result'].lower() == 'error':
+        action.update({'message': 'Error'})
+
+    # Update action updated_at.
+    if update_action:
+        action.update({'updated_at': values['finish_time']})
+        action.save(context.session)
+
+    return event_ref
+
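A sketch of how a caller could pair these helpers around a unit of work. The
action and event names and the context attributes are illustrative, and the
field names follow the InstanceAction/InstanceActionEvent models these
functions query, which are assumed to match their Nova counterparts.

    from oslo_utils import timeutils

    def run_tracked(ctxt, instance_uuid, do_work):
        action_start(ctxt, {'instance_uuid': instance_uuid,
                            'request_id': ctxt.request_id,
                            'user_id': ctxt.user_id,
                            'project_id': ctxt.project_id,
                            'action': 'build',
                            'start_time': timeutils.utcnow()})
        action_event_start(ctxt, {'instance_uuid': instance_uuid,
                                  'request_id': ctxt.request_id,
                                  'event': 'compile',
                                  'start_time': timeutils.utcnow()})
        result = 'Error'
        try:
            do_work()
            result = 'Success'
        finally:
            # An 'Error' result also marks the parent action's message above.
            action_event_finish(ctxt, {'instance_uuid': instance_uuid,
                                       'request_id': ctxt.request_id,
                                       'event': 'compile',
                                       'result': result,
                                       'finish_time': timeutils.utcnow()})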
+
+@pick_context_manager_reader
+def action_events_get(context, action_id):
+    events = model_query(context, models.InstanceActionEvent).\
+                         filter_by(action_id=action_id).\
+                         order_by(desc("created_at"), desc("id")).\
+                         all()
+
+    return events
+
+
+@pick_context_manager_reader
+def action_event_get_by_id(context, action_id, event_id):
+    event = model_query(context, models.InstanceActionEvent).\
+                        filter_by(action_id=action_id).\
+                        filter_by(id=event_id).\
+                        first()
+
+    return event
+
+
+##################
+
+
+@require_context
+@pick_context_manager_writer
+def ec2_instance_create(context, instance_uuid, id=None):
+    """Create ec2 compatible instance by provided uuid."""
+    ec2_instance_ref = models.InstanceIdMapping()
+    ec2_instance_ref.update({'uuid': instance_uuid})
+    if id is not None:
+        ec2_instance_ref.update({'id': id})
+
+    ec2_instance_ref.save(context.session)
+
+    return ec2_instance_ref
+
+
+@require_context
+@pick_context_manager_reader
+def ec2_instance_get_by_uuid(context, instance_uuid):
+    result = _ec2_instance_get_query(context).\
+                    filter_by(uuid=instance_uuid).\
+                    first()
+
+    if not result:
+        raise exception.InstanceNotFound(instance_id=instance_uuid)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def ec2_instance_get_by_id(context, instance_id):
+    result = _ec2_instance_get_query(context).\
+                    filter_by(id=instance_id).\
+                    first()
+
+    if not result:
+        raise exception.InstanceNotFound(instance_id=instance_id)
+
+    return result
+
+
+@require_context
+@pick_context_manager_reader
+def get_instance_uuid_by_ec2_id(context, ec2_id):
+    result = ec2_instance_get_by_id(context, ec2_id)
+    return result['uuid']
+
+
+def _ec2_instance_get_query(context):
+    return model_query(context, models.InstanceIdMapping, read_deleted='yes')
+
+
+##################
+
+
+def _task_log_get_query(context, task_name, period_beginning,
+                        period_ending, host=None, state=None):
+    values = {'period_beginning': period_beginning,
+              'period_ending': period_ending}
+    values = convert_objects_related_datetimes(values, *values.keys())
+
+    query = model_query(context, models.TaskLog).\
+                     filter_by(task_name=task_name).\
+                     filter_by(period_beginning=values['period_beginning']).\
+                     filter_by(period_ending=values['period_ending'])
+    if host is not None:
+        query = query.filter_by(host=host)
+    if state is not None:
+        query = query.filter_by(state=state)
+    return query
+
+
+@pick_context_manager_reader
+def task_log_get(context, task_name, period_beginning, period_ending, host,
+                 state=None):
+    return _task_log_get_query(context, task_name, period_beginning,
+                               period_ending, host, state).first()
+
+
+@pick_context_manager_reader
+def task_log_get_all(context, task_name, period_beginning, period_ending,
+                     host=None, state=None):
+    return _task_log_get_query(context, task_name, period_beginning,
+                               period_ending, host, state).all()
+
+
+@pick_context_manager_writer
+def task_log_begin_task(context, task_name, period_beginning, period_ending,
+                        host, task_items=None, message=None):
+    values = {'period_beginning': period_beginning,
+              'period_ending': period_ending}
+    values = convert_objects_related_datetimes(values, *values.keys())
+
+    task = models.TaskLog()
+    task.task_name = task_name
+    task.period_beginning = values['period_beginning']
+    task.period_ending = values['period_ending']
+    task.host = host
+    task.state = "RUNNING"
+    if message:
+        task.message = message
+    if task_items:
+        task.task_items = task_items
+    try:
+        task.save(context.session)
+    except db_exc.DBDuplicateEntry:
+        raise exception.TaskAlreadyRunning(task_name=task_name, host=host)
+
+
+@pick_context_manager_writer
+def task_log_end_task(context, task_name, period_beginning, period_ending,
+                      host, errors, message=None):
+    values = dict(state="DONE", errors=errors)
+    if message:
+        values["message"] = message
+
+    rows = _task_log_get_query(context, task_name, period_beginning,
+                               period_ending, host).update(values)
+    if rows == 0:
+        # It's not running!
+        raise exception.TaskNotRunning(task_name=task_name, host=host)
+
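A hedged example of the intended pairing (task name and period are made up):
mark a periodic task as RUNNING, do the work, then mark it DONE.

    import datetime

    def run_audit(ctxt, host):
        beginning = datetime.datetime(2020, 4, 1)
        ending = datetime.datetime(2020, 5, 1)
        task_log_begin_task(ctxt, 'repo_sync_audit', beginning, ending,
                            host, message='audit started')
        errors = 0
        # ... do the periodic work here, counting failures in errors ...
        task_log_end_task(ctxt, 'repo_sync_audit', beginning, ending,
                          host, errors, message='audit finished')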
+
+##################
+
+
+def _archive_if_instance_deleted(table, shadow_table, instances, conn,
+                                 max_rows):
+    """Look for records that pertain to deleted instances, but may not be
+    deleted themselves. This catches cases where we delete an instance,
+    but leave some residue because of a failure in a cleanup path or
+    similar.
+
+    Logic is: if I have a column called instance_uuid, and that instance
+    is deleted, then I can be deleted.
+    """
+    query_insert = shadow_table.insert(inline=True).\
+        from_select(
+            [c.name for c in table.c],
+            sql.select(
+                [table],
+                and_(instances.c.deleted != instances.c.deleted.default.arg,
+                     instances.c.uuid == table.c.instance_uuid)).
+            order_by(table.c.id).limit(max_rows))
+
+    query_delete = sql.select(
+        [table.c.id],
+        and_(instances.c.deleted != instances.c.deleted.default.arg,
+             instances.c.uuid == table.c.instance_uuid)).\
+        order_by(table.c.id).limit(max_rows)
+    delete_statement = DeleteFromSelect(table, query_delete,
+                                        table.c.id)
+
+    try:
+        with conn.begin():
+            conn.execute(query_insert)
+            result_delete = conn.execute(delete_statement)
+            return result_delete.rowcount
+    except db_exc.DBReferenceError as ex:
+        LOG.warning('Failed to archive %(table)s: %(error)s',
+                    {'table': table.name,
+                     'error': six.text_type(ex)})
+        return 0
+
+
+def _archive_deleted_rows_for_table(tablename, max_rows):
+    """Move up to max_rows rows from one table to the corresponding
+    shadow table.
+
+    :returns: number of rows archived
+    """
+    engine = get_engine()
+    conn = engine.connect()
+    metadata = MetaData()
+    metadata.bind = engine
+    # NOTE(tdurakov): table metadata should be received
+    # from models, not db tables. The default value specified by
+    # SoftDeleteMixin is known only by the models, not the DB layer.
+    # IMPORTANT: please do not change the source of metadata information
+    # for the table.
+    table = models.BASE.metadata.tables[tablename]
+
+    shadow_tablename = _SHADOW_TABLE_PREFIX + tablename
+    rows_archived = 0
+    deleted_instance_uuids = []
+    try:
+        shadow_table = Table(shadow_tablename, metadata, autoload=True)
+    except NoSuchTableError:
+        # No corresponding shadow table; skip it.
+        return rows_archived, deleted_instance_uuids
+
+    if tablename == "dns_domains":
+        # We have one table (dns_domains) where the key is called
+        # "domain" rather than "id"
+        column = table.c.domain
+    else:
+        column = table.c.id
+    # NOTE(guochbo): Use DeleteFromSelect to avoid the database's limit
+    # on the maximum number of parameters in one SQL statement.
+    deleted_column = table.c.deleted
+    columns = [c.name for c in table.c]
+
+    # NOTE(clecomte): Tables instance_actions and instance_actions_events
+    # have to be managed differently, so we soft-delete them here to let
+    # the archive work the same for all tables.
+    # NOTE(takashin): The record in table migrations should be
+    # soft deleted when the instance is deleted.
+    # This is just for upgrading.
+    if tablename in ("instance_actions", "migrations"):
+        instances = models.BASE.metadata.tables["instances"]
+        deleted_instances = sql.select([instances.c.uuid]).\
+            where(instances.c.deleted != instances.c.deleted.default.arg)
+        update_statement = table.update().values(deleted=table.c.id).\
+            where(table.c.instance_uuid.in_(deleted_instances))
+
+        conn.execute(update_statement)
+
+    elif tablename == "instance_actions_events":
+        # NOTE(clecomte): we have to grab the relations from
+        # instances because instance_actions_events relies on
+        # action_id and not uuid
+        instances = models.BASE.metadata.tables["instances"]
+        instance_actions = models.BASE.metadata.tables["instance_actions"]
+        deleted_instances = sql.select([instances.c.uuid]).\
+            where(instances.c.deleted != instances.c.deleted.default.arg)
+        deleted_actions = sql.select([instance_actions.c.id]).\
+            where(instance_actions.c.instance_uuid.in_(deleted_instances))
+
+        update_statement = table.update().values(deleted=table.c.id).\
+            where(table.c.action_id.in_(deleted_actions))
+
+        conn.execute(update_statement)
+
+    select = sql.select([column],
+                        deleted_column != deleted_column.default.arg).\
+                        order_by(column).limit(max_rows)
+    rows = conn.execute(select).fetchall()
+    records = [r[0] for r in rows]
+
+    if records:
+        insert = shadow_table.insert(inline=True).\
+                from_select(columns, sql.select([table], column.in_(records)))
+        delete = table.delete().where(column.in_(records))
+        # NOTE(tssurya): In order to facilitate the deletion of records from
+        # instance_mappings and request_specs tables in the nova_api DB, the
+        # rows of deleted instances from the instances table are stored prior
+        # to their deletion. Basically the uuids of the archived instances
+        # are queried and returned.
+        if tablename == "instances":
+            query_select = sql.select([table.c.uuid], table.c.id.in_(records))
+            rows = conn.execute(query_select).fetchall()
+            deleted_instance_uuids = [r[0] for r in rows]
+
+        try:
+            # Group the insert and delete in a transaction.
+            with conn.begin():
+                conn.execute(insert)
+                result_delete = conn.execute(delete)
+            rows_archived = result_delete.rowcount
+        except db_exc.DBReferenceError as ex:
+            # A foreign key constraint keeps us from deleting some of
+            # these rows until we clean up a dependent table.  Just
+            # skip this table for now; we'll come back to it later.
+            LOG.warning("IntegrityError detected when archiving table "
+                        "%(tablename)s: %(error)s",
+                        {'tablename': tablename, 'error': six.text_type(ex)})
+
+    if ((max_rows is None or rows_archived < max_rows)
+            and 'instance_uuid' in columns):
+        instances = models.BASE.metadata.tables['instances']
+        limit = max_rows - rows_archived if max_rows is not None else None
+        extra = _archive_if_instance_deleted(table, shadow_table, instances,
+                                             conn, limit)
+        rows_archived += extra
+
+    return rows_archived, deleted_instance_uuids
+
+
+def archive_deleted_rows(max_rows=None):
+    """Move up to max_rows rows from production tables to the corresponding
+    shadow tables.
+
+    :returns: dict that maps table name to number of rows archived from that
+              table, for example:
+
+    ::
+
+        {
+            'instances': 5,
+            'block_device_mapping': 5,
+            'pci_devices': 2,
+        }
+
+    """
+    table_to_rows_archived = {}
+    deleted_instance_uuids = []
+    total_rows_archived = 0
+    meta = MetaData(get_engine(use_slave=True))
+    meta.reflect()
+    # Reverse sort the tables so we get the leaf nodes first for processing.
+    for table in reversed(meta.sorted_tables):
+        tablename = table.name
+        rows_archived = 0
+        # skip the special sqlalchemy-migrate migrate_version table and any
+        # shadow tables
+        if (tablename == 'migrate_version' or
+                tablename.startswith(_SHADOW_TABLE_PREFIX)):
+            continue
+        rows_archived, deleted_instance_uuid = (
+            _archive_deleted_rows_for_table(
+                tablename, max_rows=max_rows - total_rows_archived))
+        total_rows_archived += rows_archived
+        if tablename == 'instances':
+            deleted_instance_uuids = deleted_instance_uuid
+        # Only report results for tables that had updates.
+        if rows_archived:
+            table_to_rows_archived[tablename] = rows_archived
+        if total_rows_archived >= max_rows:
+            break
+    return table_to_rows_archived, deleted_instance_uuids
+
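An illustrative call (not part of the commit): archive at most 1000
soft-deleted rows per run and report what was moved to the shadow tables.

    archived, instance_uuids = archive_deleted_rows(max_rows=1000)
    for table_name, count in archived.items():
        print('%s: archived %d rows' % (table_name, count))
    print('instances archived: %d' % len(instance_uuids))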
+
+def _purgeable_tables(metadata):
+    return [t for t in metadata.sorted_tables
+            if (t.name.startswith(_SHADOW_TABLE_PREFIX) and not
+                t.name.endswith('migrate_version'))]
+
+
+def purge_shadow_tables(context, before_date, status_fn=None):
+    engine = get_engine(context=context)
+    conn = engine.connect()
+    metadata = MetaData()
+    metadata.bind = engine
+    metadata.reflect()
+    total_deleted = 0
+
+    if status_fn is None:
+        status_fn = lambda m: None
+
+    # Some things never get formally deleted, and thus deleted_at
+    # is never set. So, prefer specific timestamp columns here
+    # for those special cases.
+    overrides = {
+        'shadow_instance_actions': 'created_at',
+        'shadow_instance_actions_events': 'created_at',
+    }
+
+    for table in _purgeable_tables(metadata):
+        if before_date is None:
+            col = None
+        elif table.name in overrides:
+            col = getattr(table.c, overrides[table.name])
+        elif hasattr(table.c, 'deleted_at'):
+            col = table.c.deleted_at
+        elif hasattr(table.c, 'updated_at'):
+            col = table.c.updated_at
+        elif hasattr(table.c, 'created_at'):
+            col = table.c.created_at
+        else:
+            status_fn(_('Unable to purge table %(table)s because it '
+                        'has no timestamp column') % {
+                            'table': table.name})
+            continue
+
+        if col is not None:
+            delete = table.delete().where(col < before_date)
+        else:
+            delete = table.delete()
+
+        deleted = conn.execute(delete)
+        if deleted.rowcount > 0:
+            status_fn(_('Deleted %(rows)i rows from %(table)s based on '
+                        'timestamp column %(col)s') % {
+                            'rows': deleted.rowcount,
+                            'table': table.name,
+                            'col': col is None and '(n/a)' or col.name})
+        total_deleted += deleted.rowcount
+
+    return total_deleted
+
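A hypothetical invocation: purge shadow rows older than 90 days and print the
helper's progress messages through the status callback (ctxt is a placeholder
request context).

    import datetime
    from oslo_utils import timeutils

    cutoff = timeutils.utcnow() - datetime.timedelta(days=90)
    deleted = purge_shadow_tables(ctxt, cutoff, status_fn=print)
    print('purged %d shadow rows' % deleted)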
+
+@pick_context_manager_writer
+def service_uuids_online_data_migration(context, max_count):
+    from gosbs.objects import service
+
+    count_all = 0
+    count_hit = 0
+
+    db_services = model_query(context, models.Service).filter_by(
+        uuid=None).limit(max_count)
+    for db_service in db_services:
+        count_all += 1
+        service_obj = service.Service._from_db_object(
+            context, service.Service(), db_service)
+        if 'uuid' in service_obj:
+            count_hit += 1
+    return count_all, count_hit
+
+
+####################
+
+
+@pick_context_manager_reader
+def pci_device_get_by_addr(context, node_id, dev_addr):
+    pci_dev_ref = model_query(context, models.PciDevice).\
+                        filter_by(compute_node_id=node_id).\
+                        filter_by(address=dev_addr).\
+                        first()
+    if not pci_dev_ref:
+        raise exception.PciDeviceNotFound(node_id=node_id, address=dev_addr)
+    return pci_dev_ref
+
+
+@pick_context_manager_reader
+def pci_device_get_by_id(context, id):
+    pci_dev_ref = model_query(context, models.PciDevice).\
+                        filter_by(id=id).\
+                        first()
+    if not pci_dev_ref:
+        raise exception.PciDeviceNotFoundById(id=id)
+    return pci_dev_ref
+
+
+@pick_context_manager_reader
+def pci_device_get_all_by_node(context, node_id):
+    return model_query(context, models.PciDevice).\
+                       filter_by(compute_node_id=node_id).\
+                       all()
+
+
+@pick_context_manager_reader
+def pci_device_get_all_by_parent_addr(context, node_id, parent_addr):
+    return model_query(context, models.PciDevice).\
+                       filter_by(compute_node_id=node_id).\
+                       filter_by(parent_addr=parent_addr).\
+                       all()
+
+
+@require_context
+@pick_context_manager_reader
+def pci_device_get_all_by_instance_uuid(context, instance_uuid):
+    return model_query(context, models.PciDevice).\
+                       filter_by(status='allocated').\
+                       filter_by(instance_uuid=instance_uuid).\
+                       all()
+
+
+@pick_context_manager_reader
+def _instance_pcidevs_get_multi(context, instance_uuids):
+    if not instance_uuids:
+        return []
+    return model_query(context, models.PciDevice).\
+        filter_by(status='allocated').\
+        filter(models.PciDevice.instance_uuid.in_(instance_uuids))
+
+
+@pick_context_manager_writer
+def pci_device_destroy(context, node_id, address):
+    result = model_query(context, models.PciDevice).\
+                         filter_by(compute_node_id=node_id).\
+                         filter_by(address=address).\
+                         soft_delete()
+    if not result:
+        raise exception.PciDeviceNotFound(node_id=node_id, address=address)
+
+
+@pick_context_manager_writer
+def pci_device_update(context, node_id, address, values):
+    query = model_query(context, models.PciDevice, read_deleted="no").\
+                    filter_by(compute_node_id=node_id).\
+                    filter_by(address=address)
+    if query.update(values) == 0:
+        device = models.PciDevice()
+        device.update(values)
+        context.session.add(device)
+    return query.one()
+
+
+####################
+
+
+@pick_context_manager_writer
+def instance_tag_add(context, instance_uuid, tag):
+    tag_ref = models.Tag()
+    tag_ref.resource_id = instance_uuid
+    tag_ref.tag = tag
+
+    try:
+        _check_instance_exists_in_project(context, instance_uuid)
+        with get_context_manager(context).writer.savepoint.using(context):
+            context.session.add(tag_ref)
+    except db_exc.DBDuplicateEntry:
+        # NOTE(snikitin): We should ignore tags duplicates
+        pass
+
+    return tag_ref
+
+
+@pick_context_manager_writer
+def instance_tag_set(context, instance_uuid, tags):
+    _check_instance_exists_in_project(context, instance_uuid)
+
+    existing = context.session.query(models.Tag.tag).filter_by(
+        resource_id=instance_uuid).all()
+
+    existing = set(row.tag for row in existing)
+    tags = set(tags)
+    to_delete = existing - tags
+    to_add = tags - existing
+
+    if to_delete:
+        context.session.query(models.Tag).filter_by(
+            resource_id=instance_uuid).filter(
+            models.Tag.tag.in_(to_delete)).delete(
+            synchronize_session=False)
+
+    if to_add:
+        data = [
+            {'resource_id': instance_uuid, 'tag': tag} for tag in to_add]
+        context.session.execute(models.Tag.__table__.insert(), data)
+
+    return context.session.query(models.Tag).filter_by(
+        resource_id=instance_uuid).all()
+
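The reconciliation above boils down to simple set arithmetic; a database-free
sketch with made-up tag names:

    existing = {'db', 'web'}
    desired = {'web', 'worker'}
    to_delete = existing - desired   # {'db'} is removed
    to_add = desired - existing      # {'worker'} is bulk-inserted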
+
+@pick_context_manager_reader
+def instance_tag_get_by_instance_uuid(context, instance_uuid):
+    _check_instance_exists_in_project(context, instance_uuid)
+    return context.session.query(models.Tag).filter_by(
+        resource_id=instance_uuid).all()
+
+
+@pick_context_manager_writer
+def instance_tag_delete(context, instance_uuid, tag):
+    _check_instance_exists_in_project(context, instance_uuid)
+    result = context.session.query(models.Tag).filter_by(
+        resource_id=instance_uuid, tag=tag).delete()
+
+    if not result:
+        raise exception.InstanceTagNotFound(instance_id=instance_uuid,
+                                            tag=tag)
+
+
+@pick_context_manager_writer
+def instance_tag_delete_all(context, instance_uuid):
+    _check_instance_exists_in_project(context, instance_uuid)
+    context.session.query(models.Tag).filter_by(
+        resource_id=instance_uuid).delete()
+
+
+@pick_context_manager_reader
+def instance_tag_exists(context, instance_uuid, tag):
+    _check_instance_exists_in_project(context, instance_uuid)
+    q = context.session.query(models.Tag).filter_by(
+        resource_id=instance_uuid, tag=tag)
+    return context.session.query(q.exists()).scalar()
+
+
+####################
+
+
+@pick_context_manager_writer
+def console_auth_token_create(context, values):
+    instance_uuid = values.get('instance_uuid')
+    _check_instance_exists_in_project(context, instance_uuid)
+    token_ref = models.ConsoleAuthToken()
+    token_ref.update(values)
+    context.session.add(token_ref)
+    return token_ref
+
+
+@pick_context_manager_reader
+def console_auth_token_get_valid(context, token_hash, instance_uuid=None):
+    if instance_uuid is not None:
+        _check_instance_exists_in_project(context, instance_uuid)
+    query = context.session.query(models.ConsoleAuthToken).\
+        filter_by(token_hash=token_hash)
+    if instance_uuid is not None:
+        query = query.filter_by(instance_uuid=instance_uuid)
+    return query.filter(
+        models.ConsoleAuthToken.expires > timeutils.utcnow_ts()).first()
+
+
+@pick_context_manager_writer
+def console_auth_token_destroy_all_by_instance(context, instance_uuid):
+    context.session.query(models.ConsoleAuthToken).\
+        filter_by(instance_uuid=instance_uuid).delete()
+
+
+@pick_context_manager_writer
+def console_auth_token_destroy_expired_by_host(context, host):
+    context.session.query(models.ConsoleAuthToken).\
+        filter_by(host=host).\
+        filter(models.ConsoleAuthToken.expires <= timeutils.utcnow_ts()).\
+        delete()

diff --git a/gosbs/db/sqlalchemy/models.py b/gosbs/db/sqlalchemy/models.py
new file mode 100644
index 0000000..e4fc530
--- /dev/null
+++ b/gosbs/db/sqlalchemy/models.py
@@ -0,0 +1,430 @@
+# Copyright (c) 2011 X.commerce, a business unit of eBay Inc.
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Piston Cloud Computing, Inc.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/models.py
+# Only a small part of the original code remains
+
+"""
+SQLAlchemy models for gosbs data.
+"""
+
+import uuid
+
+from oslo_config import cfg
+from oslo_db.sqlalchemy import models
+from oslo_utils import timeutils
+from sqlalchemy import (Column, Index, Integer, BigInteger, Enum, String,
+                        schema, Unicode)
+from sqlalchemy.dialects.mysql import MEDIUMTEXT
+from sqlalchemy.ext.declarative import declarative_base
+from sqlalchemy import orm
+from sqlalchemy import ForeignKey, DateTime, Boolean, Text, Float, Time
+
+from gosbs.db.sqlalchemy import types
+
+CONF = cfg.CONF
+BASE = declarative_base()
+
+
+def MediumText():
+    return Text().with_variant(MEDIUMTEXT(), 'mysql')
+
+
+class NovaBase(models.ModelBase):
+    metadata = None
+
+    def __copy__(self):
+        """Implement a safe copy.copy().
+
+        SQLAlchemy-mapped objects travel with an object
+        called an InstanceState, which is pegged to that object
+        specifically and tracks everything about that object.  It's
+        critical within all attribute operations, including gets
+        and deferred loading.   This object definitely cannot be
+        shared among two instances, and must be handled.
+
+        The copy routine here makes use of session.merge() which
+        already essentially implements a "copy" style of operation,
+        which produces a new instance with a new InstanceState and copies
+        all the data along mapped attributes without using any SQL.
+
+        The mode we are using here has the caveat that the given object
+        must be "clean", e.g. that it has no database-loaded state
+        that has been updated and not flushed.   This is a good thing,
+        as creating a copy of an object including non-flushed, pending
+        database state is probably not a good idea; neither represents
+        what the actual row looks like, and only one should be flushed.
+
+        """
+        session = orm.Session()
+
+        copy = session.merge(self, load=False)
+        session.expunge(copy)
+        return copy
+
+
+class Service(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a running service on a host."""
+
+    __tablename__ = 'services'
+    __table_args__ = (
+        schema.UniqueConstraint("host", "topic", "deleted",
+                                name="uniq_services0host0topic0deleted"),
+        schema.UniqueConstraint("host", "binary", "deleted",
+                                name="uniq_services0host0binary0deleted"),
+        Index('services_uuid_idx', 'uuid', unique=True),
+    )
+
+    id = Column(Integer, primary_key=True)
+    uuid = Column(String(36), nullable=True)
+    host = Column(String(255))
+    binary = Column(String(255))
+    topic = Column(String(255))
+    report_count = Column(Integer, nullable=False, default=0)
+    disabled = Column(Boolean, default=False)
+    disabled_reason = Column(String(255))
+    last_seen_up = Column(DateTime, nullable=True)
+    forced_down = Column(Boolean(), default=False)
+    version = Column(Integer, default=0)
+
+
+class Tasks(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a scheduled task for a service."""
+    __tablename__ = 'tasks'
+    __table_args__ = (
+    )
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255))
+    service_uuid = Column(String(36), nullable=True)
+    repet = Column(Boolean(), default=False)
+    run = Column(DateTime)
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting'),
+                            nullable=True)
+    last = Column(DateTime, default=timeutils.utcnow)
+    priority = Column(Integer, default=5)
+
+class Projects(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a build project."""
+    __tablename__ = 'projects'
+    __table_args__ = (
+    )
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255))
+    active = Column(Boolean(), default=False)
+    auto = Column(Boolean(), default=False)
+
+class ProjectsMetadata(BASE, NovaBase):
+    """Represents metadata for a project."""
+    __tablename__ = 'projects_metadata'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    project_uuid = Column(String(36), ForeignKey('projects.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    project_repo_uuid = Column(String(36), ForeignKey('repos.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    project_profile = Column(String(255))
+    project_profile_repo_uuid = Column(String(36), ForeignKey('repos.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    titel = Column(String(255))
+    description = Column(Text)
+
+class ProjectsRepos(BASE, NovaBase):
+    """Represents a project's settings for a repo."""
+    __tablename__ = 'projects_repos'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    project_uuid = Column(String(36), ForeignKey('projects.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    repo_uuid = Column(String(36), ForeignKey('repos.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    build = Column(Boolean(), default=False)
+    test = Column(Boolean(), default=False)
+    repoman = Column(Boolean(), default=False)
+    qa = Column(Boolean(), default=False)
+    auto = Column(Boolean(), default=False)
+    depclean = Column(Boolean(), default=False)
+
+class ProjectsBuilds(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a build of an ebuild for a project."""
+    __tablename__ = 'projects_builds'
+    __table_args__ = (
+    )
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    project_uuid = Column(String(36), ForeignKey('projects.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    user_id = Column(Integer, ForeignKey('users.id'),)
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting'),
+                            nullable=True)
+    priority = Column(Integer, default=5)
+
+
+class BuildsIUses(BASE, NovaBase):
+    __tablename__ = 'builds_uses'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    build_uuid = Column(String(36), ForeignKey('projects_builds.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    use_id = Column(Integer, ForeignKey('uses.id'),)
+    status = Column(Boolean, default=False)
+
+
+class Users(BASE, NovaBase):
+    __tablename__ = 'users'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    user_id = Column(Integer)
+    name = Column(String(255))
+
+
+class Repos(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a repository."""
+    __tablename__ = 'repos'
+    __table_args__ = (
+    )
+
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255))
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting'),
+                            nullable=True)
+    description = Column(Text)
+    src_url = Column(String(255))
+    auto = Column(Boolean(), default=False)
+    repo_type = Column(Enum('project', 'ebuild'), nullable=True)
+
+
+class Categories(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a package category."""
+    __tablename__ = 'categories'
+    __table_args__ = (
+    )
+
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255))
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting'),
+                            nullable=True)
+
+class CategoriesMetadata(BASE, NovaBase):
+    """Represents metadata for a category."""
+    __tablename__ = 'categories_metadata'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    category_uuid = Column(String(36), ForeignKey('categories.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    checksum = Column(String(255))
+    description = Column(Text)
+
+class Packages(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a package."""
+    __tablename__ = 'packages'
+    __table_args__ = (
+    )
+
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255))
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting'),
+                            nullable=True)
+    category_uuid = Column(String(36), ForeignKey('categories.uuid'), nullable=False,
+                   default=lambda: str(uuid.uuid4()))
+    repo_uuid = Column(String(36), ForeignKey('repos.uuid'), nullable=False,
+                   default=lambda: str(uuid.uuid4()))
+
+class PackagesMetadata(BASE, NovaBase):
+    """Represents metadata for a package."""
+    __tablename__ = 'packages_metadata'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    package_uuid = Column(String(36), ForeignKey('packages.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    description = Column(Text)
+    gitlog = Column(Text)
+    checksum = Column(String(200))
+
+class PackagesEmails(BASE, NovaBase):
+    """Represents the mapping between a package and an email address."""
+    __tablename__ = 'packages_emails'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    package_uuid = Column(String(36), ForeignKey('packages.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    email_id = Column(Integer, ForeignKey('emails.id'))
+
+class Emails(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents an email address."""
+    __tablename__ = 'emails'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    email = Column(String(255))
+
+class Uses(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a USE flag."""
+    __tablename__ = 'uses'
+    __table_args__ = (
+    )
+
+    id = Column(Integer, primary_key=True)
+    flag = Column(String(255))
+    description = Column(Text)
+
+class Restrictions(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents an ebuild restriction."""
+    __tablename__ = 'restrictions'
+    __table_args__ = (
+    )
+
+    id = Column(Integer, primary_key=True)
+    restriction = Column(String(255))
+
+class Keywords(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a keyword."""
+    __tablename__ = 'keywords'
+    __table_args__ = (
+    )
+
+    id = Column(Integer, primary_key=True)
+    keyword = Column(String(255))
+
+class Ebuilds(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    __tablename__ = 'ebuilds'
+    __table_args__ = (
+    )
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    package_uuid = Column(String(36), ForeignKey('packages.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    version = Column(String(100))
+    checksum = Column(String(200))
+
+class EbuildsMetadata(BASE, NovaBase):
+    __tablename__ = 'ebuilds_metadata'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    commit = Column(String(100))
+    commit_msg = Column(Text)
+    description = Column(Text)
+    slot = Column(String(10))
+    homepage = Column(String(200))
+    license = Column(String(200))
+
+class EbuildsRestrictions(BASE, NovaBase):
+    __tablename__ = 'ebuilds_restrictions'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    restriction_id = Column(Integer, ForeignKey('restrictions.id'),)
+
+class EbuildsIUses(BASE, NovaBase):
+    __tablename__ = 'ebuilds_uses'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    use_id = Column(Integer, ForeignKey('uses.id'),)
+    status = Column(Boolean, default=False)
+
+class EbuildsKeywords(BASE, NovaBase):
+    __tablename__ = 'ebuilds_keywords'
+    __table_args__ = (
+    )
+    id = Column(Integer, primary_key=True)
+    ebuild_uuid = Column(String(36), ForeignKey('ebuilds.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    keyword_id = Column(Integer, ForeignKey('keywords.id'),)
+    status = Column(Enum('stable','unstable','negative'))
+
+
+class Images(BASE, NovaBase):
+    """Represents an image in the datastore."""
+    __tablename__ = 'images'
+    __table_args__ = (
+    )
+
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255))
+    size = Column(BigInteger().with_variant(Integer, "sqlite"))
+    status = Column(Integer, nullable=False)
+    min_disk = Column(Integer, nullable=False, default=0)
+    min_ram = Column(Integer, nullable=False, default=0)
+
+class Flavors(BASE, NovaBase):
+    """Represents possible flavors for instances."""
+    __tablename__ = 'flavors'
+    __table_args__ = (
+    )
+
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    name = Column(String(255), nullable=False)
+    ram = Column(Integer, nullable=False)
+    vcpus = Column(Integer, nullable=False)
+    disk = Column(Integer)
+    swap = Column(Integer, nullable=False, default=0)
+    description = Column(Text)
+
+class Instances(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents a guest VM."""
+    __tablename__ = 'instances'
+    __table_args__ = (
+    )
+
+    uuid = Column(String(36), primary_key=True,
+                default=lambda: str(uuid.uuid4()))
+    image_uuid = Column(String(36), ForeignKey('images.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    flavor_uuid = Column(String(36), ForeignKey('flavors.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    project_uuid = Column(String(36), ForeignKey('projects.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting'),
+                            nullable=True)
+
+class ServicesRepos(BASE, NovaBase, models.TimestampMixin, models.SoftDeleteMixin):
+    """Represents the mapping between a service and a repo."""
+    __tablename__ = 'services_repos'
+    __table_args__ = (
+    )
+
+    id = Column(Integer, primary_key=True)
+    service_uuid = Column(String(36), ForeignKey('services.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    repo_uuid = Column(String(36), ForeignKey('repos.uuid'),
+                default=lambda: str(uuid.uuid4()))
+    auto = Column(Boolean(), default=False)
+    status = Column(Enum('failed', 'completed', 'in-progress', 'waiting', 'update_db', 'rebuild_db'),
+                            nullable=True)
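A quick, hedged way to sanity-check the schema above locally (assuming the
foreign keys resolve as written): create the tables in an in-memory SQLite
database and list what was generated. The real deployment uses the database
configured in gosbs.conf.

    from sqlalchemy import create_engine

    engine = create_engine('sqlite://')
    BASE.metadata.create_all(engine)
    for table_name in sorted(BASE.metadata.tables):
        print(table_name)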

diff --git a/gosbs/db/sqlalchemy/types.py b/gosbs/db/sqlalchemy/types.py
new file mode 100644
index 0000000..d1431c3
--- /dev/null
+++ b/gosbs/db/sqlalchemy/types.py
@@ -0,0 +1,74 @@
+# Copyright 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/types.py
+
+"""Custom SQLAlchemy types."""
+
+import netaddr
+from oslo_utils import netutils
+from sqlalchemy.dialects import postgresql
+from sqlalchemy import types
+
+from gosbs import utils
+
+
+class IPAddress(types.TypeDecorator):
+    """An SQLAlchemy type representing an IP-address."""
+
+    impl = types.String
+
+    def load_dialect_impl(self, dialect):
+        if dialect.name == 'postgresql':
+            return dialect.type_descriptor(postgresql.INET())
+        else:
+            return dialect.type_descriptor(types.String(39))
+
+    def process_bind_param(self, value, dialect):
+        """Process/format the value before inserting it into the db."""
+        if dialect.name == 'postgresql':
+            return value
+        # NOTE(maurosr): The purpose here is to convert ipv6 to the shortened
+        # form, not validate it.
+        elif netutils.is_valid_ipv6(value):
+            return utils.get_shortened_ipv6(value)
+        return value
+
+
+class CIDR(types.TypeDecorator):
+    """An SQLAlchemy type representing a CIDR definition."""
+
+    impl = types.String
+
+    def load_dialect_impl(self, dialect):
+        if dialect.name == 'postgresql':
+            return dialect.type_descriptor(postgresql.INET())
+        else:
+            return dialect.type_descriptor(types.String(43))
+
+    def process_bind_param(self, value, dialect):
+        """Process/format the value before inserting it into the db."""
+        # NOTE(sdague): normalize all the inserts
+        if netutils.is_valid_ipv6_cidr(value):
+            return utils.get_shortened_ipv6_cidr(value)
+        return value
+
+    def process_result_value(self, value, dialect):
+        try:
+            return str(netaddr.IPNetwork(value, version=4).cidr)
+        except netaddr.AddrFormatError:
+            return str(netaddr.IPNetwork(value, version=6).cidr)
+        except TypeError:
+            return None
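A hedged illustration (not part of this commit) of how these column types
would typically be attached to a model; the model and table names are made up.

    from sqlalchemy import Column, Integer
    from sqlalchemy.ext.declarative import declarative_base

    ExampleBase = declarative_base()

    class ExampleNetwork(ExampleBase):
        __tablename__ = 'example_networks'
        id = Column(Integer, primary_key=True)
        # INET on PostgreSQL, String(39)/String(43) elsewhere.
        address = Column(IPAddress())
        cidr = Column(CIDR())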

diff --git a/gosbs/db/sqlalchemy/utils.py b/gosbs/db/sqlalchemy/utils.py
new file mode 100644
index 0000000..e4236d8
--- /dev/null
+++ b/gosbs/db/sqlalchemy/utils.py
@@ -0,0 +1,118 @@
+# Copyright (c) 2013 Boris Pavlovic (boris@pavlovic.me).
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/db/sqlalchemy/utils.py
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as oslodbutils
+from oslo_log import log as logging
+from sqlalchemy.exc import OperationalError
+from sqlalchemy import MetaData
+from sqlalchemy import Table
+from sqlalchemy.types import NullType
+
+from gosbs.db.sqlalchemy import api as db
+from gosbs import exception
+from gosbs.i18n import _
+
+
+LOG = logging.getLogger(__name__)
+
+
+def check_shadow_table(migrate_engine, table_name):
+    """This method checks that table with ``table_name`` and
+    """This method checks that the table with ``table_name`` and its
+    corresponding shadow table have the same columns.
+    meta = MetaData()
+    meta.bind = migrate_engine
+
+    table = Table(table_name, meta, autoload=True)
+    shadow_table = Table(db._SHADOW_TABLE_PREFIX + table_name, meta,
+                         autoload=True)
+
+    columns = {c.name: c for c in table.columns}
+    shadow_columns = {c.name: c for c in shadow_table.columns}
+
+    for name, column in columns.items():
+        if name not in shadow_columns:
+            raise exception.NovaException(
+                _("Missing column %(table)s.%(column)s in shadow table")
+                        % {'column': name, 'table': shadow_table.name})
+        shadow_column = shadow_columns[name]
+
+        if not isinstance(shadow_column.type, type(column.type)):
+            raise exception.NovaException(
+                _("Different types in %(table)s.%(column)s and shadow table: "
+                  "%(c_type)s %(shadow_c_type)s")
+                        % {'column': name, 'table': table.name,
+                           'c_type': column.type,
+                           'shadow_c_type': shadow_column.type})
+
+    for name, column in shadow_columns.items():
+        if name not in columns:
+            raise exception.NovaException(
+                _("Extra column %(table)s.%(column)s in shadow table")
+                        % {'column': name, 'table': shadow_table.name})
+    return True
+
+
+def create_shadow_table(migrate_engine, table_name=None, table=None,
+                        **col_name_col_instance):
+    """This method creates a shadow table for the table with name
+    ``table_name`` or for the given table instance ``table``.
+    :param table_name: Autoload the table with this name and create its
+    shadow table.
+    :param table: Autoloaded table, so just create the corresponding shadow
+    table.
+    :param col_name_col_instance: contains pairs column_name=column_instance.
+    column_instance is an instance of Column. These params are required only
+    for columns whose types are not supported by sqlite, for example
+    BigInteger.
+    :returns: The created shadow_table object.
+    """
+    meta = MetaData(bind=migrate_engine)
+
+    if table_name is None and table is None:
+        raise exception.NovaException(_("Specify `table_name` or `table` "
+                                        "param"))
+    if not (table_name is None or table is None):
+        raise exception.NovaException(_("Specify only one param `table_name` "
+                                        "`table`"))
+
+    if table is None:
+        table = Table(table_name, meta, autoload=True)
+
+    columns = []
+    for column in table.columns:
+        if isinstance(column.type, NullType):
+            new_column = oslodbutils._get_not_supported_column(
+                col_name_col_instance, column.name)
+            columns.append(new_column)
+        else:
+            columns.append(column.copy())
+
+    shadow_table_name = db._SHADOW_TABLE_PREFIX + table.name
+    shadow_table = Table(shadow_table_name, meta, *columns,
+                         mysql_engine='InnoDB')
+    try:
+        shadow_table.create()
+        return shadow_table
+    except (db_exc.DBError, OperationalError):
+        # NOTE(ekudryashova): At the moment there is a case in oslo.db code
+        # which raises an unwrapped OperationalError, so we should catch it
+        # until oslo.db wraps all such exceptions.
+        LOG.info(repr(shadow_table))
+        LOG.exception('Exception while creating table.')
+        raise exception.ShadowTableExists(name=shadow_table_name)
+    except Exception:
+        LOG.info(repr(shadow_table))
+        LOG.exception('Exception while creating table.')

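check_shadow_table() and create_shadow_table() above are meant to be driven from DB migration code: the latter builds a shadow_<table_name> copy (taking explicit Column overrides for types sqlite cannot autoload), the former verifies the live table and its shadow still carry the same columns. A minimal sketch of that flow, where the 'builds' table, its 'counter' column and the engine URL are hypothetical:

# Sketch only; table name, column override and engine URL are hypothetical.
from sqlalchemy import BigInteger, Column, create_engine

from gosbs.db.sqlalchemy import utils as db_utils

engine = create_engine('mysql+pymysql://gosbs:secret@localhost/gosbs')

# Create shadow_builds, overriding the 'counter' column type for the case
# where sqlite would autoload it as NullType.
db_utils.create_shadow_table(engine, table_name='builds',
                             counter=Column('counter', BigInteger()))

# Later, verify the live table and its shadow copy still match.
db_utils.check_shadow_table(engine, 'builds')
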
diff --git a/gosbs/debugger.py b/gosbs/debugger.py
new file mode 100644
index 0000000..0cda17c
--- /dev/null
+++ b/gosbs/debugger.py
@@ -0,0 +1,62 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/debugger.py
+
+# NOTE(markmc): this is imported before monkey patching in nova.cmd
+# so we avoid extra imports here
+
+import sys
+
+
+def enabled():
+    return ('--remote_debug-host' in sys.argv and
+            '--remote_debug-port' in sys.argv)
+
+
+def init():
+    import gosbs.conf
+    CONF = gosbs.conf.CONF
+
+    # NOTE(markmc): gracefully handle the CLI options not being registered
+    if 'remote_debug' not in CONF:
+        return
+
+    if not (CONF.remote_debug.host and CONF.remote_debug.port):
+        return
+
+    from gosbs.i18n import _LW
+    from oslo_log import log as logging
+    LOG = logging.getLogger(__name__)
+
+    LOG.debug('Listening on %(host)s:%(port)s for debug connection',
+              {'host': CONF.remote_debug.host,
+               'port': CONF.remote_debug.port})
+
+    try:
+        from pydev import pydevd
+    except ImportError:
+        import pydevd
+    pydevd.settrace(host=CONF.remote_debug.host,
+                    port=CONF.remote_debug.port,
+                    stdoutToServer=False,
+                    stderrToServer=False)
+
+    LOG.warning(_LW('WARNING: Using the remote debug option changes how '
+                    'Nova uses the eventlet library to support async IO. This '
+                    'could result in failures that do not occur under normal '
+                    'operation. Use at your own risk.'))

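As the NOTE above says, the module has to stay importable before eventlet monkey patching, so a cmd/ entry point checks enabled() early and calls init() once the configuration is loaded. A rough sketch of that order, assuming gosbs.config exposes a Nova-style parse_args(); the entry point itself is hypothetical:

# Hypothetical entry point; gosbs.config.parse_args() is assumed to follow
# the Nova signature this code was derived from.
import sys

from gosbs import config
from gosbs import debugger


def main():
    # enabled() only inspects sys.argv, so it is safe to call before any
    # monkey patching or option registration.
    if debugger.enabled():
        print('remote debugging requested, async IO behaviour may change')

    config.parse_args(sys.argv)

    # init() is a no-op unless the remote_debug options are registered and
    # both host and port are set.
    debugger.init()
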
diff --git a/gosbs/exception.py b/gosbs/exception.py
new file mode 100644
index 0000000..4759034
--- /dev/null
+++ b/gosbs/exception.py
@@ -0,0 +1,2394 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/exception.py
+
+"""Nova base exception handling.
+
+Includes decorator for re-raising Nova-type exceptions.
+
+SHOULD include dedicated exception logging.
+
+"""
+
+from oslo_log import log as logging
+
+from gosbs.i18n import _, _LE
+
+LOG = logging.getLogger(__name__)
+
+class NovaException(Exception):
+    """Base Nova Exception
+
+    To correctly use this class, inherit from it and define
+    a 'msg_fmt' property. That msg_fmt will get printf'd
+    with the keyword arguments provided to the constructor.
+
+    """
+    msg_fmt = _("An unknown exception occurred.")
+    code = 500
+    headers = {}
+    safe = False
+
+    def __init__(self, message=None, **kwargs):
+        self.kwargs = kwargs
+
+        if 'code' not in self.kwargs:
+            try:
+                self.kwargs['code'] = self.code
+            except AttributeError:
+                pass
+
+        if not message:
+            try:
+                message = self.msg_fmt % kwargs
+
+            except Exception:
+                # NOTE(melwitt): This is done in a separate method so it can be
+                # monkey-patched during testing to make it a hard failure.
+                self._log_exception()
+                message = self.msg_fmt
+
+        self.message = message
+        super(NovaException, self).__init__(message)
+
+    def _log_exception(self):
+        # kwargs doesn't match a variable in the message
+        # log the issue and the kwargs
+        LOG.exception(_LE('Exception in string format operation'))
+        for name, value in self.kwargs.items():
+            LOG.error("%s: %s" % (name, value))  # noqa
+
+    def format_message(self):
+        # NOTE(mrodden): use the first argument to the python Exception object
+        # which should be our full NovaException message, (see __init__)
+        return self.args[0]
+
+    def __repr__(self):
+        dict_repr = self.__dict__
+        dict_repr['class'] = self.__class__.__name__
+        return str(dict_repr)
+
+
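As the docstring above explains, callers subclass NovaException and set msg_fmt, which the constructor interpolates with its keyword arguments. A small hypothetical subclass, only to illustrate the pattern and not part of this file:

# Hypothetical example; not part of gosbs/exception.py.
from gosbs import exception
from gosbs.i18n import _


class GitRepoNotFound(exception.NovaException):
    msg_fmt = _("Git repository %(repo)s could not be found.")
    code = 404


# GitRepoNotFound(repo='gentoo.git').format_message()
# -> 'Git repository gentoo.git could not be found.'
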
+class EncryptionFailure(NovaException):
+    msg_fmt = _("Failed to encrypt text: %(reason)s")
+
+
+class DecryptionFailure(NovaException):
+    msg_fmt = _("Failed to decrypt text: %(reason)s")
+
+
+class RevokeCertFailure(NovaException):
+    msg_fmt = _("Failed to revoke certificate for %(project_id)s")
+
+
+class VirtualInterfaceCreateException(NovaException):
+    msg_fmt = _("Virtual Interface creation failed")
+
+
+class VirtualInterfaceMacAddressException(NovaException):
+    msg_fmt = _("Creation of virtual interface with "
+                "unique mac address failed")
+
+
+class VirtualInterfacePlugException(NovaException):
+    msg_fmt = _("Virtual interface plugin failed")
+
+
+class VirtualInterfaceUnplugException(NovaException):
+    msg_fmt = _("Failed to unplug virtual interface: %(reason)s")
+
+
+class GlanceConnectionFailed(NovaException):
+    msg_fmt = _("Connection to glance host %(server)s failed: "
+        "%(reason)s")
+
+
+class CinderConnectionFailed(NovaException):
+    msg_fmt = _("Connection to cinder host failed: %(reason)s")
+
+
+class UnsupportedCinderAPIVersion(NovaException):
+    msg_fmt = _('Nova does not support Cinder API version %(version)s')
+
+
+class CinderAPIVersionNotAvailable(NovaException):
+    """Used to indicate that a requested Cinder API version, generally a
+    microversion, is not available.
+    """
+    msg_fmt = _('Cinder API version %(version)s is not available.')
+
+
+class Forbidden(NovaException):
+    msg_fmt = _("Forbidden")
+    code = 403
+
+
+class AdminRequired(Forbidden):
+    msg_fmt = _("User does not have admin privileges")
+
+
+class PolicyNotAuthorized(Forbidden):
+    msg_fmt = _("Policy doesn't allow %(action)s to be performed.")
+
+
+class ImageNotActive(NovaException):
+    # NOTE(jruzicka): IncorrectState is used for volumes only in EC2,
+    # but it still seems like the most appropriate option.
+    msg_fmt = _("Image %(image_id)s is not active.")
+
+
+class ImageNotAuthorized(NovaException):
+    msg_fmt = _("Not authorized for image %(image_id)s.")
+
+
+class Invalid(NovaException):
+    msg_fmt = _("Bad Request - Invalid Parameters")
+    code = 400
+
+
+class InvalidBDM(Invalid):
+    msg_fmt = _("Block Device Mapping is Invalid.")
+
+
+class InvalidBDMSnapshot(InvalidBDM):
+    msg_fmt = _("Block Device Mapping is Invalid: "
+                "failed to get snapshot %(id)s.")
+
+
+class InvalidBDMVolume(InvalidBDM):
+    msg_fmt = _("Block Device Mapping is Invalid: "
+                "failed to get volume %(id)s.")
+
+
+class InvalidBDMImage(InvalidBDM):
+    msg_fmt = _("Block Device Mapping is Invalid: "
+                "failed to get image %(id)s.")
+
+
+class InvalidBDMBootSequence(InvalidBDM):
+    msg_fmt = _("Block Device Mapping is Invalid: "
+                "Boot sequence for the instance "
+                "and image/block device mapping "
+                "combination is not valid.")
+
+
+class InvalidBDMLocalsLimit(InvalidBDM):
+    msg_fmt = _("Block Device Mapping is Invalid: "
+                "You specified more local devices than the "
+                "limit allows")
+
+
+class InvalidBDMEphemeralSize(InvalidBDM):
+    msg_fmt = _("Ephemeral disks requested are larger than "
+                "the instance type allows. If no size is given "
+                "in one block device mapping, flavor ephemeral "
+                "size will be used.")
+
+
+class InvalidBDMSwapSize(InvalidBDM):
+    msg_fmt = _("Swap drive requested is larger than instance type allows.")
+
+
+class InvalidBDMFormat(InvalidBDM):
+    msg_fmt = _("Block Device Mapping is Invalid: "
+                "%(details)s")
+
+
+class InvalidBDMForLegacy(InvalidBDM):
+    msg_fmt = _("Block Device Mapping cannot "
+                "be converted to legacy format. ")
+
+
+class InvalidBDMVolumeNotBootable(InvalidBDM):
+    msg_fmt = _("Block Device %(id)s is not bootable.")
+
+
+class InvalidAttribute(Invalid):
+    msg_fmt = _("Attribute not supported: %(attr)s")
+
+
+class ValidationError(Invalid):
+    msg_fmt = "%(detail)s"
+
+
+class VolumeAttachFailed(Invalid):
+    msg_fmt = _("Volume %(volume_id)s could not be attached. "
+                "Reason: %(reason)s")
+
+
+class VolumeDetachFailed(Invalid):
+    msg_fmt = _("Volume %(volume_id)s could not be detached. "
+                "Reason: %(reason)s")
+
+
+class MultiattachNotSupportedByVirtDriver(NovaException):
+    # This exception indicates the compute hosting the instance does not
+    # support multiattach volumes. This should generally be considered a
+    # 409 HTTPConflict error in the API since we expect all virt drivers to
+    # eventually support multiattach volumes.
+    msg_fmt = _("Volume %(volume_id)s has 'multiattach' set, "
+                "which is not supported for this instance.")
+    code = 409
+
+
+class MultiattachSupportNotYetAvailable(NovaException):
+    # This exception indicates the deployment is not yet new enough to support
+    # multiattach volumes, so a 409 HTTPConflict response is generally used
+    # for handling this in the API.
+    msg_fmt = _("Multiattach volume support is not yet available.")
+    code = 409
+
+
+class MultiattachNotSupportedOldMicroversion(Invalid):
+    msg_fmt = _('Multiattach volumes are only supported starting with '
+                'compute API version 2.60.')
+
+
+class VolumeTypeSupportNotYetAvailable(NovaException):
+    # This exception indicates the deployment is not yet new enough to support
+    # volume type, so a 409 HTTPConflict response is generally used
+    # for handling this in the API.
+    msg_fmt = _("Volume type support is not yet available.")
+    code = 409
+
+
+class MultiattachToShelvedNotSupported(Invalid):
+    msg_fmt = _("Attaching multiattach volumes is not supported for "
+                "shelved-offloaded instances.")
+
+
+class VolumeNotCreated(NovaException):
+    msg_fmt = _("Volume %(volume_id)s did not finish being created"
+                " even after we waited %(seconds)s seconds or %(attempts)s"
+                " attempts. And its status is %(volume_status)s.")
+
+
+class ExtendVolumeNotSupported(Invalid):
+    msg_fmt = _("Volume size extension is not supported by the hypervisor.")
+
+
+class VolumeEncryptionNotSupported(Invalid):
+    msg_fmt = _("Volume encryption is not supported for %(volume_type)s "
+                "volume %(volume_id)s")
+
+
+class VolumeTaggedAttachNotSupported(Invalid):
+    msg_fmt = _("Tagged volume attachment is not supported for this server "
+                "instance.")
+
+
+class VolumeTaggedAttachToShelvedNotSupported(VolumeTaggedAttachNotSupported):
+    msg_fmt = _("Tagged volume attachment is not supported for "
+                "shelved-offloaded instances.")
+
+
+class NetworkInterfaceTaggedAttachNotSupported(Invalid):
+    msg_fmt = _("Tagged network interface attachment is not supported for "
+                "this server instance.")
+
+
+class InvalidKeypair(Invalid):
+    msg_fmt = _("Keypair data is invalid: %(reason)s")
+
+
+class InvalidRequest(Invalid):
+    msg_fmt = _("The request is invalid.")
+
+
+class InvalidInput(Invalid):
+    msg_fmt = _("Invalid input received: %(reason)s")
+
+
+class InvalidVolume(Invalid):
+    msg_fmt = _("Invalid volume: %(reason)s")
+
+
+class InvalidVolumeAccessMode(Invalid):
+    msg_fmt = _("Invalid volume access mode: %(access_mode)s")
+
+
+class InvalidMetadata(Invalid):
+    msg_fmt = _("Invalid metadata: %(reason)s")
+
+
+class InvalidMetadataSize(Invalid):
+    msg_fmt = _("Invalid metadata size: %(reason)s")
+
+
+class InvalidPortRange(Invalid):
+    msg_fmt = _("Invalid port range %(from_port)s:%(to_port)s. %(msg)s")
+
+
+class InvalidIpProtocol(Invalid):
+    msg_fmt = _("Invalid IP protocol %(protocol)s.")
+
+
+class InvalidContentType(Invalid):
+    msg_fmt = _("Invalid content type %(content_type)s.")
+
+
+class InvalidAPIVersionString(Invalid):
+    msg_fmt = _("API Version String %(version)s is of invalid format. Must "
+                "be of format MajorNum.MinorNum.")
+
+
+class VersionNotFoundForAPIMethod(Invalid):
+    msg_fmt = _("API version %(version)s is not supported on this method.")
+
+
+class InvalidGlobalAPIVersion(Invalid):
+    msg_fmt = _("Version %(req_ver)s is not supported by the API. Minimum "
+                "is %(min_ver)s and maximum is %(max_ver)s.")
+
+
+class ApiVersionsIntersect(Invalid):
+    msg_fmt = _("Version of %(name)s %(min_ver)s %(max_ver)s intersects "
+                "with another versions.")
+
+
+# Cannot be templated as the error syntax varies.
+# msg needs to be constructed when raised.
+class InvalidParameterValue(Invalid):
+    msg_fmt = "%(err)s"
+
+
+class InvalidAggregateAction(Invalid):
+    msg_fmt = _("Unacceptable parameters.")
+    code = 400
+
+
+class InvalidAggregateActionAdd(InvalidAggregateAction):
+    msg_fmt = _("Cannot add host to aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidAggregateActionDelete(InvalidAggregateAction):
+    msg_fmt = _("Cannot remove host from aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidAggregateActionUpdate(InvalidAggregateAction):
+    msg_fmt = _("Cannot update aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidAggregateActionUpdateMeta(InvalidAggregateAction):
+    msg_fmt = _("Cannot update metadata of aggregate "
+                "%(aggregate_id)s. Reason: %(reason)s.")
+
+
+class InvalidSortKey(Invalid):
+    msg_fmt = _("Sort key supplied was not valid.")
+
+
+class InvalidStrTime(Invalid):
+    msg_fmt = _("Invalid datetime string: %(reason)s")
+
+
+class InvalidNUMANodesNumber(Invalid):
+    msg_fmt = _("The property 'numa_nodes' cannot be '%(nodes)s'. "
+                "It must be a number greater than 0")
+
+
+class InvalidName(Invalid):
+    msg_fmt = _("An invalid 'name' value was provided. "
+                "The name must be: %(reason)s")
+
+
+class InstanceInvalidState(Invalid):
+    msg_fmt = _("Instance %(instance_uuid)s in %(attr)s %(state)s. Cannot "
+                "%(method)s while the instance is in this state.")
+
+
+class InstanceNotRunning(Invalid):
+    msg_fmt = _("Instance %(instance_id)s is not running.")
+
+
+class InstanceNotInRescueMode(Invalid):
+    msg_fmt = _("Instance %(instance_id)s is not in rescue mode")
+
+
+class InstanceNotRescuable(Invalid):
+    msg_fmt = _("Instance %(instance_id)s cannot be rescued: %(reason)s")
+
+
+class InstanceNotReady(Invalid):
+    msg_fmt = _("Instance %(instance_id)s is not ready")
+
+
+class InstanceSuspendFailure(Invalid):
+    msg_fmt = _("Failed to suspend instance: %(reason)s")
+
+
+class InstanceResumeFailure(Invalid):
+    msg_fmt = _("Failed to resume instance: %(reason)s")
+
+
+class InstancePowerOnFailure(Invalid):
+    msg_fmt = _("Failed to power on instance: %(reason)s")
+
+
+class InstancePowerOffFailure(Invalid):
+    msg_fmt = _("Failed to power off instance: %(reason)s")
+
+
+class InstanceRebootFailure(Invalid):
+    msg_fmt = _("Failed to reboot instance: %(reason)s")
+
+
+class InstanceTerminationFailure(Invalid):
+    msg_fmt = _("Failed to terminate instance: %(reason)s")
+
+
+class InstanceDeployFailure(Invalid):
+    msg_fmt = _("Failed to deploy instance: %(reason)s")
+
+
+class MultiplePortsNotApplicable(Invalid):
+    msg_fmt = _("Failed to launch instances: %(reason)s")
+
+
+class InvalidFixedIpAndMaxCountRequest(Invalid):
+    msg_fmt = _("Failed to launch instances: %(reason)s")
+
+
+class ServiceUnavailable(Invalid):
+    msg_fmt = _("Service is unavailable at this time.")
+
+
+class ServiceNotUnique(Invalid):
+    msg_fmt = _("More than one possible service found.")
+
+
+class ComputeResourcesUnavailable(ServiceUnavailable):
+    msg_fmt = _("Insufficient compute resources: %(reason)s.")
+
+
+class HypervisorUnavailable(NovaException):
+    msg_fmt = _("Connection to the hypervisor is broken on host: %(host)s")
+
+
+class ComputeServiceUnavailable(ServiceUnavailable):
+    msg_fmt = _("Compute service of %(host)s is unavailable at this time.")
+
+
+class ComputeServiceInUse(NovaException):
+    msg_fmt = _("Compute service of %(host)s is still in use.")
+
+
+class UnableToMigrateToSelf(Invalid):
+    msg_fmt = _("Unable to migrate instance (%(instance_id)s) "
+                "to current host (%(host)s).")
+
+
+class InvalidHypervisorType(Invalid):
+    msg_fmt = _("The supplied hypervisor type of is invalid.")
+
+
+class HypervisorTooOld(Invalid):
+    msg_fmt = _("This compute node's hypervisor is older than the minimum "
+                "supported version: %(version)s.")
+
+
+class DestinationHypervisorTooOld(Invalid):
+    msg_fmt = _("The instance requires a newer hypervisor version than "
+                "has been provided.")
+
+
+class ServiceTooOld(Invalid):
+    msg_fmt = _("This service is older (v%(thisver)i) than the minimum "
+                "(v%(minver)i) version of the rest of the deployment. "
+                "Unable to continue.")
+
+
+class DestinationDiskExists(Invalid):
+    msg_fmt = _("The supplied disk path (%(path)s) already exists, "
+                "it is expected not to exist.")
+
+
+class InvalidDevicePath(Invalid):
+    msg_fmt = _("The supplied device path (%(path)s) is invalid.")
+
+
+class DevicePathInUse(Invalid):
+    msg_fmt = _("The supplied device path (%(path)s) is in use.")
+    code = 409
+
+
+class InvalidCPUInfo(Invalid):
+    msg_fmt = _("Unacceptable CPU info: %(reason)s")
+
+
+class InvalidIpAddressError(Invalid):
+    msg_fmt = _("%(address)s is not a valid IP v4/6 address.")
+
+
+class InvalidVLANTag(Invalid):
+    msg_fmt = _("VLAN tag is not appropriate for the port group "
+                "%(bridge)s. Expected VLAN tag is %(tag)s, "
+                "but the one associated with the port group is %(pgroup)s.")
+
+
+class InvalidVLANPortGroup(Invalid):
+    msg_fmt = _("vSwitch which contains the port group %(bridge)s is "
+                "not associated with the desired physical adapter. "
+                "Expected vSwitch is %(expected)s, but the one associated "
+                "is %(actual)s.")
+
+
+class InvalidDiskFormat(Invalid):
+    msg_fmt = _("Disk format %(disk_format)s is not acceptable")
+
+
+class InvalidDiskInfo(Invalid):
+    msg_fmt = _("Disk info file is invalid: %(reason)s")
+
+
+class DiskInfoReadWriteFail(Invalid):
+    msg_fmt = _("Failed to read or write disk info file: %(reason)s")
+
+
+class ImageUnacceptable(Invalid):
+    msg_fmt = _("Image %(image_id)s is unacceptable: %(reason)s")
+
+
+class ImageBadRequest(Invalid):
+    msg_fmt = _("Request of image %(image_id)s got BadRequest response: "
+                "%(response)s")
+
+
+class InstanceUnacceptable(Invalid):
+    msg_fmt = _("Instance %(instance_id)s is unacceptable: %(reason)s")
+
+
+class InvalidEc2Id(Invalid):
+    msg_fmt = _("Ec2 id %(ec2_id)s is unacceptable.")
+
+
+class InvalidUUID(Invalid):
+    msg_fmt = _("Expected a uuid but received %(uuid)s.")
+
+
+class InvalidID(Invalid):
+    msg_fmt = _("Invalid ID received %(id)s.")
+
+
+class ConstraintNotMet(NovaException):
+    msg_fmt = _("Constraint not met.")
+    code = 412
+
+
+class NotFound(NovaException):
+    msg_fmt = _("Resource could not be found.")
+    code = 404
+
+
+class AgentBuildNotFound(NotFound):
+    msg_fmt = _("No agent-build associated with id %(id)s.")
+
+
+class AgentBuildExists(NovaException):
+    msg_fmt = _("Agent-build with hypervisor %(hypervisor)s os %(os)s "
+                "architecture %(architecture)s exists.")
+
+
+class VolumeAttachmentNotFound(NotFound):
+    msg_fmt = _("Volume attachment %(attachment_id)s could not be found.")
+
+
+class VolumeNotFound(NotFound):
+    msg_fmt = _("Volume %(volume_id)s could not be found.")
+
+
+class VolumeTypeNotFound(NotFound):
+    msg_fmt = _("Volume type %(id_or_name)s could not be found.")
+
+
+class UndefinedRootBDM(NovaException):
+    msg_fmt = _("Undefined Block Device Mapping root: BlockDeviceMappingList "
+                "contains Block Device Mappings from multiple instances.")
+
+
+class BDMNotFound(NotFound):
+    msg_fmt = _("No Block Device Mapping with id %(id)s.")
+
+
+class VolumeBDMNotFound(NotFound):
+    msg_fmt = _("No volume Block Device Mapping with id %(volume_id)s.")
+
+
+class VolumeBDMIsMultiAttach(Invalid):
+    msg_fmt = _("Block Device Mapping %(volume_id)s is a multi-attach volume"
+                " and is not valid for this operation.")
+
+
+class VolumeBDMPathNotFound(VolumeBDMNotFound):
+    msg_fmt = _("No volume Block Device Mapping at path: %(path)s")
+
+
+class DeviceDetachFailed(NovaException):
+    msg_fmt = _("Device detach failed for %(device)s: %(reason)s")
+
+
+class DeviceNotFound(NotFound):
+    msg_fmt = _("Device '%(device)s' not found.")
+
+
+class SnapshotNotFound(NotFound):
+    msg_fmt = _("Snapshot %(snapshot_id)s could not be found.")
+
+
+class DiskNotFound(NotFound):
+    msg_fmt = _("No disk at %(location)s")
+
+
+class VolumeDriverNotFound(NotFound):
+    msg_fmt = _("Could not find a handler for %(driver_type)s volume.")
+
+
+class InvalidImageRef(Invalid):
+    msg_fmt = _("Invalid image href %(image_href)s.")
+
+
+class AutoDiskConfigDisabledByImage(Invalid):
+    msg_fmt = _("Requested image %(image)s "
+                "has automatic disk resize disabled.")
+
+
+class ImageNotFound(NotFound):
+    msg_fmt = _("Image %(image_id)s could not be found.")
+
+
+class ImageDeleteConflict(NovaException):
+    msg_fmt = _("Conflict deleting image. Reason: %(reason)s.")
+
+
+class ImageHandlerUnsupported(NovaException):
+    msg_fmt = _("Error: unsupported image handler %(image_handler)s.")
+
+
+class PreserveEphemeralNotSupported(Invalid):
+    msg_fmt = _("The current driver does not support "
+                "preserving ephemeral partitions.")
+
+
+class ProjectNotFound(NotFound):
+    msg_fmt = _("Project %(project_id)s could not be found.")
+
+
+class StorageRepositoryNotFound(NotFound):
+    msg_fmt = _("Cannot find SR to read/write VDI.")
+
+
+class InstanceMappingNotFound(NotFound):
+    msg_fmt = _("Instance %(uuid)s has no mapping to a cell.")
+
+
+class NetworkDhcpReleaseFailed(NovaException):
+    msg_fmt = _("Failed to release IP %(address)s with MAC %(mac_address)s")
+
+
+class NetworkInUse(NovaException):
+    msg_fmt = _("Network %(network_id)s is still in use.")
+
+
+class NetworkSetHostFailed(NovaException):
+    msg_fmt = _("Network set host failed for network %(network_id)s.")
+
+
+class NetworkNotCreated(Invalid):
+    msg_fmt = _("%(req)s is required to create a network.")
+
+
+class LabelTooLong(Invalid):
+    msg_fmt = _("Maximum allowed length for 'label' is 255.")
+
+
+class InvalidIntValue(Invalid):
+    msg_fmt = _("%(key)s must be an integer.")
+
+
+class InvalidCidr(Invalid):
+    msg_fmt = _("%(cidr)s is not a valid IP network.")
+
+
+class InvalidAddress(Invalid):
+    msg_fmt = _("%(address)s is not a valid IP address.")
+
+
+class AddressOutOfRange(Invalid):
+    msg_fmt = _("%(address)s is not within %(cidr)s.")
+
+
+class DuplicateVlan(NovaException):
+    msg_fmt = _("Detected existing vlan with id %(vlan)d")
+    code = 409
+
+
+class CidrConflict(NovaException):
+    msg_fmt = _('Requested cidr (%(cidr)s) conflicts '
+                'with existing cidr (%(other)s)')
+    code = 409
+
+
+class NetworkHasProject(NetworkInUse):
+    msg_fmt = _('Network must be disassociated from project '
+                '%(project_id)s before it can be deleted.')
+
+
+class NetworkNotFound(NotFound):
+    msg_fmt = _("Network %(network_id)s could not be found.")
+
+
+class PortNotFound(NotFound):
+    msg_fmt = _("Port id %(port_id)s could not be found.")
+
+
+class NetworkNotFoundForBridge(NetworkNotFound):
+    msg_fmt = _("Network could not be found for bridge %(bridge)s")
+
+
+class NetworkNotFoundForUUID(NetworkNotFound):
+    msg_fmt = _("Network could not be found for uuid %(uuid)s")
+
+
+class NetworkNotFoundForCidr(NetworkNotFound):
+    msg_fmt = _("Network could not be found with cidr %(cidr)s.")
+
+
+class NetworkNotFoundForInstance(NetworkNotFound):
+    msg_fmt = _("Network could not be found for instance %(instance_id)s.")
+
+
+class NoNetworksFound(NotFound):
+    msg_fmt = _("No networks defined.")
+
+
+class NoMoreNetworks(NovaException):
+    msg_fmt = _("No more available networks.")
+
+
+class NetworkNotFoundForProject(NetworkNotFound):
+    msg_fmt = _("Either network uuid %(network_uuid)s is not present or "
+                "is not assigned to the project %(project_id)s.")
+
+
+class NetworkAmbiguous(Invalid):
+    msg_fmt = _("More than one possible network found. Specify "
+                "network ID(s) to select which one(s) to connect to.")
+
+
+class UnableToAutoAllocateNetwork(Invalid):
+    msg_fmt = _('Unable to automatically allocate a network for project '
+                '%(project_id)s')
+
+
+class NetworkRequiresSubnet(Invalid):
+    msg_fmt = _("Network %(network_uuid)s requires a subnet in order to boot"
+                " instances on.")
+
+
+class ExternalNetworkAttachForbidden(Forbidden):
+    msg_fmt = _("It is not allowed to create an interface on "
+                "external network %(network_uuid)s")
+
+
+class NetworkMissingPhysicalNetwork(NovaException):
+    msg_fmt = _("Physical network is missing for network %(network_uuid)s")
+
+
+class VifDetailsMissingVhostuserSockPath(Invalid):
+    msg_fmt = _("vhostuser_sock_path not present in vif_details"
+                " for vif %(vif_id)s")
+
+
+class VifDetailsMissingMacvtapParameters(Invalid):
+    msg_fmt = _("Parameters %(missing_params)s not present in"
+                " vif_details for vif %(vif_id)s. Check your Neutron"
+                " configuration to validate that the macvtap parameters are"
+                " correct.")
+
+
+class OvsConfigurationFailure(NovaException):
+    msg_fmt = _("OVS configuration failed with: %(inner_exception)s.")
+
+
+class DatastoreNotFound(NotFound):
+    msg_fmt = _("Could not find the datastore reference(s) which the VM uses.")
+
+
+class PortInUse(Invalid):
+    msg_fmt = _("Port %(port_id)s is still in use.")
+
+
+class PortRequiresFixedIP(Invalid):
+    msg_fmt = _("Port %(port_id)s requires a FixedIP in order to be used.")
+
+
+class PortNotUsable(Invalid):
+    msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s.")
+
+
+class PortNotUsableDNS(Invalid):
+    msg_fmt = _("Port %(port_id)s not usable for instance %(instance)s. "
+                "Value %(value)s assigned to dns_name attribute does not "
+                "match instance's hostname %(hostname)s")
+
+
+class PortNotFree(Invalid):
+    msg_fmt = _("No free port available for instance %(instance)s.")
+
+
+class PortBindingFailed(Invalid):
+    msg_fmt = _("Binding failed for port %(port_id)s, please check neutron "
+                "logs for more information.")
+
+
+class PortBindingDeletionFailed(NovaException):
+    msg_fmt = _("Failed to delete binding for port %(port_id)s and host "
+                "%(host)s.")
+
+
+class PortBindingActivationFailed(NovaException):
+    msg_fmt = _("Failed to activate binding for port %(port_id)s and host "
+                "%(host)s.")
+
+
+class PortUpdateFailed(Invalid):
+    msg_fmt = _("Port update failed for port %(port_id)s: %(reason)s")
+
+
+class AttachSRIOVPortNotSupported(Invalid):
+    msg_fmt = _('Attaching SR-IOV port %(port_id)s to server '
+                '%(instance_uuid)s is not supported. SR-IOV ports must be '
+                'specified during server creation.')
+
+
+class FixedIpExists(NovaException):
+    msg_fmt = _("Fixed IP %(address)s already exists.")
+
+
+class FixedIpNotFound(NotFound):
+    msg_fmt = _("No fixed IP associated with id %(id)s.")
+
+
+class FixedIpNotFoundForAddress(FixedIpNotFound):
+    msg_fmt = _("Fixed IP not found for address %(address)s.")
+
+
+class FixedIpNotFoundForInstance(FixedIpNotFound):
+    msg_fmt = _("Instance %(instance_uuid)s has zero fixed IPs.")
+
+
+class FixedIpNotFoundForNetworkHost(FixedIpNotFound):
+    msg_fmt = _("Network host %(host)s has zero fixed IPs "
+                "in network %(network_id)s.")
+
+
+class FixedIpNotFoundForSpecificInstance(FixedIpNotFound):
+    msg_fmt = _("Instance %(instance_uuid)s doesn't have fixed IP '%(ip)s'.")
+
+
+class FixedIpNotFoundForNetwork(FixedIpNotFound):
+    msg_fmt = _("Fixed IP address (%(address)s) does not exist in "
+                "network (%(network_uuid)s).")
+
+
+class FixedIpAssociateFailed(NovaException):
+    msg_fmt = _("Fixed IP associate failed for network: %(net)s.")
+
+
+class FixedIpAlreadyInUse(NovaException):
+    msg_fmt = _("Fixed IP address %(address)s is already in use on instance "
+                "%(instance_uuid)s.")
+
+
+class FixedIpAssociatedWithMultipleInstances(NovaException):
+    msg_fmt = _("More than one instance is associated with fixed IP address "
+                "'%(address)s'.")
+
+
+class FixedIpInvalid(Invalid):
+    msg_fmt = _("Fixed IP address %(address)s is invalid.")
+
+
+class FixedIpInvalidOnHost(Invalid):
+    msg_fmt = _("The fixed IP associated with port %(port_id)s is not "
+                "compatible with the host.")
+
+
+class NoMoreFixedIps(NovaException):
+    msg_fmt = _("No fixed IP addresses available for network: %(net)s")
+
+
+class NoFixedIpsDefined(NotFound):
+    msg_fmt = _("Zero fixed IPs could be found.")
+
+
+class FloatingIpExists(NovaException):
+    msg_fmt = _("Floating IP %(address)s already exists.")
+
+
+class FloatingIpNotFound(NotFound):
+    msg_fmt = _("Floating IP not found for ID %(id)s.")
+
+
+class FloatingIpDNSExists(Invalid):
+    msg_fmt = _("The DNS entry %(name)s already exists in domain %(domain)s.")
+
+
+class FloatingIpNotFoundForAddress(FloatingIpNotFound):
+    msg_fmt = _("Floating IP not found for address %(address)s.")
+
+
+class FloatingIpNotFoundForHost(FloatingIpNotFound):
+    msg_fmt = _("Floating IP not found for host %(host)s.")
+
+
+class FloatingIpMultipleFoundForAddress(NovaException):
+    msg_fmt = _("Multiple floating IPs are found for address %(address)s.")
+
+
+class FloatingIpPoolNotFound(NotFound):
+    msg_fmt = _("Floating IP pool not found.")
+    safe = True
+
+
+class NoMoreFloatingIps(FloatingIpNotFound):
+    msg_fmt = _("Zero floating IPs available.")
+    safe = True
+
+
+class FloatingIpAssociated(NovaException):
+    msg_fmt = _("Floating IP %(address)s is associated.")
+
+
+class FloatingIpNotAssociated(NovaException):
+    msg_fmt = _("Floating IP %(address)s is not associated.")
+
+
+class NoFloatingIpsDefined(NotFound):
+    msg_fmt = _("Zero floating IPs exist.")
+
+
+class NoFloatingIpInterface(NotFound):
+    msg_fmt = _("Interface %(interface)s not found.")
+
+
+class FloatingIpAllocateFailed(NovaException):
+    msg_fmt = _("Floating IP allocate failed.")
+
+
+class FloatingIpAssociateFailed(NovaException):
+    msg_fmt = _("Floating IP %(address)s association has failed.")
+
+
+class FloatingIpBadRequest(Invalid):
+    msg_fmt = _("The floating IP request failed with a BadRequest")
+
+
+class CannotDisassociateAutoAssignedFloatingIP(NovaException):
+    msg_fmt = _("Cannot disassociate auto assigned floating IP")
+
+
+class KeypairNotFound(NotFound):
+    msg_fmt = _("Keypair %(name)s not found for user %(user_id)s")
+
+
+class ServiceNotFound(NotFound):
+    msg_fmt = _("Service %(service_id)s could not be found.")
+
+
+class ConfGroupForServiceTypeNotFound(ServiceNotFound):
+    msg_fmt = _("No conf group name could be found for service type "
+                "%(stype)s.")
+
+
+class ServiceBinaryExists(NovaException):
+    msg_fmt = _("Service with host %(host)s binary %(binary)s exists.")
+
+
+class ServiceTopicExists(NovaException):
+    msg_fmt = _("Service with host %(host)s topic %(topic)s exists.")
+
+
+class HostNotFound(NotFound):
+    msg_fmt = _("Host %(host)s could not be found.")
+
+
+class ComputeHostNotFound(HostNotFound):
+    msg_fmt = _("Compute host %(host)s could not be found.")
+
+
+class HostBinaryNotFound(NotFound):
+    msg_fmt = _("Could not find binary %(binary)s on host %(host)s.")
+
+
+class InvalidReservationExpiration(Invalid):
+    msg_fmt = _("Invalid reservation expiration %(expire)s.")
+
+
+class InvalidQuotaValue(Invalid):
+    msg_fmt = _("Change would make usage less than 0 for the following "
+                "resources: %(unders)s")
+
+
+class InvalidQuotaMethodUsage(Invalid):
+    msg_fmt = _("Wrong quota method %(method)s used on resource %(res)s")
+
+
+class QuotaNotFound(NotFound):
+    msg_fmt = _("Quota could not be found")
+
+
+class QuotaExists(NovaException):
+    msg_fmt = _("Quota exists for project %(project_id)s, "
+                "resource %(resource)s")
+
+
+class QuotaResourceUnknown(QuotaNotFound):
+    msg_fmt = _("Unknown quota resources %(unknown)s.")
+
+
+class ProjectUserQuotaNotFound(QuotaNotFound):
+    msg_fmt = _("Quota for user %(user_id)s in project %(project_id)s "
+                "could not be found.")
+
+
+class ProjectQuotaNotFound(QuotaNotFound):
+    msg_fmt = _("Quota for project %(project_id)s could not be found.")
+
+
+class QuotaClassNotFound(QuotaNotFound):
+    msg_fmt = _("Quota class %(class_name)s could not be found.")
+
+
+class QuotaClassExists(NovaException):
+    msg_fmt = _("Quota class %(class_name)s exists for resource %(resource)s")
+
+
+class QuotaUsageNotFound(QuotaNotFound):
+    msg_fmt = _("Quota usage for project %(project_id)s could not be found.")
+
+
+class QuotaUsageRefreshNotAllowed(Invalid):
+    msg_fmt = _("Quota usage refresh of resource %(resource)s for project "
+                "%(project_id)s, user %(user_id)s, is not allowed. "
+                "The allowed resources are %(syncable)s.")
+
+
+class ReservationNotFound(QuotaNotFound):
+    msg_fmt = _("Quota reservation %(uuid)s could not be found.")
+
+
+class OverQuota(NovaException):
+    msg_fmt = _("Quota exceeded for resources: %(overs)s")
+
+
+class SecurityGroupNotFound(NotFound):
+    msg_fmt = _("Security group %(security_group_id)s not found.")
+
+
+class SecurityGroupNotFoundForProject(SecurityGroupNotFound):
+    msg_fmt = _("Security group %(security_group_id)s not found "
+                "for project %(project_id)s.")
+
+
+class SecurityGroupNotFoundForRule(SecurityGroupNotFound):
+    msg_fmt = _("Security group with rule %(rule_id)s not found.")
+
+
+class SecurityGroupExists(Invalid):
+    msg_fmt = _("Security group %(security_group_name)s already exists "
+                "for project %(project_id)s.")
+
+
+class SecurityGroupExistsForInstance(Invalid):
+    msg_fmt = _("Security group %(security_group_id)s is already associated"
+                " with the instance %(instance_id)s")
+
+
+class SecurityGroupNotExistsForInstance(Invalid):
+    msg_fmt = _("Security group %(security_group_id)s is not associated with"
+                " the instance %(instance_id)s")
+
+
+class SecurityGroupDefaultRuleNotFound(Invalid):
+    msg_fmt = _("Security group default rule (%rule_id)s not found.")
+
+
+class SecurityGroupCannotBeApplied(Invalid):
+    msg_fmt = _("Network requires port_security_enabled and subnet associated"
+                " in order to apply security groups.")
+
+
+class NoUniqueMatch(NovaException):
+    msg_fmt = _("No Unique Match Found.")
+    code = 409
+
+
+class NoActiveMigrationForInstance(NotFound):
+    msg_fmt = _("Active live migration for instance %(instance_id)s not found")
+
+
+class MigrationNotFound(NotFound):
+    msg_fmt = _("Migration %(migration_id)s could not be found.")
+
+
+class MigrationNotFoundByStatus(MigrationNotFound):
+    msg_fmt = _("Migration not found for instance %(instance_id)s "
+                "with status %(status)s.")
+
+
+class MigrationNotFoundForInstance(MigrationNotFound):
+    msg_fmt = _("Migration %(migration_id)s not found for instance "
+                "%(instance_id)s")
+
+
+class InvalidMigrationState(Invalid):
+    msg_fmt = _("Migration %(migration_id)s state of instance "
+                "%(instance_uuid)s is %(state)s. Cannot %(method)s while the "
+                "migration is in this state.")
+
+
+class AbortQueuedLiveMigrationNotYetSupported(NovaException):
+    msg_fmt = _("Aborting live migration %(migration_id)s with status "
+                "%(status)s is not yet supported for this instance.")
+    code = 409
+
+
+class ConsoleLogOutputException(NovaException):
+    msg_fmt = _("Console log output could not be retrieved for instance "
+                "%(instance_id)s. Reason: %(reason)s")
+
+
+class ConsolePoolExists(NovaException):
+    msg_fmt = _("Console pool with host %(host)s, console_type "
+                "%(console_type)s and compute_host %(compute_host)s "
+                "already exists.")
+
+
+class ConsolePoolNotFoundForHostType(NotFound):
+    msg_fmt = _("Console pool of type %(console_type)s "
+                "for compute host %(compute_host)s "
+                "on proxy host %(host)s not found.")
+
+
+class ConsoleNotFound(NotFound):
+    msg_fmt = _("Console %(console_id)s could not be found.")
+
+
+class ConsoleNotFoundForInstance(ConsoleNotFound):
+    msg_fmt = _("Console for instance %(instance_uuid)s could not be found.")
+
+
+class ConsoleNotAvailable(NotFound):
+    msg_fmt = _("Guest does not have a console available.")
+
+
+class ConsoleNotFoundInPoolForInstance(ConsoleNotFound):
+    msg_fmt = _("Console for instance %(instance_uuid)s "
+                "in pool %(pool_id)s could not be found.")
+
+
+class ConsoleTypeInvalid(Invalid):
+    msg_fmt = _("Invalid console type %(console_type)s")
+
+
+class ConsoleTypeUnavailable(Invalid):
+    msg_fmt = _("Unavailable console type %(console_type)s.")
+
+
+class ConsolePortRangeExhausted(NovaException):
+    msg_fmt = _("The console port range %(min_port)d-%(max_port)d is "
+                "exhausted.")
+
+
+class FlavorNotFound(NotFound):
+    msg_fmt = _("Flavor %(flavor_id)s could not be found.")
+
+
+class FlavorNotFoundByName(FlavorNotFound):
+    msg_fmt = _("Flavor with name %(flavor_name)s could not be found.")
+
+
+class FlavorAccessNotFound(NotFound):
+    msg_fmt = _("Flavor access not found for %(flavor_id)s / "
+                "%(project_id)s combination.")
+
+
+class FlavorExtraSpecUpdateCreateFailed(NovaException):
+    msg_fmt = _("Flavor %(id)s extra spec cannot be updated or created "
+                "after %(retries)d retries.")
+
+
+class CellNotFound(NotFound):
+    msg_fmt = _("Cell %(cell_name)s doesn't exist.")
+
+
+class CellExists(NovaException):
+    msg_fmt = _("Cell with name %(name)s already exists.")
+
+
+class CellRoutingInconsistency(NovaException):
+    msg_fmt = _("Inconsistency in cell routing: %(reason)s")
+
+
+class CellServiceAPIMethodNotFound(NotFound):
+    msg_fmt = _("Service API method not found: %(detail)s")
+
+
+class CellTimeout(NotFound):
+    msg_fmt = _("Timeout waiting for response from cell")
+
+
+class CellMaxHopCountReached(NovaException):
+    msg_fmt = _("Cell message has reached maximum hop count: %(hop_count)s")
+
+
+class NoCellsAvailable(NovaException):
+    msg_fmt = _("No cells available matching scheduling criteria.")
+
+
+class CellsUpdateUnsupported(NovaException):
+    msg_fmt = _("Cannot update cells configuration file.")
+
+
+class InstanceUnknownCell(NotFound):
+    msg_fmt = _("Cell is not known for instance %(instance_uuid)s")
+
+
+class SchedulerHostFilterNotFound(NotFound):
+    msg_fmt = _("Scheduler Host Filter %(filter_name)s could not be found.")
+
+
+class FlavorExtraSpecsNotFound(NotFound):
+    msg_fmt = _("Flavor %(flavor_id)s has no extra specs with "
+                "key %(extra_specs_key)s.")
+
+
+class ComputeHostMetricNotFound(NotFound):
+    msg_fmt = _("Metric %(name)s could not be found on the compute "
+                "host node %(host)s.%(node)s.")
+
+
+class FileNotFound(NotFound):
+    msg_fmt = _("File %(file_path)s could not be found.")
+
+
+class SwitchNotFoundForNetworkAdapter(NotFound):
+    msg_fmt = _("Virtual switch associated with the "
+                "network adapter %(adapter)s not found.")
+
+
+class NetworkAdapterNotFound(NotFound):
+    msg_fmt = _("Network adapter %(adapter)s could not be found.")
+
+
+class ClassNotFound(NotFound):
+    msg_fmt = _("Class %(class_name)s could not be found: %(exception)s")
+
+
+class InstanceTagNotFound(NotFound):
+    msg_fmt = _("Instance %(instance_id)s has no tag '%(tag)s'")
+
+
+class KeyPairExists(NovaException):
+    msg_fmt = _("Key pair '%(key_name)s' already exists.")
+
+
+class InstanceExists(NovaException):
+    msg_fmt = _("Instance %(name)s already exists.")
+
+
+class FlavorExists(NovaException):
+    msg_fmt = _("Flavor with name %(name)s already exists.")
+
+
+class FlavorIdExists(NovaException):
+    msg_fmt = _("Flavor with ID %(flavor_id)s already exists.")
+
+
+class FlavorAccessExists(NovaException):
+    msg_fmt = _("Flavor access already exists for flavor %(flavor_id)s "
+                "and project %(project_id)s combination.")
+
+
+class InvalidSharedStorage(NovaException):
+    msg_fmt = _("%(path)s is not on shared storage: %(reason)s")
+
+
+class InvalidLocalStorage(NovaException):
+    msg_fmt = _("%(path)s is not on local storage: %(reason)s")
+
+
+class StorageError(NovaException):
+    msg_fmt = _("Storage error: %(reason)s")
+
+
+class MigrationError(NovaException):
+    msg_fmt = _("Migration error: %(reason)s")
+
+
+class MigrationPreCheckError(MigrationError):
+    msg_fmt = _("Migration pre-check error: %(reason)s")
+
+
+class MigrationSchedulerRPCError(MigrationError):
+    msg_fmt = _("Migration select destinations error: %(reason)s")
+
+
+class RPCPinnedToOldVersion(NovaException):
+    msg_fmt = _("RPC is pinned to old version")
+
+
+class MalformedRequestBody(NovaException):
+    msg_fmt = _("Malformed message body: %(reason)s")
+
+
+# NOTE(johannes): NotFound should only be used when a 404 error is
+# appropriate to be returned
+class ConfigNotFound(NovaException):
+    msg_fmt = _("Could not find config at %(path)s")
+
+
+class PasteAppNotFound(NovaException):
+    msg_fmt = _("Could not load paste app '%(name)s' from %(path)s")
+
+
+class CannotResizeToSameFlavor(NovaException):
+    msg_fmt = _("When resizing, instances must change flavor!")
+
+
+class ResizeError(NovaException):
+    msg_fmt = _("Resize error: %(reason)s")
+
+
+class CannotResizeDisk(NovaException):
+    msg_fmt = _("Server disk was unable to be resized because: %(reason)s")
+
+
+class FlavorMemoryTooSmall(NovaException):
+    msg_fmt = _("Flavor's memory is too small for requested image.")
+
+
+class FlavorDiskTooSmall(NovaException):
+    msg_fmt = _("The created instance's disk would be too small.")
+
+
+class FlavorDiskSmallerThanImage(FlavorDiskTooSmall):
+    msg_fmt = _("Flavor's disk is too small for requested image. Flavor disk "
+                "is %(flavor_size)i bytes, image is %(image_size)i bytes.")
+
+
+class FlavorDiskSmallerThanMinDisk(FlavorDiskTooSmall):
+    msg_fmt = _("Flavor's disk is smaller than the minimum size specified in "
+                "image metadata. Flavor disk is %(flavor_size)i bytes, "
+                "minimum size is %(image_min_disk)i bytes.")
+
+
+class VolumeSmallerThanMinDisk(FlavorDiskTooSmall):
+    msg_fmt = _("Volume is smaller than the minimum size specified in image "
+                "metadata. Volume size is %(volume_size)i bytes, minimum "
+                "size is %(image_min_disk)i bytes.")
+
+
+class BootFromVolumeRequiredForZeroDiskFlavor(Forbidden):
+    msg_fmt = _("Only volume-backed servers are allowed for flavors with "
+                "zero disk.")
+
+
+class InsufficientFreeMemory(NovaException):
+    msg_fmt = _("Insufficient free memory on compute node to start %(uuid)s.")
+
+
+class NoValidHost(NovaException):
+    msg_fmt = _("No valid host was found. %(reason)s")
+
+
+class RequestFilterFailed(NovaException):
+    msg_fmt = _("Scheduling failed: %(reason)s")
+
+
+class MaxRetriesExceeded(NoValidHost):
+    msg_fmt = _("Exceeded maximum number of retries. %(reason)s")
+
+
+class QuotaError(NovaException):
+    msg_fmt = _("Quota exceeded: code=%(code)s")
+    # NOTE(cyeoh): 413 should only be used for the ec2 API
+    # The error status code for out of quota for the nova api should be
+    # 403 Forbidden.
+    code = 413
+    safe = True
+
+
+class TooManyInstances(QuotaError):
+    msg_fmt = _("Quota exceeded for %(overs)s: Requested %(req)s,"
+                " but already used %(used)s of %(allowed)s %(overs)s")
+
+
+class FloatingIpLimitExceeded(QuotaError):
+    msg_fmt = _("Maximum number of floating IPs exceeded")
+
+
+class FixedIpLimitExceeded(QuotaError):
+    msg_fmt = _("Maximum number of fixed IPs exceeded")
+
+
+class MetadataLimitExceeded(QuotaError):
+    msg_fmt = _("Maximum number of metadata items exceeds %(allowed)d")
+
+
+class OnsetFileLimitExceeded(QuotaError):
+    msg_fmt = _("Personality file limit exceeded")
+
+
+class OnsetFilePathLimitExceeded(OnsetFileLimitExceeded):
+    msg_fmt = _("Personality file path exceeds maximum %(allowed)s")
+
+
+class OnsetFileContentLimitExceeded(OnsetFileLimitExceeded):
+    msg_fmt = _("Personality file content exceeds maximum %(allowed)s")
+
+
+class KeypairLimitExceeded(QuotaError):
+    msg_fmt = _("Maximum number of key pairs exceeded")
+
+
+class SecurityGroupLimitExceeded(QuotaError):
+    msg_fmt = _("Maximum number of security groups or rules exceeded")
+
+
+class PortLimitExceeded(QuotaError):
+    msg_fmt = _("Maximum number of ports exceeded")
+
+
+class AggregateError(NovaException):
+    msg_fmt = _("Aggregate %(aggregate_id)s: action '%(action)s' "
+                "caused an error: %(reason)s.")
+
+
+class AggregateNotFound(NotFound):
+    msg_fmt = _("Aggregate %(aggregate_id)s could not be found.")
+
+
+class AggregateNameExists(NovaException):
+    msg_fmt = _("Aggregate %(aggregate_name)s already exists.")
+
+
+class AggregateHostNotFound(NotFound):
+    msg_fmt = _("Aggregate %(aggregate_id)s has no host %(host)s.")
+
+
+class AggregateMetadataNotFound(NotFound):
+    msg_fmt = _("Aggregate %(aggregate_id)s has no metadata with "
+                "key %(metadata_key)s.")
+
+
+class AggregateHostExists(NovaException):
+    msg_fmt = _("Aggregate %(aggregate_id)s already has host %(host)s.")
+
+
+class InstancePasswordSetFailed(NovaException):
+    msg_fmt = _("Failed to set admin password on %(instance)s "
+                "because %(reason)s")
+    safe = True
+
+
+class InstanceNotFound(NotFound):
+    msg_fmt = _("Instance %(instance_id)s could not be found.")
+
+
+class InstanceInfoCacheNotFound(NotFound):
+    msg_fmt = _("Info cache for instance %(instance_uuid)s could not be "
+                "found.")
+
+
+class MarkerNotFound(NotFound):
+    msg_fmt = _("Marker %(marker)s could not be found.")
+
+
+class CouldNotFetchImage(NovaException):
+    msg_fmt = _("Could not fetch image %(image_id)s")
+
+
+class CouldNotUploadImage(NovaException):
+    msg_fmt = _("Could not upload image %(image_id)s")
+
+
+class TaskAlreadyRunning(NovaException):
+    msg_fmt = _("Task %(task_name)s is already running on host %(host)s")
+
+
+class TaskNotRunning(NovaException):
+    msg_fmt = _("Task %(task_name)s is not running on host %(host)s")
+
+
+class InstanceIsLocked(InstanceInvalidState):
+    msg_fmt = _("Instance %(instance_uuid)s is locked")
+
+
+class ConfigDriveInvalidValue(Invalid):
+    msg_fmt = _("Invalid value for Config Drive option: %(option)s")
+
+
+class ConfigDriveUnsupportedFormat(Invalid):
+    msg_fmt = _("Config drive format '%(format)s' is not supported.")
+
+
+class ConfigDriveMountFailed(NovaException):
+    msg_fmt = _("Could not mount vfat config drive. %(operation)s failed. "
+                "Error: %(error)s")
+
+
+class ConfigDriveUnknownFormat(NovaException):
+    msg_fmt = _("Unknown config drive format %(format)s. Select one of "
+                "iso9660 or vfat.")
+
+
+class ConfigDriveNotFound(NotFound):
+    msg_fmt = _("Instance %(instance_uuid)s requires config drive, but it "
+                "does not exist.")
+
+
+class InterfaceAttachFailed(Invalid):
+    msg_fmt = _("Failed to attach network adapter device to "
+                "%(instance_uuid)s")
+
+
+class InterfaceAttachFailedNoNetwork(InterfaceAttachFailed):
+    msg_fmt = _("No specific network was requested and none are available "
+                "for project '%(project_id)s'.")
+
+
+class InterfaceDetachFailed(Invalid):
+    msg_fmt = _("Failed to detach network adapter device from "
+                "%(instance_uuid)s")
+
+
+class InstanceUserDataMalformed(NovaException):
+    msg_fmt = _("User data needs to be valid base 64.")
+
+
+class InstanceUpdateConflict(NovaException):
+    msg_fmt = _("Conflict updating instance %(instance_uuid)s. "
+                "Expected: %(expected)s. Actual: %(actual)s")
+
+
+class UnknownInstanceUpdateConflict(InstanceUpdateConflict):
+    msg_fmt = _("Conflict updating instance %(instance_uuid)s, but we were "
+                "unable to determine the cause")
+
+
+class UnexpectedTaskStateError(InstanceUpdateConflict):
+    pass
+
+
+class UnexpectedDeletingTaskStateError(UnexpectedTaskStateError):
+    pass
+
+
+class InstanceActionNotFound(NovaException):
+    msg_fmt = _("Action for request_id %(request_id)s on instance"
+                " %(instance_uuid)s not found")
+
+
+class InstanceActionEventNotFound(NovaException):
+    msg_fmt = _("Event %(event)s not found for action id %(action_id)s")
+
+
+class CryptoCAFileNotFound(FileNotFound):
+    msg_fmt = _("The CA file for %(project)s could not be found")
+
+
+class CryptoCRLFileNotFound(FileNotFound):
+    msg_fmt = _("The CRL file for %(project)s could not be found")
+
+
+class InstanceEvacuateNotSupported(Invalid):
+    msg_fmt = _('Instance evacuate is not supported.')
+
+
+class DBNotAllowed(NovaException):
+    msg_fmt = _('%(binary)s attempted direct database access which is '
+                'not allowed by policy')
+
+
+class UnsupportedVirtType(Invalid):
+    msg_fmt = _("Virtualization type '%(virt)s' is not supported by "
+                "this compute driver")
+
+
+class UnsupportedHardware(Invalid):
+    msg_fmt = _("Requested hardware '%(model)s' is not supported by "
+                "the '%(virt)s' virt driver")
+
+
+class Base64Exception(NovaException):
+    msg_fmt = _("Invalid Base 64 data for file %(path)s")
+
+
+class BuildAbortException(NovaException):
+    msg_fmt = _("Build of instance %(instance_uuid)s aborted: %(reason)s")
+
+
+class RescheduledException(NovaException):
+    msg_fmt = _("Build of instance %(instance_uuid)s was re-scheduled: "
+                "%(reason)s")
+
+
+class ShadowTableExists(NovaException):
+    msg_fmt = _("Shadow table with name %(name)s already exists.")
+
+
+class InstanceFaultRollback(NovaException):
+    def __init__(self, inner_exception=None):
+        message = _("Instance rollback performed due to: %s")
+        self.inner_exception = inner_exception
+        super(InstanceFaultRollback, self).__init__(message % inner_exception)
+
+
+class OrphanedObjectError(NovaException):
+    msg_fmt = _('Cannot call %(method)s on orphaned %(objtype)s object')
+
+
+class ObjectActionError(NovaException):
+    msg_fmt = _('Object action %(action)s failed because: %(reason)s')
+
+
+class AgentError(NovaException):
+    msg_fmt = _('Error during following call to agent: %(method)s')
+
+
+class AgentTimeout(AgentError):
+    msg_fmt = _('Unable to contact guest agent. '
+                'The following call timed out: %(method)s')
+
+
+class AgentNotImplemented(AgentError):
+    msg_fmt = _('Agent does not support the call: %(method)s')
+
+
+class InstanceGroupNotFound(NotFound):
+    msg_fmt = _("Instance group %(group_uuid)s could not be found.")
+
+
+class InstanceGroupIdExists(NovaException):
+    msg_fmt = _("Instance group %(group_uuid)s already exists.")
+
+
+class InstanceGroupMemberNotFound(NotFound):
+    msg_fmt = _("Instance group %(group_uuid)s has no member with "
+                "id %(instance_id)s.")
+
+
+class InstanceGroupSaveException(NovaException):
+    msg_fmt = _("%(field)s should not be part of the updates.")
+
+
+class ResourceMonitorError(NovaException):
+    msg_fmt = _("Error when creating resource monitor: %(monitor)s")
+
+
+class PciDeviceWrongAddressFormat(NovaException):
+    msg_fmt = _("The PCI address %(address)s has an incorrect format.")
+
+
+class PciDeviceInvalidDeviceName(NovaException):
+    msg_fmt = _("Invalid PCI Whitelist: "
+                "The PCI whitelist can specify devname or address,"
+                " but not both")
+
+
+class PciDeviceNotFoundById(NotFound):
+    msg_fmt = _("PCI device %(id)s not found")
+
+
+class PciDeviceNotFound(NotFound):
+    msg_fmt = _("PCI Device %(node_id)s:%(address)s not found.")
+
+
+class PciDeviceInvalidStatus(Invalid):
+    msg_fmt = _(
+        "PCI device %(compute_node_id)s:%(address)s is %(status)s "
+        "instead of %(hopestatus)s")
+
+
+class PciDeviceVFInvalidStatus(Invalid):
+    msg_fmt = _(
+        "Not all Virtual Functions of PF %(compute_node_id)s:%(address)s "
+        "are free.")
+
+
+class PciDevicePFInvalidStatus(Invalid):
+    msg_fmt = _(
+        "Physical Function %(compute_node_id)s:%(address)s, related to VF"
+        " %(compute_node_id)s:%(vf_address)s is %(status)s "
+        "instead of %(hopestatus)s")
+
+
+class PciDeviceInvalidOwner(Invalid):
+    msg_fmt = _(
+        "PCI device %(compute_node_id)s:%(address)s is owned by %(owner)s "
+        "instead of %(hopeowner)s")
+
+
+class PciDeviceRequestFailed(NovaException):
+    msg_fmt = _(
+        "PCI device request %(requests)s failed")
+
+
+class PciDevicePoolEmpty(NovaException):
+    msg_fmt = _(
+        "Attempt to consume PCI device %(compute_node_id)s:%(address)s "
+        "from empty pool")
+
+
+class PciInvalidAlias(Invalid):
+    msg_fmt = _("Invalid PCI alias definition: %(reason)s")
+
+
+class PciRequestAliasNotDefined(NovaException):
+    msg_fmt = _("PCI alias %(alias)s is not defined")
+
+
+class PciConfigInvalidWhitelist(Invalid):
+    msg_fmt = _("Invalid PCI devices Whitelist config: %(reason)s")
+
+
+# Cannot be templated, msg needs to be constructed when raised.
+class InternalError(NovaException):
+    """Generic hypervisor errors.
+
+    Consider subclassing this to provide more specific exceptions.
+    """
+    msg_fmt = "%(err)s"
+
+
+class PciDevicePrepareFailed(NovaException):
+    msg_fmt = _("Failed to prepare PCI device %(id)s for instance "
+                "%(instance_uuid)s: %(reason)s")
+
+
+class PciDeviceDetachFailed(NovaException):
+    msg_fmt = _("Failed to detach PCI device %(dev)s: %(reason)s")
+
+
+class PciDeviceUnsupportedHypervisor(NovaException):
+    msg_fmt = _("%(type)s hypervisor does not support PCI devices")
+
+
+class KeyManagerError(NovaException):
+    msg_fmt = _("Key manager error: %(reason)s")
+
+
+class VolumesNotRemoved(Invalid):
+    msg_fmt = _("Failed to remove volume(s): (%(reason)s)")
+
+
+class VolumeRebaseFailed(NovaException):
+    msg_fmt = _("Volume rebase failed: %(reason)s")
+
+
+class InvalidVideoMode(Invalid):
+    msg_fmt = _("Provided video model (%(model)s) is not supported.")
+
+
+class RngDeviceNotExist(Invalid):
+    msg_fmt = _("The provided RNG device path: (%(path)s) is not "
+                "present on the host.")
+
+
+class RequestedVRamTooHigh(NovaException):
+    msg_fmt = _("The requested amount of video memory %(req_vram)d is higher "
+                "than the maximum allowed by flavor %(max_vram)d.")
+
+
+class SecurityProxyNegotiationFailed(NovaException):
+    msg_fmt = _("Failed to negotiate security type with server: %(reason)s")
+
+
+class RFBAuthHandshakeFailed(NovaException):
+    msg_fmt = _("Failed to complete auth handshake: %(reason)s")
+
+
+class RFBAuthNoAvailableScheme(NovaException):
+    msg_fmt = _("No matching auth scheme: allowed types: '%(allowed_types)s', "
+                "desired types: '%(desired_types)s'")
+
+
+class InvalidWatchdogAction(Invalid):
+    msg_fmt = _("Provided watchdog action (%(action)s) is not supported.")
+
+
+class LiveMigrationNotSubmitted(NovaException):
+    msg_fmt = _("Failed to submit live migration %(migration_uuid)s for "
+                "instance %(instance_uuid)s for processing.")
+
+
+class SelectionObjectsWithOldRPCVersionNotSupported(NovaException):
+    msg_fmt = _("Requests for Selection objects with alternates are not "
+                "supported in select_destinations() before RPC version 4.5; "
+                "version %(version)s requested.")
+
+
+class LiveMigrationURINotAvailable(NovaException):
+    msg_fmt = _('No live migration URI configured and no default available '
+                'for "%(virt_type)s" hypervisor virtualization type.')
+
+
+class UnshelveException(NovaException):
+    msg_fmt = _("Error during unshelve instance %(instance_id)s: %(reason)s")
+
+
+class ImageVCPULimitsRangeExceeded(Invalid):
+    msg_fmt = _('Image vCPU topology limits (sockets=%(image_sockets)d, '
+                'cores=%(image_cores)d, threads=%(image_threads)d) exceeds '
+                'the limits of the flavor (sockets=%(flavor_sockets)d, '
+                'cores=%(flavor_cores)d, threads=%(flavor_threads)d)')
+
+
+class ImageVCPUTopologyRangeExceeded(Invalid):
+    msg_fmt = _('Image vCPU topology (sockets=%(image_sockets)d, '
+                'cores=%(image_cores)d, threads=%(image_threads)d) exceeds '
+                'the limits of the flavor or image (sockets=%(max_sockets)d, '
+                'cores=%(max_cores)d, threads=%(max_threads)d)')
+
+
+class ImageVCPULimitsRangeImpossible(Invalid):
+    msg_fmt = _("Requested vCPU limits %(sockets)d:%(cores)d:%(threads)d "
+                "are impossible to satisfy for vcpus count %(vcpus)d")
+
+
+class InvalidArchitectureName(Invalid):
+    msg_fmt = _("Architecture name '%(arch)s' is not recognised")
+
+
+class ImageNUMATopologyIncomplete(Invalid):
+    msg_fmt = _("CPU and memory allocation must be provided for all "
+                "NUMA nodes")
+
+
+class ImageNUMATopologyForbidden(Forbidden):
+    msg_fmt = _("Image property '%(name)s' is not permitted to override "
+                "NUMA configuration set against the flavor")
+
+
+class ImageNUMATopologyAsymmetric(Invalid):
+    msg_fmt = _("Instance CPUs and/or memory cannot be evenly distributed "
+                "across instance NUMA nodes. Explicit assignment of CPUs "
+                "and memory to nodes is required")
+
+
+class ImageNUMATopologyCPUOutOfRange(Invalid):
+    msg_fmt = _("CPU number %(cpunum)d is larger than max %(cpumax)d")
+
+
+class ImageNUMATopologyCPUDuplicates(Invalid):
+    msg_fmt = _("CPU number %(cpunum)d is assigned to two nodes")
+
+
+class ImageNUMATopologyCPUsUnassigned(Invalid):
+    msg_fmt = _("CPU number %(cpuset)s is not assigned to any node")
+
+
+class ImageNUMATopologyMemoryOutOfRange(Invalid):
+    msg_fmt = _("%(memsize)d MB of memory assigned, but expected "
+                "%(memtotal)d MB")
+
+
+class InvalidHostname(Invalid):
+    msg_fmt = _("Invalid characters in hostname '%(hostname)s'")
+
+
+class NumaTopologyNotFound(NotFound):
+    msg_fmt = _("Instance %(instance_uuid)s does not specify a NUMA topology")
+
+
+class MigrationContextNotFound(NotFound):
+    msg_fmt = _("Instance %(instance_uuid)s does not specify a migration "
+                "context.")
+
+
+class SocketPortRangeExhaustedException(NovaException):
+    msg_fmt = _("Not able to acquire a free port for %(host)s")
+
+
+class SocketPortInUseException(NovaException):
+    msg_fmt = _("Not able to bind %(host)s:%(port)d, %(error)s")
+
+
+class ImageSerialPortNumberInvalid(Invalid):
+    msg_fmt = _("Number of serial ports specified in flavor is invalid: "
+                "expected an integer, got '%(num_ports)s'")
+
+
+class ImageSerialPortNumberExceedFlavorValue(Invalid):
+    msg_fmt = _("Forbidden to exceed flavor value of number of serial "
+                "ports passed in image meta.")
+
+
+class SerialPortNumberLimitExceeded(Invalid):
+    msg_fmt = _("Maximum number of serial port exceeds %(allowed)d "
+                "for %(virt_type)s")
+
+
+class InvalidImageConfigDrive(Invalid):
+    msg_fmt = _("Image's config drive option '%(config_drive)s' is invalid")
+
+
+class InvalidHypervisorVirtType(Invalid):
+    msg_fmt = _("Hypervisor virtualization type '%(hv_type)s' is not "
+                "recognised")
+
+
+class InvalidVirtualMachineMode(Invalid):
+    msg_fmt = _("Virtual machine mode '%(vmmode)s' is not recognised")
+
+
+class InvalidToken(Invalid):
+    msg_fmt = _("The token '%(token)s' is invalid or has expired")
+
+
+class TokenInUse(Invalid):
+    msg_fmt = _("The generated token is invalid")
+
+
+class InvalidConnectionInfo(Invalid):
+    msg_fmt = _("Invalid Connection Info")
+
+
+class InstanceQuiesceNotSupported(Invalid):
+    msg_fmt = _('Quiescing is not supported in instance %(instance_id)s')
+
+
+class InstanceAgentNotEnabled(Invalid):
+    msg_fmt = _('Guest agent is not enabled for the instance')
+    safe = True
+
+
+class QemuGuestAgentNotEnabled(InstanceAgentNotEnabled):
+    msg_fmt = _('QEMU guest agent is not enabled')
+
+
+class SetAdminPasswdNotSupported(Invalid):
+    msg_fmt = _('Set admin password is not supported')
+    safe = True
+
+
+class MemoryPageSizeInvalid(Invalid):
+    msg_fmt = _("Invalid memory page size '%(pagesize)s'")
+
+
+class MemoryPageSizeForbidden(Invalid):
+    msg_fmt = _("Page size %(pagesize)s forbidden against '%(against)s'")
+
+
+class MemoryPageSizeNotSupported(Invalid):
+    msg_fmt = _("Page size %(pagesize)s is not supported by the host.")
+
+
+class CPUPinningNotSupported(Invalid):
+    msg_fmt = _("CPU pinning is not supported by the host: "
+                "%(reason)s")
+
+
+class CPUPinningInvalid(Invalid):
+    msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
+                "free CPU set %(free)s")
+
+
+class CPUUnpinningInvalid(Invalid):
+    msg_fmt = _("CPU set to unpin %(requested)s must be a subset of "
+                "pinned CPU set %(pinned)s")
+
+
+class CPUPinningUnknown(Invalid):
+    msg_fmt = _("CPU set to pin %(requested)s must be a subset of "
+                "known CPU set %(cpuset)s")
+
+
+class CPUUnpinningUnknown(Invalid):
+    msg_fmt = _("CPU set to unpin %(requested)s must be a subset of "
+                "known CPU set %(cpuset)s")
+
+
+class ImageCPUPinningForbidden(Forbidden):
+    msg_fmt = _("Image property 'hw_cpu_policy' is not permitted to override "
+                "CPU pinning policy set against the flavor")
+
+
+class ImageCPUThreadPolicyForbidden(Forbidden):
+    msg_fmt = _("Image property 'hw_cpu_thread_policy' is not permitted to "
+                "override CPU thread pinning policy set against the flavor")
+
+
+class UnsupportedPolicyException(Invalid):
+    msg_fmt = _("ServerGroup policy is not supported: %(reason)s")
+
+
+class CellMappingNotFound(NotFound):
+    msg_fmt = _("Cell %(uuid)s has no mapping.")
+
+
+class NUMATopologyUnsupported(Invalid):
+    msg_fmt = _("Host does not support guests with NUMA topology set")
+
+
+class MemoryPagesUnsupported(Invalid):
+    msg_fmt = _("Host does not support guests with custom memory page sizes")
+
+
+class InvalidImageFormat(Invalid):
+    msg_fmt = _("Invalid image format '%(format)s'")
+
+
+class UnsupportedImageModel(Invalid):
+    msg_fmt = _("Image model '%(image)s' is not supported")
+
+
+class HostMappingNotFound(Invalid):
+    msg_fmt = _("Host '%(name)s' is not mapped to any cell")
+
+
+class RealtimeConfigurationInvalid(Invalid):
+    msg_fmt = _("Cannot set realtime policy in a non dedicated "
+                "cpu pinning policy")
+
+
+class CPUThreadPolicyConfigurationInvalid(Invalid):
+    msg_fmt = _("Cannot set cpu thread pinning policy in a non dedicated "
+                "cpu pinning policy")
+
+
+class RequestSpecNotFound(NotFound):
+    msg_fmt = _("RequestSpec not found for instance %(instance_uuid)s")
+
+
+class UEFINotSupported(Invalid):
+    msg_fmt = _("UEFI is not supported")
+
+
+class TriggerCrashDumpNotSupported(Invalid):
+    msg_fmt = _("Triggering crash dump is not supported")
+
+
+class UnsupportedHostCPUControlPolicy(Invalid):
+    msg_fmt = _("Requested CPU control policy not supported by host")
+
+
+class LibguestfsCannotReadKernel(Invalid):
+    msg_fmt = _("Libguestfs does not have permission to read host kernel.")
+
+
+class RealtimeMaskNotFoundOrInvalid(Invalid):
+    msg_fmt = _("Realtime policy needs vCPU(s) mask configured with at least "
+                "1 RT vCPU and 1 ordinary vCPU. See hw:cpu_realtime_mask "
+                "or hw_cpu_realtime_mask")
+
+
+class OsInfoNotFound(NotFound):
+    msg_fmt = _("No configuration information found for operating system "
+                "%(os_name)s")
+
+
+class BuildRequestNotFound(NotFound):
+    msg_fmt = _("BuildRequest not found for instance %(uuid)s")
+
+
+class AttachInterfaceNotSupported(Invalid):
+    msg_fmt = _("Attaching interfaces is not supported for "
+                "instance %(instance_uuid)s.")
+
+
+class InvalidReservedMemoryPagesOption(Invalid):
+    msg_fmt = _("The format of the option 'reserved_huge_pages' is invalid. "
+                "(found '%(conf)s') Please refer to the nova "
+                "config-reference.")
+
+
+# An exception with this name is used on both sides of the placement/
+# nova interaction.
+class ResourceProviderInUse(NovaException):
+    msg_fmt = _("Resource provider has allocations.")
+
+
+class ResourceProviderRetrievalFailed(NovaException):
+    msg_fmt = _("Failed to get resource provider with UUID %(uuid)s")
+
+
+class ResourceProviderAggregateRetrievalFailed(NovaException):
+    msg_fmt = _("Failed to get aggregates for resource provider with UUID"
+                " %(uuid)s")
+
+
+class ResourceProviderTraitRetrievalFailed(NovaException):
+    msg_fmt = _("Failed to get traits for resource provider with UUID"
+                " %(uuid)s")
+
+
+class ResourceProviderCreationFailed(NovaException):
+    msg_fmt = _("Failed to create resource provider %(name)s")
+
+
+class ResourceProviderDeletionFailed(NovaException):
+    msg_fmt = _("Failed to delete resource provider %(uuid)s")
+
+
+class ResourceProviderUpdateFailed(NovaException):
+    msg_fmt = _("Failed to update resource provider via URL %(url)s: "
+                "%(error)s")
+
+
+class ResourceProviderNotFound(NotFound):
+    msg_fmt = _("No such resource provider %(name_or_uuid)s.")
+
+
+class ResourceProviderSyncFailed(NovaException):
+    msg_fmt = _("Failed to synchronize the placement service with resource "
+                "provider information supplied by the compute host.")
+
+
+class PlacementAPIConnectFailure(NovaException):
+    msg_fmt = _("Unable to communicate with the Placement API.")
+
+
+class PlacementAPIConflict(NovaException):
+    """Any 409 error from placement APIs should use (a subclass of) this
+    exception.
+    """
+    msg_fmt = _("A conflict was encountered attempting to invoke the "
+                "placement API at URL %(url)s: %(error)s")
+
+
+class ResourceProviderUpdateConflict(PlacementAPIConflict):
+    """A 409 caused by generation mismatch from attempting to update an
+    existing provider record or its associated data (aggregates, traits, etc.).
+    """
+    msg_fmt = _("A conflict was encountered attempting to update resource "
+                "provider %(uuid)s (generation %(generation)d): %(error)s")
+
+
+class InvalidResourceClass(Invalid):
+    msg_fmt = _("Resource class '%(resource_class)s' invalid.")
+
+
+class InvalidResourceAmount(Invalid):
+    msg_fmt = _("Resource amounts must be integers. Received '%(amount)s'.")
+
+
+class InvalidInventory(Invalid):
+    msg_fmt = _("Inventory for '%(resource_class)s' on "
+                "resource provider '%(resource_provider)s' invalid.")
+
+
+# An exception with this name is used on both sides of the placement/
+# nova interaction.
+class InventoryInUse(InvalidInventory):
+    # NOTE(mriedem): This message cannot change without impacting the
+    # nova.scheduler.client.report._RE_INV_IN_USE regex.
+    msg_fmt = _("Inventory for '%(resource_classes)s' on "
+                "resource provider '%(resource_provider)s' in use.")
+
+
+class UnsupportedPointerModelRequested(Invalid):
+    msg_fmt = _("Pointer model '%(model)s' requested is not supported by "
+                "host.")
+
+
+class NotSupportedWithOption(Invalid):
+    msg_fmt = _("%(operation)s is not supported in conjunction with the "
+                "current %(option)s setting.  Please refer to the nova "
+                "config-reference.")
+
+
+class Unauthorized(NovaException):
+    msg_fmt = _("Not authorized.")
+    code = 401
+
+
+class NeutronAdminCredentialConfigurationInvalid(Invalid):
+    msg_fmt = _("Networking client is experiencing an unauthorized exception.")
+
+
+class InvalidEmulatorThreadsPolicy(Invalid):
+    msg_fmt = _("CPU emulator threads option requested is invalid, "
+                "given: '%(requested)s', available: '%(available)s'.")
+
+
+class BadRequirementEmulatorThreadsPolicy(Invalid):
+    msg_fmt = _("An isolated CPU emulator threads option requires a dedicated "
+                "CPU policy option.")
+
+
+class InvalidNetworkNUMAAffinity(Invalid):
+    msg_fmt = _("Invalid NUMA network affinity configured: %(reason)s")
+
+
+class PowerVMAPIFailed(NovaException):
+    msg_fmt = _("PowerVM API failed to complete for instance=%(inst_name)s.  "
+                "%(reason)s")
+
+
+class TraitRetrievalFailed(NovaException):
+    msg_fmt = _("Failed to retrieve traits from the placement API: %(error)s")
+
+
+class TraitCreationFailed(NovaException):
+    msg_fmt = _("Failed to create trait %(name)s: %(error)s")
+
+
+class CannotMigrateWithTargetHost(NovaException):
+    msg_fmt = _("Cannot migrate with target host. Retry without a host "
+                "specified.")
+
+
+class CannotMigrateToSameHost(NovaException):
+    msg_fmt = _("Cannot migrate to the host where the server exists.")
+
+
+class VirtDriverNotReady(NovaException):
+    msg_fmt = _("Virt driver is not ready.")
+
+
+class InstanceDiskMappingFailed(NovaException):
+    msg_fmt = _("Failed to map boot disk of instance %(instance_name)s to "
+                "the management partition from any Virtual I/O Server.")
+
+
+class NewMgmtMappingNotFoundException(NovaException):
+    msg_fmt = _("Failed to find newly-created mapping of storage element "
+                "%(stg_name)s from Virtual I/O Server %(vios_name)s to the "
+                "management partition.")
+
+
+class NoDiskDiscoveryException(NovaException):
+    msg_fmt = _("Having scanned SCSI bus %(bus)x on the management partition, "
+                "disk with UDID %(udid)s failed to appear after %(polls)d "
+                "polls over %(timeout)d seconds.")
+
+
+class UniqueDiskDiscoveryException(NovaException):
+    msg_fmt = _("Expected to find exactly one disk on the management "
+                "partition at %(path_pattern)s; found %(count)d.")
+
+
+class DeviceDeletionException(NovaException):
+    msg_fmt = _("Device %(devpath)s is still present on the management "
+                "partition after attempting to delete it. Polled %(polls)d "
+                "times over %(timeout)d seconds.")
+
+
+class OptRequiredIfOtherOptValue(NovaException):
+    msg_fmt = _("The %(then_opt)s option is required if %(if_opt)s is "
+                "specified as '%(if_value)s'.")
+
+
+class AllocationCreateFailed(NovaException):
+    msg_fmt = _('Failed to create allocations for instance %(instance)s '
+                'against resource provider %(provider)s.')
+
+
+class AllocationUpdateFailed(NovaException):
+    msg_fmt = _('Failed to update allocations for consumer %(consumer_uuid)s. '
+                'Error: %(error)s')
+
+
+class AllocationMoveFailed(NovaException):
+    msg_fmt = _('Failed to move allocations from consumer %(source_consumer)s '
+                'to consumer %(target_consumer)s. '
+                'Error: %(error)s')
+
+
+class AllocationDeleteFailed(NovaException):
+    msg_fmt = _('Failed to delete allocations for consumer %(consumer_uuid)s. '
+                'Error: %(error)s')
+
+
+class TooManyComputesForHost(NovaException):
+    msg_fmt = _('Unexpected number of compute node records '
+                '(%(num_computes)d) found for host %(host)s. There should '
+                'only be a one-to-one mapping.')
+
+
+class CertificateValidationFailed(NovaException):
+    msg_fmt = _("Image signature certificate validation failed for "
+                "certificate: %(cert_uuid)s. %(reason)s")
+
+
+class CertificateValidationNotYetAvailable(NovaException):
+    msg_fmt = _("Image signature certificate validation support is "
+                "not yet available.")
+    code = 409
+
+
+class InstanceRescueFailure(NovaException):
+    msg_fmt = _("Failed to move instance to rescue mode: %(reason)s")
+
+
+class InstanceUnRescueFailure(NovaException):
+    msg_fmt = _("Failed to unrescue instance: %(reason)s")
+
+
+class IronicAPIVersionNotAvailable(NovaException):
+    msg_fmt = _('Ironic API version %(version)s is not available.')
+
+
+class ZVMDriverException(NovaException):
+    msg_fmt = _("ZVM Driver has error: %(error)s")
+
+
+class ZVMConnectorError(ZVMDriverException):
+    msg_fmt = _("zVM Cloud Connector request failed: %(results)s")
+
+    def __init__(self, message=None, **kwargs):
+        """Exception for zVM ConnectorClient calls.
+
+        :param results: The object returned from ZVMConnector.send_request.
+        """
+        super(ZVMConnectorError, self).__init__(message=message, **kwargs)
+
+        results = kwargs.get('results', {})
+        self.overallRC = results.get('overallRC')
+        self.rc = results.get('rc')
+        self.rs = results.get('rs')
+        self.errmsg = results.get('errmsg')
+
+
+class NoResourceClass(NovaException):
+    msg_fmt = _("Resource class not found for Ironic node %(node)s.")
+
+
+class ResourceProviderAllocationRetrievalFailed(NovaException):
+    msg_fmt = _("Failed to retrieve allocations for resource provider "
+                "%(rp_uuid)s: %(error)s")
+
+
+class ConsumerAllocationRetrievalFailed(NovaException):
+    msg_fmt = _("Failed to retrieve allocations for consumer "
+                "%(consumer_uuid)s: %(error)s")
+
+
+class ReshapeFailed(NovaException):
+    msg_fmt = _("Resource provider inventory and allocation data migration "
+                "failed: %(error)s")
+
+
+class ReshapeNeeded(NovaException):
+    msg_fmt = _("Virt driver indicates that provider inventories need to be "
+                "moved.")

diff --git a/gosbs/i18n.py b/gosbs/i18n.py
new file mode 100644
index 0000000..f0a769d
--- /dev/null
+++ b/gosbs/i18n.py
@@ -0,0 +1,48 @@
+# Copyright 2014 IBM Corp.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/i18n.py
+
+"""oslo.i18n integration module.
+
+See https://docs.openstack.org/oslo.i18n/latest/user/index.html .
+
+"""
+
+import oslo_i18n
+
+DOMAIN = 'gosbs'
+
+_translators = oslo_i18n.TranslatorFactory(domain=DOMAIN)
+
+# The primary translation function using the well-known name "_"
+_ = _translators.primary
+
+# Translators for log levels.
+#
+# The abbreviated names are meant to reflect the usual use of a short
+# name like '_'. The "L" is for "log" and the other letter comes from
+# the level.
+_LI = _translators.log_info
+_LW = _translators.log_warning
+_LE = _translators.log_error
+_LC = _translators.log_critical
+
+
+def translate(value, user_locale):
+    return oslo_i18n.translate(value, user_locale)
+
+
+def get_available_languages():
+    return oslo_i18n.get_available_languages(DOMAIN)
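A quick sketch of how these helpers are meant to be used by the rest of gosbs
(the message text and locale below are illustrative only):

    from gosbs.i18n import _, get_available_languages, translate

    # Mark a message for translation in the 'gosbs' domain.
    msg = _("Repository %s is not synced") % "gentoo"

    # Translate it for a specific user locale; falls back to the original
    # text when no catalog for that locale is installed.
    print(translate(msg, user_locale='de_DE'))

    # Locales for which a 'gosbs' catalog is available.
    print(get_available_languages())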

diff --git a/gosbs/manager.py b/gosbs/manager.py
new file mode 100644
index 0000000..b8e1dda
--- /dev/null
+++ b/gosbs/manager.py
@@ -0,0 +1,149 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/manager.py
+
+"""Base Manager class.
+
+Managers are responsible for a certain aspect of the system.  It is a logical
+grouping of code relating to a portion of the system.  In general other
+components should be using the manager to make changes to the components that
+it is responsible for.
+
+For example, other components that need to deal with volumes in some way,
+should do so by calling methods on the VolumeManager instead of directly
+changing fields in the database.  This allows us to keep all of the code
+relating to volumes in the same place.
+
+We have adopted a basic strategy of Smart managers and dumb data, which means
+rather than attaching methods to data objects, components should call manager
+methods that act on the data.
+
+Methods on managers that can be executed locally should be called directly. If
+a particular method must execute on a remote host, this should be done via rpc
+to the service that wraps the manager.
+
+Managers should be responsible for most of the db access, and
+non-implementation specific data.  Anything implementation specific that can't
+be generalized should be done by the Driver.
+
+In general, we prefer to have one manager with multiple drivers for different
+implementations, but sometimes it makes sense to have multiple managers.  You
+can think of it this way: Abstract different overall strategies at the manager
+level(FlatNetwork vs VlanNetwork), and different implementations at the driver
+level(LinuxNetDriver vs CiscoNetDriver).
+
+Managers will often provide methods for initial setup of a host or periodic
+tasks to a wrapping service.
+
+This module provides Manager, a base class for managers.
+
+"""
+
+from oslo_service import periodic_task
+import six
+
+import gosbs.conf
+from gosbs.db import base
+#from gosbs import profiler
+from gosbs import rpc
+
+
+CONF = gosbs.conf.CONF
+
+
+class PeriodicTasks(periodic_task.PeriodicTasks):
+    def __init__(self):
+        super(PeriodicTasks, self).__init__(CONF)
+
+
+# NOTE: The upstream Nova version of this metaclass derives from
+# profiler.get_traced_meta(), but gosbs has not ported the profiler module
+# yet (its import above is commented out), so we fall back to the plain
+# PeriodicTasks metaclass to keep this module importable.
+class ManagerMeta(type(PeriodicTasks)):
+    """Metaclass for managers.
+
+    In Nova this metaclass wraps every public method (not starting with
+    _ or __) of the class using it for OSprofiler tracing; all children of
+    a class using ManagerMeta are profiled as well. That requires a
+    __trace_args__ attribute on the class: a dictionary with one mandatory
+    key, "name", naming the action to be traced (for example wsgi, rpc, db).
+
+    The OSprofiler-based tracing only happens if a profiler instance was
+    initiated earlier in the thread, which in turn requires profiling to be
+    enabled in the configuration and specific headers in the API call.
+    Tracing is disabled here until gosbs.profiler is available.
+    """
+
+
+@six.add_metaclass(ManagerMeta)
+class Manager(base.Base, PeriodicTasks):
+    __trace_args__ = {"name": "rpc"}
+
+    def __init__(self, host=None, service_name='undefined'):
+        if not host:
+            host = CONF.host
+        self.host = host
+        self.backdoor_port = None
+        self.service_name = service_name
+        self.notifier = rpc.get_notifier(self.service_name, self.host)
+        self.additional_endpoints = []
+        super(Manager, self).__init__()
+
+    def periodic_tasks(self, context, raise_on_error=False):
+        """Tasks to be run at a periodic interval."""
+        return self.run_periodic_tasks(context, raise_on_error=raise_on_error)
+
+    def init_host(self):
+        """Hook to do additional manager initialization when one requests
+        the service be started.  This is called before any service record
+        is created.
+
+        Child classes should override this method.
+        """
+        pass
+
+    def cleanup_host(self):
+        """Hook to do cleanup work when the service shuts down.
+
+        Child classes should override this method.
+        """
+        pass
+
+    def pre_start_hook(self):
+        """Hook to provide the manager the ability to do additional
+        start-up work before any RPC queues/consumers are created. This is
+        called after other initialization has succeeded and a service
+        record is created.
+
+        Child classes should override this method.
+        """
+        pass
+
+    def post_start_hook(self):
+        """Hook to provide the manager the ability to do additional
+        start-up work immediately after a service creates RPC consumers
+        and starts 'running'.
+
+        Child classes should override this method.
+        """
+        pass
+
+    def reset(self):
+        """Hook called on SIGHUP to signal the manager to re-read any
+        dynamic configuration or do any reconfiguration tasks.
+        """
+        pass
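A minimal sketch of how a service-specific manager would build on this base
class; BuilderManager and its periodic task are hypothetical and not part of
this commit, and instantiating a Manager assumes rpc/notifications are already
configured:

    from oslo_service import periodic_task

    from gosbs import manager


    class BuilderManager(manager.Manager):
        """Hypothetical manager for a builder service."""

        def __init__(self, host=None):
            super(BuilderManager, self).__init__(host=host,
                                                 service_name='builder')

        def init_host(self):
            # One-time setup before the service record is created.
            pass

        @periodic_task.periodic_task(spacing=60)
        def _check_repos(self, context):
            # Collected by PeriodicTasks and run via Manager.periodic_tasks(),
            # which the wrapping service calls on its periodic interval.
            pass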

diff --git a/gosbs/middleware.py b/gosbs/middleware.py
new file mode 100644
index 0000000..a796014
--- /dev/null
+++ b/gosbs/middleware.py
@@ -0,0 +1,39 @@
+# Copyright 2016 Hewlett Packard Enterprise Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#      http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/middleware.py
+
+from oslo_middleware import cors
+
+
+def set_defaults():
+    """Update default configuration options for oslo.middleware."""
+    cors.set_defaults(
+        allow_headers=['X-Auth-Token',
+                       'X-Gosbs-Request-Id',
+                       'X-Identity-Status',
+                       'X-Roles',
+                       'X-Service-Catalog',
+                       'X-User-Id',
+                       'X-Tenant-Id'],
+        expose_headers=['X-Auth-Token',
+                        'X-Gosbs-Request-Id',
+                        'X-Subject-Token',
+                        'X-Service-Token'],
+        allow_methods=['GET',
+                       'PUT',
+                       'POST',
+                       'DELETE',
+                       'PATCH']
+    )
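set_defaults() is intended to run once, early in service startup, before any
WSGI pipeline loads oslo.middleware's CORS filter; in upstream Nova the
analogous call lives in config.parse_args(). A sketch of that wiring (the
parse_args body here is illustrative only):

    from gosbs import middleware


    def parse_args(argv):
        # ... register options, set up logging, parse CONF ...
        middleware.set_defaults()  # apply the CORS header/method defaults above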

diff --git a/gosbs/objects/__init__.py b/gosbs/objects/__init__.py
new file mode 100644
index 0000000..e67fba4
--- /dev/null
+++ b/gosbs/objects/__init__.py
@@ -0,0 +1,52 @@
+#    Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/__init__.py
+
+# NOTE(comstud): You may scratch your head as you see code that imports
+# this module and then accesses attributes for objects such as Instance,
+# etc, yet you do not see these attributes in here. Never fear, there is
+# a little bit of magic. When objects are registered, an attribute is set
+# on this module automatically, pointing to the newest/latest version of
+# the object.
+
+
+def register_all():
+    # NOTE(danms): You must make sure your object gets imported in this
+    # function in order for it to be registered by services that may
+    # need to receive it via RPC.
+    __import__('gosbs.objects.build_iuse')
+    __import__('gosbs.objects.category')
+    __import__('gosbs.objects.category_metadata')
+    __import__('gosbs.objects.ebuild')
+    __import__('gosbs.objects.ebuild_metadata')
+    __import__('gosbs.objects.ebuild_iuse')
+    __import__('gosbs.objects.ebuild_keyword')
+    __import__('gosbs.objects.ebuild_restriction')
+    __import__('gosbs.objects.email')
+    __import__('gosbs.objects.keyword')
+    __import__('gosbs.objects.package')
+    __import__('gosbs.objects.package_metadata')
+    __import__('gosbs.objects.package_email')
+    __import__('gosbs.objects.project')
+    __import__('gosbs.objects.project_metadata')
+    __import__('gosbs.objects.project_build')
+    __import__('gosbs.objects.project_repo')
+    __import__('gosbs.objects.repo')
+    __import__('gosbs.objects.restriction')
+    __import__('gosbs.objects.task')
+    __import__('gosbs.objects.service')
+    __import__('gosbs.objects.service_repo')
+    __import__('gosbs.objects.use')
+    __import__('gosbs.objects.user')
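A small sketch of the registration "magic" described in the NOTE above: once
register_all() has run, every registered class is reachable as an attribute of
gosbs.objects, pointing at its newest version:

    from gosbs import objects

    objects.register_all()

    # BuildIUse was never imported here explicitly; the registry hook in
    # gosbs/objects/base.py set the attribute when its module was imported.
    print(objects.BuildIUse.VERSION)  # -> '1.0'
    build_iuse = objects.BuildIUse()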

diff --git a/gosbs/objects/base.py b/gosbs/objects/base.py
new file mode 100644
index 0000000..363269c
--- /dev/null
+++ b/gosbs/objects/base.py
@@ -0,0 +1,361 @@
+#    Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/base.py
+
+"""Nova common internal object model"""
+
+import contextlib
+import datetime
+import functools
+import traceback
+
+import netaddr
+import oslo_messaging as messaging
+from oslo_utils import versionutils
+from oslo_versionedobjects import base as ovoo_base
+from oslo_versionedobjects import exception as ovoo_exc
+import six
+
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import fields as obj_fields
+from gosbs import utils
+
+
+def get_attrname(name):
+    """Return the mangled name of the attribute's underlying storage."""
+    # FIXME(danms): This is just until we use o.vo's class properties
+    # and object base.
+    return '_obj_' + name
+
+
+class NovaObjectRegistry(ovoo_base.VersionedObjectRegistry):
+    notification_classes = []
+
+    def registration_hook(self, cls, index):
+        # NOTE(danms): This is called when an object is registered,
+        # and is responsible for maintaining nova.objects.$OBJECT
+        # as the highest-versioned implementation of a given object.
+        version = versionutils.convert_version_to_tuple(cls.VERSION)
+        if not hasattr(objects, cls.obj_name()):
+            setattr(objects, cls.obj_name(), cls)
+        else:
+            cur_version = versionutils.convert_version_to_tuple(
+                getattr(objects, cls.obj_name()).VERSION)
+            if version >= cur_version:
+                setattr(objects, cls.obj_name(), cls)
+
+    @classmethod
+    def register_notification(cls, notification_cls):
+        """Register a class as notification.
+        Use only to register concrete notification or payload classes;
+        do not register base classes intended for inheritance only.
+        """
+        cls.register_if(False)(notification_cls)
+        cls.notification_classes.append(notification_cls)
+        return notification_cls
+
+    @classmethod
+    def register_notification_objects(cls):
+        """Register previously decorated notification as normal ovos.
+        This is not intended for production use but only for testing and
+        document generation purposes.
+        """
+        for notification_cls in cls.notification_classes:
+            cls.register(notification_cls)
+
+
+remotable_classmethod = ovoo_base.remotable_classmethod
+remotable = ovoo_base.remotable
+obj_make_list = ovoo_base.obj_make_list
+NovaObjectDictCompat = ovoo_base.VersionedObjectDictCompat
+NovaTimestampObject = ovoo_base.TimestampedObject
+
+
+class NovaObject(ovoo_base.VersionedObject):
+    """Base class and object factory.
+
+    This forms the base of all objects that can be remoted or instantiated
+    via RPC. Simply defining a class that inherits from this base class
+    will make it remotely instantiatable. Objects should implement the
+    necessary "get" classmethod routines as well as "save" object methods
+    as appropriate.
+    """
+
+    OBJ_SERIAL_NAMESPACE = 'gosbs_object'
+    OBJ_PROJECT_NAMESPACE = 'gosbs'
+
+    # NOTE(ndipanov): This is nova-specific
+    @staticmethod
+    def should_migrate_data():
+        """A check that can be used to inhibit online migration behavior
+
+        This is usually used to check if all services that will be accessing
+        the db directly are ready for the new format.
+        """
+        raise NotImplementedError()
+
+    # NOTE(danms): This is nova-specific
+    @contextlib.contextmanager
+    def obj_alternate_context(self, context):
+        original_context = self._context
+        self._context = context
+        try:
+            yield
+        finally:
+            self._context = original_context
+
+    # NOTE(danms): This is nova-specific
+    @contextlib.contextmanager
+    def obj_as_admin(self):
+        """Context manager to make an object call as an admin.
+
+        This temporarily modifies the context embedded in an object to
+        be elevated() and restores it after the call completes. Example
+        usage:
+
+           with obj.obj_as_admin():
+               obj.save()
+
+        """
+        if self._context is None:
+            raise exception.OrphanedObjectError(method='obj_as_admin',
+                                                objtype=self.obj_name())
+
+        original_context = self._context
+        self._context = self._context.elevated()
+        try:
+            yield
+        finally:
+            self._context = original_context
+
+
+class NovaPersistentObject(object):
+    """Mixin class for Persistent objects.
+
+    This adds the fields that we use in common for most persistent objects.
+    """
+    fields = {
+        'created_at': obj_fields.DateTimeField(nullable=True),
+        'updated_at': obj_fields.DateTimeField(nullable=True),
+        'deleted_at': obj_fields.DateTimeField(nullable=True),
+        'deleted': obj_fields.BooleanField(default=False),
+        }
+
+class NovaPersistentObject2(object):
+    """Mixin class for Persistent objects.
+
+    This adds the fields that we use in common for most persistent objects.
+    """
+    fields = {
+        'created_at': obj_fields.DateTimeField(nullable=True),
+        'updated_at': obj_fields.DateTimeField(nullable=True),
+        }
+
+
+class ObjectListBase(ovoo_base.ObjectListBase):
+    # NOTE(danms): These are for transition to using the oslo
+    # base object and can be removed when we move to it.
+    @classmethod
+    def _obj_primitive_key(cls, field):
+        return 'gosbs_object.%s' % field
+
+    @classmethod
+    def _obj_primitive_field(cls, primitive, field,
+                             default=obj_fields.UnspecifiedDefault):
+        key = cls._obj_primitive_key(field)
+        if default == obj_fields.UnspecifiedDefault:
+            return primitive[key]
+        else:
+            return primitive.get(key, default)
+
+
+class NovaObjectSerializer(messaging.NoOpSerializer):
+    """A NovaObject-aware Serializer.
+
+    This implements the Oslo Serializer interface and provides the
+    ability to serialize and deserialize NovaObject entities. Any service
+    that needs to accept or return NovaObjects as arguments or result values
+    should pass this to its RPCClient and RPCServer objects.
+    """
+
+    @property
+    def conductor(self):
+        if not hasattr(self, '_conductor'):
+            from gosbs import conductor
+            self._conductor = conductor.API()
+        return self._conductor
+
+    def _process_object(self, context, objprim):
+        try:
+            objinst = NovaObject.obj_from_primitive(objprim, context=context)
+        except ovoo_exc.IncompatibleObjectVersion:
+            objver = objprim['gosbs_object.version']
+            if objver.count('.') == 2:
+                # NOTE(danms): For our purposes, the .z part of the version
+                # should be safe to accept without requiring a backport
+                objprim['gosbs_object.version'] = \
+                    '.'.join(objver.split('.')[:2])
+                return self._process_object(context, objprim)
+            objname = objprim['gosbs_object.name']
+            version_manifest = ovoo_base.obj_tree_get_versions(objname)
+            if objname in version_manifest:
+                objinst = self.conductor.object_backport_versions(
+                    context, objprim, version_manifest)
+            else:
+                raise
+        return objinst
+
+    def _process_iterable(self, context, action_fn, values):
+        """Process an iterable, taking an action on each value.
+        :param:context: Request context
+        :param:action_fn: Action to take on each item in values
+        :param:values: Iterable container of things to take action on
+        :returns: A new container of the same type (except set) with
+                  items from values having had action applied.
+        """
+        iterable = values.__class__
+        if issubclass(iterable, dict):
+            return iterable(**{k: action_fn(context, v)
+                            for k, v in values.items()})
+        else:
+            # NOTE(danms, gibi) A set can't have an unhashable value inside,
+            # such as a dict. Convert the set to list, which is fine, since we
+            # can't send them over RPC anyway. We convert it to list as this
+            # way there will be no semantic change between the fake rpc driver
+            # used in functional test and a normal rpc driver.
+            if iterable == set:
+                iterable = list
+            return iterable([action_fn(context, value) for value in values])
+
+    def serialize_entity(self, context, entity):
+        if isinstance(entity, (tuple, list, set, dict)):
+            entity = self._process_iterable(context, self.serialize_entity,
+                                            entity)
+        elif (hasattr(entity, 'obj_to_primitive') and
+              callable(entity.obj_to_primitive)):
+            entity = entity.obj_to_primitive()
+        return entity
+
+    def deserialize_entity(self, context, entity):
+        if isinstance(entity, dict) and 'gosbs_object.name' in entity:
+            entity = self._process_object(context, entity)
+        elif isinstance(entity, (tuple, list, set, dict)):
+            entity = self._process_iterable(context, self.deserialize_entity,
+                                            entity)
+        return entity
+
+
+def obj_to_primitive(obj):
+    """Recursively turn an object into a python primitive.
+
+    A NovaObject becomes a dict, and anything that implements ObjectListBase
+    becomes a list.
+    """
+    if isinstance(obj, ObjectListBase):
+        return [obj_to_primitive(x) for x in obj]
+    elif isinstance(obj, NovaObject):
+        result = {}
+        for key in obj.obj_fields:
+            if obj.obj_attr_is_set(key) or key in obj.obj_extra_fields:
+                result[key] = obj_to_primitive(getattr(obj, key))
+        return result
+    elif isinstance(obj, netaddr.IPAddress):
+        return str(obj)
+    elif isinstance(obj, netaddr.IPNetwork):
+        return str(obj)
+    else:
+        return obj
+
+
+def obj_make_dict_of_lists(context, list_cls, obj_list, item_key):
+    """Construct a dictionary of object lists, keyed by item_key.
+
+    :param:context: Request context
+    :param:list_cls: The ObjectListBase class
+    :param:obj_list: The list of objects to place in the dictionary
+    :param:item_key: The object attribute name to use as a dictionary key
+    """
+
+    obj_lists = {}
+    for obj in obj_list:
+        key = getattr(obj, item_key)
+        if key not in obj_lists:
+            obj_lists[key] = list_cls()
+            obj_lists[key].objects = []
+        obj_lists[key].objects.append(obj)
+    for key in obj_lists:
+        obj_lists[key]._context = context
+        obj_lists[key].obj_reset_changes()
+    return obj_lists
+
+
+def serialize_args(fn):
+    """Decorator that will do the arguments serialization before remoting."""
+    def wrapper(obj, *args, **kwargs):
+        args = [utils.strtime(arg) if isinstance(arg, datetime.datetime)
+                else arg for arg in args]
+        for k, v in kwargs.items():
+            if k == 'exc_val' and v:
+                kwargs[k] = six.text_type(v)
+            elif k == 'exc_tb' and v and not isinstance(v, six.string_types):
+                kwargs[k] = ''.join(traceback.format_tb(v))
+            elif isinstance(v, datetime.datetime):
+                kwargs[k] = utils.strtime(v)
+        if hasattr(fn, '__call__'):
+            return fn(obj, *args, **kwargs)
+        # NOTE(danms): We wrap a descriptor, so use that protocol
+        return fn.__get__(None, obj)(*args, **kwargs)
+
+    # NOTE(danms): Make this discoverable
+    wrapper.remotable = getattr(fn, 'remotable', False)
+    wrapper.original_fn = fn
+    return (functools.wraps(fn)(wrapper) if hasattr(fn, '__call__')
+            else classmethod(wrapper))
+
+
+def obj_equal_prims(obj_1, obj_2, ignore=None):
+    """Compare two primitives for equivalence ignoring some keys.
+
+    This operation tests the primitives of two objects for equivalence.
+    Object primitives may contain a list identifying fields that have been
+    changed - this is ignored in the comparison. The ignore parameter lists
+    any other keys to be ignored.
+
+    :param:obj_1: The first object in the comparison
+    :param:obj_2: The second object in the comparison
+    :param:ignore: A list of fields to ignore
+    :returns: True if the primitives are equal ignoring changes
+    and specified fields, otherwise False.
+    """
+
+    def _strip(prim, keys):
+        if isinstance(prim, dict):
+            for k in keys:
+                prim.pop(k, None)
+            for v in prim.values():
+                _strip(v, keys)
+        if isinstance(prim, list):
+            for v in prim:
+                _strip(v, keys)
+        return prim
+
+    if ignore is not None:
+        keys = ['gosbs_object.changes'] + ignore
+    else:
+        keys = ['gosbs_object.changes']
+    prim_1 = _strip(obj_1.obj_to_primitive(), keys)
+    prim_2 = _strip(obj_2.obj_to_primitive(), keys)
+    return prim_1 == prim_2
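A minimal sketch of the module-level helpers above, using the BuildIUse object
added later in this commit (the field values and UUID are illustrative; no
database access is involved):

    from gosbs import objects
    from gosbs.objects import base

    objects.register_all()

    uuid = 'a0eebc99-9c0b-4ef8-bb6d-6bb9bd380a11'
    a = objects.BuildIUse(id=1, use_id=2, status=True, build_uuid=uuid)
    b = objects.BuildIUse(id=1, use_id=2, status=True, build_uuid=uuid)

    # obj_to_primitive() reduces the object to plain python types.
    print(base.obj_to_primitive(a))    # -> {'id': 1, 'use_id': 2, ...}

    # obj_equal_prims() compares primitives while ignoring the
    # 'gosbs_object.changes' bookkeeping key.
    print(base.obj_equal_prims(a, b))  # -> True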

diff --git a/gosbs/objects/build_iuse.py b/gosbs/objects/build_iuse.py
new file mode 100644
index 0000000..4aa5342
--- /dev/null
+++ b/gosbs/objects/build_iuse.py
@@ -0,0 +1,280 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It still needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PROJECT_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(build_iuse_model):
+    extra_specs = {}
+    return dict(build_iuse_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _build_iuse_create(context, values):
+    db_build_iuse = models.BuildsIUses()
+    db_build_iuse.update(values)
+
+    try:
+        db_build_iuse.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'build_iuseid' in e.columns:
+            raise exception.ImagesIdExists(build_iuse_id=values['build_iuseid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_build_iuse)
+
+
+@db_api.main_context_manager.writer
+def _build_iuse_destroy(context, build_iuse_id=None, build_iuseid=None):
+    query = context.session.query(models.BuildsIUses)
+
+    if build_iuse_id is not None:
+        query.filter(models.BuildsIUses.id == build_iuse_id).delete()
+    else:
+        query.filter(models.BuildsIUses.id == build_iuseid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class BuildIUse(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'build_uuid': fields.UUIDField(),
+        'use_id': fields.IntegerField(),
+        'status' : fields.BooleanField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(BuildIUse, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_build_iuses = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(BuildIUse, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, build_iuse, db_build_iuse, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        build_iuse._context = context
+        for name, field in build_iuse.fields.items():
+            value = db_build_iuse[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            build_iuse[name] = value
+
+        build_iuse.obj_reset_changes()
+        return build_iuse
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _build_iuse_get_query_from_db(context):
+        query = context.session.query(models.BuildsIUses)
+        return query
+
+    @staticmethod
+    @require_context
+    def _build_iuse_get_from_db(context, id):
+        """Returns a dict describing specific build_iuses."""
+        result = BuildIUse._build_iuse_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(build_iuse_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _build_iuse_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = BuildIUse._build_iuse_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(build_iuses_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(BuildIUse, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(BuildIUse, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_build_iuse = cls._build_iuse_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_build_iuse,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_build_iuse = cls._build_iuse_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_build_iuse,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _build_iuse_create(context, updates):
+        return _build_iuse_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_build_iuse = self._build_iuse_create(context, updates)
+        self._from_db_object(context, self, db_build_iuse)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a build_iuses.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_build_iuse = context.session.query(models.BuildsIUses).\
+            filter_by(id=self.id).first()
+        if not db_build_iuse:
+            raise exception.ImagesNotFound(build_iuse_id=self.id)
+        db_build_iuse.update(values)
+        db_build_iuse.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_build_iuse)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _build_iuse_destroy(context, build_iuse_id=None, build_iuseid=None):
+        _build_iuse_destroy(context, build_iuse_id=build_iuse_id, build_iuseid=build_iuseid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a build_iuses
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a build_iuses object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._build_iuse_destroy(context, build_iuse_id=self.id)
+        else:
+            self._build_iuse_destroy(context, build_iuseid=self.build_iuseid)
+        #self._from_db_object(context, self, db_build_iuse)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+
+        db_build_iuse = BuildIUse._build_iuse_get_query_from_db(context)
+
+        if 'status' in filters:
+            db_build_iuse = db_build_iuse.filter(
+                models.BuildsIUses.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_build_iuse,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _build_iuse_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all build_iusess.
+    """
+    filters = filters or {}
+
+    query = BuildIUse._build_iuse_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.BuildsIUses.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = BuildIUse._build_iuse_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.BuildsIUses,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class BuildIUseList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('BuildIUse'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_build_iuses = _build_iuse_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.build_iuse.BuildIUse,
+                                  db_build_iuses,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.BuildsIUses).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_build_iuse = context.session.query(models.BuildsIUses).filter_by(auto=True)
+        db_build_iuse.update(values)

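For reference, a minimal usage sketch of the BuildIUse list object above -- assuming the database options in gosbs.conf are configured and that the caller already has a RequestContext (for example from gosbs.context.get_admin_context(), which the Nova-derived context code is expected to provide). The status value to filter on depends on the BuildsIUses model and is left to the caller:

    from gosbs.objects import build_iuse

    def list_build_iuses(ctxt, status=None, limit=50, marker=None):
        # Page through BuildsIUses rows, optionally filtered on status.
        # 'marker' is the id of the last row from the previous page.
        filters = {'status': status} if status is not None else {}
        return build_iuse.BuildIUseList.get_all(
            ctxt, filters=filters, sort_key='id', sort_dir='asc',
            limit=limit, marker=marker)
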
diff --git a/gosbs/objects/category.py b/gosbs/objects/category.py
new file mode 100644
index 0000000..c7659c5
--- /dev/null
+++ b/gosbs/objects/category.py
@@ -0,0 +1,278 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It still needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+CATEGORY_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(category_model):
+    extra_specs = {}
+    return dict(category_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _category_create(context, values):
+    db_category = models.Categories()
+    db_category.update(values)
+
+    try:
+        db_category.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'categoryid' in e.columns:
+            raise exception.ImagesIdExists(category_id=values['categoryid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_category)
+
+
+@db_api.main_context_manager.writer
+def _category_destroy(context, category_id=None, categoryid=None):
+    query = context.session.query(models.Categories)
+
+    if category_id is not None:
+        query.filter(models.Categories.uuid == category_id).delete()
+    else:
+        query.filter(models.Categories.uuid == categoryid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Category(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'name': fields.StringField(),
+        'status' : fields.EnumField(valid_values=CATEGORY_STATUS),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Category, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_categorys = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Category, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, category, db_category, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        category._context = context
+        for name, field in category.fields.items():
+            value = db_category[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            category[name] = value
+        
+        category.obj_reset_changes()
+        return category
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _category_get_query_from_db(context):
+        query = context.session.query(models.Categories)
+        return query
+
+    @staticmethod
+    @require_context
+    def _category_get_from_db(context, uuid):
+        """Returns a dict describing specific categorys."""
+        result = Category._category_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(category_uuid=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _category_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Category._category_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Category, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Category, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_category = cls._category_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_category,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_category = cls._category_get_by_name_from_db(context, name)
+        if not db_category:
+            return None
+        return cls._from_db_object(context, cls(context), db_category,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _category_create(context, updates):
+        return _category_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_category = self._category_create(context, updates)
+        self._from_db_object(context, self, db_category)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a category.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_category = context.session.query(models.Categories).\
+            filter_by(uuid=self.uuid).first()
+        if not db_category:
+            raise exception.ImagesNotFound(category_id=self.uuid)
+        db_category.update(values)
+        db_category.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_category)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _category_destroy(context, category_id=None, categoryid=None):
+        _category_destroy(context, category_id=category_id, categoryid=categoryid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a category
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a category object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'uuid' in self:
+            self._category_destroy(context, category_id=self.uuid)
+        else:
+            self._category_destroy(context, categoryid=self.categoryid)
+        #self._from_db_object(context, self, db_category)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_category = Category._category_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_category = db_category.filter(
+                models.Categories.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_category,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _category_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all categoryss.
+    """
+    filters = filters or {}
+
+    query = Category._category_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.Categories.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Category._category_get_query_from_db(context).\
+                    filter_by(uuid=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Categories,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class CategoryList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Category'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+        db_categorys = _category_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.category.Category,
+                                  db_categorys,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Categories).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_category = context.session.query(models.Categories).filter_by(auto=True)
+        db_category.update(values)

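A rough sketch of how the Category object above could be driven from a task. get_admin_context() from gosbs.context and the uuid generation via oslo.utils are assumptions about how callers obtain a context and a row uuid; the field names come from the object definition above:

    from oslo_utils import uuidutils

    from gosbs import context as gosbs_context
    from gosbs.objects import category as category_obj

    def ensure_category(name):
        ctxt = gosbs_context.get_admin_context()
        # get_by_name() returns None when the category is not in the db yet.
        cat = category_obj.Category.get_by_name(ctxt, name)
        if cat is None:
            cat = category_obj.Category(ctxt)
            cat.uuid = uuidutils.generate_uuid()
            cat.name = name
            cat.status = 'waiting'      # one of CATEGORY_STATUS
            cat.create(ctxt)
        else:
            cat.status = 'in-progress'
            cat.save(ctxt)              # writes only the changed fields
        return cat
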
diff --git a/gosbs/objects/category_metadata.py b/gosbs/objects/category_metadata.py
new file mode 100644
index 0000000..76eff64
--- /dev/null
+++ b/gosbs/objects/category_metadata.py
@@ -0,0 +1,278 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It still needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(category_metadata_model):
+    extra_specs = {}
+    return dict(category_metadata_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _category_metadata_create(context, values):
+    db_category_metadata = models.CategoriesMetadata()
+    db_category_metadata.update(values)
+
+    try:
+        db_category_metadata.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'category_metadataid' in e.columns:
+            raise exception.ImagesIdExists(category_metadata_id=values['category_metadataid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_category_metadata)
+
+
+@db_api.main_context_manager.writer
+def _category_metadata_destroy(context, category_metadata_id=None, category_metadataid=None):
+    query = context.session.query(models.CategoriesMetadata)
+
+    if category_metadata_id is not None:
+        query.filter(models.CategoriesMetadata.id == category_metadata_id).delete()
+    else:
+        query.filter(models.CategoriesMetadata.id == category_metadataid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class CategoryMetadata(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'category_uuid': fields.UUIDField(),
+        'description' : fields.StringField(),
+        'checksum': fields.StringField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(CategoryMetadata, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_category_metadatas = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(CategoryMetadata, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, category_metadata, db_category_metadata, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        category_metadata._context = context
+        for name, field in category_metadata.fields.items():
+            value = db_category_metadata[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            category_metadata[name] = value
+        
+        category_metadata.obj_reset_changes()
+        return category_metadata
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _category_metadata_get_query_from_db(context):
+        query = context.session.query(models.CategoriesMetadata)
+        return query
+
+    @staticmethod
+    @require_context
+    def _category_metadata_get_from_db(context, uuid):
+        """Returns a dict describing specific category_metadatas."""
+        result = CategoryMetadata._category_metadata_get_query_from_db(context).\
+                        filter_by(category_uuid=uuid).\
+                        first()
+        return result
+
+    @staticmethod
+    @require_context
+    def _category_metadata_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = CategoryMetadata._category_metadata_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(category_metadatas_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(CategoryMetadata, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(CategoryMetadata, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_category_metadata = cls._category_metadata_get_from_db(context, uuid)
+        if not db_category_metadata:
+            return None
+        return cls._from_db_object(context, cls(context), db_category_metadata,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_category_metadata = cls._category_metadata_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_category_metadata,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _category_metadata_create(context, updates):
+        return _category_metadata_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_category_metadata = self._category_metadata_create(context, updates)
+        self._from_db_object(context, self, db_category_metadata)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a category metadata row.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_category_metadata = context.session.query(models.CategoriesMetadata).\
+            filter_by(id=self.id).first()
+        if not db_category_metadata:
+            raise exception.ImagesNotFound(category_metadata_id=self.id)
+        db_category_metadata.update(values)
+        db_category_metadata.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_category_metadata)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _category_metadata_destroy(context, category_metadata_id=None, category_metadataid=None):
+        _category_metadata_destroy(context, category_metadata_id=category_metadata_id, category_metadataid=category_metadataid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a category metadata row
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a category metadata object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._category_metadata_destroy(context, category_metadata_id=self.id)
+        else:
+            self._category_metadata_destroy(context, category_metadataid=self.category_metadataid)
+        #self._from_db_object(context, self, db_category_metadata)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_category_metadata = CategoryMetadata._category_metadata_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_category_metadata = db_category_metadata.filter(
+                models.CategoriesMetadata.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_category_metadata,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _category_metadata_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all category_metadatass.
+    """
+    filters = filters or {}
+
+    query = CategoryMetadata._category_metadata_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.CategoriesMetadata.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = CategoryMetadata._category_metadata_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.CategoriesMetadata,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class CategoryMetadataList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('CategoryMetadata'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_category_metadatas = _category_metadata_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.category_metadata.CategoryMetadata,
+                                  db_category_metadatas,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.CategoriesMetadata).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_category_metadata = context.session.query(models.CategoriesMetadata).filter_by(auto=True)
+        db_category_metadata.update(values)

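In the same spirit, a sketch of the checksum pattern the metadata object above is meant for (category_uuid, description and checksum are values the caller is assumed to have computed, e.g. from the repository tree):

    from gosbs.objects import category_metadata

    def sync_category_metadata(ctxt, category_uuid, description, checksum):
        # get_by_uuid() looks the row up by category_uuid, returning None if missing.
        md = category_metadata.CategoryMetadata.get_by_uuid(ctxt, category_uuid)
        if md is None:
            md = category_metadata.CategoryMetadata(ctxt)
            md.category_uuid = category_uuid
            md.description = description
            md.checksum = checksum
            md.create(ctxt)
        elif md.checksum != checksum:
            md.description = description
            md.checksum = checksum
            md.save(ctxt)
        return md
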
diff --git a/gosbs/objects/ebuild.py b/gosbs/objects/ebuild.py
new file mode 100644
index 0000000..4be3c64
--- /dev/null
+++ b/gosbs/objects/ebuild.py
@@ -0,0 +1,288 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It still needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PROJECT_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(ebuild_model):
+    extra_specs = {}
+    return dict(ebuild_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_create(context, values):
+    db_ebuild = models.Ebuilds()
+    db_ebuild.update(values)
+
+    try:
+        db_ebuild.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'ebuildid' in e.columns:
+            raise exception.ImagesIdExists(ebuild_id=values['ebuildid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_ebuild)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_destroy(context, ebuild_id=None, ebuildid=None):
+    query = context.session.query(models.Ebuilds)
+
+    if ebuild_id is not None:
+        query.filter(models.Ebuilds.uuid == ebuild_id).delete()
+    else:
+        query.filter(models.Ebuilds.uuid == ebuildid).delete()
+
+
+@base.NovaObjectRegistry.register
+class Ebuild(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'package_uuid': fields.UUIDField(),
+        'version' : fields.StringField(nullable=True),
+        'checksum': fields.StringField(nullable=True),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Ebuild, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_ebuilds = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Ebuild, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, ebuild, db_ebuild, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        ebuild._context = context
+        for name, field in ebuild.fields.items():
+            value = db_ebuild[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            ebuild[name] = value
+        
+        ebuild.obj_reset_changes()
+        return ebuild
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _ebuild_get_query_from_db(context):
+        query = context.session.query(models.Ebuilds)
+        return query
+
+    @staticmethod
+    @require_context
+    def _ebuild_get_from_db(context, uuid):
+        """Returns a dict describing specific ebuilds."""
+        result = Ebuild._ebuild_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(ebuild_id=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _ebuild_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Ebuild._ebuild_get_query_from_db(context).\
+                            filter_by(version=name).\
+                            first()
+        if not result:
+            return None
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Ebuild, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Ebuild, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_ebuild = cls._ebuild_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_ebuild,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, version, filters=None):
+        filters = filters or {}
+        db_ebuild = Ebuild._ebuild_get_query_from_db(context)
+        db_ebuild = db_ebuild.filter_by(version=version)
+        if 'deleted' in filters:
+            db_ebuild = db_ebuild.filter(
+                models.Ebuilds.deleted == filters['deleted'])
+        if 'package_uuid' in filters:
+            db_ebuild = db_ebuild.filter(
+                models.Ebuilds.package_uuid == filters['package_uuid'])
+        db_ebuild = db_ebuild.first()
+        if not db_ebuild:
+            return None
+        return cls._from_db_object(context, cls(context), db_ebuild,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _ebuild_create(context, updates):
+        return _ebuild_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_ebuild = self._ebuild_create(context, updates)
+        self._from_db_object(context, self, db_ebuild)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an ebuild.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_ebuild = context.session.query(models.Ebuilds).\
+            filter_by(uuid=self.uuid).first()
+        if not db_ebuild:
+            raise exception.ImagesNotFound(ebuild_id=self.uuid)
+        db_ebuild.update(values)
+        db_ebuild.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_ebuild)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _ebuild_destroy(context, ebuild_id=None, ebuildid=None):
+        _ebuild_destroy(context, ebuild_id=ebuild_id, ebuildid=ebuildid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an ebuild
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an ebuild object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'uuid' in self:
+            self._ebuild_destroy(context, ebuild_id=self.uuid)
+        else:
+            self._ebuild_destroy(context, ebuildid=self.ebuildid)
+        #self._from_db_object(context, self, db_ebuild)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_ebuild = Ebuild._ebuild_get_query_from_db(context)
+    
+        return cls._from_db_object(context, cls(context), db_ebuild,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _ebuild_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all ebuildss.
+    """
+    filters = filters or {}
+
+    query = Ebuild._ebuild_get_query_from_db(context)
+
+    if 'deleted' in filters:
+        query = query.filter(
+            models.Ebuilds.deleted == filters['deleted'])
+    if 'package_uuid' in filters:
+        query = query.filter(
+            models.Ebuilds.package_uuid == filters['package_uuid'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Ebuild._ebuild_get_query_from_db(context).\
+                    filter_by(uuid=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Ebuilds,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class EbuildList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Ebuild'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+        db_ebuilds = _ebuild_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.ebuild.Ebuild,
+                                  db_ebuilds,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Ebuilds).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_ebuild = context.session.query(models.Ebuilds).filter_by(auto=True)
+        db_ebuild.update(values)

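A sketch of looking up a single ebuild row by version within a package. The package uuid is assumed to come from the corresponding package object, and passing False for the 'deleted' filter is an assumption about how soft deletes are stored in the Ebuilds model:

    from gosbs.objects import ebuild as ebuild_obj

    def find_ebuild(ctxt, package_uuid, version):
        # get_by_name() filters on the version string plus the optional
        # 'deleted' and 'package_uuid' filters; it returns None on no match.
        return ebuild_obj.Ebuild.get_by_name(
            ctxt, version,
            filters={'deleted': False, 'package_uuid': package_uuid})
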
diff --git a/gosbs/objects/ebuild_iuse.py b/gosbs/objects/ebuild_iuse.py
new file mode 100644
index 0000000..6a8c568
--- /dev/null
+++ b/gosbs/objects/ebuild_iuse.py
@@ -0,0 +1,280 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It still needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PROJECT_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(ebuild_iuse_model):
+    extra_specs = {}
+    return dict(ebuild_iuse_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_iuse_create(context, values):
+    db_ebuild_iuse = models.EbuildsIUses()
+    db_ebuild_iuse.update(values)
+
+    try:
+        db_ebuild_iuse.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'ebuild_iuseid' in e.columns:
+            raise exception.ImagesIdExists(ebuild_iuse_id=values['ebuild_iuseid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_ebuild_iuse)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_iuse_destroy(context, ebuild_iuse_id=None, ebuild_iuseid=None):
+    query = context.session.query(models.EbuildsIUses)
+
+    if ebuild_iuse_id is not None:
+        query.filter(models.EbuildsIUses.id == ebuild_iuse_id).delete()
+    else:
+        query.filter(models.EbuildsIUses.id == ebuild_iuseid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class EbuildIUse(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'ebuild_uuid': fields.UUIDField(),
+        'use_id': fields.IntegerField(),
+        'status' : fields.BooleanField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(EbuildIUse, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_ebuild_iuses = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(EbuildIUse, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, ebuild_iuse, db_ebuild_iuse, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        ebuild_iuse._context = context
+        for name, field in ebuild_iuse.fields.items():
+            value = db_ebuild_iuse[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            ebuild_iuse[name] = value
+        
+        ebuild_iuse.obj_reset_changes()
+        return ebuild_iuse
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _ebuild_iuse_get_query_from_db(context):
+        query = context.session.query(models.EbuildsIUses)
+        return query
+
+    @staticmethod
+    @require_context
+    def _ebuild_iuse_get_from_db(context, id):
+        """Returns a dict describing specific ebuild_iuses."""
+        result = EbuildIUse._ebuild_iuse_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(ebuild_iuse_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _ebuild_iuse_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = EbuildIUse._ebuild_iuse_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(ebuild_iuses_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(EbuildIUse, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(EbuildIUse, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_ebuild_iuse = cls._ebuild_iuse_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_ebuild_iuse,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_ebuild_iuse = cls._ebuild_iuse_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_ebuild_iuse,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _ebuild_iuse_create(context, updates):
+        return _ebuild_iuse_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_ebuild_iuse = self._ebuild_iuse_create(context, updates)
+        self._from_db_object(context, self, db_ebuild_iuse)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an ebuild_iuse.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_ebuild_iuse = context.session.query(models.EbuildsIUses).\
+            filter_by(id=self.id).first()
+        if not db_ebuild_iuse:
+            raise exception.ImagesNotFound(ebuild_iuse_id=self.id)
+        db_ebuild_iuse.update(values)
+        db_ebuild_iuse.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_ebuild_iuse)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _ebuild_iuse_destroy(context, ebuild_iuse_id=None, ebuild_iuseid=None):
+        _ebuild_iuse_destroy(context, ebuild_iuse_id=ebuild_iuse_id, ebuild_iuseid=ebuild_iuseid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an ebuild_iuse
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an ebuild_iuse object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._ebuild_iuse_destroy(context, ebuild_iuse_id=self.id)
+        else:
+            self._ebuild_iuse_destroy(context, ebuild_iuseid=self.ebuild_iuseid)
+        #self._from_db_object(context, self, db_ebuild_iuse)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_ebuild_iuse = EbuildIUse._ebuild_iuse_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_ebuild_iuse = db_ebuild_iuse.filter(
+                models.EbuildsIUses.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_ebuild_iuse,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _ebuild_iuse_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all ebuild_iusess.
+    """
+    filters = filters or {}
+
+    query = EbuildIUse._ebuild_iuse_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.EbuildsIUses.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = EbuildIUse._ebuild_iuse_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.EbuildsIUses,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class EbuildIUseList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('EbuildIUse'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_ebuild_iuses = _ebuild_iuse_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.ebuild_iuse.EbuildIUse,
+                                  db_ebuild_iuses,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.EbuildsIUses).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_ebuild_iuse = context.session.query(models.EbuildsIUses).filter_by(auto=True)
+        db_ebuild_iuse.update(values)

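A sketch of recording one IUSE entry for an ebuild. ebuild_uuid and use_id are assumed to come from the Ebuild and Use objects elsewhere in this series; the boolean status mirrors the EbuildsIUses model column:

    from gosbs.objects import ebuild_iuse

    def add_ebuild_iuse(ctxt, ebuild_uuid, use_id, enabled):
        iuse = ebuild_iuse.EbuildIUse(ctxt)
        iuse.ebuild_uuid = ebuild_uuid
        iuse.use_id = use_id
        iuse.status = enabled   # BooleanField; semantics defined by the model
        iuse.create(ctxt)
        return iuse
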
diff --git a/gosbs/objects/ebuild_keyword.py b/gosbs/objects/ebuild_keyword.py
new file mode 100644
index 0000000..dfde6e7
--- /dev/null
+++ b/gosbs/objects/ebuild_keyword.py
@@ -0,0 +1,280 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It still needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+KEYWORD_STATUS = ['stable','unstable','negative']
+
+def _dict_with_extra_specs(ebuild_keyword_model):
+    extra_specs = {}
+    return dict(ebuild_keyword_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_keyword_create(context, values):
+    db_ebuild_keyword = models.EbuildsKeywords()
+    db_ebuild_keyword.update(values)
+
+    try:
+        db_ebuild_keyword.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'ebuild_keywordid' in e.columns:
+            raise exception.ImagesIdExists(ebuild_keyword_id=values['ebuild_keywordid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_ebuild_keyword)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_keyword_destroy(context, ebuild_keyword_id=None, ebuild_keywordid=None):
+    query = context.session.query(models.EbuildsKeywords)
+
+    if ebuild_keyword_id is not None:
+        query.filter(models.EbuildsKeywords.id == ebuild_keyword_id).delete()
+    else:
+        query.filter(models.EbuildsKeywords.id == ebuild_keywordid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class EbuildKeyword(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'ebuild_uuid': fields.UUIDField(),
+        'keyword_id': fields.IntegerField(),
+        'status' : fields.EnumField(valid_values=KEYWORD_STATUS),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(EbuildKeyword, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_ebuild_keywords = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(EbuildKeyword, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, ebuild_keyword, db_ebuild_keyword, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        ebuild_keyword._context = context
+        for name, field in ebuild_keyword.fields.items():
+            value = db_ebuild_keyword[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            ebuild_keyword[name] = value
+        
+        ebuild_keyword.obj_reset_changes()
+        return ebuild_keyword
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _ebuild_keyword_get_query_from_db(context):
+        query = context.session.query(models.EbuildsKeywords)
+        return query
+
+    @staticmethod
+    @require_context
+    def _ebuild_keyword_get_from_db(context, id):
+        """Returns a dict describing specific ebuild_keywords."""
+        result = EbuildKeyword._ebuild_keyword_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(ebuild_keyword_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _ebuild_keyword_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = EbuildKeyword._ebuild_keyword_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(ebuild_keywords_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(EbuildKeyword, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(EbuildKeyword, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_ebuild_keyword = cls._ebuild_keyword_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_ebuild_keyword,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_ebuild_keyword = cls._ebuild_keyword_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_ebuild_keyword,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _ebuild_keyword_create(context, updates):
+        return _ebuild_keyword_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_ebuild_keyword = self._ebuild_keyword_create(context, updates)
+        self._from_db_object(context, self, db_ebuild_keyword)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an ebuild_keyword.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_ebuild_keyword = context.session.query(models.EbuildsKeywords).\
+            filter_by(id=self.id).first()
+        if not db_ebuild_keyword:
+            raise exception.ImagesNotFound(ebuild_keyword_id=self.id)
+        db_ebuild_keyword.update(values)
+        db_ebuild_keyword.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_ebuild_keyword)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _ebuild_keyword_destroy(context, ebuild_keyword_id=None, ebuild_keywordid=None):
+        _ebuild_keyword_destroy(context, ebuild_keyword_id=ebuild_keyword_id, ebuild_keywordid=ebuild_keywordid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an ebuild_keyword
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an ebuild_keyword object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._ebuild_keyword_destroy(context, ebuild_keyword_id=self.id)
+        else:
+            self._ebuild_keyword_destroy(context, ebuild_keywordid=self.ebuild_keywordid)
+        #self._from_db_object(context, self, db_ebuild_keyword)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_ebuild_keyword = EbuildKeyword._ebuild_keyword_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_ebuild_keyword = db_ebuild_keyword.filter(
+                models.EbuildsKeywords.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_ebuild_keyword,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _ebuild_keyword_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all ebuild_keywordss.
+    """
+    filters = filters or {}
+
+    query = EbuildKeyword._ebuild_keyword_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.EbuildsKeywords.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = EbuildKeyword._ebuild_keyword_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.EbuildsKeywords,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class EbuildKeywordList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('EbuildKeyword'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_ebuild_keywords = _ebuild_keyword_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.ebuild_keyword.EbuildKeyword,
+                                  db_ebuild_keywords,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.EbuildsKeywords).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_ebuild_keyword = context.session.query(models.EbuildsKeywords).filter_by(auto=True)
+        db_ebuild_keyword.update(values)
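
For reference, here is a minimal usage sketch for the EbuildKeyword objects defined above (not part of this commit). It assumes a RequestContext is available from gosbs/context.py, here through a get_admin_context() helper as in the Nova code this is derived from, and that the keywords table has already been populated by the scheduler.

    # Hypothetical sketch only; get_admin_context() is an assumed helper.
    from gosbs import context as gosbs_context
    from gosbs.objects.ebuild_keyword import EbuildKeywordList

    ctxt = gosbs_context.get_admin_context()
    # List keyword rows that are still waiting to be processed.
    waiting = EbuildKeywordList.get_all(ctxt, filters={'status': 'waiting'})
    for ebuild_keyword in waiting:
        # 'id' is declared in EbuildKeyword.fields earlier in this file.
        print(ebuild_keyword.id)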

diff --git a/gosbs/objects/ebuild_metadata.py b/gosbs/objects/ebuild_metadata.py
new file mode 100644
index 0000000..9a886ee
--- /dev/null
+++ b/gosbs/objects/ebuild_metadata.py
@@ -0,0 +1,282 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PROJECT_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(ebuild_metadata_model):
+    extra_specs = {}
+    return dict(ebuild_metadata_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_metadata_create(context, values):
+    db_ebuild_metadata = models.EbuildsMetadata()
+    db_ebuild_metadata.update(values)
+
+    try:
+        db_ebuild_metadata.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'ebuild_metadataid' in e.columns:
+            raise exception.ImagesIdExists(ebuild_metadata_id=values['ebuild_metadataid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_ebuild_metadata)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_metadata_destroy(context, ebuild_metadata_id=None, ebuild_metadataid=None):
+    query = context.session.query(models.EbuildsMetadata)
+
+    if ebuild_metadata_id is not None:
+        query.filter(models.EbuildsMetadata.uuid == ebuild_metadata_id).delete()
+    else:
+        query.filter(models.EbuildsMetadata.uuid == ebuild_metadataid).delete()
+
+
+@base.NovaObjectRegistry.register
+class EbuildMetadata(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'ebuild_uuid': fields.UUIDField(),
+        'commit': fields.StringField(nullable=True),
+        'commit_msg' : fields.StringField(nullable=True),
+        'description' : fields.StringField(nullable=True),
+        'slot': fields.StringField(nullable=True),
+        'homepage': fields.StringField(nullable=True),
+        'license': fields.StringField(nullable=True),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(EbuildMetadata, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_ebuild_metadatas = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(EbuildMetadata, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, ebuild_metadata, db_ebuild_metadata, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        ebuild_metadata._context = context
+        for name, field in ebuild_metadata.fields.items():
+            value = db_ebuild_metadata[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            ebuild_metadata[name] = value
+
+        ebuild_metadata.obj_reset_changes()
+        return ebuild_metadata
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _ebuild_metadata_get_query_from_db(context):
+        query = context.session.query(models.EbuildsMetadata)
+        return query
+
+    @staticmethod
+    @require_context
+    def _ebuild_metadata_get_from_db(context, uuid):
+        """Returns a dict describing specific ebuild_metadatas."""
+        result = EbuildMetadata._ebuild_metadata_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        return result
+
+    @staticmethod
+    @require_context
+    def _ebuild_metadata_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = EbuildMetadata._ebuild_metadata_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(EbuildMetadata, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(EbuildMetadata, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_ebuild_metadata = cls._ebuild_metadata_get_from_db(context, uuid)
+        if not db_ebuild_metadata:
+            return None
+        return cls._from_db_object(context, cls(context), db_ebuild_metadata,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_ebuild_metadata = cls._ebuild_metadata_get_by_name_from_db(context, name)
+        if not db_ebuild_metadata:
+            return None
+        return cls._from_db_object(context, cls(context), db_ebuild_metadata,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _ebuild_metadata_create(context, updates):
+        return _ebuild_metadata_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_ebuild_metadata = self._ebuild_metadata_create(context, updates)
+        self._from_db_object(context, self, db_ebuild_metadata)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an ebuild_metadata.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_ebuild_metadata = context.session.query(models.EbuildsMetadata).\
+            filter_by(uuid=self.id).first()
+        if not db_ebuild_metadata:
+            raise exception.ImagesNotFound(ebuild_metadata_id=self.id)
+        db_ebuild_metadata.update(values)
+        db_ebuild_metadata.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_ebuild_metadata)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _ebuild_metadata_destroy(context, ebuild_metadata_id=None, ebuild_metadataid=None):
+        _ebuild_metadata_destroy(context, ebuild_metadata_id=ebuild_metadata_id, ebuild_metadataid=ebuild_metadataid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an ebuild_metadata
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an ebuild_metadata object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._ebuild_metadata_destroy(context, ebuild_metadata_id=self.id)
+        else:
+            self._ebuild_metadata_destroy(context, ebuild_metadataid=self.ebuild_metadataid)
+        #self._from_db_object(context, self, db_ebuild_metadata)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_ebuild_metadata = EbuildMetadata._ebuild_metadata_get_query_from_db(context)
+
+        if 'status' in filters:
+            db_ebuild_metadata = db_ebuild_metadata.filter(
+                models.EbuildsMetadata.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_ebuild_metadata,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager.reader
+def _ebuild_metadata_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all ebuild metadata rows."""
+    filters = filters or {}
+
+    query = EbuildMetadata._ebuild_metadata_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.EbuildsMetadata.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = EbuildMetadata._ebuild_metadata_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.EbuildsMetadata,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class EbuildMetadataList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('EbuildMetadata'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_ebuild_metadatas = _ebuild_metadata_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.ebuild_metadata.EbuildMetadata,
+                                  db_ebuild_metadatas,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.EbuildsMetadata).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_ebuild_metadata = context.session.query(models.EbuildsMetadata).filter_by(auto=True)
+        db_ebuild_metadata.update(values)
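
A rough sketch of how the EbuildMetadata object above could record and read back metadata for an ebuild (not part of this commit; the admin-context helper, the UUID and the commit hash are assumptions/placeholders, and the exact lookup column is whatever the EbuildsMetadata model exposes as uuid):

    # Hypothetical sketch only.
    from gosbs import context as gosbs_context
    from gosbs.objects.ebuild_metadata import EbuildMetadata

    ctxt = gosbs_context.get_admin_context()               # assumed helper
    ebuild_uuid = '11111111-2222-3333-4444-555555555555'   # placeholder UUID

    metadata = EbuildMetadata(context=ctxt)
    metadata.ebuild_uuid = ebuild_uuid
    metadata.commit = 'deadbeef'                           # placeholder commit hash
    metadata.description = 'example description line'
    metadata.create(ctxt)

    # Read it back; get_by_uuid() returns None when no row matches.
    fetched = EbuildMetadata.get_by_uuid(ctxt, ebuild_uuid)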

diff --git a/gosbs/objects/ebuild_restriction.py b/gosbs/objects/ebuild_restriction.py
new file mode 100644
index 0000000..e3e046c
--- /dev/null
+++ b/gosbs/objects/ebuild_restriction.py
@@ -0,0 +1,281 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PROJECT_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(ebuild_restriction_model):
+    extra_specs = {}
+    return dict(ebuild_restriction_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_restriction_create(context, values):
+    db_ebuild_restriction = models.EbuildsRestrictions()
+    db_ebuild_restriction.update(values)
+
+    try:
+        db_ebuild_restriction.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'ebuild_restrictionid' in e.columns:
+            raise exception.ImagesIdExists(ebuild_restriction_id=values['ebuild_restrictionid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_ebuild_restriction)
+
+
+@db_api.main_context_manager.writer
+def _ebuild_restriction_destroy(context, ebuild_restriction_id=None, ebuild_restrictionid=None):
+    query = context.session.query(models.EbuildsRestrictions)
+
+    if ebuild_restriction_id is not None:
+        query.filter(models.EbuildsRestrictions.id == ebuild_restriction_id).delete()
+    else:
+        query.filter(models.EbuildsRestrictions.id == ebuild_restrictionid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class EbuildRestriction(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'ebuild_uuid': fields.UUIDField(),
+        'restriction_id': fields.IntegerField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(EbuildRestriction, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_ebuild_restrictions = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(EbuildRestriction, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, ebuild_restriction, db_ebuild_restriction, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        ebuild_restriction._context = context
+        for name, field in ebuild_restriction.fields.items():
+            value = db_ebuild_restriction[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            ebuild_restriction[name] = value
+
+        ebuild_restriction.obj_reset_changes()
+        return ebuild_restriction
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _ebuild_restriction_get_query_from_db(context):
+        query = context.session.query(models.EbuildsRestrictions)
+        return query
+
+    @staticmethod
+    @require_context
+    def _ebuild_restriction_get_from_db(context, id):
+        """Returns a dict describing specific ebuild_restrictions."""
+        result = EbuildRestriction._ebuild_restriction_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(ebuild_restriction_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _ebuild_restriction_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = EbuildRestriction._ebuild_restriction_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(ebuild_restrictions_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(EbuildRestriction, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(EbuildRestriction, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_ebuild_restriction = cls._ebuild_restriction_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_ebuild_restriction,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_ebuild_restriction = cls._ebuild_restriction_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_ebuild_restriction,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _ebuild_restriction_create(context, updates):
+        return _ebuild_restriction_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_ebuild_restriction = self._ebuild_restriction_create(context, updates)
+        self._from_db_object(context, self, db_ebuild_restriction)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an ebuild_restriction.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_ebuild_restriction = context.session.query(models.EbuildsRestrictions).\
+            filter_by(id=self.id).first()
+        if not db_ebuild_restriction:
+            raise exception.ImagesNotFound(ebuild_restriction_id=self.id)
+        db_ebuild_restriction.update(values)
+        db_ebuild_restriction.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_ebuild_restriction)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _ebuild_restriction_destroy(context, ebuild_restriction_id=None, ebuild_restrictionid=None):
+        _ebuild_restriction_destroy(context, ebuild_restriction_id=ebuild_restriction_id, ebuild_restrictionid=ebuild_restrictionid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an ebuild_restriction
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an ebuild_restriction object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._ebuild_restriction_destroy(context, ebuild_restriction_id=self.id)
+        else:
+            self._ebuild_restriction_destroy(context, ebuild_restrictionid=self.ebuild_restrictionid)
+        #self._from_db_object(context, self, db_ebuild_restriction)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_ebuild_restriction = EbuildRestriction._ebuild_restriction_get_query_from_db(context)
+
+        if 'status' in filters:
+            db_ebuild_restriction = db_ebuild_restriction.filter(
+                models.EbuildsRestrictions.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_ebuild_restriction,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager.reader
+def _ebuild_restriction_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all ebuild restrictions."""
+    filters = filters or {}
+
+    query = EbuildRestriction._ebuild_restriction_get_query_from_db(context)
+
+    if 'ebuild_uuid' in filters:
+        query = query.filter(
+            models.EbuildsRestrictions.ebuild_uuid == filters['ebuild_uuid'])
+    if not query:
+        return None
+
+    marker_row = None
+    if marker is not None:
+        marker_row = EbuildRestriction._ebuild_restriction_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.EbuildsRestrictions,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class EbuildRestrictionList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('EbuildRestriction'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_ebuild_restrictions = _ebuild_restriction_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.ebuild_restriction.EbuildRestriction,
+                                  db_ebuild_restrictions,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.EbuildsRestrictions).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_ebuild_restriction = context.session.query(models.EbuildsRestrictions).filter_by(auto=True)
+        db_ebuild_restriction.update(values)
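
As a quick illustration of the list API above (not part of this commit; the admin-context helper and the UUID are assumptions), the restrictions recorded for a single ebuild can be fetched with the ebuild_uuid filter:

    # Hypothetical sketch only.
    from gosbs import context as gosbs_context
    from gosbs.objects.ebuild_restriction import EbuildRestrictionList

    ctxt = gosbs_context.get_admin_context()               # assumed helper
    ebuild_uuid = '11111111-2222-3333-4444-555555555555'   # placeholder UUID

    restrictions = EbuildRestrictionList.get_all(
        ctxt, filters={'ebuild_uuid': ebuild_uuid})
    for restriction in restrictions:
        print(restriction.restriction_id)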

diff --git a/gosbs/objects/email.py b/gosbs/objects/email.py
new file mode 100644
index 0000000..375c429
--- /dev/null
+++ b/gosbs/objects/email.py
@@ -0,0 +1,269 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(email_model):
+    extra_specs = {}
+    return dict(email_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _email_create(context, values):
+    db_email = models.Emails()
+    db_email.update(values)
+
+    try:
+        db_email.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'emailid' in e.columns:
+            raise exception.ImagesIdExists(email_id=values['emailid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_email)
+
+
+@db_api.main_context_manager.writer
+def _email_destroy(context, email_id=None, emailid=None):
+    query = context.session.query(models.Emails)
+
+    if email_id is not None:
+        query.filter(models.Emails.id == email_id).delete()
+    else:
+        query.filter(models.Emails.id == emailid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Email(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'email': fields.StringField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Email, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_emails = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Email, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, email, db_email, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        email._context = context
+        for name, field in email.fields.items():
+            value = db_email[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            email[name] = value
+
+        email.obj_reset_changes()
+        return email
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _email_get_query_from_db(context):
+        query = context.session.query(models.Emails)
+        return query
+
+    @staticmethod
+    @require_context
+    def _email_get_from_db(context, id):
+        """Returns a dict describing specific emails."""
+        result = Email._email_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(email_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _email_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Email._email_get_query_from_db(context).\
+                            filter_by(email=name).\
+                            first()
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Email, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Email, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_email = cls._email_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_email,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_email = cls._email_get_by_name_from_db(context, name)
+        if not db_email:
+            return None
+        return cls._from_db_object(context, cls(context), db_email,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _email_create(context, updates):
+        return _email_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_email = self._email_create(context, updates)
+        self._from_db_object(context, self, db_email)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an email.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_email = context.session.query(models.Emails).\
+            filter_by(id=self.id).first()
+        if not db_email:
+            raise exception.ImagesNotFound(email_id=self.id)
+        db_email.update(values)
+        db_email.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_email)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _email_destroy(context, email_id=None, emailid=None):
+        _email_destroy(context, email_id=email_id, emailid=emailid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete an email
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an email object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._email_destroy(context, email_id=self.id)
+        else:
+            self._email_destroy(context, emailid=self.emailid)
+        #self._from_db_object(context, self, db_email)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_email = Email._email_get_query_from_db(context)
+
+        return cls._from_db_object(context, cls(context), db_email,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager.reader
+def _email_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all emails."""
+    filters = filters or {}
+
+    query = Email._email_get_query_from_db(context)
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Email._email_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Emails,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class EmailList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Email'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_emails = _email_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.email.Email,
+                                  db_emails,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Emails).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_email = context.session.query(models.Emails).filter_by(auto=True)
+        db_email.update(values)
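
Because get_by_name() above returns None for an unknown address, a simple get-or-create pattern falls out of the object (sketch only, not part of this commit; the admin-context helper and the address are assumptions):

    # Hypothetical sketch only.
    from gosbs import context as gosbs_context
    from gosbs.objects.email import Email

    ctxt = gosbs_context.get_admin_context()               # assumed helper
    address = 'maintainer@example.org'                     # placeholder address

    email = Email.get_by_name(ctxt, address)
    if email is None:
        email = Email(context=ctxt)
        email.email = address
        email.create(ctxt)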

diff --git a/gosbs/objects/fields.py b/gosbs/objects/fields.py
new file mode 100644
index 0000000..02dd297
--- /dev/null
+++ b/gosbs/objects/fields.py
@@ -0,0 +1,70 @@
+#    Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/fields.py
+
+import os
+import re
+
+from cursive import signature_utils
+from oslo_serialization import jsonutils
+from oslo_versionedobjects import fields
+import six
+
+from gosbs import exception
+from gosbs.i18n import _
+
+
+# Import field errors from oslo.versionedobjects
+KeyTypeError = fields.KeyTypeError
+ElementTypeError = fields.ElementTypeError
+
+
+# Import fields from oslo.versionedobjects
+BooleanField = fields.BooleanField
+UnspecifiedDefault = fields.UnspecifiedDefault
+IntegerField = fields.IntegerField
+NonNegativeIntegerField = fields.NonNegativeIntegerField
+UUIDField = fields.UUIDField
+FloatField = fields.FloatField
+NonNegativeFloatField = fields.NonNegativeFloatField
+StringField = fields.StringField
+SensitiveStringField = fields.SensitiveStringField
+EnumField = fields.EnumField
+DateTimeField = fields.DateTimeField
+DictOfStringsField = fields.DictOfStringsField
+DictOfNullableStringsField = fields.DictOfNullableStringsField
+DictOfIntegersField = fields.DictOfIntegersField
+ListOfStringsField = fields.ListOfStringsField
+SetOfIntegersField = fields.SetOfIntegersField
+ListOfSetsOfIntegersField = fields.ListOfSetsOfIntegersField
+ListOfDictOfNullableStringsField = fields.ListOfDictOfNullableStringsField
+DictProxyField = fields.DictProxyField
+ObjectField = fields.ObjectField
+ListOfObjectsField = fields.ListOfObjectsField
+VersionPredicateField = fields.VersionPredicateField
+FlexibleBooleanField = fields.FlexibleBooleanField
+DictOfListOfStringsField = fields.DictOfListOfStringsField
+IPAddressField = fields.IPAddressField
+IPV4AddressField = fields.IPV4AddressField
+IPV6AddressField = fields.IPV6AddressField
+IPV4AndV6AddressField = fields.IPV4AndV6AddressField
+IPNetworkField = fields.IPNetworkField
+IPV4NetworkField = fields.IPV4NetworkField
+IPV6NetworkField = fields.IPV6NetworkField
+AutoTypedField = fields.AutoTypedField
+BaseEnumField = fields.BaseEnumField
+MACAddressField = fields.MACAddressField
+ListOfIntegersField = fields.ListOfIntegersField
+PCIAddressField = fields.PCIAddressField
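
The module is a pass-through so the gosbs objects can import all field types from one place; a field map in any of the objects in this commit is built from these aliases, for example (illustrative only):

    from gosbs.objects import fields

    # Example field map using the re-exported oslo.versionedobjects types.
    example_fields = {
        'id': fields.IntegerField(),
        'uuid': fields.UUIDField(),
        'name': fields.StringField(nullable=True),
        'enabled': fields.BooleanField(),
    }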

diff --git a/gosbs/objects/flavor.py b/gosbs/objects/flavor.py
new file mode 100644
index 0000000..28739a8
--- /dev/null
+++ b/gosbs/objects/flavor.py
@@ -0,0 +1,228 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+
+OPTIONAL_FIELDS = []
+# Remove these fields in version 2.0 of the object.
+DEPRECATED_FIELDS = ['deleted', 'deleted_at']
+
+# Non-joined fields which can be updated.
+MUTABLE_FIELDS = set(['description'])
+
+CONF = gosbs.conf.CONF
+
+
+def _dict_with_extra_specs(flavor_model):
+    extra_specs = {}
+    return dict(flavor_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _flavor_create(context, values):
+    db_flavor = models.Flavors()
+    db_flavor.update(values)
+
+    try:
+        db_flavor.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'flavorid' in e.columns:
+            raise exception.FlavorIdExists(flavor_id=values['flavorid'])
+        raise exception.FlavorExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_flavor)
+
+
+@db_api.main_context_manager.writer
+def _flavor_destroy(context, flavor_id=None, flavorid=None):
+    query = context.session.query(models.Flavors)
+
+    if flavor_id is not None:
+        query = query.filter(models.Flavors.id == flavor_id)
+    else:
+        query = query.filter(models.Flavors.flavorid == flavorid)
+    result = query.first()
+
+    if not result:
+        raise exception.FlavorNotFound(flavor_id=(flavor_id or flavorid))
+
+    # Actually delete the row; the caller refreshes itself from the returned copy.
+    context.session.delete(result)
+    return result
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Flavor(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+    # Version 1.1: Added save_projects(), save_extra_specs(), removed
+    #              remotable from save()
+    # Version 1.2: Added description field. Note: this field should not be
+    #              persisted with the embedded instance.flavor.
+    VERSION = '1.2'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'name': fields.StringField(nullable=True),
+        'ram': fields.IntegerField(),
+        'vcpus': fields.IntegerField(),
+        'disk': fields.IntegerField(),
+        'swap': fields.IntegerField(),
+        'description': fields.StringField(nullable=True)
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Flavor, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_projects = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Flavor, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+        if target_version < (1, 2) and 'description' in primitive:
+            del primitive['description']
+
+    @staticmethod
+    def _from_db_object(context, flavor, db_flavor, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        flavor._context = context
+        for name, field in flavor.fields.items():
+            if name in OPTIONAL_FIELDS:
+                continue
+            if name in DEPRECATED_FIELDS and name not in db_flavor:
+                continue
+            value = db_flavor[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            flavor[name] = value
+
+        # NOTE(danms): This is to support processing the API flavor
+        # model, which does not have these deprecated fields. When we
+        # remove compatibility with the old InstanceType model, we can
+        # remove this as well.
+        if any(f not in db_flavor for f in DEPRECATED_FIELDS):
+            flavor.deleted_at = None
+            flavor.deleted = False
+
+        flavor.obj_reset_changes()
+        return flavor
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _flavor_get_query_from_db(context):
+        query = context.session.query(models.Flavors)
+        return query
+
+    @staticmethod
+    @require_context
+    def _flavor_get_from_db(context, id):
+        """Returns a dict describing specific flavor."""
+        result = Flavor._flavor_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.FlavorNotFound(flavor_id=id)
+        return result
+
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Flavor, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Flavor, self).obj_what_changed()
+        return changes
+
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_flavor = cls._flavor_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_flavor,
+                                   expected_attrs=[])
+
+
+    @staticmethod
+    def _flavor_create(context, updates):
+        return _flavor_create(context, updates)
+
+    @base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_flavor = self._flavor_create(context, updates)
+        self._from_db_object(context, self, db_flavor)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a flavor.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_flavor = context.session.query(models.Flavors).\
+            filter_by(id=self.id).first()
+        if not db_flavor:
+            raise exception.FlavorNotFound(flavor_id=self.id)
+        db_flavor.update(values)
+        db_flavor.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_flavor)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _flavor_destroy(context, flavor_id=None, flavorid=None):
+        return _flavor_destroy(context, flavor_id=flavor_id, flavorid=flavorid)
+
+    @base.remotable
+    def destroy(self):
+        # NOTE(danms): Historically the only way to delete a flavor
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a flavor object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            db_flavor = self._flavor_destroy(self._context,
+                                             flavor_id=self.id)
+        else:
+            db_flavor = self._flavor_destroy(self._context,
+                                             flavorid=self.flavorid)
+        self._from_db_object(self._context, self, db_flavor)
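
A short sketch of the Flavor object in use (not part of this commit; the flavor id and the admin-context helper are assumptions). MUTABLE_FIELDS above lists description as the only non-joined field meant to be updated, so that is what goes through save():

    # Hypothetical sketch only.
    from gosbs import context as gosbs_context
    from gosbs.objects.flavor import Flavor

    ctxt = gosbs_context.get_admin_context()               # assumed helper
    flavor = Flavor.get_by_id(ctxt, 1)                     # raises FlavorNotFound if missing
    flavor.description = 'small build-node flavor'
    flavor.save(ctxt)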

diff --git a/gosbs/objects/image.py b/gosbs/objects/image.py
new file mode 100644
index 0000000..45d48eb
--- /dev/null
+++ b/gosbs/objects/image.py
@@ -0,0 +1,204 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+
+CONF = gosbs.conf.CONF
+
+
+def _dict_with_extra_specs(image_model):
+    extra_specs = {}
+    return dict(image_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _image_create(context, values):
+    db_image = models.Images()
+    db_image.update(values)
+
+    try:
+        db_image.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'imageid' in e.columns:
+            raise exception.ImagesIdExists(image_id=values['imageid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_image)
+
+
+@db_api.main_context_manager.writer
+def _image_destroy(context, image_id=None, imageid=None):
+    query = context.session.query(models.Images)
+
+    if image_id is not None:
+        query = query.filter(models.Images.id == image_id)
+    else:
+        query = query.filter(models.Images.imageid == imageid)
+    result = query.first()
+
+    if not result:
+        raise exception.ImagesNotFound(image_id=(image_id or imageid))
+
+    # Actually delete the row; the caller refreshes itself from the returned copy.
+    context.session.delete(result)
+    return result
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Image(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.UUIDField(),
+        'name': fields.StringField(),
+        'min_ram': fields.IntegerField(),
+        'min_disk': fields.IntegerField(),
+        'size': fields.IntegerField(),
+        'status': fields.StringField()
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Image, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_projects = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Image, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, image, db_image, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        image._context = context
+        for name, field in image.fields.items():
+            value = db_image[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            image[name] = value
+
+        image.obj_reset_changes()
+        return image
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _image_get_query_from_db(context):
+        query = context.session.query(models.Images)
+        return query
+
+    @staticmethod
+    @require_context
+    def _image_get_from_db(context, id):
+        """Returns a dict describing specific image."""
+        result = Image._image_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(image_id=id)
+        return result
+
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Image, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Image, self).obj_what_changed()
+        return changes
+
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_image = cls._image_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_image,
+                                   expected_attrs=[])
+
+
+    @staticmethod
+    def _image_create(context, updates):
+        return _image_create(context, updates)
+
+    @base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_image = self._image_create(context, updates)
+        self._from_db_object(context, self, db_image)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to an image.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_image = context.session.query(models.Images).\
+            filter_by(id=self.id).first()
+        if not db_image:
+            raise exception.ImagesNotFound(image_id=self.id)
+        db_image.update(values)
+        db_image.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_image)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _image_destroy(context, image_id=None, imageid=None):
+        return _image_destroy(context, image_id=image_id, imageid=imageid)
+
+    @base.remotable
+    def destroy(self):
+        # NOTE(danms): Historically the only way to delete an image
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of an image object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            db_image = self._image_destroy(self._context,
+                                             image_id=self.id)
+        else:
+            db_image = self._image_destroy(self._context,
+                                             imageid=self.imageid)
+        self._from_db_object(self._context, self, db_image)
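
The Image object follows the same pattern (sketch only, not part of this commit; the UUID and the admin-context helper are assumptions):

    # Hypothetical sketch only.
    from gosbs import context as gosbs_context
    from gosbs.objects.image import Image

    ctxt = gosbs_context.get_admin_context()               # assumed helper
    image_uuid = '11111111-2222-3333-4444-555555555555'    # placeholder UUID

    image = Image.get_by_id(ctxt, image_uuid)              # raises ImagesNotFound if missing
    print(image.name, image.size, image.status)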

diff --git a/gosbs/objects/keyword.py b/gosbs/objects/keyword.py
new file mode 100644
index 0000000..4cf2e0c
--- /dev/null
+++ b/gosbs/objects/keyword.py
@@ -0,0 +1,277 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(keyword_model):
+    extra_specs = {}
+    return dict(keyword_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _keyword_create(context, values):
+    db_keyword = models.Keywords()
+    db_keyword.update(values)
+
+    try:
+        db_keyword.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'keywordid' in e.columns:
+            raise exception.ImagesIdExists(keyword_id=values['keywordid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_keyword)
+
+
+@db_api.main_context_manager.writer
+def _keyword_destroy(context, keyword_id=None, keywordid=None):
+    query = context.session.query(models.Keywords)
+
+    if keyword_id is not None:
+        query.filter(models.Keywords.uuid == keyword_id).delete()
+    else:
+        query.filter(models.Keywords.uuid == keywordid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Keyword(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'keyword': fields.StringField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Keyword, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_keywords = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Keyword, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, keyword, db_keyword, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        keyword._context = context
+        for name, field in keyword.fields.items():
+            value = db_keyword[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            keyword[name] = value
+        
+        keyword.obj_reset_changes()
+        return keyword
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _keyword_get_query_from_db(context):
+        query = context.session.query(models.Keywords)
+        return query
+
+    @staticmethod
+    @require_context
+    def _keyword_get_from_db(context, id):
+        """Returns a dict describing specific keywords."""
+        result = Keyword._keyword_get_query_from_db(context).\
+                        filter_by(uuid=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(keyword_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _keyword_get_by_keyword_from_db(context, keyword):
+        """Returns a dict describing specific flavor."""
+        result = Keyword._keyword_get_query_from_db(context).\
+                            filter_by(keyword=keyword).\
+                            first()
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Keyword, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Keyword, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_keyword = cls._keyword_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_keyword,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, keyword):
+        db_keyword = cls._keyword_get_by_keyword_from_db(context, keyword)
+        if not db_keyword:
+            return None
+        return cls._from_db_object(context, cls(context), db_keyword,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _keyword_create(context, updates):
+        return _keyword_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_keyword = self._keyword_create(context, updates)
+        self._from_db_object(context, self, db_keyword)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a keyword.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_keyword = context.session.query(models.Keywords).\
+            filter_by(uuid=self.id).first()
+        if not db_keyword:
+            raise exception.ImagesNotFound(keyword_id=self.id)
+        db_keyword.update(values)
+        db_keyword.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_keyword)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _keyword_destroy(context, keyword_id=None, keywordid=None):
+        _keyword_destroy(context, keyword_id=keyword_id, keywordid=keywordid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a keyword
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a keyword object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._keyword_destroy(context, keyword_id=self.id)
+        else:
+            self._keyword_destroy(context, keywordid=self.keywordid)
+        #self._from_db_object(context, self, db_keyword)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_keyword = Keyword._keyword_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_keyword = db_keyword.filter(
+                models.Keywords.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_keyword,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _keyword_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all keywordss.
+    """
+    filters = filters or {}
+
+    query = Keyword._keyword_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.Keywords.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Keyword._keyword_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Keywords,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class KeywordList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Keyword'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_keywords = _keyword_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.keyword.Keyword,
+                                  db_keywords,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Keywords).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_keyword = context.session.query(models.Keywords).filter_by(auto=True)
+        db_keyword.update(values)

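For reference, a minimal sketch of how the new Keyword object is meant to be used from a task; illustrative only, not part of the patch. It assumes gosbs/context.py keeps Nova's get_admin_context() helper and gosbs/objects/base.py keeps Nova's object constructor signature:

    from gosbs import context as gosbs_context
    from gosbs.objects import keyword as keyword_obj

    ctxt = gosbs_context.get_admin_context()
    # get_by_name() returns None when no row matches, so get-or-create is simple.
    kw = keyword_obj.Keyword.get_by_name(ctxt, 'amd64')
    if kw is None:
        kw = keyword_obj.Keyword(context=ctxt)
        kw.keyword = 'amd64'
        kw.create(ctxt)
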
diff --git a/gosbs/objects/package.py b/gosbs/objects/package.py
new file mode 100644
index 0000000..7f3ac6b
--- /dev/null
+++ b/gosbs/objects/package.py
@@ -0,0 +1,300 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PACKAGE_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(package_model):
+    extra_specs = {}
+    return dict(package_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _package_create(context, values):
+    db_package = models.Packages()
+    db_package.update(values)
+
+    try:
+        db_package.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'packageid' in e.columns:
+            raise exception.ImagesIdExists(package_id=values['packageid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_package)
+
+
+@db_api.main_context_manager.writer
+def _package_destroy(context, package_id=None, packageid=None):
+    query = context.session.query(models.Packages)
+
+    if package_id is not None:
+        query.filter(models.Packages.uuid == package_id).delete()
+    else:
+        query.filter(models.Packages.uuid == packageid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Package(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'name': fields.StringField(),
+        'status' : fields.EnumField(valid_values=PACKAGE_STATUS),
+        'category_uuid': fields.UUIDField(),
+        'repo_uuid': fields.UUIDField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Package, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_packages = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Package, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, package, db_package, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        package._context = context
+        for name, field in package.fields.items():
+            value = db_package[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            package[name] = value
+        
+        package.obj_reset_changes()
+        return package
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _package_get_query_from_db(context):
+        query = context.session.query(models.Packages)
+        return query
+
+    @staticmethod
+    @require_context
+    def _package_get_from_db(context, uuid):
+        """Returns a dict describing specific packages."""
+        result = Package._package_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(package_id=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _package_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Package._package_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            return None
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Package, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Package, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_package = cls._package_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name, filters=None):
+        filters = filters or {}
+        db_package = Package._package_get_query_from_db(context)
+        db_package = db_package.filter_by(name=name)
+        if 'repo_uuid' in filters:
+            db_package = db_package.filter(
+                models.Packages.repo_uuid == filters['repo_uuid'])
+        if 'deleted' in filters:
+            db_package = db_package.filter(
+                models.Packages.deleted == filters['deleted'])
+        if 'category_uuid' in filters:
+            db_package = db_package.filter(
+                models.Packages.category_uuid == filters['category_uuid'])
+        db_package = db_package.first()
+        if not db_package:
+            return None
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _package_create(context, updates):
+        return _package_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_package = self._package_create(context, updates)
+        self._from_db_object(context, self, db_package)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a package.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_package = context.session.query(models.Packages).\
+            filter_by(uuid=self.uuid).first()
+        if not db_package:
+            raise exception.ImagesNotFound(package_id=self.uuid)
+        db_package.update(values)
+        db_package.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_package)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _package_destroy(context, package_id=None, packageid=None):
+        _package_destroy(context, package_id=package_id, packageid=packageid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a package
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a package object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'uuid' in self:
+            self._package_destroy(context, package_id=self.uuid)
+        else:
+            self._package_destroy(context, packageid=self.packageid)
+        #self._from_db_object(context, self, db_package)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_package = Package._package_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_package = db_package.filter(
+                models.Packages.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _package_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all packagess.
+    """
+    filters = filters or {}
+
+    query = Package._package_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.Packages.status == filters['status'])
+    if 'repo_uuid' in filters:
+            query = query.filter(
+                models.Packages.repo_uuid == filters['repo_uuid'])
+    if 'category_uuid' in filters:
+            query = query.filter(
+                models.Packages.category_uuid == filters['category_uuid'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Package._package_get_query_from_db(context).\
+                    filter_by(uuid=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Packages,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class PackageList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Package'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+        db_packages = _package_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.package.Package,
+                                  db_packages,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Packages).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_package = context.session.query(models.Packages).filter_by(auto=True)
+        db_package.update(values)

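A similar sketch for the Package object, showing the filter dictionaries that get_by_name() and PackageList.get_all() accept; the repo and category UUIDs are placeholder values, and get_admin_context() is assumed as above:

    from gosbs import context as gosbs_context
    from gosbs.objects import package as package_obj

    ctxt = gosbs_context.get_admin_context()
    repo_uuid = 'REPLACE-WITH-REPO-UUID'          # placeholder
    category_uuid = 'REPLACE-WITH-CATEGORY-UUID'  # placeholder

    pkg = package_obj.Package.get_by_name(
        ctxt, 'openssl',
        filters={'repo_uuid': repo_uuid,
                 'category_uuid': category_uuid,
                 'deleted': False})
    if pkg is not None and pkg.status == 'waiting':
        pkg.status = 'in-progress'
        pkg.save(ctxt)

    # All waiting packages in one repo.
    waiting = package_obj.PackageList.get_all(
        ctxt, filters={'status': 'waiting', 'repo_uuid': repo_uuid})
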
diff --git a/gosbs/objects/package_email.py b/gosbs/objects/package_email.py
new file mode 100644
index 0000000..c6a5f2d
--- /dev/null
+++ b/gosbs/objects/package_email.py
@@ -0,0 +1,301 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(package_model):
+    extra_specs = {}
+    return dict(package_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _package_create(context, values):
+    db_package = models.PackagesEmails()
+    db_package.update(values)
+
+    try:
+        db_package.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'packageid' in e.columns:
+            raise exception.ImagesIdExists(package_id=values['packageid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_package)
+
+
+@db_api.main_context_manager.writer
+def _package_destroy(context, package_id=None, packageid=None):
+    query = context.session.query(models.PackagesEmails)
+
+    if package_id is not None:
+        query.filter(models.PackagesEmails.id == package_id).delete()
+    else:
+        query.filter(models.PackagesEmails.id == packageid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class PackageEmail(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'package_uuid': fields.UUIDField(),
+        'email_id': fields.IntegerField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(PackageEmail, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_packages = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(PackageEmail, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, package, db_package, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        package._context = context
+        for name, field in package.fields.items():
+            value = db_package[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            package[name] = value
+        
+        package.obj_reset_changes()
+        return package
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _package_get_query_from_db(context):
+        query = context.session.query(models.PackagesEmails)
+        return query
+
+    @staticmethod
+    @require_context
+    def _package_get_from_db(context, id):
+        """Returns a dict describing specific packages."""
+        result = PackageEmail._package_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(package_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _email_id_get_from_db(context, email_id):
+        """Returns a dict describing specific packages."""
+        result = PackageEmail._package_get_query_from_db(context).\
+                        filter_by(email_id=email_id).\
+                        first()
+        return result
+
+    @staticmethod
+    @require_context
+    def _package_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = PackageEmail._package_get_query_from_db(context).\
+                            filter_by(name=email).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(packages_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(PackageEmail, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(PackageEmail, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id, filters=None):
+        filters = filters or {}
+        db_package = cls._package_get_from_db(context, id)
+        if 'package_uuid' in filters:
+            db_package = db_package.filter(
+                models.PackagesEmails.package_uuid == filters['package_uuid'])
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+
+    @base.remotable_classmethod
+    def get_by_email_id(cls, context, email_id, filters=None):
+        filters = filters or {}
+        db_package = PackageEmail._package_get_query_from_db(context)
+        db_package = db_package.filter_by(email_id=email_id)
+        if 'package_uuid' in filters:
+            db_package = db_package.filter(
+                models.PackagesEmails.package_uuid == filters['package_uuid'])
+        db_package = db_package.first()
+        if not db_package:
+            return None
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name, filters=None):
+        filters = filters or {}
+        db_package = cls._package_get_by_name_from_db(context, name)
+        if 'package_uuid' in filters:
+            db_package = db_package.filter(
+                models.PackagesEmails.package_uuid == filters['package_uuid'])
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _package_create(context, updates):
+        return _package_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_package = self._package_create(context, updates)
+        self._from_db_object(context, self, db_package)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a package email.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_package = context.session.query(models.PackagesEmails).\
+            filter_by(id=self.id).first()
+        if not db_package:
+            raise exception.ImagesNotFound(package_id=self.id)
+        db_package.update(values)
+        db_package.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_package)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _package_destroy(context, package_id=None, packageid=None):
+        _package_destroy(context, package_id=package_id, packageid=packageid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a package email
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a package email object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._package_destroy(context, package_id=self.id)
+        else:
+            self._package_destroy(context, packageid=self.packageid)
+        #self._from_db_object(context, self, db_package)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_package = PackageEmail._package_get_query_from_db(context)
+
+        return cls._from_db_object(context, cls(context), db_package,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _package_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all packagess.
+    """
+    filters = filters or {}
+
+    query = PackageEmail._package_get_query_from_db(context)
+
+    marker_row = None
+    if marker is not None:
+        marker_row = PackageEmail._package_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.PackagesEmails,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class PackageEmailList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('PackageEmail'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_packages = _package_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.package_email.PackageEmail,
+                                  db_packages,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.PackagesEmails).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_package = context.session.query(models.PackagesEmails).filter_by(auto=True)
+        db_package.update(values)

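A sketch of the PackageEmail link object, which ties a package UUID to an email id; both ids are placeholders and the context helper is assumed as above:

    from gosbs import context as gosbs_context
    from gosbs.objects import package_email as package_email_obj

    ctxt = gosbs_context.get_admin_context()
    package_uuid = 'REPLACE-WITH-PACKAGE-UUID'  # placeholder
    email_id = 1                                # placeholder

    # get_by_email_id() returns None when the pair is not linked yet.
    link = package_email_obj.PackageEmail.get_by_email_id(
        ctxt, email_id, filters={'package_uuid': package_uuid})
    if link is None:
        link = package_email_obj.PackageEmail(context=ctxt)
        link.package_uuid = package_uuid
        link.email_id = email_id
        link.create(ctxt)
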
diff --git a/gosbs/objects/package_metadata.py b/gosbs/objects/package_metadata.py
new file mode 100644
index 0000000..5f6270d
--- /dev/null
+++ b/gosbs/objects/package_metadata.py
@@ -0,0 +1,279 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(package_metadata_model):
+    extra_specs = {}
+    return dict(package_metadata_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _package_metadata_create(context, values):
+    db_package_metadata = models.PackagesMetadata()
+    db_package_metadata.update(values)
+
+    try:
+        db_package_metadata.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'package_metadataid' in e.columns:
+            raise exception.ImagesIdExists(package_metadata_id=values['package_metadataid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_package_metadata)
+
+
+@db_api.main_context_manager.writer
+def _package_metadata_destroy(context, package_metadata_id=None, package_metadataid=None):
+    query = context.session.query(models.PackagesMetadata)
+
+    if package_metadata_id is not None:
+        query.filter(models.PackagesMetadata.id == package_metadata_id).delete()
+    else:
+        query.filter(models.PackagesMetadata.id == package_metadataid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class PackageMetadata(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'package_uuid': fields.UUIDField(),
+        'description' : fields.StringField(),
+        'gitlog' : fields.StringField(),
+        'checksum': fields.StringField(nullable=True),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(PackageMetadata, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_package_metadatas = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(PackageMetadata, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, package_metadata, db_package_metadata, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        package_metadata._context = context
+        for name, field in package_metadata.fields.items():
+            value = db_package_metadata[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            package_metadata[name] = value
+        
+        package_metadata.obj_reset_changes()
+        return package_metadata
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _package_metadata_get_query_from_db(context):
+        query = context.session.query(models.PackagesMetadata)
+        return query
+
+    @staticmethod
+    @require_context
+    def _package_metadata_get_from_db(context, uuid):
+        """Returns a dict describing specific package_metadatas."""
+        result = PackageMetadata._package_metadata_get_query_from_db(context).\
+                        filter_by(package_uuid=uuid).\
+                        first()
+        return result
+
+    @staticmethod
+    @require_context
+    def _package_metadata_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = PackageMetadata._package_metadata_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(package_metadatas_name=name)
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(PackageMetadata, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(PackageMetadata, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_package_metadata = cls._package_metadata_get_from_db(context, uuid)
+        if not db_package_metadata:
+            return None
+        return cls._from_db_object(context, cls(context), db_package_metadata,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_package_metadata = cls._package_metadata_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_package_metadata,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _package_metadata_create(context, updates):
+        return _package_metadata_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_package_metadata = self._package_metadata_create(context, updates)
+        self._from_db_object(context, self, db_package_metadata)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a package metadata row.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_package_metadata = context.session.query(models.PackagesMetadata).\
+            filter_by(id=self.id).first()
+        if not db_package_metadata:
+            raise exception.ImagesNotFound(package_metadata_id=self.id)
+        db_package_metadata.update(values)
+        db_package_metadata.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_package_metadata)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _package_metadata_destroy(context, package_metadata_id=None, package_metadataid=None):
+        _package_metadata_destroy(context, package_metadata_id=package_metadata_id, package_metadataid=package_metadataid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a package metadata row
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a package metadata object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._package_metadata_destroy(context, package_metadata_id=self.id)
+        else:
+            self._package_metadata_destroy(context, package_metadataid=self.package_metadataid)
+        #self._from_db_object(context, self, db_package_metadata)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_package_metadata = PackageMetadata._package_metadata_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_package_metadata = db_package_metadata.filter(
+                models.PackagesMetadata.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_package_metadata,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _package_metadata_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all package_metadatass.
+    """
+    filters = filters or {}
+
+    query = PackageMetadata._package_metadata_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.PackagesMetadata.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = PackageMetadata._package_metadata_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.PackagesMetadata,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class PackageMetadataList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('PackageMetadata'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_package_metadatas = _package_metadata_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.package_metadata.PackageMetadata,
+                                  db_package_metadatas,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.PackagesMetadata).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_package_metadata = context.session.query(models.PackagesMetadata).filter_by(auto=True)
+        db_package_metadata.update(values)

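A sketch of the create-or-update pattern the PackageMetadata object supports; get_by_uuid() looks the row up by package UUID and returns None when there is none. The values are placeholders and the context helper is assumed as above:

    from gosbs import context as gosbs_context
    from gosbs.objects import package_metadata as metadata_obj

    ctxt = gosbs_context.get_admin_context()
    package_uuid = 'REPLACE-WITH-PACKAGE-UUID'  # placeholder
    gitlog = 'placeholder git log text'         # placeholder
    checksum = 'placeholder checksum'           # placeholder

    md = metadata_obj.PackageMetadata.get_by_uuid(ctxt, package_uuid)
    if md is None:
        md = metadata_obj.PackageMetadata(context=ctxt)
        md.package_uuid = package_uuid
        md.description = 'placeholder description'
        md.gitlog = gitlog
        md.checksum = checksum
        md.create(ctxt)
    else:
        md.gitlog = gitlog
        md.checksum = checksum
        md.save(ctxt)
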
diff --git a/gosbs/objects/project.py b/gosbs/objects/project.py
new file mode 100644
index 0000000..1e1917f
--- /dev/null
+++ b/gosbs/objects/project.py
@@ -0,0 +1,279 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+PROJECT_STATUS = ['failed', 'completed', 'in-progress', 'waiting', 'stopped']
+
+def _dict_with_extra_specs(project_model):
+    extra_specs = {}
+    return dict(project_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _project_create(context, values):
+    db_project = models.Projects()
+    db_project.update(values)
+
+    try:
+        db_project.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'project_uuid' in e.columns:
+            raise exception.ImagesIdExists(project_uuid=values['project_uuid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_project)
+
+
+@db_api.main_context_manager.writer
+def _project_destroy(context, project_uuid=None, projectuuid=None):
+    query = context.session.query(models.Projects)
+
+    if project_uuid is not None:
+        query.filter(models.Projects.uuid == project_uuid).delete()
+    else:
+        query.filter(models.Projects.uuid == projectuuid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Project(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'name': fields.StringField(),
+        'active' : fields.BooleanField(),
+        'auto' : fields.BooleanField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Project, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_projects = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Project, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, project, db_project, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        project._context = context
+        for name, field in project.fields.items():
+            value = db_project[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            project[name] = value
+        
+        project.obj_reset_changes()
+        return project
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _project_get_query_from_db(context):
+        query = context.session.query(models.Projects)
+        return query
+
+    @staticmethod
+    @require_context
+    def _project_get_from_db(context, uuid):
+        """Returns a dict describing specific projects."""
+        result = Project._project_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(project_uuid=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _project_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Project._project_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(projects_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Project, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Project, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_project = cls._project_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_project,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_project = cls._project_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_project,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _project_create(context, updates):
+        return _project_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_project = self._project_create(context, updates)
+        self._from_db_object(context, self, db_project)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a project.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_project = context.session.query(models.Projects).\
+            filter_by(uuid=self.uuid).first()
+        if not db_project:
+            raise exception.ImagesNotFound(project_uuid=self.uuid)
+        db_project.update(values)
+        db_project.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_project)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _project_destroy(context, project_uuid=None, projectuuid=None):
+        _project_destroy(context, project_uuid=project_uuid, projectuuid=projectuuid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a project
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a project object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'uuid' in self:
+            self._project_destroy(context, project_uuid=self.uuid)
+        else:
+            self._project_destroy(context, projectuuid=self.projectuuid)
+        #self._from_db_object(context, self, db_project)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_project = Project._project_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_project = db_project.filter(
+                models.Projects.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_project,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _project_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all projectss.
+    """
+    filters = filters or {}
+
+    query = Project._project_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.Projects.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Project._project_get_query_from_db(context).\
+                    filter_by(uuid=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Projects,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class ProjectList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Project'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+        db_projects = _project_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.project.Project,
+                                  db_projects,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Projects).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_project = context.session.query(models.Projects).filter_by(auto=True)
+        db_project.update(values)

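A sketch of iterating the configured projects; note that Project.get_by_name() still raises the FlavorNotFoundByName exception it was copied with when nothing matches. The context helper is assumed as above:

    from gosbs import context as gosbs_context
    from gosbs import exception
    from gosbs.objects import project as project_obj

    ctxt = gosbs_context.get_admin_context()
    try:
        proj = project_obj.Project.get_by_name(ctxt, 'base')
    except exception.FlavorNotFoundByName:
        proj = None

    # Walk every project and pick the active, auto-scheduled ones.
    for proj in project_obj.ProjectList.get_all(ctxt):
        if proj.active and proj.auto:
            print(proj.uuid, proj.name)
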
diff --git a/gosbs/objects/project_build.py b/gosbs/objects/project_build.py
new file mode 100644
index 0000000..e8f1885
--- /dev/null
+++ b/gosbs/objects/project_build.py
@@ -0,0 +1,286 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+BUILD_STATUS = ['failed', 'completed', 'in-progress', 'waiting']
+def _dict_with_extra_specs(project_build_model):
+    extra_specs = {}
+    return dict(project_build_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _project_build_create(context, values):
+    db_project_build = models.ProjectsBuilds()
+    db_project_build.update(values)
+
+    try:
+        db_project_build.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'project_buildid' in e.columns:
+            raise exception.ImagesIdExists(project_build_id=values['project_buildid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_project_build)
+
+
+@db_api.main_context_manager.writer
+def _project_build_destroy(context, project_build_uuid=None, project_builduuid=None):
+    query = context.session.query(models.ProjectsBuilds)
+
+    if project_build_uuid is not None:
+        query.filter(models.ProjectsBuilds.uuid == project_build_uuid).delete()
+    else:
+        query.filter(models.ProjectsBuilds.uuid == project_builduuid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class ProjectBuild(base.NovaObject, base.NovaObjectDictCompat, ):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'user_id': fields.IntegerField(),
+        'project_uuid': fields.UUIDField(),
+        'ebuild_uuid': fields.UUIDField(),
+        'status' : fields.EnumField(valid_values=BUILD_STATUS),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(ProjectBuild, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_project_builds = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(ProjectBuild, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, project_build, db_project_build, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        project_build._context = context
+        for name, field in project_build.fields.items():
+            value = db_project_build[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            project_build[name] = value
+        
+        project_build.obj_reset_changes()
+        return project_build
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _project_build_get_query_from_db(context):
+        query = context.session.query(models.ProjectsBuilds)
+        return query
+
+    @staticmethod
+    @require_context
+    def _project_build_get_from_db(context, uuid):
+        """Returns a dict describing specific project_builds."""
+        result = ProjectBuild._project_build_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(project_build_uuid=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _project_build_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = ProjectBuild._project_build_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(project_builds_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(ProjectBuild, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(ProjectBuild, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_project_build = cls._project_build_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_project_build,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_project_build = cls._project_build_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_project_build,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _project_build_create(context, updates):
+        return _project_build_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_project_build = self._project_build_create(context, updates)
+        self._from_db_object(context, self, db_project_build)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a project_builds.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_project_build = context.session.query(models.ProjectsBuilds).\
+            filter_by(uuid=self.uuid).first()
+        if not db_project_build:
+            raise exception.ImagesNotFound(project_build_uuid=self.uuid)
+        db_project_build.update(values)
+        db_project_build.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_project_build)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _project_build_destroy(context, project_build_uuid=None, project_builduuid=None):
+        _project_build_destroy(context, project_build_uuid=project_build_uuid, project_builduuid=project_builduuid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a project_builds
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a project_builds object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'uuid' in self:
+            self._project_build_destroy(context, project_build_uuid=self.uuid)
+        else:
+            self._project_build_destroy(context, project_builduuid=self.uuid)
+        #self._from_db_object(context, self, db_project_build)
+
+    @base.remotable_classmethod
+    def get_by_filters(cls, context, filters=None):
+        filters = filters or {}
+        db_project_build = ProjectBuild._project_build_get_query_from_db(context)
+    
+        if 'project_uuid' in filters:
+            db_project_build = db_project_build.filter(
+                models.ProjectsBuilds.project_uuid == filters['project_uuid'])
+        if 'repo_uuid' in filters:
+            db_project_build = db_project_build.filter(
+                models.ProjectsBuilds.repo_uuid == filters['repo_uuid'])
+        db_project_build = db_project_build.first()
+        if not db_project_build:
+            return None
+        return cls._from_db_object(context, cls(context), db_project_build,
+                                   expected_attrs=[])
+
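+# Usage sketch (illustration only): look up the single build row for a project
+# and flip its status. get_by_filters() returns None when nothing matches,
+# while get_by_uuid() raises exception.ImagesNotFound. `ctx` is assumed to be
+# an already-initialised RequestContext (see gosbs/context.py) and `project` a
+# loaded objects.project.Project.
+#
+#     build = ProjectBuild.get_by_filters(
+#         ctx, filters={'project_uuid': project.uuid})
+#     if build is not None:
+#         build.status = 'in-progress'
+#         build.save(ctx)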
+
+@db_api.main_context_manager
+def _project_build_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all project_buildss.
+    """
+    filters = filters or {}
+
+    query = ProjectBuild._project_build_get_query_from_db(context)
+
+    if 'project_uuid' in filters:
+            query = query.filter(
+                models.ProjectsBuilds.project_uuid == filters['project_uuid'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = ProjectBuild._project_build_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.ProjectsBuilds,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
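+# Paging sketch for the helper above (illustration only; `ctx` is assumed to
+# be a valid RequestContext): `limit` caps the page size and `marker` is the
+# database id of the last row on the previous page; an unknown marker raises
+# exception.MarkerNotFound.
+#
+#     page = ProjectBuildList.get_all(ctx, filters={'project_uuid': some_uuid},
+#                                     sort_key='id', sort_dir='asc',
+#                                     limit=50, marker=last_seen_id)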
+
+@base.NovaObjectRegistry.register
+class ProjectBuildList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('ProjectBuild'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_project_builds = _project_build_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.project_build.ProjectBuild,
+                                  db_project_builds,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.ProjectsBuilds).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_project_build = context.session.query(models.ProjectsBuilds).filter_by(auto=True)
+        db_project_build.update(values)

diff --git a/gosbs/objects/project_metadata.py b/gosbs/objects/project_metadata.py
new file mode 100644
index 0000000..4edbb55
--- /dev/null
+++ b/gosbs/objects/project_metadata.py
@@ -0,0 +1,308 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(projectmetadata_model):
+    extra_specs = {}
+    return dict(projectmetadata_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _projectmetadata_create(context, values):
+    db_projectmetadata = models.ProjectsMetadata()
+    db_projectmetadata.update(values)
+
+    try:
+        db_projectmetadata.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'projectmetadataid' in e.columns:
+            raise exception.ImagesIdExists(projectmetadata_id=values['projectmetadataid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_projectmetadata)
+
+
+@db_api.main_context_manager.writer
+def _projectmetadata_destroy(context, projectmetadata_id=None, projectmetadataid=None):
+    query = context.session.query(models.ProjectsMetadata)
+
+    if projectmetadata_id is not None:
+        query.filter(models.ProjectsMetadata.id == projectmetadata_id).delete()
+    else:
+        query.filter(models.ProjectsMetadata.id == projectmetadataid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class ProjectMetadata(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'project_uuid': fields.UUIDField(),
+        'titel': fields.StringField(),
+        'description' : fields.StringField(),
+        'project_repo_uuid': fields.UUIDField(),
+        'project_profile' : fields.StringField(),
+        'project_profile_repo_uuid': fields.UUIDField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(ProjectMetadata, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_projectmetadatas = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(ProjectMetadata, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, projectmetadata, db_projectmetadata, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        projectmetadata._context = context
+        for name, field in projectmetadata.fields.items():
+            value = db_projectmetadata[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            projectmetadata[name] = value
+        
+        projectmetadata.obj_reset_changes()
+        return projectmetadata
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _projectmetadata_get_query_from_db(context):
+        query = context.session.query(models.ProjectsMetadata)
+        return query
+
+    @staticmethod
+    @require_context
+    def _projectmetadata_get_from_db(context, id):
+        """Returns a dict describing specific projectmetadatas."""
+        result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(projectmetadata_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _projectmetadata_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(projectmetadatas_name=name)
+        return _dict_with_extra_specs(result)
+
+    @staticmethod
+    @require_context
+    def _projectmetadata_get_by_uuid_from_db(context, uuid):
+        """Returns a dict describing specific flavor."""
+        result = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+                            filter_by(project_uuid=uuid).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(projectmetadatas_name=uuid)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(ProjectMetadata, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(ProjectMetadata, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_projectmetadata = cls._projectmetadata_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_projectmetadata,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_projectmetadata = cls._projectmetadata_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_projectmetadata,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_projectmetadata = cls._projectmetadata_get_by_uuid_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_projectmetadata,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _projectmetadata_create(context, updates):
+        return _projectmetadata_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_projectmetadata = self._projectmetadata_create(context, updates)
+        self._from_db_object(context, self, db_projectmetadata)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a projectmetadatas.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_projectmetadata = context.session.query(models.ProjectsMetadata).\
+            filter_by(id=self.id).first()
+        if not db_projectmetadata:
+            raise exception.ImagesNotFound(projectmetadata_id=self.id)
+        db_projectmetadata.update(values)
+        db_projectmetadata.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_projectmetadata)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _projectmetadata_destroy(context, projectmetadata_id=None, projectmetadataid=None):
+        _projectmetadata_destroy(context, projectmetadata_id=projectmetadata_id, projectmetadataid=projectmetadataid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a projectmetadatas
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a projectmetadatas object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._projectmetadata_destroy(context, projectmetadata_id=self.id)
+        else:
+            self._projectmetadata_destroy(context, projectmetadataid=self.projectmetadataid)
+        #self._from_db_object(context, self, db_projectmetadata)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_projectmetadata = ProjectMetadata._projectmetadata_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_projectmetadata = db_projectmetadata.filter(
+                models.ProjectsMetadata.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_projectmetadata,
+                                   expected_attrs=[])
+
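+# Usage sketch (illustration only): the metadata row is keyed on the project's
+# uuid, so get_by_uuid() is the usual entry point. `ctx` is assumed to be a
+# valid RequestContext and `project` a loaded objects.project.Project.
+#
+#     metadata = ProjectMetadata.get_by_uuid(ctx, project.uuid)
+#     profile = metadata.project_profile
+#     profile_repo_uuid = metadata.project_profile_repo_uuid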
+
+@db_api.main_context_manager
+def _projectmetadata_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all projectmetadatass.
+    """
+    filters = filters or {}
+
+    query = ProjectMetadata._projectmetadata_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.ProjectsMetadata.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = ProjectMetadata._projectmetadata_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.ProjectsMetadata,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class ProjectMetadataList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('ProjectMetadata'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_projectmetadatas = _projectmetadata_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.project_metadata.ProjectMetadata,
+                                  db_projectmetadatas,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.ProjectsMetadata).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_projectmetadata = context.session.query(models.ProjectsMetadata).filter_by(auto=True)
+        db_projectmetadata.update(values)

diff --git a/gosbs/objects/project_repo.py b/gosbs/objects/project_repo.py
new file mode 100644
index 0000000..85e458d
--- /dev/null
+++ b/gosbs/objects/project_repo.py
@@ -0,0 +1,295 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(project_repo_model):
+    extra_specs = {}
+    return dict(project_repo_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _project_repo_create(context, values):
+    db_project_repo = models.ProjectsRepos()
+    db_project_repo.update(values)
+
+    try:
+        db_project_repo.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'project_repoid' in e.columns:
+            raise exception.ImagesIdExists(project_repo_id=values['project_repoid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_project_repo)
+
+
+@db_api.main_context_manager.writer
+def _project_repo_destroy(context, project_repo_id=None, project_repoid=None):
+    query = context.session.query(models.ProjectsRepos)
+
+    if project_repo_id is not None:
+        query.filter(models.ProjectsRepos.uuid == project_repo_id).delete()
+    else:
+        query.filter(models.ProjectsRepos.uuid == project_repoid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class ProjectRepo(base.NovaObject, base.NovaObjectDictCompat, ):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'repo_uuid': fields.UUIDField(),
+        'project_uuid': fields.UUIDField(),
+        'auto' : fields.BooleanField(),
+        'build' : fields.BooleanField(),
+        'test' : fields.BooleanField(),
+        'qa' : fields.BooleanField(),
+        'depclean' : fields.BooleanField(),
+        'repoman' : fields.BooleanField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(ProjectRepo, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_project_repos = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(ProjectRepo, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, project_repo, db_project_repo, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        project_repo._context = context
+        for name, field in project_repo.fields.items():
+            value = db_project_repo[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            project_repo[name] = value
+        
+        project_repo.obj_reset_changes()
+        return project_repo
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _project_repo_get_query_from_db(context):
+        query = context.session.query(models.ProjectsRepos)
+        return query
+
+    @staticmethod
+    @require_context
+    def _project_repo_get_from_db(context, id):
+        """Returns a dict describing specific project_repos."""
+        result = ProjectRepo._project_repo_get_query_from_db(context).\
+                        filter_by(uuid=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(project_repo_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _project_repo_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = ProjectRepo._project_repo_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(project_repos_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(ProjectRepo, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(ProjectRepo, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_project_repo = cls._project_repo_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_project_repo,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_project_repo = cls._project_repo_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_project_repo,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _project_repo_create(context, updates):
+        return _project_repo_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_project_repo = self._project_repo_create(context, updates)
+        self._from_db_object(context, self, db_project_repo)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a project_repos.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_project_repo = context.session.query(models.ProjectsRepos).\
+            filter_by(uuid=self.id).first()
+        if not db_project_repo:
+            raise exception.ImagesNotFound(project_repo_id=self.id)
+        db_project_repo.update(values)
+        db_project_repo.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_project_repo)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _project_repo_destroy(context, project_repo_id=None, project_repoid=None):
+        _project_repo_destroy(context, project_repo_id=project_repo_id, project_repoid=project_repoid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a project_repos
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a project_repos object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._project_repo_destroy(context, project_repo_id=self.id)
+        else:
+            self._project_repo_destroy(context, project_repoid=self.project_repoid)
+        #self._from_db_object(context, self, db_project_repo)
+
+    @base.remotable_classmethod
+    def get_by_filters(cls, context, filters=None):
+        filters = filters or {}
+        db_project_repo = ProjectRepo._project_repo_get_query_from_db(context)
+    
+        if 'project_uuid' in filters:
+            db_project_repo = db_project_repo.filter(
+                models.ProjectsRepos.project_uuid == filters['project_uuid'])
+        if 'repo_uuid' in filters:
+            db_project_repo = db_project_repo.filter(
+                models.ProjectsRepos.repo_uuid == filters['repo_uuid'])
+        db_project_repo = db_project_repo.first()
+        if not db_project_repo:
+            return None
+        return cls._from_db_object(context, cls(context), db_project_repo,
+                                   expected_attrs=[])
+
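+# Usage sketch (illustration only): a project/repo pairing is looked up by the
+# two uuids together, and get_by_filters() returns None rather than raising
+# when no row matches. `ctx`, `project_uuid` and `repo_uuid` are assumed to be
+# supplied by the caller.
+#
+#     project_repo = ProjectRepo.get_by_filters(
+#         ctx, filters={'project_uuid': project_uuid, 'repo_uuid': repo_uuid})
+#     if project_repo is not None and project_repo.build:
+#         pass  # e.g. schedule a build task for this repo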
+
+@db_api.main_context_manager
+def _project_repo_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all project_reposs.
+    """
+    filters = filters or {}
+
+    query = ProjectRepo._project_repo_get_query_from_db(context)
+
+    if 'project_uuid' in filters:
+            query = query.filter(
+                models.ProjectsRepos.project_uuid == filters['project_uuid'])
+    if 'repo_uuid' in filters:
+            query = query.filter(
+                models.ProjectsRepos.repo_uuid == filters['repo_uuid'])
+    if 'build' in filters:
+            query = query.filter(
+                models.ProjectsRepos.build == filters['build'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = ProjectRepo._project_repo_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.ProjectsRepos,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class ProjectRepoList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('ProjectRepo'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_project_repos = _project_repo_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.project_repo.ProjectRepo,
+                                  db_project_repos,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.ProjectsRepos).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_project_repo = context.session.query(models.ProjectsRepos).filter_by(auto=True)
+        db_project_repo.update(values)

diff --git a/gosbs/objects/repo.py b/gosbs/objects/repo.py
new file mode 100644
index 0000000..747016d
--- /dev/null
+++ b/gosbs/objects/repo.py
@@ -0,0 +1,290 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+REPO_STATUSES = ['failed', 'completed', 'in-progress', 'waiting']
+REPO2_STATUSES = ['failed', 'completed', 'in-progress', 'waiting', 'db_rebuild']
+REPO_TYPE = ['ebuild', 'project']
+
+def _dict_with_extra_specs(repo_model):
+    extra_specs = {}
+    return dict(repo_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _repo_create(context, values):
+    db_repo = models.Repos()
+    db_repo.update(values)
+
+    try:
+        db_repo.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'repoid' in e.columns:
+            raise exception.ImagesIdExists(repo_id=values['repoid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_repo)
+
+
+@db_api.main_context_manager.writer
+def _repo_destroy(context, repo_uuid=None, repouuid=None):
+    query = context.session.query(models.Repos)
+
+    if repo_uuid is not None:
+        query.filter(models.Repos.uuid == repo_uuid).delete()
+    else:
+        query.filter(models.Repos.uuid == repouuid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Repo(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'name': fields.StringField(),
+        'status' : fields.EnumField(valid_values=REPO_STATUSES),
+        'description' : fields.StringField(),
+        'src_url': fields.StringField(),
+        'auto' : fields.BooleanField(),
+        'repo_type' : fields.EnumField(valid_values=REPO_TYPE),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Repo, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_projects = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Repo, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, repo, db_repo, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        repo._context = context
+        for name, field in repo.fields.items():
+            value = db_repo[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            repo[name] = value
+        
+        repo.obj_reset_changes()
+        return repo
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _repo_get_query_from_db(context):
+        query = context.session.query(models.Repos)
+        return query
+
+    @staticmethod
+    @require_context
+    def _repo_get_from_db(context, uuid):
+        """Returns a dict describing specific repos."""
+        result = Repo._repo_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(repo_id=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _repo_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Repo._repo_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(repos_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Repo, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Repo, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_repo = cls._repo_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_repo,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_repo = cls._repo_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_repo,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _repo_create(context, updates):
+        return _repo_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_repo = self._repo_create(context, updates)
+        self._from_db_object(context, self, db_repo)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a repos.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_repo = context.session.query(models.Repos).\
+            filter_by(uuid=self.uuid).first()
+        if not db_repo:
+            raise exception.ImagesNotFound(repo_uuid=self.uuid)
+        db_repo.update(values)
+        db_repo.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_repo)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _repo_destroy(context, repo_uuid=None, repouuid=None):
+        _repo_destroy(context, repo_uuid=repo_uuid, repouuid=repouuid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a repos
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a repos object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'uuid' in self:
+            self._repo_destroy(context, repo_uuid=self.uuid)
+        else:
+            self._repo_destroy(context, repouuid=self.repouuid)
+        #self._from_db_object(context, self, db_repo)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_repo = Repo._repo_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_repo = db_repo.filter(
+                models.Repos.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_repo,
+                                   expected_attrs=[])
+
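+# Usage sketch (illustration only): a repo row is typically fetched by name
+# and its status flipped around a sync run. Assumes `ctx` is a valid
+# RequestContext and that a row named 'gentoo' already exists.
+#
+#     repo = Repo.get_by_name(ctx, 'gentoo')
+#     repo.status = 'in-progress'
+#     repo.save(ctx)
+#     # ... sync the repo ...
+#     repo.status = 'completed'
+#     repo.save(ctx)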
+
+@db_api.main_context_manager
+def _repo_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all reposs.
+    """
+    filters = filters or {}
+
+    query = Repo._repo_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.Repos.status == filters['status'])
+    if 'repo_type' in filters:
+            query = query.filter(
+                models.Repos.repo_type == filters['repo_type'])
+    if 'deleted' in filters:
+            query = query.filter(
+                models.Repos.deleted == filters['deleted'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Repo._repo_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Repos,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class RepoList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Repo'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+        db_repos = _repo_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.repo.Repo,
+                                  db_repos,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Repos).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_repo = context.session.query(models.Repos).filter_by(auto=True)
+        db_repo.update(values)

diff --git a/gosbs/objects/restriction.py b/gosbs/objects/restriction.py
new file mode 100644
index 0000000..4aede29
--- /dev/null
+++ b/gosbs/objects/restriction.py
@@ -0,0 +1,281 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it fits what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(restriction_model):
+    extra_specs = {}
+    return dict(restriction_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _restriction_create(context, values):
+    db_restriction = models.Restrictions()
+    db_restriction.update(values)
+
+    try:
+        db_restriction.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'restrictionid' in e.columns:
+            raise exception.ImagesIdExists(restriction_id=values['restrictionid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_restriction)
+
+
+@db_api.main_context_manager.writer
+def _restriction_destroy(context, restriction_id=None, restrictionid=None):
+    query = context.session.query(models.Restrictions)
+
+    if restriction_id is not None:
+        query.filter(models.Restrictions.id == restriction_id).delete()
+    else:
+        query.filter(models.Restrictions.id == restrictionid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Restriction(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'restriction': fields.StringField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Restriction, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_restrictions = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Restriction, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, restriction, db_restriction, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        restriction._context = context
+        for name, field in restriction.fields.items():
+            value = db_restriction[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            restriction[name] = value
+        
+        restriction.obj_reset_changes()
+        return restriction
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _restriction_get_query_from_db(context):
+        query = context.session.query(models.Restrictions)
+        return query
+
+    @staticmethod
+    @require_context
+    def _restriction_get_from_db(context, id):
+        """Returns a dict describing specific restrictions."""
+        result = Restriction._restriction_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(restriction_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _restriction_get_by_restriction_from_db(context, restriction):
+        """Returns a dict describing specific flavor."""
+        result = Restriction._restriction_get_query_from_db(context).\
+                            filter_by(restriction=restriction).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(restrictions_restriction=restriction)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Restriction, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Restriction, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_restriction = cls._restriction_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_restriction,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, restriction):
+        db_restriction = Restriction._restriction_get_query_from_db(context)
+        db_restriction = db_restriction.filter_by(restriction=restriction)
+        db_restriction = db_restriction.first()
+        if not db_restriction:
+            return None
+        return cls._from_db_object(context, cls(context), db_restriction,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _restriction_create(context, updates):
+        return _restriction_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_restriction = self._restriction_create(context, updates)
+        self._from_db_object(context, self, db_restriction)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a restrictions.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_restriction = context.session.query(models.Restrictions).\
+            filter_by(id=self.id).first()
+        if not db_restriction:
+            raise exception.ImagesNotFound(restriction_id=self.id)
+        db_restriction.update(values)
+        db_restriction.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_restriction)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _restriction_destroy(context, restriction_id=None, restrictionid=None):
+        _restriction_destroy(context, restriction_id=restriction_id, restrictionid=restrictionid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a restrictions
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a restrictions object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._restriction_destroy(context, restriction_id=self.id)
+        else:
+            self._restriction_destroy(context, restrictionid=self.restrictionid)
+        #self._from_db_object(context, self, db_restriction)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_restriction = Restriction._restriction_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_restriction = db_restriction.filter(
+                models.Restrictions.status == filters['status']).first()
+        return cls._from_db_object(context, cls(context), db_restriction,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _restriction_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all restrictionss.
+    """
+    filters = filters or {}
+
+    query = Restriction._restriction_get_query_from_db(context)
+
+    if 'status' in filters:
+            query = query.filter(
+                models.Restrictions.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Restriction._restriction_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Restrictions,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class RestrictionList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Restriction'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_restrictions = _restriction_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.restriction.Restriction,
+                                  db_restrictions,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Restrictions).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_restriction = context.session.query(models.Restrictions).filter_by(auto=True)
+        db_restriction.update(values)
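
For reference, a minimal usage sketch of the paginated list API above (not part
of the patch; `ctxt` stands for a gosbs RequestContext, and the 'waiting'
status filter and page size are only examples):

    from gosbs import objects

    def iter_waiting_restrictions(ctxt, page_size=50):
        # Walk the restrictions table in pages, using the limit/marker
        # protocol implemented by _restriction_get_all_from_db().
        marker = None
        while True:
            batch = objects.restriction.RestrictionList.get_all(
                ctxt, filters={'status': 'waiting'},
                sort_key='id', sort_dir='asc',
                limit=page_size, marker=marker)
            if not batch:
                break
            for restriction in batch:
                yield restriction
            marker = batch[-1].id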

diff --git a/gosbs/objects/service.py b/gosbs/objects/service.py
new file mode 100644
index 0000000..1fc0c4f
--- /dev/null
+++ b/gosbs/objects/service.py
@@ -0,0 +1,486 @@
+#    Copyright 2013 IBM Corp.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/service.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+from oslo_utils import versionutils
+
+from gosbs import context as gosbs_context
+from gosbs.db import api as db
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+
+LOG = logging.getLogger(__name__)
+
+
+# NOTE(danms): This is the global service version counter
+SERVICE_VERSION = 36
+
+
+# NOTE(danms): This is our SERVICE_VERSION history. The idea is that any
+# time we bump the version, we will put an entry here to record the change,
+# along with any pertinent data. For things that we can programmatically
+# detect that need a bump, we put something in _collect_things() below to
+# assemble a dict of things we can check. For example, we pretty much always
+# want to consider the compute RPC API version a thing that requires a service
+# bump so that we can drive version pins from it. We could include other
+# service RPC versions at some point, minimum object versions, etc.
+#
+# The TestServiceVersion test will fail if the calculated set of
+# things differs from the value in the last item of the list below,
+# indicating that a version bump is needed.
+#
+# Also note that there are other reasons we may want to bump this,
+# which will not be caught by the test. An example of this would be
+# triggering (or disabling) an online data migration once all services
+# in the cluster are at the same level.
+#
+# If a version bump is required for something mechanical, just document
+# that generic thing here (like compute RPC version bumps). No need to
+# replicate the details from compute/rpcapi.py here. However, for more
+# complex service interactions, extra detail should be provided
+SERVICE_VERSION_HISTORY = (
+    # Version 0: Pre-history
+    {'scheduler_rpc': '1.0'},
+)
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+@base.NovaObjectRegistry.register
+class Service(base.NovaPersistentObject, base.NovaObject,
+              base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(read_only=True),
+        'uuid': fields.UUIDField(),
+        'host': fields.StringField(nullable=True),
+        'binary': fields.StringField(nullable=True),
+        'topic': fields.StringField(nullable=True),
+        'report_count': fields.IntegerField(),
+        'disabled': fields.BooleanField(),
+        'disabled_reason': fields.StringField(nullable=True),
+        #'availability_zone': fields.StringField(nullable=True),
+        #'compute_node': fields.ObjectField('ComputeNode'),
+        'last_seen_up': fields.DateTimeField(nullable=True),
+        'forced_down': fields.BooleanField(),
+        'version': fields.IntegerField(),
+    }
+
+    _MIN_VERSION_CACHE = {}
+    _SERVICE_VERSION_CACHING = False
+
+    def __init__(self, *args, **kwargs):
+        # NOTE(danms): We're going against the rules here and overriding
+        # init. The reason is that we want to *ensure* that we're always
+        # setting the current service version on our objects, overriding
+        # whatever else might be set in the database, or otherwise (which
+        # is the normal reason not to override init).
+        #
+        # We also need to do this here so that it's set on the client side
+        # all the time, such that create() and save() operations will
+        # include the current service version.
+        if 'version' in kwargs:
+            raise exception.ObjectActionError(
+                action='init',
+                reason='Version field is immutable')
+
+        super(Service, self).__init__(*args, **kwargs)
+        self.version = SERVICE_VERSION
+
+    def obj_make_compatible_from_manifest(self, primitive, target_version,
+                                          version_manifest):
+        super(Service, self).obj_make_compatible_from_manifest(
+            primitive, target_version, version_manifest)
+        _target_version = versionutils.convert_version_to_tuple(target_version)
+        if _target_version < (1, 21) and 'uuid' in primitive:
+            del primitive['uuid']
+        if _target_version < (1, 16) and 'version' in primitive:
+            del primitive['version']
+        if _target_version < (1, 14) and 'forced_down' in primitive:
+            del primitive['forced_down']
+        if _target_version < (1, 13) and 'last_seen_up' in primitive:
+            del primitive['last_seen_up']
+        if _target_version < (1, 10):
+            # service.compute_node was not lazy-loaded, we need to provide it
+            # when called
+            self._do_compute_node(self._context, primitive,
+                                  version_manifest)
+
+    def _do_compute_node(self, context, primitive, version_manifest):
+        try:
+            target_version = version_manifest['ComputeNode']
+            # NOTE(sbauza): Ironic deployments can have multiple
+            # nodes for the same service, but for keeping same behaviour,
+            # returning only the first elem of the list
+            compute = objects.ComputeNodeList.get_all_by_host(
+                context, primitive['host'])[0]
+        except Exception:
+            return
+        primitive['compute_node'] = compute.obj_to_primitive(
+            target_version=target_version,
+            version_manifest=version_manifest)
+
+    @staticmethod
+    def _from_db_object(context, service, db_service):
+        allow_missing = ('availability_zone',)
+        for key in service.fields:
+            if key in allow_missing and key not in db_service:
+                continue
+            if key == 'compute_node':
+                #  NOTE(sbauza); We want to only lazy-load compute_node
+                continue
+            elif key == 'version':
+                # NOTE(danms): Special handling of the version field, since
+                # it is read_only and set in our init.
+                setattr(service, base.get_attrname(key), db_service[key])
+            elif key == 'uuid' and not db_service.get(key):
+                # Leave uuid off the object if undefined in the database
+                # so that it will be generated below.
+                continue
+            else:
+                service[key] = db_service[key]
+
+        service._context = context
+        service.obj_reset_changes()
+
+        # TODO(dpeschman): Drop this once all services have uuids in database
+        if 'uuid' not in service:
+            service.uuid = uuidutils.generate_uuid()
+            LOG.debug('Generated UUID %(uuid)s for service %(id)i',
+                      dict(uuid=service.uuid, id=service.id))
+            service.save()
+
+        return service
+
+    def obj_load_attr(self, attrname):
+        if not self._context:
+            raise exception.OrphanedObjectError(method='obj_load_attr',
+                                                objtype=self.obj_name())
+
+        LOG.debug("Lazy-loading '%(attr)s' on %(name)s id %(id)s",
+                  {'attr': attrname,
+                   'name': self.obj_name(),
+                   'id': self.id,
+                   })
+        if attrname != 'compute_node':
+            raise exception.ObjectActionError(
+                action='obj_load_attr',
+                reason='attribute %s not lazy-loadable' % attrname)
+        if self.binary == 'nova-compute':
+            # Only n-cpu services have attached compute_node(s)
+            compute_nodes = objects.ComputeNodeList.get_all_by_host(
+                self._context, self.host)
+        else:
+            # NOTE(sbauza); Previous behaviour was raising a ServiceNotFound,
+            # we keep it for backwards compatibility
+            raise exception.ServiceNotFound(service_id=self.id)
+        # NOTE(sbauza): Ironic deployments can have multiple nodes
+        # for the same service, but for keeping same behaviour, returning only
+        # the first elem of the list
+        #self.compute_node = compute_nodes[0]
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, service_id):
+        db_service = db.service_get(context, service_id)
+        return cls._from_db_object(context, cls(), db_service)
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, service_uuid):
+        db_service = db.service_get_by_uuid(context, service_uuid)
+        return cls._from_db_object(context, cls(), db_service)
+
+    @base.remotable_classmethod
+    def get_by_host_and_topic(cls, context, host, topic):
+        db_service = db.service_get_by_host_and_topic(context, host, topic)
+        return cls._from_db_object(context, cls(), db_service)
+
+    @base.remotable_classmethod
+    def get_by_topic(cls, context, topic):
+        db_service = db.service_get_by_topic(context, topic)
+        return cls._from_db_object(context, cls(), db_service)
+
+    @base.remotable_classmethod
+    def get_by_host_and_binary(cls, context, host, binary):
+        try:
+            db_service = db.service_get_by_host_and_binary(context,
+                                                           host, binary)
+        except exception.HostBinaryNotFound:
+            return
+        return cls._from_db_object(context, cls(), db_service)
+
+    @staticmethod
+    @db.select_db_reader_mode
+    def _db_service_get_by_compute_host(context, host, use_slave=False):
+        return db.service_get_by_compute_host(context, host)
+
+    @base.remotable_classmethod
+    def get_by_compute_host(cls, context, host, use_slave=False):
+        db_service = cls._db_service_get_by_compute_host(context, host,
+                                                         use_slave=use_slave)
+        return cls._from_db_object(context, cls(), db_service)
+
+    # NOTE(ndipanov): This is deprecated and should be removed on the next
+    # major version bump
+    @base.remotable_classmethod
+    def get_by_args(cls, context, host, binary):
+        db_service = db.service_get_by_host_and_binary(context, host, binary)
+        return cls._from_db_object(context, cls(), db_service)
+
+    def _check_minimum_version(self):
+        """Enforce that we are not older that the minimum version.
+
+        This is a loose check to avoid creating or updating our service
+        record if we would do so with a version that is older that the current
+        minimum of all services. This could happen if we were started with
+        older code by accident, either due to a rollback or an old and
+        un-updated node suddenly coming back onto the network.
+
+        There is technically a race here between the check and the update,
+        but since the minimum version should always roll forward and never
+        backwards, we don't need to worry about doing it atomically. Further,
+        the consequence for getting this wrong is minor, in that we'll just
+        fail to send messages that other services understand.
+        """
+        if not self.obj_attr_is_set('version'):
+            return
+        if not self.obj_attr_is_set('binary'):
+            return
+        minver = self.get_minimum_version(self._context, self.binary)
+        if minver > self.version:
+            raise exception.ServiceTooOld(thisver=self.version,
+                                          minver=minver)
+
+    @base.remotable
+    def create(self):
+        if self.obj_attr_is_set('id'):
+            raise exception.ObjectActionError(action='create',
+                                              reason='already created')
+        self._check_minimum_version()
+        updates = self.obj_get_changes()
+
+        if 'uuid' not in updates:
+            updates['uuid'] = uuidutils.generate_uuid()
+            self.uuid = updates['uuid']
+
+        db_service = db.service_create(self._context, updates)
+        self._from_db_object(self._context, self, db_service)
+
+    @base.remotable
+    def save(self):
+        updates = self.obj_get_changes()
+        updates.pop('id', None)
+        self._check_minimum_version()
+        db_service = db.service_update(self._context, self.id, updates)
+        self._from_db_object(self._context, self, db_service)
+
+        #self._send_status_update_notification(updates)
+
+    def _send_status_update_notification(self, updates):
+        # Note(gibi): We do not trigger notification on version as that field
+        # is always dirty, which would cause that nova sends notification on
+        # every other field change. See the comment in save() too.
+        if set(updates.keys()).intersection(
+                {'disabled', 'disabled_reason', 'forced_down'}):
+            self._send_notification(fields.NotificationAction.UPDATE)
+
+    def _send_notification(self, action):
+        payload = service_notification.ServiceStatusPayload(self)
+        service_notification.ServiceStatusNotification(
+            publisher=notification.NotificationPublisher.from_service_obj(
+                self),
+            event_type=notification.EventType(
+                object='service',
+                action=action),
+            priority=fields.NotificationPriority.INFO,
+            payload=payload).emit(self._context)
+
+    @base.remotable
+    def destroy(self):
+        db.service_destroy(self._context, self.id)
+        #self._send_notification(fields.NotificationAction.DELETE)
+
+    @classmethod
+    def enable_min_version_cache(cls):
+        cls.clear_min_version_cache()
+        cls._SERVICE_VERSION_CACHING = True
+
+    @classmethod
+    def clear_min_version_cache(cls):
+        cls._MIN_VERSION_CACHE = {}
+
+    @staticmethod
+    @db.select_db_reader_mode
+    def _db_service_get_minimum_version(context, binaries, use_slave=False):
+        return db.service_get_minimum_version(context, binaries)
+
+    @base.remotable_classmethod
+    def get_minimum_version_multi(cls, context, binaries, use_slave=False):
+        if not all(binary.startswith('gosbs-') for binary in binaries):
+            LOG.warning('get_minimum_version called with likely-incorrect '
+                        'binaries `%s\'', ','.join(binaries))
+            raise exception.ObjectActionError(action='get_minimum_version',
+                                              reason='Invalid binary prefix')
+
+        if (not cls._SERVICE_VERSION_CACHING or
+              any(binary not in cls._MIN_VERSION_CACHE
+                  for binary in binaries)):
+            min_versions = cls._db_service_get_minimum_version(
+                context, binaries, use_slave=use_slave)
+            if min_versions:
+                min_versions = {binary: version or 0
+                                for binary, version in
+                                min_versions.items()}
+                cls._MIN_VERSION_CACHE.update(min_versions)
+        else:
+            min_versions = {binary: cls._MIN_VERSION_CACHE[binary]
+                            for binary in binaries}
+
+        if min_versions:
+            version = min(min_versions.values())
+        else:
+            version = 0
+        # NOTE(danms): Since our return value is not controlled by object
+        # schema, be explicit here.
+        version = int(version)
+
+        return version
+
+    @base.remotable_classmethod
+    def get_minimum_version(cls, context, binary, use_slave=False):
+        return cls.get_minimum_version_multi(context, [binary],
+                                             use_slave=use_slave)
+
+
+def get_minimum_version_all_cells(context, binaries, require_all=False):
+    """Get the minimum service version, checking all cells.
+
+    This attempts to calculate the minimum service version for a set
+    of binaries across all the cells in the system. If require_all
+    is False, then any cells that fail to report a version will be
+    ignored (assuming they won't be candidates for scheduling and thus
+    excluding them from the minimum version calculation is reasonable).
+    If require_all is True, then a failing cell will cause this to raise
+    exception.CellTimeout, as would be appropriate for gating some
+    data migration until everything is new enough.
+
+    Note that services that do not report a positive version are excluded
+    from this, as it crosses all cells which will naturally not have all
+    services.
+    """
+
+    if not all(binary.startswith('gosbs-') for binary in binaries):
+        LOG.warning('get_minimum_version_all_cells called with '
+                    'likely-incorrect binaries `%s\'', ','.join(binaries))
+        raise exception.ObjectActionError(
+            action='get_minimum_version_all_cells',
+            reason='Invalid binary prefix')
+
+    # NOTE(danms): Instead of using Service.get_minimum_version_multi(), we
+    # replicate the call directly to the underlying DB method here because
+    # we want to defeat the caching and we need to filter non-present
+    # services differently from the single-cell method.
+
+    results = gosbs_context.scatter_gather_all_cells(
+        context,
+        Service._db_service_get_minimum_version,
+        binaries)
+
+    min_version = None
+    for cell_uuid, result in results.items():
+        if result is gosbs_context.did_not_respond_sentinel:
+            LOG.warning('Cell %s did not respond when getting minimum '
+                        'service version', cell_uuid)
+            if require_all:
+                raise exception.CellTimeout()
+        elif isinstance(result, Exception):
+            LOG.warning('Failed to get minimum service version for cell %s',
+                        cell_uuid)
+            if require_all:
+                # NOTE(danms): Okay, this isn't necessarily a timeout, but
+                # it's functionally the same from the caller's perspective
+                # and we logged the fact that it was actually a failure
+                # for the forensic investigator during the scatter/gather
+                # routine.
+                raise exception.CellTimeout()
+        else:
+            # NOTE(danms): Don't consider a zero or None result as the minimum
+            # since we're crossing cells and will likely not have all the
+            # services being probed.
+            relevant_versions = [version for version in result.values()
+                                 if version]
+            if relevant_versions:
+                min_version_cell = min(relevant_versions)
+                min_version = (min(min_version, min_version_cell)
+                               if min_version else min_version_cell)
+
+    # NOTE(danms): If we got no matches at all (such as at first startup)
+    # then report that as zero to be consistent with the other such
+    # methods.
+    return min_version or 0
+
+
+@base.NovaObjectRegistry.register
+class ServiceList(base.ObjectListBase, base.NovaObject):
+    # Version 1.0: Initial version
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Service'),
+        }
+
+    @base.remotable_classmethod
+    def get_by_topic(cls, context, topic):
+        db_services = db.service_get_all_by_topic(context, topic)
+        return base.obj_make_list(context, cls(context), objects.Service,
+                                  db_services)
+
+    # NOTE(paul-carlton2): In v2.0 of the object the include_disabled flag
+    # will be removed so both enabled and disabled hosts are returned
+    @base.remotable_classmethod
+    def get_by_binary(cls, context, binary, include_disabled=False):
+        db_services = db.service_get_all_by_binary(
+            context, binary, include_disabled=include_disabled)
+        return base.obj_make_list(context, cls(context), objects.Service,
+                                  db_services)
+
+    @base.remotable_classmethod
+    def get_by_host(cls, context, host):
+        db_services = db.service_get_all_by_host(context, host)
+        return base.obj_make_list(context, cls(context), objects.Service,
+                                  db_services)
+
+    @base.remotable_classmethod
+    def get_all(cls, context, disabled=None, set_zones=False):
+        db_services = db.service_get_all(context, disabled=disabled)
+        if set_zones:
+            db_services = availability_zones.set_availability_zones(
+                context, db_services)
+        return base.obj_make_list(context, cls(context), objects.Service,
+                                  db_services)
+
+    @base.remotable_classmethod
+    def get_all_computes_by_hv_type(cls, context, hv_type):
+        db_services = db.service_get_all_computes_by_hv_type(
+            context, hv_type, include_disabled=False)
+        return base.obj_make_list(context, cls(context), objects.Service,
+                                  db_services)
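
As a hedged illustration of how the Service object above is meant to be used by
a gosbs daemon (a sketch; the host name, the 'gosbs-scheduler' binary, the
topic and `ctxt` are assumptions, not values taken from this patch):

    from gosbs import objects

    def register_scheduler_service(ctxt, host):
        # get_by_host_and_binary() returns None when no row exists yet.
        service = objects.Service.get_by_host_and_binary(
            ctxt, host, 'gosbs-scheduler')
        if service is None:
            service = objects.Service(context=ctxt, host=host,
                                      binary='gosbs-scheduler',
                                      topic='scheduler', report_count=0,
                                      disabled=False, forced_down=False)
            # create() stamps the row with the current SERVICE_VERSION
            # (see Service.__init__) and generates a uuid if missing.
            service.create()

        # Oldest version still reported for this binary; callers can use it
        # to gate behaviour until every node runs new enough code.
        minver = objects.Service.get_minimum_version(ctxt, 'gosbs-scheduler')
        return service, minver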

diff --git a/gosbs/objects/service_repo.py b/gosbs/objects/service_repo.py
new file mode 100644
index 0000000..f4761aa
--- /dev/null
+++ b/gosbs/objects/service_repo.py
@@ -0,0 +1,296 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+REPO_STATUSES = ['failed', 'completed', 'in-progress', 'waiting', 'update_db', 'rebuild_db']
+
+def _dict_with_extra_specs(service_repo_model):
+    extra_specs = {}
+    return dict(service_repo_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _service_repo_create(context, values):
+    db_service_repo = models.ServicesRepos()
+    db_service_repo.update(values)
+
+    try:
+        db_service_repo.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'service_repoid' in e.columns:
+            raise exception.ImagesIdExists(service_repo_id=values['service_repoid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_service_repo)
+
+
+@db_api.main_context_manager.writer
+def _service_repo_destroy(context, service_repo_id=None, service_repoid=None):
+    query = context.session.query(models.ServicesRepos)
+
+    if service_repo_id is not None:
+        query.filter(models.ServicesRepos.id == service_repo_id).delete()
+    else:
+        query.filter(models.ServicesRepos.id == service_repoid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class ServiceRepo(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject2,):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(read_only=True),
+        'repo_uuid': fields.UUIDField(),
+        'service_uuid': fields.UUIDField(),
+        'auto' : fields.BooleanField(),
+        'status' : fields.EnumField(valid_values=REPO_STATUSES),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(ServiceRepo, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_service_repos = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(ServiceRepo, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, service_repo, db_service_repo, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        service_repo._context = context
+        for name, field in service_repo.fields.items():
+            value = db_service_repo[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            service_repo[name] = value
+        
+        service_repo.obj_reset_changes()
+        return service_repo
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _service_repo_get_query_from_db(context):
+        query = context.session.query(models.ServicesRepos)
+        return query
+
+    @staticmethod
+    @require_context
+    def _service_repo_get_from_db(context, id):
+        """Returns a dict describing specific service_repos."""
+        result = ServiceRepo._service_repo_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(service_repo_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _service_repo_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = ServiceRepo._service_repo_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(service_repos_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(ServiceRepo, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(ServiceRepo, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_service_repo = cls._service_repo_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_service_repo,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_service_repo = cls._service_repo_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_service_repo,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _service_repo_create(context, updates):
+        return _service_repo_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_service_repo = self._service_repo_create(context, updates)
+        self._from_db_object(context, self, db_service_repo)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a service repo.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_service_repo = context.session.query(models.ServicesRepos).\
+            filter_by(id=self.id).first()
+        if not db_service_repo:
+            raise exception.ImagesNotFound(service_repo_id=self.id)
+        db_service_repo.update(values)
+        db_service_repo.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_service_repo)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _service_repo_destroy(context, service_repo_id=None, service_repoid=None):
+        _service_repo_destroy(context, service_repo_id=service_repo_id, service_repoid=service_repoid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a service_repo
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a service_repo object and a subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._service_repo_destroy(context, service_repo_id=self.id)
+        else:
+            self._service_repo_destroy(context, service_repoid=self.service_repoid)
+        #self._from_db_object(context, self, db_service_repo)
+
+    @base.remotable_classmethod
+    def get_by_filters(cls, context, filters=None):
+        filters = filters or {}
+        db_service_repo = ServiceRepo._service_repo_get_query_from_db(context)
+    
+        if 'status' in filters:
+            db_service_repo = db_service_repo.filter(
+                models.ServicesRepos.status == filters['status'])
+        if 'repo_uuid' in filters:
+            db_service_repo = db_service_repo.filter(
+                models.ServicesRepos.repo_uuid == filters['repo_uuid'])
+        if 'service_uuid' in filters:
+            db_service_repo = db_service_repo.filter(
+                models.ServicesRepos.service_uuid == filters['service_uuid'])
+        db_service_repo = db_service_repo.first()
+        if not db_service_repo:
+            return None
+        return cls._from_db_object(context, cls(context), db_service_repo,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _service_repo_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all service_reposs.
+    """
+    filters = filters or {}
+
+    query = ServiceRepo._service_repo_get_query_from_db(context)
+
+    if 'service_uuid' in filters:
+        query = query.filter(
+            models.ServicesRepos.service_uuid == filters['service_uuid'])
+    if 'status' in filters:
+        query = query.filter(
+            models.ServicesRepos.status == filters['status'])
+    if 'repo_uuid' in filters:
+        query = query.filter(
+            models.ServicesRepos.repo_uuid == filters['repo_uuid'])
+    if not query:
+        return None
+    marker_row = None
+    if marker is not None:
+        marker_row = ServiceRepo._service_repo_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.ServicesRepos,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class ServiceRepoList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('ServiceRepo'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_service_repos = _service_repo_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.service_repo.ServiceRepo,
+                                  db_service_repos,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.ServicesRepos).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_service_repo = context.session.query(models.ServicesRepos).filter_by(auto=True)
+        db_service_repo.update(values)
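
A hedged sketch of how a scheduler could use the ServiceRepo binding above to
claim a repo for the local service (the uuid arguments and the
'waiting' to 'in-progress' transition are assumptions based on REPO_STATUSES):

    from gosbs import objects

    def claim_repo(ctxt, service_uuid, repo_uuid):
        # get_by_filters() returns None when no binding row matches.
        binding = objects.service_repo.ServiceRepo.get_by_filters(
            ctxt, filters={'service_uuid': service_uuid,
                           'repo_uuid': repo_uuid})
        if binding is None:
            binding = objects.service_repo.ServiceRepo()
            binding.service_uuid = service_uuid
            binding.repo_uuid = repo_uuid
            binding.auto = True
            binding.status = 'waiting'
            binding.create(ctxt)
        if binding.status == 'waiting':
            binding.status = 'in-progress'
            binding.save(ctxt)
        return binding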

diff --git a/gosbs/objects/task.py b/gosbs/objects/task.py
new file mode 100644
index 0000000..83f9c5c
--- /dev/null
+++ b/gosbs/objects/task.py
@@ -0,0 +1,291 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+TASK_STATUSES = ['failed', 'completed', 'in-progress', 'waiting']
+
+def _dict_with_extra_specs(task_model):
+    extra_specs = {}
+    return dict(task_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _task_create(context, values):
+    db_task = models.Tasks()
+    db_task.update(values)
+
+    try:
+        db_task.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'taskid' in e.columns:
+            raise exception.ImagesIdExists(task_id=values['taskid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return _dict_with_extra_specs(db_task)
+
+
+@db_api.main_context_manager.writer
+def _task_destroy(context, task_id=None, taskid=None):
+    query = context.session.query(models.Tasks)
+
+    if task_id is not None:
+        query.filter(models.Tasks.id == task_id).delete()
+    else:
+        query.filter(models.Tasks.taskid == taskid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Task(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'uuid': fields.UUIDField(),
+        'name': fields.StringField(),
+        'service_uuid': fields.UUIDField(),
+        'repet': fields.BooleanField(),
+        'run': fields.DateTimeField(nullable=True),
+        'status' : fields.EnumField(valid_values=TASK_STATUSES),
+        'last' : fields.DateTimeField(nullable=True),
+        'priority' : fields.IntegerField(default=5),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Task, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_projects = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Task, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, task, db_task, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        task._context = context
+        for name, field in task.fields.items():
+            value = db_task[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            task[name] = value
+        
+        task.obj_reset_changes()
+        return task
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _task_get_query_from_db(context):
+        query = context.session.query(models.Tasks)
+        return query
+
+    @staticmethod
+    @require_context
+    def _task_get_from_db(context, uuid):
+        """Returns a dict describing specific tasks."""
+        result = Task._task_get_query_from_db(context).\
+                        filter_by(uuid=uuid).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(task_id=uuid)
+        return result
+
+    @staticmethod
+    @require_context
+    def _task_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = Task._task_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        if not result:
+            raise exception.FlavorNotFoundByName(tasks_name=name)
+        return _dict_with_extra_specs(result)
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Task, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Task, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_uuid(cls, context, uuid):
+        db_task = cls._task_get_from_db(context, uuid)
+        return cls._from_db_object(context, cls(context), db_task,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_task = cls._task_get_by_name_from_db(context, name)
+        return cls._from_db_object(context, cls(context), db_task,
+                                   expected_attrs=[])
+
+    @base.remotable_classmethod
+    def get_by_server_uuid(cls, context, service_uuid, filters=None):
+        filters = filters or {}
+        db_task = Task._task_get_query_from_db(context)
+        
+        db_task = db_task.filter_by(service_uuid=service_uuid)
+        if 'repet' in filters:
+            db_task = db_task.filter(
+                models.Tasks.repet == filters['repet'])
+        if 'name' in filters:
+            db_task = db_task.filter(
+                models.Tasks.name == filters['name'])
+        db_task = db_task.first()
+        if not db_task:
+            return None
+        return cls._from_db_object(context, cls(context), db_task,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _task_create(context, updates):
+        return _task_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_task = self._task_create(context, updates)
+        self._from_db_object(context, self, db_task)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a task.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_task = context.session.query(models.Tasks).\
+            filter_by(uuid=self.uuid).first()
+        if not db_task:
+            raise exception.ImagesNotFound(task_id=self.uuid)
+        db_task.update(values)
+        db_task.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_task)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _task_destroy(context, task_id=None, taskid=None):
+        _task_destroy(context, task_id=task_id, taskid=taskid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a task
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a task object and a subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._task_destroy(context, task_id=self.uuid)
+        else:
+            self._task_destroy(context, taskid=self.taskid)
+        #self._from_db_object(context, self, db_task)
+
+@db_api.main_context_manager
+def _task_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all taskss.
+    """
+    filters = filters or {}
+
+    query = Task._task_get_query_from_db(context)
+
+    if 'name' in filters:
+        query = query.filter(
+                models.Tasks.name == filters['name'])
+
+    if 'service_uuid' in filters:
+        query = query.filter(
+                models.Tasks.service_uuid == filters['service_uuid'])
+
+    if 'status' in filters:
+        query = query.filter(
+                models.Tasks.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Task._task_get_query_from_db(context).\
+                    filter_by(uuid=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Tasks,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class TaskList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Task'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='uuid', sort_dir='asc', limit=None, marker=None):
+        db_tasks = _task_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.task.Task,
+                                  db_tasks,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Tasks).delete()
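
A sketch (not part of the patch) of how a repeating task row could be
registered through the Task object above; the task name and the use of
oslo_utils.timeutils for the first run time are assumptions:

    from oslo_utils import timeutils, uuidutils

    from gosbs import objects

    def ensure_repeating_task(ctxt, service_uuid, name='update_repos'):
        # get_by_server_uuid() returns None when no matching task exists.
        task = objects.task.Task.get_by_server_uuid(
            ctxt, service_uuid, filters={'name': name, 'repet': True})
        if task is None:
            task = objects.task.Task()
            task.uuid = uuidutils.generate_uuid()
            task.name = name
            task.service_uuid = service_uuid
            task.repet = True
            task.status = 'waiting'
            task.run = timeutils.utcnow()
            task.last = None
            task.priority = 5
            task.create(ctxt)
        return task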

diff --git a/gosbs/objects/use.py b/gosbs/objects/use.py
new file mode 100644
index 0000000..dd71073
--- /dev/null
+++ b/gosbs/objects/use.py
@@ -0,0 +1,278 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(use_model):
+    extra_specs = {}
+    return dict(use_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _use_create(context, values):
+    db_use = models.Uses()
+    db_use.update(values)
+
+    try:
+        db_use.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'useid' in e.columns:
+            raise exception.ImagesIdExists(use_id=values['useid'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return db_use
+
+
+@db_api.main_context_manager.writer
+def _use_destroy(context, use_id=None, useid=None):
+    query = context.session.query(models.Uses)
+
+    if use_id is not None:
+        query.filter(models.Uses.uuid == use_id).delete()
+    else:
+        query.filter(models.Uses.uuid == useid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class Use(base.NovaObject, base.NovaObjectDictCompat, base.NovaPersistentObject):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'flag': fields.StringField(),
+        'description' : fields.StringField(nullable=True),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(Use, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_uses = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(Use, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, use, db_use, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        use._context = context
+        for name, field in use.fields.items():
+            value = db_use[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            use[name] = value
+        
+        use.obj_reset_changes()
+        return use
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _use_get_query_from_db(context):
+        query = context.session.query(models.Uses)
+        return query
+
+    @staticmethod
+    @require_context
+    def _use_get_from_db(context, id):
+        """Returns a dict describing specific uses."""
+        result = Use._use_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(use_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _use_get_by_flag_from_db(context, flag):
+        """Returns a dict describing specific flavor."""
+        result = Use._use_get_query_from_db(context).\
+                            filter_by(flag=flag).\
+                            first()
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(Use, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(Use, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_use = cls._use_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_use,
+                                   expected_attrs=[])
+    @base.remotable_classmethod
+    def get_by_name(cls, context, flag):
+        db_use = cls._use_get_by_flag_from_db(context, flag)
+        if not db_use:
+            return None
+        return cls._from_db_object(context, cls(context), db_use,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _use_create(context, updates):
+        return _use_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_use = self._use_create(context, updates)
+        self._from_db_object(context, self, db_use)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a use flag.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_use = context.session.query(models.Uses).\
+            filter_by(id=self.id).first()
+        if not db_use:
+            raise exception.ImagesNotFound(use_id=self.id)
+        db_use.update(values)
+        db_use.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_use)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _use_destroy(context, use_id=None, useid=None):
+        _use_destroy(context, use_id=use_id, useid=useid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a use
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a use object and a subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._use_destroy(context, use_id=self.id)
+        else:
+            self._use_destroy(context, useid=self.useid)
+        #self._from_db_object(context, self, db_use)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        db_use = Use._use_get_query_from_db(context)
+
+        if 'status' in filters:
+            db_use = db_use.filter(
+                models.Uses.status == filters['status'])
+        db_use = db_use.first()
+        if not db_use:
+            return None
+        return cls._from_db_object(context, cls(context), db_use,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _use_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all usess.
+    """
+    filters = filters or {}
+
+    query = Use._use_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.Uses.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = Use._use_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Uses,
+                                           limit,
+                                           [sort_key, 'uuid'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class UseList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('Use'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_uses = _use_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.use.Use,
+                                  db_uses,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Uses).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_use = context.session.query(models.Uses).filter_by(auto=True)
+        db_use.update(values)
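
A hedged get-or-create helper for the Use object above, of the kind the
db-population code needs for USE flags (the flag name in the comment is only an
example):

    from gosbs import objects

    def get_or_create_use_flag(ctxt, flag, description=None):
        # get_by_name() returns None when the flag row does not exist yet.
        use = objects.use.Use.get_by_name(ctxt, flag)
        if use is None:
            use = objects.use.Use()
            use.flag = flag
            use.description = description
            use.create(ctxt)
        return use

    # e.g. get_or_create_use_flag(ctxt, 'systemd')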

diff --git a/gosbs/objects/user.py b/gosbs/objects/user.py
new file mode 100644
index 0000000..f4d6783
--- /dev/null
+++ b/gosbs/objects/user.py
@@ -0,0 +1,278 @@
+#    Copyright 2013 Red Hat, Inc
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/objects/flavor.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+from oslo_db import exception as db_exc
+from oslo_db.sqlalchemy import utils as sqlalchemyutils
+from oslo_utils import versionutils
+from sqlalchemy import or_
+from sqlalchemy.orm import joinedload
+from sqlalchemy.sql.expression import asc
+from sqlalchemy.sql import true
+
+import gosbs.conf
+from gosbs.db.sqlalchemy import api as db_api
+from gosbs.db.sqlalchemy.api import require_context
+from gosbs.db.sqlalchemy import models
+from gosbs import exception
+from gosbs import objects
+from gosbs.objects import base
+from gosbs.objects import fields
+
+CONF = gosbs.conf.CONF
+
+def _dict_with_extra_specs(user_model):
+    extra_specs = {}
+    return dict(user_model, extra_specs=extra_specs)
+
+
+@db_api.main_context_manager.writer
+def _user_create(context, values):
+    db_user = models.Users()
+    db_user.update(values)
+
+    try:
+        db_user.save(context.session)
+    except db_exc.DBDuplicateEntry as e:
+        if 'user_id' in e.columns:
+            raise exception.ImagesIdExists(user_id=values['user_id'])
+        raise exception.ImagesExists(name=values['name'])
+    except Exception as e:
+        raise db_exc.DBError(e)
+
+    return db_user
+
+
+@db_api.main_context_manager.writer
+def _user_destroy(context, user_id=None, userid=None):
+    query = context.session.query(models.Users)
+
+    if user_id is not None:
+        query.filter(models.Users.uuid == user_id).delete()
+    else:
+        query.filter(models.Users.uuid == userid).delete()
+
+
+# TODO(berrange): Remove NovaObjectDictCompat
+# TODO(mriedem): Remove NovaPersistentObject in version 2.0
+@base.NovaObjectRegistry.register
+class User(base.NovaObject, base.NovaObjectDictCompat):
+    # Version 1.0: Initial version
+
+    VERSION = '1.0'
+
+    fields = {
+        'id': fields.IntegerField(),
+        'user_id': fields.IntegerField(),
+        'name': fields.StringField(),
+        }
+
+    def __init__(self, *args, **kwargs):
+        super(User, self).__init__(*args, **kwargs)
+        self._orig_extra_specs = {}
+        self._orig_users = []
+
+    def obj_make_compatible(self, primitive, target_version):
+        super(User, self).obj_make_compatible(primitive, target_version)
+        target_version = versionutils.convert_version_to_tuple(target_version)
+
+
+    @staticmethod
+    def _from_db_object(context, user, db_user, expected_attrs=None):
+        if expected_attrs is None:
+            expected_attrs = []
+        user._context = context
+        for name, field in user.fields.items():
+            value = db_user[name]
+            if isinstance(field, fields.IntegerField):
+                value = value if value is not None else 0
+            user[name] = value
+
+        user.obj_reset_changes()
+        return user
+
+    @staticmethod
+    @db_api.main_context_manager.reader
+    def _user_get_query_from_db(context):
+        query = context.session.query(models.Users)
+        return query
+
+    @staticmethod
+    @require_context
+    def _user_get_from_db(context, id):
+        """Returns a dict describing specific users."""
+        result = User._user_get_query_from_db(context).\
+                        filter_by(id=id).\
+                        first()
+        if not result:
+            raise exception.ImagesNotFound(user_id=id)
+        return result
+
+    @staticmethod
+    @require_context
+    def _user_get_by_name_from_db(context, name):
+        """Returns a dict describing specific flavor."""
+        result = User._user_get_query_from_db(context).\
+                            filter_by(name=name).\
+                            first()
+        return result
+
+    def obj_reset_changes(self, fields=None, recursive=False):
+        super(User, self).obj_reset_changes(fields=fields,
+                recursive=recursive)
+
+    def obj_what_changed(self):
+        changes = super(User, self).obj_what_changed()
+        return changes
+
+    @base.remotable_classmethod
+    def get_by_id(cls, context, id):
+        db_user = cls._user_get_from_db(context, id)
+        return cls._from_db_object(context, cls(context), db_user,
+                                   expected_attrs=[])
+
+    @base.remotable_classmethod
+    def get_by_name(cls, context, name):
+        db_user = cls._user_get_by_name_from_db(context, name)
+        if not db_user:
+            return None
+        return cls._from_db_object(context, cls(context), db_user,
+                                   expected_attrs=[])
+
+    @staticmethod
+    def _user_create(context, updates):
+        return _user_create(context, updates)
+
+    #@base.remotable
+    def create(self, context):
+        #if self.obj_attr_is_set('id'):
+        #    raise exception.ObjectActionError(action='create',
+        #reason='already created')
+        updates = self.obj_get_changes()
+        db_user = self._user_create(context, updates)
+        self._from_db_object(context, self, db_user)
+
+
+    # NOTE(mriedem): This method is not remotable since we only expect the API
+    # to be able to make updates to a user.
+    @db_api.main_context_manager.writer
+    def _save(self, context, values):
+        db_user = context.session.query(models.Users).\
+            filter_by(id=self.id).first()
+        if not db_user:
+            raise exception.ImagesNotFound(user_id=self.id)
+        db_user.update(values)
+        db_user.save(context.session)
+        # Refresh ourselves from the DB object so we get the new updated_at.
+        self._from_db_object(context, self, db_user)
+        self.obj_reset_changes()
+
+    def save(self, context):
+        updates = self.obj_get_changes()
+        if updates:
+            self._save(context, updates)
+
+    @staticmethod
+    def _user_destroy(context, user_id=None, userid=None):
+        _user_destroy(context, user_id=user_id, userid=userid)
+
+    #@base.remotable
+    def destroy(self, context):
+        # NOTE(danms): Historically the only way to delete a user
+        # is via name, which is not very precise. We need to be able to
+        # support the light construction of a user object and subsequent
+        # delete request with only our name filled out. However, if we have
+        # our id property, we should instead delete with that since it's
+        # far more specific.
+        if 'id' in self:
+            self._user_destroy(context, user_id=self.id)
+        else:
+            self._user_destroy(context, userid=self.userid)
+        #self._from_db_object(context, self, db_user)
+
+    @base.remotable_classmethod
+    def get_by_filters_first(cls, context, filters=None):
+        filters = filters or {}
+        query = User._user_get_query_from_db(context)
+
+        if 'status' in filters:
+            query = query.filter(
+                models.Users.status == filters['status'])
+        db_user = query.first()
+        return cls._from_db_object(context, cls(context), db_user,
+                                   expected_attrs=[])
+
+
+@db_api.main_context_manager
+def _user_get_all_from_db(context, inactive, filters, sort_key, sort_dir,
+                            limit, marker):
+    """Returns all userss.
+    """
+    filters = filters or {}
+
+    query = User._user_get_query_from_db(context)
+
+    if 'status' in filters:
+        query = query.filter(
+            models.Users.status == filters['status'])
+
+    marker_row = None
+    if marker is not None:
+        marker_row = User._user_get_query_from_db(context).\
+                    filter_by(id=marker).\
+                    first()
+        if not marker_row:
+            raise exception.MarkerNotFound(marker=marker)
+
+    query = sqlalchemyutils.paginate_query(query, models.Users,
+                                           limit,
+                                           [sort_key, 'id'],
+                                           marker=marker_row,
+                                           sort_dir=sort_dir)
+    return [_dict_with_extra_specs(i) for i in query.all()]
+
+
+@base.NovaObjectRegistry.register
+class UserList(base.ObjectListBase, base.NovaObject):
+    VERSION = '1.0'
+
+    fields = {
+        'objects': fields.ListOfObjectsField('User'),
+        }
+
+    @base.remotable_classmethod
+    def get_all(cls, context, inactive=False, filters=None,
+                sort_key='id', sort_dir='asc', limit=None, marker=None):
+        db_users = _user_get_all_from_db(context,
+                                                 inactive=inactive,
+                                                 filters=filters,
+                                                 sort_key=sort_key,
+                                                 sort_dir=sort_dir,
+                                                 limit=limit,
+                                                 marker=marker)
+        return base.obj_make_list(context, cls(context), objects.user.User,
+                                  db_users,
+                                  expected_attrs=[])
+
+    @db_api.main_context_manager.writer
+    def destroy_all(context):
+        context.session.query(models.Users).delete()
+
+    @db_api.main_context_manager.writer
+    def update_all(context):
+        values = {'status': 'waiting', }
+        db_user = context.session.query(models.Users).filter_by(auto=True)
+        db_user.update(values)
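
A similar hedged sketch for the User object (get_admin_context() and the
user_id/name values are assumptions for illustration only):

    from gosbs import context as gosbs_context
    from gosbs import objects

    ctxt = gosbs_context.get_admin_context()

    user_db = objects.user.User.get_by_name(ctxt, 'scheduler')
    if user_db is None:
        # create() pushes the changed fields through _user_create() above.
        user_db = objects.user.User()
        user_db.user_id = 1
        user_db.name = 'scheduler'
        user_db.create(ctxt)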

diff --git a/gosbs/policies/__init__.py b/gosbs/policies/__init__.py
new file mode 100644
index 0000000..41a3820
--- /dev/null
+++ b/gosbs/policies/__init__.py
@@ -0,0 +1,26 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/policies/__init__.py
+
+import itertools
+
+from gosbs.policies import base
+from gosbs.policies import hosts
+from gosbs.policies import services
+
+def list_rules():
+    return itertools.chain(
+        base.list_rules(),
+        hosts.list_rules(),
+        services.list_rules(),
+    )
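
These rule lists are consumed by gosbs/policy.py later in this commit; a
minimal sketch of that same registration step in isolation (only oslo_config
and oslo_policy are used, nothing gosbs-specific is assumed beyond
list_rules()):

    from oslo_config import cfg
    from oslo_policy import policy as oslo_policy

    from gosbs import policies

    enforcer = oslo_policy.Enforcer(cfg.CONF)
    enforcer.register_defaults(policies.list_rules())
    enforcer.load_rules()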

diff --git a/gosbs/policies/base.py b/gosbs/policies/base.py
new file mode 100644
index 0000000..b6a009f
--- /dev/null
+++ b/gosbs/policies/base.py
@@ -0,0 +1,41 @@
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_policy import policy
+
+COMPUTE_API = 'os_compute_api'
+
+RULE_ADMIN_OR_OWNER = 'rule:admin_or_owner'
+RULE_ADMIN_API = 'rule:admin_api'
+RULE_ANY = '@'
+
+# NOTE(johngarbutt) The base rules here affect so many APIs the list
+# of related API operations has not been populated. It would be
+# crazy hard to manually maintain such a list.
+rules = [
+    policy.RuleDefault(
+        "context_is_admin",
+        "role:admin",
+        "Decides what is required for the 'is_admin:True' check to succeed."),
+    policy.RuleDefault(
+        "admin_or_owner",
+        "is_admin:True or project_id:%(project_id)s",
+        "Default rule for most non-Admin APIs."),
+    policy.RuleDefault(
+        "admin_api",
+        "is_admin:True",
+        "Default rule for most Admin APIs.")
+]
+
+
+def list_rules():
+    return rules

diff --git a/gosbs/policies/hosts.py b/gosbs/policies/hosts.py
new file mode 100644
index 0000000..e39ba9d
--- /dev/null
+++ b/gosbs/policies/hosts.py
@@ -0,0 +1,63 @@
+# Copyright 2016 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/policies/hosts.py
+
+from oslo_policy import policy
+
+from gosbs.policies import base
+
+
+BASE_POLICY_NAME = 'os_compute_api:os-hosts'
+
+
+hosts_policies = [
+    policy.DocumentedRuleDefault(
+        BASE_POLICY_NAME,
+        base.RULE_ADMIN_API,
+        """List, show and manage physical hosts.
+
+These APIs are all deprecated in favor of os-hypervisors and os-services.""",
+        [
+            {
+                'method': 'GET',
+                'path': '/os-hosts'
+            },
+            {
+                'method': 'GET',
+                'path': '/os-hosts/{host_name}'
+            },
+            {
+                'method': 'PUT',
+                'path': '/os-hosts/{host_name}'
+            },
+            {
+                'method': 'GET',
+                'path': '/os-hosts/{host_name}/reboot'
+            },
+            {
+                'method': 'GET',
+                'path': '/os-hosts/{host_name}/shutdown'
+            },
+            {
+                'method': 'GET',
+                'path': '/os-hosts/{host_name}/startup'
+            }
+        ]),
+]
+
+
+def list_rules():
+    return hosts_policies

diff --git a/gosbs/policies/services.py b/gosbs/policies/services.py
new file mode 100644
index 0000000..5985865
--- /dev/null
+++ b/gosbs/policies/services.py
@@ -0,0 +1,69 @@
+# Copyright 2016 Cloudbase Solutions Srl
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/policies/services.py
+
+from oslo_policy import policy
+
+from gosbs.policies import base
+
+
+BASE_POLICY_NAME = 'os_compute_api:os-services'
+
+
+services_policies = [
+    policy.DocumentedRuleDefault(
+        BASE_POLICY_NAME,
+        base.RULE_ADMIN_API,
+        "List all running Compute services in a region, enables or disable "
+        "scheduling for a Compute service, logs disabled Compute service "
+        "information, set or unset forced_down flag for the compute service "
+        "and delete a Compute service",
+        [
+            {
+                'method': 'GET',
+                'path': '/os-services'
+            },
+            {
+                'method': 'PUT',
+                'path': '/os-services/enable'
+            },
+            {
+                'method': 'PUT',
+                'path': '/os-services/disable'
+            },
+            {
+                'method': 'PUT',
+                'path': '/os-services/disable-log-reason'
+            },
+            {
+                'method': 'PUT',
+                'path': '/os-services/force-down'
+            },
+            {
+                # Added in microversion 2.53.
+                'method': 'PUT',
+                'path': '/os-services/{service_id}'
+            },
+            {
+                'method': 'DELETE',
+                'path': '/os-services/{service_id}'
+            }
+        ]),
+]
+
+
+def list_rules():
+    return services_policies

diff --git a/gosbs/policy.py b/gosbs/policy.py
new file mode 100644
index 0000000..f8f8659
--- /dev/null
+++ b/gosbs/policy.py
@@ -0,0 +1,246 @@
+# Copyright (c) 2011 OpenStack Foundation
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/policy.py
+
+"""Policy Engine For Nova."""
+import copy
+import re
+
+from oslo_config import cfg
+from oslo_log import log as logging
+from oslo_policy import policy
+from oslo_utils import excutils
+
+
+from gosbs import exception
+from gosbs.i18n import _LE, _LW
+from gosbs import policies
+
+
+CONF = cfg.CONF
+LOG = logging.getLogger(__name__)
+_ENFORCER = None
+# This list is about the resources which support user based policy enforcement.
+# Avoid sending deprecation warning for those resources.
+USER_BASED_RESOURCES = ['os-keypairs']
+# oslo_policy will read the policy configuration file again when the file
+# is changed at runtime, so the old policy rules are saved to
+# saved_file_rules and compared with the new rules to determine whether
+# the rules were updated.
+saved_file_rules = []
+KEY_EXPR = re.compile(r'%\((\w+)\)s')
+
+
+def reset():
+    global _ENFORCER
+    if _ENFORCER:
+        _ENFORCER.clear()
+        _ENFORCER = None
+
+
+def init(policy_file=None, rules=None, default_rule=None, use_conf=True):
+    """Init an Enforcer class.
+
+       :param policy_file: Custom policy file to use, if none is specified,
+                           `CONF.policy_file` will be used.
+       :param rules: Default dictionary / Rules to use. It will be
+                     considered just in the first instantiation.
+       :param default_rule: Default rule to use, CONF.default_rule will
+                            be used if none is specified.
+       :param use_conf: Whether to load rules from config file.
+    """
+
+    global _ENFORCER
+    global saved_file_rules
+
+    if not _ENFORCER:
+        _ENFORCER = policy.Enforcer(CONF,
+                                    policy_file=policy_file,
+                                    rules=rules,
+                                    default_rule=default_rule,
+                                    use_conf=use_conf)
+        register_rules(_ENFORCER)
+        _ENFORCER.load_rules()
+
+    # Only the rules which are loaded from file may be changed.
+    current_file_rules = _ENFORCER.file_rules
+    current_file_rules = _serialize_rules(current_file_rules)
+
+    # Checks whether the rules are updated in the runtime
+    if saved_file_rules != current_file_rules:
+        _warning_for_deprecated_user_based_rules(current_file_rules)
+        saved_file_rules = copy.deepcopy(current_file_rules)
+
+
+def _serialize_rules(rules):
+    """Serialize all the Rule object as string which is used to compare the
+    rules list.
+    """
+    result = [(rule_name, str(rule))
+              for rule_name, rule in rules.items()]
+    return sorted(result, key=lambda rule: rule[0])
+
+
+def _warning_for_deprecated_user_based_rules(rules):
+    """Warning user based policy enforcement used in the rule but the rule
+    doesn't support it.
+    """
+    for rule in rules:
+        # We will skip the warning for the resources which support user based
+        # policy enforcement.
+        if [resource for resource in USER_BASED_RESOURCES
+                if resource in rule[0]]:
+            continue
+        if 'user_id' in KEY_EXPR.findall(rule[1]):
+            LOG.warning(_LW("The user_id attribute isn't supported in the "
+                            "rule '%s'. All the user_id based policy "
+                            "enforcement will be removed in the "
+                            "future."), rule[0])
+
+
+def set_rules(rules, overwrite=True, use_conf=False):
+    """Set rules based on the provided dict of rules.
+
+       :param rules: New rules to use. It should be an instance of dict.
+       :param overwrite: Whether to overwrite current rules or update them
+                         with the new rules.
+       :param use_conf: Whether to reload rules from config file.
+    """
+
+    init(use_conf=False)
+    _ENFORCER.set_rules(rules, overwrite, use_conf)
+
+
+def authorize(context, action, target, do_raise=True, exc=None):
+    """Verifies that the action is valid on the target in this context.
+
+       :param context: nova context
+       :param action: string representing the action to be checked
+           this should be colon separated for clarity.
+           i.e. ``compute:create_instance``,
+           ``compute:attach_volume``,
+           ``volume:attach_volume``
+       :param target: dictionary representing the object of the action
+           for object creation this should be a dictionary representing the
+           location of the object e.g. ``{'project_id': context.project_id}``
+       :param do_raise: if True (the default), raises PolicyNotAuthorized;
+           if False, returns False
+       :param exc: Class of the exception to raise if the check fails.
+                   Any remaining arguments passed to :meth:`authorize` (both
+                   positional and keyword arguments) will be passed to
+                   the exception class. If not specified,
+                   :class:`PolicyNotAuthorized` will be used.
+
+       :raises nova.exception.PolicyNotAuthorized: if verification fails
+           and do_raise is True. Or if 'exc' is specified it will raise an
+           exception of that type.
+
+       :return: returns a non-False value (not necessarily "True") if
+           authorized, and the exact value False if not authorized and
+           do_raise is False.
+    """
+    init()
+    credentials = context.to_policy_values()
+    if not exc:
+        exc = exception.PolicyNotAuthorized
+    try:
+        result = _ENFORCER.authorize(action, target, credentials,
+                                     do_raise=do_raise, exc=exc, action=action)
+    except policy.PolicyNotRegistered:
+        with excutils.save_and_reraise_exception():
+            LOG.exception(_LE('Policy not registered'))
+    except Exception:
+        with excutils.save_and_reraise_exception():
+            LOG.debug('Policy check for %(action)s failed with credentials '
+                      '%(credentials)s',
+                      {'action': action, 'credentials': credentials})
+    return result
+
+
+def check_is_admin(context):
+    """Whether or not roles contains 'admin' role according to policy setting.
+
+    """
+
+    init()
+    # the target is user-self
+    credentials = context.to_policy_values()
+    target = credentials
+    return _ENFORCER.authorize('context_is_admin', target, credentials)
+
+
+@policy.register('is_admin')
+class IsAdminCheck(policy.Check):
+    """An explicit check for is_admin."""
+
+    def __init__(self, kind, match):
+        """Initialize the check."""
+
+        self.expected = (match.lower() == 'true')
+
+        super(IsAdminCheck, self).__init__(kind, str(self.expected))
+
+    def __call__(self, target, creds, enforcer):
+        """Determine whether is_admin matches the requested value."""
+
+        return creds['is_admin'] == self.expected
+
+
+def get_rules():
+    if _ENFORCER:
+        return _ENFORCER.rules
+
+
+def register_rules(enforcer):
+    enforcer.register_defaults(policies.list_rules())
+
+
+def get_enforcer():
+    # This method is used by oslopolicy CLI scripts in order to generate policy
+    # files from overrides on disk and defaults in code.
+    cfg.CONF([], project='nova')
+    init()
+    return _ENFORCER
+
+
+def verify_deprecated_policy(old_policy, new_policy, default_rule, context):
+    """Check the rule of the deprecated policy action
+
+    If the current rule of the deprecated policy action is set to a non-default
+    value, then a warning message is logged stating that the new policy
+    action should be used to dictate permissions as the old policy action is
+    being deprecated.
+
+    :param old_policy: policy action that is being deprecated
+    :param new_policy: policy action that is replacing old_policy
+    :param default_rule: the old_policy action default rule value
+    :param context: the nova context
+    """
+
+    if _ENFORCER:
+        current_rule = str(_ENFORCER.rules[old_policy])
+    else:
+        current_rule = None
+
+    if current_rule != default_rule:
+        LOG.warning("Start using the new action '{0}'. The existing "
+                    "action '{1}' is being deprecated and will be "
+                    "removed in future release.".format(new_policy,
+                                                        old_policy))
+        context.can(old_policy)
+        return True
+    else:
+        return False
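
A hedged sketch of how a service would call into this module (the rule name
comes from gosbs/policies/services.py above; get_admin_context() and the
project_id/user_id attributes are assumed to survive from the Nova context
module this code is based on):

    from gosbs import context as gosbs_context
    from gosbs import policy

    ctxt = gosbs_context.get_admin_context()
    target = {'project_id': ctxt.project_id, 'user_id': ctxt.user_id}

    # init() is called lazily by authorize(); with do_raise=False a failed
    # check returns False instead of raising PolicyNotAuthorized.
    if policy.authorize(ctxt, 'os_compute_api:os-services', target,
                        do_raise=False):
        pass  # allowed

    is_admin = policy.check_is_admin(ctxt)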

diff --git a/gosbs/profiler.py b/gosbs/profiler.py
new file mode 100644
index 0000000..de9ed9b
--- /dev/null
+++ b/gosbs/profiler.py
@@ -0,0 +1,51 @@
+# Copyright 2016 IBM Corporation.
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/profiler.py
+
+from oslo_utils import importutils
+
+import gosbs.conf
+
+profiler = importutils.try_import('osprofiler.profiler')
+
+CONF = gosbs.conf.CONF
+
+def get_traced_meta():
+    if profiler and 'profiler' in CONF and CONF.profiler.enabled:
+        return profiler.TracedMeta
+    else:
+        # NOTE(rpodolyaka): if we do not return a child of type, then Python
+        # fails to build a correct MRO when osprofiler is not installed
+        class NoopMeta(type):
+            pass
+        return NoopMeta
+
+
+def trace_cls(name, **kwargs):
+    """Wrap the OSProfiler trace_cls decorator so that it will not try to
+    patch the class unless OSProfiler is present and enabled in the config
+
+    :param name: The name of action. E.g. wsgi, rpc, db, etc..
+    :param kwargs: Any other keyword args used by profiler.trace_cls
+    """
+
+    def decorator(cls):
+        if profiler and 'profiler' in CONF and CONF.profiler.enabled:
+            trace_decorator = profiler.trace_cls(name, kwargs)
+            return trace_decorator(cls)
+        return cls
+
+    return decorator
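
A short sketch of the intended use (the class and method names are
illustrative; with osprofiler missing or CONF.profiler.enabled unset the
decorator simply returns the class unchanged):

    from gosbs import profiler

    @profiler.trace_cls("scheduler")
    class RepoTasks(object):

        def sync(self, context):
            # Traced as a "scheduler" span when profiling is enabled.
            return True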

diff --git a/gosbs/rpc.py b/gosbs/rpc.py
new file mode 100644
index 0000000..73154ea
--- /dev/null
+++ b/gosbs/rpc.py
@@ -0,0 +1,448 @@
+# Copyright 2013 Red Hat, Inc.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/rpc.py
+
+import functools
+
+from oslo_log import log as logging
+import oslo_messaging as messaging
+from oslo_messaging.rpc import dispatcher
+from oslo_serialization import jsonutils
+from oslo_service import periodic_task
+from oslo_utils import importutils
+import six
+
+import gosbs.conf
+import gosbs.context
+import gosbs.exception
+from gosbs.i18n import _
+
+__all__ = [
+    'init',
+    'cleanup',
+    'set_defaults',
+    'add_extra_exmods',
+    'clear_extra_exmods',
+    'get_allowed_exmods',
+    'RequestContextSerializer',
+    'get_client',
+    'get_server',
+    'get_notifier',
+]
+
+profiler = importutils.try_import("osprofiler.profiler")
+
+
+CONF = gosbs.conf.CONF
+
+LOG = logging.getLogger(__name__)
+
+# TODO(stephenfin): These should be private
+TRANSPORT = None
+LEGACY_NOTIFIER = None
+NOTIFICATION_TRANSPORT = None
+NOTIFIER = None
+
+# NOTE(danms): If rpc_response_timeout is over this value (per-call or
+# globally), we will enable heartbeating
+HEARTBEAT_THRESHOLD = 60
+
+ALLOWED_EXMODS = [
+    gosbs.exception.__name__,
+]
+EXTRA_EXMODS = []
+
+
+def init(conf):
+    global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER
+    exmods = get_allowed_exmods()
+    TRANSPORT = create_transport(get_transport_url())
+    NOTIFICATION_TRANSPORT = messaging.get_notification_transport(
+        conf, allowed_remote_exmods=exmods)
+    serializer = RequestContextSerializer(JsonPayloadSerializer())
+    if conf.notifications.notification_format == 'unversioned':
+        LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+                                             serializer=serializer)
+        NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+                                      serializer=serializer, driver='noop')
+    elif conf.notifications.notification_format == 'both':
+        LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+                                             serializer=serializer)
+        NOTIFIER = messaging.Notifier(
+            NOTIFICATION_TRANSPORT,
+            serializer=serializer,
+            topics=conf.notifications.versioned_notifications_topics)
+    else:
+        LEGACY_NOTIFIER = messaging.Notifier(NOTIFICATION_TRANSPORT,
+                                             serializer=serializer,
+                                             driver='noop')
+        NOTIFIER = messaging.Notifier(
+            NOTIFICATION_TRANSPORT,
+            serializer=serializer,
+            topics=conf.notifications.versioned_notifications_topics)
+
+
+def cleanup():
+    global TRANSPORT, NOTIFICATION_TRANSPORT, LEGACY_NOTIFIER, NOTIFIER
+    assert TRANSPORT is not None
+    assert NOTIFICATION_TRANSPORT is not None
+    assert LEGACY_NOTIFIER is not None
+    assert NOTIFIER is not None
+    TRANSPORT.cleanup()
+    NOTIFICATION_TRANSPORT.cleanup()
+    TRANSPORT = NOTIFICATION_TRANSPORT = LEGACY_NOTIFIER = NOTIFIER = None
+
+
+def set_defaults(control_exchange):
+    messaging.set_transport_defaults(control_exchange)
+
+
+def add_extra_exmods(*args):
+    EXTRA_EXMODS.extend(args)
+
+
+def clear_extra_exmods():
+    del EXTRA_EXMODS[:]
+
+
+def get_allowed_exmods():
+    return ALLOWED_EXMODS + EXTRA_EXMODS
+
+
+class JsonPayloadSerializer(messaging.NoOpSerializer):
+
+    @staticmethod
+    def fallback(obj):
+        """Serializer fallback
+
+        This method is used to serialize an object which jsonutils.to_primitive
+        does not otherwise know how to handle.
+
+        This is mostly only needed in tests because of the use of the nova
+        CheatingSerializer fixture which keeps some non-serializable fields
+        on the RequestContext, like db_connection.
+        """
+        if isinstance(obj, gosbs.context.RequestContext):
+            # This matches RequestContextSerializer.serialize_context().
+            return obj.to_dict()
+        # The default fallback in jsonutils.to_primitive() is six.text_type.
+        return six.text_type(obj)
+
+    def serialize_entity(self, context, entity):
+        return jsonutils.to_primitive(entity, convert_instances=True,
+                                      fallback=self.fallback)
+
+
+class RequestContextSerializer(messaging.Serializer):
+
+    def __init__(self, base):
+        self._base = base
+
+    def serialize_entity(self, context, entity):
+        if not self._base:
+            return entity
+        return self._base.serialize_entity(context, entity)
+
+    def deserialize_entity(self, context, entity):
+        if not self._base:
+            return entity
+        return self._base.deserialize_entity(context, entity)
+
+    def serialize_context(self, context):
+        return context.to_dict()
+
+    def deserialize_context(self, context):
+        return gosbs.context.RequestContext.from_dict(context)
+
+
+class ProfilerRequestContextSerializer(RequestContextSerializer):
+    def serialize_context(self, context):
+        _context = super(ProfilerRequestContextSerializer,
+                         self).serialize_context(context)
+
+        prof = profiler.get()
+        if prof:
+            # FIXME(DinaBelova): we'll add profiler.get_info() method
+            # to extract this info -> we'll need to update these lines
+            trace_info = {
+                "hmac_key": prof.hmac_key,
+                "base_id": prof.get_base_id(),
+                "parent_id": prof.get_id()
+            }
+            _context.update({"trace_info": trace_info})
+
+        return _context
+
+    def deserialize_context(self, context):
+        trace_info = context.pop("trace_info", None)
+        if trace_info:
+            profiler.init(**trace_info)
+
+        return super(ProfilerRequestContextSerializer,
+                     self).deserialize_context(context)
+
+
+def get_transport_url(url_str=None):
+    return messaging.TransportURL.parse(CONF, url_str)
+
+
+def get_client(target, version_cap=None, serializer=None,
+               call_monitor_timeout=None):
+    assert TRANSPORT is not None
+
+    if profiler:
+        serializer = ProfilerRequestContextSerializer(serializer)
+    else:
+        serializer = RequestContextSerializer(serializer)
+
+    return messaging.RPCClient(TRANSPORT,
+                               target,
+                               version_cap=version_cap,
+                               serializer=serializer,
+                               call_monitor_timeout=call_monitor_timeout)
+
+
+def get_server(target, endpoints, serializer=None):
+    assert TRANSPORT is not None
+
+    if profiler:
+        serializer = ProfilerRequestContextSerializer(serializer)
+    else:
+        serializer = RequestContextSerializer(serializer)
+    access_policy = dispatcher.DefaultRPCAccessPolicy
+    return messaging.get_rpc_server(TRANSPORT,
+                                    target,
+                                    endpoints,
+                                    executor='eventlet',
+                                    serializer=serializer,
+                                    access_policy=access_policy)
+
+
+def get_notifier(service, host=None, publisher_id=None):
+    assert LEGACY_NOTIFIER is not None
+    if not publisher_id:
+        publisher_id = "%s.%s" % (service, host or CONF.host)
+    return LegacyValidatingNotifier(
+            LEGACY_NOTIFIER.prepare(publisher_id=publisher_id))
+
+
+def get_versioned_notifier(publisher_id):
+    assert NOTIFIER is not None
+    return NOTIFIER.prepare(publisher_id=publisher_id)
+
+
+def if_notifications_enabled(f):
+    """Calls decorated method only if versioned notifications are enabled."""
+    @functools.wraps(f)
+    def wrapped(*args, **kwargs):
+        if (NOTIFIER.is_enabled() and
+                CONF.notifications.notification_format in ('both',
+                                                           'versioned')):
+            return f(*args, **kwargs)
+        else:
+            return None
+    return wrapped
+
+
+def create_transport(url):
+    exmods = get_allowed_exmods()
+    return messaging.get_rpc_transport(CONF,
+                                       url=url,
+                                       allowed_remote_exmods=exmods)
+
+
+class LegacyValidatingNotifier(object):
+    """Wraps an oslo.messaging Notifier and checks for allowed event_types."""
+
+    # If true, an exception is thrown if the event_type is not allowed; if
+    # false, only a WARNING is logged.
+    fatal = False
+
+    # This list contains the already existing, and therefore allowed, legacy
+    # notification event_types. New items shall not be added to the list as
+    # Nova does not allow new legacy notifications any more. This list will be
+    # removed when all the notifications are transformed to versioned
+    # notifications.
+    allowed_legacy_notification_event_types = [
+        'aggregate.addhost.end',
+        'aggregate.addhost.start',
+        'aggregate.create.end',
+        'aggregate.create.start',
+        'aggregate.delete.end',
+        'aggregate.delete.start',
+        'aggregate.removehost.end',
+        'aggregate.removehost.start',
+        'aggregate.updatemetadata.end',
+        'aggregate.updatemetadata.start',
+        'aggregate.updateprop.end',
+        'aggregate.updateprop.start',
+        'compute.instance.create.end',
+        'compute.instance.create.error',
+        'compute.instance.create_ip.end',
+        'compute.instance.create_ip.start',
+        'compute.instance.create.start',
+        'compute.instance.delete.end',
+        'compute.instance.delete_ip.end',
+        'compute.instance.delete_ip.start',
+        'compute.instance.delete.start',
+        'compute.instance.evacuate',
+        'compute.instance.exists',
+        'compute.instance.finish_resize.end',
+        'compute.instance.finish_resize.start',
+        'compute.instance.live.migration.abort.start',
+        'compute.instance.live.migration.abort.end',
+        'compute.instance.live.migration.force.complete.start',
+        'compute.instance.live.migration.force.complete.end',
+        'compute.instance.live_migration.post.dest.end',
+        'compute.instance.live_migration.post.dest.start',
+        'compute.instance.live_migration._post.end',
+        'compute.instance.live_migration._post.start',
+        'compute.instance.live_migration.pre.end',
+        'compute.instance.live_migration.pre.start',
+        'compute.instance.live_migration.rollback.dest.end',
+        'compute.instance.live_migration.rollback.dest.start',
+        'compute.instance.live_migration._rollback.end',
+        'compute.instance.live_migration._rollback.start',
+        'compute.instance.pause.end',
+        'compute.instance.pause.start',
+        'compute.instance.power_off.end',
+        'compute.instance.power_off.start',
+        'compute.instance.power_on.end',
+        'compute.instance.power_on.start',
+        'compute.instance.reboot.end',
+        'compute.instance.reboot.error',
+        'compute.instance.reboot.start',
+        'compute.instance.rebuild.end',
+        'compute.instance.rebuild.error',
+        'compute.instance.rebuild.scheduled',
+        'compute.instance.rebuild.start',
+        'compute.instance.rescue.end',
+        'compute.instance.rescue.start',
+        'compute.instance.resize.confirm.end',
+        'compute.instance.resize.confirm.start',
+        'compute.instance.resize.end',
+        'compute.instance.resize.error',
+        'compute.instance.resize.prep.end',
+        'compute.instance.resize.prep.start',
+        'compute.instance.resize.revert.end',
+        'compute.instance.resize.revert.start',
+        'compute.instance.resize.start',
+        'compute.instance.restore.end',
+        'compute.instance.restore.start',
+        'compute.instance.resume.end',
+        'compute.instance.resume.start',
+        'compute.instance.shelve.end',
+        'compute.instance.shelve_offload.end',
+        'compute.instance.shelve_offload.start',
+        'compute.instance.shelve.start',
+        'compute.instance.shutdown.end',
+        'compute.instance.shutdown.start',
+        'compute.instance.snapshot.end',
+        'compute.instance.snapshot.start',
+        'compute.instance.soft_delete.end',
+        'compute.instance.soft_delete.start',
+        'compute.instance.suspend.end',
+        'compute.instance.suspend.start',
+        'compute.instance.trigger_crash_dump.end',
+        'compute.instance.trigger_crash_dump.start',
+        'compute.instance.unpause.end',
+        'compute.instance.unpause.start',
+        'compute.instance.unrescue.end',
+        'compute.instance.unrescue.start',
+        'compute.instance.unshelve.start',
+        'compute.instance.unshelve.end',
+        'compute.instance.update',
+        'compute.instance.volume.attach',
+        'compute.instance.volume.detach',
+        'compute.libvirt.error',
+        'compute.metrics.update',
+        'compute_task.build_instances',
+        'compute_task.migrate_server',
+        'compute_task.rebuild_server',
+        'HostAPI.power_action.end',
+        'HostAPI.power_action.start',
+        'HostAPI.set_enabled.end',
+        'HostAPI.set_enabled.start',
+        'HostAPI.set_maintenance.end',
+        'HostAPI.set_maintenance.start',
+        'keypair.create.start',
+        'keypair.create.end',
+        'keypair.delete.start',
+        'keypair.delete.end',
+        'keypair.import.start',
+        'keypair.import.end',
+        'network.floating_ip.allocate',
+        'network.floating_ip.associate',
+        'network.floating_ip.deallocate',
+        'network.floating_ip.disassociate',
+        'scheduler.select_destinations.end',
+        'scheduler.select_destinations.start',
+        'servergroup.addmember',
+        'servergroup.create',
+        'servergroup.delete',
+        'volume.usage',
+    ]
+
+    message = _('%(event_type)s is not a versioned notification and not '
+                'whitelisted. See ./doc/source/reference/notifications.rst')
+
+    def __init__(self, notifier):
+        self.notifier = notifier
+        for priority in ['debug', 'info', 'warn', 'error', 'critical']:
+            setattr(self, priority,
+                    functools.partial(self._notify, priority))
+
+    def _is_wrap_exception_notification(self, payload):
+        # The nova.exception_wrapper.wrap_exception decorator emits a
+        # notification where the event_type is the name of the decorated
+        # function. This is used in many places, but it will be converted to a
+        # versioned notification in one run by updating the decorator, so it
+        # is pointless to whitelist all the function names here. Instead we
+        # whitelist the notification itself, detected by its special payload
+        # keys.
+        return {'exception', 'args'} == set(payload.keys())
+
+    def _notify(self, priority, ctxt, event_type, payload):
+        if (event_type not in self.allowed_legacy_notification_event_types and
+                not self._is_wrap_exception_notification(payload)):
+            if self.fatal:
+                raise AssertionError(self.message % {'event_type': event_type})
+            else:
+                LOG.warning(self.message, {'event_type': event_type})
+
+        getattr(self.notifier, priority)(ctxt, event_type, payload)
+
+
+class ClientRouter(periodic_task.PeriodicTasks):
+    """Creates RPC clients that honor the context's RPC transport
+    or provides a default.
+    """
+
+    def __init__(self, default_client):
+        super(ClientRouter, self).__init__(CONF)
+        self.default_client = default_client
+        self.target = default_client.target
+        self.version_cap = default_client.version_cap
+        self.serializer = default_client.serializer
+
+    def client(self, context):
+        transport = context.mq_connection
+        if transport:
+            cmt = self.default_client.call_monitor_timeout
+            return messaging.RPCClient(transport, self.target,
+                                       version_cap=self.version_cap,
+                                       serializer=self.serializer,
+                                       call_monitor_timeout=cmt)
+        else:
+            return self.default_client
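
A hedged startup sketch for this module (the topic name and version cap are
illustrative; CONF must already be parsed before init() runs):

    import oslo_messaging as messaging

    import gosbs.conf
    from gosbs import rpc

    CONF = gosbs.conf.CONF

    rpc.init(CONF)

    # Versioned RPC client for a scheduler topic.
    target = messaging.Target(topic='scheduler', version='1.0')
    client = rpc.get_client(target, version_cap='1.0')

    # Legacy notifier; event_types are checked against the whitelist above.
    notifier = rpc.get_notifier('scheduler', host=CONF.host)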

diff --git a/gosbs/safe_utils.py b/gosbs/safe_utils.py
new file mode 100644
index 0000000..0ee7162
--- /dev/null
+++ b/gosbs/safe_utils.py
@@ -0,0 +1,41 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+"""Utilities and helper functions that won't produce circular imports."""
+
+
+def get_wrapped_function(function):
+    """Get the method at the bottom of a stack of decorators."""
+    if not hasattr(function, '__closure__') or not function.__closure__:
+        return function
+
+    def _get_wrapped_function(function):
+        if not hasattr(function, '__closure__') or not function.__closure__:
+            return None
+
+        for closure in function.__closure__:
+            func = closure.cell_contents
+
+            deeper_func = _get_wrapped_function(func)
+            if deeper_func:
+                return deeper_func
+            elif hasattr(closure.cell_contents, '__call__'):
+                return closure.cell_contents
+
+        return function
+
+    return _get_wrapped_function(function)
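
A small self-contained example of what get_wrapped_function() recovers (the
decorator here deliberately skips functools.wraps, which is exactly the case
this helper exists for):

    from gosbs import safe_utils

    def logged(func):
        def wrapper(*args, **kwargs):
            return func(*args, **kwargs)
        return wrapper

    @logged
    def build(repo, clean=True):
        return repo

    # Digs the original function out of the decorator's closure cells, so its
    # real name and signature can be inspected.
    inner = safe_utils.get_wrapped_function(build)
    assert inner.__name__ == 'build'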

diff --git a/pym/tbc/__init__.py b/gosbs/scheduler/__init__.py
similarity index 100%
copy from pym/tbc/__init__.py
copy to gosbs/scheduler/__init__.py

diff --git a/gosbs/scheduler/category.py b/gosbs/scheduler/category.py
new file mode 100644
index 0000000..c9b98e7
--- /dev/null
+++ b/gosbs/scheduler/category.py
@@ -0,0 +1,113 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import re
+import os
+from portage.xml.metadata import MetaDataXML
+from portage.checksum import perform_checksum
+
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def get_category_metadata_tree(c_path):
+    # Make categories_metadataDict
+    categories_metadataDict = {}
+    try:
+        metadata_xml_checksum = perform_checksum(c_path + "/metadata.xml", "SHA256")[0]
+    except Exception as e:
+        return None
+    categories_metadataDict['metadata_xml_checksum'] = metadata_xml_checksum
+    pkg_md = MetaDataXML(c_path + "/metadata.xml", None)
+    if pkg_md.descriptions():
+        metadata_xml_descriptions_tree = re.sub('\t', '', pkg_md.descriptions()[0])
+        metadata_xml_descriptions_tree = re.sub('\n', '', metadata_xml_descriptions_tree)
+    else:
+        metadata_xml_descriptions_tree = ''
+    categories_metadataDict['metadata_xml_descriptions'] = metadata_xml_descriptions_tree
+    return categories_metadataDict
+
+def create_cm_db(context, category_db, category_metadata_tree):
+    category_metadata_db = objects.category_metadata.CategoryMetadata()
+    category_metadata_db.category_uuid = category_db.uuid
+    category_metadata_db.description = category_metadata_tree['metadata_xml_descriptions']
+    category_metadata_db.checksum = category_metadata_tree['metadata_xml_checksum']
+    category_metadata_db.create(context)
+    return category_metadata_db
+
+def create_c_db(context, category):
+    category_db = objects.category.Category()
+    category_db.uuid = uuidutils.generate_uuid()
+    category_db.name = category
+    category_db.status = 'in-progress'
+    category_db.create(context)
+    return category_db
+
+def update_cm_db(context, category_metadata_db, category_metadata_tree):
+    category_metadata_db.description = category_metadata_tree['metadata_xml_descriptions']
+    category_metadata_db.checksum = category_metadata_tree['metadata_xml_checksum']
+    category_metadata_db.save(context)
+
+def check_c_db(context, category, repo_db):
+    LOG.debug("Checking %s", category)
+    c_path = CONF.repopath + '/' + repo_db.name + '.git/' + category
+    category_db = objects.category.Category.get_by_name(context, category)
+    if not os.path.isdir(c_path):
+        LOG.error("Path %s is not found for %s", c_path, category)
+        if category_db is not None:
+            category_db.deleted = True
+            category_db.save(context)
+            LOG.info("Deleting %s in the database", category)
+            return True
+        return False
+    if category_db is None:
+        LOG.info("Adding %s to the database", category)
+        category_db = create_c_db(context, category)
+    #if category_db.status == 'in-progress':
+    #    return True
+    category_db.status = 'in-progress'
+    category_db.save(context)
+    category_metadata_tree = get_category_metadata_tree(c_path)
+    if category_metadata_tree is None:
+        category_db.status = 'failed'
+        category_db.save(context)
+        LOG.error("Failed to get metadata for %s", category)
+        return False
+    category_metadata_db = objects.category_metadata.CategoryMetadata.get_by_uuid(context, category_db.uuid)
+    if category_metadata_db is None:
+        category_metadata_db = create_cm_db(context, category_db, category_metadata_tree)
+    if category_metadata_db.checksum != category_metadata_tree['metadata_xml_checksum']:
+        update_cm_db(context, category_metadata_db, category_metadata_tree)
+        LOG.debug("Update %s metadata", category)
+    category_db.status = 'completed'
+    category_db.save(context)
+    return True
+
+def destroy_c_db(context, category):
+    category_db = objects.category.Category.get_by_name(context, category)
+    category_db.destroy(context)
+    return True
+
+def remove_c_db(context, category):
+    category_db = objects.category.Category.get_by_name(context, category)
+    if category_db.deleted is True:
+        category_metadata_db = objects.category_metadata.CategoryMetadata.get_by_uuid(context, category_db.uuid)
+        category_metadata_db.remove(context)
+        category_db.remove(context)
+    else:
+        return False
+    return True
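
A hedged sketch of how check_c_db() is driven by the repo tasks elsewhere in
this commit (only repo_db.name is used here; the Repo lookup, the repo name
and the category name are assumptions for illustration):

    from gosbs import context as gosbs_context
    from gosbs import objects
    from gosbs.scheduler import category

    ctxt = gosbs_context.get_admin_context()
    repo_db = objects.repo.Repo.get_by_name(ctxt, 'gentoo')

    # Creates or refreshes the Category and CategoryMetadata rows for one
    # category directory under CONF.repopath/<repo>.git/.
    category.check_c_db(ctxt, 'app-editors', repo_db)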

diff --git a/gosbs/scheduler/ebuild.py b/gosbs/scheduler/ebuild.py
new file mode 100644
index 0000000..646a6c1
--- /dev/null
+++ b/gosbs/scheduler/ebuild.py
@@ -0,0 +1,203 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import git
+import re
+import portage
+from portage.checksum import perform_checksum
+
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def get_all_cpv_from_package(myportdb, cp, repo_path):
+    mytree = []
+    mytree.append(repo_path)
+    return myportdb.cp_list(cp, use_cache=1, mytree=mytree)
+
+def get_git_log_ebuild(repodir, ebuild_file):
+    git_log_ebuild = ''
+    g = git.Git(repodir)
+    index = 1
+    git_log_dict = {}
+    for line in g.log('-n 1', ebuild_file).splitlines():
+        git_log_dict[index] = line
+        index = index + 1
+    git_ebuild_commit = re.sub('commit ', '', git_log_dict[1])
+    git_ebuild_commit_msg = re.sub('    ', '', git_log_dict[5])
+    return git_ebuild_commit, git_ebuild_commit_msg
+
+def get_ebuild_metadata(myportdb, cpv, repo):
+    # Get the auxdbkeys infos for the ebuild
+    try:
+        ebuild_auxdb_list = myportdb.aux_get(cpv, portage.auxdbkeys, myrepo=repo)
+    except Exception:
+        ebuild_auxdb_list = False
+        LOG.error("Failed to get aux data for %s", cpv)
+    else:
+        for i in range(len(ebuild_auxdb_list)):
+            if ebuild_auxdb_list[i] == '':
+                ebuild_auxdb_list[i] = ''
+    return ebuild_auxdb_list
+
+def create_cpv_db(context, ebuild_version_tree, ebuild_version_checksum_tree, package_uuid):
+    ebuild_version_db = objects.ebuild.Ebuild()
+    ebuild_version_db.ebuild_uuid = uuidutils.generate_uuid()
+    ebuild_version_db.version = ebuild_version_tree
+    ebuild_version_db.checksum = ebuild_version_checksum_tree
+    ebuild_version_db.package_uuid = package_uuid
+    ebuild_version_db.create(context)
+    return ebuild_version_db
+
+def deleted_cpv_db(context, uuid):
+    ebuild_version_db = objects.ebuild.Ebuild.get_by_uuid(context, uuid)
+    ebuild_version_db.deleted = True
+    ebuild_version_db.save(context)
+
+def create_use_db(context, use):
+    use_db = objects.use.Use()
+    use_db.flag = use
+    use_db.description = ''
+    use_db.create(context)
+    return use_db
+
+def check_use_db(context, use):
+    use_db = objects.use.Use.get_by_name(context, use)
+    if use_db is None:
+        use_db = create_use_db(context, use)
+    return use_db.id
+
+def create_cpv_use_db(context, ebuild_version_uuid, ebuild_version_metadata_tree):
+    for iuse in ebuild_version_metadata_tree[10].split():
+        status = False
+        if iuse[0] in ["+"]:
+            iuse = iuse[1:]
+            status = True
+        elif iuse[0] in ["-"]:
+            iuse = iuse[1:]
+        iuse_id = check_use_db(context, iuse)
+        ebuild_iuse_db = objects.ebuild_iuse.EbuildIUse()
+        ebuild_iuse_db.ebuild_uuid = ebuild_version_uuid
+        ebuild_iuse_db.use_id = iuse_id
+        ebuild_iuse_db.status = status
+        ebuild_iuse_db.create(context)
+
+def create_keyword_db(context, keyword):
+    keyword_db = objects.keyword.Keyword()
+    keyword_db.keyword = keyword
+    keyword_db.create(context)
+    return keyword_db
+
+def check_keyword_db(context, keyword):
+    keyword_db = objects.keyword.Keyword.get_by_name(context, keyword)
+    if keyword_db is None:
+        keyword_db = create_keyword_db(context, keyword)
+    return keyword_db.id
+
+def create_cpv_keyword_db(context, ebuild_version_uuid, ebuild_version_metadata_tree):
+    for keyword in ebuild_version_metadata_tree[8].split():
+        status = 'stable'
+        if keyword[0] in ["~"]:
+            keyword = keyword[1:]
+            status = 'unstable'
+        elif keyword[0] in ["-"]:
+            keyword = keyword[1:]
+            status = 'negative'
+        keyword_id = check_keyword_db(context, keyword)
+        ebuild_keyword_db = objects.ebuild_keyword.EbuildKeyword()
+        ebuild_keyword_db.ebuild_uuid = ebuild_version_uuid
+        ebuild_keyword_db.keyword_id = keyword_id
+        ebuild_keyword_db.status = status
+        ebuild_keyword_db.create(context)
+
+def create_restriction_db(context, restriction):
+    restriction_db = objects.restriction.Restriction()
+    restriction_db.restriction = restriction
+    restriction_db.create(context)
+    return restriction_db
+
+def check_restriction_db(context, restriction):
+    restriction_db = objects.restriction.Restriction.get_by_name(context, restriction)
+    if restriction_db is None:
+        restriction_db = create_restriction_db(context, restriction)
+    return restriction_db.id
+
+def create_cpv_restriction_db(context, ebuild_version_uuid, ebuild_version_metadata_tree):
+    for restriction in ebuild_version_metadata_tree[4].split():
+        if restriction.startswith("!"):
+            restriction = restriction[1:]
+        if restriction.endswith("?"):
+            restriction = restriction[:-1]
+        if restriction not in ('(', ')'):
+            restriction_id = check_restriction_db(context, restriction)
+            ebuild_restriction_db = objects.ebuild_restriction.EbuildRestriction()
+            ebuild_restriction_db.ebuild_uuid = ebuild_version_uuid
+            ebuild_restriction_db.restriction_id = restriction_id
+            ebuild_restriction_db.create(context)
+
+def create_cpv_metadata_db(context, myportdb, cpv, ebuild_file, ebuild_version_db, repo_db):
+    repo_path = CONF.repopath + '/' + repo_db.name + '.git'
+    git_commit, git_commit_msg = get_git_log_ebuild(repo_path, ebuild_file)
+    ebuild_version_metadata_tree = get_ebuild_metadata(myportdb, cpv, repo_db.name)
+    ebuild_metadata_db = objects.ebuild_metadata.EbuildMetadata()
+    ebuild_metadata_db.ebuild_uuid = ebuild_version_db.uuid
+    ebuild_metadata_db.commit = git_commit
+    ebuild_metadata_db.commit_msg = git_commit_msg
+    ebuild_metadata_db.description = ebuild_version_metadata_tree[7]
+    ebuild_metadata_db.slot = ebuild_version_metadata_tree[2]
+    ebuild_metadata_db.homepage = ebuild_version_metadata_tree[5]
+    ebuild_metadata_db.license = ebuild_version_metadata_tree[6]
+    ebuild_metadata_db.create(context)
+    create_cpv_restriction_db(context, ebuild_version_db.uuid, ebuild_version_metadata_tree)
+    create_cpv_use_db(context, ebuild_version_db.uuid, ebuild_version_metadata_tree)
+    create_cpv_keyword_db(context, ebuild_version_db.uuid, ebuild_version_metadata_tree)
+    return True
+
+def check_cpv_db(context, myportdb, cp, repo_db, package_uuid):
+    repo_path = CONF.repopath + '/' + repo_db.name + '.git'
+    filters = { 'deleted' : False,
+                'package_uuid' : package_uuid,
+            }
+    ebuild_version_tree_list = []
+    ebuild_version_tree_dict_new = {}
+    succes = True
+    for cpv in sorted(get_all_cpv_from_package(myportdb, cp, repo_path)):
+        LOG.debug("Checking %s", cpv)
+        ebuild_version_tree = portage.versions.cpv_getversion(cpv)
+        package = portage.versions.catpkgsplit(cpv)[1]
+        ebuild_file = repo_path + "/" + cp + "/" + package + "-" + ebuild_version_tree + ".ebuild"
+        if not os.path.isfile(ebuild_file):
+            LOG.error("File %s is not found for %s", ebuild_file, cpv)
+            return False, ebuild_version_tree_dict_new
+        ebuild_version_checksum_tree = perform_checksum(ebuild_file, "SHA256")[0]
+        ebuild_version_db = objects.ebuild.Ebuild.get_by_name(context, ebuild_version_tree, filters=filters)
+        if ebuild_version_db is None or ebuild_version_db.checksum != ebuild_version_checksum_tree:
+            if ebuild_version_db is not None and ebuild_version_db.checksum != ebuild_version_checksum_tree:
+                LOG.debug("Update %s", cpv)
+                deleted_cpv_db(context, ebuild_version_db.uuid)
+            else:
+                LOG.info("Adding %s to the database", cpv)
+            ebuild_version_db = create_cpv_db(context, ebuild_version_tree, ebuild_version_checksum_tree, package_uuid)
+            succes = create_cpv_metadata_db(context, myportdb, cpv, ebuild_file, ebuild_version_db, repo_db)
+            ebuild_version_tree_dict_new[cpv] = ebuild_version_db.uuid
+        ebuild_version_tree_list.append(ebuild_version_tree)
+    for ebuild_db in objects.ebuild.EbuildList.get_all(context, filters=filters):
+        if ebuild_db.version not in ebuild_version_tree_list:
+            LOG.info("Deleting %s in the database", ebuild_db.version)
+            deleted_cpv_db(context, ebuild_db.uuid)
+    return succes, ebuild_version_tree_dict_new

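Note on the IUSE handling above: create_cpv_use_db() strips an optional '+'/'-' prefix from each IUSE entry and only treats a leading '+' as enabled by default. A minimal standalone sketch of that step (parse_iuse() is a hypothetical helper, not part of the commit):

def parse_iuse(iuse_string):
    # Mirror the prefix handling in create_cpv_use_db(): '+foo' is enabled
    # by default, '-foo' and plain 'foo' are not.
    flags = []
    for iuse in iuse_string.split():
        status = False
        if iuse.startswith('+'):
            iuse = iuse[1:]
            status = True
        elif iuse.startswith('-'):
            iuse = iuse[1:]
        flags.append((iuse, status))
    return flags

print(parse_iuse('+ssl -debug test'))
# [('ssl', True), ('debug', False), ('test', False)]
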
diff --git a/gosbs/scheduler/email.py b/gosbs/scheduler/email.py
new file mode 100644
index 0000000..581b22a
--- /dev/null
+++ b/gosbs/scheduler/email.py
@@ -0,0 +1,36 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+
+LOG = logging.getLogger(__name__)
+
+def create_email(context, email):
+    email_db = objects.email.Email()
+    email_db.email = email
+    email_db.create(context)
+    return email_db
+
+def check_email_db(context, email):
+    email_db = objects.email.Email.get_by_name(context, email)
+    if email_db is None:
+        email_db = create_email(context, email)
+    else:
+        if email_db.deleted is True:
+            email_db.deleted = False
+            email_db.save(context)
+    return email_db.id

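check_email_db() above is the usual get-or-create pattern plus reviving soft-deleted rows. A rough, self-contained sketch of that pattern with a plain dict standing in for the objects layer (everything below is illustrative, not part of the commit):

# 'store' stands in for the email table; each row carries a 'deleted'
# flag, mirroring the soft-delete handling in check_email_db().
store = {}

def check_email(email):
    row = store.get(email)
    if row is None:
        row = {'email': email, 'deleted': False}
        store[email] = row
    elif row['deleted']:
        # Revive a soft-deleted address instead of adding a duplicate.
        row['deleted'] = False
    return row

check_email('larry@gentoo.org')
store['larry@gentoo.org']['deleted'] = True
print(check_email('larry@gentoo.org'))  # deleted flag is back to False
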
diff --git a/gosbs/scheduler/manager.py b/gosbs/scheduler/manager.py
new file mode 100644
index 0000000..fea9923
--- /dev/null
+++ b/gosbs/scheduler/manager.py
@@ -0,0 +1,141 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/compute/manager.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning.
+
+"""Handles all processes relating to instances (guest vms).
+
+The :py:class:`ComputeManager` class is a :py:class:`nova.manager.Manager` that
+handles RPC calls relating to creating instances.  It is responsible for
+building a disk image, launching it via the underlying virtualization driver,
+responding to calls to check its state, attaching persistent storage, and
+terminating it.
+
+"""
+#import functools
+#import json
+from datetime import datetime
+from dateutil.relativedelta import relativedelta
+from importlib import import_module
+import pytz
+import sys
+
+from oslo_log import log as logging
+import oslo_messaging as messaging
+from oslo_service import periodic_task
+from oslo_utils import timeutils
+from openstack import connection
+
+from gosbs.scheduler import rpcapi as scheduler_rpcapi
+from gosbs import rpc
+#from gosbs import exception_wrapper
+from gosbs import manager
+from gosbs import objects
+from gosbs.objects import base as obj_base
+from gosbs.objects import fields
+from gosbs.tasks import scheduler as scheduler_tasks
+from gosbs.common.task import run_task
+
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+
+LOG = logging.getLogger(__name__)
+
+#get_notifier = functools.partial(rpc.get_notifier, service='scheduler')
+#wrap_exception = functools.partial(exception_wrapper.wrap_exception,
+#                                   get_notifier=get_notifier,
+#                                   binary='gobs-scheduler')
+
+class SchedulerManager(manager.Manager):
+    """Manages the running instances from creation to destruction."""
+
+    #target = messaging.Target(version='1.0')
+
+    def __init__(self, *args, **kwargs):
+        """Load configuration options and connect to the hypervisor."""
+        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+
+        super(SchedulerManager, self).__init__(service_name="scheduler",
+                                             *args, **kwargs)
+
+
+    def init_host(self):
+        context = gosbs.context.get_admin_context()
+
+    def pre_start_hook(self):
+        context = gosbs.context.get_admin_context()
+        self.openstack_conn = connection.Connection(
+            region_name = CONF.keystone.region_name,
+            auth=dict(
+                auth_url = CONF.keystone.auth_url,
+                username = CONF.keystone.username,
+                password = CONF.keystone.password,
+                project_id = CONF.keystone.project_id,
+                user_domain_id = CONF.keystone.user_domain_name),
+            scheduler_api_version = CONF.keystone.auth_version,
+            identity_interface= CONF.keystone.identity_interface)
+        self.service_ref = objects.Service.get_by_host_and_topic(
+            context, self.host, "scheduler")
+        scheduler_tasks.activete_all_tasks(context, self.service_ref.uuid)
+
+    def reset(self):
+        LOG.info('Reloading scheduler RPC API')
+        scheduler_rpcapi.LAST_VERSION = None
+        self.scheduler_rpcapi = scheduler_rpcapi.SchedulerAPI()
+
+    @periodic_task.periodic_task
+    def update_repo_task(self, context):
+        task_name = 'update_repos'
+        LOG.debug("Runing task %s", task_name)
+        filters = { 'status' : 'waiting', 
+                    'name' : task_name,
+                    'service_uuid' : self.service_ref.uuid,
+                }
+        run_task(context, filters, self.service_ref)
+
+    @periodic_task.periodic_task
+    def update_git_task(self, context):
+        task_name = 'update_git'
+        LOG.debug("Runing task %s", task_name)
+        filters = { 'status' : 'waiting', 
+                    'name' : task_name,
+                    'service_uuid' : self.service_ref.uuid,
+                }
+        run_task(context, filters, self.service_ref)
+
+    @periodic_task.periodic_task
+    def rebuild_db_task(self, context):
+        task_name = 'rebuild_db'
+        LOG.debug("Runing task %s", task_name)
+        filters = { 'status' : 'waiting', 
+                    'name' : task_name,
+                    'service_uuid' : self.service_ref.uuid,
+                }
+        run_task(context, filters, self.service_ref)
+
+    @periodic_task.periodic_task
+    def update_db_task(self, context):
+        task_name = 'update_db'
+        LOG.debug("Runing task %s", task_name)
+        filters = { 'status' : 'waiting', 
+                    'name' : task_name,
+                    'service_uuid' : self.service_ref.uuid,
+                }
+        run_task(context, filters, self.service_ref)

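The four periodic tasks above differ only in the task name they look up; the filter dict they build is the same. A small sketch of that shared piece (make_task_filters() is a hypothetical helper, not part of the commit):

def make_task_filters(task_name, service_uuid):
    # The 'waiting task with this name for this service' filter used by
    # every periodic task in SchedulerManager.
    return {'status': 'waiting',
            'name': task_name,
            'service_uuid': service_uuid,
            }

print(make_task_filters('update_repos', '11111111-2222-3333-4444-555555555555'))
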
diff --git a/gosbs/scheduler/package.py b/gosbs/scheduler/package.py
new file mode 100644
index 0000000..07b467c
--- /dev/null
+++ b/gosbs/scheduler/package.py
@@ -0,0 +1,180 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import os
+import git
+
+import portage
+from portage.xml.metadata import MetaDataXML
+from portage.checksum import perform_checksum
+
+from oslo_log import log as logging
+from oslo_utils import uuidutils
+
+from gosbs import objects
+from gosbs.scheduler.email import check_email_db
+from gosbs.scheduler.ebuild import check_cpv_db, deleted_cpv_db
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def get_all_cp_from_repo(myportdb, repopath):
+    repo_dir_list = []
+    repo_dir_list.append(repopath)
+    # Get the package list from the repo
+    return myportdb.cp_all(trees=repo_dir_list)
+
+def get_git_changelog_text(repodir, cp):
+    n = '5'
+    git_log_pkg = ''
+    g = git.Git(repodir)
+    git_log_pkg = g.log('-n ' + n, '--grep=' + cp)
+    return git_log_pkg
+
+def get_package_metadataDict(repodir, cp):
+    # Make package_metadataDict
+    package_metadataDict = {}
+    try:
+        package_metadataDict['metadata_xml_checksum'] = perform_checksum(repodir + "/" + cp + "/metadata.xml", "SHA256")[0]
+    except Exception as e:
+        package_metadataDict['metadata_xml_checksum'] = False
+        return package_metadataDict
+    md_email_list = []
+    pkg_md = MetaDataXML(repodir + "/" + cp + "/metadata.xml", None)
+    tmp_descriptions = pkg_md.descriptions()
+    if tmp_descriptions:
+        package_metadataDict['metadata_xml_descriptions'] = tmp_descriptions[0]
+    else:
+        package_metadataDict['metadata_xml_descriptions'] = ''
+    tmp_herds = pkg_md.herds()
+    if tmp_herds:
+        package_metadataDict['metadata_xml_herds'] = tmp_herds[0]
+        md_email_list.append(package_metadataDict['metadata_xml_herds'] + '@gentoo.org')
+    for maint in pkg_md.maintainers():
+        md_email_list.append(maint.email)
+    if md_email_list != []:
+        package_metadataDict['metadata_xml_email'] = md_email_list
+    else:
+        md_email_list.append('maintainer-needed@gentoo.org')
+        package_metadataDict['metadata_xml_email'] = md_email_list
+            #log_msg = "Metadata file %s missing Email. Setting it to maintainer-needed" % (pkgdir + "/metadata.xml")
+            #write_log(self._session, log_msg, "warning", self._config_id, 'packages.get_package_metadataDict')
+    package_metadataDict['git_changlog'] = get_git_changelog_text(repodir, cp)
+    return package_metadataDict
+
+def update_cp_metadata_db(context, cp, repo_name, package_metadata_db):
+    repodir = CONF.repopath + '/' + repo_name + '.git'
+    package_metadataDict = get_package_metadataDict(repodir, cp)
+    if not package_metadataDict['metadata_xml_checksum']:
+        return False
+    package_metadata_db.gitlog = package_metadataDict['git_changlog']
+    package_metadata_db.description = package_metadataDict['metadata_xml_descriptions']
+    package_metadata_db.checksum = package_metadataDict['metadata_xml_checksum']
+    package_metadata_db.save(context)
+    return package_metadataDict['metadata_xml_email']
+    
+def create_cp_metadata_db(context, cp, repo_name, package_uuid):
+    repodir = CONF.repopath + '/' + repo_name + '.git'
+    package_metadataDict = get_package_metadataDict(repodir, cp)
+    if not package_metadataDict['metadata_xml_checksum']:
+        return False
+    package_metadata_db = objects.package_metadata.PackageMetadata()
+    package_metadata_db.package_uuid = package_uuid
+    package_metadata_db.gitlog = package_metadataDict['git_changlog']
+    package_metadata_db.description = package_metadataDict['metadata_xml_descriptions']
+    package_metadata_db.checksum = package_metadataDict['metadata_xml_checksum']
+    package_metadata_db.create(context)
+    return package_metadataDict['metadata_xml_email']
+
+def create_cp_email_db(context, email_id, package_uuid):
+    package_email_db = objects.package_email.PackageEmail()
+    package_email_db.package_uuid = package_uuid
+    package_email_db.email_id = email_id
+    package_email_db.create(context)
+
+def check_cp_email_db(context, metadata_xml_email, package_uuid):
+    filters = { 'package_uuid' : package_uuid }
+    for package_email in metadata_xml_email:
+        email_id = check_email_db(context, package_email)
+        package_email_db = objects.package_email.PackageEmail.get_by_email_id(context, email_id, filters=filters)
+        if package_email_db is None:
+            create_cp_email_db(context, email_id, package_uuid)
+    return True
+
+def check_cp_metadata_db(context, cp, repo_name, package_uuid):
+    repodir = CONF.repopath + '/' + repo_name + '.git'
+    succes = True
+    package_metadata_db = objects.package_metadata.PackageMetadata.get_by_uuid(context, package_uuid)
+    if package_metadata_db is None:
+        LOG.debug("Adding %s metadata", cp)
+        metadata_xml_email = create_cp_metadata_db(context, cp, repo_name, package_uuid)
+        succes = check_cp_email_db(context, metadata_xml_email, package_uuid)
+    else:
+        package_metadata_tree_checksum = perform_checksum(repodir + "/" + cp + "/metadata.xml", "SHA256")[0]
+        if package_metadata_tree_checksum != package_metadata_db.checksum:
+            LOG.debug("Update %s metadata", cp)
+            metadata_xml_email = update_cp_metadata_db(context, cp, repo_name, package_metadata_db)
+            succes = check_cp_email_db(context, metadata_xml_email, package_uuid)
+    return succes
+
+def deleted_cp_db(context, package_db):
+    filters = { 'deleted' : False,
+                'package_uuid' : package_db.uuid,
+            }
+    for ebuild_db in objects.ebuild.EbuildList.get_all(context, filters=filters):
+        LOG.info("Deleting %s in the database", ebuild_db.version)
+        deleted_cpv_db(context, ebuild_db.uuid)
+    package_db.deleted = True
+    package_db.status = 'completed'
+    package_db.save(context)
+
+def create_cp_db(context, package, repo_db, category_db):
+    package_db = objects.package.Package()
+    package_db.uuid = uuidutils.generate_uuid()
+    package_db.name = package
+    package_db.status = 'in-progress'
+    package_db.category_uuid = category_db.uuid
+    package_db.repo_uuid = repo_db.uuid
+    package_db.create(context)
+    return package_db
+    
+def check_cp_db(context, myportdb, cp, repo_db, category_db):
+    package = cp.split('/')[1]
+    cp_path = CONF.repopath + '/' + repo_db.name + '.git/' + cp
+    filters = { 'repo_uuid' : repo_db.uuid,
+               'category_uuid' : category_db.uuid,
+               'deleted' : False,
+            }
+    package_db = objects.package.Package.get_by_name(context, package, filters=filters)
+    if not os.path.isdir(cp_path) and package_db is None:
+        LOG.error("Path %s is not found for %s", cp_path, cp)
+        return False, {}
+    elif not os.path.isdir(cp_path) and package_db is not None:
+        LOG.info("Deleting %s in the database", cp)
+        deleted_cp_db(context, package_db)
+        return True, {}
+    elif os.path.isdir(cp_path) and package_db is None:
+        LOG.info("Adding %s to the database", cp)
+        package_db = create_cp_db(context, package, repo_db, category_db)
+    package_db.status = 'in-progress'
+    package_db.save(context)
+    succes1 = check_cp_metadata_db(context, cp, repo_db.name, package_db.uuid)
+    succes2, ebuild_version_tree_dict_new = check_cpv_db(context, myportdb, cp, repo_db, package_db.uuid)
+    if not succes1 or not succes2:
+        package_db.status = 'failed'
+        package_db.save(context)
+        return False, ebuild_version_tree_dict_new
+    package_db.status = 'completed'
+    package_db.save(context)
+    return True, ebuild_version_tree_dict_new

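In get_package_metadataDict() above the e-mail list is built from the herd (if any) and the maintainers, and falls back to maintainer-needed@gentoo.org when metadata.xml names neither. A standalone sketch of just that fallback (collect_emails() is a hypothetical helper, not part of the commit):

def collect_emails(herds, maintainer_emails):
    # Same fallback as get_package_metadataDict(): herd address first,
    # then maintainers, and maintainer-needed if the list stays empty.
    emails = []
    if herds:
        emails.append(herds[0] + '@gentoo.org')
    emails.extend(maintainer_emails)
    if not emails:
        emails.append('maintainer-needed@gentoo.org')
    return emails

print(collect_emails([], []))
# ['maintainer-needed@gentoo.org']
print(collect_emails(['python'], ['larry@gentoo.org']))
# ['python@gentoo.org', 'larry@gentoo.org']
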
diff --git a/gosbs/scheduler/project.py b/gosbs/scheduler/project.py
new file mode 100644
index 0000000..5c49be6
--- /dev/null
+++ b/gosbs/scheduler/project.py
@@ -0,0 +1,35 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def get_project(context, service_repo_db):
+    project_db = objects.project.Project.get_by_name(context, CONF.scheduler.db_project_repo)
+    project_metadata_db = objects.project_metadata.ProjectMetadata.get_by_uuid(context, project_db.uuid)
+    filters = { 'project_uuid' : project_db.uuid, 
+               'repo_uuid' : service_repo_db.repo_uuid,
+               }
+    project_repo_db = objects.project_repo.ProjectRepo.get_by_filters(context, filters=filters)
+    if project_repo_db is None:
+        project_repo_db = objects.project_repo.ProjectRepo()
+        project_repo_db.project_uuid = project_db.uuid
+        project_repo_db.repo_uuid = service_repo_db.repo_uuid
+        project_repo_db.build = False
+        project_repo_db.create(context)
+    return project_db, project_metadata_db

diff --git a/gosbs/scheduler/rpcapi.py b/gosbs/scheduler/rpcapi.py
new file mode 100644
index 0000000..3af4067
--- /dev/null
+++ b/gosbs/scheduler/rpcapi.py
@@ -0,0 +1,127 @@
+# Copyright 2013 Red Hat, Inc.
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/compute/rpcapi.py
+# We have changed the code so it will fit what we need.
+# It needs more cleaning and work.
+
+"""
+Client side of the scheduler RPC API.
+"""
+
+from oslo_log import log as logging
+import oslo_messaging as messaging
+from oslo_serialization import jsonutils
+
+import gosbs.conf
+from gosbs import context
+from gosbs import exception
+from gosbs.i18n import _
+from gosbs import objects
+from gosbs.objects import base as objects_base
+from gosbs.objects import service as service_obj
+from gosbs import profiler
+from gosbs import rpc
+
+CONF = gosbs.conf.CONF
+RPC_TOPIC = "scheduler"
+
+LOG = logging.getLogger(__name__)
+LAST_VERSION = None
+
+@profiler.trace_cls("rpc")
+class SchedulerAPI(object):
+    '''Client side of the scheduler rpc API.
+
+        * 1.0  - Initial version
+    '''
+
+    VERSION_ALIASES = {
+        'rocky': '1.0',
+    }
+
+    def __init__(self):
+        super(SchedulerAPI, self).__init__()
+        target = messaging.Target(topic=RPC_TOPIC, version='1.0')
+        upgrade_level = CONF.upgrade_levels.scheduler
+        if upgrade_level == 'auto':
+            version_cap = self._determine_version_cap(target)
+        else:
+            version_cap = self.VERSION_ALIASES.get(upgrade_level,
+                                                   upgrade_level)
+        serializer = objects_base.NovaObjectSerializer()
+
+        # NOTE(danms): We need to poke this path to register CONF options
+        # that we use in self.get_client()
+        rpc.get_client(target, version_cap, serializer)
+
+        default_client = self.get_client(target, version_cap, serializer)
+        self.router = rpc.ClientRouter(default_client)
+
+    def _determine_version_cap(self, target):
+        global LAST_VERSION
+        if LAST_VERSION:
+            return LAST_VERSION
+        service_version = objects.Service.get_minimum_version(
+            context.get_admin_context(), 'gosbs-scheduler')
+
+        history = service_obj.SERVICE_VERSION_HISTORY
+
+        # NOTE(johngarbutt) when there are no gosbs-scheduler services running
+        # we get service_version == 0. In that case we do not want to cache
+        # this result, because we will get a better answer next time.
+        # As a sane default, return the current version.
+        if service_version == 0:
+            LOG.debug("Not caching scheduler RPC version_cap, because min "
+                      "service_version is 0. Please ensure a gosbs-scheduler "
+                      "service has been started. Defaulting to current "
+                      "version.")
+            return history[service_obj.SERVICE_VERSION]['scheduler_rpc']
+
+        try:
+            version_cap = history[service_version]['scheduler_rpc']
+        except IndexError:
+            LOG.error('Failed to extract scheduler RPC version from '
+                      'service history because I am too '
+                      'old (minimum version is now %(version)i)',
+                      {'version': service_version})
+            raise exception.ServiceTooOld(thisver=service_obj.SERVICE_VERSION,
+                                          minver=service_version)
+        except KeyError:
+            LOG.error('Failed to extract scheduler RPC version from '
+                      'service history for version %(version)i',
+                      {'version': service_version})
+            return target.version
+        LAST_VERSION = version_cap
+        LOG.info('Automatically selected scheduler RPC version %(rpc)s '
+                 'from minimum service version %(service)i',
+                 {'rpc': version_cap,
+                  'service': service_version})
+        return version_cap
+
+    # Cells overrides this
+    def get_client(self, target, version_cap, serializer):
+        if CONF.rpc_response_timeout > rpc.HEARTBEAT_THRESHOLD:
+            # NOTE(danms): If the operator has overridden RPC timeout
+            # to be longer than rpc.HEARTBEAT_THRESHOLD then configure
+            # the call monitor timeout to be the threshold to keep the
+            # failure timing characteristics that our code likely
+            # expects (from history) while allowing healthy calls
+            # to run longer.
+            cmt = rpc.HEARTBEAT_THRESHOLD
+        else:
+            cmt = None
+        return rpc.get_client(target,
+                              version_cap=version_cap,
+                              serializer=serializer,
+                              call_monitor_timeout=cmt)

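The version-cap selection in _determine_version_cap() is essentially a lookup of the minimum deployed service version in the service version history, with the current version as fallback while no service has reported yet. A simplified, self-contained sketch (the version numbers and history table below are made up for illustration; the real table lives in gosbs.objects.service):

SERVICE_VERSION = 2
SERVICE_VERSION_HISTORY = {
    0: {'scheduler_rpc': '1.0'},
    1: {'scheduler_rpc': '1.0'},
    2: {'scheduler_rpc': '1.1'},
}

def pick_version_cap(min_service_version):
    if min_service_version == 0:
        # No scheduler has reported a version yet: do not cache,
        # just answer with the current version.
        return SERVICE_VERSION_HISTORY[SERVICE_VERSION]['scheduler_rpc']
    return SERVICE_VERSION_HISTORY[min_service_version]['scheduler_rpc']

print(pick_version_cap(0))  # '1.1'
print(pick_version_cap(1))  # '1.0'
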
diff --git a/gosbs/service.py b/gosbs/service.py
new file mode 100644
index 0000000..ea67f81
--- /dev/null
+++ b/gosbs/service.py
@@ -0,0 +1,331 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/service.py
+# We removed the WSGIService class.
+
+"""Generic Node base class for all workers that run on hosts."""
+
+import os
+import random
+import sys
+
+#from oslo_concurrency import processutils
+from oslo_log import log as logging
+import oslo_messaging as messaging
+from oslo_service import service
+from oslo_utils import importutils
+
+#from nova.api import wsgi as api_wsgi
+from gosbs import baserpc
+#from gosbs import conductor
+import gosbs.conf
+from gosbs import context
+from gosbs import debugger
+from gosbs import exception
+from gosbs.i18n import _, _LE, _LI, _LW
+from gosbs import objects
+from gosbs.objects import base as objects_base
+from gosbs.objects import service as service_obj
+from gosbs import rpc
+#from gosbs import servicegroup
+from gosbs import utils
+from gosbs import version
+#from gosbs import wsgi
+
+osprofiler = importutils.try_import("osprofiler")
+osprofiler_initializer = importutils.try_import("osprofiler.initializer")
+
+
+LOG = logging.getLogger(__name__)
+
+CONF = gosbs.conf.CONF
+
+SERVICE_MANAGERS = {
+    'gosbs-gitmirror': 'gosbs.gitmirror.manager.GitMirrorManager',
+    'gosbs-scheduler': 'gosbs.scheduler.manager.SchedulerManager',
+}
+
+
+def _create_service_ref(this_service, context):
+    service = objects.Service(context)
+    service.host = this_service.host
+    service.binary = this_service.binary
+    service.topic = this_service.topic
+    service.report_count = 0
+    service.create()
+    return service
+
+
+def _update_service_ref(service):
+    if service.version != service_obj.SERVICE_VERSION:
+        LOG.info(_LI('Updating service version for %(binary)s on '
+                     '%(host)s from %(old)i to %(new)i'),
+                 {'binary': service.binary,
+                  'host': service.host,
+                  'old': service.version,
+                  'new': service_obj.SERVICE_VERSION})
+        service.version = service_obj.SERVICE_VERSION
+        service.save()
+
+
+def setup_profiler(binary, host):
+    if osprofiler and CONF.profiler.enabled:
+        osprofiler.initializer.init_from_conf(
+            conf=CONF,
+            context=context.get_admin_context().to_dict(),
+            project="gosbs",
+            service=binary,
+            host=host)
+        LOG.info(_LI("OSProfiler is enabled."))
+
+
+def assert_eventlet_uses_monotonic_clock():
+    from eventlet import hubs
+    import monotonic
+
+    hub = hubs.get_hub()
+    if hub.clock is not monotonic.monotonic:
+        raise RuntimeError(
+            'eventlet hub is not using a monotonic clock - '
+            'periodic tasks will be affected by drifts of system time.')
+
+
+class Service(service.Service):
+    """Service object for binaries running on hosts.
+
+    A service takes a manager and enables rpc by listening to queues based
+    on topic. It also periodically runs tasks on the manager and reports
+    its state to the database services table.
+    """
+
+    def __init__(self, host, binary, topic, manager, report_interval=None,
+                 periodic_enable=None, periodic_fuzzy_delay=None,
+                 periodic_interval_max=None, *args, **kwargs):
+        super(Service, self).__init__()
+        self.host = host
+        self.binary = binary
+        self.topic = topic
+        self.manager_class_name = manager
+        #self.servicegroup_api = servicegroup.API()
+        manager_class = importutils.import_class(self.manager_class_name)
+        #if objects_base.NovaObject.indirection_api:
+        #    conductor_api = conductor.API()
+        #    conductor_api.wait_until_ready(context.get_admin_context())
+        self.manager = manager_class(host=self.host, *args, **kwargs)
+        self.rpcserver = None
+        self.report_interval = report_interval
+        self.periodic_enable = periodic_enable
+        self.periodic_fuzzy_delay = periodic_fuzzy_delay
+        self.periodic_interval_max = periodic_interval_max
+        self.saved_args, self.saved_kwargs = args, kwargs
+        self.backdoor_port = None
+        setup_profiler(binary, self.host)
+
+    def __repr__(self):
+        return "<%(cls_name)s: host=%(host)s, binary=%(binary)s, " \
+               "manager_class_name=%(manager)s>" % {
+                 'cls_name': self.__class__.__name__,
+                 'host': self.host,
+                 'binary': self.binary,
+                 'manager': self.manager_class_name
+                }
+
+    def start(self):
+        """Start the service.
+
+        This includes starting an RPC service, initializing
+        periodic tasks, etc.
+        """
+        assert_eventlet_uses_monotonic_clock()
+
+        verstr = version.version_string_with_package()
+        LOG.info(_LI('Starting %(topic)s server (version %(version)s)'),
+                  {'topic': self.topic, 'version': verstr})
+        self.basic_config_check()
+        self.manager.init_host()
+        self.model_disconnected = False
+        ctxt = context.get_admin_context()
+        self.service_ref = objects.Service.get_by_host_and_binary(
+            ctxt, self.host, self.binary)
+        if self.service_ref:
+            _update_service_ref(self.service_ref)
+
+        else:
+            try:
+                self.service_ref = _create_service_ref(self, ctxt)
+            except (exception.ServiceTopicExists,
+                    exception.ServiceBinaryExists):
+                # NOTE(danms): If we race to create a record with a sibling
+                # worker, don't fail here.
+                self.service_ref = objects.Service.get_by_host_and_binary(
+                    ctxt, self.host, self.binary)
+
+        self.manager.pre_start_hook()
+
+        if self.backdoor_port is not None:
+            self.manager.backdoor_port = self.backdoor_port
+
+        LOG.debug("Creating RPC server for service %s", self.topic)
+
+        target = messaging.Target(topic=self.topic, server=self.host)
+
+        endpoints = [
+            self.manager,
+            baserpc.BaseRPCAPI(self.manager.service_name, self.backdoor_port)
+        ]
+        endpoints.extend(self.manager.additional_endpoints)
+
+        serializer = objects_base.NovaObjectSerializer()
+
+        self.rpcserver = rpc.get_server(target, endpoints, serializer)
+        #self.rpcserver.start()
+
+        self.manager.post_start_hook()
+
+        LOG.debug("Join ServiceGroup membership for this service %s",
+                  self.topic)
+        # Add service to the ServiceGroup membership group.
+        #self.servicegroup_api.join(self.host, self.topic, self)
+
+        if self.periodic_enable:
+            if self.periodic_fuzzy_delay:
+                initial_delay = random.randint(0, self.periodic_fuzzy_delay)
+            else:
+                initial_delay = None
+
+            self.tg.add_dynamic_timer(self.periodic_tasks,
+                                     initial_delay=initial_delay,
+                                     periodic_interval_max=
+                                        self.periodic_interval_max)
+
+    def __getattr__(self, key):
+        manager = self.__dict__.get('manager', None)
+        return getattr(manager, key)
+
+    @classmethod
+    def create(cls, host=None, binary=None, topic=None, manager=None,
+               report_interval=None, periodic_enable=None,
+               periodic_fuzzy_delay=None, periodic_interval_max=None):
+        """Instantiates class and passes back application object.
+
+        :param host: defaults to CONF.host
+        :param binary: defaults to basename of executable
+        :param topic: defaults to bin_name - 'gosbs-' part
+        :param manager: defaults to the SERVICE_MANAGERS entry for the binary
+        :param report_interval: defaults to CONF.report_interval
+        :param periodic_enable: defaults to CONF.periodic_enable
+        :param periodic_fuzzy_delay: defaults to CONF.periodic_fuzzy_delay
+        :param periodic_interval_max: if set, the max time to wait between runs
+
+        """
+        if not host:
+            host = CONF.host
+        if not binary:
+            binary = os.path.basename(sys.argv[0])
+        if not topic:
+            topic = binary.rpartition('gosbs-')[2]
+        if not manager:
+            manager = SERVICE_MANAGERS.get(binary)
+        if report_interval is None:
+            report_interval = CONF.report_interval
+        if periodic_enable is None:
+            periodic_enable = CONF.periodic_enable
+        if periodic_fuzzy_delay is None:
+            periodic_fuzzy_delay = CONF.periodic_fuzzy_delay
+
+        debugger.init()
+
+        service_obj = cls(host, binary, topic, manager,
+                          report_interval=report_interval,
+                          periodic_enable=periodic_enable,
+                          periodic_fuzzy_delay=periodic_fuzzy_delay,
+                          periodic_interval_max=periodic_interval_max)
+
+        return service_obj
+
+    def kill(self):
+        """Destroy the service object in the datastore.
+
+        NOTE: Although this method is only used by tests, it is
+        convenient to have it here, so the tests can stop and remove
+        the service_ref in a clean way.
+
+        """
+        self.stop()
+        try:
+            self.service_ref.destroy()
+        except exception.NotFound:
+            LOG.warning(_LW('Service killed that has no database entry'))
+
+    def stop(self):
+        """stop the service and clean up."""
+        try:
+            self.rpcserver.stop()
+            self.rpcserver.wait()
+        except Exception:
+            pass
+
+        try:
+            self.manager.cleanup_host()
+        except Exception:
+            LOG.exception(_LE('Service error occurred during cleanup_host'))
+            pass
+
+        super(Service, self).stop()
+
+    def periodic_tasks(self, raise_on_error=False):
+        """Tasks to be run at a periodic interval."""
+        ctxt = context.get_admin_context()
+        return self.manager.periodic_tasks(ctxt, raise_on_error=raise_on_error)
+
+    def basic_config_check(self):
+        """Perform basic config checks before starting processing."""
+        # Make sure the tempdir exists and is writable
+        try:
+            with utils.tempdir():
+                pass
+        except Exception as e:
+            LOG.error(_LE('Temporary directory is invalid: %s'), e)
+            sys.exit(1)
+
+    def reset(self):
+        """reset the service."""
+        self.manager.reset()
+
+
+def process_launcher():
+    return service.ProcessLauncher(CONF, restart_method='mutate')
+
+
+# NOTE(vish): the global launcher is to maintain the existing
+#             functionality of calling service.serve +
+#             service.wait
+_launcher = None
+
+
+def serve(server, workers=None):
+    global _launcher
+    if _launcher:
+        raise RuntimeError(_('serve() can only be called once'))
+
+    _launcher = service.launch(CONF, server, workers=workers,
+                               restart_method='mutate')
+
+
+def wait():
+    _launcher.wait()

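Service.create() fills in most of its arguments from the binary name: the topic is whatever follows the 'gosbs-' prefix and the manager class comes from the SERVICE_MANAGERS table. A quick illustration of that derivation, reusing the table from the module above:

SERVICE_MANAGERS = {
    'gosbs-gitmirror': 'gosbs.gitmirror.manager.GitMirrorManager',
    'gosbs-scheduler': 'gosbs.scheduler.manager.SchedulerManager',
}

binary = 'gosbs-scheduler'
# Same derivation as in Service.create().
topic = binary.rpartition('gosbs-')[2]
manager = SERVICE_MANAGERS.get(binary)
print(topic)    # scheduler
print(manager)  # gosbs.scheduler.manager.SchedulerManager
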
diff --git a/pym/tbc/__init__.py b/gosbs/tasks/__init__.py
similarity index 100%
copy from pym/tbc/__init__.py
copy to gosbs/tasks/__init__.py

diff --git a/gosbs/tasks/scheduler/__init__.py b/gosbs/tasks/scheduler/__init__.py
new file mode 100644
index 0000000..cda600c
--- /dev/null
+++ b/gosbs/tasks/scheduler/__init__.py
@@ -0,0 +1,18 @@
+# Copyright 1999-2020 Gentoo Authors
+# Distributed under the terms of the GNU General Public License v2
+
+from datetime import datetime
+
+from oslo_log import log as logging
+
+from gosbs import objects
+from gosbs.common.task import check_task_db
+
+LOG = logging.getLogger(__name__)
+
+def activete_all_tasks(context, service_uuid):
+    # Tasks
+    check_task_db(context, 'update_repos', datetime(1, 1, 1, 0, 15, 0, 0), True, service_uuid)
+    check_task_db(context, 'update_git', datetime(1, 1, 1, 0, 5, 0, 0), True, service_uuid)
+    check_task_db(context, 'update_db', datetime(1, 1, 1, 0, 5, 0, 0), True, service_uuid)
+    check_task_db(context, 'rebuild_db', datetime(1, 1, 1, 0, 10, 0, 0), True, service_uuid)

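The datetime(1, 1, 1, ...) values passed to check_task_db() above appear to encode the repeat interval in the time-of-day fields (15, 5 and 10 minutes). Assuming that reading is right, a small sketch of how such a value maps to a timedelta (interval_from_datetime() is hypothetical, not part of the commit):

from datetime import datetime, timedelta

def interval_from_datetime(dt):
    # Treat the hour/minute/second part of the stored datetime as the
    # repeat interval of the task (an assumption about check_task_db()).
    return timedelta(hours=dt.hour, minutes=dt.minute, seconds=dt.second)

print(interval_from_datetime(datetime(1, 1, 1, 0, 15, 0, 0)))  # 0:15:00
print(interval_from_datetime(datetime(1, 1, 1, 0, 5, 0, 0)))   # 0:05:00
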
diff --git a/gosbs/tasks/scheduler/rebuild_db.py b/gosbs/tasks/scheduler/rebuild_db.py
new file mode 100644
index 0000000..2eee2b8
--- /dev/null
+++ b/gosbs/tasks/scheduler/rebuild_db.py
@@ -0,0 +1,52 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+
+from gosbs.common.portage_settings import get_portage_settings
+from gosbs.scheduler.category import check_c_db
+from gosbs.scheduler.package import check_cp_db, get_all_cp_from_repo
+from gosbs.scheduler.project import get_project
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def rebuild_repo_db_thread(context, service_repo_db):
+    project_db, project_metadata_db = get_project(context, service_repo_db)
+    repo_db = objects.repo.Repo.get_by_uuid(context, service_repo_db.repo_uuid)
+    mysettings, myportdb = get_portage_settings(context, project_metadata_db, project_db.name)
+    repo_path = CONF.repopath + '/' + repo_db.name + '.git'
+    LOG.debug("Rebuilding repo %s", repo_db.name)
+    for cp in sorted(get_all_cp_from_repo(myportdb, repo_path)):
+        category = cp.split('/')[0]
+        succesc = check_c_db(context, category, repo_db)
+        category_db = objects.category.Category.get_by_name(context, category)
+        succesp = check_cp_db(context, myportdb, cp, repo_db, category_db)
+    return True
+
+def task(context, service_uuid):
+    filters = {
+               'service_uuid' : service_uuid,
+               'status' : 'rebuild_db',
+               }
+    for service_repo_db in objects.service_repo.ServiceRepoList.get_all(context, filters=filters):
+        service_repo_db.status = 'in-progress'
+        service_repo_db.save(context)
+        succes = rebuild_repo_db_thread(context, service_repo_db)
+        if not succes:
+            service_repo_db.status = 'failed'
+        else:
+            service_repo_db.status = 'completed'
+        service_repo_db.save(context)

diff --git a/pym/tbc/__init__.py b/gosbs/tasks/scheduler/sub/__init__.py
similarity index 100%
rename from pym/tbc/__init__.py
rename to gosbs/tasks/scheduler/sub/__init__.py

diff --git a/gosbs/tasks/scheduler/sub/build.py b/gosbs/tasks/scheduler/sub/build.py
new file mode 100644
index 0000000..20f981a
--- /dev/null
+++ b/gosbs/tasks/scheduler/sub/build.py
@@ -0,0 +1,73 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+
+from gosbs.common.portage_settings import get_portage_settings, clean_portage_settings
+from gosbs.common.flags import get_all_cpv_use, filter_flags, get_iuse, reduce_flags
+from gosbs import objects
+
+LOG = logging.getLogger(__name__)
+
+def build_sub_task(context, cp, ebuild_version_tree_dict_new, repo_db):
+    user_db = objects.user.User.get_by_name(context, 'scheduler')
+    filters = { 'build' : True,
+               'repo_uuid' : repo_db.uuid,
+               }
+    for project_repo_db in objects.project_repo.ProjectRepoList.get_all(context, filters=filters):
+        project_db = objects.project.Project.get_by_uuid(context, project_repo_db.project_uuid)
+        if project_db.active and project_db.auto:
+            project_metadata_db = objects.project_metadata.ProjectMetadata.get_by_uuid(context, project_db.uuid)
+            mysettings, myportdb = get_portage_settings(context, project_metadata_db, project_db.name)
+            build_cpv = myportdb.xmatch('bestmatch-visible', cp)
+            if build_cpv != "" and build_cpv in ebuild_version_tree_dict_new:
+                (final_use, use_expand_hidden, usemasked, useforced) = \
+                    get_all_cpv_use(build_cpv, myportdb, mysettings)
+                iuse_flags = filter_flags(get_iuse(build_cpv, myportdb), use_expand_hidden,
+                            usemasked, useforced, mysettings)
+                final_flags = filter_flags(final_use,  use_expand_hidden,
+                            usemasked, useforced, mysettings)
+                iuse_flags2 = reduce_flags(iuse_flags)
+                iuse_flags_list = list(set(iuse_flags2))
+                use_disable = list(set(iuse_flags_list).difference(set(final_flags)))
+                # Make a dict with enable and disable use flags for ebuildqueuedwithuses
+                use_flagsDict = {}
+                for x in final_flags:
+                    use_flagsDict[x] = True
+                for x in use_disable:
+                    use_flagsDict[x] = False
+                enable_test = False
+                if project_repo_db.test and "test" not in usemasked:
+                    enable_test = True
+                restrictions_filters = { 'ebuild_uuid' : ebuild_version_tree_dict_new[build_cpv], }
+                restrictions_list = objects.ebuild_restriction.EbuildRestrictionList.get_all(context, filters=restrictions_filters)
+                if restrictions_list is not None:
+                    if ("test" in restrictions_list or "fetch" in restrictions_list) and "test" in use_flagsDict:
+                        enable_test = False
+                if "test" in use_flagsDict and enable_test:
+                    use_flagsDict['test'] = True
+                project_build_db = objects.project_build.ProjectBuild()
+                project_build_db.project_uuid = project_db.uuid
+                project_build_db.ebuild_uuid = ebuild_version_tree_dict_new[build_cpv]
+                project_build_db.status = 'waiting'
+                project_build_db.user_id = user_db.id
+                project_build_db.create(context)
+                for k, v in use_flagsDict.items():
+                    iuse_db = objects.use.Use.get_by_name(context, k)
+                    build_iuse_db = objects.build_iuse.BuildIUse()
+                    build_iuse_db.build_uuid = project_build_db.uuid
+                    build_iuse_db.use_id = iuse_db.id
+                    build_iuse_db.status = v
+                    build_iuse_db.create(context)
+            clean_portage_settings(myportdb)
+    return True

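The USE handling in build_sub_task() above boils down to: every flag in the resolved final set is enabled, every other IUSE flag is disabled. A small standalone sketch of that step (build_use_dict() is a hypothetical helper, not part of the commit):

def build_use_dict(iuse_flags, final_flags):
    # Enabled flags come from the resolved final set; everything else
    # that the ebuild declares in IUSE is recorded as disabled.
    use_flags = {}
    for flag in final_flags:
        use_flags[flag] = True
    for flag in set(iuse_flags) - set(final_flags):
        use_flags[flag] = False
    return use_flags

print(build_use_dict(['ssl', 'debug', 'test'], ['ssl']))
# e.g. {'ssl': True, 'debug': False, 'test': False}
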
diff --git a/gosbs/tasks/scheduler/update_db.py b/gosbs/tasks/scheduler/update_db.py
new file mode 100644
index 0000000..0800e28
--- /dev/null
+++ b/gosbs/tasks/scheduler/update_db.py
@@ -0,0 +1,60 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+
+from gosbs.common.portage_settings import get_portage_settings, clean_portage_settings
+from gosbs.scheduler.package import check_cp_db
+from gosbs.scheduler.project import get_project
+from gosbs.tasks.scheduler.sub.build import build_sub_task
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def update_repo_db_multi_thread(context, myportdb, repo_db, package_db):
+    category_db = objects.category.Category.get_by_uuid(context, package_db.category_uuid)
+    cp = category_db.name + '/' + package_db.name
+    succes, ebuild_version_tree_dict_new = check_cp_db(context, myportdb, cp, repo_db, category_db)
+    # sub tasks
+    succes = build_sub_task(context, cp, ebuild_version_tree_dict_new, repo_db)
+    return True
+
+def update_repo_db_thread(context, service_repo_db):
+    project_db, project_metadata_db = get_project(context, service_repo_db)
+    repo_db = objects.repo.Repo.get_by_uuid(context, service_repo_db.repo_uuid)
+    mysettings, myportdb = get_portage_settings(context, project_metadata_db, project_db.name)
+    succes = True
+    filters = { 'repo_uuid' : repo_db.uuid,
+                       'status' : 'waiting',
+                    }
+    for package_db in objects.package.PackageList.get_all(context, filters=filters):
+        succes = update_repo_db_multi_thread(context, myportdb, repo_db, package_db)
+    clean_portage_settings(myportdb)
+    return succes
+
+def task(context, service_uuid):
+    filters = {
+               'service_uuid' : service_uuid,
+               'status' : 'update_db',
+               }
+    for service_repo_db in objects.service_repo.ServiceRepoList.get_all(context, filters=filters):
+        service_repo_db.status = 'in-progress'
+        service_repo_db.save(context)
+        succes = update_repo_db_thread(context, service_repo_db)
+        if not succes:
+            service_repo_db.status = 'failed'
+        else:
+            service_repo_db.status = 'completed'
+        service_repo_db.save(context)

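Both update_db and rebuild_db wrap their per-repo work in the same status sequence: in-progress, then completed or failed. A sketch of that shared wrapper on its own (run_with_status() and the dict stand-in are illustrative, not part of the commit):

def run_with_status(service_repo, work):
    # Record the same transitions as the task() functions above.
    service_repo['status'] = 'in-progress'
    ok = work()
    service_repo['status'] = 'completed' if ok else 'failed'
    return ok

repo = {'status': 'update_db'}
run_with_status(repo, lambda: True)
print(repo['status'])  # completed
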
diff --git a/gosbs/tasks/scheduler/update_git.py b/gosbs/tasks/scheduler/update_git.py
new file mode 100644
index 0000000..ac63966
--- /dev/null
+++ b/gosbs/tasks/scheduler/update_git.py
@@ -0,0 +1,103 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+import sys
+
+from oslo_log import log as logging
+from gosbs import objects
+from gosbs.common.git import check_git_repo, check_git_repo_db
+from gosbs.scheduler.category import check_c_db
+from gosbs.scheduler.package import create_cp_db
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def update_repo_git_thread(context, service_uuid, repo_db):
+    repo_dict = { 'repo_uuid' : repo_db.uuid,
+               'repo_name' : repo_db.name,
+               'repo_url' : repo_db.src_url,
+               'repo_type' : repo_db.repo_type,
+               'repo_path' : CONF.repopath + '/' + repo_db.name + '.git',
+               'history' : True,
+                   }
+    repo_db.status = 'in-progress'
+    repo_db.save(context)
+    filters = {
+               'repo_uuid' : repo_db.uuid,
+               'service_uuid' : service_uuid,
+               }
+    service_repo_db = objects.service_repo.ServiceRepo.get_by_filters(context, filters=filters)
+    if service_repo_db is None:
+        service_repo_db = objects.service_repo.ServiceRepo()
+        service_repo_db.repo_uuid = repo_db.uuid
+        service_repo_db.service_uuid = service_uuid
+        service_repo_db.auto = repo_db.auto
+        service_repo_db.status = 'in-progress'
+        service_repo_db.create(context)
+    else:
+        service_repo_db.status = 'in-progress'
+        service_repo_db.save(context)
+    cp_list = []
+    if repo_db.repo_type == 'project':
+        succes = check_git_repo(repo_dict)
+    else:
+        succes, cp_list = check_git_repo_db(repo_dict)
+    if not succes:
+        repo_db.status = 'failed'
+        repo_db.save(context)
+        service_repo_db.status = 'failed'
+        service_repo_db.save(context)
+        return False
+    repo_db.status = 'completed'
+    repo_db.save(context)
+    if cp_list is None:
+        service_repo_db.status = 'rebuild_db'
+        service_repo_db.save(context)
+        return True
+    if cp_list == []:
+        service_repo_db.status = 'completed'
+        service_repo_db.save(context)
+        return True
+    for cp in cp_list:
+        category = cp.split('/')[0]
+        package = cp.split('/')[1]
+        succesc = check_c_db(context, category, repo_db)
+        category_db = objects.category.Category.get_by_name(context, category)
+        filters = { 'repo_uuid' : repo_dict['repo_uuid'],
+                   'category_uuid' : category_db.uuid,
+                }
+        package_db = objects.package.Package.get_by_name(context, package, filters=filters)
+        if package_db is None:
+            LOG.info("Adding %s to the database", package)
+            package_db = create_cp_db(context, package, repo_db, category_db)
+        package_db.status = 'waiting'
+        package_db.save(context)
+    service_repo_db.status = 'update_db'
+    service_repo_db.save(context)
+    return True
+
+def task(context, service_uuid):
+    filters = { 'status' : 'waiting',
+               'deleted' : False,
+               }
+    for repo_db in objects.repo.RepoList.get_all(context, filters=filters):
+        if repo_db is None:
+            return
+        succes = update_repo_git_thread(context, service_uuid, repo_db)
+    return
+    #with futurist.GreenThreadPoolExecutor(max_workers=1) as executor:
+        # Start the load operations and mark each future with its URL
+    #    for cp in cp_list:
+    #        future = executor.submit(update_cpv_db_thread, context, cp, repo_uuid, project_uuid)
+    #        print(future.result())

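The commented-out block at the end of update_git.py hints at pushing the per-repo work through futurist's green thread pool instead of the serial loop in task(). A hedged sketch of what that could look like, assuming update_repo_git_thread() is safe to run concurrently and that the futurist dependency is available:

import futurist

def task_parallel(context, service_uuid, repo_list, max_workers=4):
    # Submit one green thread per repo; the with-block waits for them all.
    futures = []
    with futurist.GreenThreadPoolExecutor(max_workers=max_workers) as executor:
        for repo_db in repo_list:
            futures.append(executor.submit(
                update_repo_git_thread, context, service_uuid, repo_db))
    return all(future.result() for future in futures)
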
diff --git a/gosbs/tasks/scheduler/update_repos.py b/gosbs/tasks/scheduler/update_repos.py
new file mode 100644
index 0000000..a28689a
--- /dev/null
+++ b/gosbs/tasks/scheduler/update_repos.py
@@ -0,0 +1,23 @@
+# Copyright 1999-2020 Gentoo Authors
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+from oslo_log import log as logging
+from gosbs import objects
+import gosbs.conf
+
+CONF = gosbs.conf.CONF
+LOG = logging.getLogger(__name__)
+
+def task(context, service_uuid):
+    LOG.info('Update Repos')
+    objects.repo.RepoList.update_all(context)

diff --git a/gosbs/utils.py b/gosbs/utils.py
new file mode 100644
index 0000000..11ee94d
--- /dev/null
+++ b/gosbs/utils.py
@@ -0,0 +1,1358 @@
+# Copyright 2010 United States Government as represented by the
+# Administrator of the National Aeronautics and Space Administration.
+# Copyright 2011 Justin Santa Barbara
+# All Rights Reserved.
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/utils.py
+
+"""Utilities and helper functions."""
+
+import contextlib
+import copy
+import datetime
+import functools
+import hashlib
+import inspect
+import os
+import random
+import re
+import shutil
+import tempfile
+import time
+
+import eventlet
+from keystoneauth1 import exceptions as ks_exc
+from keystoneauth1 import loading as ks_loading
+import netaddr
+from os_service_types import service_types
+from oslo_concurrency import lockutils
+from oslo_concurrency import processutils
+from oslo_context import context as common_context
+from oslo_log import log as logging
+import oslo_messaging as messaging
+from oslo_utils import encodeutils
+from oslo_utils import excutils
+from oslo_utils import importutils
+from oslo_utils import strutils
+from oslo_utils import timeutils
+from oslo_utils import units
+import six
+from six.moves import range
+from six.moves import reload_module
+
+import gosbs.conf
+from gosbs import debugger
+from gosbs import exception
+from gosbs.i18n import _, _LE, _LI, _LW
+#import gosbs.network
+from gosbs import safe_utils
+
+profiler = importutils.try_import('osprofiler.profiler')
+
+CONF = gosbs.conf.CONF
+
+LOG = logging.getLogger(__name__)
+
+_IS_NEUTRON = None
+
+synchronized = lockutils.synchronized_with_prefix('nova-')
+
+SM_IMAGE_PROP_PREFIX = "image_"
+SM_INHERITABLE_KEYS = (
+    'min_ram', 'min_disk', 'disk_format', 'container_format',
+)
+# Keys which hold large structured data that won't fit in the
+# size constraints of the system_metadata table, so we avoid
+# storing and/or loading them.
+SM_SKIP_KEYS = (
+    # Legacy names
+    'mappings', 'block_device_mapping',
+    # Modern names
+    'img_mappings', 'img_block_device_mapping',
+)
+# Image attributes which Cinder stores in volume image metadata
+# as regular properties
+VIM_IMAGE_ATTRIBUTES = (
+    'image_id', 'image_name', 'size', 'checksum',
+    'container_format', 'disk_format', 'min_ram', 'min_disk',
+)
+
+_FILE_CACHE = {}
+
+_SERVICE_TYPES = service_types.ServiceTypes()
+
+
+if hasattr(inspect, 'getfullargspec'):
+    getargspec = inspect.getfullargspec
+else:
+    getargspec = inspect.getargspec
+
+
+def get_root_helper():
+    if CONF.workarounds.disable_rootwrap:
+        cmd = 'sudo'
+    else:
+        cmd = 'sudo nova-rootwrap %s' % CONF.rootwrap_config
+    return cmd
+
+
+class RootwrapProcessHelper(object):
+    def trycmd(self, *cmd, **kwargs):
+        kwargs['root_helper'] = get_root_helper()
+        return processutils.trycmd(*cmd, **kwargs)
+
+    def execute(self, *cmd, **kwargs):
+        kwargs['root_helper'] = get_root_helper()
+        return processutils.execute(*cmd, **kwargs)
+
+
+class RootwrapDaemonHelper(RootwrapProcessHelper):
+    _clients = {}
+
+    @synchronized('daemon-client-lock')
+    def _get_client(cls, rootwrap_config):
+        try:
+            return cls._clients[rootwrap_config]
+        except KeyError:
+            from oslo_rootwrap import client
+            new_client = client.Client([
+                "sudo", "nova-rootwrap-daemon", rootwrap_config])
+            cls._clients[rootwrap_config] = new_client
+            return new_client
+
+    def __init__(self, rootwrap_config):
+        self.client = self._get_client(rootwrap_config)
+
+    def trycmd(self, *args, **kwargs):
+        discard_warnings = kwargs.pop('discard_warnings', False)
+        try:
+            out, err = self.execute(*args, **kwargs)
+            failed = False
+        except processutils.ProcessExecutionError as exn:
+            out, err = '', six.text_type(exn)
+            failed = True
+        if not failed and discard_warnings and err:
+            # Handle commands that output to stderr but otherwise succeed
+            err = ''
+        return out, err
+
+    def execute(self, *cmd, **kwargs):
+        # NOTE(dims): This method is to provide compatibility with the
+        # processutils.execute interface. So that calling daemon or direct
+        # rootwrap to honor the same set of flags in kwargs and to ensure
+        # that we don't regress any current behavior.
+        cmd = [str(c) for c in cmd]
+        loglevel = kwargs.pop('loglevel', logging.DEBUG)
+        log_errors = kwargs.pop('log_errors', None)
+        process_input = kwargs.pop('process_input', None)
+        delay_on_retry = kwargs.pop('delay_on_retry', True)
+        attempts = kwargs.pop('attempts', 1)
+        check_exit_code = kwargs.pop('check_exit_code', [0])
+        ignore_exit_code = False
+        if isinstance(check_exit_code, bool):
+            ignore_exit_code = not check_exit_code
+            check_exit_code = [0]
+        elif isinstance(check_exit_code, int):
+            check_exit_code = [check_exit_code]
+
+        sanitized_cmd = strutils.mask_password(' '.join(cmd))
+        LOG.info(_LI('Executing RootwrapDaemonHelper.execute '
+                     'cmd=[%(cmd)r] kwargs=[%(kwargs)r]'),
+                 {'cmd': sanitized_cmd, 'kwargs': kwargs})
+
+        while attempts > 0:
+            attempts -= 1
+            try:
+                start_time = time.time()
+                LOG.log(loglevel, _('Running cmd (subprocess): %s'),
+                        sanitized_cmd)
+
+                (returncode, out, err) = self.client.execute(
+                    cmd, process_input)
+
+                end_time = time.time() - start_time
+                LOG.log(loglevel,
+                        'CMD "%(sanitized_cmd)s" returned: %(return_code)s '
+                        'in %(end_time)0.3fs',
+                        {'sanitized_cmd': sanitized_cmd,
+                         'return_code': returncode,
+                         'end_time': end_time})
+
+                if not ignore_exit_code and returncode not in check_exit_code:
+                    out = strutils.mask_password(out)
+                    err = strutils.mask_password(err)
+                    raise processutils.ProcessExecutionError(
+                        exit_code=returncode,
+                        stdout=out,
+                        stderr=err,
+                        cmd=sanitized_cmd)
+                return (out, err)
+
+            except processutils.ProcessExecutionError as err:
+                # if we want to always log the errors or if this is
+                # the final attempt that failed and we want to log that.
+                if log_errors == processutils.LOG_ALL_ERRORS or (
+                                log_errors == processutils.LOG_FINAL_ERROR and
+                            not attempts):
+                    format = _('%(desc)r\ncommand: %(cmd)r\n'
+                               'exit code: %(code)r\nstdout: %(stdout)r\n'
+                               'stderr: %(stderr)r')
+                    LOG.log(loglevel, format, {"desc": err.description,
+                                               "cmd": err.cmd,
+                                               "code": err.exit_code,
+                                               "stdout": err.stdout,
+                                               "stderr": err.stderr})
+                if not attempts:
+                    LOG.log(loglevel, _('%r failed. Not Retrying.'),
+                            sanitized_cmd)
+                    raise
+                else:
+                    LOG.log(loglevel, _('%r failed. Retrying.'),
+                            sanitized_cmd)
+                    if delay_on_retry:
+                        time.sleep(random.randint(20, 200) / 100.0)
+
+
+def execute(*cmd, **kwargs):
+    """Convenience wrapper around oslo's execute() method."""
+    if 'run_as_root' in kwargs and kwargs.get('run_as_root'):
+        if CONF.use_rootwrap_daemon:
+            return RootwrapDaemonHelper(CONF.rootwrap_config).execute(
+                *cmd, **kwargs)
+        else:
+            return RootwrapProcessHelper().execute(*cmd, **kwargs)
+    return processutils.execute(*cmd, **kwargs)
+
+
+def ssh_execute(dest, *cmd, **kwargs):
+    """Convenience wrapper to execute ssh command."""
+    ssh_cmd = ['ssh', '-o', 'BatchMode=yes']
+    ssh_cmd.append(dest)
+    ssh_cmd.extend(cmd)
+    return execute(*ssh_cmd, **kwargs)
+
+
+def generate_uid(topic, size=8):
+    characters = '01234567890abcdefghijklmnopqrstuvwxyz'
+    choices = [random.choice(characters) for _x in range(size)]
+    return '%s-%s' % (topic, ''.join(choices))
+
+
+# Default symbols to use for passwords. Avoids visually confusing characters.
+# ~6 bits per symbol
+DEFAULT_PASSWORD_SYMBOLS = ('23456789',  # Removed: 0,1
+                            'ABCDEFGHJKLMNPQRSTUVWXYZ',   # Removed: I, O
+                            'abcdefghijkmnopqrstuvwxyz')  # Removed: l
+
+
+def last_completed_audit_period(unit=None, before=None):
+    """This method gives you the most recently *completed* audit period.
+
+    arguments:
+            unit: string, one of 'hour', 'day', 'month', 'year'
+                    Periods normally begin at the beginning (UTC) of the
+                    period unit (so a 'day' period begins at midnight UTC,
+                    a 'month' unit on the 1st, a 'year' on Jan 1)
+                    unit string may be appended with an optional offset
+                    like so:  'day@18'  This will begin the period at 18:00
+                    UTC.  'month@15' starts a monthly period on the 15th,
+                    and year@3 begins a yearly one on March 1st.
+            before: Give the audit period most recently completed before
+                    <timestamp>. Defaults to now.
+
+
+    returns:  2 tuple of datetimes (begin, end)
+              The begin timestamp of this audit period is the same as the
+              end of the previous.
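+
+              Example (illustrative): with unit='day' and
+              before=2020-04-04 10:00 UTC the result is
+              (2020-04-03 00:00 UTC, 2020-04-04 00:00 UTC).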
+    """
+    if not unit:
+        unit = CONF.instance_usage_audit_period
+
+    offset = 0
+    if '@' in unit:
+        unit, offset = unit.split("@", 1)
+        offset = int(offset)
+
+    if before is not None:
+        rightnow = before
+    else:
+        rightnow = timeutils.utcnow()
+    if unit not in ('month', 'day', 'year', 'hour'):
+        raise ValueError(_('Time period must be hour, day, month or year'))
+    if unit == 'month':
+        if offset == 0:
+            offset = 1
+        end = datetime.datetime(day=offset,
+                                month=rightnow.month,
+                                year=rightnow.year)
+        if end >= rightnow:
+            year = rightnow.year
+            if 1 >= rightnow.month:
+                year -= 1
+                month = 12 + (rightnow.month - 1)
+            else:
+                month = rightnow.month - 1
+            end = datetime.datetime(day=offset,
+                                    month=month,
+                                    year=year)
+        year = end.year
+        if 1 >= end.month:
+            year -= 1
+            month = 12 + (end.month - 1)
+        else:
+            month = end.month - 1
+        begin = datetime.datetime(day=offset, month=month, year=year)
+
+    elif unit == 'year':
+        if offset == 0:
+            offset = 1
+        end = datetime.datetime(day=1, month=offset, year=rightnow.year)
+        if end >= rightnow:
+            end = datetime.datetime(day=1,
+                                    month=offset,
+                                    year=rightnow.year - 1)
+            begin = datetime.datetime(day=1,
+                                      month=offset,
+                                      year=rightnow.year - 2)
+        else:
+            begin = datetime.datetime(day=1,
+                                      month=offset,
+                                      year=rightnow.year - 1)
+
+    elif unit == 'day':
+        end = datetime.datetime(hour=offset,
+                               day=rightnow.day,
+                               month=rightnow.month,
+                               year=rightnow.year)
+        if end >= rightnow:
+            end = end - datetime.timedelta(days=1)
+        begin = end - datetime.timedelta(days=1)
+
+    elif unit == 'hour':
+        end = rightnow.replace(minute=offset, second=0, microsecond=0)
+        if end >= rightnow:
+            end = end - datetime.timedelta(hours=1)
+        begin = end - datetime.timedelta(hours=1)
+
+    return (begin, end)
+
+
+def generate_password(length=None, symbolgroups=DEFAULT_PASSWORD_SYMBOLS):
+    """Generate a random password from the supplied symbol groups.
+
+    At least one symbol from each group will be included. Unpredictable
+    results if length is less than the number of symbol groups.
+
+    Believed to be reasonably secure (with a reasonable password length!)
+
+    """
+    if length is None:
+        length = CONF.password_length
+
+    r = random.SystemRandom()
+
+    # NOTE(jerdfelt): Some password policies require at least one character
+    # from each group of symbols, so start off with one random character
+    # from each symbol group
+    password = [r.choice(s) for s in symbolgroups]
+    # If length < len(symbolgroups), the leading characters will only
+    # be from the first length groups. Try our best to not be predictable
+    # by shuffling and then truncating.
+    r.shuffle(password)
+    password = password[:length]
+    length -= len(password)
+
+    # then fill with random characters from all symbol groups
+    symbols = ''.join(symbolgroups)
+    password.extend([r.choice(symbols) for _i in range(length)])
+
+    # finally shuffle to ensure first x characters aren't from a
+    # predictable group
+    r.shuffle(password)
+
+    return ''.join(password)
+
+
+# TODO(sfinucan): Replace this with the equivalent from oslo.utils
+def utf8(value):
+    """Try to turn a string into utf-8 if possible.
+
+    The original code was copied from the utf8 function in
+    http://github.com/facebook/tornado/blob/master/tornado/escape.py
+
+    """
+    if value is None or isinstance(value, six.binary_type):
+        return value
+
+    if not isinstance(value, six.text_type):
+        value = six.text_type(value)
+
+    return value.encode('utf-8')
+
+
+def parse_server_string(server_str):
+    """Parses the given server_string and returns a tuple of host and port.
+    If it's not a combination of host part and port, the port element
+    is an empty string. If the input is an invalid expression, a tuple of
+    two empty strings is returned.
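+
+    Illustrative examples of the returned tuples:
+
+        parse_server_string('192.168.1.1:8080')   # ('192.168.1.1', '8080')
+        parse_server_string('[2001:db8::1]:443')  # ('2001:db8::1', '443')
+        parse_server_string('2001:db8::1')        # ('2001:db8::1', '')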
+    """
+    try:
+        # First of all, exclude pure IPv6 address (w/o port).
+        if netaddr.valid_ipv6(server_str):
+            return (server_str, '')
+
+        # Next, check if this is IPv6 address with a port number combination.
+        if server_str.find("]:") != -1:
+            (address, port) = server_str.replace('[', '', 1).split(']:')
+            return (address, port)
+
+        # Third, check if this is a combination of an address and a port
+        if server_str.find(':') == -1:
+            return (server_str, '')
+
+        # This must be a combination of an address and a port
+        (address, port) = server_str.split(':')
+        return (address, port)
+
+    except (ValueError, netaddr.AddrFormatError):
+        LOG.error(_LE('Invalid server_string: %s'), server_str)
+        return ('', '')
+
+
+def get_shortened_ipv6(address):
+    addr = netaddr.IPAddress(address, version=6)
+    return str(addr.ipv6())
+
+
+def get_shortened_ipv6_cidr(address):
+    net = netaddr.IPNetwork(address, version=6)
+    return str(net.cidr)
+
+
+def safe_ip_format(ip):
+    """Transform ip string to "safe" format.
+
+    Will return ipv4 addresses unchanged, but will nest ipv6 addresses
+    inside square brackets.
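+
+    Illustrative examples:
+
+        safe_ip_format('192.168.0.1')  # '192.168.0.1'
+        safe_ip_format('2001:db8::1')  # '[2001:db8::1]'
+        safe_ip_format('myhost')       # 'myhost'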
+    """
+    try:
+        if netaddr.IPAddress(ip).version == 6:
+            return '[%s]' % ip
+    except (TypeError, netaddr.AddrFormatError):  # hostname
+        pass
+    # it's IPv4 or hostname
+    return ip
+
+
+def format_remote_path(host, path):
+    """Returns remote path in format acceptable for scp/rsync.
+
+    If host is IPv6 address literal, return '[host]:path', otherwise
+    'host:path' is returned.
+
+    If host is None, only path is returned.
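+
+    Illustrative examples:
+
+        format_remote_path('2001:db8::1', '/srv')  # '[2001:db8::1]:/srv'
+        format_remote_path('host1', '/srv')        # 'host1:/srv'
+        format_remote_path(None, '/srv')           # '/srv'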
+    """
+    if host is None:
+        return path
+
+    return "%s:%s" % (safe_ip_format(host), path)
+
+
+def make_dev_path(dev, partition=None, base='/dev'):
+    """Return a path to a particular device.
+
+    >>> make_dev_path('xvdc')
+    /dev/xvdc
+
+    >>> make_dev_path('xvdc', 1)
+    /dev/xvdc1
+    """
+    path = os.path.join(base, dev)
+    if partition:
+        path += str(partition)
+    return path
+
+
+def sanitize_hostname(hostname, default_name=None):
+    """Return a hostname which conforms to RFC-952 and RFC-1123 specs except
+       the length of hostname.
+
+       Windows, Linux, and dnsmasq have different limits:
+
+       Windows: 255 (NetBIOS limits it to 15, but Windows will truncate it)
+       Linux: 64
+       Dnsmasq: 63
+
+       Because nova-network leverages dnsmasq to set the hostname, we chose
+       63.
+
+       """
+
+    def truncate_hostname(name):
+        if len(name) > 63:
+            LOG.warning(_LW("Hostname %(hostname)s is longer than 63, "
+                            "truncate it to %(truncated_name)s"),
+                            {'hostname': name, 'truncated_name': name[:63]})
+        return name[:63]
+
+    if isinstance(hostname, six.text_type):
+        # Remove characters outside the Unicode range U+0000-U+00FF
+        hostname = hostname.encode('latin-1', 'ignore')
+        if six.PY3:
+            hostname = hostname.decode('latin-1')
+
+    hostname = truncate_hostname(hostname)
+    hostname = re.sub('[ _]', '-', hostname)
+    hostname = re.sub(r'[^\w.-]+', '', hostname)
+    hostname = hostname.lower()
+    hostname = hostname.strip('.-')
+    # NOTE(eliqiao): set hostname to default_display_name to avoid
+    # empty hostname
+    if hostname == "" and default_name is not None:
+        return truncate_hostname(default_name)
+    return hostname
+
+
+@contextlib.contextmanager
+def temporary_mutation(obj, **kwargs):
+    """Temporarily set the attr on a particular object to a given value then
+    revert when finished.
+
+    One use of this is to temporarily set the read_deleted flag on a context
+    object:
+
+        with temporary_mutation(context, read_deleted="yes"):
+            do_something_that_needed_deleted_objects()
+    """
+    def is_dict_like(thing):
+        return hasattr(thing, 'has_key') or isinstance(thing, dict)
+
+    def get(thing, attr, default):
+        if is_dict_like(thing):
+            return thing.get(attr, default)
+        else:
+            return getattr(thing, attr, default)
+
+    def set_value(thing, attr, val):
+        if is_dict_like(thing):
+            thing[attr] = val
+        else:
+            setattr(thing, attr, val)
+
+    def delete(thing, attr):
+        if is_dict_like(thing):
+            del thing[attr]
+        else:
+            delattr(thing, attr)
+
+    NOT_PRESENT = object()
+
+    old_values = {}
+    for attr, new_value in kwargs.items():
+        old_values[attr] = get(obj, attr, NOT_PRESENT)
+        set_value(obj, attr, new_value)
+
+    try:
+        yield
+    finally:
+        for attr, old_value in old_values.items():
+            if old_value is NOT_PRESENT:
+                delete(obj, attr)
+            else:
+                set_value(obj, attr, old_value)
+
+
+def generate_mac_address():
+    """Generate an Ethernet MAC address."""
+    # NOTE(vish): We would prefer to use 0xfe here to ensure that linux
+    #             bridge mac addresses don't change, but it appears to
+    #             conflict with libvirt, so we use the next highest octet
+    #             that has the unicast and locally administered bits set
+    #             properly: 0xfa.
+    #             Discussion: https://bugs.launchpad.net/nova/+bug/921838
+    mac = [0xfa, 0x16, 0x3e,
+           random.randint(0x00, 0xff),
+           random.randint(0x00, 0xff),
+           random.randint(0x00, 0xff)]
+    return ':'.join(map(lambda x: "%02x" % x, mac))
+
+
+# NOTE(mikal): I really wanted this code to go away, but I can't find a way
+# to implement what the callers of this method want with privsep. Basically,
+# if we could hand off either a file descriptor or a file like object then
+# we could make this go away.
+@contextlib.contextmanager
+def temporary_chown(path, owner_uid=None):
+    """Temporarily chown a path.
+
+    :param owner_uid: UID of temporary owner (defaults to current user)
+    """
+    if owner_uid is None:
+        owner_uid = os.getuid()
+
+    orig_uid = os.stat(path).st_uid
+
+    if orig_uid != owner_uid:
+        nova.privsep.path.chown(path, uid=owner_uid)
+    try:
+        yield
+    finally:
+        if orig_uid != owner_uid:
+            nova.privsep.path.chown(path, uid=orig_uid)
+
+
+@contextlib.contextmanager
+def tempdir(**kwargs):
+    argdict = kwargs.copy()
+    if 'dir' not in argdict:
+        argdict['dir'] = CONF.tempdir
+    tmpdir = tempfile.mkdtemp(**argdict)
+    try:
+        yield tmpdir
+    finally:
+        try:
+            shutil.rmtree(tmpdir)
+        except OSError as e:
+            LOG.error(_LE('Could not remove tmpdir: %s'), e)
+
+
+class UndoManager(object):
+    """Provides a mechanism to facilitate rolling back a series of actions
+    when an exception is raised.
+    """
+    def __init__(self):
+        self.undo_stack = []
+
+    def undo_with(self, undo_func):
+        self.undo_stack.append(undo_func)
+
+    def _rollback(self):
+        for undo_func in reversed(self.undo_stack):
+            undo_func()
+
+    def rollback_and_reraise(self, msg=None, **kwargs):
+        """Rollback a series of actions then re-raise the exception.
+
+        .. note:: (sirp) This should only be called within an
+                  exception handler.
+        """
+        with excutils.save_and_reraise_exception():
+            if msg:
+                LOG.exception(msg, **kwargs)
+
+            self._rollback()
+
+
+def metadata_to_dict(metadata, include_deleted=False):
+    result = {}
+    for item in metadata:
+        if not include_deleted and item.get('deleted'):
+            continue
+        result[item['key']] = item['value']
+    return result
+
+
+def dict_to_metadata(metadata):
+    result = []
+    for key, value in metadata.items():
+        result.append(dict(key=key, value=value))
+    return result
+
+
+def instance_meta(instance):
+    if isinstance(instance['metadata'], dict):
+        return instance['metadata']
+    else:
+        return metadata_to_dict(instance['metadata'])
+
+
+def instance_sys_meta(instance):
+    if not instance.get('system_metadata'):
+        return {}
+    if isinstance(instance['system_metadata'], dict):
+        return instance['system_metadata']
+    else:
+        return metadata_to_dict(instance['system_metadata'],
+                                include_deleted=True)
+
+
+def expects_func_args(*args):
+    def _decorator_checker(dec):
+        @functools.wraps(dec)
+        def _decorator(f):
+            base_f = safe_utils.get_wrapped_function(f)
+            argspec = getargspec(base_f)
+            if argspec[1] or argspec[2] or set(args) <= set(argspec[0]):
+                # NOTE (ndipanov): We can't really tell if correct stuff will
+                # be passed if it's a function with *args or **kwargs so
+                # we still carry on and hope for the best
+                return dec(f)
+            else:
+                raise TypeError("Decorated function %(f_name)s does not "
+                                "have the arguments expected by the "
+                                "decorator %(d_name)s" %
+                                {'f_name': base_f.__name__,
+                                 'd_name': dec.__name__})
+        return _decorator
+    return _decorator_checker
+
+
+class ExceptionHelper(object):
+    """Class to wrap another and translate the ClientExceptions raised by its
+    function calls to the actual ones.
+    """
+
+    def __init__(self, target):
+        self._target = target
+
+    def __getattr__(self, name):
+        func = getattr(self._target, name)
+
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            try:
+                return func(*args, **kwargs)
+            except messaging.ExpectedException as e:
+                six.reraise(*e.exc_info)
+        return wrapper
+
+
+def check_string_length(value, name=None, min_length=0, max_length=None):
+    """Check the length of specified string
+    :param value: the value of the string
+    :param name: the name of the string
+    :param min_length: the min_length of the string
+    :param max_length: the max_length of the string
+    """
+    try:
+        strutils.check_string_length(value, name=name,
+                                     min_length=min_length,
+                                     max_length=max_length)
+    except (ValueError, TypeError) as exc:
+        raise exception.InvalidInput(message=exc.args[0])
+
+
+def validate_integer(value, name, min_value=None, max_value=None):
+    """Make sure that value is a valid integer, potentially within range.
+
+    :param value: value of the integer
+    :param name: name of the integer
+    :param min_value: min_value of the integer
+    :param max_value: max_value of the integer
+    :returns: integer
+    :raise: InvalidInput If value is not a valid integer
+    """
+    try:
+        return strutils.validate_integer(value, name, min_value, max_value)
+    except ValueError as e:
+        raise exception.InvalidInput(reason=six.text_type(e))
+
+
+def _serialize_profile_info():
+    if not profiler:
+        return None
+    prof = profiler.get()
+    trace_info = None
+    if prof:
+        # FIXME(DinaBelova): we'll add profiler.get_info() method
+        # to extract this info -> we'll need to update these lines
+        trace_info = {
+            "hmac_key": prof.hmac_key,
+            "base_id": prof.get_base_id(),
+            "parent_id": prof.get_id()
+        }
+    return trace_info
+
+
+def spawn(func, *args, **kwargs):
+    """Passthrough method for eventlet.spawn.
+
+    This utility exists so that it can be stubbed for testing without
+    interfering with the service spawns.
+
+    It will also grab the context from the threadlocal store and add it to
+    the store on the new thread.  This allows for continuity in logging the
+    context when using this method to spawn a new thread.
+    """
+    _context = common_context.get_current()
+    profiler_info = _serialize_profile_info()
+
+    @functools.wraps(func)
+    def context_wrapper(*args, **kwargs):
+        # NOTE: If update_store is not called after spawn it won't be
+        # available for the logger to pull from threadlocal storage.
+        if _context is not None:
+            _context.update_store()
+        if profiler_info and profiler:
+            profiler.init(**profiler_info)
+        return func(*args, **kwargs)
+
+    return eventlet.spawn(context_wrapper, *args, **kwargs)
+
+
+def spawn_n(func, *args, **kwargs):
+    """Passthrough method for eventlet.spawn_n.
+
+    This utility exists so that it can be stubbed for testing without
+    interfering with the service spawns.
+
+    It will also grab the context from the threadlocal store and add it to
+    the store on the new thread.  This allows for continuity in logging the
+    context when using this method to spawn a new thread.
+    """
+    _context = common_context.get_current()
+    profiler_info = _serialize_profile_info()
+
+    @functools.wraps(func)
+    def context_wrapper(*args, **kwargs):
+        # NOTE: If update_store is not called after spawn_n it won't be
+        # available for the logger to pull from threadlocal storage.
+        if _context is not None:
+            _context.update_store()
+        if profiler_info and profiler:
+            profiler.init(**profiler_info)
+        func(*args, **kwargs)
+
+    eventlet.spawn_n(context_wrapper, *args, **kwargs)
+
+
+def is_none_string(val):
+    """Check if a string represents a None value.
+    """
+    if not isinstance(val, six.string_types):
+        return False
+
+    return val.lower() == 'none'
+
+
+def is_neutron():
+    global _IS_NEUTRON
+
+    if _IS_NEUTRON is not None:
+        return _IS_NEUTRON
+
+    _IS_NEUTRON = nova.network.is_neutron()
+    return _IS_NEUTRON
+
+
+def is_auto_disk_config_disabled(auto_disk_config_raw):
+    auto_disk_config_disabled = False
+    if auto_disk_config_raw is not None:
+        adc_lowered = auto_disk_config_raw.strip().lower()
+        if adc_lowered == "disabled":
+            auto_disk_config_disabled = True
+    return auto_disk_config_disabled
+
+
+def get_auto_disk_config_from_instance(instance=None, sys_meta=None):
+    if sys_meta is None:
+        sys_meta = instance_sys_meta(instance)
+    return sys_meta.get("image_auto_disk_config")
+
+
+def get_auto_disk_config_from_image_props(image_properties):
+    return image_properties.get("auto_disk_config")
+
+
+def get_system_metadata_from_image(image_meta, flavor=None):
+    system_meta = {}
+    prefix_format = SM_IMAGE_PROP_PREFIX + '%s'
+
+    for key, value in image_meta.get('properties', {}).items():
+        if key in SM_SKIP_KEYS:
+            continue
+
+        new_value = safe_truncate(six.text_type(value), 255)
+        system_meta[prefix_format % key] = new_value
+
+    for key in SM_INHERITABLE_KEYS:
+        value = image_meta.get(key)
+
+        if key == 'min_disk' and flavor:
+            if image_meta.get('disk_format') == 'vhd':
+                value = flavor['root_gb']
+            else:
+                value = max(value or 0, flavor['root_gb'])
+
+        if value is None:
+            continue
+
+        system_meta[prefix_format % key] = value
+
+    return system_meta
+
+
+def get_image_from_system_metadata(system_meta):
+    image_meta = {}
+    properties = {}
+
+    if not isinstance(system_meta, dict):
+        system_meta = metadata_to_dict(system_meta, include_deleted=True)
+
+    for key, value in system_meta.items():
+        if value is None:
+            continue
+
+        # NOTE(xqueralt): Not sure this has to inherit all the properties or
+        # just the ones we need. Leaving it for now to keep the old behaviour.
+        if key.startswith(SM_IMAGE_PROP_PREFIX):
+            key = key[len(SM_IMAGE_PROP_PREFIX):]
+
+        if key in SM_SKIP_KEYS:
+            continue
+
+        if key in SM_INHERITABLE_KEYS:
+            image_meta[key] = value
+        else:
+            properties[key] = value
+
+    image_meta['properties'] = properties
+
+    return image_meta
+
+
+def get_image_metadata_from_volume(volume):
+    properties = copy.copy(volume.get('volume_image_metadata', {}))
+    image_meta = {'properties': properties}
+    # Volume size is no longer related to the original image size,
+    # so we take it from the volume directly. Cinder creates
+    # volumes in Gb increments, and stores size in Gb, whereas
+    # glance reports size in bytes. As we're returning glance
+    # metadata here, we need to convert it.
+    image_meta['size'] = volume.get('size', 0) * units.Gi
+    # NOTE(yjiang5): restore the basic attributes
+    # NOTE(mdbooth): These values come from volume_glance_metadata
+    # in cinder. This is a simple key/value table, and all values
+    # are strings. We need to convert them to ints to avoid
+    # unexpected type errors.
+    for attr in VIM_IMAGE_ATTRIBUTES:
+        val = properties.pop(attr, None)
+        if attr in ('min_ram', 'min_disk'):
+            image_meta[attr] = int(val or 0)
+    # NOTE(mriedem): Set the status to 'active' as a really old hack
+    # from when this method was in the compute API class and is
+    # needed for _check_requested_image which makes sure the image
+    # is 'active'. For volume-backed servers, if the volume is not
+    # available because the image backing the volume is not active,
+    # then the compute API trying to reserve the volume should fail.
+    image_meta['status'] = 'active'
+    return image_meta
+
+
+def get_hash_str(base_str):
+    """Returns string that represents MD5 hash of base_str (in hex format).
+
+    If base_str is a Unicode string, encode it to UTF-8.
+    """
+    if isinstance(base_str, six.text_type):
+        base_str = base_str.encode('utf-8')
+    return hashlib.md5(base_str).hexdigest()
+
+
+def get_sha256_str(base_str):
+    """Returns string that represents sha256 hash of base_str (in hex format).
+
+    sha1 and md5 are known to be breakable, so sha256 is a better option
+    when the hash is being used for security purposes. If hashing passwords
+    or anything else that needs to be retained for a long period a salted
+    hash is better.
+    """
+    if isinstance(base_str, six.text_type):
+        base_str = base_str.encode('utf-8')
+    return hashlib.sha256(base_str).hexdigest()
+
+
+def get_obj_repr_unicode(obj):
+    """Returns a string representation of an object converted to unicode.
+
+    In the case of python 3, this just returns the repr() of the object,
+    else it converts the repr() to unicode.
+    """
+    obj_repr = repr(obj)
+    if not six.PY3:
+        obj_repr = six.text_type(obj_repr, 'utf-8')
+    return obj_repr
+
+
+def filter_and_format_resource_metadata(resource_type, resource_list,
+        search_filts, metadata_type=None):
+    """Get all metadata for a list of resources after filtering.
+
+    Search_filts is a list of dictionaries, where the values in the dictionary
+    can be string or regex string, or a list of strings/regex strings.
+
+    Let's call a dict a 'filter block' and an item in the dict
+    a 'filter'. A tag is returned if it matches ALL the filters in
+    a filter block. If more than one value is specified for a
+    filter, a tag is returned if it matches AT LEAST ONE value of the filter.
+    If more than one filter block is specified, the tag must match ALL the
+    filter blocks.
+
+    For example:
+
+        search_filts = [{'key': ['key1', 'key2'], 'value': 'val1'},
+                        {'value': 'val2'}]
+
+    The filter translates to 'match any tag for which':
+        ((key=key1 AND value=val1) OR (key=key2 AND value=val1)) AND
+            (value=val2)
+
+    This example filter will never match a tag.
+
+        :param resource_type: The resource type as a string, e.g. 'instance'
+        :param resource_list: List of resource objects
+        :param search_filts: Filters to filter metadata to be returned. Can be
+            a dict (e.g. {'key': 'env', 'value': 'prod'}) or a list of dicts
+            (e.g. [{'key': 'env'}, {'value': 'beta'}]). Note that the values
+            of the dict can be regular expressions.
+        :param metadata_type: Provided to search for a specific metadata type
+            (e.g. 'system_metadata')
+
+        :returns: List of dicts where each dict is of the form {'key':
+            'somekey', 'value': 'somevalue', 'instance_id':
+            'some-instance-uuid-aaa'} if resource_type is 'instance'.
+    """
+
+    if isinstance(search_filts, dict):
+        search_filts = [search_filts]
+
+    def _get_id(resource):
+        if resource_type == 'instance':
+            return resource.get('uuid')
+
+    def _match_any(pattern_list, string):
+        if isinstance(pattern_list, six.string_types):
+            pattern_list = [pattern_list]
+        return any([re.match(pattern, string)
+                    for pattern in pattern_list])
+
+    def _filter_metadata(resource, search_filt, input_metadata):
+        ids = search_filt.get('resource_id', [])
+        keys_filter = search_filt.get('key', [])
+        values_filter = search_filt.get('value', [])
+        output_metadata = {}
+
+        if ids and _get_id(resource) not in ids:
+            return {}
+
+        for k, v in input_metadata.items():
+            # Both keys and value defined -- AND
+            if (keys_filter and values_filter and
+               not _match_any(keys_filter, k) and
+               not _match_any(values_filter, v)):
+                continue
+            # Only keys or value is defined
+            elif ((keys_filter and not _match_any(keys_filter, k)) or
+                  (values_filter and not _match_any(values_filter, v))):
+                continue
+
+            output_metadata[k] = v
+        return output_metadata
+
+    formatted_metadata_list = []
+    for res in resource_list:
+
+        if resource_type == 'instance':
+            # NOTE(rushiagr): metadata_type should be 'metadata' or
+            # 'system_metadata' if resource_type is instance. Defaulting to
+            # 'metadata' if not specified.
+            if metadata_type is None:
+                metadata_type = 'metadata'
+            metadata = res.get(metadata_type, {})
+
+        for filt in search_filts:
+            # By chaining the input to the output, the filters are
+            # ANDed together
+            metadata = _filter_metadata(res, filt, metadata)
+
+        for (k, v) in metadata.items():
+            formatted_metadata_list.append({'key': k, 'value': v,
+                             '%s_id' % resource_type: _get_id(res)})
+
+    return formatted_metadata_list
+
+
+def safe_truncate(value, length):
+    """Safely truncates unicode strings such that their encoded length is
+    no greater than the length provided.
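+
+    Example (illustrative): safe_truncate(u'\xe9' * 3, 5) returns
+    u'\xe9\xe9', because the third two-byte UTF-8 character would be cut
+    at byte 5.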
+    """
+    b_value = encodeutils.safe_encode(value)[:length]
+
+    # NOTE(chaochin) UTF-8 character byte size varies from 1 to 6. If
+    # truncating a long byte string to 255, the last character may be
+    # cut in the middle, so that UnicodeDecodeError will occur when
+    # converting it back to unicode.
+    decode_ok = False
+    while not decode_ok:
+        try:
+            u_value = encodeutils.safe_decode(b_value)
+            decode_ok = True
+        except UnicodeDecodeError:
+            b_value = b_value[:-1]
+    return u_value
+
+
+def read_cached_file(filename, force_reload=False):
+    """Read from a file if it has been modified.
+
+    :param force_reload: Whether to reload the file.
+    :returns: A tuple (reloaded, data) where reloaded is a boolean
+              indicating whether the file was re-read, and data is the
+              file contents.
+    """
+    global _FILE_CACHE
+
+    if force_reload:
+        delete_cached_file(filename)
+
+    reloaded = False
+    mtime = os.path.getmtime(filename)
+    cache_info = _FILE_CACHE.setdefault(filename, {})
+
+    if not cache_info or mtime > cache_info.get('mtime', 0):
+        LOG.debug("Reloading cached file %s", filename)
+        with open(filename) as fap:
+            cache_info['data'] = fap.read()
+        cache_info['mtime'] = mtime
+        reloaded = True
+    return (reloaded, cache_info['data'])
+
+
+def delete_cached_file(filename):
+    """Delete cached file if present.
+
+    :param filename: filename to delete
+    """
+    global _FILE_CACHE
+
+    if filename in _FILE_CACHE:
+        del _FILE_CACHE[filename]
+
+
+def isotime(at=None):
+    """Current time as ISO string,
+    as timeutils.isotime() is deprecated
+
+    :returns: Current time in ISO format
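+
+    Example (illustrative):
+
+        isotime(datetime.datetime(2020, 4, 4, 22, 34, 22))
+        # '2020-04-04T22:34:22Z'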
+    """
+    if not at:
+        at = timeutils.utcnow()
+    date_string = at.strftime("%Y-%m-%dT%H:%M:%S")
+    tz = at.tzinfo.tzname(None) if at.tzinfo else 'UTC'
+    date_string += ('Z' if tz in ['UTC', 'UTC+00:00'] else tz)
+    return date_string
+
+
+def strtime(at):
+    return at.strftime("%Y-%m-%dT%H:%M:%S.%f")
+
+
+def get_ksa_adapter(service_type, ksa_auth=None, ksa_session=None,
+                    min_version=None, max_version=None):
+    """Construct a keystoneauth1 Adapter for a given service type.
+
+    We expect to find a conf group whose name corresponds to the service_type's
+    project according to the service-types-authority.  That conf group must
+    provide at least ksa adapter options.  Depending on how the result is to
+    be used, ksa auth and/or session options may also be required, or the
+    relevant parameter supplied.
+
+    A raise_exc=False adapter is returned, meaning responses >=400 return the
+    Response object rather than raising an exception.  This behavior can be
+    overridden on a per-request basis by setting raise_exc=True.
+
+    :param service_type: String name of the service type for which the Adapter
+                         is to be constructed.
+    :param ksa_auth: A keystoneauth1 auth plugin. If not specified, we attempt
+                     to find one in ksa_session.  Failing that, we attempt to
+                     load one from the conf.
+    :param ksa_session: A keystoneauth1 Session.  If not specified, we attempt
+                        to load one from the conf.
+    :param min_version: The minimum major version of the adapter's endpoint,
+                        intended to be used as the lower bound of a range with
+                        max_version.
+                        If min_version is given with no max_version it is as
+                        if max version is 'latest'.
+    :param max_version: The maximum major version of the adapter's endpoint,
+                        intended to be used as the upper bound of a range with
+                        min_version.
+    :return: A keystoneauth1 Adapter object for the specified service_type.
+    :raise: ConfGroupForServiceTypeNotFound If no conf group name could be
+            found for the specified service_type.
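+
+    Example (illustrative; assumes the matching conf group provides auth
+    and session options):
+
+        adapter = get_ksa_adapter('identity')
+        endpoint = adapter.get_endpoint()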
+    """
+    # Get the conf group corresponding to the service type.
+    confgrp = _SERVICE_TYPES.get_project_name(service_type)
+    if not confgrp or not hasattr(CONF, confgrp):
+        # Try the service type as the conf group.  This is necessary for e.g.
+        # placement, while it's still part of the nova project.
+        # Note that this might become the first thing we try if/as we move to
+        # using service types for conf group names in general.
+        confgrp = service_type
+        if not confgrp or not hasattr(CONF, confgrp):
+            raise exception.ConfGroupForServiceTypeNotFound(stype=service_type)
+
+    # Ensure we have an auth.
+    # NOTE(efried): This could be None, and that could be okay - e.g. if the
+    # result is being used for get_endpoint() and the conf only contains
+    # endpoint_override.
+    if not ksa_auth:
+        if ksa_session and ksa_session.auth:
+            ksa_auth = ksa_session.auth
+        else:
+            ksa_auth = ks_loading.load_auth_from_conf_options(CONF, confgrp)
+
+    if not ksa_session:
+        ksa_session = ks_loading.load_session_from_conf_options(
+            CONF, confgrp, auth=ksa_auth)
+
+    return ks_loading.load_adapter_from_conf_options(
+        CONF, confgrp, session=ksa_session, auth=ksa_auth,
+        min_version=min_version, max_version=max_version, raise_exc=False)
+
+
+def get_endpoint(ksa_adapter):
+    """Get the endpoint URL represented by a keystoneauth1 Adapter.
+
+    This method is equivalent to what
+
+        ksa_adapter.get_endpoint()
+
+    should do, if it weren't for a panoply of bugs.
+
+    :param ksa_adapter: keystoneauth1.adapter.Adapter, appropriately set up
+                        with an endpoint_override; or service_type, interface
+                        (list) and auth/service_catalog.
+    :return: String endpoint URL.
+    :raise EndpointNotFound: If endpoint discovery fails.
+    """
+    # TODO(efried): This will be unnecessary once bug #1707993 is fixed.
+    # (At least for the non-image case, until 1707995 is fixed.)
+    if ksa_adapter.endpoint_override:
+        return ksa_adapter.endpoint_override
+    # TODO(efried): Remove this once bug #1707995 is fixed.
+    if ksa_adapter.service_type == 'image':
+        try:
+            return ksa_adapter.get_endpoint_data().catalog_url
+        except AttributeError:
+            # ksa_adapter.auth is a _ContextAuthPlugin, which doesn't have
+            # get_endpoint_data.  Fall through to using get_endpoint().
+            pass
+    # TODO(efried): The remainder of this method reduces to
+    # TODO(efried):     return ksa_adapter.get_endpoint()
+    # TODO(efried): once bug #1709118 is fixed.
+    # NOTE(efried): Id9bd19cca68206fc64d23b0eaa95aa3e5b01b676 may also do the
+    #               trick, once it's in a ksa release.
+    # The EndpointNotFound exception happens when _ContextAuthPlugin is in play
+    # because its get_endpoint() method isn't yet set up to handle interface as
+    # a list.  (It could also happen with a real auth if the endpoint isn't
+    # there; but that's covered below.)
+    try:
+        return ksa_adapter.get_endpoint()
+    except ks_exc.EndpointNotFound:
+        pass
+
+    interfaces = list(ksa_adapter.interface)
+    for interface in interfaces:
+        ksa_adapter.interface = interface
+        try:
+            return ksa_adapter.get_endpoint()
+        except ks_exc.EndpointNotFound:
+            pass
+    raise ks_exc.EndpointNotFound(
+        "Could not find requested endpoint for any of the following "
+        "interfaces: %s" % interfaces)
+
+
+def generate_hostid(host, project_id):
+    """Generate an obfuscated host id representing the host.
+
+    This is a hashed value so will not actually look like a hostname, and is
+    hashed with data from the project_id.
+
+    :param host: The name of the compute host.
+    :param project_id: The UUID of the project.
+    :return: An obfuscated hashed host id string, or "" if host is empty
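+
+    Example (illustrative): generate_hostid('compute-1', 'project-uuid')
+    returns the 56-character sha224 hex digest of 'project-uuidcompute-1'.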
+    """
+    if host:
+        data = (project_id + host).encode('utf-8')
+        sha_hash = hashlib.sha224(data)
+        return sha_hash.hexdigest()
+    return ""
+
+
+def monkey_patch():
+    if debugger.enabled():
+        # turn off thread patching to enable the remote debugger
+        eventlet.monkey_patch(os=False, thread=False)
+    else:
+        eventlet.monkey_patch(os=False)
+
+    # NOTE(rgerganov): oslo.context is storing a global thread-local variable
+    # which keeps the request context for the current thread. If oslo.context
+    # is imported before calling monkey_patch(), then this thread-local won't
+    # be green. To workaround this, reload the module after calling
+    # monkey_patch()
+    reload_module(importutils.import_module('oslo_context.context'))
+
+
+if six.PY2:
+    nested_contexts = contextlib.nested
+else:
+    @contextlib.contextmanager
+    def nested_contexts(*contexts):
+        with contextlib.ExitStack() as stack:
+            yield [stack.enter_context(c) for c in contexts]
+
+
+def run_once(message, logger, cleanup=None):
+    """This is a utility function decorator to ensure a function
+    is run once and only once in an interpreter instance.
+    The decorated function object can be reset by calling its
+    reset function. All exceptions raised by the wrapped function,
+    logger and cleanup function will be propagated to the caller.
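+
+    Illustrative usage (hypothetical function name and message):
+
+        @run_once("mounts already configured, skipping", LOG.warning)
+        def configure_mounts():
+            ...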
+    """
+    def outer_wrapper(func):
+        @functools.wraps(func)
+        def wrapper(*args, **kwargs):
+            if not wrapper.called:
+                # Note(sean-k-mooney): the called state is always
+                # updated even if the wrapped function completes
+                # by raising an exception. If the caller catches
+                # the exception it is their responsibility to call
+                # reset if they want to re-execute the wrapped function.
+                try:
+                    return func(*args, **kwargs)
+                finally:
+                    wrapper.called = True
+            else:
+                logger(message)
+
+        wrapper.called = False
+
+        def reset(wrapper, *args, **kwargs):
+            # Note(sean-k-mooney): we conditionally call the
+            # cleanup function if one is provided only when the
+            # wrapped function has been called previously. We catch
+            # and reraise any exception that may be raised and update
+            # the called state in a finally block to ensure its
+            # always updated if reset is called.
+            try:
+                if cleanup and wrapper.called:
+                    return cleanup(*args, **kwargs)
+            finally:
+                wrapper.called = False
+
+        wrapper.reset = functools.partial(reset, wrapper)
+        return wrapper
+    return outer_wrapper

diff --git a/gosbs/version.py b/gosbs/version.py
new file mode 100644
index 0000000..268086c
--- /dev/null
+++ b/gosbs/version.py
@@ -0,0 +1,90 @@
+#    Copyright 2011 OpenStack Foundation
+#
+#    Licensed under the Apache License, Version 2.0 (the "License"); you may
+#    not use this file except in compliance with the License. You may obtain
+#    a copy of the License at
+#
+#         http://www.apache.org/licenses/LICENSE-2.0
+#
+#    Unless required by applicable law or agreed to in writing, software
+#    distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+#    WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+#    License for the specific language governing permissions and limitations
+#    under the License.
+
+# Origin https://github.com/openstack/nova/blob/master/nova/version.py
+
+import pbr.version
+
+from gosbs.i18n import _LE
+
+NOVA_VENDOR = "Gentoo Foundation Inc"
+NOVA_PRODUCT = "OpenStack Gosbs"
+NOVA_PACKAGE = None  # OS distro package version suffix
+
+loaded = False
+version_info = pbr.version.VersionInfo('gosbs')
+version_string = version_info.version_string
+
+
+def _load_config():
+    # Don't load in global context, since we can't assume
+    # these modules are accessible when distutils uses
+    # this module
+    from six.moves import configparser
+
+    from oslo_config import cfg
+
+    from oslo_log import log as logging
+
+    global loaded, NOVA_VENDOR, NOVA_PRODUCT, NOVA_PACKAGE
+    if loaded:
+        return
+
+    loaded = True
+
+    cfgfile = cfg.CONF.find_file("release")
+    if cfgfile is None:
+        return
+
+    try:
+        cfg = configparser.RawConfigParser()
+        cfg.read(cfgfile)
+
+        if cfg.has_option("Gosbs", "vendor"):
+            NOVA_VENDOR = cfg.get("Gosbs", "vendor")
+
+        if cfg.has_option("Gobs", "product"):
+            NOVA_PRODUCT = cfg.get("Gosbs", "product")
+
+        if cfg.has_option("Gosbs", "package"):
+            NOVA_PACKAGE = cfg.get("Gosbs", "package")
+    except Exception as ex:
+        LOG = logging.getLogger(__name__)
+        LOG.error(_LE("Failed to load %(cfgfile)s: %(ex)s"),
+                  {'cfgfile': cfgfile, 'ex': ex})
+
+
+def vendor_string():
+    _load_config()
+
+    return NOVA_VENDOR
+
+
+def product_string():
+    _load_config()
+
+    return NOVA_PRODUCT
+
+
+def package_string():
+    _load_config()
+
+    return NOVA_PACKAGE
+
+
+def version_string_with_package():
+    if package_string() is None:
+        return version_info.version_string()
+    else:
+        return "%s-%s" % (version_info.version_string(), package_string())

diff --git a/licenses/Apache-2.0 b/licenses/Apache-2.0
new file mode 100644
index 0000000..261eeb9
--- /dev/null
+++ b/licenses/Apache-2.0
@@ -0,0 +1,201 @@
+                                 Apache License
+                           Version 2.0, January 2004
+                        http://www.apache.org/licenses/
+
+   TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+   1. Definitions.
+
+      "License" shall mean the terms and conditions for use, reproduction,
+      and distribution as defined by Sections 1 through 9 of this document.
+
+      "Licensor" shall mean the copyright owner or entity authorized by
+      the copyright owner that is granting the License.
+
+      "Legal Entity" shall mean the union of the acting entity and all
+      other entities that control, are controlled by, or are under common
+      control with that entity. For the purposes of this definition,
+      "control" means (i) the power, direct or indirect, to cause the
+      direction or management of such entity, whether by contract or
+      otherwise, or (ii) ownership of fifty percent (50%) or more of the
+      outstanding shares, or (iii) beneficial ownership of such entity.
+
+      "You" (or "Your") shall mean an individual or Legal Entity
+      exercising permissions granted by this License.
+
+      "Source" form shall mean the preferred form for making modifications,
+      including but not limited to software source code, documentation
+      source, and configuration files.
+
+      "Object" form shall mean any form resulting from mechanical
+      transformation or translation of a Source form, including but
+      not limited to compiled object code, generated documentation,
+      and conversions to other media types.
+
+      "Work" shall mean the work of authorship, whether in Source or
+      Object form, made available under the License, as indicated by a
+      copyright notice that is included in or attached to the work
+      (an example is provided in the Appendix below).
+
+      "Derivative Works" shall mean any work, whether in Source or Object
+      form, that is based on (or derived from) the Work and for which the
+      editorial revisions, annotations, elaborations, or other modifications
+      represent, as a whole, an original work of authorship. For the purposes
+      of this License, Derivative Works shall not include works that remain
+      separable from, or merely link (or bind by name) to the interfaces of,
+      the Work and Derivative Works thereof.
+
+      "Contribution" shall mean any work of authorship, including
+      the original version of the Work and any modifications or additions
+      to that Work or Derivative Works thereof, that is intentionally
+      submitted to Licensor for inclusion in the Work by the copyright owner
+      or by an individual or Legal Entity authorized to submit on behalf of
+      the copyright owner. For the purposes of this definition, "submitted"
+      means any form of electronic, verbal, or written communication sent
+      to the Licensor or its representatives, including but not limited to
+      communication on electronic mailing lists, source code control systems,
+      and issue tracking systems that are managed by, or on behalf of, the
+      Licensor for the purpose of discussing and improving the Work, but
+      excluding communication that is conspicuously marked or otherwise
+      designated in writing by the copyright owner as "Not a Contribution."
+
+      "Contributor" shall mean Licensor and any individual or Legal Entity
+      on behalf of whom a Contribution has been received by Licensor and
+      subsequently incorporated within the Work.
+
+   2. Grant of Copyright License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      copyright license to reproduce, prepare Derivative Works of,
+      publicly display, publicly perform, sublicense, and distribute the
+      Work and such Derivative Works in Source or Object form.
+
+   3. Grant of Patent License. Subject to the terms and conditions of
+      this License, each Contributor hereby grants to You a perpetual,
+      worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+      (except as stated in this section) patent license to make, have made,
+      use, offer to sell, sell, import, and otherwise transfer the Work,
+      where such license applies only to those patent claims licensable
+      by such Contributor that are necessarily infringed by their
+      Contribution(s) alone or by combination of their Contribution(s)
+      with the Work to which such Contribution(s) was submitted. If You
+      institute patent litigation against any entity (including a
+      cross-claim or counterclaim in a lawsuit) alleging that the Work
+      or a Contribution incorporated within the Work constitutes direct
+      or contributory patent infringement, then any patent licenses
+      granted to You under this License for that Work shall terminate
+      as of the date such litigation is filed.
+
+   4. Redistribution. You may reproduce and distribute copies of the
+      Work or Derivative Works thereof in any medium, with or without
+      modifications, and in Source or Object form, provided that You
+      meet the following conditions:
+
+      (a) You must give any other recipients of the Work or
+          Derivative Works a copy of this License; and
+
+      (b) You must cause any modified files to carry prominent notices
+          stating that You changed the files; and
+
+      (c) You must retain, in the Source form of any Derivative Works
+          that You distribute, all copyright, patent, trademark, and
+          attribution notices from the Source form of the Work,
+          excluding those notices that do not pertain to any part of
+          the Derivative Works; and
+
+      (d) If the Work includes a "NOTICE" text file as part of its
+          distribution, then any Derivative Works that You distribute must
+          include a readable copy of the attribution notices contained
+          within such NOTICE file, excluding those notices that do not
+          pertain to any part of the Derivative Works, in at least one
+          of the following places: within a NOTICE text file distributed
+          as part of the Derivative Works; within the Source form or
+          documentation, if provided along with the Derivative Works; or,
+          within a display generated by the Derivative Works, if and
+          wherever such third-party notices normally appear. The contents
+          of the NOTICE file are for informational purposes only and
+          do not modify the License. You may add Your own attribution
+          notices within Derivative Works that You distribute, alongside
+          or as an addendum to the NOTICE text from the Work, provided
+          that such additional attribution notices cannot be construed
+          as modifying the License.
+
+      You may add Your own copyright statement to Your modifications and
+      may provide additional or different license terms and conditions
+      for use, reproduction, or distribution of Your modifications, or
+      for any such Derivative Works as a whole, provided Your use,
+      reproduction, and distribution of the Work otherwise complies with
+      the conditions stated in this License.
+
+   5. Submission of Contributions. Unless You explicitly state otherwise,
+      any Contribution intentionally submitted for inclusion in the Work
+      by You to the Licensor shall be under the terms and conditions of
+      this License, without any additional terms or conditions.
+      Notwithstanding the above, nothing herein shall supersede or modify
+      the terms of any separate license agreement you may have executed
+      with Licensor regarding such Contributions.
+
+   6. Trademarks. This License does not grant permission to use the trade
+      names, trademarks, service marks, or product names of the Licensor,
+      except as required for reasonable and customary use in describing the
+      origin of the Work and reproducing the content of the NOTICE file.
+
+   7. Disclaimer of Warranty. Unless required by applicable law or
+      agreed to in writing, Licensor provides the Work (and each
+      Contributor provides its Contributions) on an "AS IS" BASIS,
+      WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+      implied, including, without limitation, any warranties or conditions
+      of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+      PARTICULAR PURPOSE. You are solely responsible for determining the
+      appropriateness of using or redistributing the Work and assume any
+      risks associated with Your exercise of permissions under this License.
+
+   8. Limitation of Liability. In no event and under no legal theory,
+      whether in tort (including negligence), contract, or otherwise,
+      unless required by applicable law (such as deliberate and grossly
+      negligent acts) or agreed to in writing, shall any Contributor be
+      liable to You for damages, including any direct, indirect, special,
+      incidental, or consequential damages of any character arising as a
+      result of this License or out of the use or inability to use the
+      Work (including but not limited to damages for loss of goodwill,
+      work stoppage, computer failure or malfunction, or any and all
+      other commercial damages or losses), even if such Contributor
+      has been advised of the possibility of such damages.
+
+   9. Accepting Warranty or Additional Liability. While redistributing
+      the Work or Derivative Works thereof, You may choose to offer,
+      and charge a fee for, acceptance of support, warranty, indemnity,
+      or other liability obligations and/or rights consistent with this
+      License. However, in accepting such obligations, You may act only
+      on Your own behalf and on Your sole responsibility, not on behalf
+      of any other Contributor, and only if You agree to indemnify,
+      defend, and hold each Contributor harmless for any liability
+      incurred by, or claims asserted against, such Contributor by reason
+      of your accepting any such warranty or additional liability.
+
+   END OF TERMS AND CONDITIONS
+
+   APPENDIX: How to apply the Apache License to your work.
+
+      To apply the Apache License to your work, attach the following
+      boilerplate notice, with the fields enclosed by brackets "[]"
+      replaced with your own identifying information. (Don't include
+      the brackets!)  The text should be enclosed in the appropriate
+      comment syntax for the file format. We also recommend that a
+      file or class name and description of purpose be included on the
+      same "printed page" as the copyright notice for easier
+      identification within third-party archives.
+
+   Copyright [yyyy] [name of copyright owner]
+
+   Licensed under the Apache License, Version 2.0 (the "License");
+   you may not use this file except in compliance with the License.
+   You may obtain a copy of the License at
+
+       http://www.apache.org/licenses/LICENSE-2.0
+
+   Unless required by applicable law or agreed to in writing, software
+   distributed under the License is distributed on an "AS IS" BASIS,
+   WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+   See the License for the specific language governing permissions and
+   limitations under the License.
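
   As the appendix notes, the notice is placed in the comment syntax of the
   file format. For a Python source file (the language used in this tree), a
   minimal sketch of such a header, with the bracketed fields left as
   placeholders, would be:

       # Copyright [yyyy] [name of copyright owner]
       #
       # Licensed under the Apache License, Version 2.0 (the "License");
       # you may not use this file except in compliance with the License.
       # You may obtain a copy of the License at
       #
       #     http://www.apache.org/licenses/LICENSE-2.0
       #
       # Unless required by applicable law or agreed to in writing, software
       # distributed under the License is distributed on an "AS IS" BASIS,
       # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
       # See the License for the specific language governing permissions and
       # limitations under the License.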

diff --git a/licenses/GPL-2 b/licenses/GPL-2
new file mode 100644
index 0000000..0e845b5
--- /dev/null
+++ b/licenses/GPL-2
@@ -0,0 +1,339 @@
+                    GNU GENERAL PUBLIC LICENSE
+                       Version 2, June 1991
+
+ Copyright (C) 1989, 1991 Free Software Foundation, Inc.,
+ 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+                            Preamble
+
+  The licenses for most software are designed to take away your
+freedom to share and change it.  By contrast, the GNU General Public
+License is intended to guarantee your freedom to share and change free
+software--to make sure the software is free for all its users.  This
+General Public License applies to most of the Free Software
+Foundation's software and to any other program whose authors commit to
+using it.  (Some other Free Software Foundation software is covered by
+the GNU Lesser General Public License instead.)  You can apply it to
+your programs, too.
+
+  When we speak of free software, we are referring to freedom, not
+price.  Our General Public Licenses are designed to make sure that you
+have the freedom to distribute copies of free software (and charge for
+this service if you wish), that you receive source code or can get it
+if you want it, that you can change the software or use pieces of it
+in new free programs; and that you know you can do these things.
+
+  To protect your rights, we need to make restrictions that forbid
+anyone to deny you these rights or to ask you to surrender the rights.
+These restrictions translate to certain responsibilities for you if you
+distribute copies of the software, or if you modify it.
+
+  For example, if you distribute copies of such a program, whether
+gratis or for a fee, you must give the recipients all the rights that
+you have.  You must make sure that they, too, receive or can get the
+source code.  And you must show them these terms so they know their
+rights.
+
+  We protect your rights with two steps: (1) copyright the software, and
+(2) offer you this license which gives you legal permission to copy,
+distribute and/or modify the software.
+
+  Also, for each author's protection and ours, we want to make certain
+that everyone understands that there is no warranty for this free
+software.  If the software is modified by someone else and passed on, we
+want its recipients to know that what they have is not the original, so
+that any problems introduced by others will not reflect on the original
+authors' reputations.
+
+  Finally, any free program is threatened constantly by software
+patents.  We wish to avoid the danger that redistributors of a free
+program will individually obtain patent licenses, in effect making the
+program proprietary.  To prevent this, we have made it clear that any
+patent must be licensed for everyone's free use or not licensed at all.
+
+  The precise terms and conditions for copying, distribution and
+modification follow.
+\f
+                    GNU GENERAL PUBLIC LICENSE
+   TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+  0. This License applies to any program or other work which contains
+a notice placed by the copyright holder saying it may be distributed
+under the terms of this General Public License.  The "Program", below,
+refers to any such program or work, and a "work based on the Program"
+means either the Program or any derivative work under copyright law:
+that is to say, a work containing the Program or a portion of it,
+either verbatim or with modifications and/or translated into another
+language.  (Hereinafter, translation is included without limitation in
+the term "modification".)  Each licensee is addressed as "you".
+
+Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope.  The act of
+running the Program is not restricted, and the output from the Program
+is covered only if its contents constitute a work based on the
+Program (independent of having been made by running the Program).
+Whether that is true depends on what the Program does.
+
+  1. You may copy and distribute verbatim copies of the Program's
+source code as you receive it, in any medium, provided that you
+conspicuously and appropriately publish on each copy an appropriate
+copyright notice and disclaimer of warranty; keep intact all the
+notices that refer to this License and to the absence of any warranty;
+and give any other recipients of the Program a copy of this License
+along with the Program.
+
+You may charge a fee for the physical act of transferring a copy, and
+you may at your option offer warranty protection in exchange for a fee.
+
+  2. You may modify your copy or copies of the Program or any portion
+of it, thus forming a work based on the Program, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+    a) You must cause the modified files to carry prominent notices
+    stating that you changed the files and the date of any change.
+
+    b) You must cause any work that you distribute or publish, that in
+    whole or in part contains or is derived from the Program or any
+    part thereof, to be licensed as a whole at no charge to all third
+    parties under the terms of this License.
+
+    c) If the modified program normally reads commands interactively
+    when run, you must cause it, when started running for such
+    interactive use in the most ordinary way, to print or display an
+    announcement including an appropriate copyright notice and a
+    notice that there is no warranty (or else, saying that you provide
+    a warranty) and that users may redistribute the program under
+    these conditions, and telling the user how to view a copy of this
+    License.  (Exception: if the Program itself is interactive but
+    does not normally print such an announcement, your work based on
+    the Program is not required to print an announcement.)
+
+These requirements apply to the modified work as a whole.  If
+identifiable sections of that work are not derived from the Program,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works.  But when you
+distribute the same sections as part of a whole which is a work based
+on the Program, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Program.
+
+In addition, mere aggregation of another work not based on the Program
+with the Program (or with a work based on the Program) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+  3. You may copy and distribute the Program (or a work based on it,
+under Section 2) in object code or executable form under the terms of
+Sections 1 and 2 above provided that you also do one of the following:
+
+    a) Accompany it with the complete corresponding machine-readable
+    source code, which must be distributed under the terms of Sections
+    1 and 2 above on a medium customarily used for software interchange; or,
+
+    b) Accompany it with a written offer, valid for at least three
+    years, to give any third party, for a charge no more than your
+    cost of physically performing source distribution, a complete
+    machine-readable copy of the corresponding source code, to be
+    distributed under the terms of Sections 1 and 2 above on a medium
+    customarily used for software interchange; or,
+
+    c) Accompany it with the information you received as to the offer
+    to distribute corresponding source code.  (This alternative is
+    allowed only for noncommercial distribution and only if you
+    received the program in object code or executable form with such
+    an offer, in accord with Subsection b above.)
+
+The source code for a work means the preferred form of the work for
+making modifications to it.  For an executable work, complete source
+code means all the source code for all modules it contains, plus any
+associated interface definition files, plus the scripts used to
+control compilation and installation of the executable.  However, as a
+special exception, the source code distributed need not include
+anything that is normally distributed (in either source or binary
+form) with the major components (compiler, kernel, and so on) of the
+operating system on which the executable runs, unless that component
+itself accompanies the executable.
+
+If distribution of executable or object code is made by offering
+access to copy from a designated place, then offering equivalent
+access to copy the source code from the same place counts as
+distribution of the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+  4. You may not copy, modify, sublicense, or distribute the Program
+except as expressly provided under this License.  Any attempt
+otherwise to copy, modify, sublicense or distribute the Program is
+void, and will automatically terminate your rights under this License.
+However, parties who have received copies, or rights, from you under
+this License will not have their licenses terminated so long as such
+parties remain in full compliance.
+
+  5. You are not required to accept this License, since you have not
+signed it.  However, nothing else grants you permission to modify or
+distribute the Program or its derivative works.  These actions are
+prohibited by law if you do not accept this License.  Therefore, by
+modifying or distributing the Program (or any work based on the
+Program), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Program or works based on it.
+
+  6. Each time you redistribute the Program (or any work based on the
+Program), the recipient automatically receives a license from the
+original licensor to copy, distribute or modify the Program subject to
+these terms and conditions.  You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties to
+this License.
+
+  7. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License.  If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Program at all.  For example, if a patent
+license would not permit royalty-free redistribution of the Program by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Program.
+
+If any portion of this section is held invalid or unenforceable under
+any particular circumstance, the balance of the section is intended to
+apply and the section as a whole is intended to apply in other
+circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system, which is
+implemented by public license practices.  Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+  8. If the distribution and/or use of the Program is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Program under this License
+may add an explicit geographical distribution limitation excluding
+those countries, so that distribution is permitted only in or among
+countries not thus excluded.  In such case, this License incorporates
+the limitation as if written in the body of this License.
+
+  9. The Free Software Foundation may publish revised and/or new versions
+of the General Public License from time to time.  Such new versions will
+be similar in spirit to the present version, but may differ in detail to
+address new problems or concerns.
+
+Each version is given a distinguishing version number.  If the Program
+specifies a version number of this License which applies to it and "any
+later version", you have the option of following the terms and conditions
+either of that version or of any later version published by the Free
+Software Foundation.  If the Program does not specify a version number of
+this License, you may choose any version ever published by the Free Software
+Foundation.
+
+  10. If you wish to incorporate parts of the Program into other free
+programs whose distribution conditions are different, write to the author
+to ask for permission.  For software which is copyrighted by the Free
+Software Foundation, write to the Free Software Foundation; we sometimes
+make exceptions for this.  Our decision will be guided by the two goals
+of preserving the free status of all derivatives of our free software and
+of promoting the sharing and reuse of software generally.
+
+                            NO WARRANTY
+
+  11. BECAUSE THE PROGRAM IS LICENSED FREE OF CHARGE, THERE IS NO WARRANTY
+FOR THE PROGRAM, TO THE EXTENT PERMITTED BY APPLICABLE LAW.  EXCEPT WHEN
+OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR OTHER PARTIES
+PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY OF ANY KIND, EITHER EXPRESSED
+OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF
+MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE.  THE ENTIRE RISK AS
+TO THE QUALITY AND PERFORMANCE OF THE PROGRAM IS WITH YOU.  SHOULD THE
+PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF ALL NECESSARY SERVICING,
+REPAIR OR CORRECTION.
+
+  12. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
+WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY AND/OR
+REDISTRIBUTE THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES,
+INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING
+OUT OF THE USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED
+TO LOSS OF DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY
+YOU OR THIRD PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER
+PROGRAMS), EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE
+POSSIBILITY OF SUCH DAMAGES.
+
+                     END OF TERMS AND CONDITIONS
+\f
+            How to Apply These Terms to Your New Programs
+
+  If you develop a new program, and you want it to be of the greatest
+possible use to the public, the best way to achieve this is to make it
+free software which everyone can redistribute and change under these terms.
+
+  To do so, attach the following notices to the program.  It is safest
+to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least
+the "copyright" line and a pointer to where the full notice is found.
+
+    <one line to give the program's name and a brief idea of what it does.>
+    Copyright (C) <year>  <name of author>
+
+    This program is free software; you can redistribute it and/or modify
+    it under the terms of the GNU General Public License as published by
+    the Free Software Foundation; either version 2 of the License, or
+    (at your option) any later version.
+
+    This program is distributed in the hope that it will be useful,
+    but WITHOUT ANY WARRANTY; without even the implied warranty of
+    MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
+    GNU General Public License for more details.
+
+    You should have received a copy of the GNU General Public License along
+    with this program; if not, write to the Free Software Foundation, Inc.,
+    51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
+
+Also add information on how to contact you by electronic and paper mail.
+
+If the program is interactive, make it output a short notice like this
+when it starts in an interactive mode:
+
+    Gnomovision version 69, Copyright (C) year name of author
+    Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
+    This is free software, and you are welcome to redistribute it
+    under certain conditions; type `show c' for details.
+
+The hypothetical commands `show w' and `show c' should show the appropriate
+parts of the General Public License.  Of course, the commands you use may
+be called something other than `show w' and `show c'; they could even be
+mouse-clicks or menu items--whatever suits your program.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the program, if
+necessary.  Here is a sample; alter the names:
+
+  Yoyodyne, Inc., hereby disclaims all copyright interest in the program
+  `Gnomovision' (which makes passes at compilers) written by James Hacker.
+
+  <signature of Ty Coon>, 1 April 1989
+  Ty Coon, President of Vice
+
+This General Public License does not permit incorporating your program into
+proprietary programs.  If your program is a subroutine library, you may
+consider it more useful to permit linking proprietary applications with the
+library.  If this is what you want to do, use the GNU Lesser General
+Public License instead of this License.

diff --git a/patches/portage.patch b/patches/portage.patch
deleted file mode 100644
index 45fa880..0000000
--- a/patches/portage.patch
+++ /dev/null
@@ -1,292 +0,0 @@
-2016-11-20  Magnus Granberg  <zorry@gentoo.org>
-
-	* tbc/pym/actions.py
-	Use the patched Scheduler and add build_dict so it can be used.
-	We use our own mydepgraph (build_mydepgraph) that calls backtrack_depgraph.
-	Return the output_buffer for emerge info.
-	And pass unresolvable in action_depclean so we can use it later.
-	* tbc/pym/main.py
-	Use our own patched actions.
-	We pass build_dict and session to some functions.
-	* tbc/pym/Scheduler.py
-	We copy Scheduler.py from portage and patch it.
-	Fix it so we can use add_buildlog_main().
-	We use add_buildlog_main() for logging.
-
---- a/pym/tbc/actions.py	2013-03-22 17:57:23.000000000 +0100
-+++ b/pym/tbc/actions.py	2013-03-22 19:00:43.265582143 +0100
-@@ -72,7 +72,7 @@ from _emerge.MetadataRegen import Metada
- from _emerge.Package import Package
- from _emerge.ProgressHandler import ProgressHandler
- from _emerge.RootConfig import RootConfig
--from _emerge.Scheduler import Scheduler
-+from tbc.Scheduler import Scheduler
- from _emerge.search import search
- from _emerge.SetArg import SetArg
- from _emerge.show_invalid_depstring_notice import show_invalid_depstring_notice
-@@ -83,6 +83,8 @@ from _emerge.UnmergeDepPriority import U
- from _emerge.UseFlagDisplay import pkg_use_display
- from _emerge.userquery import userquery
- 
-+from tbc.build_depgraph import build_mydepgraph
-+
- if sys.hexversion >= 0x3000000:
- 	long = int
- 	_unicode = str
-@@ -96,7 +96,7 @@ if sys.hexversion >= 0x3000000:
- else:
- 	_unicode = unicode
- 
--def action_build(emerge_config, trees=DeprecationWarning,
-+def action_build(emerge_config, build_dict, session, trees=DeprecationWarning,
- 	mtimedb=DeprecationWarning, myopts=DeprecationWarning,
- 	myaction=DeprecationWarning, myfiles=DeprecationWarning, spinner=None):
- 
-@@ -333,13 +334,8 @@ def action_build(emerge_config, trees=DeprecationWarning,
- 			print(darkgreen("emerge: It seems we have nothing to resume..."))
- 			return os.EX_OK
- 
--		try:
--			success, mydepgraph, favorites = backtrack_depgraph(
--				settings, trees, myopts, myparams, myaction, myfiles, spinner)
--		except portage.exception.PackageSetNotFound as e:
--			root_config = trees[settings['EROOT']]['root_config']
--			display_missing_pkg_set(root_config, e.value)
--			return 1
-+		success, settings, trees, mtimedb, mydepgraph = build_mydepgraph(settings,
-+		trees, mtimedb, myopts, myparams, myaction, myfiles, spinner, build_dict, session)
- 
- 		if success and mydepgraph.need_config_reload():
- 			load_emerge_config(emerge_config=emerge_config)
-@@ -351,7 +347,6 @@ def action_build(emerge_config, trees=DeprecationWarning,
- 			return 0
- 
- 		if not success:
--			mydepgraph.display_problems()
- 			return 1
- 
- 	mergecount = None
-@@ -613,7 +609,7 @@ def action_depclean(settings, trees, ldp
- 	# The calculation is done in a separate function so that depgraph
- 	# references go out of scope and the corresponding memory
- 	# is freed before we call unmerge().
--	rval, cleanlist, ordered, req_pkg_count = \
-+	rval, cleanlist, ordered, req_pkg_count, unresolvable = \
- 		calc_depclean(settings, trees, ldpath_mtimes,
- 			myopts, action, args_set, spinner)
- 
-@@ -816,7 +812,7 @@ def calc_depclean(settings, trees, ldpat
- 	resolver.display_problems()
- 
- 	if not success:
--		return 1, [], False, 0
-+		return 1, [], False, 0, []
- 
- 	def unresolved_deps():
- 
-@@ -827,7 +823,7 @@ def calc_depclean(settings, trees, ldpat
- 				unresolvable.add((dep.atom, dep.parent.cpv))
- 
- 		if not unresolvable:
--			return False
-+			return None
- 
- 		if unresolvable and not allow_missing_deps:
- 
-@@ -877,11 +873,12 @@ def calc_depclean(settings, trees, ldpat
- 					"dependencies then use %s." % good("--nodeps"))
- 			writemsg_level("".join("%s%s\n" % (prefix, line) for line in msg),
- 				level=logging.ERROR, noiselevel=-1)
--			return True
--		return False
-+			return unresolvable
-+		return None
- 
--	if unresolved_deps():
--		return 1, [], False, 0
-+	unresolvable = unresolved_deps()
-+	if not unresolvable is None:
-+		return 1, [], False, 0, unresolvable
- 
- 	graph = resolver._dynamic_config.digraph.copy()
- 	required_pkgs_total = 0
-@@ -1160,7 +1157,7 @@ def calc_depclean(settings, trees, ldpat
- 						priority=UnmergeDepPriority(runtime=True),
- 						root=pkg.root)):
- 						resolver.display_problems()
--						return 1, [], False, 0
-+						return 1, [], False, 0, []
- 
- 			writemsg_level("\nCalculating dependencies  ")
- 			success = resolver._complete_graph(
-@@ -1168,9 +1165,10 @@ def calc_depclean(settings, trees, ldpat
- 			writemsg_level("\b\b... done!\n")
- 			resolver.display_problems()
- 			if not success:
--				return 1, [], False, 0
--			if unresolved_deps():
--				return 1, [], False, 0
-+				return 1, [], False, 0, []
-+			unresolvable = unresolved_deps()
-+			if not unresolvable is None:
-+				return 1, [], False, 0, unresolvable
- 
- 			graph = resolver._dynamic_config.digraph.copy()
- 			required_pkgs_total = 0
-@@ -1179,7 +1177,7 @@ def calc_depclean(settings, trees, ldpat
- 					required_pkgs_total += 1
- 			cleanlist = create_cleanlist()
- 			if not cleanlist:
--				return 0, [], False, required_pkgs_total
-+				return 0, [], False, required_pkgs_total, []
- 			clean_set = set(cleanlist)
- 
- 	if clean_set:
-@@ -1289,8 +1287,8 @@ def calc_depclean(settings, trees, ldpat
- 					graph.remove(node)
- 					cleanlist.append(node.cpv)
- 
--		return 0, cleanlist, ordered, required_pkgs_total
--	return 0, [], False, required_pkgs_total
-+		return 0, cleanlist, ordered, required_pkgs_total, []
-+	return 0, [], False, required_pkgs_total, []
- 
- def action_deselect(settings, trees, opts, atoms):
- 	enter_invalid = '--ask-enter-invalid' in opts
-@@ -1692,11 +1692,8 @@ def action_info(settings, trees, myopts,
- 			unset_vars.append(k)
- 	if unset_vars:
- 		append("Unset:  "+", ".join(unset_vars))
--	append("")
--	append("")
--	writemsg_stdout("\n".join(output_buffer),
--		noiselevel=-1)
--	del output_buffer[:]
-+
-+	return False, output_buffer
- 
- 	# If some packages were found...
- 	if mypkgs:
-@@ -3607,7 +3607,7 @@ def repo_name_duplicate_check(trees):
- 
- 	return bool(ignored_repos)
- 
--def run_action(emerge_config):
-+def run_action(emerge_config, build_dict, session):
- 
- 	# skip global updates prior to sync, since it's called after sync
- 	if emerge_config.action not in ('help', 'info', 'sync', 'version') and \
-@@ -3258,7 +3252,7 @@ def run_action(emerge_config):
- 				except OSError:
- 					writemsg("Please install eselect to use this feature.\n",
- 							noiselevel=-1)
--		retval = action_build(emerge_config, spinner=spinner)
-+		retval = action_build(emerge_config, build_dict, session, spinner=spinner)
- 		post_emerge(emerge_config.action, emerge_config.opts,
- 			emerge_config.args, emerge_config.target_config.root,
- 			emerge_config.trees, emerge_config.target_config.mtimedb, retval)
---- a/pym/tbc/main.py	2013-03-22 17:57:23.000000000 +0100
-+++ b/pym/tbc/main.py	2012-12-06 03:32:56.104889716 +0100
-@@ -11,7 +11,7 @@ portage.proxy.lazyimport.lazyimport(glob
- 	'logging',
- 	'portage.util:writemsg_level',
- 	'textwrap',
--	'_emerge.actions:load_emerge_config,run_action,' + \
-+	'tbc.actions:load_emerge_config,run_action,' + \
- 		'validate_ebuild_environment',
- 	'_emerge.help:help@emerge_help',
- )
-@@ -968,15 +968,20 @@ def profile_check(trees, myaction):
- 		return 1
- 	return os.EX_OK
- 
--def emerge_main(args=None):
-+def emerge_main(args=None, build_dict=None, session=None):
- 	"""
- 	@param args: command arguments (default: sys.argv[1:])
- 	@type args: list
-+	@param build_dict: info of the build_job
-+	@type build_dict: dict
- 	"""
- 	if args is None:
- 		args = sys.argv[1:]
- 
- 	args = portage._decode_argv(args)
-+
-+	if build_dict is None:
-+		build_dict = {}
- 
- 	# Disable color until we're sure that it should be enabled (after
- 	# EMERGE_DEFAULT_OPTS has been parsed).
-@@ -1028,7 +1028,7 @@ def emerge_main(args=None):
- 		parse_opts(tmpcmdline)
- 
- 	try:
--		return run_action(emerge_config)
-+		return run_action(emerge_config, build_dict, session)
- 	finally:
- 		# Call destructors for our portdbapi instances.
- 		for x in emerge_config.trees.values():
---- a/pym/tbc/Scheduler.py	2013-03-22 17:57:23.000000000 +0100
-+++ b/pym/tbc/Scheduler.py	2012-12-21 02:09:28.082301168 +0100
-@@ -62,6 +62,8 @@ from _emerge.PackageMerge import Package
- from _emerge.PollScheduler import PollScheduler
- from _emerge.SequentialTaskQueue import SequentialTaskQueue
- 
-+from tbc.build_log import add_buildlog_main
-+
- if sys.hexversion >= 0x3000000:
- 	basestring = str
- 
-@@ -1254,8 +1251,9 @@ class Scheduler(PollScheduler):
- 
- 	def _do_merge_exit(self, merge):
- 		pkg = merge.merge.pkg
-+		settings = merge.merge.settings
-+		trees = self.trees
- 		if merge.returncode != os.EX_OK:
--			settings = merge.merge.settings
- 			build_dir = settings.get("PORTAGE_BUILDDIR")
- 			build_log = settings.get("PORTAGE_LOG_FILE")
- 
-@@ -1266,6 +1264,7 @@ class Scheduler(PollScheduler):
- 			if not self._terminated_tasks:
- 				self._failed_pkg_msg(self._failed_pkgs[-1], "install", "to")
- 				self._status_display.failed = len(self._failed_pkgs)
-+			add_buildlog_main(settings, pkg, trees)
- 			return
- 
- 		self._task_complete(pkg)
-@@ -1284,6 +1283,7 @@ class Scheduler(PollScheduler):
- 				self._pkg_cache.pop(pkg_to_replace, None)
- 
- 		if pkg.installed:
-+			add_buildlog_main(settings, pkg, trees)
- 			return
- 
- 		# Call mtimedb.commit() after each merge so that
-@@ -1294,6 +1294,7 @@ class Scheduler(PollScheduler):
- 		if not mtimedb["resume"]["mergelist"]:
- 			del mtimedb["resume"]
- 		mtimedb.commit()
-+		add_buildlog_main(settings, pkg, trees)
- 
- 	def _build_exit(self, build):
- 		self._running_tasks.pop(id(build), None)
-@@ -1318,6 +1319,8 @@ class Scheduler(PollScheduler):
- 				self._status_display.merges = len(self._task_queues.merge)
- 		else:
- 			settings = build.settings
-+			trees = self.trees
-+			pkg = build.pkg
- 			build_dir = settings.get("PORTAGE_BUILDDIR")
- 			build_log = settings.get("PORTAGE_LOG_FILE")
- 
-@@ -1329,6 +1332,7 @@ class Scheduler(PollScheduler):
- 				self._failed_pkg_msg(self._failed_pkgs[-1], "emerge", "for")
- 				self._status_display.failed = len(self._failed_pkgs)
- 			self._deallocate_config(build.settings)
-+			add_buildlog_main(settings, pkg, trees)
- 		self._jobs -= 1
- 		self._status_display.running = self._jobs
- 		self._schedule()

diff --git a/patches/repoman.patch b/patches/repoman.patch
deleted file mode 100644
index 64ea894..0000000
--- a/patches/repoman.patch
+++ /dev/null
@@ -1,42 +0,0 @@
-2016-11-20  Magnus Granberg  <zorry@gentoo.org>
-
-	* tbc/pym/repoman.py
-	We add config_root and pkgdir, remove the actions step and return
-	vcs_settings.qatracker and qawarnings.
-
---- a/pym/tbc/repoman.py	2015-09-22 02:20:41.000000000 +0200
-+++ b/pym/tbc/repoman.py	2015-10-04 20:21:57.586494104 +0200
-@@ -45,8 +45,9 @@ bad = create_color_func("BAD")
- os.umask(0o22)
- 
- 
--def repoman_main(argv):
--	config_root = os.environ.get("PORTAGE_CONFIGROOT")
-+def repoman_main(argv, config_root=None, pkgdir=None):
-+	if config_root is None:
-+		config_root = os.environ.get("PORTAGE_CONFIGROOT")
- 	repoman_settings = portage.config(config_root=config_root, local_config=False)
- 
- 	if repoman_settings.get("NOCOLOR", "").lower() in ("yes", "true") or \
-@@ -71,6 +72,9 @@ def repoman_main(argv):
- 	# commit (like if Manifest generation fails).
- 	can_force = True
- 
-+	if not pkgdir is None:
-+            os.chdir(pkgdir)
-+
- 	portdir, portdir_overlay, mydir = utilities.FindPortdir(repoman_settings)
- 	if portdir is None:
- 		sys.exit(1)
-@@ -159,10 +163,4 @@ def repoman_main(argv):
- 	qa_output = qa_output.getvalue()
- 	qa_output = qa_output.splitlines(True)
- 
--	# output the results
--	actions = Actions(repo_settings, options, scanner, vcs_settings)
--	if actions.inform(can_force.get(), result):
--		# perform any other actions
--		actions.perform(qa_output)
--
--	sys.exit(0)
-+	return vcs_settings.qatracker, qawarnings

diff --git a/pym/tbc/ConnectionManager.py b/pym/tbc/ConnectionManager.py
deleted file mode 100644
index 40abfd5..0000000
--- a/pym/tbc/ConnectionManager.py
+++ /dev/null
@@ -1,22 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import logging
-
-def NewConnection(tbc_settings_dict):
-	backend=tbc_settings_dict['sql_backend']
-	host=tbc_settings_dict['sql_host']
-	user=tbc_settings_dict['sql_user']
-	password=tbc_settings_dict['sql_passwd']
-	database=tbc_settings_dict['sql_db']
-	if backend == 'mysql':
-		try:
-			from sqlalchemy import create_engine
-		except ImportError:
-			print("Please install a recent version of dev-python/sqlalchemy for Python")
-			sys.exit(1)
-		#logging.basicConfig()
-		#logging.getLogger('sqlalchemy.engine').setLevel(logging.INFO)
-		mysqldriver = 'mysql+mysqlconnector'
-		return create_engine(mysqldriver + '://' + user + ':' + password + '@' + host + '/' + database, pool_recycle=120)

diff --git a/pym/tbc/build_depgraph.py b/pym/tbc/build_depgraph.py
deleted file mode 100644
index 5d8b93e..0000000
--- a/pym/tbc/build_depgraph.py
+++ /dev/null
@@ -1,65 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-from _emerge.create_depgraph_params import create_depgraph_params
-from _emerge.depgraph import backtrack_depgraph
-import portage
-portage.proxy.lazyimport.lazyimport(globals(),
-	'tbc.actions:load_emerge_config',
-)
-from portage.exception import PackageSetNotFound
-
-from tbc.build_log import log_fail_queru
-
-def build_mydepgraph(settings, trees, mtimedb, myopts, myparams, myaction, myfiles, spinner, build_dict, session):
-	try:
-		success, mydepgraph, favorites = backtrack_depgraph(
-		settings, trees, myopts, myparams, myaction, myfiles, spinner)
-	except portage.exception.PackageSetNotFound as e:
-		root_config = trees[settings["ROOT"]]["root_config"]
-		display_missing_pkg_set(root_config, e.value)
-		build_dict['type_fail'] = "depgraph fail\n"
-		build_dict['check_fail'] = True
-	else:
-		if not success:
-			repeat = True
-			repeat_times = 0
-			while repeat:
-				if mydepgraph._dynamic_config._needed_p_mask_changes:
-					build_dict['type_fail'] = "Mask package or dep\n"
-					build_dict['check_fail'] = True
-				elif mydepgraph._dynamic_config._needed_use_config_changes:
-					mydepgraph._display_autounmask()
-					build_dict['type_fail'] = "Need use change\n"
-					build_dict['check_fail'] = True
-				elif mydepgraph._dynamic_config._slot_conflict_handler:
-					build_dict['type_fail'] = "Slot blocking\n"
-					build_dict['check_fail'] = True
-				elif mydepgraph._dynamic_config._circular_deps_for_display:
-					build_dict['type_fail'] = "Circular Deps\n"
-					build_dict['check_fail'] = True
-				elif mydepgraph._dynamic_config._unsolvable_blockers:
-					build_dict['type_fail'] = "Blocking packages\n"
-					build_dict['check_fail'] = True
-				else:
-					build_dict['type_fail'] = "Dep calc fail\n"
-					build_dict['check_fail'] = True
-				mydepgraph.display_problems()
-				if repeat_times is 2:
-					repeat = False
-					log_fail_queru(session, build_dict, settings)
-				else:
-					repeat_times = repeat_times + 1
-					settings, trees, mtimedb = load_emerge_config()
-					myparams = create_depgraph_params(myopts, myaction)
-					try:
-						success, mydepgraph, favorites = backtrack_depgraph(
-						settings, trees, myopts, myparams, myaction, myfiles, spinner)
-					except portage.exception.PackageSetNotFound as e:
-						root_config = trees[settings["ROOT"]]["root_config"]
-						display_missing_pkg_set(root_config, e.value)
-					if success:
-						repeat = False
-
-	return success, settings, trees, mtimedb, mydepgraph

diff --git a/pym/tbc/build_job.py b/pym/tbc/build_job.py
deleted file mode 100644
index 8d10a68..0000000
--- a/pym/tbc/build_job.py
+++ /dev/null
@@ -1,202 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import portage
-import os
-import re
-import sys
-import signal
-
-from portage import _encodings
-from portage import _unicode_decode
-from portage.versions import cpv_getkey
-from portage.dep import check_required_use
-from portage.checksum import perform_checksum
-from tbc.depclean import do_depclean
-from tbc.flags import tbc_use_flags
-from tbc.qachecks import check_file_in_manifest
-from tbc.main import emerge_main
-from tbc.build_log import log_fail_queru
-from tbc.actions import load_emerge_config
-from tbc.sqlquerys import add_logs, get_packages_to_build, update_buildjobs_status, is_build_job_done, get_ebuild_restrictions
-
-class build_job_action(object):
-
-	def __init__(self, config_id, session):
-		self._config_id = config_id
-		self._session = session
-
-	def make_build_list(self, build_dict, settings, portdb):
-		cp = build_dict['cp']
-		repo = build_dict['repo']
-		package = build_dict['package']
-		cpv = build_dict['cpv']
-		pkgdir = portdb.getRepositoryPath(repo) + "/" + cp
-		build_use_flags_list = []
-		try:
-			ebuild_version_checksum_tree = perform_checksum(pkgdir + "/" + package + "-" + build_dict['ebuild_version'] + ".ebuild", "SHA256")[0]
-		except:
-			ebuild_version_checksum_tree = None
-		if ebuild_version_checksum_tree == build_dict['checksum']:
-			manifest_error = check_file_in_manifest(pkgdir, settings, portdb, cpv, build_use_flags_list, repo)
-			if manifest_error is None:
-				init_flags = tbc_use_flags(settings, portdb, cpv)
-				build_use_flags_list = init_flags.comper_useflags(build_dict)
-				log_msg = "build_use_flags_list %s" % (build_use_flags_list,)
-				add_logs(self._session, log_msg, "info", self._config_id)
-				manifest_error = check_file_in_manifest(pkgdir, settings, portdb, cpv, build_use_flags_list, repo)
-			if manifest_error is None:
-				build_dict['check_fail'] = False
-				build_cpv_dict = {}
-				build_cpv_dict[cpv] = build_use_flags_list
-				log_msg = "build_cpv_dict: %s" % (build_cpv_dict,)
-				add_logs(self._session, log_msg, "info", self._config_id)
-				return build_cpv_dict
-			build_dict['type_fail'] = "Manifest error"
-			build_dict['check_fail'] = True
-			log_msg = "Manifest error: %s:%s" % (cpv, manifest_error)
-			add_logs(self._session, log_msg, "info", self._config_id)
-		else:
-			build_dict['type_fail'] = "Wrong ebuild checksum"
-			build_dict['check_fail'] = True
-		if build_dict['check_fail'] is True:
-				log_fail_queru(self._session, build_dict, settings)
-		return None
-
-	def build_procces(self, buildqueru_cpv_dict, build_dict, settings, portdb):
-		build_cpv_list = []
-		depclean_fail = True
-		disable_test_features = False
-		enable_test_features = False
-		restrictions_test = False
-		restrictions_list= get_ebuild_restrictions(self._session, build_dict['ebuild_id'])
-		if restrictions_list:
-			if "test" in restrictions_list:
-				restrictions_test = True
-		if restrictions_test and "test" in settings.features:
-			disable_test_features = True
-		for k, build_use_flags_list in buildqueru_cpv_dict.items():
-			build_cpv_list.append("=" + k)
-			if not build_use_flags_list == None:
-				build_use_flags = ""
-				for flags in build_use_flags_list:
-					build_use_flags = build_use_flags + flags + " "
-				filetext = '=' + k + ' ' + build_use_flags
-				log_msg = "filetext: %s" % filetext
-				add_logs(self._session, log_msg, "info", self._config_id)
-				with open("/etc/portage/package.use/99_autounmask", "a") as f:
-					f.write(filetext)
-					f.write('\n')
-					f.close
-
-			if not build_dict['build_useflags'] is None:
-				if "test" in build_dict['build_useflags']:
-					if build_dict['build_useflags']['test'] is False and "test" in settings.features:
-						disable_test_features = True
-					if build_dict['build_useflags']['test'] is True and not disable_test_features and "test" not in settings.features:
-						enable_test_features = True
-			if disable_test_features:
-				filetext = '=' + k + ' ' + 'notest.conf'
-				log_msg = "filetext: %s" % filetext
-				add_logs(self._session, log_msg, "info", self._config_id)
-				with open("/etc/portage/package.env/99_env", "a") as f:
-					f.write(filetext)
-					f.write('\n')
-					f.close
-			if enable_test_features:
-				filetext = '=' + k + ' ' + 'test.conf'
-				log_msg = "filetext: %s" % filetext
-				add_logs(self._session, log_msg, "info", self._config_id)
-				with open("/etc/portage/package.env/99_env", "a") as f:
-					f.write(filetext)
-					f.write('\n')
-					f.close
-
-		log_msg = "build_cpv_list: %s" % (build_cpv_list,)
-		add_logs(self._session, log_msg, "info", self._config_id)
-
-		# We remove the binary package if removebin is true
-		if build_dict['removebin']:
-			package = build_dict['package']
-			pv = package + "-" + build_dict['ebuild_version']
-			binfile = settings['PKGDIR'] + "/" + build_dict['category'] + "/" + pv + ".tbz2"
-			try:
-				os.remove(binfile)
-			except:
-				log_msg = "Binary file was not removed or found: %s" % (binfile,)
-				add_logs(self._session, log_msg, "info", self._config_id)
-
-		argscmd = []
-		for emerge_option in build_dict['emerge_options']:
-			if emerge_option == '--depclean':
-				pass
-			elif emerge_option == '--nodepclean':
-				pass
-			elif emerge_option == '--nooneshot':
-				pass
-			else:
-				if not emerge_option in argscmd:
-					argscmd.append(emerge_option)
-		for build_cpv in build_cpv_list:
-			argscmd.append(build_cpv)
-		print("Emerge options: %s" % argscmd)
-		log_msg = "argscmd: %s" % (argscmd,)
-		add_logs(self._session, log_msg, "info", self._config_id)
-
-		# Call main_emerge to build the package in build_cpv_list
-		print("Build: %s" % build_dict)
-		update_buildjobs_status(self._session, build_dict['build_job_id'], 'Building', self._config_id)
-		build_fail = emerge_main(argscmd, build_dict, self._session)
-		# Run depclean
-		if  '--depclean' in build_dict['emerge_options'] and not '--nodepclean' in build_dict['emerge_options']:
-			depclean_fail = do_depclean()
-		try:
-			os.remove("/etc/portage/package.use/99_autounmask")
-			with open("/etc/portage/package.use/99_autounmask", "a") as f:
-				f.close
-			os.remove("/etc/portage/package.env/99_env")
-			with open("/etc/portage/package.env/99_env/", "a") as f:
-				f.close
-		except:
-			pass
-
-		if is_build_job_done(self._session, build_dict['build_job_id']):
-			update_buildjobs_status(self._session, build_dict['build_job_id'], 'Looked', self._config_id)
-			log_msg = "build_job %s was not removed" % (build_dict['build_job_id'],)
-			add_logs(self._session, log_msg, "info", self._config_id)
-			print("qurery was not removed")
-			build_dict['type_fail'] = "Querey was not removed\n"
-			build_dict['check_fail'] = True
-			log_fail_queru(self._session, build_dict, settings)
-		if build_fail is True:
-			build_dict['type_fail'] = "Emerge faild\n"
-			build_dict['check_fail'] = True
-			log_msg = "Emerge faild!"
-			add_logs(self._session, log_msg, "info", self._config_id)
-			return True
-		return False
-
-	def procces_build_jobs(self):
-		build_dict = {}
-		build_dict = get_packages_to_build(self._session, self._config_id)
-		if build_dict is None:
-			return
-		print("build_dict: %s" % (build_dict,))
-		log_msg = "build_dict: %s" % (build_dict,)
-		add_logs(self._session, log_msg, "info", self._config_id)
-		if not build_dict['ebuild_id'] is None and build_dict['checksum'] is not None:
-			settings, trees, mtimedb = load_emerge_config()
-			portdb = trees[settings["ROOT"]]["porttree"].dbapi
-			buildqueru_cpv_dict = self.make_build_list(build_dict, settings, portdb)
-			log_msg = "buildqueru_cpv_dict: %s" % (buildqueru_cpv_dict,)
-			add_logs(self._session, log_msg, "info", self._config_id)
-			if buildqueru_cpv_dict is None:
-				return
-			fail_build_procces = self.build_procces(buildqueru_cpv_dict, build_dict, settings, portdb)
-			return
-		if not build_dict['emerge_options'] is [] and build_dict['ebuild_id'] is None:
-			return
-		if not build_dict['ebuild_id'] is None and build_dict['emerge_options'] is None:
-			pass
-			# del_old_queue(self._session, build_dict['queue_id'])

diff --git a/pym/tbc/build_log.py b/pym/tbc/build_log.py
deleted file mode 100644
index e9e5dd0..0000000
--- a/pym/tbc/build_log.py
+++ /dev/null
@@ -1,383 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import re
-import os
-import platform
-import hashlib
-import logging
-
-from portage.versions import catpkgsplit, cpv_getversion
-import portage
-from portage.util import writemsg, \
-	writemsg_level, writemsg_stdout
-from portage import _encodings
-from portage import _unicode_encode
-from portage.checksum import perform_checksum
-from _emerge.main import parse_opts
-
-portage.proxy.lazyimport.lazyimport(globals(),
-	'tbc.actions:action_info,load_emerge_config',
-)
-from tbc.irk import send_irk
-from tbc.qachecks import check_repoman, repoman_full
-from tbc.text import get_log_text_dict
-from tbc.readconf import read_config_settings
-from tbc.flags import tbc_use_flags
-from tbc.ConnectionManager import NewConnection
-from tbc.sqlquerys import get_config_id, get_ebuild_id_db, add_new_buildlog, \
-	get_package_info, get_build_job_id, get_use_id, get_config_info, get_hilight_info, get_error_info_list, \
-	add_e_info, get_fail_times, add_fail_times, update_fail_times, del_old_build_jobs, add_old_ebuild, \
-	update_buildjobs_status, add_repoman_qa, get_config_id_fqdn, get_setup_info, \
-	add_repoman_log,  get_tbc_config
-from tbc.log import write_log
-
-from sqlalchemy.orm import sessionmaker
-
-def check_repoman_full(session, pkgdir, package_id, config_id, cpv=False):
-	# Check cp with repoman repoman full
-	write_log(session, 'Repoman Check', "info", config_id, 'build_log.check_repoman_full')
-	status = repoman_full(session, pkgdir, config_id)
-	repoman_hash = hashlib.sha256()
-	if cpv:
-		ebuild_version_tree = portage.versions.cpv_getversion(cpv)
-	if status:
-		repoman_dict = {}
-		for k, v in status.items():
-			repoman_log2 = []
-			for line in v:
-				if cpv:
-					if re.search(ebuild_version_tree, line):
-						repoman_log2.append(line)
-				else: 
-					repoman_log2.append(line)
-			if not repoman_log2 == []:
-				repoman_dict[k] = repoman_log2
-		if not repoman_dict == {}:
-			repoman_log = ""
-			for k, v in repoman_dict.items():
-				repoman_log = repoman_log + k + "\n"
-				repoman_hash.update(k.encode('utf-8'))
-				for line in v:
-					repoman_log = repoman_log + line + "\n"
-					repoman_hash.update(line.encode('utf-8'))
-			add_repoman_log(session, package_id, repoman_log, repoman_hash.hexdigest())
-			write_log(session, 'Repoman Check Fail\n' + repoman_log, "warning", config_id, 'build_log.check_repoman_full')
-			return repoman_log
-	write_log(session, 'Repoman Check Pass', "info", config_id, 'build_log.check_repoman_full')
-	return False
-
-def get_build_dict_db(session, config_id, settings, tbc_settings_dict, pkg):
-	myportdb = portage.portdbapi(mysettings=settings)
-	cpvr_list = catpkgsplit(pkg.cpv, silent=1)
-	categories = cpvr_list[0]
-	package = cpvr_list[1]
-	repo = pkg.repo
-	ebuild_version = cpv_getversion(pkg.cpv)
-	log_msg = "Setting up logging for %s:%s" % (pkg.cpv, repo,)
-	write_log(session, log_msg, "info", config_id, 'build_log.get_build_dict_db')
-	PackageInfo = get_package_info(session, categories, package, repo)
-	build_dict = {}
-	build_dict['ebuild_version'] = ebuild_version
-	build_dict['package_id'] = PackageInfo.PackageId
-	build_dict['cpv'] = pkg.cpv
-	build_dict['categories'] = categories
-	build_dict['package'] = package
-	build_dict['repo'] = repo
-	build_dict['config_id'] = config_id
-	init_useflags = tbc_use_flags(settings, myportdb, pkg.cpv)
-	iuse_flags_list, final_use_list = init_useflags.get_flags_pkg(pkg, settings)
-	iuse = []
-	for iuse_line in iuse_flags_list:
-		iuse.append(init_useflags.reduce_flag(iuse_line))
-	iuse_flags_list2 = list(set(iuse))
-	use_enable = final_use_list
-	use_disable = list(set(iuse_flags_list2).difference(set(use_enable)))
-	use_flagsDict = {}
-	for x in use_enable:
-		use_id = get_use_id(session, x)
-		use_flagsDict[use_id] = True
-	for x in use_disable:
-		use_id = get_use_id(session, x)
-		use_flagsDict[use_id] = False
-	if use_enable == [] and use_disable == []:
-		build_dict['build_useflags'] = None
-	else:
-		build_dict['build_useflags'] = use_flagsDict
-	pkgdir = myportdb.getRepositoryPath(repo) + "/" + categories + "/" + package
-	ebuild_version_checksum_tree = perform_checksum(pkgdir+ "/" + package + "-" + ebuild_version + ".ebuild", "SHA256")[0]
-	build_dict['checksum'] = ebuild_version_checksum_tree
-	ebuild_id_list, status = get_ebuild_id_db(session, build_dict['checksum'], build_dict['package_id'], build_dict['ebuild_version'])
-	if status:
-		if ebuild_id_list is None:
-			log_msg = "%s:%s Don't have any ebuild_id!" % (pkg.cpv, repo,)
-			write_log(session, log_msg, "error", config_id, 'build_log.get_build_dict_db')
-		else:
-			old_ebuild_id_list = []
-			for ebuild_id in ebuild_id_list:
-				log_msg = "%s:%s:%s Dups of checksums" % (pkg.cpv, repo, ebuild_id,)
-				write_log(session, log_msg, "error", config_id, 'build_log.get_build_dict_db')
-				old_ebuild_id_list.append(ebuild_id)
-			add_old_ebuild(session, old_ebuild_id_list)
-		return
-	build_dict['ebuild_id'] = ebuild_id_list
-
-	build_job_id = get_build_job_id(session, build_dict)
-	if build_job_id is None:
-		build_dict['build_job_id'] = None
-	else:
-		build_dict['build_job_id'] = build_job_id
-	return build_dict
-
-def search_buildlog(session, logfile_text_dict, max_text_lines):
-	log_search_list = get_hilight_info(session)
-	hilight_list = []
-	for index, text_line in logfile_text_dict.items():
-		for search_pattern in log_search_list:
-			if re.search(search_pattern.HiLightSearch, text_line):
-				hilight_tmp = {}
-				hilight_tmp['startline'] = index - search_pattern.HiLightStart
-				hilight_tmp['hilight'] = search_pattern.HiLightCssId
-				if search_pattern.HiLightSearchEnd == "":
-					hilight_tmp['endline'] = index + search_pattern.HiLightEnd
-					if hilight_tmp['endline'] > max_text_lines:
-						hilight_tmp['endline'] = max_text_lines
-				elif not search_pattern.HiLightSearchEnd == "" and (index + 1) >= max_text_lines:
-						hilight_tmp['endline'] = max_text_lines
-				else:
-					i = index + 1
-					match = True
-					while match:
-						if i >= max_text_lines:
-							match = False
-							break
-						if re.search(search_pattern.HiLightSearchPattern, logfile_text_dict[i]) and re.search(search_pattern.HiLightSearchPattern, logfile_text_dict[i + 1]):
-							for search_pattern2 in log_search_list:
-								if re.search(search_pattern2.HiLightSearch, logfile_text_dict[i]):
-									match = False
-							if match:
-								i = i + 1
-						elif re.search(search_pattern.HiLightSearchPattern, logfile_text_dict[i]) and re.search(search_pattern.HiLightSearchEnd, logfile_text_dict[i + 1]):
-							i = i + 1
-							match = False
-						else:
-							match = False
-					if i >= max_text_lines:
-						hilight_tmp['endline'] = max_text_lines
-					if re.search(search_pattern.HiLightSearchEnd, logfile_text_dict[i]):
-						hilight_tmp['endline'] = i
-					else:
-						hilight_tmp['endline'] = i - 1
-				hilight_list.append(hilight_tmp)
-
-	new_hilight_dict = {}
-	for hilight_tmp in hilight_list:
-		add_new_hilight = True
-		add_new_hilight_middel = None
-		for k, v in sorted(new_hilight_dict.items()):
-			if hilight_tmp['startline'] == hilight_tmp['endline']:
-				if v['endline'] == hilight_tmp['startline'] or v['startline'] == hilight_tmp['startline']:
-					add_new_hilight = False
-				if hilight_tmp['startline'] > v['startline'] and hilight_tmp['startline'] < v['endline']:
-					add_new_hilight = False
-					add_new_hilight_middel = k
-			else:
-				if v['endline'] == hilight_tmp['startline'] or v['startline'] == hilight_tmp['startline']:
-					add_new_hilight = False
-				if hilight_tmp['startline'] > v['startline'] and hilight_tmp['startline'] < v['endline']:
-					add_new_hilight = False
-		if add_new_hilight is True:
-			adict = {}
-			adict['startline'] = hilight_tmp['startline']
-			adict['hilight_css_id'] = hilight_tmp['hilight']
-			adict['endline'] = hilight_tmp['endline']
-			new_hilight_dict[hilight_tmp['startline']] = adict
-		if not add_new_hilight_middel is None:
-			adict1 = {}
-			adict2 = {}
-			adict3 = {}
-			adict1['startline'] = new_hilight_dict[add_new_hilight_middel]['startline']
-			adict1['endline'] = hilight_tmp['startline'] -1
-			adict1['hilight_css_id'] = new_hilight_dict[add_new_hilight_middel]['hilight']
-			adict2['startline'] = hilight_tmp['startline']
-			adict2['hilight_css_id'] = hilight_tmp['hilight']
-			adict2['endline'] = hilight_tmp['endline']
-			adict3['startline'] = hilight_tmp['endline'] + 1
-			adict3['hilight_css_id'] = new_hilight_dict[add_new_hilight_middel]['hilight']
-			adict3['endline'] = new_hilight_dict[add_new_hilight_middel]['endline']
-			del new_hilight_dict[add_new_hilight_middel]
-			new_hilight_dict[adict1['startline']] = adict1
-			new_hilight_dict[adict2['startline']] = adict2
-			new_hilight_dict[adict3['startline']] = adict3
-	return new_hilight_dict
-
-def get_buildlog_info(session, settings, pkg, build_dict, config_id):
-	myportdb = portage.portdbapi(mysettings=settings)
-	logfile_text_dict, max_text_lines = get_log_text_dict(settings.get("PORTAGE_LOG_FILE"))
-	hilight_dict = search_buildlog(session, logfile_text_dict, max_text_lines)
-	error_log_list = []
-	qa_error_list = []
-	repoman_error_list = []
-	sum_build_log_list = []
-	error_info_list = get_error_info_list(session)
-	for k, v in sorted(hilight_dict.items()):
-		if v['startline'] == v['endline']:
-			error_log_list.append(logfile_text_dict[k])
-			if v['hilight_css_id'] == 3: # qa = 3
-				qa_error_list.append(logfile_text_dict[k])
-		else:
-			i = k
-			while i != (v['endline'] + 1):
-				error_log_list.append(logfile_text_dict[i])
-				if v['hilight_css_id'] == 3: # qa = 3
-					qa_error_list.append(logfile_text_dict[i])
-				i = i +1
-
-	# Run repoman full
-	element = portage.versions.cpv_getkey(build_dict['cpv']).split('/')
-	categories = element[0]
-	package = element[1]
-	pkgdir = myportdb.getRepositoryPath(build_dict['repo']) + "/" + categories + "/" + package
-	repoman_error_list = check_repoman_full(session, pkgdir, build_dict['package_id'], config_id, build_dict['cpv'])
-	build_log_dict = {}
-	error_search_line = "^ \\* ERROR: "
-	build_log_dict['fail'] = False
-	if repoman_error_list:
-		sum_build_log_list.append(1) # repoman = 1
-		build_log_dict['fail'] = True
-	if qa_error_list != []:
-		sum_build_log_list.append(2) # qa = 2
-		build_log_dict['fail'] = True
-	else:
-		qa_error_list = False
-	for error_log_line in error_log_list:
-		if re.search(error_search_line, error_log_line):
-			build_log_dict['fail'] = True
-			for error_info in error_info_list:
-				if re.search(error_info.ErrorSearch, error_log_line):
-					sum_build_log_list.append(error_info.ErrorId)
-	build_log_dict['repoman_error_list'] = repoman_error_list
-	build_log_dict['qa_error_list'] = qa_error_list
-	build_log_dict['error_log_list'] = error_log_list
-	build_log_dict['summary_error_list'] = sum_build_log_list
-	build_log_dict['hilight_dict'] = hilight_dict
-	return build_log_dict
-
-def get_emerge_info_id(settings, trees, session, config_id):
-	args = []
-	args.append("--info")
-	myaction, myopts, myfiles = parse_opts(args, silent=True)
-	status, emerge_info_list = action_info(settings, trees, myopts, myfiles)
-	emerge_info = ""
-	return "\n".join(emerge_info_list)
-
-def add_buildlog_main(settings, pkg, trees):
-	tbc_settings = read_config_settings()
-	Session = sessionmaker(bind=NewConnection(tbc_settings))
-	session = Session()
-	config_id = get_config_id_fqdn(session, tbc_settings['hostname'])
-	ConfigInfo = get_config_info(session, config_id)
-	SetupInfo = get_setup_info(session, ConfigInfo.SetupId)
-	host_config = ConfigInfo.Hostname +"/" + SetupInfo.Setup
-	if pkg.type_name == "binary":
-		build_dict = None
-	else:
-		build_dict = get_build_dict_db(session, config_id, settings, tbc_settings, pkg)
-	if build_dict is None:
-		log_msg = "Package %s:%s is NOT logged." % (pkg.cpv, pkg.repo,)
-		write_log(session, log_msg, "info", config_id, 'build_log.add_buildlog_main')
-		session.close()
-		return
-	build_log_dict = {}
-	build_log_dict = get_buildlog_info(session, settings, pkg, build_dict, config_id)
-	error_log_list = build_log_dict['error_log_list']
-	build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(host_config)[1]
-	build_error = ""
-	log_hash = hashlib.sha256()
-	if error_log_list != []:
-		for log_line in error_log_list:
-			if not re.search(build_log_dict['logfilename'], log_line):
-				build_error = build_error + log_line
-		log_hash.update(build_error.encode('utf-8'))
-	build_log_dict['build_error'] = build_error
-	build_log_dict['log_hash'] = log_hash.hexdigest()
-	log_msg = "Logfile name: %s" % (settings.get("PORTAGE_LOG_FILE"),)
-	write_log(session, log_msg, "info", config_id, 'build_log.add_buildlog_main')
-	build_log_dict['emerge_info'] = get_emerge_info_id(settings, trees, session, config_id)
-	log_id = add_new_buildlog(session, build_dict, build_log_dict)
-
-	if log_id is None:
-		log_msg = "Package %s:%s is NOT logged." % (pkg.cpv, pkg.repo,)
-		write_log(session, log_msg, "info", config_id, 'build_log.add_buildlog_main')
-	else:
-		add_repoman_qa(session, build_log_dict, log_id)
-		os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o664)
-		log_msg = "Package: %s:%s is logged." % (pkg.cpv, pkg.repo,)
-		write_log(session, log_msg, "info", config_id, 'build_log.add_buildlog_main')
-		build_msg = "BUILD: PASS"
-		qa_msg = "QA: PASS"
-		repoman_msg = "REPOMAN: PASS"
-		if build_log_dict['fail']:
-			for error_id in build_log_dict['summary_error_list']:
-				if error_id == 1:
-					repoman_msg = "REPOMAN: FAILED"
-				elif error_id == 2:
-					qa_msg = "QA: FAILED"
-				else:
-					build_msg = "BUILD: FAILED"
-		tbc_config = get_tbc_config(session)
-		msg = "Package: %s Repo: %s %s %s %s Weblink http://%s/%s\n" % (pkg.cpv, pkg.repo, build_msg, repoman_msg, qa_msg, tbc_config.WebIrker, log_id,)
-		write_log(session, msg, "info", config_id, 'build_log.add_buildlog_main')
-		send_irk(msg, tbc_config.HostIrker)
-	session.close()
-
-def log_fail_queru(session, build_dict, settings):
-	config_id = build_dict['config_id']
-	if get_fail_times(session, build_dict):
-		fail_querue_dict = {}
-		fail_querue_dict['build_job_id'] = build_dict['build_job_id']
-		fail_querue_dict['fail_type'] = build_dict['type_fail']
-		fail_querue_dict['fail_times'] = 1
-		add_fail_times(session, fail_querue_dict)
-		update_buildjobs_status(session, build_dict['build_job_id'], 'Waiting', config_id)
-	else:
-		build_log_dict = {}
-		error_log_list = []
-		sum_build_log_list = []
-		sum_build_log_list.append(3) # Others errors
-		error_log_list.append(build_dict['type_fail'])
-		build_log_dict['summary_error_list'] = sum_build_log_list
-		if build_dict['type_fail'] == 'merge fail':
-			error_log_list = []
-			for k, v in build_dict['failed_merge'].items():
-				error_log_list.append(v['fail_msg'])
-		build_log_dict['error_log_list'] = error_log_list
-		build_error = ""
-		if error_log_list != []:
-			for log_line in error_log_list:
-				build_error = build_error + log_line
-		build_log_dict['build_error'] = build_error
-		build_log_dict['log_hash'] = '0'
-		useflagsdict = {}
-		if build_dict['build_useflags'] != {}:
-			for k, v in build_dict['build_useflags'].items():
-				use_id = get_use_id(session, k)
-				useflagsdict[use_id] = v
-			build_dict['build_useflags'] = useflagsdict
-		else:
-			build_dict['build_useflags'] = None
-		if settings.get("PORTAGE_LOG_FILE") is not None:
-			ConfigInfo = get_config_info(session, config_id)
-			SetupInfo = get_setup_info(session, ConfigInfo.SetupId)
-			host_config = ConfigInfo.Hostname + "/" + SetupInfo.Setup
-			build_log_dict['logfilename'] = settings.get("PORTAGE_LOG_FILE").split(host_config)[1]
-			os.chmod(settings.get("PORTAGE_LOG_FILE"), 0o664)
-		else:
-			build_log_dict['logfilename'] = ""
-			build_log_dict['hilight_dict'] = {}
-		settings2, trees, tmp = load_emerge_config()
-		build_log_dict['emerge_info'] = get_emerge_info_id(settings2, trees, session, config_id)
-		build_log_dict['fail'] = True
-		log_id = add_new_buildlog(session, build_dict, build_log_dict)
-		del_old_build_jobs(session, build_dict['build_job_id'])

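For reference, the removed search_buildlog() returns a dict keyed by start line, where each
value carries 'startline', 'endline' and 'hilight_css_id'; get_buildlog_info() then walks those
ranges to collect the error lines. A minimal, self-contained sketch of consuming that structure
(the sample data below is made up for illustration):

    # Hypothetical highlight ranges in the shape produced by search_buildlog().
    hilight_dict = {
        12: {'startline': 12, 'endline': 14, 'hilight_css_id': 3},  # a QA block
        40: {'startline': 40, 'endline': 40, 'hilight_css_id': 1},  # a single line
    }
    logfile_text_dict = {i: "log line %d" % i for i in range(1, 50)}

    error_log_list = []
    for start, block in sorted(hilight_dict.items()):
        for line_no in range(block['startline'], block['endline'] + 1):
            error_log_list.append(logfile_text_dict[line_no])
    print("\n".join(error_log_list))
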
diff --git a/pym/tbc/buildquerydb.py b/pym/tbc/buildquerydb.py
deleted file mode 100644
index 7fe7f82..0000000
--- a/pym/tbc/buildquerydb.py
+++ /dev/null
@@ -1,102 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import sys
-import os
-
-# Get the options from the config file set in tbc.readconf
-from tbc.readconf import get_conf_settings
-reader=get_conf_settings()
-tbc_settings_dict=reader.read_tbc_settings_all()
-config_profile = tbc_settings_dict['tbc_config']
-
-from tbc.check_setup import check_make_conf
-from tbc.sync import git_pull
-from tbc.package import tbc_package
-import portage
-import multiprocessing
-
-def add_cpv_query_pool(mysettings, myportdb, config_id, cp, repo):
-	conn =0
-	init_package = tbc_package(mysettings, myportdb)
-	# FIXME: remove the check for tbc when in tree
-	if cp != "dev-python/tbc":
-		build_dict = {}
-		packageDict = {}
-		ebuild_id_list = []
-		# split the cp to categories and package
-		element = cp.split('/')
-		categories = element[0]
-		package = element[1]
-		log_msg = "C %s:%s" % (cp, repo,)
-		add_tbc_logs(conn, log_msg, "info", config_id)
-		pkgdir = myportdb.getRepositoryPath(repo) + "/" + cp
-		config_id_list = []
-		config_id_list.append(config_id)
-		config_cpv_listDict = init_package.config_match_ebuild(cp, config_id_list)
-		if config_cpv_listDict != {}:
-			cpv = config_cpv_listDict[config_id]['cpv']
-			packageDict[cpv] = init_package.get_packageDict(pkgdir, cpv, repo)
-			build_dict['checksum'] = packageDict[cpv]['ebuild_version_checksum_tree']
-			build_dict['package_id'] = get_package_id(conn, categories, package, repo)
-			build_dict['ebuild_version'] = packageDict[cpv]['ebuild_version_tree']
-			ebuild_id = get_ebuild_id_db_checksum(conn, build_dict)
-			if ebuild_id is not None:
-				ebuild_id_list.append(ebuild_id)
-				init_package.add_new_ebuild_buildquery_db(ebuild_id_list, packageDict, config_cpv_listDict)
-		log_msg = "C %s:%s ... Done." % (cp, repo,)
-		add_tbc_logs(conn, log_msg, "info", config_id)
-	return
-
-def add_buildquery_main(config_id):
-	conn = 0
-	config_setup = get_config(conn, config_id)
-	log_msg = "Adding build jobs for: %s" % (config_setup,)
-	add_tbc_logs(conn, log_msg, "info", config_id)
-	check_make_conf()
-	log_msg = "Check configs done"
-	add_tbc_logs(conn, log_msg, "info", config_id)
-	# Get default config from the configs table  and default_config=1
-	default_config_root = "/var/cache/tbc/" + tbc_settings_dict['tbc_gitreponame'] + "/" + config_setup + "/"
-	# Set config_root (PORTAGE_CONFIGROOT)  to default_config_root
-	mysettings = portage.config(config_root = default_config_root)
-	myportdb = portage.portdbapi(mysettings=mysettings)
-	init_package = tbc_package(mysettings, myportdb)
-	log_msg = "Setting default config to: %s" % (config_setup)
-	add_tbc_logs(conn, log_msg, "info", config_id)
-	# Use all cores except two for multiprocessing
-	pool_cores = multiprocessing.cpu_count()
-	if pool_cores >= 3:
-		use_pool_cores = pool_cores - 2
-	else:
-		use_pool_cores = 1
-	pool = multiprocessing.Pool(processes=use_pool_cores)
-
-	repo_trees_list = myportdb.porttrees
-	for repo_dir in repo_trees_list:
-		repo = myportdb.getRepositoryName(repo_dir)
-		repo_dir_list = []
-		repo_dir_list.append(repo_dir)
-		
-		# Get the package list from the repo
-		package_list_tree = myportdb.cp_all(trees=repo_dir_list)
-		for cp in sorted(package_list_tree):
-			pool.apply_async(add_cpv_query_pool, (mysettings, myportdb, config_id, cp, repo,))
-	pool.close()
-	pool.join()
-	log_msg = "Adding build jobs for: %s ... Done." % (config_setup,)
-	add_tbc_logs(conn, log_msg, "info", config_id)
-	return True
-
-def del_buildquery_main(config_id):
-	conn = 0
-	config_setup = get_config(conn, config_id)
-	log_msg = "Removing build jobs for: %s" % (config_setup,)
-	add_tbc_logs(conn, log_msg, "info", config_id)
-	build_job_id_list = get_build_jobs_id_list_config(conn, config_id)
-	if build_job_id_list is not None:
-		for build_job_id in build_job_id_list:
-			del_old_build_jobs(conn, build_job_id)
-	log_msg = "Removing build jobs for: %s ... Done." % (config_setup,)
-	add_tbc_logs(conn, log_msg, "info", config_id)
-	return True

diff --git a/pym/tbc/check_setup.py b/pym/tbc/check_setup.py
deleted file mode 100644
index dcd7a0c..0000000
--- a/pym/tbc/check_setup.py
+++ /dev/null
@@ -1,74 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import portage
-import os
-import errno
-
-from portage.exception import DigestException, FileNotFound, ParseError, PermissionDenied
-from portage.checksum import perform_checksum
-from tbc.text import get_file_text
-from tbc.sqlquerys import get_config_all_info, add_logs, get_configmetadata_info, get_setup_info
-from tbc.sync import git_pull
-
-def check_make_conf(session, config_id):
-	log_msg = "Checking configs for changes and errors"
-	add_logs(session, log_msg, "info", config_id)
-	HostConfigsMetaDataInfo = get_configmetadata_info(session, config_id)
-	git_repo =  HostConfigsMetaDataInfo.RepoPath + "/"
-	git_pull(session, git_repo, config_id)
-	configsDict = {}
-	for ConfigInfo in get_config_all_info(session):
-		attDict={}
-		# Set the config dir
-		SetupInfo = get_setup_info(session, ConfigInfo.ConfigId)
-		check_config_dir = git_repo + ConfigInfo.Hostname +"/" + SetupInfo.Setup + "/"
-		make_conf_file = check_config_dir + "etc/portage/make.conf"
-		ConfigsMetaDataInfo = get_configmetadata_info(session, ConfigInfo.ConfigId)
-		# Check if we can take a checksum on it.
-		# Check if we have some error in the file. (portage.util.getconfig)
-		# Check if we have an environment error with the config. (settings.validate)
-		try:
-			make_conf_checksum_tree = perform_checksum(make_conf_file, "SHA256")[0]
-			portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=True, expand=True)
-			mysettings = portage.config(config_root = check_config_dir)
-			mysettings.validate()
-			# With errors we update the db on the config and disable the config
-		except ParseError as e:
-			ConfigsMetaDataInfo.ConfigErrorText = str(e)
-			ConfigsMetaDataInfo.Active = False
-			log_msg = "%s FAIL!" % (ConfigInfo.Hostname,)
-			add_logs(session, log_msg, "info", config_id)
-			session.commit()
-		else:
-			ConfigsMetaDataInfo.Active = True
-			log_msg = "%s PASS" % (ConfigInfo.Hostname,)
-			add_logs(session, log_msg, "info", config_id)
-			session.commit()
-		if make_conf_checksum_tree != ConfigsMetaDataInfo.Checksum:
-			ConfigsMetaDataInfo.MakeConfText = get_file_text(make_conf_file)
-			ConfigsMetaDataInfo.Checksum = make_conf_checksum_tree
-			session.commit()
-	log_msg = "Checking configs for changes and errors ... Done"
-	add_logs(session, log_msg, "info", config_id)
-
-def check_configure_guest(session, config_id):
-	GuestConfigsMetaDataInfo = get_configmetadata_info(session, config_id)
-	git_repo =  GuestConfigsMetaDataInfo.RepoPath + "/"
-	git_pull(session, git_repo, config_id)
-	make_conf_file = "/etc/portage/make.conf"
-	# Check if we can open the file and close it
-	# Check if we have some error in the file (portage.util.getconfig)
-	# Check if we have an environment error with the config (settings.validate)
-	try:
-		make_conf_checksum_tree = perform_checksum(make_conf_file, "SHA256")[0]
-		portage.util.getconfig(make_conf_file, tolerant=0, allow_sourcing=True, expand=True)
-		mysettings = portage.config(config_root = "/")
-		mysettings.validate()
-		# With errors we return false
-	except Exception as e:
-		return False
-	if make_conf_checksum_tree != GuestConfigsMetaDataInfo.Checksum:
-		return False
-	return True

diff --git a/pym/tbc/db_mapping.py b/pym/tbc/db_mapping.py
deleted file mode 100644
index f48ef56..0000000
--- a/pym/tbc/db_mapping.py
+++ /dev/null
@@ -1,306 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import datetime
-from sqlalchemy.ext.declarative import declarative_base
-from sqlalchemy import Column, Integer, String, Boolean, DateTime, Enum, Text, ForeignKey
-from sqlalchemy.orm import relationship, backref
-
-Base = declarative_base()
-
-class Keywords(Base):
-	KeywordId = Column('keyword_id', Integer, primary_key=True)
-	Keyword = Column('keyword', String)
-	__tablename__ = 'keywords'
-
-class Setups(Base):
-	SetupId = Column('setup_id', Integer, primary_key=True)
-	Setup = Column('setup', String(100))
-	Profile = Column('profile', String(150))
-	Test = Column('test', Boolean, default=False)
-	Repoman = Column('repoman', Boolean, default=False)
-	__tablename__ = 'setups'
-
-class Configs(Base):
-	ConfigId = Column('config_id', Integer, primary_key=True)
-	Hostname = Column('hostname', String(150))
-	SetupId = Column('setup_id', Integer, ForeignKey('setups.setup_id'))
-	Host = Column('default_config', Boolean, default=False)
-	__tablename__ = 'configs'
-
-class Logs(Base):
-	LogId = Column('log_id', Integer, primary_key=True)
-	ConfigId = Column('config_id', Integer, ForeignKey('configs.config_id'))
-	LogType = Column('log_type', Enum('info','error','debug','qa','repoman'))
-	Msg = Column('msg', Text)
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'logs'
-
-class Jobs(Base):
-	JobId = Column('job_id', Integer, primary_key=True)
-	JobType = Column('job_type', Enum('updatedb', 'esync', 'removeold_cpv'))
-	Status = Column('status', Enum('Runing', 'Done', 'Waiting', 'Waiting_on_guest'))
-	User = Column('user', String(50))
-	ConfigId = Column('config_id', Integer, ForeignKey('configs.config_id'))
-	RunConfigId = Column('run_config_id', Integer, ForeignKey('configs.config_id'))
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'jobs'
-
-class ConfigsMetaData(Base):
-	Id = Column('id', Integer, primary_key=True)
-	ConfigId = Column('config_id', Integer, ForeignKey('configs.config_id'))
-	KeywordId = Column('keyword_id', Integer, ForeignKey('keywords.keyword_id'))
-	MakeConfText = Column('make_conf_text', Text)
-	Checksum = Column('checksum', String(100))
-	ConfigSync = Column('configsync', Boolean, default=False)
-	Active = Column('active', Boolean, default=False)
-	ConfigErrorText = Column('config_error_text', Text)
-	Updateing = Column('updateing', Boolean, default=False)
-	Status = Column('status', Enum('Stopped', 'Runing', 'Waiting'))
-	Auto = Column('auto', Boolean, default=False)
-	RepoPath = Column('repo_path', String(200))
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'configs_metadata'
-
-class Categories(Base):
-	CategoryId = Column('category_id', Integer, primary_key=True)
-	Category = Column('category', String(150))
-	Active = Column('active', Boolean, default=True)
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'categories'
-
-class CategoriesMetadata(Base):
-	Id = Column('id', Integer, primary_key=True)
-	CategoryId = Column('category_id', Integer, ForeignKey('categories.category_id'))
-	Checksum = Column('checksum', String(100))
-	Descriptions = Column('descriptions', Text)
-	__tablename__ = 'categories_metadata'
-
-class Repos(Base):
-	RepoId = Column('repo_id', Integer, primary_key=True)
-	Repo = Column('repo', String(100))
-	__tablename__ = 'repos'
-
-class Packages(Base):
-	PackageId = Column('package_id', Integer, primary_key=True)
-	CategoryId = Column('category_id', Integer, ForeignKey('categories.category_id'))
-	Package = Column('package',String(150))
-	RepoId = Column('repo_id', Integer, ForeignKey('repos.repo_id'))
-	Active = Column('active', Boolean, default=False)
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'packages'
-
-class Emails(Base):
-	EmailId = Column('email_id', Integer, primary_key=True)
-	Email = Column('email', String(150))
-	__tablename__ = 'emails'
-
-class PackagesEmails(Base):
-	Id = Column('id', Integer, primary_key=True)
-	PackageId = Column('package_id', Integer, ForeignKey('packages.package_id'))
-	EmailId = Column('email_id', Integer, ForeignKey('emails.email_id'))
-	__tablename__ = 'packages_emails'
-
-class PackagesMetadata(Base):
-	Id = Column('id', Integer, primary_key=True)
-	PackageId = Column('package_id', Integer, ForeignKey('packages.package_id'))
-	Gitlog = Column('gitlog', Text)
-	Descriptions = Column('descriptions', Text)
-	New = Column('new', Boolean, default=False)
-	__tablename__ = 'packages_metadata'
-
-class Ebuilds(Base):
-	EbuildId = Column('ebuild_id', Integer, primary_key=True)
-	PackageId = Column('package_id', Integer, ForeignKey('packages.package_id'))
-	Version = Column('version', String(150))
-	Checksum = Column('checksum', String(100))
-	Active = Column('active', Boolean, default=False)
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'ebuilds'
-
-class EmergeOptions(Base):
-	EmergeOptionId = Column('eoption_id', Integer, primary_key=True)
-	EOption = Column('eoption', String(45))
-	__tablename__ = 'emerge_options'
-
-class ConfigsEmergeOptions(Base):
-	ConfigId = Column('config_id', Integer, ForeignKey('configs.config_id'), primary_key=True)
-	EOptionId = Column('eoption_id', Integer, ForeignKey('emerge_options.eoption_id'))
-	__tablename__ = 'configs_emerge_options'
-
-class BuildJobs(Base):
-	BuildJobId = Column('build_job_id', Integer, primary_key=True)
-	EbuildId = Column('ebuild_id', Integer, ForeignKey('ebuilds.ebuild_id'))
-	SetupId = Column('setup_id', Integer, ForeignKey('setups.setup_id'))
-	ConfigId = Column('config_id', Integer, ForeignKey('configs.config_id'))
-	Status = Column('status', Enum('Waiting','Building','Looked',))
-	BuildNow = Column('build_now', Boolean, default=False)
-	RemoveBin = Column('removebin', Boolean ,default=False)
-	New = Column('new', Boolean, default=False)
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'build_jobs'
-
-class BuildJobsEmergeOptions(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildJobId = Column('build_job_id', Integer, ForeignKey('build_jobs.build_job_id'))
-	EOption = Column('eoption_id', Integer, ForeignKey('emerge_options.eoption_id'))
-	__tablename__ = 'build_jobs_emerge_options'
-
-class BuildJobsRedo(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildJobId = Column('build_job_id', Integer, ForeignKey('build_jobs.build_job_id'))
-	FailTimes = Column('fail_times', Integer)
-	FailType = Column('fail_type', String(50))
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'build_jobs_redo'
-
-class Uses(Base):
-	UseId = Column('use_id', Integer, primary_key=True)
-	Flag = Column('flag', String(150))
-	__tablename__ = 'uses'
-
-class BuildJobsUse(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildJobId = Column('build_job_id', Integer, ForeignKey('build_jobs.build_job_id'))
-	UseId = Column('use_id', Integer, ForeignKey('uses.use_id'))
-	Status = Column('status', Boolean, default=False)
-	__tablename__ = 'build_jobs_use'
-
-class HiLightCss(Base):
-	HiLightCssId = Column('hilight_css_id', Integer, primary_key=True)
-	HiLightCssName = Column('hilight_css_name', String(30))
-	HiLightCssCollor = Column('hilight_css_collor', String(30))
-	__tablename__ = 'hilight_css'
-
-class HiLight(Base):
-	HiLightId = Column('hilight_id', Integer, primary_key=True)
-	HiLightSearch = Column('hilight_search', String(50))
-	HiLightSearchEnd = Column('hilight_search_end', String(50))
-	HiLightSearchPattern = Column('hilight_search_pattern', String(50))
-	HiLightCssId = Column('hilight_css_id', Integer, ForeignKey('hilight_css.hilight_css_id'))
-	HiLightStart = Column('hilight_start', Integer)
-	HiLightEnd = Column('hilight_end', Integer)
-	__tablename__ = 'hilight'
-
-class BuildLogs(Base):
-	BuildLogId = Column('build_log_id', Integer, primary_key=True)
-	EbuildId = Column('ebuild_id', Integer, ForeignKey('ebuilds.ebuild_id'))
-	Fail = Column('fail', Boolean, default=False)
-	SummeryText = Column('summery_text', Text)
-	LogHash = Column('log_hash', String(100))
-	BugId = Column('bug_id', Integer, default=0)
-	New = Column('new', Boolean, default=False)
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'build_logs'
-
-class EmergeInfo(Base):
-	EInfoId = Column('einfo_id', Integer, primary_key=True)
-	EmergeInfoText = Column('emerge_info_text', Text)
-	__tablename__ = 'emerge_info'
-
-class BuildLogsConfig(Base):
-	LogId = Column('log_id', Integer, primary_key=True)
-	BuildLogId = Column('build_log_id', Integer, ForeignKey('build_logs.build_log_id'))
-	ConfigId = Column('config_id', Integer, ForeignKey('configs.config_id'))
-	EInfoId = Column('einfo_id', Integer, ForeignKey('emerge_info.einfo_id'))
-	LogName = Column('logname', String(450))
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__  = 'build_logs_config'
-
-class BuildLogsHiLight(Base):
-	BuildLogHiLightId = Column('id', Integer, primary_key=True)
-	LogId = Column('log_id', Integer, ForeignKey('build_logs_config.log_id'))
-	StartLine = Column('start_line', Integer)
-	EndLine = Column('end_line', Integer)
-	HiLightCssId = Column('hilight_css_id', Integer, ForeignKey('hilight_css.hilight_css_id'))
-	__tablename__ = 'build_logs_hilight'
-
-class BuildLogsEmergeOptions(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildLogId = Column('build_log_id', Integer, ForeignKey('build_logs.build_log_id'))
-	EmergeOptionId = Column('eoption_id', Integer, ForeignKey('emerge_options.eoption_id'))
-	__tablename__ = 'build_logs_emerge_options'
-
-class BuildLogsUse(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildLogId = Column('build_log_id', Integer, ForeignKey('build_logs.build_log_id'))
-	UseId = Column('use_id', Integer, ForeignKey('uses.use_id'))
-	Status = Column('status', Boolean, default=False)
-	__tablename__ = 'build_logs_use'
-
-class BuildLogsRepoman(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildLogId = Column('build_log_id', Integer, ForeignKey('build_logs.build_log_id'))
-	SummeryText = Column('summery_text', Text)
-	__tablename__ = 'build_logs_repoman'
-
-class BuildLogsQa(Base):
-	Id = Column('id', Integer, primary_key=True)
-	BuildLogId = Column('build_log_id', Integer, ForeignKey('build_logs.build_log_id'))
-	SummeryText = Column('summery_text', Text)
-	__tablename__ = 'build_logs_qa'
-
-class PackagesRepoman(Base):
-	Id = Column('id', Integer, primary_key=True)
-	PackageId = Column('package_id', Integer, ForeignKey('packages.package_id'))
-	RepomanText = Column('repoman_text', Text)
-	RepomanHash = Column('repoman_hash', String(100))
-	TimeStamp = Column('time_stamp', DateTime, nullable=False, default=datetime.datetime.utcnow)
-	__tablename__ = 'packages_repoman'
-
-class ErrorsInfo(Base):
-	ErrorId = Column('error_id', Integer, primary_key=True)
-	ErrorName = Column('error_name', String)
-	ErrorSearch = Column('error_search', String)
-	__tablename__ = 'errors_info'
-
-class BuildLogsErrors(Base):
-	BuildLogErrorId =  Column('id', Integer, primary_key=True)
-	BuildLogId = Column('build_log_id', Integer, ForeignKey('build_logs.build_log_id'))
-	ErrorId = Column('error_id', Integer, ForeignKey('errors_info.error_id'))
-	__tablename__ = 'build_logs_errors'
-
-class Restrictions(Base):
-	RestrictionId = Column('restriction_id', Integer, primary_key=True)
-	Restriction = Column('restriction', String(150))
-	__tablename__ = 'restrictions'
-
-class EbuildsRestrictions(Base):
-	Id =  Column('id', Integer, primary_key=True)
-	EbuildId = Column('ebuild_id', ForeignKey('ebuilds.ebuild_id'))
-	RestrictionId = Column('restriction_id', ForeignKey('restrictions.restriction_id'))
-	__tablename__ = 'ebuilds_restrictions'
-
-class EbuildsIUse(Base):
-	Id =  Column('id', Integer, primary_key=True)
-	EbuildId = Column('ebuild_id', ForeignKey('ebuilds.ebuild_id'))
-	UseId = Column('use_id', ForeignKey('uses.use_id'))
-	Status = Column('status', Boolean, default=False)
-	__tablename__= 'ebuilds_iuse'
-
-class EbuildsKeywords(Base):
-	Id =  Column('id', Integer, primary_key=True)
-	EbuildId = Column('ebuild_id', ForeignKey('ebuilds.ebuild_id'))
-	KeywordId = Column('keyword_id', ForeignKey('keywords.keyword_id'))
-	Status = Column('status', Enum('Stable','Unstable','Negative'))
-	__tablename__ = 'ebuilds_keywords'
-
-class EbuildsMetadata(Base):
-	Id =  Column('id', Integer, primary_key=True)
-	EbuildId = Column('ebuild_id', ForeignKey('ebuilds.ebuild_id'))
-	Commit = Column('git_commit', String(100))
-	CommitMsg = Column('git_commit_msg', String(200))
-	New = Column('new', Boolean, default=False)
-	Updated = Column('updated', Boolean, default=False)
-	Descriptions = Column('descriptions', Text)
-	Slot = Column('slot', String(10))
-	Homepage = Column('homepage', String(200))
-	License = Column('license', String(200))
-	__tablename__ = 'ebuilds_metadata'
-
-class TbcConfig(Base):
-	Id =  Column('id', Integer, primary_key=True)
-	WebIrker = Column('webirker', String)
-	HostIrker = Column('hostirker', String)
-	WebBug = Column('webbug', String)
-	__tablename__ = 'tbc_config'

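The removed db_mapping.py is a plain SQLAlchemy declarative mapping, so binding it to a database
only needs an engine and a session factory. A minimal sketch, using an in-memory SQLite database
purely for illustration (the real connection URL came from the tbc configuration):

    from sqlalchemy import create_engine
    from sqlalchemy.orm import sessionmaker
    from tbc.db_mapping import Base, Configs  # the mapping removed above

    engine = create_engine('sqlite:///:memory:')
    Base.metadata.create_all(engine)          # create all mapped tables
    session = sessionmaker(bind=engine)()

    session.add(Configs(Hostname='build-host', Host=True))
    session.commit()
    print(session.query(Configs).filter_by(Hostname='build-host').count())
    session.close()
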
diff --git a/pym/tbc/depclean.py b/pym/tbc/depclean.py
deleted file mode 100644
index 3154ac5..0000000
--- a/pym/tbc/depclean.py
+++ /dev/null
@@ -1,53 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import portage
-from portage._sets.base import InternalPackageSet
-from _emerge.main import parse_opts
-from tbc.actions import load_emerge_config, action_depclean, calc_depclean
-
-def do_depclean():
-	mysettings, mytrees, mtimedb = load_emerge_config()
-	myroot = mysettings["ROOT"]
-	root_config = mytrees[myroot]["root_config"]
-	psets = root_config.setconfig.psets
-	args_set = InternalPackageSet(allow_repo=True)
-	spinner=None
-	scheduler=None
-	tmpcmdline = []
-	tmpcmdline.append("--depclean")
-	tmpcmdline.append("--pretend")
-	print("depclean",tmpcmdline)
-	myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
-	if myfiles:
-		args_set.update(myfiles)
-		matched_packages = False
-		vardb = root_config.trees["vartree"].dbapi  # installed packages db
-		for x in args_set:
-			if vardb.match(x):
-				matched_packages = True
-		if not matched_packages:
-			return 0
-
-	rval, cleanlist, ordered, req_pkg_count, unresolvable = calc_depclean(mysettings, mytrees, mtimedb["ldpath"], myopts, myaction, args_set, spinner)
-	print('rval, cleanlist, ordered, req_pkg_count, unresolvable', rval, cleanlist, ordered, req_pkg_count, unresolvable)
-	if unresolvable != []:
-		return True
-	if cleanlist != []:
-		conflict_package_list = []
-		for depclean_cpv in cleanlist:
-			if portage.versions.cpv_getkey(depclean_cpv) in list(psets["system"]):
-				conflict_package_list.append(depclean_cpv)
-			if portage.versions.cpv_getkey(depclean_cpv) in list(psets['selected']):
-				conflict_package_list.append(depclean_cpv)
-		print('conflict_package_list', conflict_package_list)
-		if conflict_package_list == []:
-			tmpcmdline = []
-			tmpcmdline.append("--depclean")
-			myaction, myopts, myfiles = parse_opts(tmpcmdline, silent=False)
-			rval = action_depclean(mysettings, mytrees, mtimedb["ldpath"], myopts, myaction, myfiles, spinner, scheduler=None)
-			return True
-		else:
-			print("conflicting packages: %s" % (conflict_package_list,))
-			return True
-	return True

diff --git a/pym/tbc/flags.py b/pym/tbc/flags.py
deleted file mode 100644
index eb3f782..0000000
--- a/pym/tbc/flags.py
+++ /dev/null
@@ -1,222 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-# Origin flags.py from portage public api repo
-from __future__ import print_function
-import portage
-import os
-
-class tbc_use_flags(object):
-	
-	def __init__(self, mysettings, myportdb, cpv):
-		self._mysettings = mysettings
-		self._myportdb = myportdb
-		self._cpv = cpv
-	
-	def get_iuse(self):
-		"""Gets the current IUSE flags from the tree
-		To be used when a gentoolkit package object is not needed
-		@type cpv: string
-		@param cpv: cat/pkg-ver
-		@rtype list
-		@returns [] or the list of IUSE flags
-		"""
-		return self._myportdb.aux_get(self._cpv, ["IUSE"])[0].split()
-		
-	def reduce_flag(self, flag):
-		"""Absolute value function for a USE flag
-		@type flag: string
-		@param flag: the use flag to absolute.
-		@rtype: string
-		@return absolute USE flag
-		"""
-		if flag[0] in ["+","-"]:
-			return flag[1:]
-		else:
-			return flag
-
-	def reduce_flags(self, the_list):
-		"""Absolute value function for a USE flag list
-		@type the_list: list
-		@param the_list: the use flags to absolute.
-		@rtype: list
-		@return absolute USE flags
-		"""
-		r=[]
-		for member in the_list:
-			r.append(self.reduce_flag(member))
-		return r
-
-	def filter_flags(self, use, use_expand_hidden, usemasked, useforced):
-		"""Filter function to remove hidden or otherwise not normally
-		visible USE flags from a list.
-		@type use: list
-		@param use: the USE flag list to be filtered.
-		@type use_expand_hidden: list
-		@param  use_expand_hidden: list of flags hidden.
-		@type usemasked: list
-		@param usemasked: list of masked USE flags.
-		@type useforced: list
-		@param useforced: the forced USE flags.
-		@rtype: list
-		@return the filtered USE flags.
-		"""
-		# clean out some environment flags, since they will most probably
-		# be confusing for the user
-		for f in use_expand_hidden:
-			f = f.lower() + "_"
-			for x in use[:]:
-				if f in x:
-					use.remove(x)
-		# clean out any arch's
-		archlist = self._mysettings["PORTAGE_ARCHLIST"].split()
-		for a in use[:]:
-			if a in archlist:
-				use.remove(a)
-		# clean out any abi_ flag
-		for a in use[:]:
-			if a.startswith("abi_"):
-				use.remove(a)
-		# clean out any python_ flag
-		for a in use[:]:
-			if a.startswith("python_"):
-				use.remove(a)
-
-		# double check if any flags from usemasked or useforced are still there
-		masked = usemasked + useforced
-		for a in use[:]:
-			if a in masked:
-				use.remove(a)
-		return use
-
-	def get_all_cpv_use(self):
-		"""Uses portage to determine final USE flags and settings for an emerge
-		@type cpv: string
-		@param cpv: eg cat/pkg-ver
-		@rtype: lists
-		@return  use, use_expand_hidden, usemask, useforce
-		"""
-		use = None
-		self._mysettings.unlock()
-		try:
-			self._mysettings.setcpv(self._cpv, use_cache=None, mydb=self._myportdb)
-			use = self._mysettings['PORTAGE_USE'].split()
-			use_expand_hidden = self._mysettings["USE_EXPAND_HIDDEN"].split()
-			usemask = list(self._mysettings.usemask)
-			useforce =  list(self._mysettings.useforce)
-		except KeyError:
-			self._mysettings.reset()
-			self._mysettings.lock()
-			return [], [], [], []
-		# reset cpv filter
-		self._mysettings.reset()
-		self._mysettings.lock()
-		return use, use_expand_hidden, usemask, useforce
-
-	def get_all_cpv_use_looked(self):
-		"""Uses portage to determine final USE flags and settings for an emerge
-		@type cpv: string
-		@param cpv: eg cat/pkg-ver
-		@rtype: lists
-		@return  use, use_expand_hidden, usemask, useforce
-		"""
-		# use = self._mysettings['PORTAGE_USE'].split()
-		use = os.environ['USE'].split()
-		use_expand_hidden = self._mysettings["USE_EXPAND_HIDDEN"].split()
-		usemask = list(self._mysettings.usemask)
-		useforce = list(self._mysettings.useforce)
-		return use, use_expand_hidden, usemask, useforce
-
-	def get_all_cpv_use_pkg(self, pkg, settings):
-		"""Uses portage to determine final USE flags and settings for an emerge
-		@type cpv: string
-		@param cpv: eg cat/pkg-ver
-		@rtype: lists
-		@return  use, use_expand_hidden, usemask, useforce
-		"""
-		# use = self._mysettings['PORTAGE_USE'].split()
-		use_list = list(pkg.use.enabled)
-		use_expand_hidden = settings["USE_EXPAND_HIDDEN"].split()
-		usemask = list(settings.usemask)
-		useforced = list(settings.useforce)
-		return use_list, use_expand_hidden, usemask, useforced
-
-	def get_flags(self):
-		"""Retrieves all information needed to filter out hidden, masked, etc.
-		USE flags for a given package.
-
-		@type cpv: string
-		@param cpv: eg. cat/pkg-ver
-		@type final_setting: boolean
-		@param final_setting: used to also determine the final
-		environment USE flag settings and return them as well.
-		@rtype: list or list, list
-		@return IUSE or IUSE, final_flags
-		"""
-		final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use()
-		iuse_flags = self.filter_flags(self.get_iuse(), use_expand_hidden, usemasked, useforced)
-		#flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced)
-		final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced)
-		return iuse_flags, final_flags, usemasked
-
-	def get_flags_looked(self):
-		"""Retrieves all information needed to filter out hidden, masked, etc.
-		USE flags for a given package.
-
-		@type cpv: string
-		@param cpv: eg. cat/pkg-ver
-		@type final_setting: boolean
-		@param final_setting: used to also determine the final
-		environment USE flag settings and return them as well.
-		@rtype: list or list, list
-		@return IUSE or IUSE, final_flags
-		"""
-		final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use_looked()
-		iuse_flags = self.filter_flags(self.get_iuse(), use_expand_hidden, usemasked, useforced)
-		#flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced)
-		final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced)
-		return iuse_flags, final_flags
-
-	def get_flags_pkg(self, pkg, settings):
-		"""Retrieves all information needed to filter out hidden, masked, etc.
-		USE flags for a given package.
-		@type cpv: string
-		@param cpv: eg. cat/pkg-ver
-		@type final_setting: boolean
-		@param final_setting: used to also determine the final
-		environment USE flag settings and return them as well.
-		@rtype: list or list, list
-		@return IUSE or IUSE, final_flags
-		"""
-		final_use, use_expand_hidden, usemasked, useforced = self.get_all_cpv_use_pkg(pkg, settings)
-		iuse_flags = self.filter_flags(list(pkg.iuse.all), use_expand_hidden, usemasked, useforced)
-		#flags = filter_flags(use_flags, use_expand_hidden, usemasked, useforced)
-		final_flags = self.filter_flags(final_use, use_expand_hidden, usemasked, useforced)
-		return iuse_flags, final_flags
-
-	def comper_useflags(self, build_dict):
-		iuse_flags, use_enable, usemasked = self.get_flags()
-		iuse = []
-		build_use_flags_dict = build_dict['build_useflags']
-		build_use_flags_list = []
-		if use_enable == []:
-			if build_use_flags_dict is None:
-				return None
-		for iuse_line in iuse_flags:
-			iuse.append(self.reduce_flag(iuse_line))
-		iuse_flags_list = list(set(iuse))
-		use_disable = list(set(iuse_flags_list).difference(set(use_enable)))
-		use_flagsDict = {}
-		for x in use_enable:
-			use_flagsDict[x] = True
-		for x in use_disable:
-			use_flagsDict[x] = False
-		for k, v in use_flagsDict.items():
-			if build_use_flags_dict[k] != v:
-				if build_use_flags_dict[k]:
-					build_use_flags_list.append(k)
-				else:
-					build_use_flags_list.append("-" + k)
-		if build_use_flags_list == []:
-			build_use_flags_list = None
-		return build_use_flags_list

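The removed flags.py wraps portage's own settings and portdbapi objects; getting the filtered
IUSE and final USE flags for a cpv only takes a handful of calls. A minimal sketch, assuming a
configured portage tree and using app-shells/bash purely as an example atom:

    import portage
    from tbc.flags import tbc_use_flags  # the module removed above

    mysettings = portage.config(config_root="/")
    myportdb = portage.portdbapi(mysettings=mysettings)
    cpv = myportdb.xmatch('bestmatch-visible', 'app-shells/bash')

    init_useflags = tbc_use_flags(mysettings, myportdb, cpv)
    iuse_flags, final_flags, usemasked = init_useflags.get_flags()
    print(cpv, iuse_flags, final_flags)
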
diff --git a/pym/tbc/irk.py b/pym/tbc/irk.py
deleted file mode 100644
index 3b3589b..0000000
--- a/pym/tbc/irk.py
+++ /dev/null
@@ -1,28 +0,0 @@
-from __future__ import unicode_literals
-
-import json
-import socket
-import sys
-
-DEFAULT_SERVER = ("192.168.0.5", 6659)
-
-def connect(server = DEFAULT_SERVER):
-	return socket.create_connection(server)
-
-def send(s, target, message):
-	data = {"to": target, "privmsg" : message}
-	s.sendall(json.dumps(data).encode('ascii'))
-
-def irk(target, message, server = DEFAULT_SERVER):
-	s = connect(server)
-	if "irc:" not in target and "ircs:" not in target:
-		target = "irc://chat.freenode.net/{0}".format(target)
-	send(s, target, message)
-	s.close()
-
-def send_irk(msg, host):
-	target = "tinderbox-cluster"
-	try:
-		irk(target, msg, server = (host, 6659))
-	except socket.error as e:
-		sys.stderr.write("irk: write to server failed: %r\n" % e)

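The removed irk.py speaks the irkerd submission protocol: one JSON object with "to" and
"privmsg" keys per notification, written to a TCP socket (port 6659 above). A standalone sketch
with placeholder host and channel values:

    import json
    import socket

    def notify(host, channel, message):
        # irkerd expects a JSON envelope naming the IRC target and the message.
        payload = {"to": "irc://chat.freenode.net/{0}".format(channel),
                   "privmsg": message}
        with socket.create_connection((host, 6659)) as s:
            s.sendall(json.dumps(payload).encode('ascii'))

    # notify("192.168.0.5", "tinderbox-cluster", "BUILD: PASS")
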
diff --git a/pym/tbc/jobs.py b/pym/tbc/jobs.py
deleted file mode 100644
index 800fe87..0000000
--- a/pym/tbc/jobs.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-from tbc.sync import git_sync_main
-#from tbc.buildquerydb import add_buildquery_main, del_buildquery_main
-from tbc.updatedb import update_db_main
-from tbc.old_cpv import remove_old_cpv_main
-from tbc.sqlquerys import get_config_id, get_jobs, update_job_list
-from tbc.log import write_log
-
-def jobs_main(session, config_id):
-	JobsInfo = get_jobs(session, config_id)
-	if JobsInfo is None:
-		return
-	for JobInfo in JobsInfo:
-		job = JobInfo.JobType
-		run_config_id = JobInfo.RunConfigId
-		job_id = JobInfo.JobId
-		log_msg = "Job: %s Type: %s" % (job_id, job,)
-		write_log(session, log_msg, "info", config_id, 'jobs_main')
-		if job == "addbuildquery":
-			update_job_list(session, "Runing", job_id)
-			log_msg = "Job %s is running." % (job_id,)
-			write_log(session, log_msg, "info", config_id, 'jobs_main')
-			#result =  add_buildquery_main(run_config_id)
-			#if result is True:
-			#	update_job_list(session, "Done", job_id)
-			#	log_msg = "Job %s is done.." % (job_id,)
-			#	write_log(session, log_msg, "info", config_id, 'jobs_main')
-			#else:
-			#	update_job_list(session, "Fail", job_id)
-			#	log_msg = "Job %s did fail." % (job_id,)
-			#	write_log(session, log_msg, "info", config_id, 'jobs_main')
-		elif job == "delbuildquery":
-			update_job_list(session, "Runing", job_id)
-			log_msg = "Job %s is running." % (job_id,)
-			write_log(session, log_msg, "info", config_id, 'jobs_main')
-			#result =  del_buildquery_main(config_id)
-			#if result is True:
-			#	update_job_list(session, "Done", job_id)
-			#	log_msg = "Job %s is done.." % (job_id,)
-			#	write_log(session, log_msg, "info", config_id, 'jobs_main')
-			#else:
-			#	update_job_list(session, "Fail", job_id)
-			#	log_msg = "Job %s did fail." % (job_id,)
-			#	write_log(session, log_msg, "info", config_id, 'jobs_main')
-		elif job == "esync":
-			update_job_list(session, "Waiting_on_guest", job_id)
-			log_msg = "Job %s is running." % (job_id,)
-			write_log(session, log_msg, "info", config_id, 'jobs_main')
-			if update_db_main(session, git_sync_main(session), config_id):
-				update_job_list(session, "Done", job_id)
-				log_msg = "Job %s is done." % (job_id,)
-				write_log(session, log_msg, "info", config_id, 'jobs_main')
-			else:
-				update_job_list(session, "Fail", job_id)
-				log_msg = "Job %s failed." % (job_id,)
-				write_log(session, log_msg, "info", config_id, 'jobs_main')
-		elif job == "updatedb":
-			update_job_list(session, "Waiting_on_guest", job_id)
-			log_msg = "Job %s is running." % (job_id,)
-			write_log(session, log_msg, "info", config_id, 'jobs_main')
-			if update_db_main(session, None, config_id):
-				update_job_list(session, "Done", job_id)
-				log_msg = "Job %s is done." % (job_id,)
-				write_log(session, log_msg, "info", config_id, 'jobs_main')
-			else:
-				update_job_list(session, "Fail", job_id)
-				log_msg = "Job %s failed." % (job_id,)
-				write_log(session, log_msg, "info", config_id, 'jobs_main')
-		elif job == "removeold_cpv":
-			update_job_list(session, "Runing", job_id)
-			log_msg = "Job %s is running." % (job_id,)
-			write_log(session, log_msg, "info", config_id, 'jobs_main')
-			remove_old_cpv_main(session, config_id)
-			update_job_list(session, "Done", job_id)
-			log_msg = "Job %s is done." % (job_id,)
-			write_log(session, log_msg, "info", config_id, 'jobs_main')
-	return

diff --git a/pym/tbc/log.py b/pym/tbc/log.py
deleted file mode 100644
index 944d3fa..0000000
--- a/pym/tbc/log.py
+++ /dev/null
@@ -1,38 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import logging
-module_logger = logging.getLogger('tbc.log')
-
-def setup_logger(tbc_settings):
-	# set up the logger
-	log_level = getattr(logging, tbc_settings['log_level'].upper(), None)
-	format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'
-	if not isinstance(log_level, int):
-		raise ValueError('Invalid log level: %s' % tbc_settings['log_level'])
-	logging.basicConfig()
-	logger = logging.getLogger('tbc')
-	logger.setLevel(log_level)
-	if tbc_settings['log_file']:
-		fh = logging.FileHandler(tbc_settings['log_file'])
-	else:
-		fh = logging.StreamHandler()
-	formatter = logging.Formatter(format)
-	fh.setFormatter(formatter)
-	logger.addHandler(fh)
-	return logger
-
-def write_log(session, msg, level, config_id, function=False):
-	if function:
-		logger = logging.getLogger('tbc.' + function)
-	else:
-		logger = logging.getLogger('tbc')
-	if level == 'info':
-		logger.info(msg)
-	if level == 'error':
-		logger.error(msg)
-	if level == 'debug':
-		logger.debug(msg)
-	if level == 'warning':
-		logger.warning(msg)

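The removed log.py is a thin wrapper around the standard logging module: setup_logger()
configures the 'tbc' logger once, and write_log() routes messages through a 'tbc.<function>'
child logger. A minimal usage sketch (the settings values are placeholders; the session and
config_id arguments are accepted but unused by this implementation):

    from tbc.log import setup_logger, write_log  # the module removed above

    tbc_settings = {'log_level': 'info', 'log_file': ''}  # empty log_file -> stderr handler
    setup_logger(tbc_settings)

    write_log(None, "esync job started", "info", 1, 'jobs_main')
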
diff --git a/pym/tbc/old_cpv.py b/pym/tbc/old_cpv.py
deleted file mode 100644
index e97b121..0000000
--- a/pym/tbc/old_cpv.py
+++ /dev/null
@@ -1,43 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-from datetime import datetime
-from tbc.log import write_log
-from tbc.sqlquerys import get_category_list_info, get_package_list_info, get_ebuild_list_info, \
-	get_build_job_all, del_old_build_jobs, del_old_ebuild, del_old_package, add_old_category
-
-def remove_old_ebuilds(session, package_id, config_id, cp, today):
-	EbuildsInfo = get_ebuild_list_info(session, package_id)
-	for EbuildInfo in EbuildsInfo:
-		cpv = cp + '-' + EbuildInfo.Version
-		log_msg = "Checking: %s" % (cpv,)
-		write_log(session, log_msg, "info", config_id, 'old_cpv.remove_old_ebuilds')
-		if not EbuildInfo.Active:
-			duration = today - EbuildInfo.TimeStamp
-			if duration.days > 30:
-				log_msg = "Removing: %s" % (cpv,)
-				write_log(session, log_msg, "info", config_id, 'old_cpv.remove_old_ebuilds')
-				build_job_id_list = get_build_job_all(session, EbuildInfo.EbuildId)
-				if build_job_id_list != []:
-					for build_job in build_job_id_list:
-						del_old_build_jobs(session, build_job.BuildJobId)
-				del_old_ebuild(session, EbuildInfo.EbuildId)
-	if get_ebuild_list_info(session, package_id) == []:
-		log_msg = "Removing: %s" % (cp,)
-		write_log(session, log_msg, "info", config_id, 'old_cpv.remove_old_ebuilds')
-		del_old_package(session, package_id)
-
-def remove_old_cpv_main(session, config_id):
-	today = datetime.utcnow()
-	CategorysInfo = get_category_list_info(session)
-	for CategoryInfo in CategorysInfo:
-		log_msg = "Checking: %s" % (CategoryInfo.Category,)
-		write_log(session, log_msg, "info", config_id, 'old_cpv.remove_old_cpv_main')
-		PackagesInfo = get_package_list_info(session, CategoryInfo.CategoryId)
-		for PackageInfo in PackagesInfo:
-			cp = CategoryInfo.Category + '/' + PackageInfo.Package
-			remove_old_ebuilds(session, PackageInfo.PackageId, config_id, cp, today)
-
-		if get_package_list_info(session, CategoryInfo.CategoryId) == []:
-			add_old_category(session, CategoryInfo.CategoryId)

diff --git a/pym/tbc/package.py b/pym/tbc/package.py
deleted file mode 100644
index 84e3907..0000000
--- a/pym/tbc/package.py
+++ /dev/null
@@ -1,409 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import re
-import hashlib
-import os
-import git
-import portage
-import datetime
-from portage.xml.metadata import MetaDataXML
-from portage.checksum import perform_checksum
-from tbc.flags import tbc_use_flags
-from tbc.text import get_ebuild_cvs_revision, get_log_text_dict
-from tbc.qachecks import check_repoman
-from tbc.build_log import check_repoman_full
-from tbc.log import write_log
-from tbc.sqlquerys import get_package_info, get_config_info, \
-	add_new_build_job, add_new_ebuild_sql, get_ebuild_id_list, add_old_ebuild, \
-	get_package_metadata_sql, update_package_metadata, \
-	get_package_info_from_package_id, get_config_all_info, add_new_package_sql, \
-	get_ebuild_checksums, get_ebuild_id_db, get_configmetadata_info, get_setup_info, \
-	get_ebuild_info_ebuild_id, get_ebuild_restrictions, add_old_package
-
-class tbc_package(object):
-
-	def __init__(self, session, mysettings, myportdb, config_id):
-		self._session = session
-		self._mysettings = mysettings
-		self._myportdb = myportdb
-		self._config_id = config_id
-
-	def change_config(self, host_config, repopath):
-		# Change config_root to the host/setup config dir (config_setup comes from the configs/setups tables)
-		my_new_setup = repopath + "/" + host_config + "/"
-		mysettings_setup = portage.config(config_root = my_new_setup)
-		return mysettings_setup
-
-	def config_match_ebuild(self, cp, config_list, repopath):
-		config_cpv_dict ={}
-		if config_list == []:
-			return config_cpv_dict
-		for config_id in config_list:
-			ConfigInfo = get_config_info(self._session, config_id)
-			ConfigsMetaData = get_configmetadata_info(self._session, config_id)
-			if ConfigsMetaData.Auto and ConfigsMetaData.Active and ConfigsMetaData.Status != 'Stopped' and not ConfigInfo.SetupId in config_cpv_dict:
-				SetupInfo = get_setup_info(self._session, config_id)
-				mysettings_setup = self.change_config(ConfigInfo.Hostname + "/" + SetupInfo.Setup, repopath)
-				myportdb_setup = portage.portdbapi(mysettings=mysettings_setup)
-
-				# Get the latest cpv from portage with the config that we can build
-				build_cpv = myportdb_setup.xmatch('bestmatch-visible', cp)
-
-				# Check if could get cpv from portage and add it to the config_cpv_listDict.
-				if build_cpv != "":
-
-					# Get the iuse and use flags for that config/setup and cpv
-					init_useflags = tbc_use_flags(mysettings_setup, myportdb_setup, build_cpv)
-					iuse_flags_list, final_use_list, usemasked = init_useflags.get_flags()
-					iuse_flags_list2 = []
-					for iuse_line in iuse_flags_list:
-						iuse_flags_list2.append( init_useflags.reduce_flag(iuse_line))
-					enable_test = False
-					if SetupInfo.Test:
-						if not "test" in usemasked:
-							enable_test = True
-					# Dict the needed info
-					attDict = {}
-					attDict['cpv'] = build_cpv
-					attDict['useflags'] = final_use_list
-					attDict['iuse'] = iuse_flags_list2
-					attDict['test'] = enable_test
-					config_cpv_dict[ConfigInfo.SetupId] = attDict
-
-				# Clean some cache
-				myportdb_setup.close_caches()
-				portage.portdbapi.portdbapi_instances.remove(myportdb_setup)
-		return config_cpv_dict
-
-	def get_ebuild_metadata(self, cpv, repo):
-		# Get the auxdbkeys infos for the ebuild
-		try:
-			ebuild_auxdb_list = self._myportdb.aux_get(cpv, portage.auxdbkeys, myrepo=repo)
-		except:
-			return False
-		return ebuild_auxdb_list
-
-	def get_git_log_ebuild(self, repodir, ebuild_file):
-		git_log_ebuild = ''
-		g = git.Git(repodir)
-		index = 1
-		git_log_dict = {}
-		for line in g.log('-n 1', ebuild_file).splitlines():
-			git_log_dict[index] = line
-			index = index + 1
-		git_ebuild_commit = re.sub('commit ', '', git_log_dict[1])
-		git_ebuild_commit_msg = re.sub('    ', '', git_log_dict[5])
-		return git_ebuild_commit, git_ebuild_commit_msg
-
-	def get_packageDict(self, pkgdir, cpv, repo):
-
-		#Get categories, package and version from cpv
-		ebuild_version_tree = portage.versions.cpv_getversion(cpv)
-		element = portage.versions.cpv_getkey(cpv).split('/')
-		categories = element[0]
-		package = element[1]
-		ebuild_file = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild"
-		# Make a checksum of the ebuild
-		try:
-			ebuild_version_checksum_tree = perform_checksum(ebuild_file, "SHA256")[0]
-		except:
-			ebuild_version_checksum_tree = "0"
-			log_msg = "QA: Can't checksum the ebuild file. %s on repo %s" % (cpv, repo,)
-			write_log(self._session, log_msg, "warning", self._config_id, 'packages.get_packageDict')
-			log_msg = "C %s:%s ... Fail." % (cpv, repo)
-			write_log(self._session, log_msg, "info", self._config_id, 'packages.get_packageDict')
-			git_commit = '0'
-			git_commit_msg = '0'
-		else:
-			repodir =self._myportdb.getRepositoryPath(repo)
-			git_commit, git_commit_msg = self.get_git_log_ebuild(repodir, ebuild_file)
-
-		# Get the ebuild metadata
-		ebuild_version_metadata_tree = self.get_ebuild_metadata(cpv, repo)
-		# If we fail to get the metadata, fill ebuild_version_metadata_tree with
-		# empty fields and set ebuild_version_checksum_tree to 0 so the ebuild
-		# gets updated on the next db update
-		if not ebuild_version_metadata_tree:
-			log_msg = "QA: %s has broken metadata on repo %s" % (cpv, repo)
-			write_log(self._session, log_msg, "warning", self._config_id, 'packages.get_packageDict')
-			ebuild_version_metadata_tree = ['','','','','','','','','','','','','','','','','','','','','','','','','']
-			ebuild_version_checksum_tree = '0'
-
-		# add the ebuild info to the dict packages
-		PackageInfo = get_package_info(self._session, categories, package, repo)
-		attDict = {}
-		attDict['package_id'] = PackageInfo.PackageId
-		attDict['repo'] = repo
-		attDict['ebuild_version'] = ebuild_version_tree
-		attDict['checksum']= ebuild_version_checksum_tree
-		attDict['ebuild_version_metadata_tree'] = ebuild_version_metadata_tree
-		#attDict['ebuild_version_text_tree'] = ebuild_version_text_tree[0]
-		attDict['git_commit'] = git_commit
-		attDict['git_commit_msg'] = git_commit_msg
-		attDict['new'] = False
-		attDict['updated'] = False
-		attDict['ebuild_version_descriptions_tree'] = ebuild_version_metadata_tree[7]
-		return attDict
-
-	def add_new_build_job_db(self, ebuild_id_list, packageDict, config_cpv_listDict):
-		# Get the needed info from packageDict and config_cpv_listDict and put that in buildqueue
-		# Only add it if ebuild_version in packageDict and config_cpv_listDict match
-		if config_cpv_listDict is not None:
-			# Unpack config_cpv_listDict
-			for setup_id, v in config_cpv_listDict.items():
-				build_cpv = v['cpv']
-				iuse_flags_list = list(set(v['iuse']))
-				use_enable= v['useflags']
-				use_disable = list(set(iuse_flags_list).difference(set(use_enable)))
-				# Make a dict with enable and disable use flags for ebuildqueuedwithuses
-				use_flagsDict = {}
-				for x in use_enable:
-					use_flagsDict[x] = True
-				for x in use_disable:
-					use_flagsDict[x] = False
-				enable_test = v['test']
-				# Unpack packageDict
-				i = 0
-				for k, v in packageDict.items():
-					ebuild_id = ebuild_id_list[i]
-
-					# Comper and add the cpv to buildqueue
-					if build_cpv == k:
-						# check if we need to enable or disable test
-						if "test" in use_flagsDict and enable_test:
-							use_flagsDict['test'] = True
-						restrictions_list = get_ebuild_restrictions(self._session, ebuild_id)
-						if restrictions_list:
-							if "test" in restrictions_list and "test" in use_flagsDict:
-								use_flagsDict['test'] = False
-						add_new_build_job(self._session, ebuild_id, setup_id, use_flagsDict, self._config_id)
-						# B = Build cpv use-flags config
-						# FIXME: log_msg needs to log the use flags correctly.
-						log_msg = "B %s:%s USE: %s Setup: %s" % (k, v['repo'], use_flagsDict, setup_id,)
-						write_log(self._session, log_msg, "info", self._config_id, 'packages.add_new_build_job_db')
-					i = i +1
-
-	def get_git_changelog_text(self, repodir, cp):
-		n = '5'
-		git_log_pkg = ''
-		g = git.Git(repodir)
-		git_log_pkg = g.log('-n ' + n, '--grep=' + cp)
-		return git_log_pkg
-
-	def get_package_metadataDict(self, pkgdir, repodir, package_id, cp):
-		# Make package_metadataDict
-		attDict = {}
-		package_metadataDict = {}
-		md_email_list = []
-		herd = None
-		try:
-			pkg_md = MetaDataXML(pkgdir + "/metadata.xml", herd)
-		except:
-			log_msg = "Metadata file %s is missing or has errors" % (pkgdir + "/metadata.xml")
-			write_log(self._session, log_msg, "warning", self._config_id, 'packages.get_package_metadataDict')
-		else:
-			tmp_herds = pkg_md.herds()
-			if tmp_herds != ():
-				attDict['metadata_xml_herds'] = tmp_herds[0]
-				md_email_list.append(attDict['metadata_xml_herds'] + '@gentoo.org')
-			for maint in pkg_md.maintainers():
-				md_email_list.append(maint.email)
-			if md_email_list != []:
-				attDict['metadata_xml_email'] = md_email_list
-			else:
-				md_email_list.append('maintainer-needed@gentoo.org')
-				attDict['metadata_xml_email'] = md_email_list
-				log_msg = "Metadata file %s missing Email. Setting it to maintainer-needed" % (pkgdir + "/metadata.xml")
-				write_log(self._session, log_msg, "warning", self._config_id, 'packages.get_package_metadataDict')
-		attDict['git_changlog'] = self.get_git_changelog_text(repodir, cp)
-		attDict['metadata_xml_descriptions'] = ''
-		attDict['new'] = False
-		package_metadataDict[package_id] = attDict
-		return package_metadataDict
-
-	def add_package(self, packageDict, package_metadataDict, package_id, new_ebuild_id_list, old_ebuild_id_list):
-		# Use packageDict to update the db
-		ebuild_id_list = add_new_ebuild_sql(self._session, packageDict)
-
-		# Mark old ebuilds as inactive
-		for ebuild_id in ebuild_id_list:
-			new_ebuild_id_list.append(ebuild_id)
-		for ebuild_id in get_ebuild_id_list(self._session, package_id):
-			if not ebuild_id in new_ebuild_id_list:
-				if not ebuild_id in old_ebuild_id_list:
-					old_ebuild_id_list.append(ebuild_id)
-		if not old_ebuild_id_list == []:
-			add_old_ebuild(self._session, old_ebuild_id_list)
-
-		# update package metadata
-		update_package_metadata(self._session, package_metadataDict)
-
-		# Get the best cpv for the configs and add it to config_cpv_listDict
-		PackageInfo, CategoryInfo, RepoInfo = get_package_info_from_package_id(self._session, package_id)
-		cp = CategoryInfo.Category + '/' + PackageInfo.Package
-		config_all_info  = get_config_all_info(self._session)
-		config_list = []
-		for config in get_config_all_info(self._session):
-			if config.Host is False:
-				config_list.append(config.ConfigId)
-		ConfigsMetaData = get_configmetadata_info(self._session, self._config_id)
-		config_cpv_listDict = self.config_match_ebuild(cp, config_list, ConfigsMetaData.RepoPath)
-
-		# Add the ebuild to the build jobs table if needed
-		self.add_new_build_job_db(ebuild_id_list, packageDict, config_cpv_listDict)
-
-	def add_new_package_db(self, cp, repo):
-		# Add the new category/package/ebuilds to the packages and ebuilds tables
-		# C = Checking
-		# N = New Package
-		log_msg = "C %s:%s" % (cp, repo)
-		write_log(self._session, log_msg, "info", self._config_id, 'packages.add_new_package_db')
-		log_msg = "N %s:%s" % (cp, repo)
-		write_log(self._session, log_msg, "info", self._config_id, 'packages.add_new_package_db')
-		repodir = self._myportdb.getRepositoryPath(repo)
-		mytree = []
-		mytree.append(repodir)
-		pkgdir = repodir + "/" + cp # Get RepoDIR + cp
-		package_id = add_new_package_sql(self._session, cp, repo)
-
-		# Check cp with repoman full
-		status = check_repoman_full(self._session, pkgdir, package_id, self._config_id)
-		if status:
-			log_msg = "Repoman %s::%s ... Fail." % (cp, repo)
-			write_log(self._session, log_msg, "warning", self._config_id, 'packages.add_new_package_db')
-
-		package_metadataDict = self.get_package_metadataDict(pkgdir, repodir, package_id, cp)
-		# Get the ebuild list for cp
-		ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
-		if ebuild_list_tree == []:
-			log_msg = "QA: Can't get the ebuilds list. %s:%s" % (cp, repo,)
-			write_log(self._session, log_msg, "error", self._config_id, 'packages.add_new_package_db')
-			log_msg = "C %s:%s ... Fail." % (cp, repo)
-			write_log(self._session, log_msg, "warning", self._config_id, 'packages.add_new_package_db')
-			return None
-
-		# Make the needed packageDict with ebuild infos so we can add it later to the db.
-		packageDict ={}
-		new_ebuild_id_list = []
-		old_ebuild_id_list = []
-		for cpv in sorted(ebuild_list_tree):
-			packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo)
-			packageDict[cpv]['new'] = True
-
-			# take package descriptions from the ebuilds
-			if package_metadataDict[package_id]['metadata_xml_descriptions'] != packageDict[cpv]['ebuild_version_descriptions_tree']:
-				package_metadataDict[package_id]['metadata_xml_descriptions'] = packageDict[cpv]['ebuild_version_descriptions_tree']
-		package_metadataDict[package_id]['new'] = True
-		self.add_package(packageDict, package_metadataDict, package_id, new_ebuild_id_list, old_ebuild_id_list)
-		log_msg = "C %s:%s ... Done." % (cp, repo)
-		write_log(self._session, log_msg, "info", self._config_id, 'packages.add_new_package_db')
-
-	def update_package_db(self, package_id):
-		# Update the categories and package with new info
-		# C = Checking
-		PackageInfo, CategoryInfo, RepoInfo = get_package_info_from_package_id(self._session, package_id)
-		cp = CategoryInfo.Category + '/' + PackageInfo.Package
-		repo = RepoInfo.Repo
-		log_msg = "C %s:%s" % (cp, repo)
-		write_log(self._session, log_msg, "info", self._config_id, 'packages.update_package_db')
-		repodir = self._myportdb.getRepositoryPath(repo)
-		pkgdir = repodir + "/" + cp # Get RepoDIR + cp
-		if not os.path.isdir(pkgdir):
-			old_ebuild_id_list = get_ebuild_id_list(self._session, package_id)
-			for ebuild_id in old_ebuild_id_list:
-				EbuildInfo = get_ebuild_info_ebuild_id(self._session, ebuild_id)
-				cpv = cp + "-" + EbuildInfo.Version
-				# R =  remove ebuild
-				log_msg = "R %s:%s" % (cpv, repo,)
-				write_log(self._session, log_msg, "info", self._config_id, 'packages.update_package_db')
-			add_old_ebuild(self._session, old_ebuild_id_list)
-			add_old_package(self._session, package_id)
-			log_msg = "C %s:%s ... Done." % (cp, repo)
-			write_log(self._session, log_msg, "info", self._config_id, 'packages.update_package_db')
-			return None
-		
-		mytree = []
-		mytree.append(repodir)
-
-		# Get the ebuild list for cp
-		old_ebuild_id_list = []
-		ebuild_list_tree = self._myportdb.cp_list(cp, use_cache=1, mytree=mytree)
-		if ebuild_list_tree == []:
-			log_msg = "QA: Can't get the ebuilds list. %s:%s" % (cp, repo,)
-			write_log(self._session, log_msg, "error", self._config_id, 'packages.update_package_db')
-			log_msg = "C %s:%s ... Fail." % (cp, repo)
-			write_log(self._session, log_msg, "warning", self._config_id, 'packages.update_package_db')
-			return None
-
-		package_metadataDict = self.get_package_metadataDict(pkgdir, repodir, package_id, cp)
-		packageDict ={}
-		new_ebuild_id_list = []
-		package_updated = False
-		for cpv in sorted(ebuild_list_tree):
-
-			# split out ebuild version
-			ebuild_version_tree = portage.versions.cpv_getversion(cpv)
-
-			# Get packageDict for cpv
-			packageDict[cpv] = self.get_packageDict(pkgdir, cpv, repo)
-
-			# take package descriptions from the ebuilds
-			if package_metadataDict[package_id]['metadata_xml_descriptions'] != packageDict[cpv]['ebuild_version_descriptions_tree']:
-				package_metadataDict[package_id]['metadata_xml_descriptions'] = packageDict[cpv]['ebuild_version_descriptions_tree']
-
-			# Get the checksum of the ebuild in tree and db
-			ebuild_version_checksum_tree = packageDict[cpv]['checksum']
-			checksums_db, fail = get_ebuild_checksums(self._session, package_id, ebuild_version_tree)
-
-			# check if we have dupes of the checksum from db
-			if checksums_db is None:
-				ebuild_version_manifest_checksum_db = None
-			elif fail:
-				dupe_ebuild_id_list = []
-				for checksum in checksums_db:
-					ebuilds_id , status = get_ebuild_id_db(self._session, checksum, package_id, ebuild_version_tree)
-					for ebuild_id in ebuilds_id:
-						log_msg = "U %s:%s:%s Dups of checksums" % (cpv, repo, ebuild_id,)
-						write_log(self._session, log_msg, "warning", self._config_id, 'packages.update_package_db')
-						dupe_ebuild_id_list.append(ebuild_id)
-				add_old_ebuild(self._session, dupe_ebuild_id_list)
-				ebuild_version_manifest_checksum_db = None
-			else:
-				ebuild_version_manifest_checksum_db = checksums_db
-
-			# Check if the checksum has changed
-			if ebuild_version_manifest_checksum_db is None:
-				# N = New ebuild
-				log_msg = "N %s:%s" % (cpv, repo,)
-				write_log(self._session, log_msg, "info", self._config_id, 'packages.update_package_db')
-				packageDict[cpv]['updated'] = True
-				package_updated = True
-			elif  ebuild_version_checksum_tree != ebuild_version_manifest_checksum_db:
-				# U = Updated ebuild
-				log_msg = "U %s:%s" % (cpv, repo,)
-				write_log(self._session, log_msg, "info", self._config_id, 'packages.update_package_db')
-				packageDict[cpv]['updated'] = True
-				package_updated = True
-			else:
-				# Remove cpv from packageDict and add the ebuild to the new ebuilds list
-				del packageDict[cpv]
-				ebuild_id , status = get_ebuild_id_db(self._session, ebuild_version_checksum_tree, package_id, ebuild_version_tree)
-				new_ebuild_id_list.append(ebuild_id)
-		self.add_package(packageDict, package_metadataDict, package_id, new_ebuild_id_list, old_ebuild_id_list)
-
-		if package_updated:
-			# Check cp with repoman full
-			status = check_repoman_full(self._session, pkgdir, package_id, self._config_id)
-			if status:
-				log_msg = "Repoman %s::%s ... Fail." % (cp, repo)
-				write_log(self._session, log_msg, "warning", self._config_id, 'packages.update_package_db')
-
-		log_msg = "C %s:%s ... Done." % (cp, repo)
-		write_log(self._session, log_msg, "info", self._config_id, 'packages.update_package_db')
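
A quick standalone illustration of the USE-flag handling that add_new_build_job_db() above performs (example flag values, not part of the removed file): flags listed in IUSE but not enabled for the config setup end up disabled in the dict stored with the build job.

    # IUSE from the ebuild and the flags enabled for this config (example values)
    iuse_flags_list = list(set(['ssl', 'test', 'doc']))
    use_enable = ['ssl']
    # Everything in IUSE that is not enabled gets disabled
    use_disable = list(set(iuse_flags_list).difference(set(use_enable)))
    use_flagsDict = {}
    for flag in use_enable:
        use_flagsDict[flag] = True
    for flag in use_disable:
        use_flagsDict[flag] = False
    print(use_flagsDict)  # e.g. {'ssl': True, 'test': False, 'doc': False}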

diff --git a/pym/tbc/qachecks.py b/pym/tbc/qachecks.py
deleted file mode 100644
index 976cb2a..0000000
--- a/pym/tbc/qachecks.py
+++ /dev/null
@@ -1,100 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import os
-import warnings
-import sys
-import codecs
-from portage import os, _encodings, _unicode_decode
-from portage.checksum import _hash_filter
-from portage.exception import DigestException, FileNotFound
-from portage.localization import _
-from portage.manifest import Manifest
-from portage import os, _encodings, _unicode_decode, _unicode_encode
-from portage.exception import DigestException, FileNotFound, ParseError, PermissionDenied
-from _emerge.Package import Package
-from _emerge.RootConfig import RootConfig
-from repoman.modules.scan.ebuild.checks import run_checks
-from tbc.repoman import repoman_main
-from tbc.sqlquerys import get_configmetadata_info, get_config_info, get_setup_info
-import portage
-
-def check_file_in_manifest(pkgdir, mysettings, portdb, cpv, build_use_flags_list, repo):
-	myfetchlistdict = portage.FetchlistDict(pkgdir, mysettings, portdb)
-	my_manifest = portage.Manifest(pkgdir, mysettings['DISTDIR'], fetchlist_dict=myfetchlistdict, manifest1_compat=False, from_scratch=False)
-	tree = portdb.getRepositoryPath(repo)
-	cpv_fetchmap = portdb.getFetchMap(cpv, useflags=build_use_flags_list, mytree=tree)
-	mysettings.unlock()
-	try:
-		portage.fetch(cpv_fetchmap, mysettings, listonly=0, fetchonly=0, locks_in_subdir='.locks', use_locks=1, try_mirrors=1)
-	except:
-		mysettings.lock()
-		return "Can't fetch the file.\n"
-	finally:
-		mysettings.lock()
-	try:
-		my_manifest.checkCpvHashes(cpv, checkDistfiles=True, onlyDistfiles=True, checkMiscfiles=False)
-	except:
-		return "Can't fetch the file or the hash failed.\n"
-	try:
-		portdb.fetch_check(cpv, useflags=build_use_flags_list, mysettings=mysettings, all=False)
-	except:
-		return "Fetch check failed.\n"
-	return
-
-def check_repoman(mysettings, myportdb, cpv, repo):
-	# We run repoman run_checks on the ebuild
-	ebuild_version_tree = portage.versions.cpv_getversion(cpv)
-	element = portage.versions.cpv_getkey(cpv).split('/')
-	categories = element[0]
-	package = element[1]
-	pkgdir = myportdb.getRepositoryPath(repo) + "/" + categories + "/" + package
-	full_path = pkgdir + "/" + package + "-" + ebuild_version_tree + ".ebuild"
-	root = '/'
-	trees = {
-	root : {'porttree' : portage.portagetree(root, settings=mysettings)}
-	}
-	root_config = RootConfig(mysettings, trees[root], None)
-	allvars = set(x for x in portage.auxdbkeys if not x.startswith("UNUSED_"))
-	allvars.update(Package.metadata_keys)
-	allvars = sorted(allvars)
-	myaux = dict(zip(allvars, myportdb.aux_get(cpv, allvars, myrepo=repo)))
-	pkg = Package(cpv=cpv, metadata=myaux, root_config=root_config, type_name='ebuild')
-	fails = []
-	try:
-		# All ebuilds should have utf_8 encoding.
-		f = codecs.open(_unicode_encode(full_path,
-		encoding = _encodings['fs'], errors = 'strict'),
-		mode = 'r', encoding = _encodings['repo.content'])
-		try:
-			for check_name, e in run_checks(f, pkg):
-				fails.append(check_name + ": " + e + "\n")
-		finally:
-			f.close()
-	except UnicodeDecodeError:
-		# A file.UTF8 failure will have already been recorded above.
-		pass
-	# fails will have a list with repoman errors
-	if fails == []:
-		return False
-	return fails
-
-def repoman_full(session, pkgdir, config_id):
-	ConfigsMetaData = get_configmetadata_info(session, config_id)
-	ConfigInfo = get_config_info(session, config_id)
-	SetupInfo = get_setup_info(session, config_id)
-	config_root = ConfigsMetaData.RepoPath + '/' + ConfigInfo.Hostname + "/" + SetupInfo.Setup
-	argscmd = []
-	argscmd.append('--xmlparse')
-	argscmd.append('full')
-	qatracker, qawarnings = repoman_main(argscmd, config_root=config_root, pkgdir=pkgdir)
-	adict = {}
-	for key in qatracker.fails.items():
-		alist = []
-		for foo in key[1]:
-			alist.append(foo)
-			adict[key[0]] = alist
-	if adict == {}:
-		return False
-	return adict
-

diff --git a/pym/tbc/readconf.py b/pym/tbc/readconf.py
deleted file mode 100644
index ebb3b71..0000000
--- a/pym/tbc/readconf.py
+++ /dev/null
@@ -1,45 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-import os
-import sys
-import re
-from socket import getfqdn
-
-configfile = "/etc/tbc/tbc.conf"
-
-def read_config_settings():
-# Return a dict with the options from the config file
-	log_file= False
-	tbc_settings = {}
-	try:
-		open_conffile = open(configfile, 'r')
-	except:
-		sys.exit("Fail to open config file:" + configfile)
-	textlines = open_conffile.readlines()
-	for line in textlines:
-		element = line.split('=')
-		if element[0] == 'SQLBACKEND':		# Database backend
-			get_sql_backend = element[1]
-		if element[0] == 'SQLDB':			# Database
-			get_sql_db = element[1]
-		if element[0] == 'SQLHOST':			# Host
-			get_sql_host = element[1]
-		if element[0] == 'SQLUSER':			# User
-			get_sql_user = element[1]
-		if element[0] == 'SQLPASSWD':		# Password
-			get_sql_passwd = element[1]
-		if element[0] == 'LOG':		# Log level
-			tbc_settings['log_level'] = element[1].rstrip('\n')
-		if element[0] == 'LOGFILE':		# Log file
-			log_file = element[1].rstrip('\n')
-	open_conffile.close()
-
-	tbc_settings['sql_backend'] = get_sql_backend.rstrip('\n')
-	tbc_settings['sql_db'] = get_sql_db.rstrip('\n')
-	tbc_settings['sql_host'] = get_sql_host.rstrip('\n')
-	tbc_settings['sql_user'] = get_sql_user.rstrip('\n')
-	tbc_settings['sql_passwd'] = get_sql_passwd.rstrip('\n')
-	tbc_settings['hostname'] = getfqdn()
-	tbc_settings['log_file'] = log_file
-	return tbc_settings

diff --git a/pym/tbc/sqlquerys.py b/pym/tbc/sqlquerys.py
deleted file mode 100644
index 9c962f4..0000000
--- a/pym/tbc/sqlquerys.py
+++ /dev/null
@@ -1,665 +0,0 @@
-# Copyright 1998-2016 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import datetime
-import sys
-from tbc.db_mapping import Configs, Logs, ConfigsMetaData, Jobs, BuildJobs, Packages, Ebuilds, Repos, Categories, \
-	Uses, ConfigsEmergeOptions, EmergeOptions, HiLight, BuildLogs, BuildLogsConfig, BuildJobsUse, BuildJobsRedo, \
-	HiLightCss, BuildLogsHiLight, BuildLogsEmergeOptions, BuildLogsErrors, ErrorsInfo, EmergeInfo, BuildLogsUse, \
-	BuildJobsEmergeOptions, EbuildsMetadata, EbuildsIUse, Restrictions, EbuildsRestrictions, EbuildsKeywords, \
-	Keywords, PackagesMetadata, Emails, PackagesEmails, Setups, BuildLogsRepoman, CategoriesMetadata, \
-	PackagesRepoman, BuildLogsQa, TbcConfig
-from tbc.log import write_log
-from sqlalchemy.orm.exc import NoResultFound, MultipleResultsFound
-from sqlalchemy import and_, or_
-
-# Guest Functions
-def get_tbc_config(session):
-	TbcConfigInfo = session.query(TbcConfig).one()
-	return TbcConfigInfo
-
-def get_config_id(session, setup, host):
-	SetupInfo = session.query(Setups).filter_by(Setup = setup).one()
-	ConfigInfo = session.query(Configs).filter_by(SetupId = SetupInfo.SetupId).filter_by(Hostname = host).one()
-	return ConfigInfo.ConfigId
-
-def get_config_id_fqdn(session, host):
-	ConfigInfo = session.query(Configs).filter_by(Hostname = host).one()
-	return ConfigInfo.ConfigId
-
-def add_logs(session, log_msg, log_type, config_id):
-	Add_Log = Logs(ConfigId = config_id, LogType = log_type, Msg = log_msg)
-	session.add(Add_Log)
-	session.commit()
-
-def update_deamon_status(session, status, config_id):
-	ConfigInfo = session.query(ConfigsMetaData).filter_by(ConfigId = config_id).one()
-	ConfigInfo.Status = status
-	session.commit()
-
-def get_jobs(session, config_id):
-	JobsInfo = session.query(Jobs).filter_by(Status = 'Waiting').filter_by(ConfigId = config_id).order_by(Jobs.JobId).all()
-	if JobsInfo == []:
-		return None
-	return JobsInfo
-
-def get_job_status_waiting_on_guest(session):
-	try:
-		JobsInfo = session.query(Jobs).filter_by(Status = 'Waiting_on_guest').one()
-	except NoResultFound as e:
-		return None
-	return JobsInfo.JobId
-
-def update_job_list(session, status, job_id):
-	JobInfo = session.query(Jobs).filter_by(JobId = job_id).one()
-	JobInfo.Status = status
-	if status == 'Done':
-		JobInfo.TimeStamp = datetime.datetime.utcnow()
-	session.commit()
-
-def get_config_all_info(session):
-	return session.query(Configs).all()
-
-def get_config_info(session, config_id):
-	ConfigInfo = session.query(Configs).filter_by(ConfigId = config_id).one()
-	return ConfigInfo
-
-def get_setup_info(session, config_id):
-	ConfigInfo = get_config_info(session, config_id)
-	SetupInfo = session.query(Setups).filter_by(SetupId = ConfigInfo.SetupId).one()
-	return SetupInfo
-
-def update_buildjobs_status(session, build_job_id, status, config_id):
-	BuildJobsInfo = session.query(BuildJobs).filter_by(BuildJobId = build_job_id).one()
-	BuildJobsInfo.Status = status
-	BuildJobsInfo.ConfigId = config_id
-	session.commit()
-
-def get_configmetadata_info(session, config_id):
-	return session.query(ConfigsMetaData).filter_by(ConfigId = config_id).one()
-
-def is_build_job_done(session, build_job_id):
-	try:
-		BuildJobsInfo = session.query(BuildJobs).filter_by(BuildJobId = build_job_id).one()
-	except NoResultFound as e:
-		return False
-	return True
-
-def get_packages_to_build(session, config_id):
-	SetupInfo = get_setup_info(session, config_id)
-	BuildJobsTmp = session.query(BuildJobs).filter(BuildJobs.SetupId==SetupInfo.SetupId). \
-		order_by(BuildJobs.BuildJobId).filter_by(Status = 'Waiting')
-	if BuildJobsTmp.all() == []:
-		return None
-	elif BuildJobsTmp.filter_by(BuildNow = True).all() != []:
-		BuildJobsInfo = BuildJobsTmp.filter_by(BuildNow = True).first()
-	elif BuildJobsTmp.filter_by(BuildNow = False).all() != []:
-		BuildJobsInfo = BuildJobsTmp.filter_by(BuildNow = False).first()
-	else:
-		return None
-	update_buildjobs_status(session, BuildJobsInfo.BuildJobId, 'Looked', config_id)
-	EbuildsInfo = session.query(Ebuilds).filter_by(EbuildId = BuildJobsInfo.EbuildId).one()
-	PackagesInfo, CategoriesInfo = session.query(Packages, Categories).filter(Packages.PackageId==EbuildsInfo.PackageId).filter(Packages.CategoryId==Categories.CategoryId).one()
-	ReposInfo = session.query(Repos).filter_by(RepoId = PackagesInfo.RepoId).one()
-	uses={}
-	for BuildJobsUseInfo, UsesInfo in session.query(BuildJobsUse, Uses).filter(BuildJobsUse.BuildJobId==BuildJobsInfo.BuildJobId).filter(BuildJobsUse.UseId==Uses.UseId).all():
-		uses[UsesInfo.Flag] = BuildJobsUseInfo.Status
-	if uses == {}:
-		uses = None
-	emerge_options_list = []
-	for ConfigsEmergeOptionsInfo, EmergeOptionsInfo in session.query(ConfigsEmergeOptions, EmergeOptions). \
-			filter(ConfigsEmergeOptions.ConfigId==config_id). \
-			filter(ConfigsEmergeOptions.EOptionId==EmergeOptions.EmergeOptionId).all():
-		emerge_options_list.append(EmergeOptionsInfo.EOption)
-	build_dict={}
-	build_dict['config_id'] = config_id
-	build_dict['setup_id'] = BuildJobsInfo.SetupId
-	build_dict['build_job_id'] = BuildJobsInfo.BuildJobId
-	build_dict['ebuild_id']= EbuildsInfo.EbuildId
-	build_dict['package_id'] = EbuildsInfo.PackageId
-	build_dict['package'] = PackagesInfo.Package
-	build_dict['category'] = CategoriesInfo.Category
-	build_dict['repo'] = ReposInfo.Repo
-	build_dict['removebin'] = BuildJobsInfo.RemoveBin
-	build_dict['ebuild_version'] = EbuildsInfo.Version
-	build_dict['checksum'] = EbuildsInfo.Checksum
-	build_dict['cp'] = CategoriesInfo.Category + '/' + PackagesInfo.Package
-	build_dict['cpv'] = build_dict['cp'] + '-' + EbuildsInfo.Version
-	build_dict['build_useflags'] = uses
-	build_dict['emerge_options'] = emerge_options_list
-	return build_dict
-
-def get_category_info(session, category):
-	try:
-		CategoryInfo = session.query(Categories).filter_by(Category = category).filter_by(Active = True).one()
-	except NoResultFound as e:
-		return False
-	return CategoryInfo
-
-def get_repo_info(session, repo):
-	try:
-		RepoInfo = session.query(Repos).filter_by(Repo = repo).one()
-	except NoResultFound as e:
-		return False
-	return RepoInfo
-
-def get_package_info(session, category, package, repo):
-	CategoryInfo = get_category_info(session, category)
-	RepoInfo = get_repo_info(session, repo)
-	try:
-		PackageInfo = session.query(Packages).filter_by(CategoryId = CategoryInfo.CategoryId). \
-			filter_by(Package = package).filter_by(RepoId = RepoInfo.RepoId).filter_by(Active = True).one()
-	except NoResultFound as e:
-		return False
-	return PackageInfo
-
-def get_ebuild_info(session, build_dict):
-	EbuildInfo = session.query(Ebuilds).filter_by(Version = build_dict['ebuild_version']).filter_by(Checksum = build_dict['checksum']).\
-		filter_by(PackageId = build_dict['package_id']).filter_by(Active = True)
-	if EbuildInfo.all() == []:
-		return None, True
-	try:
-		EbuildInfo2 = EbuildInfo.one()
-	except (MultipleResultsFound) as e:
-		return EbuildInfo.all(), True
-	return EbuildInfo2, False
-
-def get_ebuild_info_ebuild_id(session, ebuild_id):
-	return session.query(Ebuilds).filter_by(EbuildId = ebuild_id).filter_by(Active = True).one()
-
-def get_build_job_id(session, build_dict):
-	BuildJobsIdInfo = session.query(BuildJobs.BuildJobId).filter_by(EbuildId = build_dict['ebuild_id']).filter_by(ConfigId = build_dict['config_id']).all()
-	if BuildJobsIdInfo == []:
-		return None
-	for build_job_id in BuildJobsIdInfo:
-		BuildJobsUseInfo = session.query(BuildJobsUse).filter_by(BuildJobId = build_job_id.BuildJobId).all()
-		useflagsdict = {}
-		if BuildJobsUseInfo == []:
-			useflagsdict = None
-		else:
-			for x in BuildJobsUseInfo:
-				useflagsdict[x.UseId] = x.Status
-		if useflagsdict == build_dict['build_useflags']:
-			return build_job_id.BuildJobId
-	return None
-
-def get_use_id(session, use_flag):
-	try:
-		UseIdInfo = session.query(Uses).filter_by(Flag = use_flag).one()
-	except NoResultFound as e:
-		return None
-	return UseIdInfo.UseId
-
-def get_hilight_info(session):
-	return session.query(HiLight).all()
-
-def get_error_info_list(session):
-	return session.query(ErrorsInfo).all()
-
-def add_e_info(session, emerge_info):
-	AddEmergeInfo = EmergeInfo(EmergeInfoText = emerge_info)
-	session.add(AddEmergeInfo)
-	session.flush()
-	EmergeInfoId = AddEmergeInfo.EInfoId
-	session.commit()
-	return EmergeInfoId
-
-def del_old_build_jobs(session, build_job_id):
-	session.query(BuildJobsUse).filter(BuildJobsUse.BuildJobId == build_job_id).delete()
-	session.query(BuildJobsRedo).filter(BuildJobsRedo.BuildJobId == build_job_id).delete()
-	session.query(BuildJobsEmergeOptions).filter(BuildJobsEmergeOptions.BuildJobId == build_job_id).delete()
-	session.query(BuildJobs).filter(BuildJobs.BuildJobId == build_job_id).delete()
-	session.commit()
-
-def add_new_buildlog(session, build_dict, build_log_dict):
-	build_log_id_list = session.query(BuildLogs.BuildLogId).filter_by(EbuildId = build_dict['ebuild_id']).all()
-
-	def add_new_hilight(log_id, build_log_dict):
-		for k, hilight_tmp in sorted(build_log_dict['hilight_dict'].items()):
-			NewHiLight = BuildLogsHiLight(LogId = log_id, StartLine = hilight_tmp['startline'], EndLine = hilight_tmp['endline'], HiLightCssId = hilight_tmp['hilight_css_id'])
-			session.add(NewHiLight)
-			session.commit()
-
-	def build_log_id_match(build_log_id_list, build_dict, build_log_dict):
-		for build_log_id in build_log_id_list:
-			log_hash = session.query(BuildLogs.LogHash).filter_by(BuildLogId = build_log_id[0]).one()
-			use_list = session.query(BuildLogsUse).filter_by(BuildLogId = build_log_id[0]).all()
-			useflagsdict = {}
-			if use_list == []:
-				useflagsdict = None
-			else:
-				for use in use_list:
-					useflagsdict[use.UseId] = use.Status
-			msg = 'Log_hash: %s Log_hash_sql: %s Build_log_id: %s' % (build_log_dict['log_hash'], log_hash[0], build_log_id,)
-			write_log(session, msg, "debug", build_dict['config_id'], 'sqlquerys.add_new_buildlog.build_log_id_match')
-			if log_hash[0] == build_log_dict['log_hash'] and build_dict['build_useflags'] == useflagsdict:
-				if session.query(BuildLogsConfig).filter(BuildLogsConfig.ConfigId.in_([build_dict['config_id']])).filter_by(BuildLogId = build_log_id[0]):
-					return None, True
-				e_info_id = add_e_info(session, build_log_dict['emerge_info'])
-				NewBuildLogConfig = BuildLogsConfig(BuildLogId = build_log_id[0], ConfigId = build_dict['config_id'], LogName = build_log_dict['logfilename'], EInfoId = e_info_id)
-				session.add(NewBuildLogConfig)
-				session.commit()
-				return build_log_id[0], True
-		return None, False
-
-	def build_log_id_no_match(build_dict, build_log_dict):
-		NewBuildLog = BuildLogs(EbuildId = build_dict['ebuild_id'], Fail = build_log_dict['fail'], SummeryText = build_log_dict['build_error'], LogHash = build_log_dict['log_hash'], New = True)
-		session.add(NewBuildLog)
-		session.flush()
-		build_log_id = NewBuildLog.BuildLogId
-		session.commit()
-		if build_log_dict['summary_error_list'] != []:
-			for error in build_log_dict['summary_error_list']:
-				NewError = BuildLogsErrors(BuildLogId = build_log_id, ErrorId = error)
-				session.add(NewError)
-				session.commit()
-		e_info_id = add_e_info(session, build_log_dict['emerge_info'])
-		NewBuildLogConfig = BuildLogsConfig(BuildLogId = build_log_id, ConfigId = build_dict['config_id'], LogName = build_log_dict['logfilename'], EInfoId = e_info_id)
-		session.add(NewBuildLogConfig)
-		session.flush()
-		log_id = NewBuildLogConfig.LogId
-		session.commit()
-		add_new_hilight(log_id, build_log_dict)
-		if not build_dict['build_useflags'] is None:
-			for use_id, status in  build_dict['build_useflags'].items():
-				NewBuildLogUse = BuildLogsUse(BuildLogId = build_log_id, UseId = use_id, Status = status)
-				session.add(NewBuildLogUse)
-				session.flush()
-			session.commit()
-		return build_log_id
-
-	msg = 'build_job_id: %s build_log_id_list: %s' % (build_dict['build_job_id'], build_log_id_list,)
-	write_log(session, msg, "debug", build_dict['config_id'], 'sqlquerys.add_new_buildlog')
-	if build_dict['build_job_id'] is None and build_log_id_list == []:
-		build_log_id = build_log_id_no_match(build_dict, build_log_dict)
-		return build_log_id
-	elif build_dict['build_job_id'] is None and build_log_id_list != []:
-		build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
-		if not match:
-			build_log_id = build_log_id_no_match(build_dict, build_log_dict)
-		return build_log_id
-	elif not build_dict['build_job_id'] is None and build_log_id_list != []:
-		build_log_id, match = build_log_id_match(build_log_id_list, build_dict, build_log_dict)
-		if not match:
-			build_log_id = build_log_id_no_match(build_dict, build_log_dict)
-			del_old_build_jobs(session, build_dict['build_job_id'])
-		return build_log_id
-	elif not build_dict['build_job_id'] is None and build_log_id_list == []:
-		build_log_id = build_log_id_no_match(build_dict, build_log_dict)
-		del_old_build_jobs(session, build_dict['build_job_id'])
-		return build_log_id
-
-def add_repoman_qa(session, build_log_dict, log_id):
-	repoman_error = ""
-	qa_error = ""
-	if build_log_dict['repoman_error_list']:
-		for repoman_text in build_log_dict['repoman_error_list']:
-			repoman_error = repoman_error + repoman_text
-		NewBuildLogRepoman = BuildLogsRepoman(BuildLogId = log_id, SummeryText = repoman_error)
-		session.add(NewBuildLogRepoman)
-		session.commit()
-	if build_log_dict['qa_error_list']:
-		for qa_text in build_log_dict['qa_error_list']:
-			qa_error = qa_error + qa_text
-		NewBuildLogQa = BuildLogsQa(BuildLogId = log_id, SummeryText = qa_error)
-		session.add(NewBuildLogQa)
-		session.commit()
-
-def update_fail_times(session, FailInfo):
-	NewBuildJobs = session.query(BuildJobs).filter_by(BuildJobId = FailInfo.BuildJobId).one()
-	NewBuildJobs.TimeStamp = datetime.datetime.utcnow()
-	session.commit()
-
-def get_fail_times(session, build_dict):
-	try:
-		FailInfo = session.query(BuildJobsRedo).filter_by(BuildJobId = build_dict['build_job_id']).filter_by(FailType = build_dict['type_fail']).one()
-	except NoResultFound as e:
-		return False
-	return True
-
-def add_fail_times(session, fail_querue_dict):
-	NewBuildJobsRedo = BuildJobsRedo(BuildJobId = fail_querue_dict['build_job_id'], FailType = fail_querue_dict['fail_type'], FailTimes = fail_querue_dict['fail_times'])
-	session.add(NewBuildJobsRedo)
-	session.commit()
-
-def check_host_updatedb(session):
-	jobs = False
-	try:
-		JobsInfo = session.query(Jobs).filter_by(Status = 'Done').filter_by(JobType = 'esync').one()
-	except NoResultFound as e:
-		jobs = True
-	try:
-		JobsInfo = session.query(Jobs).filter_by(Status = 'Done').filter_by(JobType = 'updatedb').one()
-	except NoResultFound as e:
-		jobs = True
-	return jobs
-
-# Host Functions
-def update_repo_db(session, repo_list):
-	for repo in repo_list:
-		if not get_repo_info(session, repo):
-			session.add(Repos(Repo = repo))
-			session.commit()
-
-def update_categories_db(session, category, categories_metadataDict):
-	CategoryInfo = get_category_info(session, category)
-	if not CategoryInfo:
-		session.add(Categories(Category = category))
-		session.commit()
-		CategoryInfo = get_category_info(session, category)
-	try:
-		CategoriesMetadataInfo = session.query(CategoriesMetadata).filter_by(CategoryId = CategoryInfo.CategoryId).one()
-	except NoResultFound as e:
-		NewCategoriesMetadata = CategoriesMetadata(CategoryId = CategoryInfo.CategoryId, Checksum = categories_metadataDict['metadata_xml_checksum'], Descriptions = categories_metadataDict['metadata_xml_descriptions'])
-		session.add(NewCategoriesMetadata)
-		session.commit()
-		return
-	if CategoriesMetadataInfo.Checksum != categories_metadataDict['metadata_xml_checksum']:
-		CategoriesMetadataInfo.Checksum = categories_metadataDict['metadata_xml_checksum']
-		CategoriesMetadataInfo.Descriptions = categories_metadataDict['metadata_xml_descriptions']
-		session.commit()
-
-def get_keyword_id(session, keyword):
-	try:
-		KeywordsInfo = session.query(Keywords).filter_by(Keyword = keyword).one()
-	except NoResultFound as e:
-		return None
-	return KeywordsInfo.KeywordId
-
-def add_new_ebuild_metadata_sql(session, ebuild_id, keywords, restrictions, iuse_list):
-	for restriction in restrictions:
-		if restriction in ["!"]:
-			restriction = restriction[1:]
-		if restriction in ["?"]:
-			restriction = restriction[:1]
-		if restriction != '(' or restriction != ')':
-			try:
-				RestrictionInfo = session.query(Restrictions).filter_by(Restriction = restriction).one()
-			except NoResultFound as e:
-				session.add(Restrictions(Restriction = restriction))
-				session.commit()
-				RestrictionInfo = session.query(Restrictions).filter_by(Restriction = restriction).one()
-			session.add(EbuildsRestrictions(EbuildId = ebuild_id, RestrictionId = RestrictionInfo.RestrictionId))
-			session.commit()
-	for iuse in iuse_list:
-		status = False
-		if iuse[0] in ["+"]:
-			iuse = iuse[1:]
-			status = True
-		elif iuse[0] in ["-"]:
-			iuse = iuse[1:]
-		use_id = get_use_id(session, iuse)
-		if use_id is None:
-			session.add(Uses(Flag = iuse))
-			session.commit()
-			use_id = get_use_id(session, iuse)
-		session.add(EbuildsIUse(EbuildId = ebuild_id, UseId = use_id, Status = status))
-		session.commit()
-	for keyword in keywords:
-		status = 'Stable'
-		if keyword[0] in ["~"]:
-			keyword = keyword[1:]
-			status = 'Unstable'
-		elif keyword[0] in ["-"]:
-			keyword = keyword[1:]
-			status = 'Negative'
-		keyword_id = get_keyword_id(session, keyword)
-		if keyword_id is None:
-			session.add(Keywords(Keyword = keyword))
-			session.commit()
-			keyword_id = get_keyword_id(session, keyword)
-		session.add(EbuildsKeywords(EbuildId = ebuild_id, KeywordId = keyword_id, Status = status)) 
-		session.commit()
-
-def add_new_ebuild_sql(session, packageDict):
-	ebuild_id_list = []
-	for k, v in packageDict.items():
-		session.add(Ebuilds(PackageId = v['package_id'], Version = v['ebuild_version'], Checksum = v['checksum'], Active = True))
-		session.flush()
-		try:
-			EbuildInfo = session.query(Ebuilds).filter_by(Version = v['ebuild_version']).filter_by(Checksum = v['checksum']).\
-				filter_by(PackageId = v['package_id']).filter_by(Active = True).one()
-		except (MultipleResultsFound) as e:
-			for x in session.query(Ebuilds).filter_by(Version = v['ebuild_version']).filter_by(Checksum = v['checksum']).\
-				filter_by(PackageId = v['package_id']).filter_by(Active = True).all():
-				x.Checksum = 0
-				x.Active = False
-				session.commit()
-			try:
-				EbuildInfo = session.query(Ebuilds).filter_by(Version = v['ebuild_version']).filter_by(Checksum = v['checksum']).\
-					filter_by(PackageId = v['package_id']).filter_by(Active = True).one()
-			except (MultipleResultsFound) as e:
-				# FIXME
-				sys.exit()
-		session.add(EbuildsMetadata(EbuildId = EbuildInfo.EbuildId, New = v['new'], Updated = v['updated'], Commit = v['git_commit'], \
-			CommitMsg = v['git_commit_msg'], Descriptions = v['ebuild_version_descriptions_tree'], Slot = v['ebuild_version_metadata_tree'][2], \
-			Homepage = v['ebuild_version_metadata_tree'][5], License = v['ebuild_version_metadata_tree'][6]))
-		session.commit()
-		ebuild_id_list.append(EbuildInfo.EbuildId)
-		restrictions = []
-		keywords = []
-		iuse = []
-		for i in v['ebuild_version_metadata_tree'][4].split():
-			restrictions.append(i)
-		for i in v['ebuild_version_metadata_tree'][8].split():
-			keywords.append(i)
-		for i in v['ebuild_version_metadata_tree'][10].split():
-			iuse.append(i)
-		add_new_ebuild_metadata_sql(session, EbuildInfo.EbuildId, keywords, restrictions, iuse)
-	return ebuild_id_list
-
-def get_ebuild_id_list(session, package_id):
-	ebuild_id_list = []
-	for EbuildInfo in session.query(Ebuilds).filter_by(PackageId = package_id).filter_by(Active = True).all():
-		ebuild_id_list.append(EbuildInfo.EbuildId)
-	return ebuild_id_list
-
-def get_build_job_all(session, ebuild_id):
-	return session.query(BuildJobs).filter_by(EbuildId = ebuild_id).all()
-
-def add_old_ebuild(session, old_ebuild_list):
-	for ebuild_id in  old_ebuild_list:
-		EbuildInfo = session.query(Ebuilds).filter_by(EbuildId = ebuild_id).one()
-		EbuildInfo.Active = False
-		session.commit()
-		build_job_id_list = get_build_job_all(session, ebuild_id)
-		if build_job_id_list != []:
-			for build_job in build_job_id_list:
-				del_old_build_jobs(session, build_job.BuildJobId)
-
-def add_old_package(session, package_id):
-	PackagesInfo = session.query(Packages).filter_by(PackageId = package_id).one()
-	PackagesInfo.Active = False
-	session.commit()
-
-def add_new_package_sql(session, cp, repo):
-	element = cp.split('/')
-	categories = element[0]
-	package = element[1]
-	RepoInfo =get_repo_info(session, repo)
-	repo_id = RepoInfo.RepoId
-	CategoriesInfo = get_category_info(session, categories)
-	category_id = CategoriesInfo.CategoryId
-	session.add(Packages(Package = package, CategoryId = category_id, RepoId = repo_id, Active = True))
-	session.commit()
-	PackageInfo = get_package_info(session, categories, package, repo)
-	return PackageInfo.PackageId
-
-def get_package_metadata_sql(session, package_id):
-	try:
-		PackagesMetadataInfo = session.query(PackagesMetadata).filter_by(PackageId = package_id).one()
-	except NoResultFound as e:
-		return False
-	return PackagesMetadataInfo
-
-def update_email_info(session, email):
-	try:
-		EmailInfo = session.query(Emails).filter_by(Email = email).one()
-	except NoResultFound as e:
-		session.add(Emails(Email = email))
-		session.commit()
-		EmailInfo = session.query(Emails).filter_by(Email = email).one()
-	return EmailInfo
-
-def update_package_email_info(session, email_id, package_id):
-	try:
-		PackagesEmailInfo = session.query(PackagesEmails).filter_by(EmailId = email_id).filter_by(PackageId = package_id).one()
-	except NoResultFound as e:
-		session.add(PackagesEmails(EmailId = email_id, PackageId = package_id))
-		session.commit()
-		PackagesEmailInfo = session.query(PackagesEmails).filter_by(EmailId = email_id).filter_by(PackageId = package_id).one()
-	return PackagesEmailInfo
-
-def update_package_metadata(session, package_metadataDict):
-	for k, v in package_metadataDict.items():
-		try:
-			PackagesMetadataInfo = session.query(PackagesMetadata).filter_by(PackageId = k).one()
-		except NoResultFound as e:
-			session.add(PackagesMetadata(PackageId = k, Gitlog = v['git_changlog'], Descriptions = v['metadata_xml_descriptions'], New = v['new']))
-			session.commit()
-		else:
-			PackagesMetadataInfo.Gitlog = v['git_changlog']
-			PackagesMetadataInfo.Descriptions = v['metadata_xml_descriptions']
-			session.commit()
-		if v['metadata_xml_email']:
-			for email in v['metadata_xml_email']:
-				EmailInfo = update_email_info(session, email)
-				PackagesEmailInfo = update_package_email_info(session, EmailInfo.EmailId, k)
-
-def get_package_info_from_package_id(session, package_id):
-	PackageInfo = session.query(Packages).filter_by(PackageId = package_id).one()
-	CategoryInfo = session.query(Categories).filter_by(CategoryId = PackageInfo.CategoryId).one()
-	RepoInfo = session.query(Repos).filter_by(RepoId = PackageInfo.RepoId).one()
-	return PackageInfo, CategoryInfo, RepoInfo
-
-def add_new_build_job(session, ebuild_id, setup_id, use_flagsDict, config_id):
-	NewBuildJobs = BuildJobs(EbuildId = ebuild_id, SetupId = setup_id, ConfigId = config_id, Status = 'Waiting', BuildNow = False, RemoveBin = True, New = True)
-	session.add(NewBuildJobs)
-	session.flush()
-	build_job_id = NewBuildJobs.BuildJobId
-	session.commit()
-	for k, v in use_flagsDict.items():
-		use_id = get_use_id(session, k)
-		session.add(BuildJobsUse(BuildJobId = build_job_id, UseId = use_id, Status = v))
-		session.commit()
-
-def get_ebuild_checksums(session, package_id, ebuild_version):
-	ebuild_checksum_list = []
-	try:
-		EbuildInfo = session.query(Ebuilds).filter_by(PackageId = package_id).filter_by(Version = ebuild_version).filter_by(Active = True).one()
-	except NoResultFound as e:
-		return None, False
-	except MultipleResultsFound as e:
-		EbuildInfo2 = session.query(Ebuilds).filter_by(PackageId = package_id).filter_by(Version = ebuild_version).filter_by(Active = True).all()
-		for Ebuild in EbuildInfo2:
-			ebuild_checksum_list.append(Ebuild.Checksum)
-		return ebuild_checksum_list, True
-	return EbuildInfo.Checksum, False
-
-def get_ebuild_id_db(session, checksum, package_id, ebuild_version):
-	try:
-		EbuildInfos = session.query(Ebuilds).filter_by(PackageId = package_id).filter_by(Checksum = checksum).filter_by(Version = ebuild_version).filter_by(Active = True).one()
-	except NoResultFound as e:
-		return None, True
-	except MultipleResultsFound as e:
-		EbuildInfos = session.query(Ebuilds).filter_by(PackageId = package_id).filter_by(Checksum = checksum).filter_by(Version = ebuild_version).filter_by(Active = True).all()
-		ebuilds_id = []
-		for EbuildInfo in EbuildInfos:
-			ebuilds_id.append(EbuildInfo.EbuildId)
-		return ebuilds_id, True
-	return EbuildInfos.EbuildId, False
-
-def get_ebuild_restrictions(session, ebuild_id):
-	restrictions = []
-	EbuildsRestrictionsInfos = session.query(EbuildsRestrictions).filter_by(EbuildId = ebuild_id).all()
-	if EbuildsRestrictionsInfos == []:
-		return False
-	for EbuildsRestrictionsInfo in EbuildsRestrictionsInfos:
-		RestrictionsInfo = session.query(Restrictions).filter_by(RestrictionId = EbuildsRestrictionsInfo.RestrictionId).one()
-		restrictions.append(RestrictionsInfo.Restriction)
-	return restrictions
-
-def add_repoman_log(session, package_id, repoman_log, repoman_hash):
-	try:
-		PackagesRepomanInfo = session.query(PackagesRepoman).filter_by(PackageId = package_id).one()
-	except NoResultFound as e:
-		session.add(PackagesRepoman(PackageId = package_id, RepomanText = repoman_log, RepomanHash = repoman_hash))
-		session.commit()
-	else:
-		if PackagesRepomanInfo.RepomanHash != repoman_hash:
-			PackagesRepomanInfo.RepomanHash = repoman_hash
-			PackagesRepomanInfo.RepomanText = repoman_log
-			session.commit()
-
-def get_category_list_info(session):
-	return session.query(Categories).all()
-
-def get_package_list_info(session, category_id):
-	return session.query(Packages).filter_by(CategoryId = category_id).all()
-
-def get_ebuild_list_info(session, package_id):
-	return session.query(Ebuilds).filter_by(PackageId = package_id).all()
-
-def del_old_ebuild(session, ebuild_id):
-	session.query(EbuildsRestrictions).filter(EbuildsRestrictions.EbuildId == ebuild_id).delete()
-	session.query(EbuildsIUse).filter(EbuildsIUse.EbuildId == ebuild_id).delete()
-	session.query(EbuildsKeywords).filter(EbuildsKeywords.EbuildId == ebuild_id).delete()
-	session.query(EbuildsMetadata).filter(EbuildsMetadata.EbuildId == ebuild_id).delete()
-	session.query(Ebuilds).filter(Ebuilds.EbuildId == ebuild_id).delete()
-	session.commit()
-
-def del_old_package(session, package_id):
-	session.query(PackagesRepoman).filter(PackagesRepoman.PackageId == package_id).delete()
-	session.query(PackagesEmails).filter(PackagesEmails.PackageId== package_id).delete()
-	session.query(PackagesMetadata).filter(PackagesMetadata.PackageId == package_id).delete()
-	session.query(Packages).filter(Packages.PackageId == package_id).delete()
-	session.commit()
-
-def add_old_category(session, category_id):
-	CategorysInfo = session.query(Categories).filter_by(CategoryId = category_id).one()
-	CategorysInfo.Active = False
-	session.commit()
-
-def reset_new_updated(session):
-	try:
-		PMInfo = session.query(PackagesMetadata).filter(PackagesMetadata.New == True).all()
-	except NoResultFound as e:
-		pass
-	else:
-		for x in PMInfo:
-			x.New = False
-			session.flush()
-	try:
-		EMInfo = session.query(EbuildsMetadata).filter(EbuildsMetadata.New == True).all()
-	except NoResultFound as e:
-		pass
-	else:
-		for x in EMInfo:
-			x.New = False
-			session.flush()
-	try:
-		BLInfo = session.query(BuildLogs).filter(BuildLogs.New == True).all()
-	except NoResultFound as e:
-		pass
-	else:
-		for x in BLInfo:
-			x.New = False
-			session.flush()
-	try:
-		BJInfo = session.query(BuildJobs).filter(BuildJobs.New == True).all()
-	except NoResultFound as e:
-		pass
-	else:
-		for x in BJInfo:
-			x.New = False
-			session.flush()
-	try:
-		EMInfo = session.query(EbuildsMetadata).filter(EbuildsMetadata.Updated == True).all()
-	except NoResultFound as e:
-		pass
-	else:
-		for x in EMInfo:
-			x.Updated = False
-			session.flush()
-	session.commit()
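
Nearly every helper in the removed sqlquerys.py follows the same get-or-create pattern around NoResultFound. A minimal sketch of that pattern, using the Uses table as in get_use_id() and add_new_ebuild_metadata_sql() above (the helper name is illustrative, not a function from the file):

    from sqlalchemy.orm.exc import NoResultFound
    from tbc.db_mapping import Uses

    def get_or_add_use_flag(session, flag):
        # Look the row up; create it on NoResultFound, then query it again
        try:
            UseInfo = session.query(Uses).filter_by(Flag = flag).one()
        except NoResultFound:
            session.add(Uses(Flag = flag))
            session.commit()
            UseInfo = session.query(Uses).filter_by(Flag = flag).one()
        return UseInfo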

diff --git a/pym/tbc/sync.py b/pym/tbc/sync.py
deleted file mode 100644
index 8488c50..0000000
--- a/pym/tbc/sync.py
+++ /dev/null
@@ -1,122 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import portage
-import os
-import errno
-import sys
-import time
-import re
-import git
-
-from tbc.sqlquerys import get_config_id_fqdn, add_logs, get_config_all_info, \
-	get_configmetadata_info, get_config_info, get_setup_info, get_job_status_waiting_on_guest, \
-	update_job_list
-from tbc.readconf import read_config_settings
-from tbc.log import write_log
-
-def git_repos_list(myportdb):
-	repo_trees_list = myportdb.porttrees
-	repo_dir_list = []
-	for repo_dir in repo_trees_list:
-		repo_dir_list.append(repo_dir)
-	return repo_dir_list
-
-def git_fetch(repo):
-	repouptodate = True
-	remote = git.remote.Remote(repo, 'origin')
-	info_list = remote.fetch()
-	local_commit = repo.commit()
-	remote_commit = info_list[0].commit
-	if local_commit.hexsha != remote_commit.hexsha:
-		repouptodate = False
-	return info_list, repouptodate
-
-def git_merge(repo, info):
-	repo.git.merge(info.commit)
-
-def git_sync_main(session):
-	tbc_settings = read_config_settings()
-	config_id = get_config_id_fqdn(session, tbc_settings['hostname'])
-	ConfigsMetaDataInfo = get_configmetadata_info(session, config_id)
-	ConfigInfo = get_config_info(session, config_id)
-	SetupInfo = get_setup_info(session, ConfigInfo.SetupId)
-	host_config = ConfigInfo.Hostname +"/" + SetupInfo.Setup
-	default_config_root = ConfigsMetaDataInfo.RepoPath + "/" + host_config + "/"
-	mysettings = portage.config(config_root = default_config_root)
-	myportdb = portage.portdbapi(mysettings=mysettings)
-	GuestBusy = True
-	log_msg = "Waiting for Guest to be idle"
-	write_log(session, log_msg, "info", config_id, 'sync.git_sync_main')
-	guestid_list = []
-	# check if the guests are idle
-	for config in get_config_all_info(session):
-		if not config.Host:
-			guestid_list.append(config.ConfigId)
-	while GuestBusy:
-		Status_list = []
-		for guest_id in guestid_list:
-			ConfigMetadataGuest = get_configmetadata_info(session, guest_id)
-			Status_list.append(ConfigMetadataGuest.Status)
-		write_log(session, 'Guest status: %s' % (Status_list,), "debug", config_id, 'sync.git_sync_main')
-		if not 'Runing' in Status_list:
-			GuestBusy = False
-		else:
-			time.sleep(60)
-
-	job_id = get_job_status_waiting_on_guest(session)
-	if not job_id is None:
-		update_job_list(session, 'Runing', job_id)
-
-	# check the git diffs for which packages got updated and put them in a dict
-	# fetch and merge the repo
-	repo_cp_dict = {}
-	search_list = [ '^metadata', '^eclass', '^licenses', '^profiles', '^scripts', '^skel.', '^header.txt']
-	for repo_dir in git_repos_list(myportdb):
-		reponame = myportdb.getRepositoryName(repo_dir)
-		repo = git.Repo(repo_dir)
-		log_msg = "Checking repo %s" % (reponame)
-		write_log(session, log_msg, "info", config_id, 'sync.git_sync_main')
-		info_list, repouptodate = git_fetch(repo)
-		if not repouptodate:
-			cp_list = []
-			attr = {}
-			# We check for dir changes and add the package to a list
-			repo_diff = repo.git.diff('origin', '--name-only')
-			write_log(session, 'Git dir diff:\n%s' % (repo_diff,), "debug", config_id, 'sync.git_sync_main')
-			for diff_line in repo_diff.splitlines():
-				find_search = True
-				for search_line in search_list:
-					if re.search(search_line, diff_line):
-						find_search = False
-				if find_search:
-					splited_diff_line = re.split('/', diff_line)
-					cp = splited_diff_line[0] + '/' + splited_diff_line[1]
-					if not cp in cp_list:
-						cp_list.append(cp)
-			attr['cp_list'] = cp_list
-			write_log(session, 'Git CP Diff: %s' % (cp_list,), "debug", config_id, 'sync.git_sync_main')
-			repo_cp_dict[reponame] = attr
-			git_merge(repo, info_list[0])
-		else:
-			log_msg = "Repo %s is up to date" % (reponame)
-			write_log(session, log_msg, "info", config_id, 'sync.git_sync_main')
-		log_msg = "Checking repo %s Done" % (reponame)
-		write_log(session, log_msg, "info", config_id, 'sync.git_sync_main')
-
-	log_msg = "Repo sync ... Done."
-	write_log(session, log_msg, "info", config_id, 'sync.git_sync_main')
-	write_log(session, 'Updated Packages: %s' % (repo_cp_dict,), "debug", config_id, 'sync.git_sync_main')
-	return repo_cp_dict
-
-def git_pull(session, repo_dir, config_id):
-	log_msg = "Git pull"
-	write_log(session, log_msg, "info", config_id, 'sync.git_pull')
-	repo = git.Repo(repo_dir)
-	info_list, repouptodate = git_fetch(repo)
-	if not repouptodate:
-		git_merge(repo, info_list[0])
-	log_msg = "Git pull ... Done"
-	write_log(session, log_msg, "info", config_id, 'sync.git_pull')
-	return True

diff --git a/pym/tbc/text.py b/pym/tbc/text.py
deleted file mode 100644
index c78c432..0000000
--- a/pym/tbc/text.py
+++ /dev/null
@@ -1,49 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import sys
-import re
-import os
-import errno
-from portage.util import grablines
-
-def  get_file_text(filename):
-	# Return the filename contents
-	try:
-		textfile = open(filename, encoding='utf-8')
-	except:
-		return "No file", filename
-	text = ""
-	for line in textfile:
-		text += line
-	textfile.close()
-	return text
-
-def  get_ebuild_cvs_revision(filename):
-	"""Return the ebuild contents"""
-	try:
-		ebuildfile = open(filename, encoding='utf-8')
-	except:
-		return "No Ebuild file there"
-	text = ""
-	dataLines = ebuildfile.readlines()
-	for i in dataLines:
-		text = text + i + " "
-	line2 = dataLines[2]
-	field = line2.split(" ")
-	ebuildfile.close()
-	try:
-		cvs_revision = field[3]
-	except:
-		cvs_revision = ''
-	return cvs_revision
-
-def  get_log_text_dict(filename):
-	"""Return the log contents as a dict"""
-	logfile_dict = {}
-	index = 1
-	for text_line in grablines(filename):
-		logfile_dict[index] = text_line
-		index = index + 1
-	return logfile_dict, index - 1

diff --git a/pym/tbc/updatedb.py b/pym/tbc/updatedb.py
deleted file mode 100644
index d32f0ac..0000000
--- a/pym/tbc/updatedb.py
+++ /dev/null
@@ -1,173 +0,0 @@
-# Copyright 1998-2015 Gentoo Foundation
-# Distributed under the terms of the GNU General Public License v2
-
-from __future__ import print_function
-import sys
-import os
-import multiprocessing
-import time
-import re
-import portage
-from portage.xml.metadata import MetaDataXML
-from portage.checksum import perform_checksum
-from sqlalchemy.orm import scoped_session, sessionmaker
-from tbc.ConnectionManager import NewConnection
-from tbc.sqlquerys import get_package_info, update_repo_db, \
-	update_categories_db, get_configmetadata_info, get_config_all_info, add_new_build_job, \
-	get_config_info, get_setup_info, get_job_status_waiting_on_guest, update_job_list, \
-	reset_new_updated
-from tbc.check_setup import check_make_conf
-from tbc.package import tbc_package
-# Get the options from the config file tbc.conf
-from tbc.readconf import  read_config_settings
-from tbc.log import write_log
-
-def init_portage_settings(session, config_id):
-	# check config setup
-	check_make_conf(session, config_id)
-	log_msg = "Check configs done"
-	write_log(session, log_msg, "info", config_id, 'updatedb.init_portage_settings')
-
-	# setup default root
-	ConfigsMetaDataInfo = get_configmetadata_info(session, config_id)
-	ConfigInfo = get_config_info(session, config_id)
-	SetupInfo = get_setup_info(session, ConfigInfo.SetupId)
-	host_config = ConfigInfo.Hostname +"/" + SetupInfo.Setup
-	default_config_root = ConfigsMetaDataInfo.RepoPath + "/" + host_config + "/"
-
-	# Set config_root (PORTAGE_CONFIGROOT)  to default_config_root
-	mysettings = portage.config(config_root = default_config_root)
-	log_msg = "Setting default config to: %s" % (host_config,)
-	write_log(session, log_msg, "info", config_id, 'updatedb.init_portage_settings')
-	return mysettings
-
-def get_categories_metadataDict(pkgdir):
-		# Make categories_metadataDict
-		categories_metadataDict = {}
-		pkg_md = MetaDataXML(pkgdir + "/metadata.xml", None)
-		metadata_xml_descriptions_tree = re.sub('\t', '', pkg_md.descriptions()[0])
-		categories_metadataDict['metadata_xml_descriptions'] = re.sub('\n', '', metadata_xml_descriptions_tree)
-		categories_metadataDict['metadata_xml_checksum'] =  perform_checksum(pkgdir + "/metadata.xml", "SHA256")[0]
-		return categories_metadataDict
-
-def update_cpv_db_pool(mysettings, myportdb, cp, repo, tbc_settings, config_id):
-	session_factory = sessionmaker(bind=NewConnection(tbc_settings))
-	Session = scoped_session(session_factory)
-	session2 = Session()
-	init_package = tbc_package(session2, mysettings, myportdb, config_id)
-
-	# split the cp to categories and package
-	element = cp.split('/')
-	categories = element[0]
-	package = element[1]
-
-	# update the categories table
-	repodir = myportdb.getRepositoryPath('gentoo')
-	pkgdir = repodir + "/" + categories
-	categories_metadataDict = get_categories_metadataDict(pkgdir)
-	update_categories_db(session2, categories, categories_metadataDict)
-
-	# Check if we have the cp in the package table
-	PackagesInfo = get_package_info(session2, categories, package, repo)
-	if PackagesInfo:  
-		# Update the packages with ebuilds
-		init_package.update_package_db(PackagesInfo.PackageId)
-	else:
-		# Add new package with ebuilds
-		init_package.add_new_package_db(cp, repo)
-	session2.close()
-	Session.remove()
-
-def update_cpv_db(session, repo_cp_dict, config_id, tbc_settings):
-	GuestBusy = True
-	log_msg = "Waiting for Guest to be idle"
-	write_log(session, log_msg, "info", config_id, 'updatedb.update_cpv_db')
-	guestid_list = []
-	for config in get_config_all_info(session):
-		if not config.Host:
-			guestid_list.append(config.ConfigId)
-	while GuestBusy:
-		Status_list = []
-		for guest_id in guestid_list:
-			ConfigMetadata = get_configmetadata_info(session, guest_id)
-			Status_list.append(ConfigMetadata.Status)
-		if not 'Runing' in Status_list:
-			break
-		time.sleep(30)
-
-	job_id = get_job_status_waiting_on_guest(session)
-	if not job_id is None:
-		update_job_list(session, 'Runing', job_id)
-
-	log_msg = "Checking categories, package, ebuilds"
-	write_log(session, log_msg, "info", config_id, 'updatedb.update_cpv_db')
-	new_build_jobs_list = []
-
-	# Setup settings, portdb and pool
-	mysettings =  init_portage_settings(session, config_id)
-	myportdb = portage.portdbapi(mysettings=mysettings)
-	
-	# Use all cores when multiprocessing
-	#pool_cores = multiprocessing.cpu_count()
-	#pool = multiprocessing.Pool(processes = pool_cores)
-
-	# Get packages and repo
-	if repo_cp_dict is None:
-		repo_list = []
-		repos_trees_list = []
-
-		# Get the repos and update the repos db
-		repo_list = myportdb.getRepositories()
-		update_repo_db(session, repo_list)
-
-		# Get the rootdirs for the repos
-		repo_trees_list = myportdb.porttrees
-		for repo_dir in repo_trees_list:
-			repo = myportdb.getRepositoryName(repo_dir)
-			repo_dir_list = []
-			repo_dir_list.append(repo_dir)
-
-			# Get the package list from the repo
-			package_list_tree = myportdb.cp_all(trees=repo_dir_list)
-
-			# Run the update package for all package in the list and in a multiprocessing pool
-			for cp in sorted(package_list_tree):
-				# pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, cp, repo, tbc_settings, config_id,))
-				# use this when debugging
-				update_cpv_db_pool(mysettings, myportdb, cp, repo, tbc_settings, config_id)
-	else:
-		# Update needed repos and packages in the dict
-		for repo, v in repo_cp_dict.items():
-			# Get the repos and update the repos db
-			repo_list = []
-			repo_list.append(repo)
-			update_repo_db(session, repo_list)
-
-			# Run the update package for all package in the list and in a multiprocessing pool
-			for cp in v['cp_list']:
-				# pool.apply_async(update_cpv_db_pool, (mysettings, myportdb, cp, repo, tbc_settings, config_id,))
-				# use this when debugging
-				update_cpv_db_pool(mysettings, myportdb, cp, repo, tbc_settings, config_id)
-
-
-	# close and join the multiprocessing pool
-	# pool.close()
-	# pool.join()
-	log_msg = "Checking categories, package and ebuilds ... done"
-	write_log(session, log_msg, "info", config_id, 'updatedb.update_cpv_db')
-
-def update_db_main(session, repo_cp_dict, config_id):
-	# Main
-	reset_new_updated(session)
-	if repo_cp_dict == {}:
-		return True
-	# Logging
-	tbc_settings = read_config_settings()
-	log_msg = "Update db started."
-	write_log(session, log_msg, "info", config_id, 'updatedb.update_db_main')
-
-	# Update the cpv db
-	update_cpv_db(session, repo_cp_dict, config_id, tbc_settings)
-	log_msg = "Update db ... Done."
-	write_log(session, log_msg, "info", config_id, 'updatedb.update_db_main')
-	return True

diff --git a/requirements.txt b/requirements.txt
new file mode 100644
index 0000000..34d697a
--- /dev/null
+++ b/requirements.txt
@@ -0,0 +1,18 @@
+pbr!=2.1.0,>=2.0.0 # Apache-2.0
+SQLAlchemy>=1.2.19 # MIT
+keystonemiddleware>=4.20.0 # Apache-2.0
+greenlet>=0.4.10,!=0.4.14 # MIT
+keystoneauth1>=3.16.0 # Apache-2.0
+oslo.config>=6.1.0 # Apache-2.0
+oslo.context>=2.21.0 # Apache-2.0
+oslo.log>=3.36.0 # Apache-2.0
+oslo.serialization!=2.19.1,>=2.21.1 # Apache-2.0
+oslo.utils>=3.40.2 # Apache-2.0
+oslo.db>=4.44.0 # Apache-2.0
+oslo.messaging>=10.3.0 # Apache-2.0
+oslo.policy>=2.3.0 # Apache-2.0
+oslo.i18n>=3.15.3 # Apache-2.0
+oslo.service>=1.40.1 # Apache-2.0
+oslo.middleware>=3.31.0 # Apache-2.0
+oslo.versionedobjects>=1.35.0 # Apache-2.0
+openstacksdk>=0.35.0 # Apache-2.0

diff --git a/setup.cfg b/setup.cfg
new file mode 100644
index 0000000..54a9a02
--- /dev/null
+++ b/setup.cfg
@@ -0,0 +1,35 @@
+[metadata]
+name = gosbs
+summary = Gentoo OpenStack Build System
+description-file =
+    README.txt
+author = Gentoo tinderbox cluster project
+author-email = tinderbox-cluster@gentoo.org
+home-page = https://wiki.gentoo.org/wiki/Project:Tinderbox-cluster
+python-requires = >=3.6
+classifier =
+    Environment :: OpenStack
+    Intended Audience :: Information Technology
+    Intended Audience :: System Administrators
+    License :: OSI Approved :: Apache Software License
+    License :: OSI Approved :: GNU General Public License v2 (GPLv2)
+    Operating System :: POSIX :: Linux
+    Programming Language :: Python
+    Programming Language :: Python :: 3
+    Programming Language :: Python :: 3.6
+    Programming Language :: Python :: 3.7
+    Programming Language :: Python :: 3.8
+    Programming Language :: Python :: 3 :: Only
+    Programming Language :: Python :: Implementation :: CPython
+
+[extras]
+osprofiler =
+    osprofiler>=1.4.0 # Apache-2.0
+
+[files]
+packages =
+    gosbs
+
+[entry_points]
+console_scripts =
+    gosbs-scheduler = gosbs.cmd.scheduler:main

diff --git a/setup.py b/setup.py
index 6a96128..566d844 100644
--- a/setup.py
+++ b/setup.py
@@ -1,27 +1,29 @@
-import os
+# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+#    http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+# implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
 
+# THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT
+import setuptools
+
+# In python < 2.7.4, a lazy loading of package `pbr` will break
+# setuptools if some other modules registered functions in `atexit`.
+# solution from: http://bugs.python.org/issue15881#msg170215
 try:
-	from setuptools import setup
+    import multiprocessing  # noqa
 except ImportError:
-	raise
-	from ez_setup import use_setuptools
-	use_setuptools()
-	from setuptools import setup
-
-def find_packages():
-	for dirpath, dirnames, filenames in os.walk('pym'):
-		if '__init__.py' in filenames:
-			yield os.path.relpath(dirpath, 'pym')
+    pass
 
-setup(
-	version = os.path.split(os.path.abspath(__file__))[-2].split('-')[-1],
-	packages = list(find_packages()),
-	package_dir = {'': 'pym'},
-	name="tbc",
-	author='Zorry',
-	author_email='tinderbox-cluster@gentoo.org',
-	url='https://anongit.gentoo.org/git/proj/tinderbox-cluster.git',
-	description='Tinderbox cluster',
-	platforms=["any"],
-	license="GPL2",
-)
+setuptools.setup(
+    setup_requires=['pbr>=2.0.0'],
+    pbr=True)

diff --git a/sql/data_dump.sql b/sql/data_dump.sql
deleted file mode 100644
index 46102b5..0000000
--- a/sql/data_dump.sql
+++ /dev/null
@@ -1,193 +0,0 @@
--- phpMyAdmin SQL Dump
--- version 4.2.13
--- http://www.phpmyadmin.net
---
--- Host: localhost
--- Generation Time: Feb 13, 2016 at 02:37 PM
--- Server version: 10.0.22-MariaDB-log
--- PHP Version: 5.6.16-pl0-gentoo
-
-SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
-SET time_zone = "+00:00";
-
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-
---
--- Database: `tbc`
---
-
---
--- Dumping data for table `configs`
---
-
-INSERT INTO `configs` (`config_id`, `hostname`, `setup_id`, `default_config`) VALUES
-(1, 'sandra.ume.nu', 1, 1),
-(2, 'virtual1.ume.nu', 2, 0),
-(3, 'virtual2.ume.nu', 3, 0),
-(4, 'virtual3.ume.nu', 3, 0),
-(5, 'virtual4.ume.nu', 3, 0),
-(6, 'virtual5.ume.nu', 3, 0);
-
---
--- Dumping data for table `configs_emerge_options`
---
-
-INSERT INTO `configs_emerge_options` (`id`, `config_id`, `eoption_id`) VALUES
-(1, 2, 1),
-(2, 2, 2),
-(3, 2, 5),
-(4, 2, 6),
-(5, 3, 1),
-(6, 3, 2),
-(7, 3, 5),
-(8, 3, 6),
-(9, 4, 1),
-(10, 4, 2),
-(11, 4, 5),
-(12, 4, 6),
-(13, 5, 1),
-(14, 5, 2),
-(15, 5, 5),
-(16, 5, 6),
-(17, 6, 1),
-(18, 6, 2),
-(19, 6, 5),
-(20, 6, 6);
-
---
--- Dumping data for table `configs_metadata`
---
-
-INSERT INTO `configs_metadata` (`id`, `config_id`, `keyword_id`, `make_conf_text`, `checksum`, `configsync`, `active`, `config_error_text`, `updateing`, `status`, `auto`, `repo_path`, `time_stamp`) VALUES
-(1, 1, 1, '# This is for the base config\nCHOST="x86_64-pc-linux-gnu"\nACCEPT_KEYWORDS=""\nARCH="amd64"\nFEATURES="-metadata-transfer -news distlocks"\nACCEPT_LICENSE="*"\nPORTAGE_TMPDIR=/var/tmp\nDISTDIR=/var/cache/portage/distfiles\nPORT_LOGDIR="/var/log/portage"\nGENTOO_MIRRORS="ftp://ftp.sunet.se/pub/Linux/distributions/gentoo http://distfiles.gentoo.org http://www.ibiblio.org/pub/Linux/distributions/gentoo"\nPORTAGE_TMPFS="/dev/shm"\nPORTAGE_ELOG_CLASSES=""\nPORTAGE_ELOG_SYSTEM=""\nPORTDIR_OVERLAY=""\nsource /var/cache/layman/make.conf\n', 'd29a21ae48f047f5036cb17c278ad96a8c7818bbcda29b2e766a8cc6a34a358f', 0, 1, '', 0, 'Waiting', 1, '/var/cache/gobs/tinderboxs_configs', '2015-07-17 22:54:09'),
-(2, 2, 1, 'CFLAGS="-O2 -pipe -march=native"\nCXXFLAGS="-O2 -pipe -march=native"\nCHOST="x86_64-pc-linux-gnu"\nUSE="qt3support xattr semantic-desktop"\nACCEPT_KEYWORDS="~amd64 amd64"\nACCEPT_LICENSE="*"\nPORTAGE_TMPDIR=/var/tmp\nDISTDIR="/var/cache/portage/distfiles"\nPORT_LOGDIR="/var/cache/portage/logs/virtual1.ume.nu/amd64_hardened_unstable"\nPKGDIR="/var/cache/portage/packages/virtual1.ume.nu/amd64_hardened_unstable"\nGENTOO_MIRRORS="ftp://mirror.mdfnet.se/gentoo http://distfiles.gentoo.org"\nEMERGE_DEFAULT_OPTS="-v --binpkg-respect-use=y --rebuild-if-new-rev=y --rebuilt-binaries=y --autounmask=y --autounmask-write=y --jobs=3 --load-average=5.0"\nMAKEOPTS="-j6"\nAUTOCLEAN="yes"\nNOCOLOR="true"\nPORTAGE_TMPFS="/dev/shm"\nFEATURES="sandbox distlocks parallel-fetch strict -news test test-fail-continue"\nPORTAGE_ELOG_CLASSES=""\nPORTAGE_ELOG_SYSTEM="save"\nPORTDIR_OVERLAY="/usr/local/portage"\nLINGUAS="en"\nINPUT_DEVICES="keyboard mouse synaptics evdev"\nVIDEO_CARDS="radeon"\nALSA_CARDS="hda-intel intel8x0 intel8x0m"\nALSA_PCM_PLUGINS="adpcm alaw asym copy dmix dshare dsnoop empty extplug file hooks iec958 ioplug ladspa lfloat linear meter mmap_emul mulaw multi null plug rate route share shm softvol"\nCONFIG_PROTECT_MASK="/etc/portage/package.use/99_autounmask"\n\n# for layman stuff\nsource /var/cache/layman/make.conf\n', '91820213d60929995132060b1d917740ff3b23af2a0745fde592df26ef58b5d7', 0, 1, '', 0, 'Stoped', 0, '', '2015-03-23 22:21:39'),
-(3, 3, 1, 'CFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCXXFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCHOST="x86_64-pc-linux-gnu"\nUSE="X qt3support"\nACCEPT_KEYWORDS="~amd64 amd64"\nABI_X86="32 64"\nCPU_FLAGS_X86=""\nACCEPT_LICENSE="*"\nPYTHON_TARGETS="python2_7 python3_4 python3_5"\nRUBY_TARGETS="ruby20 ruby21 ruby22"\nPORTAGE_TMPDIR=/var/tmp\nDISTDIR="/var/cache/portage/distfiles"\nGENTOO_MIRRORS="ftp://mirror.mdfnet.se/gentoo ftp://trumpetti.atm.tut.fi/gentoo/ http://distfiles.gentoo.org"\nEMERGE_DEFAULT_OPTS="-v --binpkg-respect-use=y --rebuild-if-new-rev=y --rebuilt-binaries=y --binpkg-changed-deps=y --autounmask=y --autounmask-write=y --jobs=3 --load-average=5.0"\nMAKEOPTS="-j6"\nAUTOCLEAN="yes"\nNOCOLOR="true"\nPORTAGE_TMPFS="/dev/shm"\nFEATURES="sandbox distlocks parallel-fetch strict -news test-fail-continue fail-clean"\nPORTAGE_ELOG_CLASSES=""\nPORTAGE_ELOG_SYSTEM="save"\nPORTDIR_OVERLAY="/usr/local/portage"\nLINGUAS="en"\nINPUT_DEVICES="keyboard mouse synaptics evdev"\nVIDEO_CARDS="radeon"\nALSA_CARDS="hda-intel intel8x0 intel8x0m"\nALSA_PCM_PLUGINS="adpcm alaw asym copy dmix dshare dsnoop empty extplug file hooks iec958 ioplug ladspa lfloat linear meter mmap_emul mulaw multi null plug rate route share shm softvol"\nCONFIG_PROTECT_MASK="/etc/portage/package.use/99_autounmask"\nGRUB_PLATFORMS="pc"\n\n# is in the host.conf\n#PORT_LOGDIR="/var/cache/portage/logs/host/setup"\n#PKGDIR="/var/cache/portage/packages/host/setup"\nsource host.conf\n\n# for layman stuff\n#source /var/cache/layman/make.conf\n', '35c38a4f7f2b4873cf6b6df9b21ca390602c1146b61cd514ca5b146a9d3000d0', 1, 1, '', 0, 'Runing', 1, '/var/cache/zobcs/tinderboxs_configs', '2016-02-13 14:36:20'),
-(4, 4, 1, 'CFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCXXFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCHOST="x86_64-pc-linux-gnu"\nUSE="X qt3support"\nACCEPT_KEYWORDS="~amd64 amd64"\nABI_X86="32 64"\nCPU_FLAGS_X86=""\nACCEPT_LICENSE="*"\nPYTHON_TARGETS="python2_7 python3_4 python3_5"\nRUBY_TARGETS="ruby20 ruby21 ruby22"\nPORTAGE_TMPDIR=/var/tmp\nDISTDIR="/var/cache/portage/distfiles"\nGENTOO_MIRRORS="ftp://mirror.mdfnet.se/gentoo ftp://trumpetti.atm.tut.fi/gentoo/ http://distfiles.gentoo.org"\nEMERGE_DEFAULT_OPTS="-v --binpkg-respect-use=y --rebuild-if-new-rev=y --rebuilt-binaries=y --binpkg-changed-deps=y --autounmask=y --autounmask-write=y --jobs=3 --load-average=5.0"\nMAKEOPTS="-j6"\nAUTOCLEAN="yes"\nNOCOLOR="true"\nPORTAGE_TMPFS="/dev/shm"\nFEATURES="sandbox distlocks parallel-fetch strict -news test-fail-continue fail-clean"\nPORTAGE_ELOG_CLASSES=""\nPORTAGE_ELOG_SYSTEM="save"\nPORTDIR_OVERLAY="/usr/local/portage"\nLINGUAS="en"\nINPUT_DEVICES="keyboard mouse synaptics evdev"\nVIDEO_CARDS="radeon"\nALSA_CARDS="hda-intel intel8x0 intel8x0m"\nALSA_PCM_PLUGINS="adpcm alaw asym copy dmix dshare dsnoop empty extplug file hooks iec958 ioplug ladspa lfloat linear meter mmap_emul mulaw multi null plug rate route share shm softvol"\nCONFIG_PROTECT_MASK="/etc/portage/package.use/99_autounmask"\nGRUB_PLATFORMS="pc"\n\n# is in the host.conf\n#PORT_LOGDIR="/var/cache/portage/logs/host/setup"\n#PKGDIR="/var/cache/portage/packages/host/setup"\nsource host.conf\n\n# for layman stuff\n#source /var/cache/layman/make.conf\n', '35c38a4f7f2b4873cf6b6df9b21ca390602c1146b61cd514ca5b146a9d3000d0', 1, 1, '', 0, 'Runing', 1, '/var/cache/zobcs/tinderboxs_configs', '2016-02-13 14:36:20'),
-(5, 5, 1, 'CFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCXXFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCHOST="x86_64-pc-linux-gnu"\nUSE="X qt3support"\nACCEPT_KEYWORDS="~amd64 amd64"\nABI_X86="32 64"\nCPU_FLAGS_X86=""\nACCEPT_LICENSE="*"\nPYTHON_TARGETS="python2_7 python3_4 python3_5"\nRUBY_TARGETS="ruby20 ruby21 ruby22"\nPORTAGE_TMPDIR=/var/tmp\nDISTDIR="/var/cache/portage/distfiles"\nGENTOO_MIRRORS="ftp://mirror.mdfnet.se/gentoo ftp://trumpetti.atm.tut.fi/gentoo/ http://distfiles.gentoo.org"\nEMERGE_DEFAULT_OPTS="-v --binpkg-respect-use=y --rebuild-if-new-rev=y --rebuilt-binaries=y --binpkg-changed-deps=y --autounmask=y --autounmask-write=y --jobs=3 --load-average=5.0"\nMAKEOPTS="-j6"\nAUTOCLEAN="yes"\nNOCOLOR="true"\nPORTAGE_TMPFS="/dev/shm"\nFEATURES="sandbox distlocks parallel-fetch strict -news test-fail-continue fail-clean"\nPORTAGE_ELOG_CLASSES=""\nPORTAGE_ELOG_SYSTEM="save"\nPORTDIR_OVERLAY="/usr/local/portage"\nLINGUAS="en"\nINPUT_DEVICES="keyboard mouse synaptics evdev"\nVIDEO_CARDS="radeon"\nALSA_CARDS="hda-intel intel8x0 intel8x0m"\nALSA_PCM_PLUGINS="adpcm alaw asym copy dmix dshare dsnoop empty extplug file hooks iec958 ioplug ladspa lfloat linear meter mmap_emul mulaw multi null plug rate route share shm softvol"\nCONFIG_PROTECT_MASK="/etc/portage/package.use/99_autounmask"\nGRUB_PLATFORMS="pc"\n\n# is in the host.conf\n#PORT_LOGDIR="/var/cache/portage/logs/host/setup"\n#PKGDIR="/var/cache/portage/packages/host/setup"\nsource host.conf\n\n# for layman stuff\n#source /var/cache/layman/make.conf\n', '35c38a4f7f2b4873cf6b6df9b21ca390602c1146b61cd514ca5b146a9d3000d0', 1, 1, '', 0, 'Runing', 1, '/var/cache/zobcs/tinderboxs_configs', '2016-02-13 14:36:25'),
-(6, 6, 1, 'CFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCXXFLAGS="-O2 -pipe -march=native -fno-diagnostics-color"\nCHOST="x86_64-pc-linux-gnu"\nUSE="X qt3support"\nACCEPT_KEYWORDS="~amd64 amd64"\nABI_X86="32 64"\nCPU_FLAGS_X86=""\nACCEPT_LICENSE="*"\nPYTHON_TARGETS="python2_7 python3_4 python3_5"\nRUBY_TARGETS="ruby20 ruby21 ruby22"\nPORTAGE_TMPDIR=/var/tmp\nDISTDIR="/var/cache/portage/distfiles"\nGENTOO_MIRRORS="ftp://mirror.mdfnet.se/gentoo ftp://trumpetti.atm.tut.fi/gentoo/ http://distfiles.gentoo.org"\nEMERGE_DEFAULT_OPTS="-v --binpkg-respect-use=y --rebuild-if-new-rev=y --rebuilt-binaries=y --binpkg-changed-deps=y --autounmask=y --autounmask-write=y --jobs=3 --load-average=5.0"\nMAKEOPTS="-j6"\nAUTOCLEAN="yes"\nNOCOLOR="true"\nPORTAGE_TMPFS="/dev/shm"\nFEATURES="sandbox distlocks parallel-fetch strict -news test-fail-continue fail-clean"\nPORTAGE_ELOG_CLASSES=""\nPORTAGE_ELOG_SYSTEM="save"\nPORTDIR_OVERLAY="/usr/local/portage"\nLINGUAS="en"\nINPUT_DEVICES="keyboard mouse synaptics evdev"\nVIDEO_CARDS="radeon"\nALSA_CARDS="hda-intel intel8x0 intel8x0m"\nALSA_PCM_PLUGINS="adpcm alaw asym copy dmix dshare dsnoop empty extplug file hooks iec958 ioplug ladspa lfloat linear meter mmap_emul mulaw multi null plug rate route share shm softvol"\nCONFIG_PROTECT_MASK="/etc/portage/package.use/99_autounmask"\nGRUB_PLATFORMS="pc"\n\n# is in the host.conf\n#PORT_LOGDIR="/var/cache/portage/logs/host/setup"\n#PKGDIR="/var/cache/portage/packages/host/setup"\nsource host.conf\n\n# for layman stuff\n#source /var/cache/layman/make.conf\n', '35c38a4f7f2b4873cf6b6df9b21ca390602c1146b61cd514ca5b146a9d3000d0', 1, 1, '', 0, 'Runing', 1, '/var/cache/zobcs/tinderboxs_configs', '2016-02-13 14:36:02');
-
---
--- Dumping data for table `emerge_options`
---
-
-INSERT INTO `emerge_options` (`eoption_id`, `eoption`) VALUES
-(1, '--oneshot'),
-(2, '--depclean'),
-(3, '--nodepclean'),
-(4, '--nooneshot'),
-(5, '--buildpkg'),
-(6, '--usepkg');
-
---
--- Dumping data for table `errors_info`
---
-
-INSERT INTO `errors_info` (`error_id`, `error_name`, `error_search`) VALUES
-(1, 'repoman', 'repoman'),
-(2, 'qa', 'qa'),
-(3, 'others', 'others'),
-(4, 'configure', 'configure phase'),
-(5, 'test', 'test phase'),
-(6, 'install', 'install phase'),
-(7, 'prepare', 'prepare phase'),
-(8, 'compile', 'compile phase'),
-(9, 'setup', 'setup phase');
-
---
--- Dumping data for table `hilight`
---
-
-INSERT INTO `hilight` (`hilight_id`, `hilight_search`, `hilight_search_end`, `hilight_search_pattern`, `hilight_css_id`, `hilight_start`, `hilight_end`) VALUES
-(3, '^ \\* QA Notice:', '^ \\* ', '^ \\* ', 3, 0, 0),
-(4, '^ \\* Package:', '', '', 2, 0, 4),
-(5, '>>> Unpacking', '', '', 1, 0, 0),
-(6, '\\[ ok ]', '', '', 2, 0, 0),
-(7, '\\[ !! ]', '', '', 5, 0, 0),
-(8, '>>> Source', '', '', 1, 0, 0),
-(9, '>>> Preparing', '', '', 1, 0, 0),
-(10, '^ \\* Applying', '', '', 2, 0, 0),
-(11, '>>> Configuring', '', '', 1, 0, 0),
-(12, '^ \\* econf', '', '', 1, 0, 0),
-(13, '>>> Compiling', '', '', 1, 0, 0),
-(14, '>>> Done.', '', '', 1, 0, 0),
-(15, '>>> Merging', '', '', 1, 0, 0),
-(16, '>>> Safely', '', '', 1, 0, 0),
-(17, '>>> Original', '', '', 1, 0, 0),
-(18, 'merged.$', '', '', 1, 0, 0),
-(19, '>>> Extracting info', '', '', 1, 0, 0),
-(20, '>>> Extracting (?!info)', '', '', 1, 0, 0),
-(21, '>>> Regenerating', '', '', 1, 0, 0),
-(22, '>>> Installing', '', '', 1, 0, 0),
-(23, '>>> Test phase', '', '', 1, 0, 0),
-(24, '^ \\* Running', '', '', 1, 0, 0),
-(25, '>>> Install', '', '', 1, 0, 0),
-(26, '>>> Completed installing', '', '', 1, 0, 0),
-(27, '^ \\* ERROR:', '^ \\* S:', '^ \\* (?!S:)', 5, 0, 0),
-(28, ' Error 1', '', '', 5, 2, 1),
-(29, 'undefined reference to', '', '', 5, 0, 0),
-(30, '^ \\* Generating', '', '', 1, 0, 0),
-(31, ': fatal error:', '', '', 5, 0, 0),
-(32, '^ \\* Done', '', '', 2, 0, 0),
-(33, '.patch ...$', '', '', 2, 0, 0),
-(35, '^ \\* Disabling', '', '', 2, 0, 0),
-(37, '^ \\* abi_x86_', '', '', 2, 0, 0),
-(38, '^ \\* >>> SetUID:', '', '', 1, 0, 0),
-(39, '^ \\* >>> SetGID:', '', '', 1, 0, 0),
-(40, 'CMake Error', '', '', 5, 0, 1),
-(41, 'No such file or directory$', '', '', 5, 0, 0),
-(43, '^ \\* Updating', '', '', 1, 0, 0),
-(44, '^strip:', '', '', 1, 0, 0),
-(45, '^ \\* checking', '', '', 1, 0, 0),
-(46, 'files checked ...$', '', '', 1, 0, 0),
-(49, '^ \\* Installing', '', '', 1, 0, 0),
-(48, '^SyntaxError: invalid syntax', '', '', 5, 3, 0),
-(50, '^ \\* Skipping make', '', '', 1, 0, 0),
-(51, 'command not found$', '', '', 5, 0, 0);
-
---
--- Dumping data for table `hilight_css`
---
-
-INSERT INTO `hilight_css` (`hilight_css_id`, `hilight_css_name`, `hilight_css_collor`) VALUES
-(1, 'info1', '#7FFF00'),
-(2, 'info2', 'Green'),
-(3, 'qa1', 'Yellow'),
-(5, 'fail1', 'Red');
-
---
--- Dumping data for table `jobs`
---
-
-INSERT INTO `jobs` (`job_id`, `job_type`, `status`, `user`, `config_id`, `run_config_id`, `time_stamp`) VALUES
-(1, 'updatedb', 'Done', 'cron', 1, 1, '2016-02-20 14:13:32'),
-(2, 'esync', 'Done', 'cron', 1, 1, '2016-02-21 20:02:08'),
-(3, 'removeold_cpv', 'Done', 'cron', 1, 1, '2016-02-21 20:04:51');
-
---
--- Dumping data for table `setups`
---
-
-INSERT INTO `setups` (`setup_id`, `setup`, `profile`) VALUES
-(1, 'base', 'base'),
-(2, 'amd64_hardened_unstable', 'hardened/linux/amd64'),
-(3, 'amd64_default_unstable', 'default/linux/amd64/13.0');
-
---
--- Dumping data for table `tbc_config`
---
-
-INSERT INTO `tbc_config` (`id`, `webirker`, `hostirker`, `webbug`) VALUES
-(1, '77.110.8.76', '192.168.1.4', 'bugs.gentoo.org');
-
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;

diff --git a/sql/structure_dump.sql b/sql/structure_dump.sql
index 2a6f847..320aca7 100644
--- a/sql/structure_dump.sql
+++ b/sql/structure_dump.sql
@@ -1,1061 +1,477 @@
--- phpMyAdmin SQL Dump
--- version 4.2.13
--- http://www.phpmyadmin.net
---
--- Host: localhost
--- Generation Time: Mar 03, 2016 at 03:48 PM
--- Server version: 10.0.22-MariaDB-log
--- PHP Version: 7.0.3-pl0-gentoo
-
-SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
-SET time_zone = "+00:00";
-
-
-/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
-/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
-/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
-/*!40101 SET NAMES utf8 */;
-
---
--- Database: `tbc`
---
-
-DELIMITER $$
---
--- Procedures
---
-CREATE DEFINER=`tbc`@`localhost` PROCEDURE `add_jobs_esync`()
-    MODIFIES SQL DATA
-BEGIN
-  DECLARE in_config_id INT;
-  DECLARE in_job_id INT;
-  SET in_config_id = (SELECT config_id
-    FROM configs WHERE default_config = True);
-  SET in_job_id = (SELECT job_id FROM jobs 
-    WHERE job_type = 'esync'
-    AND config_id = in_config_id 
-    AND status = 'Done'
-    LIMIT 1);
-  IF in_job_id >= 1 THEN
-    UPDATE jobs SET user = 'cron', status = 'Waiting' WHERE job_type = 'esync';
-  ELSE
-  	SET in_job_id = 0;
-  END IF;
-END$$
-
-CREATE DEFINER=`tbc`@`localhost` PROCEDURE `add_jobs_removeold_cpv`()
-    MODIFIES SQL DATA
-BEGIN
-  DECLARE in_config_id INT;
-  DECLARE in_job_id INT;
-  SET in_config_id = (SELECT config_id
-    FROM configs WHERE default_config = True);
-  SET in_job_id = (SELECT job_id FROM jobs 
-    WHERE job_type = 'removeold_cpv'
-    AND config_id = in_config_id 
-    AND status = 'Done'
-    LIMIT 1);
-  IF in_job_id >= 1 THEN
-    UPDATE jobs SET user = 'cron', status = 'Waiting' WHERE job_type = 'removeold_cpv';
-  ELSE
-  	SET in_job_id = 0;
-  END IF;
-END$$
-
-DELIMITER ;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_jobs`
---
-
-CREATE TABLE IF NOT EXISTS `build_jobs` (
-`build_job_id` int(11) NOT NULL,
-  `ebuild_id` int(11) NOT NULL,
-  `setup_id` int(11) NOT NULL,
-  `config_id` int(11) NOT NULL,
-  `status` enum('Waiting','Building','Looked') NOT NULL DEFAULT 'Waiting',
-  `build_now` tinyint(1) NOT NULL,
-  `removebin` tinyint(1) NOT NULL,
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='The build work list';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_jobs_emerge_options`
---
-
-CREATE TABLE IF NOT EXISTS `build_jobs_emerge_options` (
-`id` int(11) NOT NULL,
-  `build_job_id` int(11) NOT NULL,
-  `eoption_id` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_jobs_redo`
---
-
-CREATE TABLE IF NOT EXISTS `build_jobs_redo` (
-`id` int(11) NOT NULL,
-  `build_job_id` int(11) NOT NULL COMMENT 'build job id',
-  `fail_times` int(1) NOT NULL COMMENT 'Fail times max 5',
-  `fail_type` varchar(30) NOT NULL COMMENT 'Type of fail',
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP COMMENT 'Time'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Build jobs that need to be redone';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_jobs_use`
---
-
-CREATE TABLE IF NOT EXISTS `build_jobs_use` (
-`id` int(11) NOT NULL,
-  `build_job_id` int(11) NOT NULL,
-  `use_id` int(11) NOT NULL,
-  `status` tinyint(1) NOT NULL DEFAULT '0'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs` (
-`build_log_id` int(11) NOT NULL,
-  `ebuild_id` int(11) NOT NULL,
-  `fail` tinyint(1) NOT NULL DEFAULT '0',
-  `summery_text` longtext NOT NULL,
-  `log_hash` varchar(100) NOT NULL,
-  `bug_id` int(10) NOT NULL DEFAULT '0',
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Main log info for the builds';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_config`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_config` (
-`log_id` int(11) NOT NULL,
-  `build_log_id` int(11) NOT NULL,
-  `config_id` int(11) NOT NULL,
-  `einfo_id` int(11) NOT NULL,
-  `logname` varchar(150) NOT NULL COMMENT 'filename of the log',
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_emerge_options`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_emerge_options` (
-`id` int(11) NOT NULL,
-  `build_logs_id` int(11) NOT NULL,
-  `eoption_id` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_errors`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_errors` (
-`id` int(11) NOT NULL,
-  `build_log_id` int(11) NOT NULL,
-  `error_id` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_hilight`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_hilight` (
-`id` int(11) NOT NULL,
-  `log_id` int(11) NOT NULL,
-  `start_line` int(11) NOT NULL,
-  `end_line` int(11) NOT NULL,
-  `hilight_css_id` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_qa`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_qa` (
-`id` int(11) NOT NULL,
-  `build_log_id` int(11) NOT NULL,
-  `summery_text` text NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_repoman`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_repoman` (
-`id` int(11) NOT NULL,
-  `build_log_id` int(11) NOT NULL,
-  `summery_text` text NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `build_logs_use`
---
-
-CREATE TABLE IF NOT EXISTS `build_logs_use` (
-`id` int(11) NOT NULL,
-  `build_log_id` int(11) NOT NULL,
-  `use_id` int(11) NOT NULL,
-  `status` tinyint(1) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `categories`
---
-
-CREATE TABLE IF NOT EXISTS `categories` (
-`category_id` int(11) NOT NULL,
-  `category` varchar(50) NOT NULL,
-  `active` tinyint(1) NOT NULL DEFAULT '0',
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Categories main table (C)';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `categories_metadata`
---
-
-CREATE TABLE IF NOT EXISTS `categories_metadata` (
-`id` int(11) NOT NULL,
-  `category_id` int(11) NOT NULL,
-  `checksum` varchar(100) NOT NULL,
-  `descriptions` text NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Categories main table (C)';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `configs`
---
-
-CREATE TABLE IF NOT EXISTS `configs` (
-`config_id` int(11) NOT NULL COMMENT 'Config index',
-  `hostname` varchar(50) NOT NULL,
-  `setup_id` int(11) NOT NULL COMMENT 'setup',
-  `default_config` tinyint(1) NOT NULL COMMENT 'Host setup'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Main config table';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `configs_emerge_options`
---
-
-CREATE TABLE IF NOT EXISTS `configs_emerge_options` (
-`id` int(11) NOT NULL,
-  `config_id` int(11) NOT NULL COMMENT 'config id',
-  `eoption_id` int(11) NOT NULL COMMENT 'emerge option id'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Emerge command options for the configs';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `configs_metadata`
---
-
-CREATE TABLE IF NOT EXISTS `configs_metadata` (
-`id` int(11) NOT NULL,
-  `config_id` int(11) NOT NULL,
-  `keyword_id` int(11) NOT NULL,
-  `make_conf_text` text NOT NULL,
-  `checksum` varchar(100) NOT NULL,
-  `configsync` tinyint(1) NOT NULL,
-  `active` tinyint(1) NOT NULL,
-  `config_error_text` text NOT NULL,
-  `updateing` tinyint(1) NOT NULL,
-  `status` enum('Waiting','Runing','Stoped') NOT NULL,
-  `auto` tinyint(1) NOT NULL,
-  `repo_path` varchar(100) NOT NULL COMMENT 'git repo path for etc/portage',
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Config Status';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `ebuilds`
---
-
-CREATE TABLE IF NOT EXISTS `ebuilds` (
-`ebuild_id` int(11) NOT NULL,
-  `package_id` int(11) NOT NULL,
-  `version` varchar(50) NOT NULL,
-  `checksum` varchar(100) NOT NULL,
-  `active` tinyint(1) NOT NULL DEFAULT '0',
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Version main table (V)';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `ebuilds_iuse`
---
-
-CREATE TABLE IF NOT EXISTS `ebuilds_iuse` (
-`id` int(11) NOT NULL,
-  `ebuild_id` int(11) NOT NULL,
-  `use_id` int(11) NOT NULL,
-  `status` tinyint(1) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `ebuilds_keywords`
---
-
-CREATE TABLE IF NOT EXISTS `ebuilds_keywords` (
-`id` int(11) NOT NULL,
-  `ebuild_id` int(11) NOT NULL,
-  `keyword_id` int(11) NOT NULL,
-  `status` enum('Stable','Unstable','Negative') NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `ebuilds_metadata`
---
-
-CREATE TABLE IF NOT EXISTS `ebuilds_metadata` (
-`id` int(11) NOT NULL,
-  `ebuild_id` int(11) NOT NULL,
-  `commit` varchar(100) NOT NULL COMMENT 'Git commit',
-  `new` tinyint(1) NOT NULL,
-  `descriptions` varchar(200) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `ebuilds_restrictions`
---
-
-CREATE TABLE IF NOT EXISTS `ebuilds_restrictions` (
-`id` int(11) NOT NULL,
-  `ebuild_id` int(11) NOT NULL,
-  `restriction_id` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `emails`
---
-
-CREATE TABLE IF NOT EXISTS `emails` (
-`email_id` int(11) NOT NULL,
-  `email` varchar(150) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `emerge_info`
---
-
-CREATE TABLE IF NOT EXISTS `emerge_info` (
-`einfo_id` int(11) NOT NULL,
-  `emerge_info_text` text NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `emerge_options`
---
-
-CREATE TABLE IF NOT EXISTS `emerge_options` (
-`eoption_id` int(11) NOT NULL COMMENT 'emerge command options id',
-  `eoption` varchar(15) NOT NULL COMMENT 'emerge command options'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `errors_info`
---
-
-CREATE TABLE IF NOT EXISTS `errors_info` (
-`error_id` int(11) NOT NULL,
-  `error_name` varchar(10) NOT NULL,
-  `error_search` varchar(20) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `hilight`
---
-
-CREATE TABLE IF NOT EXISTS `hilight` (
-`hilight_id` int(11) NOT NULL,
-  `hilight_search` varchar(30) NOT NULL,
-  `hilight_search_end` varchar(30) NOT NULL,
-  `hilight_search_pattern` varchar(30) NOT NULL,
-  `hilight_css_id` int(11) NOT NULL,
-  `hilight_start` int(11) NOT NULL,
-  `hilight_end` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `hilight_css`
---
-
-CREATE TABLE IF NOT EXISTS `hilight_css` (
-`hilight_css_id` int(11) NOT NULL,
-  `hilight_css_name` varchar(11) NOT NULL,
-  `hilight_css_collor` varchar(10) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `jobs`
---
-
-CREATE TABLE IF NOT EXISTS `jobs` (
-`job_id` int(11) NOT NULL,
-  `job_type` enum('esync','updatedb','removeold_cpv') NOT NULL,
-  `status` enum('Runing','Done','Waiting') NOT NULL DEFAULT 'Waiting',
-  `user` varchar(20) NOT NULL,
-  `config_id` int(11) NOT NULL,
-  `run_config_id` int(11) NOT NULL,
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `keywords`
---
-
-CREATE TABLE IF NOT EXISTS `keywords` (
-`keyword_id` int(11) NOT NULL COMMENT 'keyword index',
-  `keyword` varchar(15) NOT NULL COMMENT 'keyword'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='KEYWORD';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `logs`
---
-
-CREATE TABLE IF NOT EXISTS `logs` (
-`log_id` int(11) NOT NULL,
-  `config_id` int(11) NOT NULL,
-  `log_type` enum('info','error','debug','qa') NOT NULL,
-  `msg` text NOT NULL,
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `packages`
---
-
-CREATE TABLE IF NOT EXISTS `packages` (
-`package_id` int(11) NOT NULL,
-  `category_id` int(11) NOT NULL,
-  `package` varchar(50) NOT NULL,
-  `repo_id` int(11) NOT NULL,
-  `active` tinyint(1) NOT NULL,
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Packages main table (P)';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `packages_emails`
---
-
-CREATE TABLE IF NOT EXISTS `packages_emails` (
-`id` int(11) NOT NULL,
-  `package_id` int(11) NOT NULL,
-  `email_id` int(11) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `packages_metadata`
---
-
-CREATE TABLE IF NOT EXISTS `packages_metadata` (
-`id` int(11) NOT NULL,
-  `package_id` int(11) NOT NULL,
-  `gitlog` text NOT NULL,
-  `descriptions` text NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `packages_repoman`
---
-
-CREATE TABLE IF NOT EXISTS `packages_repoman` (
-`id` int(11) NOT NULL,
-  `package_id` int(11) NOT NULL,
-  `repoman_hash` varchar(100) NOT NULL,
-  `repoman_text` text NOT NULL,
-  `time_stamp` timestamp NOT NULL DEFAULT CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `repos`
---
-
-CREATE TABLE IF NOT EXISTS `repos` (
-`repo_id` int(11) NOT NULL,
-  `repo` varchar(100) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Repo main table (repo)';
-
--- --------------------------------------------------------
-
---
--- Table structure for table `restrictions`
---
-
-CREATE TABLE IF NOT EXISTS `restrictions` (
-`restriction_id` int(11) NOT NULL,
-  `restriction` varchar(50) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `setups`
---
-
-CREATE TABLE IF NOT EXISTS `setups` (
-`setup_id` int(11) NOT NULL,
-  `setup` varchar(100) NOT NULL,
-  `profile` varchar(150) NOT NULL,
-  `test` tinyint(1) NOT NULL DEFAULT '0',
-  `repoman` tinyint(1) NOT NULL DEFAULT '0'
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `tbc_config`
---
-
-CREATE TABLE IF NOT EXISTS `tbc_config` (
-`id` int(11) NOT NULL,
-  `webirker` varchar(100) NOT NULL,
-  `hostirker` varchar(100) NOT NULL,
-  `webbug` varchar(100) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8;
-
--- --------------------------------------------------------
-
---
--- Table structure for table `uses`
---
-
-CREATE TABLE IF NOT EXISTS `uses` (
-`use_id` int(11) NOT NULL,
-  `flag` varchar(50) NOT NULL
-) ENGINE=MyISAM DEFAULT CHARSET=utf8 COMMENT='Use flags main table';
-
---
--- Indexes for dumped tables
---
-
---
--- Indexes for table `build_jobs`
---
-ALTER TABLE `build_jobs`
- ADD PRIMARY KEY (`build_job_id`), ADD KEY `ebuild_id` (`ebuild_id`), ADD KEY `config_id` (`config_id`), ADD KEY `time_stamp` (`time_stamp`);
-
---
--- Indexes for table `build_jobs_emerge_options`
---
-ALTER TABLE `build_jobs_emerge_options`
- ADD PRIMARY KEY (`id`), ADD KEY `build_job_id` (`build_job_id`), ADD KEY `eoption_id` (`eoption_id`);
-
---
--- Indexes for table `build_jobs_redo`
---
-ALTER TABLE `build_jobs_redo`
- ADD PRIMARY KEY (`id`), ADD KEY `build_job_id` (`build_job_id`);
-
---
--- Indexes for table `build_jobs_use`
---
-ALTER TABLE `build_jobs_use`
- ADD PRIMARY KEY (`id`), ADD KEY `build_job_id` (`build_job_id`), ADD KEY `use_id` (`use_id`);
-
---
--- Indexes for table `build_logs`
---
-ALTER TABLE `build_logs`
- ADD PRIMARY KEY (`build_log_id`), ADD KEY `ebuild_id` (`ebuild_id`);
-
---
--- Indexes for table `build_logs_config`
---
-ALTER TABLE `build_logs_config`
- ADD PRIMARY KEY (`log_id`), ADD KEY `config_id` (`config_id`), ADD KEY `build_log_id` (`build_log_id`), ADD KEY `einfo_id` (`einfo_id`);
-
---
--- Indexes for table `build_logs_emerge_options`
---
-ALTER TABLE `build_logs_emerge_options`
- ADD PRIMARY KEY (`id`), ADD KEY `eoption_id` (`eoption_id`), ADD KEY `build_logs_id` (`build_logs_id`);
-
---
--- Indexes for table `build_logs_errors`
---
-ALTER TABLE `build_logs_errors`
- ADD PRIMARY KEY (`id`), ADD KEY `build_log_id` (`build_log_id`), ADD KEY `error_id` (`error_id`);
-
---
--- Indexes for table `build_logs_hilight`
---
-ALTER TABLE `build_logs_hilight`
- ADD PRIMARY KEY (`id`), ADD KEY `log_id` (`log_id`), ADD KEY `hilight_id` (`hilight_css_id`), ADD KEY `hilight_css_id` (`hilight_css_id`);
-
---
--- Indexes for table `build_logs_qa`
---
-ALTER TABLE `build_logs_qa`
- ADD PRIMARY KEY (`id`), ADD KEY `build_logs_id` (`build_log_id`);
-
---
--- Indexes for table `build_logs_repoman`
---
-ALTER TABLE `build_logs_repoman`
- ADD PRIMARY KEY (`id`), ADD KEY `build_logs_id` (`build_log_id`);
-
---
--- Indexes for table `build_logs_use`
---
-ALTER TABLE `build_logs_use`
- ADD PRIMARY KEY (`id`), ADD KEY `build_log_id` (`build_log_id`), ADD KEY `use_id` (`use_id`);
-
---
--- Indexes for table `categories`
---
-ALTER TABLE `categories`
- ADD PRIMARY KEY (`category_id`);
-
---
--- Indexes for table `categories_metadata`
---
-ALTER TABLE `categories_metadata`
- ADD PRIMARY KEY (`id`);
-
---
--- Indexes for table `configs`
---
-ALTER TABLE `configs`
- ADD PRIMARY KEY (`config_id`);
-
---
--- Indexes for table `configs_emerge_options`
---
-ALTER TABLE `configs_emerge_options`
- ADD PRIMARY KEY (`id`), ADD KEY `config_id` (`config_id`), ADD KEY `eoption_id` (`eoption_id`);
-
---
--- Indexes for table `configs_metadata`
---
-ALTER TABLE `configs_metadata`
- ADD PRIMARY KEY (`id`), ADD KEY `keyword_id` (`keyword_id`), ADD KEY `config_id` (`config_id`);
-
---
--- Indexes for table `ebuilds`
---
-ALTER TABLE `ebuilds`
- ADD PRIMARY KEY (`ebuild_id`), ADD KEY `package_id` (`package_id`), ADD KEY `checksum` (`checksum`), ADD KEY `version` (`version`);
-
---
--- Indexes for table `ebuilds_iuse`
---
-ALTER TABLE `ebuilds_iuse`
- ADD PRIMARY KEY (`id`), ADD KEY `ebuild_id` (`ebuild_id`), ADD KEY `use_id` (`use_id`);
-
---
--- Indexes for table `ebuilds_keywords`
---
-ALTER TABLE `ebuilds_keywords`
- ADD PRIMARY KEY (`id`), ADD KEY `ebuild_id` (`ebuild_id`), ADD KEY `keyword_id` (`keyword_id`);
-
---
--- Indexes for table `ebuilds_metadata`
---
-ALTER TABLE `ebuilds_metadata`
- ADD PRIMARY KEY (`id`), ADD KEY `ebuild_id` (`ebuild_id`);
-
---
--- Indexes for table `ebuilds_restrictions`
---
-ALTER TABLE `ebuilds_restrictions`
- ADD PRIMARY KEY (`id`), ADD KEY `restriction_id` (`restriction_id`), ADD KEY `ebuild_id` (`ebuild_id`);
-
---
--- Indexes for table `emails`
---
-ALTER TABLE `emails`
- ADD PRIMARY KEY (`email_id`);
-
---
--- Indexes for table `emerge_info`
---
-ALTER TABLE `emerge_info`
- ADD UNIQUE KEY `einfo_id` (`einfo_id`);
-
---
--- Indexes for table `emerge_options`
---
-ALTER TABLE `emerge_options`
- ADD PRIMARY KEY (`eoption_id`);
-
---
--- Indexes for table `errors_info`
---
-ALTER TABLE `errors_info`
- ADD PRIMARY KEY (`error_id`);
-
---
--- Indexes for table `hilight`
---
-ALTER TABLE `hilight`
- ADD PRIMARY KEY (`hilight_id`), ADD KEY `hilight_css_id` (`hilight_css_id`);
-
---
--- Indexes for table `hilight_css`
---
-ALTER TABLE `hilight_css`
- ADD PRIMARY KEY (`hilight_css_id`);
-
---
--- Indexes for table `jobs`
---
-ALTER TABLE `jobs`
- ADD PRIMARY KEY (`job_id`), ADD KEY `config_id` (`config_id`), ADD KEY `run_config_id` (`run_config_id`), ADD KEY `job_type_id` (`job_type`);
-
---
--- Indexes for table `keywords`
---
-ALTER TABLE `keywords`
- ADD PRIMARY KEY (`keyword_id`);
-
---
--- Indexes for table `logs`
---
-ALTER TABLE `logs`
- ADD PRIMARY KEY (`log_id`), ADD KEY `config_id` (`config_id`);
-
---
--- Indexes for table `packages`
---
-ALTER TABLE `packages`
- ADD PRIMARY KEY (`package_id`), ADD KEY `category_id` (`category_id`), ADD KEY `repo_id` (`repo_id`), ADD KEY `package` (`package`);
-
---
--- Indexes for table `packages_emails`
---
-ALTER TABLE `packages_emails`
- ADD PRIMARY KEY (`id`), ADD KEY `package_id` (`package_id`,`email_id`);
-
---
--- Indexes for table `packages_metadata`
---
-ALTER TABLE `packages_metadata`
- ADD PRIMARY KEY (`id`), ADD KEY `package_id` (`package_id`);
-
---
--- Indexes for table `packages_repoman`
---
-ALTER TABLE `packages_repoman`
- ADD PRIMARY KEY (`id`);
-
---
--- Indexes for table `repos`
---
-ALTER TABLE `repos`
- ADD PRIMARY KEY (`repo_id`);
-
---
--- Indexes for table `restrictions`
---
-ALTER TABLE `restrictions`
- ADD PRIMARY KEY (`restriction_id`);
-
---
--- Indexes for table `setups`
---
-ALTER TABLE `setups`
- ADD PRIMARY KEY (`setup_id`), ADD UNIQUE KEY `setup_id` (`setup_id`);
-
---
--- Indexes for table `tbc_config`
---
-ALTER TABLE `tbc_config`
- ADD PRIMARY KEY (`id`);
-
---
--- Indexes for table `uses`
---
-ALTER TABLE `uses`
- ADD PRIMARY KEY (`use_id`);
-
---
--- AUTO_INCREMENT for dumped tables
---
-
---
--- AUTO_INCREMENT for table `build_jobs`
---
-ALTER TABLE `build_jobs`
-MODIFY `build_job_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_jobs_emerge_options`
---
-ALTER TABLE `build_jobs_emerge_options`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_jobs_redo`
---
-ALTER TABLE `build_jobs_redo`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_jobs_use`
---
-ALTER TABLE `build_jobs_use`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs`
---
-ALTER TABLE `build_logs`
-MODIFY `build_log_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_config`
---
-ALTER TABLE `build_logs_config`
-MODIFY `log_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_emerge_options`
---
-ALTER TABLE `build_logs_emerge_options`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_errors`
---
-ALTER TABLE `build_logs_errors`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_hilight`
---
-ALTER TABLE `build_logs_hilight`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_qa`
---
-ALTER TABLE `build_logs_qa`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_repoman`
---
-ALTER TABLE `build_logs_repoman`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `build_logs_use`
---
-ALTER TABLE `build_logs_use`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `categories`
---
-ALTER TABLE `categories`
-MODIFY `category_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `categories_metadata`
---
-ALTER TABLE `categories_metadata`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `configs`
---
-ALTER TABLE `configs`
-MODIFY `config_id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'Config index';
---
--- AUTO_INCREMENT for table `configs_emerge_options`
---
-ALTER TABLE `configs_emerge_options`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `configs_metadata`
---
-ALTER TABLE `configs_metadata`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `ebuilds`
---
-ALTER TABLE `ebuilds`
-MODIFY `ebuild_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `ebuilds_iuse`
---
-ALTER TABLE `ebuilds_iuse`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `ebuilds_keywords`
---
-ALTER TABLE `ebuilds_keywords`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `ebuilds_metadata`
---
-ALTER TABLE `ebuilds_metadata`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `ebuilds_restrictions`
---
-ALTER TABLE `ebuilds_restrictions`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `emails`
---
-ALTER TABLE `emails`
-MODIFY `email_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `emerge_info`
---
-ALTER TABLE `emerge_info`
-MODIFY `einfo_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `emerge_options`
---
-ALTER TABLE `emerge_options`
-MODIFY `eoption_id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'emerge command options id';
---
--- AUTO_INCREMENT for table `errors_info`
---
-ALTER TABLE `errors_info`
-MODIFY `error_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `hilight`
---
-ALTER TABLE `hilight`
-MODIFY `hilight_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `hilight_css`
---
-ALTER TABLE `hilight_css`
-MODIFY `hilight_css_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `jobs`
---
-ALTER TABLE `jobs`
-MODIFY `job_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `keywords`
---
-ALTER TABLE `keywords`
-MODIFY `keyword_id` int(11) NOT NULL AUTO_INCREMENT COMMENT 'keyword index';
---
--- AUTO_INCREMENT for table `logs`
---
-ALTER TABLE `logs`
-MODIFY `log_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `packages`
---
-ALTER TABLE `packages`
-MODIFY `package_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `packages_emails`
---
-ALTER TABLE `packages_emails`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `packages_metadata`
---
-ALTER TABLE `packages_metadata`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `packages_repoman`
---
-ALTER TABLE `packages_repoman`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `repos`
---
-ALTER TABLE `repos`
-MODIFY `repo_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `restrictions`
---
-ALTER TABLE `restrictions`
-MODIFY `restriction_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `setups`
---
-ALTER TABLE `setups`
-MODIFY `setup_id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `tbc_config`
---
-ALTER TABLE `tbc_config`
-MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
---
--- AUTO_INCREMENT for table `uses`
---
-ALTER TABLE `uses`
-MODIFY `use_id` int(11) NOT NULL AUTO_INCREMENT;
-DELIMITER $$
---
--- Events
---
-CREATE DEFINER=`tbc`@`localhost` EVENT `add_esync_jobs` ON SCHEDULE EVERY 1 HOUR STARTS '2012-12-23 17:15:13' ON COMPLETION NOT PRESERVE ENABLE DO BEGIN
-  CALL add_jobs_esync();
-END$$
-
-CREATE DEFINER=`tbc`@`localhost` EVENT `add_removeold_cpv_jobs` ON SCHEDULE EVERY 24 HOUR STARTS '2016-02-21 21:00:22' ON COMPLETION NOT PRESERVE ENABLE DO BEGIN
-  CALL add_jobs_removeold_cpv();
-END$$
-
-DELIMITER ;
-
-/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
-/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
-/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
+SET SQL_MODE = "NO_AUTO_VALUE_ON_ZERO";
+SET AUTOCOMMIT = 0;
+START TRANSACTION;
+SET time_zone = "+00:00";
+
+/*!40101 SET @OLD_CHARACTER_SET_CLIENT=@@CHARACTER_SET_CLIENT */;
+/*!40101 SET @OLD_CHARACTER_SET_RESULTS=@@CHARACTER_SET_RESULTS */;
+/*!40101 SET @OLD_COLLATION_CONNECTION=@@COLLATION_CONNECTION */;
+/*!40101 SET NAMES utf8mb4 */;
+
+
+CREATE TABLE `builds_uses` (
+  `id` int(11) NOT NULL,
+  `build_uuid` varchar(36) NOT NULL,
+  `use_id` int(11) NOT NULL,
+  `status` tinyint(1) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `categories` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `uuid` varchar(36) NOT NULL,
+  `status` enum('failed','completed','in-progress','waiting') DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `categories_metadata` (
+  `id` int(11) NOT NULL,
+  `category_uuid` varchar(36) NOT NULL,
+  `checksum` varchar(255) DEFAULT NULL,
+  `description` text DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `ebuilds` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `version` varchar(255) NOT NULL,
+  `checksum` varchar(255) NOT NULL,
+  `uuid` varchar(36) NOT NULL,
+  `package_uuid` varchar(36) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `ebuilds_keywords` (
+  `id` int(11) NOT NULL,
+  `ebuild_uuid` varchar(36) NOT NULL,
+  `keyword_id` int(11) NOT NULL,
+  `status` enum('stable','unstable','negative') DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `ebuilds_metadata` (
+  `id` int(11) NOT NULL,
+  `ebuild_uuid` varchar(36) NOT NULL,
+  `commit` varchar(255) NOT NULL,
+  `commit_msg` text DEFAULT NULL,
+  `description` text DEFAULT NULL,
+  `slot` varchar(30) NOT NULL,
+  `homepage` varchar(500) NOT NULL,
+  `license` varchar(500) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `ebuilds_restrictions` (
+  `id` int(11) NOT NULL,
+  `ebuild_uuid` varchar(36) NOT NULL,
+  `restriction_id` int(11) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `ebuilds_uses` (
+  `id` int(11) NOT NULL,
+  `ebuild_uuid` varchar(36) NOT NULL,
+  `use_id` int(11) NOT NULL,
+  `status` tinyint(1) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `emails` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `email` varchar(255) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `keywords` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `keyword` varchar(255) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `migrate_version` (
+  `repository_id` varchar(250) NOT NULL,
+  `repository_path` text DEFAULT NULL,
+  `version` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `packages` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `uuid` varchar(36) NOT NULL,
+  `status` enum('failed','completed','in-progress','waiting') DEFAULT NULL,
+  `category_uuid` varchar(36) NOT NULL,
+  `repo_uuid` varchar(36) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `packages_emails` (
+  `id` int(11) NOT NULL,
+  `package_uuid` varchar(36) NOT NULL,
+  `email_id` int(11) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `packages_metadata` (
+  `id` int(11) NOT NULL,
+  `package_uuid` varchar(36) NOT NULL,
+  `gitlog` text DEFAULT NULL,
+  `description` text DEFAULT NULL,
+  `checksum` varchar(255) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `projects` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `uuid` varchar(36) NOT NULL,
+  `active` tinyint(1) DEFAULT NULL,
+  `auto` tinyint(1) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `projects_builds` (
+  `uuid` varchar(36) NOT NULL,
+  `ebuild_uuid` varchar(36) NOT NULL,
+  `project_uuid` varchar(36) NOT NULL,
+  `user_id` int(10) NOT NULL,
+  `status` enum('failed','completed','in-progress','waiting') NOT NULL,
+  `priority` int(1) NOT NULL DEFAULT 5,
+  `deleted` tinyint(1) NOT NULL DEFAULT 0,
+  `deleted_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `created_at` datetime DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `projects_metadata` (
+  `id` int(11) NOT NULL,
+  `project_uuid` varchar(36) NOT NULL,
+  `titel` varchar(50) NOT NULL,
+  `description` text NOT NULL,
+  `project_repo_uuid` varchar(36) NOT NULL,
+  `project_profile` varchar(50) NOT NULL,
+  `project_profile_repo_uuid` varchar(36) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `projects_repos` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `repo_uuid` varchar(36) DEFAULT NULL,
+  `project_uuid` varchar(36) DEFAULT NULL,
+  `build` tinyint(1) DEFAULT NULL,
+  `test` tinyint(1) NOT NULL,
+  `repoman` tinyint(1) NOT NULL,
+  `qa` tinyint(1) NOT NULL,
+  `depclean` tinyint(1) NOT NULL,
+  `auto` tinyint(1) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `repos` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `name` varchar(255) NOT NULL,
+  `uuid` varchar(36) NOT NULL,
+  `src_url` varchar(255) NOT NULL,
+  `description` text DEFAULT NULL,
+  `auto` tinyint(1) DEFAULT NULL,
+  `status` enum('failed','completed','in-progress','waiting') DEFAULT NULL,
+  `repo_type` enum('project','ebuild') DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `restrictions` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `restriction` varchar(255) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `services` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `uuid` varchar(36) DEFAULT NULL,
+  `host` varchar(255) DEFAULT NULL,
+  `binary` varchar(255) DEFAULT NULL,
+  `topic` varchar(255) DEFAULT NULL,
+  `report_count` int(11) NOT NULL,
+  `disabled` tinyint(1) DEFAULT NULL,
+  `disabled_reason` varchar(255) DEFAULT NULL,
+  `last_seen_up` datetime DEFAULT NULL,
+  `forced_down` varchar(255) DEFAULT NULL,
+  `version` int(11) DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `services_repos` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `repo_uuid` varchar(36) DEFAULT NULL,
+  `service_uuid` varchar(36) DEFAULT NULL,
+  `auto` tinyint(1) NOT NULL,
+  `status` enum('failed','completed','in-progress','waiting','stopped','rebuild_db','update_db') NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `tasks` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `uuid` varchar(36) NOT NULL,
+  `name` varchar(255) DEFAULT NULL,
+  `service_uuid` varchar(36) DEFAULT NULL,
+  `repet` tinyint(1) DEFAULT NULL,
+  `run` datetime DEFAULT NULL,
+  `status` enum('failed','completed','in-progress','waiting') DEFAULT NULL,
+  `last` datetime DEFAULT NULL,
+  `priority` int(11) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `users` (
+  `id` int(10) NOT NULL,
+  `user_id` int(11) NOT NULL,
+  `name` varchar(255) NOT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+CREATE TABLE `uses` (
+  `created_at` datetime DEFAULT NULL,
+  `updated_at` datetime DEFAULT NULL,
+  `deleted_at` datetime DEFAULT NULL,
+  `deleted` tinyint(1) DEFAULT NULL,
+  `id` int(11) NOT NULL,
+  `flag` varchar(255) NOT NULL,
+  `description` text DEFAULT NULL
+) ENGINE=InnoDB DEFAULT CHARSET=utf8;
+
+
+ALTER TABLE `builds_uses`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `builds_uses_uuid_fkey` (`build_uuid`) USING BTREE,
+  ADD KEY `builds_uses_use_id_fkey` (`use_id`) USING BTREE;
+
+ALTER TABLE `categories`
+  ADD PRIMARY KEY (`uuid`);
+
+ALTER TABLE `categories_metadata`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `categories_metadata_uuid_fkey` (`category_uuid`) USING BTREE;
+
+ALTER TABLE `ebuilds`
+  ADD PRIMARY KEY (`uuid`),
+  ADD KEY `ebuilds_package_uuid_fkey` (`package_uuid`);
+
+ALTER TABLE `ebuilds_keywords`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `ebuilds_keywords_keyword_id_fkey` (`keyword_id`),
+  ADD KEY `ebuild_uuid` (`ebuild_uuid`) USING BTREE;
+
+ALTER TABLE `ebuilds_metadata`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `ebuild_uuid` (`ebuild_uuid`) USING BTREE;
+
+ALTER TABLE `ebuilds_restrictions`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `ebuilds_restrictions_uuid_fkey` (`ebuild_uuid`),
+  ADD KEY `ebuilds_restrictions_restriction_id_fkey` (`restriction_id`);
+
+ALTER TABLE `ebuilds_uses`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `ebuilds_uses_uuid_fkey` (`ebuild_uuid`),
+  ADD KEY `ebuilds_uses_use_id_fkey` (`use_id`);
+
+ALTER TABLE `emails`
+  ADD PRIMARY KEY (`id`);
+
+ALTER TABLE `keywords`
+  ADD PRIMARY KEY (`id`);
+
+ALTER TABLE `migrate_version`
+  ADD PRIMARY KEY (`repository_id`);
+
+ALTER TABLE `packages`
+  ADD PRIMARY KEY (`uuid`),
+  ADD KEY `packages_category_uuid_fkey` (`category_uuid`),
+  ADD KEY `packages_repo_uuid_fkey` (`repo_uuid`);
+
+ALTER TABLE `packages_emails`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `packages_email_email_id_fkey` (`email_id`),
+  ADD KEY `package_uuid` (`package_uuid`) USING BTREE;
+
+ALTER TABLE `packages_metadata`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `packages_metadata_uuid_fkey` (`package_uuid`) USING BTREE;
+
+ALTER TABLE `projects`
+  ADD PRIMARY KEY (`uuid`),
+  ADD UNIQUE KEY `name` (`name`);
+
+ALTER TABLE `projects_builds`
+  ADD PRIMARY KEY (`uuid`);
+
+ALTER TABLE `projects_metadata`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `projects_metadata_uuid_fkey` (`project_uuid`) USING BTREE,
+  ADD KEY `project_repo_uuid` (`project_repo_uuid`),
+  ADD KEY `project_profile_repo_uuid` (`project_profile_repo_uuid`);
+
+ALTER TABLE `projects_repos`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `projects_repos_repo_uuid_fkey` (`repo_uuid`),
+  ADD KEY `projects_repos_project_uuid_fkey` (`project_uuid`);
+
+ALTER TABLE `repos`
+  ADD PRIMARY KEY (`uuid`);
+
+ALTER TABLE `restrictions`
+  ADD PRIMARY KEY (`id`);
+
+ALTER TABLE `services`
+  ADD PRIMARY KEY (`id`),
+  ADD UNIQUE KEY `uniq_services0host0topic0deleted` (`host`,`topic`,`deleted`),
+  ADD UNIQUE KEY `uniq_services0host0binary0deleted` (`host`,`binary`,`deleted`);
+
+ALTER TABLE `services_repos`
+  ADD PRIMARY KEY (`id`),
+  ADD KEY `services_repos_repo_uuid_fkey` (`repo_uuid`),
+  ADD KEY `services_repos_service_uuid_fkey` (`service_uuid`);
+
+ALTER TABLE `tasks`
+  ADD PRIMARY KEY (`uuid`),
+  ADD UNIQUE KEY `uuid` (`uuid`);
+
+ALTER TABLE `users`
+  ADD PRIMARY KEY (`id`);
+
+ALTER TABLE `uses`
+  ADD PRIMARY KEY (`id`);
+
+
+ALTER TABLE `builds_uses`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `categories_metadata`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `ebuilds_keywords`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `ebuilds_metadata`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `ebuilds_restrictions`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `ebuilds_uses`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `emails`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `keywords`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `packages_emails`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `packages_metadata`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `projects_metadata`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `projects_repos`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `restrictions`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `services`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `services_repos`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `users`
+  MODIFY `id` int(10) NOT NULL AUTO_INCREMENT;
+
+ALTER TABLE `uses`
+  MODIFY `id` int(11) NOT NULL AUTO_INCREMENT;
+
+
+ALTER TABLE `categories_metadata`
+  ADD CONSTRAINT `categories_metadata_ibfk_1` FOREIGN KEY (`category_uuid`) REFERENCES `categories` (`uuid`),
+  ADD CONSTRAINT `categories_metadata_uuid_fkey` FOREIGN KEY (`category_uuid`) REFERENCES `categories` (`uuid`);
+
+ALTER TABLE `ebuilds`
+  ADD CONSTRAINT `ebuilds_ibfk_1` FOREIGN KEY (`package_uuid`) REFERENCES `packages` (`uuid`),
+  ADD CONSTRAINT `ebuilds_package_uuid_fkey` FOREIGN KEY (`package_uuid`) REFERENCES `packages` (`uuid`);
+
+ALTER TABLE `ebuilds_keywords`
+  ADD CONSTRAINT `ebuilds_keywords_ibfk_1` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`),
+  ADD CONSTRAINT `ebuilds_keywords_ibfk_2` FOREIGN KEY (`keyword_id`) REFERENCES `keywords` (`id`),
+  ADD CONSTRAINT `ebuilds_keywords_keyword_id_fkey` FOREIGN KEY (`keyword_id`) REFERENCES `keywords` (`id`),
+  ADD CONSTRAINT `ebuilds_keywords_uuid_fkey` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`);
+
+ALTER TABLE `ebuilds_metadata`
+  ADD CONSTRAINT `ebuilds_metadata_ibfk_1` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`);
+
+ALTER TABLE `ebuilds_restrictions`
+  ADD CONSTRAINT `ebuilds_restrictions_ibfk_1` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`),
+  ADD CONSTRAINT `ebuilds_restrictions_ibfk_2` FOREIGN KEY (`restriction_id`) REFERENCES `restrictions` (`id`),
+  ADD CONSTRAINT `ebuilds_restrictions_restriction_id_fkey` FOREIGN KEY (`restriction_id`) REFERENCES `restrictions` (`id`),
+  ADD CONSTRAINT `ebuilds_restrictions_uuid_fkey` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`);
+
+ALTER TABLE `ebuilds_uses`
+  ADD CONSTRAINT `ebuilds_uses_ibfk_1` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`),
+  ADD CONSTRAINT `ebuilds_uses_ibfk_2` FOREIGN KEY (`use_id`) REFERENCES `uses` (`id`),
+  ADD CONSTRAINT `ebuilds_uses_use_id_fkey` FOREIGN KEY (`use_id`) REFERENCES `uses` (`id`),
+  ADD CONSTRAINT `ebuilds_uses_uuid_fkey` FOREIGN KEY (`ebuild_uuid`) REFERENCES `ebuilds` (`uuid`);
+
+ALTER TABLE `packages`
+  ADD CONSTRAINT `packages_category_uuid_fkey` FOREIGN KEY (`category_uuid`) REFERENCES `categories` (`uuid`),
+  ADD CONSTRAINT `packages_ibfk_1` FOREIGN KEY (`category_uuid`) REFERENCES `categories` (`uuid`),
+  ADD CONSTRAINT `packages_ibfk_2` FOREIGN KEY (`repo_uuid`) REFERENCES `repos` (`uuid`),
+  ADD CONSTRAINT `packages_repo_uuid_fkey` FOREIGN KEY (`repo_uuid`) REFERENCES `repos` (`uuid`);
+
+ALTER TABLE `packages_emails`
+  ADD CONSTRAINT `packages_email_email_id_fkey` FOREIGN KEY (`email_id`) REFERENCES `emails` (`id`),
+  ADD CONSTRAINT `packages_emails_ibfk_1` FOREIGN KEY (`package_uuid`) REFERENCES `packages` (`uuid`),
+  ADD CONSTRAINT `packages_emails_ibfk_2` FOREIGN KEY (`email_id`) REFERENCES `emails` (`id`);
+
+ALTER TABLE `packages_metadata`
+  ADD CONSTRAINT `packages_metadata_ibfk_1` FOREIGN KEY (`package_uuid`) REFERENCES `packages` (`uuid`),
+  ADD CONSTRAINT `packages_metadata_uuid_fkey` FOREIGN KEY (`package_uuid`) REFERENCES `packages` (`uuid`);
+
+ALTER TABLE `projects_metadata`
+  ADD CONSTRAINT `projects_metadata_ibfk_1` FOREIGN KEY (`project_uuid`) REFERENCES `projects` (`uuid`),
+  ADD CONSTRAINT `projects_metadata_profile_repo_uuid_fkey` FOREIGN KEY (`project_profile_repo_uuid`) REFERENCES `repos` (`uuid`),
+  ADD CONSTRAINT `projects_metadata_repo_uuid_fkey` FOREIGN KEY (`project_repo_uuid`) REFERENCES `repos` (`uuid`),
+  ADD CONSTRAINT `projects_metadata_uuid_fkey` FOREIGN KEY (`project_uuid`) REFERENCES `projects` (`uuid`);
+
+ALTER TABLE `projects_repos`
+  ADD CONSTRAINT `projects_repos_ibfk_1` FOREIGN KEY (`repo_uuid`) REFERENCES `repos` (`uuid`),
+  ADD CONSTRAINT `projects_repos_ibfk_2` FOREIGN KEY (`project_uuid`) REFERENCES `projects` (`uuid`),
+  ADD CONSTRAINT `projects_repos_project_uuid_fkey` FOREIGN KEY (`project_uuid`) REFERENCES `projects` (`uuid`),
+  ADD CONSTRAINT `projects_repos_repo_uuid_fkey` FOREIGN KEY (`repo_uuid`) REFERENCES `repos` (`uuid`);
+COMMIT;
+
+/*!40101 SET CHARACTER_SET_CLIENT=@OLD_CHARACTER_SET_CLIENT */;
+/*!40101 SET CHARACTER_SET_RESULTS=@OLD_CHARACTER_SET_RESULTS */;
+/*!40101 SET COLLATION_CONNECTION=@OLD_COLLATION_CONNECTION */;
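
(Illustrative only, not part of the commit: a dump in this format is normally loaded into an empty MariaDB/MySQL database from the client. The database name `gosbs` and the dump path below are placeholders; the real values depend on the database settings in gosbs.conf.)

  -- inside the mysql client, as a user with CREATE privileges:
  CREATE DATABASE gosbs CHARACTER SET utf8;
  USE gosbs;
  SOURCE /path/to/this/dump.sql;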

